/*
 * These routines enable/disable the pagefault handler in that
 * it will not take any locks and go straight to the fixup table.
 *
 * They have great resemblance to the preempt_disable/enable calls
 * and in fact they are identical; this is because currently there is
 * no other way to make the pagefault handlers do this. So we do
 * disable preemption but we don't necessarily care about that.
 */
static inline void pagefault_disable(void)
{
	preempt_count_inc();
	/*
	 * make sure to have issued the store before a pagefault
	 * can hit.
	 */
	barrier();
}
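For symmetry, the matching pagefault_enable() simply reverses these steps. A sketch from memory of the same-era include/linux/uaccess.h (the exact body differs between kernel versions):

static inline void pagefault_enable(void)
{
	/*
	 * make sure to issue those last loads/stores before enabling
	 * the pagefault handler again.
	 */
	barrier();
	preempt_count_dec();
}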
/*
 * The reason for kmap_high_get() is to ensure that the currently kmap'd
 * page usage count does not decrease to zero while we're using its
 * existing virtual mapping in an atomic context.
 */
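The function body itself is elided above; from memory, the mm/highmem.c implementation looks roughly like this (lock_kmap_any()/unlock_kmap_any() are wrappers around the kmap lock that only use the irqsave variants when needed):

#ifdef ARCH_NEEDS_KMAP_HIGH_GET
/**
 * kmap_high_get - pin a highmem page into memory
 * @page: &struct page to pin
 *
 * Returns the page's current virtual memory address, or NULL if no mapping
 * exists.  If and only if a non null address is returned then a
 * matching call to kunmap_high() is necessary.
 *
 * This can be called from any context.
 */
void *kmap_high_get(struct page *page)
{
	unsigned long vaddr, flags;

	lock_kmap_any(flags);
	vaddr = (unsigned long)page_address(page);
	if (vaddr) {
		BUG_ON(pkmap_count[PKMAP_NR(vaddr)] < 1);
		pkmap_count[PKMAP_NR(vaddr)]++;
	}
	unlock_kmap_any(flags);
	return (void *)vaddr;
}
#endif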
The lock here protects pkmap_count, and the BUG_ON asserts that the usage count of the page being pinned has not already dropped to zero while it is used in atomic context.
How the virtual address of a high memory page is looked up:
/**
 * page_address - get the mapped virtual address of a page
 * @page: &struct page to get the virtual address of
 *
 * Returns the page's virtual address.
 */
void *page_address(const struct page *page)
{
	unsigned long flags;
	void *ret;
	struct page_address_slot *pas;

	if (!PageHighMem(page))
		return lowmem_page_address(page);

	pas = page_slot(page);
	ret = NULL;
	spin_lock_irqsave(&pas->lock, flags);
	if (!list_empty(&pas->lh)) {
		struct page_address_map *pam;

		list_for_each_entry(pam, &pas->lh, list) {
			if (pam->page == page) {
				ret = pam->virtual;
				goto done;
			}
		}
	}
done:
	spin_unlock_irqrestore(&pas->lock, flags);
	return ret;
}
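The lookup relies on a small hash table keyed by the struct page pointer. From memory, the supporting definitions in mm/highmem.c look roughly like this (details vary by kernel version):

/*
 * Describes one page->virtual association.
 */
struct page_address_map {
	struct page *page;
	void *virtual;
	struct list_head list;
};

/*
 * Hash table of buckets; each bucket carries its own lock, which is
 * the pas->lock taken in page_address() above.
 */
static struct page_address_slot {
	struct list_head lh;		/* list of page_address_maps */
	spinlock_t lock;		/* protects this bucket's list */
} ____cacheline_aligned_in_smp page_address_htable[1<<PA_HASH_ORDER];

static struct page_address_slot *page_slot(const struct page *page)
{
	return &page_address_htable[hash_ptr(page, PA_HASH_ORDER)];
}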
	idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	/*
	 * With debugging enabled, kunmap_atomic forces that entry to 0.
	 * Make sure it was indeed properly unmapped.
	 */
	BUG_ON(!pte_none(get_fixmap_pte(vaddr)));
#endif
	/*
	 * When debugging is off, kunmap_atomic leaves the previous mapping
	 * in place, so the contained TLB flush ensures the TLB is updated
	 * with the new mapping.
	 */
	set_fixmap_pte(idx, mk_pte(page, kmap_prot));

	return (void *)vaddr;
}
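The unmap side mirrors this. A rough sketch, from memory, of the ARM __kunmap_atomic() in arch/arm/mm/highmem.c (details such as the VIVT cache flush vary by kernel version):

void __kunmap_atomic(void *kvaddr)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	int idx, type;

	if (kvaddr >= (void *)FIXADDR_START) {
		type = kmap_atomic_idx();
		idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();

		if (cache_is_vivt())
			__cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
#ifdef CONFIG_DEBUG_HIGHMEM
		/* clear the slot so stale uses fault, see BUG_ON above */
		BUG_ON(vaddr != __fix_to_virt(idx));
		set_fixmap_pte(idx, __pte(0));
#else
		(void) idx;  /* to kill a warning */
#endif
		kmap_atomic_idx_pop();
	} else if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) {
		/* this address was obtained through kmap_high_get() */
		kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)]));
	}
	pagefault_enable();
}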
Because memory is shared between CPUs on an SMP system, kmap_high_get() relies on a lock to guard the critical section. kmap_atomic() avoids locking altogether; the method used here is:
	type = kmap_atomic_idx_push();	/* each new mapping advances the per-CPU index */

	idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();
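Because the slot index is offset by smp_processor_id() and pagefault_disable() has already disabled preemption, each CPU only ever touches its own fixmap slots, so no lock is needed. From memory, the index helpers in include/linux/highmem.h look roughly like this:

DECLARE_PER_CPU(int, __kmap_atomic_idx);

static inline int kmap_atomic_idx_push(void)
{
	int idx = __this_cpu_inc_return(__kmap_atomic_idx) - 1;

#ifdef CONFIG_DEBUG_HIGHMEM
	WARN_ON_ONCE(in_irq() && !irqs_disabled());
	BUG_ON(idx >= KM_TYPE_NR);
#endif
	return idx;
}

static inline void kmap_atomic_idx_pop(void)
{
#ifdef CONFIG_DEBUG_HIGHMEM
	int idx = __this_cpu_dec_return(__kmap_atomic_idx);

	BUG_ON(idx < 0);
#else
	__this_cpu_dec(__kmap_atomic_idx);
#endif
}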