/*
 * There is always at least global CMA area and a few optional
 * areas configured in kernel .config.
 */
#ifdef CONFIG_CMA_AREAS
#define MAX_CMA_AREAS	(1 + CONFIG_CMA_AREAS)
#else
#define MAX_CMA_AREAS	(0)
#endif
config CMA_AREAS
	int "Maximum count of the CMA areas"
	depends on CMA
	default 7
	help
	  CMA allows to create CMA areas for particular purpose, mainly,
	  used as device private area. This parameter sets the maximum
	  number of CMA area in the system.

	  If unsure, leave the default value "7".
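For reference, mm/cma.c keeps the areas themselves in a fixed-size array bounded by this macro; a minimal sketch of those declarations (the exact form varies between kernel versions):

struct cma cma_areas[MAX_CMA_AREAS];
unsigned cma_area_count;

With the default CONFIG_CMA_AREAS=7 that leaves room for the global area plus seven device-private ones.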
Let's look at the initialization:
/**
 * cma_init_reserved_mem() - create custom contiguous area from reserved memory
 * @base: Base address of the reserved area
 * @size: Size of the reserved area (in bytes),
 * @order_per_bit: Order of pages represented by one bit on bitmap.
 * @name: The name of the area. If this parameter is NULL, the name of
 *        the area will be set to "cmaN", where N is a running counter of
 *        used areas.
 * @res_cma: Pointer to store the created cma region.
 *
 * This function creates custom contiguous area from already reserved memory.
 */
int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
				 unsigned int order_per_bit,
				 const char *name,
				 struct cma **res_cma)
{
	...
	/*
	 * Each reserved area must be initialised later, when more kernel
	 * subsystems (like slab allocator) are available.
	 */
	cma = &cma_areas[cma_area_count];
	if (name) {
		cma->name = name;
	} else {
		cma->name = kasprintf(GFP_KERNEL, "cma%d\n", cma_area_count);
		if (!cma->name)
			return -ENOMEM;
	}
	cma->base_pfn = PFN_DOWN(base);
	cma->count = size >> PAGE_SHIFT;	//tj: cma area size in pages
	cma->order_per_bit = order_per_bit;
	*res_cma = cma;
	cma_area_count++;
	totalcma_pages += (size / PAGE_SIZE);
One caller is the device-tree path, rmem_cma_setup(), registered for "shared-dma-pool" reserved-memory nodes:

	if (!of_get_flat_dt_prop(node, "reusable", NULL) ||
	    of_get_flat_dt_prop(node, "no-map", NULL))
		return -EINVAL;

	if ((rmem->base & mask) || (rmem->size & mask)) {
		pr_err("Reserved memory: incorrect alignment of CMA region\n");
		return -EINVAL;
	}

	err = cma_init_reserved_mem(rmem->base, rmem->size, 0, rmem->name, &cma);	//tj: here
	if (err) {
		pr_err("Reserved memory: unable to setup CMA region\n");
		return err;
	}
	/* Architecture specific contiguous memory fixup. */
	dma_contiguous_early_fixup(rmem->base, rmem->size);

	if (of_get_flat_dt_prop(node, "linux,cma-default", NULL))
		dma_contiguous_set_default(cma);

	rmem->ops = &rmem_cma_ops;
	rmem->priv = cma;

	pr_info("Reserved memory: created CMA memory pool at %pa, size %ld MiB\n",
		&rmem->base, (unsigned long)rmem->size / SZ_1M);

	return 0;
}
Another call site is in the DMA code:
/**
 * dma_contiguous_reserve() - reserve area(s) for contiguous memory handling
 * @limit: End address of the reserved memory (optional, 0 for any).
 *
 * This function reserves memory from early allocator. It should be
 * called by arch specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory.
 */
void __init dma_contiguous_reserve(phys_addr_t limit)
{
	phys_addr_t selected_size = 0;
	phys_addr_t selected_base = 0;
	phys_addr_t selected_limit = limit;
	bool fixed = false;
/*
 * Default global CMA area size can be defined in kernel's .config.
 * This is useful mainly for distro maintainers to create a kernel
 * that works correctly for most supported systems.
 * The size can be set in bytes or as a percentage of the total memory
 * in the system.
 *
 * Users, who want to set the size of global CMA area for their system
 * should use cma= kernel parameter.
 */
static const phys_addr_t size_bytes = (phys_addr_t)CMA_SIZE_MBYTES * SZ_1M;
static phys_addr_t size_cmdline = -1;
The default global CMA size is 16 MiB:
if DMA_CMA

comment "Default contiguous memory area size:"

config CMA_SIZE_MBYTES
	int "Size in Mega Bytes"
	depends on !CMA_SIZE_SEL_PERCENTAGE
	default 0 if X86
	default 16
	help
	  Defines the size (in MiB) of the default memory area for
	  Contiguous Memory Allocator. If the size of 0 is selected,
	  CMA is disabled by default, but it can be enabled by passing
	  cma=size[MG] to the kernel.
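The cma= override mentioned above is handled by an early_param hook in kernel/dma/contiguous.c; a simplified sketch (newer kernels also accept an optional @base-limit suffix):

static int __init early_cma(char *p)
{
	pr_debug("%s(%s)\n", __func__, p);
	size_cmdline = memparse(p, &p);	/* e.g. "cma=64M" -> 64 MiB */
	return 0;
}
early_param("cma", early_cma);

dma_contiguous_reserve() then prefers size_cmdline over the .config default size_bytes whenever cma= was given on the command line.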
Continuing:
int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
				       phys_addr_t limit,
				       struct cma **res_cma, bool fixed)
{
	int ret;

	ret = cma_declare_contiguous(base, size, limit, 0, 0, fixed,
				     "reserved", res_cma);
Inside cma_declare_contiguous(), the region is carved out of memblock and then handed to cma_init_reserved_mem():

	/*
	 * All pages in the reserved area must come from the same zone.
	 * If the requested region crosses the low/high memory boundary,
	 * try allocating from high memory first and fall back to low
	 * memory in case of failure.
	 */
	if (base < highmem_start && limit > highmem_start) {
		addr = memblock_alloc_range(size, alignment,
					    highmem_start, limit,
					    MEMBLOCK_NONE);
		limit = highmem_start;
	}

	if (!addr) {
		addr = memblock_alloc_range(size, alignment, base,
					    limit, MEMBLOCK_NONE);
		if (!addr) {
			ret = -ENOMEM;
			goto err;
		}
	}

	/*
	 * kmemleak scans/reads tracked objects for pointers to other
	 * objects but this address isn't mapped and accessible
	 */
	kmemleak_ignore_phys(addr);
	base = addr;
	}

	ret = cma_init_reserved_mem(base, size, order_per_bit, name, res_cma);
	if (ret)
		goto err;

	pr_info("Reserved %ld MiB at %pa\n", (unsigned long)size / SZ_1M,
		&base);
	return 0;
In cma_activate_area() the bitmap size is worked out first: the CMA area size in pages is cma->count, the size in bits is cma->count >> order_per_bit, and the size in pageblocks is cma->count >> pageblock_order.
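For reference, a sketch of the helpers that do this arithmetic (names as in mm/cma.c; details vary between kernel versions). With a 16 MiB area, 4 KiB pages and order_per_bit = 0 this gives cma->count = 4096, i.e. 4096 bits or a 512-byte bitmap:

static inline unsigned long cma_bitmap_maxno(struct cma *cma)
{
	/* one bit covers 2^order_per_bit pages */
	return cma->count >> cma->order_per_bit;
}

/* in cma_activate_area(): bitmap length rounded up to whole longs */
int bitmap_size = BITS_TO_LONGS(cma_bitmap_maxno(cma)) * sizeof(long);

cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);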
Next comes the per-pageblock initialization:
zone = page_zone(pfn_to_page(pfn));
	do {
		unsigned j;

		base_pfn = pfn;
		for (j = pageblock_nr_pages; j; --j, pfn++) {
			WARN_ON_ONCE(!pfn_valid(pfn));
			/*
			 * alloc_contig_range requires the pfn range
			 * specified to be in the same zone. Make this
			 * simple by forcing the entire CMA resv range
			 * to be in the same zone.
			 */
			if (page_zone(pfn_to_page(pfn)) != zone)
				goto not_in_zone;
		}
		init_cma_reserved_pageblock(pfn_to_page(base_pfn));
	} while (--i);
The key interface here is init_cma_reserved_pageblock():
#ifdef CONFIG_CMA
/* Free whole pageblock and set its migration type to MIGRATE_CMA. */
void __init init_cma_reserved_pageblock(struct page *page)
{
	unsigned i = pageblock_nr_pages;
	struct page *p = page;

	do {
		__ClearPageReserved(p);
		set_page_count(p, 0);
	} while (++p, --i);

	set_pageblock_migratetype(page, MIGRATE_CMA);

	if (pageblock_order >= MAX_ORDER) {
		i = pageblock_nr_pages;
		p = page;

		do {
			set_page_refcounted(p);
			__free_pages(p, MAX_ORDER - 1);
			p += MAX_ORDER_NR_PAGES;
		} while (i -= MAX_ORDER_NR_PAGES);
	} else {
		set_page_refcounted(page);
		__free_pages(page, pageblock_order);
	}
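To make the two branches above concrete, here is a tiny standalone calculation; the constants are illustrative assumptions, not values from any particular architecture. With pageblock_order = 9 and MAX_ORDER = 11 the else branch frees the whole 512-page pageblock as a single order-9 buddy block; if pageblock_order were >= MAX_ORDER, the block would instead be released in MAX_ORDER_NR_PAGES-sized chunks at order MAX_ORDER - 1:

#include <stdio.h>

int main(void)
{
	unsigned int max_order = 11;		/* assumed MAX_ORDER */
	unsigned int pageblock_order = 9;	/* assumed pageblock order */
	unsigned long pageblock_nr_pages = 1UL << pageblock_order;
	unsigned long max_order_nr_pages = 1UL << (max_order - 1);

	if (pageblock_order >= max_order)
		printf("free %lu chunks of order %u\n",
		       pageblock_nr_pages / max_order_nr_pages, max_order - 1);
	else
		printf("free one block of order %u\n", pageblock_order);
	return 0;
}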
Allocation from an area then goes through cma_alloc():

/**
 * cma_alloc() - allocate pages from contiguous area
 * @cma: Contiguous memory region for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 * @gfp_mask: GFP mask to use during compaction
 *
 * This function allocates part of contiguous memory on specific
 * contiguous memory area.
 */
struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
		       gfp_t gfp_mask)
{
	...
	for (;;) {
		mutex_lock(&cma->lock);
		bitmap_no = bitmap_find_next_zero_area_off(cma->bitmap,
				bitmap_maxno, start, bitmap_count, mask,
				offset);
		if (bitmap_no >= bitmap_maxno) {
			mutex_unlock(&cma->lock);
			break;
		}
		bitmap_set(cma->bitmap, bitmap_no, bitmap_count);
		/*
		 * It's safe to drop the lock here. We've marked this region for
		 * our exclusive use. If the migration fails we will take the
		 * lock again and unmark it.
		 */
		mutex_unlock(&cma->lock);
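To put cma_alloc() in context, a hypothetical driver-side usage might look like the sketch below (example_grab_buffer/example_put_buffer are made-up names; dev_get_cma_area() returns the device's private area or falls back to the global default):

#include <linux/cma.h>
#include <linux/dma-contiguous.h>
#include <linux/device.h>
#include <linux/gfp.h>

/* Hypothetical helper: allocate nr_pages physically contiguous pages
 * from the CMA area associated with @dev (or the global default area). */
static struct page *example_grab_buffer(struct device *dev, size_t nr_pages)
{
	struct cma *cma = dev_get_cma_area(dev);

	return cma_alloc(cma, nr_pages, 0 /* order-0 alignment */, GFP_KERNEL);
}

/* Hypothetical helper: return the pages to the CMA bitmap. */
static void example_put_buffer(struct device *dev, struct page *pages,
			       size_t nr_pages)
{
	cma_release(dev_get_cma_area(dev), pages, nr_pages);
}

In practice most drivers reach this path indirectly through the DMA API: dma_alloc_coherent() typically ends up in dma_alloc_from_contiguous() when CMA backs the device.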