static bool
arena_run_split_small(arena_t *arena, arena_run_t *run, size_t size,
    szind_t binind)
{
    arena_chunk_t *chunk;
    arena_chunk_map_misc_t *miscelm;
    size_t flag_dirty, flag_decommitted, run_ind, need_pages, i;

    assert(binind != BININD_INVALID);

    chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
    miscelm = arena_run_to_miscelm(run);
    run_ind = arena_miscelm_to_pageind(miscelm);
    flag_dirty = arena_mapbits_dirty_get(chunk, run_ind);
    flag_decommitted = arena_mapbits_decommitted_get(chunk, run_ind);
    // size is the run_size that corresponds to this bin
    need_pages = (size >> LG_PAGE);
    assert(need_pages > 0);

    if (flag_decommitted != 0 && arena->chunk_hooks.commit(chunk, chunksize,
        run_ind << LG_PAGE, size, arena->ind))
        return (true);

    arena_run_split_remove(arena, chunk, run_ind, flag_dirty,
        flag_decommitted, need_pages);

    // Set the map_bits of each page that is handed out
    for (i = 0; i < need_pages; i++) {
        size_t flag_unzeroed = arena_mapbits_unzeroed_get(chunk, run_ind+i);
        arena_mapbits_small_set(chunk, run_ind+i, i, binind, flag_unzeroed);
        if (config_debug && flag_dirty == 0 && flag_unzeroed == 0)
            arena_run_page_validate_zeroed(chunk, run_ind+i);
    }
    JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
        (run_ind << LG_PAGE)), (need_pages << LG_PAGE));
    return (false);
}

// Allocate need_pages out of the run at run_ind; the trailing remainder is
// re-inserted into avail_runs
static void
arena_run_split_remove(arena_t *arena, arena_chunk_t *chunk, size_t run_ind,
    size_t flag_dirty, size_t flag_decommitted, size_t need_pages)
{
    size_t total_pages, rem_pages;

    assert(flag_dirty == 0 || flag_decommitted == 0);

    total_pages = arena_mapbits_unallocated_size_get(chunk, run_ind) >>
        LG_PAGE;
    assert(arena_mapbits_dirty_get(chunk, run_ind+total_pages-1) ==
        flag_dirty);
    assert(need_pages <= total_pages);
    rem_pages = total_pages - need_pages;

    arena_avail_remove(arena, chunk, run_ind, total_pages);
    if (flag_dirty != 0)
        arena_run_dirty_remove(arena, chunk, run_ind, total_pages);
    arena_cactive_update(arena, need_pages, 0);
    arena->nactive += need_pages;

    /* Keep track of trailing unused pages for later use. */
    if (rem_pages > 0) {
        size_t flags = flag_dirty | flag_decommitted;
        size_t flag_unzeroed_mask = (flags == 0) ? CHUNK_MAP_UNZEROED : 0;

        // Record the remainder run's metadata: write the unallocated size
        // into the map_bits of its first and last pages
        arena_mapbits_unallocated_set(chunk, run_ind+need_pages,
            (rem_pages << LG_PAGE), flags |
            (arena_mapbits_unzeroed_get(chunk, run_ind+need_pages) &
            flag_unzeroed_mask));
        arena_mapbits_unallocated_set(chunk, run_ind+total_pages-1,
            (rem_pages << LG_PAGE), flags |
            (arena_mapbits_unzeroed_get(chunk, run_ind+total_pages-1) &
            flag_unzeroed_mask));

        if (flag_dirty != 0) {
            arena_run_dirty_insert(arena, chunk, run_ind+need_pages,
                rem_pages);
        }
        arena_avail_insert(arena, chunk, run_ind+need_pages, rem_pages);
    }
}
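/*
 * A minimal, self-contained sketch (not jemalloc code) of the bookkeeping
 * idea behind arena_run_split_remove: carve need_pages off the front of a
 * free run and keep the trailing remainder around as a smaller free run.
 * Every name below (free_run_t, split_front, ...) is hypothetical and only
 * for illustration; jemalloc itself records this state in the per-chunk
 * map_bits rather than in a struct like this.
 */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define LG_PAGE 12  /* assume 4 KiB pages for the example */

typedef struct {
    size_t first_page;   /* index of the run's first page within the chunk */
    size_t npages;       /* number of unallocated pages in the run */
} free_run_t;

/*
 * Carve need_pages off the front of *run; return the trailing remainder
 * (npages == 0 when the run is consumed exactly).
 */
static free_run_t
split_front(free_run_t *run, size_t need_pages)
{
    assert(need_pages > 0 && need_pages <= run->npages);

    free_run_t rem = {
        .first_page = run->first_page + need_pages,
        .npages = run->npages - need_pages,
    };
    run->npages = need_pages;  /* the front part becomes the allocation */
    return rem;
}

int
main(void)
{
    free_run_t run = { .first_page = 8, .npages = 16 };  /* a 64 KiB free run */
    free_run_t rem = split_front(&run, 5);               /* need 5 pages */

    printf("allocated pages [%zu, %zu), %zu bytes\n",
        run.first_page, run.first_page + run.npages, run.npages << LG_PAGE);
    if (rem.npages > 0) {
        /*
         * In jemalloc the remainder is re-inserted into the arena's
         * runs_avail structures (arena_avail_insert) and, if dirty, into
         * the dirty-run list (arena_run_dirty_insert).
         */
        printf("remainder pages [%zu, %zu) stay available\n",
            rem.first_page, rem.first_page + rem.npages);
    }
    return 0;
}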