static inline struct page *
__alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
						struct alloc_context *ac)
{
	bool can_direct_reclaim = gfp_mask & __GFP_DIRECT_RECLAIM;
	struct page *page = NULL;
	unsigned int alloc_flags;
	unsigned long did_some_progress;
	enum migrate_mode migration_mode = MIGRATE_ASYNC;
	enum compact_result compact_result;
	int compaction_retries = 0;
	int no_progress_loops = 0;

	/*
	 * In the slowpath, we sanity check order to avoid ever trying to
	 * reclaim >= MAX_ORDER areas which will never succeed. Callers may
	 * be using allocators in order of preference for an area that is
	 * too large.
	 */
	if (order >= MAX_ORDER) {
		WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN));
		return NULL;
	}

	/*
	 * We also sanity check to catch abuse of atomic reserves being used by
	 * callers that are not in atomic context.
	 */
	if (WARN_ON_ONCE((gfp_mask & (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)) ==
				(__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)))
		gfp_mask &= ~__GFP_ATOMIC;
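
	/*
	 * Everything from this label on may run more than once: compaction,
	 * direct reclaim and the OOM killer all jump back here via
	 * "goto retry" when they report progress.
	 */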
retry:
	if (gfp_mask & __GFP_KSWAPD_RECLAIM)
		wake_all_kswapds(order, ac);

	/*
	 * OK, we're below the kswapd watermark and have kicked background
	 * reclaim. Now things get more complex, so set up alloc_flags according
	 * to how we want to proceed.
	 */
	alloc_flags = gfp_to_alloc_flags(gfp_mask);

	/*
	 * Reset the zonelist iterators if memory policies can be ignored.
	 * These allocations are high priority and system rather than user
	 * orientated.
	 */
	if ((alloc_flags & ALLOC_NO_WATERMARKS) || !(alloc_flags & ALLOC_CPUSET)) {
		ac->zonelist = node_zonelist(numa_node_id(), gfp_mask);
		ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
					ac->high_zoneidx, ac->nodemask);
	}
	/* This is the last chance, in general, before the goto nopage. */
	page = get_page_from_freelist(gfp_mask, order,
				alloc_flags & ~ALLOC_NO_WATERMARKS, ac);
	if (page)
		goto got_pg;

	/* Allocate without watermarks if the context allows */
	if (alloc_flags & ALLOC_NO_WATERMARKS) {
		page = get_page_from_freelist(gfp_mask, order,
						ALLOC_NO_WATERMARKS, ac);
		if (page)
			goto got_pg;
	}

	/* Caller is not willing to reclaim, we can't balance anything */
	if (!can_direct_reclaim) {
		/*
		 * All existing users of the __GFP_NOFAIL are blockable, so warn
		 * of any new users that actually allow this type of allocation
		 * to fail.
		 */
		WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL);
		goto nopage;
	}

	/* Avoid recursion of direct reclaim */
	if (current->flags & PF_MEMALLOC) {
		/*
		 * __GFP_NOFAIL request from this context is rather bizarre
		 * because we cannot reclaim anything and only can loop waiting
		 * for somebody to do a work for us.
		 */
		if (WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL)) {
			cond_resched();
			goto retry;
		}
		goto nopage;
	}

	/* Avoid allocations with no watermarks from looping endlessly */
	if (test_thread_flag(TIF_MEMDIE) && !(gfp_mask & __GFP_NOFAIL))
		goto nopage;

	/*
	 * Try direct compaction. The first pass is asynchronous. Subsequent
	 * attempts after direct reclaim are synchronous
	 */
	page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac,
					migration_mode,
					&compact_result);
	if (page)
		goto got_pg;

	/* Checks for THP-specific high-order allocations */
	if (is_thp_gfp_mask(gfp_mask)) {
		/*
		 * If compaction is deferred for high-order allocations, it is
		 * because sync compaction recently failed. If this is the case
		 * and the caller requested a THP allocation, we do not want
		 * to heavily disrupt the system, so we fail the allocation
		 * instead of entering direct reclaim.
		 */
		if (compact_result == COMPACT_DEFERRED)
			goto nopage;

		/*
		 * Compaction is contended so rather back off than cause
		 * excessive stalls.
		 */
		if (compact_result == COMPACT_CONTENDED)
			goto nopage;
	}
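
	/*
	 * Count compaction attempts that made progress; the counter is
	 * consumed by should_compact_retry() further down.
	 */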
	if (order && compaction_made_progress(compact_result))
		compaction_retries++;

	/* Try direct reclaim and then allocating */
	page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac,
							&did_some_progress);
	if (page)
		goto got_pg;

	/* Do not loop if specifically requested */
	if (gfp_mask & __GFP_NORETRY)
		goto noretry;

	/*
	 * Do not retry costly high order allocations unless they are
	 * __GFP_REPEAT
	 */
	if (order > PAGE_ALLOC_COSTLY_ORDER && !(gfp_mask & __GFP_REPEAT))
		goto noretry;

	/*
	 * Costly allocations might have made a progress but this doesn't mean
	 * their order will become available due to high fragmentation so
	 * always increment the no progress counter for them
	 */
	if (did_some_progress && order <= PAGE_ALLOC_COSTLY_ORDER)
		no_progress_loops = 0;
	else
		no_progress_loops++;
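
	/*
	 * Decide whether another round of reclaim is likely to help before
	 * looping back to the retry label.
	 */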
	if (should_reclaim_retry(gfp_mask, order, ac, alloc_flags,
				 did_some_progress > 0, no_progress_loops))
		goto retry;

	/*
	 * It doesn't make any sense to retry for the compaction if the order-0
	 * reclaim is not able to make any progress because the current
	 * implementation of the compaction depends on the sufficient amount
	 * of free memory (see __compaction_suitable)
	 */
	if (did_some_progress > 0 &&
			should_compact_retry(ac, order, alloc_flags,
				compact_result, &migration_mode,
				compaction_retries))
		goto retry;

	/* Reclaim has failed us, start killing things */
	page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress);
	if (page)
		goto got_pg;

	/* Retry as long as the OOM killer is making progress */
	if (did_some_progress) {
		no_progress_loops = 0;
		goto retry;
	}

noretry:
	/*
	 * High-order allocations do not necessarily loop after direct reclaim
	 * and reclaim/compaction depends on compaction being called after
	 * reclaim so call directly if necessary.
	 * It can become very expensive to allocate transparent hugepages at
	 * fault, so use asynchronous memory compaction for THP unless it is
	 * khugepaged trying to collapse. All other requests should tolerate
	 * at least light sync migration.
	 */
	if (is_thp_gfp_mask(gfp_mask) && !(current->flags & PF_KTHREAD))
		migration_mode = MIGRATE_ASYNC;
	else
		migration_mode = MIGRATE_SYNC_LIGHT;

	page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags,
					    ac, migration_mode,
					    &compact_result);
	if (page)
		goto got_pg;

nopage:
	warn_alloc_failed(gfp_mask, order, NULL);

got_pg:
	return page;
}
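
Which of the branches above a request actually reaches is determined almost entirely by its GFP flags. The fragment below is a minimal illustrative sketch (the helper name gfp_slowpath_examples is made up for this post), using only the standard alloc_pages()/__free_pages() API of this kernel version to show how common flag combinations map onto the paths in the slowpath:

#include <linux/gfp.h>
#include <linux/mm.h>

static void gfp_slowpath_examples(void)
{
	struct page *page;

	/*
	 * GFP_KERNEL includes __GFP_KSWAPD_RECLAIM and __GFP_DIRECT_RECLAIM,
	 * so the slowpath may wake kswapd, try direct compaction and direct
	 * reclaim, and as a last resort invoke the OOM killer.
	 */
	page = alloc_pages(GFP_KERNEL, 2);
	if (page)
		__free_pages(page, 2);

	/*
	 * GFP_ATOMIC lacks __GFP_DIRECT_RECLAIM, so can_direct_reclaim is
	 * false: after the watermark-relaxed attempts the slowpath takes
	 * the "goto nopage" exit without ever sleeping.
	 */
	page = alloc_pages(GFP_ATOMIC, 0);
	if (page)
		__free_pages(page, 0);

	/*
	 * __GFP_NORETRY requests take the "goto noretry" exit after one
	 * round of direct reclaim instead of looping back to retry.
	 */
	page = alloc_pages(GFP_KERNEL | __GFP_NORETRY, 3);
	if (page)
		__free_pages(page, 3);
}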