/*
 * This is the 'heart' of the zoned buddy allocator.
 */
struct page *
__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
                     struct zonelist *zonelist, nodemask_t *nodemask)
{
       struct page *page;
       unsigned int cpuset_mems_cookie;
       unsigned int alloc_flags = ALLOC_WMARK_LOW|ALLOC_FAIR;
       gfp_t alloc_mask = gfp_mask; /* The gfp_t that was actually used for allocation */
       struct alloc_context ac = {
               .high_zoneidx = gfp_zone(gfp_mask),
               .zonelist = zonelist,
               .nodemask = nodemask,
               .migratetype = gfpflags_to_migratetype(gfp_mask),
       };

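       /*
        * With cpusets enabled, confine the fast path to the current task's
        * cpuset: __GFP_HARDWALL enforces the cpuset policy, ALLOC_CPUSET
        * makes the freelist walk check cpuset_zone_allowed(), and a missing
        * nodemask defaults to the task's own mems_allowed.
        */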
       if (cpusets_enabled()) {
               alloc_mask |= __GFP_HARDWALL;
               alloc_flags |= ALLOC_CPUSET;
               if (!ac.nodemask)
                      ac.nodemask = &cpuset_current_mems_allowed;
       }

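       /*
        * gfp_allowed_mask filters out flags that cannot be honoured yet,
        * e.g. __GFP_IO/__GFP_FS during early boot or while suspend/resume
        * has restricted allocations.
        */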
       gfp_mask &= gfp_allowed_mask;

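       /*
        * Debugging hooks: let lockdep know about this allocation context and
        * warn if a request that may direct-reclaim (and therefore sleep) is
        * issued from atomic context.
        */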
       lockdep_trace_alloc(gfp_mask);

       might_sleep_if(gfp_mask & __GFP_DIRECT_RECLAIM);

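       /* Optional fault injection (CONFIG_FAIL_PAGE_ALLOC) for testing. */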
       if (should_fail_alloc_page(gfp_mask, order))
              return NULL;

       /*
        * Check the zones suitable for the gfp_mask contain at least one
        * valid zone. It's possible to have an empty zonelist as a result
        * of __GFP_THISNODE and a memoryless node.
        */
       if (unlikely(!zonelist->_zonerefs->zone))
              return NULL;

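       /* Movable allocations may also be satisfied from CMA pageblocks. */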
       if (IS_ENABLED(CONFIG_CMA) && ac.migratetype == MIGRATE_MOVABLE)
              alloc_flags |= ALLOC_CMA;

retry_cpuset:
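       /*
        * Snapshot the mems_allowed seqcount so that a concurrent cpuset
        * update can be detected and the allocation retried before failing.
        */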
       cpuset_mems_cookie = read_mems_allowed_begin();

       /* Dirty zone balancing only done in the fast path */
       ac.spread_dirty_pages = (gfp_mask & __GFP_WRITE);

       /*
        * The preferred zone is used for statistics but crucially it is
        * also used as the starting point for the zonelist iterator. It
        * may get reset for allocations that ignore memory policies.
        */
       ac.preferred_zoneref = first_zones_zonelist(ac.zonelist,
                                   ac.high_zoneidx, ac.nodemask);
       if (!ac.preferred_zoneref) {
              page = NULL;
              goto no_zone;
       }

       /* First allocation attempt */
       page = get_page_from_freelist(alloc_mask, order, alloc_flags, &ac);
       if (likely(page))
              goto out;

       /*
        * Runtime PM, block IO and its error handling path can deadlock
        * because I/O on the device might not complete.
        */
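       /*
        * If the task is inside a PF_MEMALLOC_NOIO section (e.g. runtime PM
        * callbacks), memalloc_noio_flags() clears the I/O-related GFP flags
        * from the mask that will be used for the slow path below.
        */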
       alloc_mask = memalloc_noio_flags(gfp_mask);
       ac.spread_dirty_pages = false;

       /*
        * Restore the original nodemask if it was potentially replaced with
        * &cpuset_current_mems_allowed to optimize the fast-path attempt.
        */
       if (cpusets_enabled())
              ac.nodemask = nodemask;
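       /*
        * The slow path can wake kswapd, retry against lower watermarks and,
        * if needed, fall back to direct reclaim, compaction and the OOM
        * killer.
        */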
       page = __alloc_pages_slowpath(alloc_mask, order, &ac);

no_zone:
       /*
        * When updating a task's mems_allowed, it is possible to race with
        * parallel threads in such a way that an allocation can fail while
        * the mask is being updated. If a page allocation is about to fail,
        * check if the cpuset changed during allocation and if so, retry.
        */
       if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie))) {
              alloc_mask = gfp_mask;
              goto retry_cpuset;
       }

out:
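       /*
        * Tell kmemcheck about the freshly allocated page(s) so that reads
        * of uninitialized memory in them can be flagged.
        */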
       if (kmemcheck_enabled && page)
              kmemcheck_pagealloc_alloc(page, order, gfp_mask);

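       /*
        * The tracepoint reports alloc_mask, i.e. the mask actually used for
        * the final attempt, which may differ from the caller's gfp_mask.
        */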
       trace_mm_page_alloc(page, order, alloc_mask, ac.migratetype);

       return page;
}
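
/*
 * A minimal usage sketch, not part of the listing above: callers normally
 * reach __alloc_pages_nodemask() through wrappers such as alloc_pages()
 * rather than calling it directly. The helper name example_grab_page() is
 * hypothetical and only illustrates the gfp_mask/order parameters and the
 * NULL return on failure.
 */
static void *example_grab_page(void)
{
       /* One page (order 0), allowing sleeping, reclaim and I/O. */
       struct page *page = alloc_pages(GFP_KERNEL, 0);

       if (!page)
              return NULL;   /* allocation failed even after the slow path */

       /* Map to a kernel virtual address; pair with __free_pages(page, 0). */
       return page_address(page);
}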