percpu.c

  1. /*
  2. * mm/percpu.c - percpu memory allocator
  3. *
  4. * Copyright (C) 2009 SUSE Linux Products GmbH
  5. * Copyright (C) 2009 Tejun Heo <tj@kernel.org>
  6. *
  7. * This file is released under the GPLv2.
  8. *
  9. * This is the percpu allocator which can handle both static and dynamic
  10. * areas. Percpu areas are allocated in chunks. Each chunk consists
  11. * of a boot-time determined number of units and the first
  12. * chunk is used for static percpu variables in the kernel image
  13. * (special boot time alloc/init handling necessary as these areas
  14. * need to be brought up before allocation services are running).
  15. * Units grow as necessary and all units grow or shrink in unison.
  16. * When a chunk is filled up, another chunk is allocated.
  17. *
  18. * c0 c1 c2
  19. * ------------------- ------------------- ------------
  20. * | u0 | u1 | u2 | u3 | | u0 | u1 | u2 | u3 | | u0 | u1 | u
  21. * ------------------- ...... ------------------- .... ------------
  22. *
  23. * Allocation is done in offset-size areas of a single unit space. I.e.,
  24. * an area of 512 bytes at 6k in c1 occupies 512 bytes at 6k of c1:u0,
  25. * c1:u1, c1:u2 and c1:u3. On UMA, units correspond directly to
  26. * cpus. On NUMA, the mapping can be non-linear and even sparse.
  27. * Percpu access can be done by configuring percpu base registers
  28. * according to cpu to unit mapping and pcpu_unit_size.
  29. *
  30. * There are usually many small percpu allocations, many of them being
  31. * as small as 4 bytes. The allocator organizes chunks into lists
  32. * according to free size and tries to allocate from the fullest one.
  33. * Each chunk keeps the maximum contiguous area size hint which is
  34. * guaranteed to be equal to or larger than the maximum contiguous
  35. * area in the chunk. This helps the allocator not to iterate the
  36. * chunk maps unnecessarily.
  37. *
  38. * Allocation state in each chunk is kept using an array of integers
  39. * on chunk->map. Each entry holds the byte offset of an area; the
  40. * low bit is set while the area is allocated and the area ends where
  41. * the next entry begins. Allocation inside a chunk is done by scanning
  42. * this map sequentially and serving the first matching entry. This
  43. * scheme is derived from the percpu_modalloc() allocator. Chunks can
  44. * be determined from the address using the index field in the page struct, which contains a pointer to the chunk.
  45. *
  46. * To use this allocator, arch code should do the following:
  47. *
  48. * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate
  49. * regular address to percpu pointer and back if they need to be
  50. * different from the default
  51. *
  52. * - use pcpu_setup_first_chunk() during percpu area initialization to
  53. * setup the first chunk containing the kernel static percpu area
  54. */
  55. #include <linux/bitmap.h>
  56. #include <linux/bootmem.h>
  57. #include <linux/err.h>
  58. #include <linux/list.h>
  59. #include <linux/log2.h>
  60. #include <linux/mm.h>
  61. #include <linux/module.h>
  62. #include <linux/mutex.h>
  63. #include <linux/percpu.h>
  64. #include <linux/pfn.h>
  65. #include <linux/slab.h>
  66. #include <linux/spinlock.h>
  67. #include <linux/vmalloc.h>
  68. #include <linux/workqueue.h>
  69. #include <linux/kmemleak.h>
  70. #include <asm/cacheflush.h>
  71. #include <asm/sections.h>
  72. #include <asm/tlbflush.h>
  73. #include <asm/io.h>
  74. #define PCPU_SLOT_BASE_SHIFT 5 /* 1-31 shares the same slot */
  75. #define PCPU_DFL_MAP_ALLOC 16 /* start a map with 16 ents */
  76. #define PCPU_ATOMIC_MAP_MARGIN_LOW 32
  77. #define PCPU_ATOMIC_MAP_MARGIN_HIGH 64
  78. #define PCPU_EMPTY_POP_PAGES_LOW 2
  79. #define PCPU_EMPTY_POP_PAGES_HIGH 4
  80. #ifdef CONFIG_SMP
  81. /* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */
  82. #ifndef __addr_to_pcpu_ptr
  83. #define __addr_to_pcpu_ptr(addr) \
  84. (void __percpu *)((unsigned long)(addr) - \
  85. (unsigned long)pcpu_base_addr + \
  86. (unsigned long)__per_cpu_start)
  87. #endif
  88. #ifndef __pcpu_ptr_to_addr
  89. #define __pcpu_ptr_to_addr(ptr) \
  90. (void __force *)((unsigned long)(ptr) + \
  91. (unsigned long)pcpu_base_addr - \
  92. (unsigned long)__per_cpu_start)
  93. #endif
  94. #else /* CONFIG_SMP */
  95. /* on UP, it's always identity mapped */
  96. #define __addr_to_pcpu_ptr(addr) (void __percpu *)(addr)
  97. #define __pcpu_ptr_to_addr(ptr) (void __force *)(ptr)
  98. #endif /* CONFIG_SMP */
  99. struct pcpu_chunk {
  100. struct list_head list; /* linked to pcpu_slot lists */
  101. int free_size; /* free bytes in the chunk */
  102. int contig_hint; /* max contiguous size hint */
  103. void *base_addr; /* base address of this chunk */
  104. int map_used; /* # of map entries used before the sentry */
  105. int map_alloc; /* # of map entries allocated */
  106. int *map; /* allocation map */
  107. struct list_head map_extend_list;/* on pcpu_map_extend_chunks */
  108. void *data; /* chunk data */
  109. int first_free; /* no free below this */
  110. bool immutable; /* no [de]population allowed */
  111. int nr_populated; /* # of populated pages */
  112. unsigned long populated[]; /* populated bitmap */
  113. };
  114. static int pcpu_unit_pages __read_mostly;
  115. static int pcpu_unit_size __read_mostly;
  116. static int pcpu_nr_units __read_mostly;
  117. static int pcpu_atom_size __read_mostly;
  118. static int pcpu_nr_slots __read_mostly;
  119. static size_t pcpu_chunk_struct_size __read_mostly;
  120. /* cpus with the lowest and highest unit addresses */
  121. static unsigned int pcpu_low_unit_cpu __read_mostly;
  122. static unsigned int pcpu_high_unit_cpu __read_mostly;
  123. /* the address of the first chunk which starts with the kernel static area */
  124. void *pcpu_base_addr __read_mostly;
  125. EXPORT_SYMBOL_GPL(pcpu_base_addr);
  126. static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
  127. const unsigned long *pcpu_unit_offsets __read_mostly; /* cpu -> unit offset */
  128. /* group information, used for vm allocation */
  129. static int pcpu_nr_groups __read_mostly;
  130. static const unsigned long *pcpu_group_offsets __read_mostly;
  131. static const size_t *pcpu_group_sizes __read_mostly;
  132. /*
  133. * The first chunk which always exists. Note that unlike other
  134. * chunks, this one can be allocated and mapped in several different
  135. * ways and thus often doesn't live in the vmalloc area.
  136. */
  137. static struct pcpu_chunk *pcpu_first_chunk;
  138. /*
  139. * Optional reserved chunk. This chunk reserves part of the first
  140. * chunk and serves it for reserved allocations. The amount of
  141. * reserved offset is in pcpu_reserved_chunk_limit. When the reserved
  142. * area doesn't exist, the following variables contain NULL and 0
  143. * respectively.
  144. */
  145. static struct pcpu_chunk *pcpu_reserved_chunk;
  146. static int pcpu_reserved_chunk_limit;
  147. static DEFINE_SPINLOCK(pcpu_lock); /* all internal data structures */
  148. static DEFINE_MUTEX(pcpu_alloc_mutex); /* chunk create/destroy, [de]pop, map ext */
  149. static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */
  150. /* chunks which need their map areas extended, protected by pcpu_lock */
  151. static LIST_HEAD(pcpu_map_extend_chunks);
  152. /*
  153. * The number of empty populated pages, protected by pcpu_lock. The
  154. * reserved chunk doesn't contribute to the count.
  155. */
  156. static int pcpu_nr_empty_pop_pages;
  157. /*
  158. * Balance work is used to populate or destroy chunks asynchronously. We
  159. * try to keep the number of populated free pages between
  160. * PCPU_EMPTY_POP_PAGES_LOW and HIGH for atomic allocations and at most one
  161. * empty chunk.
  162. */
  163. static void pcpu_balance_workfn(struct work_struct *work);
  164. static DECLARE_WORK(pcpu_balance_work, pcpu_balance_workfn);
  165. static bool pcpu_async_enabled __read_mostly;
  166. static bool pcpu_atomic_alloc_failed;
  167. static void pcpu_schedule_balance_work(void)
  168. {
  169. if (pcpu_async_enabled)
  170. schedule_work(&pcpu_balance_work);
  171. }
  172. static bool pcpu_addr_in_first_chunk(void *addr)
  173. {
  174. void *first_start = pcpu_first_chunk->base_addr;
  175. return addr >= first_start && addr < first_start + pcpu_unit_size;
  176. }
  177. static bool pcpu_addr_in_reserved_chunk(void *addr)
  178. {
  179. void *first_start = pcpu_first_chunk->base_addr;
  180. return addr >= first_start &&
  181. addr < first_start + pcpu_reserved_chunk_limit;
  182. }
  183. static int __pcpu_size_to_slot(int size)
  184. {
  185. int highbit = fls(size); /* size is in bytes */
  186. return max(highbit - PCPU_SLOT_BASE_SHIFT + 2, 1);
  187. }
  188. static int pcpu_size_to_slot(int size)
  189. {
  190. if (size == pcpu_unit_size)
  191. return pcpu_nr_slots - 1;
  192. return __pcpu_size_to_slot(size);
  193. }
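/*
 * Slot arithmetic example (illustrative values): with
 * PCPU_SLOT_BASE_SHIFT == 5, a 512 byte request has fls(512) == 10 and
 * lands in slot 10 - 5 + 2 == 7; doubling the size moves it up one
 * slot. A chunk whose free_size equals pcpu_unit_size is kept in the
 * last slot so fully free chunks are easy to find.
 */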
  194. static int pcpu_chunk_slot(const struct pcpu_chunk *chunk)
  195. {
  196. if (chunk->free_size < sizeof(int) || chunk->contig_hint < sizeof(int))
  197. return 0;
  198. return pcpu_size_to_slot(chunk->free_size);
  199. }
  200. /* set the pointer to a chunk in a page struct */
  201. static void pcpu_set_page_chunk(struct page *page, struct pcpu_chunk *pcpu)
  202. {
  203. page->index = (unsigned long)pcpu;
  204. }
  205. /* obtain pointer to a chunk from a page struct */
  206. static struct pcpu_chunk *pcpu_get_page_chunk(struct page *page)
  207. {
  208. return (struct pcpu_chunk *)page->index;
  209. }
  210. static int __maybe_unused pcpu_page_idx(unsigned int cpu, int page_idx)
  211. {
  212. return pcpu_unit_map[cpu] * pcpu_unit_pages + page_idx;
  213. }
  214. static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
  215. unsigned int cpu, int page_idx)
  216. {
  217. return (unsigned long)chunk->base_addr + pcpu_unit_offsets[cpu] +
  218. (page_idx << PAGE_SHIFT);
  219. }
  220. static void __maybe_unused pcpu_next_unpop(struct pcpu_chunk *chunk,
  221. int *rs, int *re, int end)
  222. {
  223. *rs = find_next_zero_bit(chunk->populated, end, *rs);
  224. *re = find_next_bit(chunk->populated, end, *rs + 1);
  225. }
  226. static void __maybe_unused pcpu_next_pop(struct pcpu_chunk *chunk,
  227. int *rs, int *re, int end)
  228. {
  229. *rs = find_next_bit(chunk->populated, end, *rs);
  230. *re = find_next_zero_bit(chunk->populated, end, *rs + 1);
  231. }
  232. /*
  233. * (Un)populated page region iterators. Iterate over (un)populated
  234. * page regions between @start and @end in @chunk. @rs and @re should
  235. * be integer variables and will be set to start and end page index of
  236. * the current region.
  237. */
  238. #define pcpu_for_each_unpop_region(chunk, rs, re, start, end) \
  239. for ((rs) = (start), pcpu_next_unpop((chunk), &(rs), &(re), (end)); \
  240. (rs) < (re); \
  241. (rs) = (re) + 1, pcpu_next_unpop((chunk), &(rs), &(re), (end)))
  242. #define pcpu_for_each_pop_region(chunk, rs, re, start, end) \
  243. for ((rs) = (start), pcpu_next_pop((chunk), &(rs), &(re), (end)); \
  244. (rs) < (re); \
  245. (rs) = (re) + 1, pcpu_next_pop((chunk), &(rs), &(re), (end)))
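/*
 * Usage sketch (mirrors the callers further down, without their locking
 * and error handling): walk every unpopulated page run of a chunk and
 * populate it.
 *
 *	int rs, re;
 *
 *	pcpu_for_each_unpop_region(chunk, rs, re, 0, pcpu_unit_pages)
 *		pcpu_populate_chunk(chunk, rs, re);
 *
 * Each iteration sets [rs, re) to one maximal run of clear (or set, for
 * the _pop_ variant) bits in chunk->populated.
 */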
  246. /**
  247. * pcpu_mem_zalloc - allocate memory
  248. * @size: bytes to allocate
  249. *
  250. * Allocate @size bytes. If @size is smaller than PAGE_SIZE,
  251. * kzalloc() is used; otherwise, vzalloc() is used. The returned
  252. * memory is always zeroed.
  253. *
  254. * CONTEXT:
  255. * Does GFP_KERNEL allocation.
  256. *
  257. * RETURNS:
  258. * Pointer to the allocated area on success, NULL on failure.
  259. */
  260. static void *pcpu_mem_zalloc(size_t size)
  261. {
  262. if (WARN_ON_ONCE(!slab_is_available()))
  263. return NULL;
  264. if (size <= PAGE_SIZE)
  265. return kzalloc(size, GFP_KERNEL);
  266. else
  267. return vzalloc(size);
  268. }
  269. /**
  270. * pcpu_mem_free - free memory
  271. * @ptr: memory to free
  272. * @size: size of the area
  273. *
  274. * Free @ptr. @ptr should have been allocated using pcpu_mem_zalloc().
  275. */
  276. static void pcpu_mem_free(void *ptr, size_t size)
  277. {
  278. if (size <= PAGE_SIZE)
  279. kfree(ptr);
  280. else
  281. vfree(ptr);
  282. }
  283. /**
  284. * pcpu_count_occupied_pages - count the number of pages an area occupies
  285. * @chunk: chunk of interest
  286. * @i: index of the area in question
  287. *
  288. * Count the number of pages the chunk's @i'th area occupies. When the area's
  289. * start and/or end address isn't aligned to a page boundary, the straddled
  290. * page is included in the count iff the rest of the page is free.
  291. */
  292. static int pcpu_count_occupied_pages(struct pcpu_chunk *chunk, int i)
  293. {
  294. int off = chunk->map[i] & ~1;
  295. int end = chunk->map[i + 1] & ~1;
  296. if (!PAGE_ALIGNED(off) && i > 0) {
  297. int prev = chunk->map[i - 1];
  298. if (!(prev & 1) && prev <= round_down(off, PAGE_SIZE))
  299. off = round_down(off, PAGE_SIZE);
  300. }
  301. if (!PAGE_ALIGNED(end) && i + 1 < chunk->map_used) {
  302. int next = chunk->map[i + 1];
  303. int nend = chunk->map[i + 2] & ~1;
  304. if (!(next & 1) && nend >= round_up(end, PAGE_SIZE))
  305. end = round_up(end, PAGE_SIZE);
  306. }
  307. return max_t(int, PFN_DOWN(end) - PFN_UP(off), 0);
  308. }
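/*
 * Worked example (illustrative, 4K pages): an area [6144, 12288)
 * straddles page 1. Taken alone it fully covers only page 2
 * (PFN_DOWN(12288) - PFN_UP(6144) == 3 - 2 == 1). If the previous area
 * is free and starts at or below 4096, @off is rounded down to 4096 and
 * the straddled page counts too, giving 2 pages.
 */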
  309. /**
  310. * pcpu_chunk_relocate - put chunk in the appropriate chunk slot
  311. * @chunk: chunk of interest
  312. * @oslot: the previous slot it was on
  313. *
  314. * This function is called after an allocation or free changed @chunk.
  315. * New slot according to the changed state is determined and @chunk is
  316. * moved to the slot. Note that the reserved chunk is never put on
  317. * chunk slots.
  318. *
  319. * CONTEXT:
  320. * pcpu_lock.
  321. */
  322. static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
  323. {
  324. int nslot = pcpu_chunk_slot(chunk);
  325. if (chunk != pcpu_reserved_chunk && oslot != nslot) {
  326. if (oslot < nslot)
  327. list_move(&chunk->list, &pcpu_slot[nslot]);
  328. else
  329. list_move_tail(&chunk->list, &pcpu_slot[nslot]);
  330. }
  331. }
  332. /**
  333. * pcpu_need_to_extend - determine whether chunk area map needs to be extended
  334. * @chunk: chunk of interest
  335. * @is_atomic: the allocation context
  336. *
  337. * Determine whether area map of @chunk needs to be extended. If
  338. * @is_atomic, only the amount necessary for a new allocation is
  339. * considered; however, async extension is scheduled if the left amount is
  340. * low. If !@is_atomic, it aims for more empty space. Combined, this
  341. * ensures that the map is likely to have enough available space to
  342. * accommodate atomic allocations which can't extend maps directly.
  343. *
  344. * CONTEXT:
  345. * pcpu_lock.
  346. *
  347. * RETURNS:
  348. * New target map allocation length if extension is necessary, 0
  349. * otherwise.
  350. */
  351. static int pcpu_need_to_extend(struct pcpu_chunk *chunk, bool is_atomic)
  352. {
  353. int margin, new_alloc;
  354. lockdep_assert_held(&pcpu_lock);
  355. if (is_atomic) {
  356. margin = 3;
  357. if (chunk->map_alloc <
  358. chunk->map_used + PCPU_ATOMIC_MAP_MARGIN_LOW) {
  359. if (list_empty(&chunk->map_extend_list)) {
  360. list_add_tail(&chunk->map_extend_list,
  361. &pcpu_map_extend_chunks);
  362. pcpu_schedule_balance_work();
  363. }
  364. }
  365. } else {
  366. margin = PCPU_ATOMIC_MAP_MARGIN_HIGH;
  367. }
  368. if (chunk->map_alloc >= chunk->map_used + margin)
  369. return 0;
  370. new_alloc = PCPU_DFL_MAP_ALLOC;
  371. while (new_alloc < chunk->map_used + margin)
  372. new_alloc *= 2;
  373. return new_alloc;
  374. }
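/*
 * Sizing example (illustrative): with chunk->map_used == 70 and the
 * non-atomic margin of PCPU_ATOMIC_MAP_MARGIN_HIGH == 64, the target is
 * at least 134 entries, so new_alloc doubles from
 * PCPU_DFL_MAP_ALLOC == 16 up to 256.
 */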
  375. /**
  376. * pcpu_extend_area_map - extend area map of a chunk
  377. * @chunk: chunk of interest
  378. * @new_alloc: new target allocation length of the area map
  379. *
  380. * Extend area map of @chunk to have @new_alloc entries.
  381. *
  382. * CONTEXT:
  383. * Does GFP_KERNEL allocation. Grabs and releases pcpu_lock.
  384. *
  385. * RETURNS:
  386. * 0 on success, -errno on failure.
  387. */
  388. static int pcpu_extend_area_map(struct pcpu_chunk *chunk, int new_alloc)
  389. {
  390. int *old = NULL, *new = NULL;
  391. size_t old_size = 0, new_size = new_alloc * sizeof(new[0]);
  392. unsigned long flags;
  393. lockdep_assert_held(&pcpu_alloc_mutex);
  394. new = pcpu_mem_zalloc(new_size);
  395. if (!new)
  396. return -ENOMEM;
  397. /* acquire pcpu_lock and switch to new area map */
  398. spin_lock_irqsave(&pcpu_lock, flags);
  399. if (new_alloc <= chunk->map_alloc)
  400. goto out_unlock;
  401. old_size = chunk->map_alloc * sizeof(chunk->map[0]);
  402. old = chunk->map;
  403. memcpy(new, old, old_size);
  404. chunk->map_alloc = new_alloc;
  405. chunk->map = new;
  406. new = NULL;
  407. out_unlock:
  408. spin_unlock_irqrestore(&pcpu_lock, flags);
  409. /*
  410. * pcpu_mem_free() might end up calling vfree() which uses
  411. * IRQ-unsafe lock and thus can't be called under pcpu_lock.
  412. */
  413. pcpu_mem_free(old, old_size);
  414. pcpu_mem_free(new, new_size);
  415. return 0;
  416. }
  417. /**
  418. * pcpu_fit_in_area - try to fit the requested allocation in a candidate area
  419. * @chunk: chunk the candidate area belongs to
  420. * @off: the offset to the start of the candidate area
  421. * @this_size: the size of the candidate area
  422. * @size: the size of the target allocation
  423. * @align: the alignment of the target allocation
  424. * @pop_only: only allocate from already populated region
  425. *
  426. * We're trying to allocate @size bytes aligned at @align. @chunk's area
  427. * at @off sized @this_size is a candidate. This function determines
  428. * whether the target allocation fits in the candidate area and returns the
  429. * number of bytes to pad after @off. If the target area doesn't fit, -1
  430. * is returned.
  431. *
  432. * If @pop_only is %true, this function only considers the already
  433. * populated part of the candidate area.
  434. */
  435. static int pcpu_fit_in_area(struct pcpu_chunk *chunk, int off, int this_size,
  436. int size, int align, bool pop_only)
  437. {
  438. int cand_off = off;
  439. while (true) {
  440. int head = ALIGN(cand_off, align) - off;
  441. int page_start, page_end, rs, re;
  442. if (this_size < head + size)
  443. return -1;
  444. if (!pop_only)
  445. return head;
  446. /*
  447. * If the first unpopulated page is beyond the end of the
  448. * allocation, the whole allocation is populated;
  449. * otherwise, retry from the end of the unpopulated area.
  450. */
  451. page_start = PFN_DOWN(head + off);
  452. page_end = PFN_UP(head + off + size);
  453. rs = page_start;
  454. pcpu_next_unpop(chunk, &rs, &re, PFN_UP(off + this_size));
  455. if (rs >= page_end)
  456. return head;
  457. cand_off = re * PAGE_SIZE;
  458. }
  459. }
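/*
 * Padding example (illustrative): a candidate area at @off == 20 with
 * @align == 16 needs head == ALIGN(20, 16) - 20 == 12 bytes of padding
 * before the allocation can start; the fit fails unless
 * @this_size >= head + @size.
 */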
  460. /**
  461. * pcpu_alloc_area - allocate area from a pcpu_chunk
  462. * @chunk: chunk of interest
  463. * @size: wanted size in bytes
  464. * @align: wanted align
  465. * @pop_only: allocate only from the populated area
  466. * @occ_pages_p: out param for the number of pages the area occupies
  467. *
  468. * Try to allocate @size bytes area aligned at @align from @chunk.
  469. * Note that this function only allocates the offset. It doesn't
  470. * populate or map the area.
  471. *
  472. * @chunk->map must have at least two free slots.
  473. *
  474. * CONTEXT:
  475. * pcpu_lock.
  476. *
  477. * RETURNS:
  478. * Allocated offset in @chunk on success, -1 if no matching area is
  479. * found.
  480. */
  481. static int pcpu_alloc_area(struct pcpu_chunk *chunk, int size, int align,
  482. bool pop_only, int *occ_pages_p)
  483. {
  484. int oslot = pcpu_chunk_slot(chunk);
  485. int max_contig = 0;
  486. int i, off;
  487. bool seen_free = false;
  488. int *p;
  489. for (i = chunk->first_free, p = chunk->map + i; i < chunk->map_used; i++, p++) {
  490. int head, tail;
  491. int this_size;
  492. off = *p;
  493. if (off & 1)
  494. continue;
  495. this_size = (p[1] & ~1) - off;
  496. head = pcpu_fit_in_area(chunk, off, this_size, size, align,
  497. pop_only);
  498. if (head < 0) {
  499. if (!seen_free) {
  500. chunk->first_free = i;
  501. seen_free = true;
  502. }
  503. max_contig = max(this_size, max_contig);
  504. continue;
  505. }
  506. /*
  507. * If head is small or the previous block is free,
  508. * merge'em. Note that 'small' is defined as smaller
  509. * than sizeof(int), which is very small but isn't too
  510. * uncommon for percpu allocations.
  511. */
  512. if (head && (head < sizeof(int) || !(p[-1] & 1))) {
  513. *p = off += head;
  514. if (p[-1] & 1)
  515. chunk->free_size -= head;
  516. else
  517. max_contig = max(*p - p[-1], max_contig);
  518. this_size -= head;
  519. head = 0;
  520. }
  521. /* if tail is small, just keep it around */
  522. tail = this_size - head - size;
  523. if (tail < sizeof(int)) {
  524. tail = 0;
  525. size = this_size - head;
  526. }
  527. /* split if warranted */
  528. if (head || tail) {
  529. int nr_extra = !!head + !!tail;
  530. /* insert new subblocks */
  531. memmove(p + nr_extra + 1, p + 1,
  532. sizeof(chunk->map[0]) * (chunk->map_used - i));
  533. chunk->map_used += nr_extra;
  534. if (head) {
  535. if (!seen_free) {
  536. chunk->first_free = i;
  537. seen_free = true;
  538. }
  539. *++p = off += head;
  540. ++i;
  541. max_contig = max(head, max_contig);
  542. }
  543. if (tail) {
  544. p[1] = off + size;
  545. max_contig = max(tail, max_contig);
  546. }
  547. }
  548. if (!seen_free)
  549. chunk->first_free = i + 1;
  550. /* update hint and mark allocated */
  551. if (i + 1 == chunk->map_used)
  552. chunk->contig_hint = max_contig; /* fully scanned */
  553. else
  554. chunk->contig_hint = max(chunk->contig_hint,
  555. max_contig);
  556. chunk->free_size -= size;
  557. *p |= 1;
  558. *occ_pages_p = pcpu_count_occupied_pages(chunk, i);
  559. pcpu_chunk_relocate(chunk, oslot);
  560. return off;
  561. }
  562. chunk->contig_hint = max_contig; /* fully scanned */
  563. pcpu_chunk_relocate(chunk, oslot);
  564. /* tell the upper layer that this chunk has no matching area */
  565. return -1;
  566. }
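/*
 * Map transition example (illustrative, 8K unit): a fresh chunk has
 * map == { 0, 8192|1 } and map_used == 1, i.e. one free area [0, 8192).
 * Allocating 512 bytes marks the first entry in-use and splits off the
 * tail: map == { 0|1, 512, 8192|1 }, map_used == 2.
 */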
  567. /**
  568. * pcpu_free_area - free area to a pcpu_chunk
  569. * @chunk: chunk of interest
  570. * @freeme: offset of area to free
  571. * @occ_pages_p: out param for the number of pages the area occupies
  572. *
  573. * Free the area starting at @freeme in @chunk. Note that this function
  574. * only modifies the allocation map. It doesn't depopulate or unmap
  575. * the area.
  576. *
  577. * CONTEXT:
  578. * pcpu_lock.
  579. */
  580. static void pcpu_free_area(struct pcpu_chunk *chunk, int freeme,
  581. int *occ_pages_p)
  582. {
  583. int oslot = pcpu_chunk_slot(chunk);
  584. int off = 0;
  585. unsigned i, j;
  586. int to_free = 0;
  587. int *p;
  588. freeme |= 1; /* we are searching for <given offset, in use> pair */
  589. i = 0;
  590. j = chunk->map_used;
  591. while (i != j) {
  592. unsigned k = (i + j) / 2;
  593. off = chunk->map[k];
  594. if (off < freeme)
  595. i = k + 1;
  596. else if (off > freeme)
  597. j = k;
  598. else
  599. i = j = k;
  600. }
  601. BUG_ON(off != freeme);
  602. if (i < chunk->first_free)
  603. chunk->first_free = i;
  604. p = chunk->map + i;
  605. *p = off &= ~1;
  606. chunk->free_size += (p[1] & ~1) - off;
  607. *occ_pages_p = pcpu_count_occupied_pages(chunk, i);
  608. /* merge with next? */
  609. if (!(p[1] & 1))
  610. to_free++;
  611. /* merge with previous? */
  612. if (i > 0 && !(p[-1] & 1)) {
  613. to_free++;
  614. i--;
  615. p--;
  616. }
  617. if (to_free) {
  618. chunk->map_used -= to_free;
  619. memmove(p + 1, p + 1 + to_free,
  620. (chunk->map_used - i) * sizeof(chunk->map[0]));
  621. }
  622. chunk->contig_hint = max(chunk->map[i + 1] - chunk->map[i] - 1, chunk->contig_hint);
  623. pcpu_chunk_relocate(chunk, oslot);
  624. }
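/*
 * pcpu_alloc_chunk - allocate and initialize an empty chunk descriptor
 *
 * Allocates the chunk struct and a default-sized area map, then records
 * the whole unit as a single free area (offset 0 followed by the
 * pcpu_unit_size sentry). The backing pages are not allocated here;
 * that is left to pcpu_create_chunk() and later population.
 */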
  625. static struct pcpu_chunk *pcpu_alloc_chunk(void)
  626. {
  627. struct pcpu_chunk *chunk;
  628. chunk = pcpu_mem_zalloc(pcpu_chunk_struct_size);
  629. if (!chunk)
  630. return NULL;
  631. chunk->map = pcpu_mem_zalloc(PCPU_DFL_MAP_ALLOC *
  632. sizeof(chunk->map[0]));
  633. if (!chunk->map) {
  634. pcpu_mem_free(chunk, pcpu_chunk_struct_size);
  635. return NULL;
  636. }
  637. chunk->map_alloc = PCPU_DFL_MAP_ALLOC;
  638. chunk->map[0] = 0;
  639. chunk->map[1] = pcpu_unit_size | 1;
  640. chunk->map_used = 1;
  641. INIT_LIST_HEAD(&chunk->list);
  642. INIT_LIST_HEAD(&chunk->map_extend_list);
  643. chunk->free_size = pcpu_unit_size;
  644. chunk->contig_hint = pcpu_unit_size;
  645. return chunk;
  646. }
  647. static void pcpu_free_chunk(struct pcpu_chunk *chunk)
  648. {
  649. if (!chunk)
  650. return;
  651. pcpu_mem_free(chunk->map, chunk->map_alloc * sizeof(chunk->map[0]));
  652. pcpu_mem_free(chunk, pcpu_chunk_struct_size);
  653. }
  654. /**
  655. * pcpu_chunk_populated - post-population bookkeeping
  656. * @chunk: pcpu_chunk which got populated
  657. * @page_start: the start page
  658. * @page_end: the end page
  659. *
  660. * Pages in [@page_start,@page_end) have been populated to @chunk. Update
  661. * the bookkeeping information accordingly. Must be called after each
  662. * successful population.
  663. */
  664. static void pcpu_chunk_populated(struct pcpu_chunk *chunk,
  665. int page_start, int page_end)
  666. {
  667. int nr = page_end - page_start;
  668. lockdep_assert_held(&pcpu_lock);
  669. bitmap_set(chunk->populated, page_start, nr);
  670. chunk->nr_populated += nr;
  671. pcpu_nr_empty_pop_pages += nr;
  672. }
  673. /**
  674. * pcpu_chunk_depopulated - post-depopulation bookkeeping
  675. * @chunk: pcpu_chunk which got depopulated
  676. * @page_start: the start page
  677. * @page_end: the end page
  678. *
  679. * Pages in [@page_start,@page_end) have been depopulated from @chunk.
  680. * Update the bookkeeping information accordingly. Must be called after
  681. * each successful depopulation.
  682. */
  683. static void pcpu_chunk_depopulated(struct pcpu_chunk *chunk,
  684. int page_start, int page_end)
  685. {
  686. int nr = page_end - page_start;
  687. lockdep_assert_held(&pcpu_lock);
  688. bitmap_clear(chunk->populated, page_start, nr);
  689. chunk->nr_populated -= nr;
  690. pcpu_nr_empty_pop_pages -= nr;
  691. }
  692. /*
  693. * Chunk management implementation.
  694. *
  695. * To allow different implementations, chunk alloc/free and
  696. * [de]population are implemented in a separate file which is pulled
  697. * into this file and compiled together. The following functions
  698. * should be implemented.
  699. *
  700. * pcpu_populate_chunk - populate the specified range of a chunk
  701. * pcpu_depopulate_chunk - depopulate the specified range of a chunk
  702. * pcpu_create_chunk - create a new chunk
  703. * pcpu_destroy_chunk - destroy a chunk, always preceded by full depop
  704. * pcpu_addr_to_page - translate address to the corresponding struct page
  705. * pcpu_verify_alloc_info - check alloc_info is acceptable during init
  706. */
  707. static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size);
  708. static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size);
  709. static struct pcpu_chunk *pcpu_create_chunk(void);
  710. static void pcpu_destroy_chunk(struct pcpu_chunk *chunk);
  711. static struct page *pcpu_addr_to_page(void *addr);
  712. static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai);
  713. #ifdef CONFIG_NEED_PER_CPU_KM
  714. #include "percpu-km.c"
  715. #else
  716. #include "percpu-vm.c"
  717. #endif
  718. /**
  719. * pcpu_chunk_addr_search - determine chunk containing specified address
  720. * @addr: address for which the chunk needs to be determined.
  721. *
  722. * RETURNS:
  723. * The address of the found chunk.
  724. */
  725. static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
  726. {
  727. /* is it in the first chunk? */
  728. if (pcpu_addr_in_first_chunk(addr)) {
  729. /* is it in the reserved area? */
  730. if (pcpu_addr_in_reserved_chunk(addr))
  731. return pcpu_reserved_chunk;
  732. return pcpu_first_chunk;
  733. }
  734. /*
  735. * The address is relative to unit0 which might be unused and
  736. * thus unmapped. Offset the address to the unit space of the
  737. * current processor before looking it up in the vmalloc
  738. * space. Note that any possible cpu id can be used here, so
  739. * there's no need to worry about preemption or cpu hotplug.
  740. */
  741. addr += pcpu_unit_offsets[raw_smp_processor_id()];
  742. return pcpu_get_page_chunk(pcpu_addr_to_page(addr));
  743. }
  744. /**
  745. * pcpu_alloc - the percpu allocator
  746. * @size: size of area to allocate in bytes
  747. * @align: alignment of area (max PAGE_SIZE)
  748. * @reserved: allocate from the reserved chunk if available
  749. * @gfp: allocation flags
  750. *
  751. * Allocate percpu area of @size bytes aligned at @align. If @gfp doesn't
  752. * contain %GFP_KERNEL, the allocation is atomic.
  753. *
  754. * RETURNS:
  755. * Percpu pointer to the allocated area on success, NULL on failure.
  756. */
  757. static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
  758. gfp_t gfp)
  759. {
  760. static int warn_limit = 10;
  761. struct pcpu_chunk *chunk;
  762. const char *err;
  763. bool is_atomic = (gfp & GFP_KERNEL) != GFP_KERNEL;
  764. int occ_pages = 0;
  765. int slot, off, new_alloc, cpu, ret;
  766. unsigned long flags;
  767. void __percpu *ptr;
  768. /*
  769. * We want the lowest bit of offset available for in-use/free
  770. * indicator, so force >= 16bit alignment and make size even.
  771. */
  772. if (unlikely(align < 2))
  773. align = 2;
  774. size = ALIGN(size, 2);
  775. if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE)) {
  776. WARN(true, "illegal size (%zu) or align (%zu) for "
  777. "percpu allocation\n", size, align);
  778. return NULL;
  779. }
  780. if (!is_atomic)
  781. mutex_lock(&pcpu_alloc_mutex);
  782. spin_lock_irqsave(&pcpu_lock, flags);
  783. /* serve reserved allocations from the reserved chunk if available */
  784. if (reserved && pcpu_reserved_chunk) {
  785. chunk = pcpu_reserved_chunk;
  786. if (size > chunk->contig_hint) {
  787. err = "alloc from reserved chunk failed";
  788. goto fail_unlock;
  789. }
  790. while ((new_alloc = pcpu_need_to_extend(chunk, is_atomic))) {
  791. spin_unlock_irqrestore(&pcpu_lock, flags);
  792. if (is_atomic ||
  793. pcpu_extend_area_map(chunk, new_alloc) < 0) {
  794. err = "failed to extend area map of reserved chunk";
  795. goto fail;
  796. }
  797. spin_lock_irqsave(&pcpu_lock, flags);
  798. }
  799. off = pcpu_alloc_area(chunk, size, align, is_atomic,
  800. &occ_pages);
  801. if (off >= 0)
  802. goto area_found;
  803. err = "alloc from reserved chunk failed";
  804. goto fail_unlock;
  805. }
  806. restart:
  807. /* search through normal chunks */
  808. for (slot = pcpu_size_to_slot(size); slot < pcpu_nr_slots; slot++) {
  809. list_for_each_entry(chunk, &pcpu_slot[slot], list) {
  810. if (size > chunk->contig_hint)
  811. continue;
  812. new_alloc = pcpu_need_to_extend(chunk, is_atomic);
  813. if (new_alloc) {
  814. if (is_atomic)
  815. continue;
  816. spin_unlock_irqrestore(&pcpu_lock, flags);
  817. if (pcpu_extend_area_map(chunk,
  818. new_alloc) < 0) {
  819. err = "failed to extend area map";
  820. goto fail;
  821. }
  822. spin_lock_irqsave(&pcpu_lock, flags);
  823. /*
  824. * pcpu_lock has been dropped, need to
  825. * restart cpu_slot list walking.
  826. */
  827. goto restart;
  828. }
  829. off = pcpu_alloc_area(chunk, size, align, is_atomic,
  830. &occ_pages);
  831. if (off >= 0)
  832. goto area_found;
  833. }
  834. }
  835. spin_unlock_irqrestore(&pcpu_lock, flags);
  836. /*
  837. * No space left. Create a new chunk. We don't want multiple
  838. * tasks to create chunks simultaneously. Serialize and create iff
  839. * there's still no empty chunk after grabbing the mutex.
  840. */
  841. if (is_atomic)
  842. goto fail;
  843. if (list_empty(&pcpu_slot[pcpu_nr_slots - 1])) {
  844. chunk = pcpu_create_chunk();
  845. if (!chunk) {
  846. err = "failed to allocate new chunk";
  847. goto fail;
  848. }
  849. spin_lock_irqsave(&pcpu_lock, flags);
  850. pcpu_chunk_relocate(chunk, -1);
  851. } else {
  852. spin_lock_irqsave(&pcpu_lock, flags);
  853. }
  854. goto restart;
  855. area_found:
  856. spin_unlock_irqrestore(&pcpu_lock, flags);
  857. /* populate if not all pages are already there */
  858. if (!is_atomic) {
  859. int page_start, page_end, rs, re;
  860. page_start = PFN_DOWN(off);
  861. page_end = PFN_UP(off + size);
  862. pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) {
  863. WARN_ON(chunk->immutable);
  864. ret = pcpu_populate_chunk(chunk, rs, re);
  865. spin_lock_irqsave(&pcpu_lock, flags);
  866. if (ret) {
  867. pcpu_free_area(chunk, off, &occ_pages);
  868. err = "failed to populate";
  869. goto fail_unlock;
  870. }
  871. pcpu_chunk_populated(chunk, rs, re);
  872. spin_unlock_irqrestore(&pcpu_lock, flags);
  873. }
  874. mutex_unlock(&pcpu_alloc_mutex);
  875. }
  876. if (chunk != pcpu_reserved_chunk)
  877. pcpu_nr_empty_pop_pages -= occ_pages;
  878. if (pcpu_nr_empty_pop_pages < PCPU_EMPTY_POP_PAGES_LOW)
  879. pcpu_schedule_balance_work();
  880. /* clear the areas and return address relative to base address */
  881. for_each_possible_cpu(cpu)
  882. memset((void *)pcpu_chunk_addr(chunk, cpu, 0) + off, 0, size);
  883. ptr = __addr_to_pcpu_ptr(chunk->base_addr + off);
  884. kmemleak_alloc_percpu(ptr, size, gfp);
  885. return ptr;
  886. fail_unlock:
  887. spin_unlock_irqrestore(&pcpu_lock, flags);
  888. fail:
  889. if (!is_atomic && warn_limit) {
  890. pr_warning("PERCPU: allocation failed, size=%zu align=%zu atomic=%d, %s\n",
  891. size, align, is_atomic, err);
  892. dump_stack();
  893. if (!--warn_limit)
  894. pr_info("PERCPU: limit reached, disable warning\n");
  895. }
  896. if (is_atomic) {
  897. /* see the flag handling in pcpu_balance_workfn() */
  898. pcpu_atomic_alloc_failed = true;
  899. pcpu_schedule_balance_work();
  900. } else {
  901. mutex_unlock(&pcpu_alloc_mutex);
  902. }
  903. return NULL;
  904. }
  905. /**
  906. * __alloc_percpu_gfp - allocate dynamic percpu area
  907. * @size: size of area to allocate in bytes
  908. * @align: alignment of area (max PAGE_SIZE)
  909. * @gfp: allocation flags
  910. *
  911. * Allocate zero-filled percpu area of @size bytes aligned at @align. If
  912. * @gfp doesn't contain %GFP_KERNEL, the allocation doesn't block and can
  913. * be called from any context but is a lot more likely to fail.
  914. *
  915. * RETURNS:
  916. * Percpu pointer to the allocated area on success, NULL on failure.
  917. */
  918. void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp)
  919. {
  920. return pcpu_alloc(size, align, false, gfp);
  921. }
  922. EXPORT_SYMBOL_GPL(__alloc_percpu_gfp);
  923. /**
  924. * __alloc_percpu - allocate dynamic percpu area
  925. * @size: size of area to allocate in bytes
  926. * @align: alignment of area (max PAGE_SIZE)
  927. *
  928. * Equivalent to __alloc_percpu_gfp(size, align, %GFP_KERNEL).
  929. */
  930. void __percpu *__alloc_percpu(size_t size, size_t align)
  931. {
  932. return pcpu_alloc(size, align, false, GFP_KERNEL);
  933. }
  934. EXPORT_SYMBOL_GPL(__alloc_percpu);
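/*
 * Typical caller-side usage sketch (variable names hypothetical):
 *
 *	int __percpu *cnt = alloc_percpu(int);	/* wraps __alloc_percpu() */
 *
 *	if (cnt) {
 *		this_cpu_inc(*cnt);		/* touch this cpu's copy */
 *		free_percpu(cnt);
 *	}
 */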
  935. /**
  936. * __alloc_reserved_percpu - allocate reserved percpu area
  937. * @size: size of area to allocate in bytes
  938. * @align: alignment of area (max PAGE_SIZE)
  939. *
  940. * Allocate zero-filled percpu area of @size bytes aligned at @align
  941. * from reserved percpu area if arch has set it up; otherwise,
  942. * allocation is served from the same dynamic area. Might sleep.
  943. * Might trigger writeouts.
  944. *
  945. * CONTEXT:
  946. * Does GFP_KERNEL allocation.
  947. *
  948. * RETURNS:
  949. * Percpu pointer to the allocated area on success, NULL on failure.
  950. */
  951. void __percpu *__alloc_reserved_percpu(size_t size, size_t align)
  952. {
  953. return pcpu_alloc(size, align, true, GFP_KERNEL);
  954. }
  955. /**
  956. * pcpu_balance_workfn - manage the amount of free chunks and populated pages
  957. * @work: unused
  958. *
  959. * Reclaim all fully free chunks except for the first one, service pending area map extensions, and keep enough empty populated pages around for atomic allocations.
  960. */
  961. static void pcpu_balance_workfn(struct work_struct *work)
  962. {
  963. LIST_HEAD(to_free);
  964. struct list_head *free_head = &pcpu_slot[pcpu_nr_slots - 1];
  965. struct pcpu_chunk *chunk, *next;
  966. int slot, nr_to_pop, ret;
  967. /*
  968. * There's no reason to keep around multiple unused chunks and VM
  969. * areas can be scarce. Destroy all free chunks except for one.
  970. */
  971. mutex_lock(&pcpu_alloc_mutex);
  972. spin_lock_irq(&pcpu_lock);
  973. list_for_each_entry_safe(chunk, next, free_head, list) {
  974. WARN_ON(chunk->immutable);
  975. /* spare the first one */
  976. if (chunk == list_first_entry(free_head, struct pcpu_chunk, list))
  977. continue;
  978. list_del_init(&chunk->map_extend_list);
  979. list_move(&chunk->list, &to_free);
  980. }
  981. spin_unlock_irq(&pcpu_lock);
  982. list_for_each_entry_safe(chunk, next, &to_free, list) {
  983. int rs, re;
  984. pcpu_for_each_pop_region(chunk, rs, re, 0, pcpu_unit_pages) {
  985. pcpu_depopulate_chunk(chunk, rs, re);
  986. spin_lock_irq(&pcpu_lock);
  987. pcpu_chunk_depopulated(chunk, rs, re);
  988. spin_unlock_irq(&pcpu_lock);
  989. }
  990. pcpu_destroy_chunk(chunk);
  991. }
  992. /* service chunks which requested async area map extension */
  993. do {
  994. int new_alloc = 0;
  995. spin_lock_irq(&pcpu_lock);
  996. chunk = list_first_entry_or_null(&pcpu_map_extend_chunks,
  997. struct pcpu_chunk, map_extend_list);
  998. if (chunk) {
  999. list_del_init(&chunk->map_extend_list);
  1000. new_alloc = pcpu_need_to_extend(chunk, false);
  1001. }
  1002. spin_unlock_irq(&pcpu_lock);
  1003. if (new_alloc)
  1004. pcpu_extend_area_map(chunk, new_alloc);
  1005. } while (chunk);
  1006. /*
  1007. * Ensure there are a certain number of free populated pages for
  1008. * atomic allocs. Fill up from the most packed so that atomic
  1009. * allocs don't increase fragmentation. If atomic allocation
  1010. * failed previously, always populate the maximum amount. This
  1011. * should prevent atomic allocs larger than PAGE_SIZE from repeatedly
  1012. * failing indefinitely; however, large atomic allocs are not
  1013. * something we support properly and can be highly unreliable and
  1014. * inefficient.
  1015. */
  1016. retry_pop:
  1017. if (pcpu_atomic_alloc_failed) {
  1018. nr_to_pop = PCPU_EMPTY_POP_PAGES_HIGH;
  1019. /* best effort anyway, don't worry about synchronization */
  1020. pcpu_atomic_alloc_failed = false;
  1021. } else {
  1022. nr_to_pop = clamp(PCPU_EMPTY_POP_PAGES_HIGH -
  1023. pcpu_nr_empty_pop_pages,
  1024. 0, PCPU_EMPTY_POP_PAGES_HIGH);
  1025. }
  1026. for (slot = pcpu_size_to_slot(PAGE_SIZE); slot < pcpu_nr_slots; slot++) {
  1027. int nr_unpop = 0, rs, re;
  1028. if (!nr_to_pop)
  1029. break;
  1030. spin_lock_irq(&pcpu_lock);
  1031. list_for_each_entry(chunk, &pcpu_slot[slot], list) {
  1032. nr_unpop = pcpu_unit_pages - chunk->nr_populated;
  1033. if (nr_unpop)
  1034. break;
  1035. }
  1036. spin_unlock_irq(&pcpu_lock);
  1037. if (!nr_unpop)
  1038. continue;
  1039. /* @chunk can't go away while pcpu_alloc_mutex is held */
  1040. pcpu_for_each_unpop_region(chunk, rs, re, 0, pcpu_unit_pages) {
  1041. int nr = min(re - rs, nr_to_pop);
  1042. ret = pcpu_populate_chunk(chunk, rs, rs + nr);
  1043. if (!ret) {
  1044. nr_to_pop -= nr;
  1045. spin_lock_irq(&pcpu_lock);
  1046. pcpu_chunk_populated(chunk, rs, rs + nr);
  1047. spin_unlock_irq(&pcpu_lock);
  1048. } else {
  1049. nr_to_pop = 0;
  1050. }
  1051. if (!nr_to_pop)
  1052. break;
  1053. }
  1054. }
  1055. if (nr_to_pop) {
  1056. /* ran out of chunks to populate, create a new one and retry */
  1057. chunk = pcpu_create_chunk();
  1058. if (chunk) {
  1059. spin_lock_irq(&pcpu_lock);
  1060. pcpu_chunk_relocate(chunk, -1);
  1061. spin_unlock_irq(&pcpu_lock);
  1062. goto retry_pop;
  1063. }
  1064. }
  1065. mutex_unlock(&pcpu_alloc_mutex);
  1066. }
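/*
 * Target arithmetic example (illustrative): with
 * PCPU_EMPTY_POP_PAGES_HIGH == 4 and pcpu_nr_empty_pop_pages == 1,
 * nr_to_pop is clamp(4 - 1, 0, 4) == 3 empty pages to populate, unless
 * an atomic allocation failed earlier, in which case the full 4 are
 * attempted.
 */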
  1067. /**
  1068. * free_percpu - free percpu area
  1069. * @ptr: pointer to area to free
  1070. *
  1071. * Free percpu area @ptr.
  1072. *
  1073. * CONTEXT:
  1074. * Can be called from atomic context.
  1075. */
  1076. void free_percpu(void __percpu *ptr)
  1077. {
  1078. void *addr;
  1079. struct pcpu_chunk *chunk;
  1080. unsigned long flags;
  1081. int off, occ_pages;
  1082. if (!ptr)
  1083. return;
  1084. kmemleak_free_percpu(ptr);
  1085. addr = __pcpu_ptr_to_addr(ptr);
  1086. spin_lock_irqsave(&pcpu_lock, flags);
  1087. chunk = pcpu_chunk_addr_search(addr);
  1088. off = addr - chunk->base_addr;
  1089. pcpu_free_area(chunk, off, &occ_pages);
  1090. if (chunk != pcpu_reserved_chunk)
  1091. pcpu_nr_empty_pop_pages += occ_pages;
  1092. /* if there is more than one fully free chunk, wake up the grim reaper */
  1093. if (chunk->free_size == pcpu_unit_size) {
  1094. struct pcpu_chunk *pos;
  1095. list_for_each_entry(pos, &pcpu_slot[pcpu_nr_slots - 1], list)
  1096. if (pos != chunk) {
  1097. pcpu_schedule_balance_work();
  1098. break;
  1099. }
  1100. }
  1101. spin_unlock_irqrestore(&pcpu_lock, flags);
  1102. }
  1103. EXPORT_SYMBOL_GPL(free_percpu);
  1104. /**
  1105. * is_kernel_percpu_address - test whether address is from static percpu area
  1106. * @addr: address to test
  1107. *
  1108. * Test whether @addr belongs to in-kernel static percpu area. Module
  1109. * static percpu areas are not considered. For those, use
  1110. * is_module_percpu_address().
  1111. *
  1112. * RETURNS:
  1113. * %true if @addr is from in-kernel static percpu area, %false otherwise.
  1114. */
  1115. bool is_kernel_percpu_address(unsigned long addr)
  1116. {
  1117. #ifdef CONFIG_SMP
  1118. const size_t static_size = __per_cpu_end - __per_cpu_start;
  1119. void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
  1120. unsigned int cpu;
  1121. for_each_possible_cpu(cpu) {
  1122. void *start = per_cpu_ptr(base, cpu);
  1123. if ((void *)addr >= start && (void *)addr < start + static_size)
  1124. return true;
  1125. }
  1126. #endif
  1127. /* on UP, can't distinguish from other static vars, always false */
  1128. return false;
  1129. }
  1130. /**
  1131. * per_cpu_ptr_to_phys - convert translated percpu address to physical address
  1132. * @addr: the address to be converted to physical address
  1133. *
  1134. * Given @addr, which is a dereferenceable address obtained via one of
  1135. * percpu access macros, this function translates it into its physical
  1136. * address. The caller is responsible for ensuring @addr stays valid
  1137. * until this function finishes.
  1138. *
  1139. * The percpu allocator has special setup for the first chunk, which currently
  1140. * supports either embedding in linear address space or vmalloc mapping,
  1141. * and, from the second one, the backing allocator (currently either vm or
  1142. * km) provides translation.
  1143. *
  1144. * The addr can be translated simply without checking if it falls into the
  1145. * first chunk. But the current code better reflects how the percpu allocator
  1146. * actually works, and the verification can discover both bugs in percpu
  1147. * allocator itself and per_cpu_ptr_to_phys() callers. So we keep current
  1148. * code.
  1149. *
  1150. * RETURNS:
  1151. * The physical address for @addr.
  1152. */
  1153. phys_addr_t per_cpu_ptr_to_phys(void *addr)
  1154. {
  1155. void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
  1156. bool in_first_chunk = false;
  1157. unsigned long first_low, first_high;
  1158. unsigned int cpu;
  1159. /*
  1160. * The following test on unit_low/high isn't strictly
  1161. * necessary but will speed up lookups of addresses which
  1162. * aren't in the first chunk.
  1163. */
  1164. first_low = pcpu_chunk_addr(pcpu_first_chunk, pcpu_low_unit_cpu, 0);
  1165. first_high = pcpu_chunk_addr(pcpu_first_chunk, pcpu_high_unit_cpu,
  1166. pcpu_unit_pages);
  1167. if ((unsigned long)addr >= first_low &&
  1168. (unsigned long)addr < first_high) {
  1169. for_each_possible_cpu(cpu) {
  1170. void *start = per_cpu_ptr(base, cpu);
  1171. if (addr >= start && addr < start + pcpu_unit_size) {
  1172. in_first_chunk = true;
  1173. break;
  1174. }
  1175. }
  1176. }
  1177. if (in_first_chunk) {
  1178. if (!is_vmalloc_addr(addr))
  1179. return __pa(addr);
  1180. else
  1181. return page_to_phys(vmalloc_to_page(addr)) +
  1182. offset_in_page(addr);
  1183. } else
  1184. return page_to_phys(pcpu_addr_to_page(addr)) +
  1185. offset_in_page(addr);
  1186. }
  1187. /**
  1188. * pcpu_alloc_alloc_info - allocate percpu allocation info
  1189. * @nr_groups: the number of groups
  1190. * @nr_units: the number of units
  1191. *
  1192. * Allocate ai which is large enough for @nr_groups groups containing
  1193. * @nr_units units. The returned ai's groups[0].cpu_map points to the
  1194. * cpu_map array which is long enough for @nr_units and filled with
  1195. * NR_CPUS. It's the caller's responsibility to initialize cpu_map
  1196. * pointer of other groups.
  1197. *
  1198. * RETURNS:
  1199. * Pointer to the allocated pcpu_alloc_info on success, NULL on
  1200. * failure.
  1201. */
  1202. struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
  1203. int nr_units)
  1204. {
  1205. struct pcpu_alloc_info *ai;
  1206. size_t base_size, ai_size;
  1207. void *ptr;
  1208. int unit;
  1209. base_size = ALIGN(sizeof(*ai) + nr_groups * sizeof(ai->groups[0]),
  1210. __alignof__(ai->groups[0].cpu_map[0]));
  1211. ai_size = base_size + nr_units * sizeof(ai->groups[0].cpu_map[0]);
  1212. ptr = memblock_virt_alloc_nopanic(PFN_ALIGN(ai_size), 0);
  1213. if (!ptr)
  1214. return NULL;
  1215. ai = ptr;
  1216. ptr += base_size;
  1217. ai->groups[0].cpu_map = ptr;
  1218. for (unit = 0; unit < nr_units; unit++)
  1219. ai->groups[0].cpu_map[unit] = NR_CPUS;
  1220. ai->nr_groups = nr_groups;
  1221. ai->__ai_size = PFN_ALIGN(ai_size);
  1222. return ai;
  1223. }
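/*
 * Layout note (descriptive): the groups[] array is allocated inline
 * right after struct pcpu_alloc_info, and groups[0].cpu_map points at a
 * trailing array of @nr_units entries; ai_size is that total rounded up
 * to a whole page so the region can later be handed back to memblock by
 * pcpu_free_alloc_info().
 */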
  1224. /**
  1225. * pcpu_free_alloc_info - free percpu allocation info
  1226. * @ai: pcpu_alloc_info to free
  1227. *
  1228. * Free @ai which was allocated by pcpu_alloc_alloc_info().
  1229. */
  1230. void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai)
  1231. {
  1232. memblock_free_early(__pa(ai), ai->__ai_size);
  1233. }
  1234. /**
  1235. * pcpu_dump_alloc_info - print out information about pcpu_alloc_info
  1236. * @lvl: loglevel
  1237. * @ai: allocation info to dump
  1238. *
  1239. * Print out information about @ai using loglevel @lvl.
  1240. */
  1241. static void pcpu_dump_alloc_info(const char *lvl,
  1242. const struct pcpu_alloc_info *ai)
  1243. {
  1244. int group_width = 1, cpu_width = 1, width;
  1245. char empty_str[] = "--------";
  1246. int alloc = 0, alloc_end = 0;
  1247. int group, v;
  1248. int upa, apl; /* units per alloc, allocs per line */
  1249. v = ai->nr_groups;
  1250. while (v /= 10)
  1251. group_width++;
  1252. v = num_possible_cpus();
  1253. while (v /= 10)
  1254. cpu_width++;
  1255. empty_str[min_t(int, cpu_width, sizeof(empty_str) - 1)] = '\0';
  1256. upa = ai->alloc_size / ai->unit_size;
  1257. width = upa * (cpu_width + 1) + group_width + 3;
  1258. apl = rounddown_pow_of_two(max(60 / width, 1));
  1259. printk("%spcpu-alloc: s%zu r%zu d%zu u%zu alloc=%zu*%zu",
  1260. lvl, ai->static_size, ai->reserved_size, ai->dyn_size,
  1261. ai->unit_size, ai->alloc_size / ai->atom_size, ai->atom_size);
  1262. for (group = 0; group < ai->nr_groups; group++) {
  1263. const struct pcpu_group_info *gi = &ai->groups[group];
  1264. int unit = 0, unit_end = 0;
  1265. BUG_ON(gi->nr_units % upa);
  1266. for (alloc_end += gi->nr_units / upa;
  1267. alloc < alloc_end; alloc++) {
  1268. if (!(alloc % apl)) {
  1269. printk(KERN_CONT "\n");
  1270. printk("%spcpu-alloc: ", lvl);
  1271. }
  1272. printk(KERN_CONT "[%0*d] ", group_width, group);
  1273. for (unit_end += upa; unit < unit_end; unit++)
  1274. if (gi->cpu_map[unit] != NR_CPUS)
  1275. printk(KERN_CONT "%0*d ", cpu_width,
  1276. gi->cpu_map[unit]);
  1277. else
  1278. printk(KERN_CONT "%s ", empty_str);
  1279. }
  1280. }
  1281. printk(KERN_CONT "\n");
  1282. }
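/*
 * For reference, the output of pcpu_dump_alloc_info() looks roughly like
 * the following.  The numbers are illustrative only (a hypothetical 4-CPU
 * embed configuration); s/r/d/u are the static, reserved, dynamic and unit
 * sizes in bytes, and each bracketed entry is one allocation within a
 * group followed by the cpus mapped to its units ("--" marks an unused
 * unit):
 *
 *	pcpu-alloc: s126976 r8192 d29696 u524288 alloc=1*2097152
 *	pcpu-alloc: [0] 0 1 2 3
 */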
  1283. /**
  1284. * pcpu_setup_first_chunk - initialize the first percpu chunk
1285. * @ai: pcpu_alloc_info describing how the percpu area is shaped
  1286. * @base_addr: mapped address
  1287. *
  1288. * Initialize the first percpu chunk which contains the kernel static
1289. * percpu area. This function is to be called from the arch percpu
1290. * area setup path.
  1291. *
  1292. * @ai contains all information necessary to initialize the first
  1293. * chunk and prime the dynamic percpu allocator.
  1294. *
  1295. * @ai->static_size is the size of static percpu area.
  1296. *
1297. * @ai->reserved_size, if non-zero, specifies the number of bytes to
1298. * reserve after the static area in the first chunk. The reserved
1299. * area is then available only through reserved percpu
1300. * allocation. This is primarily used to serve module percpu
  1301. * static areas on architectures where the addressing model has
  1302. * limited offset range for symbol relocations to guarantee module
  1303. * percpu symbols fall inside the relocatable range.
  1304. *
  1305. * @ai->dyn_size determines the number of bytes available for dynamic
  1306. * allocation in the first chunk. The area between @ai->static_size +
  1307. * @ai->reserved_size + @ai->dyn_size and @ai->unit_size is unused.
  1308. *
  1309. * @ai->unit_size specifies unit size and must be aligned to PAGE_SIZE
  1310. * and equal to or larger than @ai->static_size + @ai->reserved_size +
  1311. * @ai->dyn_size.
  1312. *
1313. * @ai->atom_size is the allocation atom size and is used as the alignment
  1314. * for vm areas.
  1315. *
1316. * @ai->alloc_size is the allocation size and is always a multiple of
  1317. * @ai->atom_size. This is larger than @ai->atom_size if
  1318. * @ai->unit_size is larger than @ai->atom_size.
  1319. *
  1320. * @ai->nr_groups and @ai->groups describe virtual memory layout of
  1321. * percpu areas. Units which should be colocated are put into the
  1322. * same group. Dynamic VM areas will be allocated according to these
  1323. * groupings. If @ai->nr_groups is zero, a single group containing
  1324. * all units is assumed.
  1325. *
  1326. * The caller should have mapped the first chunk at @base_addr and
  1327. * copied static data to each unit.
  1328. *
  1329. * If the first chunk ends up with both reserved and dynamic areas, it
  1330. * is served by two chunks - one to serve the core static and reserved
  1331. * areas and the other for the dynamic area. They share the same vm
1332. * and page map but use different area allocation maps to stay away
  1333. * from each other. The latter chunk is circulated in the chunk slots
  1334. * and available for dynamic allocation like any other chunks.
  1335. *
  1336. * RETURNS:
  1337. * 0 on success, -errno on failure.
  1338. */
  1339. int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
  1340. void *base_addr)
  1341. {
  1342. static char cpus_buf[4096] __initdata;
  1343. static int smap[PERCPU_DYNAMIC_EARLY_SLOTS] __initdata;
  1344. static int dmap[PERCPU_DYNAMIC_EARLY_SLOTS] __initdata;
  1345. size_t dyn_size = ai->dyn_size;
  1346. size_t size_sum = ai->static_size + ai->reserved_size + dyn_size;
  1347. struct pcpu_chunk *schunk, *dchunk = NULL;
  1348. unsigned long *group_offsets;
  1349. size_t *group_sizes;
  1350. unsigned long *unit_off;
  1351. unsigned int cpu;
  1352. int *unit_map;
  1353. int group, unit, i;
  1354. cpumask_scnprintf(cpus_buf, sizeof(cpus_buf), cpu_possible_mask);
  1355. #define PCPU_SETUP_BUG_ON(cond) do { \
  1356. if (unlikely(cond)) { \
1357. pr_emerg("PERCPU: failed to initialize, %s\n", #cond); \
  1358. pr_emerg("PERCPU: cpu_possible_mask=%s\n", cpus_buf); \
  1359. pcpu_dump_alloc_info(KERN_EMERG, ai); \
  1360. BUG(); \
  1361. } \
  1362. } while (0)
  1363. /* sanity checks */
  1364. PCPU_SETUP_BUG_ON(ai->nr_groups <= 0);
  1365. #ifdef CONFIG_SMP
  1366. PCPU_SETUP_BUG_ON(!ai->static_size);
  1367. PCPU_SETUP_BUG_ON((unsigned long)__per_cpu_start & ~PAGE_MASK);
  1368. #endif
  1369. PCPU_SETUP_BUG_ON(!base_addr);
  1370. PCPU_SETUP_BUG_ON((unsigned long)base_addr & ~PAGE_MASK);
  1371. PCPU_SETUP_BUG_ON(ai->unit_size < size_sum);
  1372. PCPU_SETUP_BUG_ON(ai->unit_size & ~PAGE_MASK);
  1373. PCPU_SETUP_BUG_ON(ai->unit_size < PCPU_MIN_UNIT_SIZE);
  1374. PCPU_SETUP_BUG_ON(ai->dyn_size < PERCPU_DYNAMIC_EARLY_SIZE);
  1375. PCPU_SETUP_BUG_ON(pcpu_verify_alloc_info(ai) < 0);
  1376. /* process group information and build config tables accordingly */
  1377. group_offsets = memblock_virt_alloc(ai->nr_groups *
  1378. sizeof(group_offsets[0]), 0);
  1379. group_sizes = memblock_virt_alloc(ai->nr_groups *
  1380. sizeof(group_sizes[0]), 0);
  1381. unit_map = memblock_virt_alloc(nr_cpu_ids * sizeof(unit_map[0]), 0);
  1382. unit_off = memblock_virt_alloc(nr_cpu_ids * sizeof(unit_off[0]), 0);
  1383. for (cpu = 0; cpu < nr_cpu_ids; cpu++)
  1384. unit_map[cpu] = UINT_MAX;
  1385. pcpu_low_unit_cpu = NR_CPUS;
  1386. pcpu_high_unit_cpu = NR_CPUS;
  1387. for (group = 0, unit = 0; group < ai->nr_groups; group++, unit += i) {
  1388. const struct pcpu_group_info *gi = &ai->groups[group];
  1389. group_offsets[group] = gi->base_offset;
  1390. group_sizes[group] = gi->nr_units * ai->unit_size;
  1391. for (i = 0; i < gi->nr_units; i++) {
  1392. cpu = gi->cpu_map[i];
  1393. if (cpu == NR_CPUS)
  1394. continue;
1395. PCPU_SETUP_BUG_ON(cpu >= nr_cpu_ids);
  1396. PCPU_SETUP_BUG_ON(!cpu_possible(cpu));
  1397. PCPU_SETUP_BUG_ON(unit_map[cpu] != UINT_MAX);
  1398. unit_map[cpu] = unit + i;
  1399. unit_off[cpu] = gi->base_offset + i * ai->unit_size;
  1400. /* determine low/high unit_cpu */
  1401. if (pcpu_low_unit_cpu == NR_CPUS ||
  1402. unit_off[cpu] < unit_off[pcpu_low_unit_cpu])
  1403. pcpu_low_unit_cpu = cpu;
  1404. if (pcpu_high_unit_cpu == NR_CPUS ||
  1405. unit_off[cpu] > unit_off[pcpu_high_unit_cpu])
  1406. pcpu_high_unit_cpu = cpu;
  1407. }
  1408. }
  1409. pcpu_nr_units = unit;
  1410. for_each_possible_cpu(cpu)
  1411. PCPU_SETUP_BUG_ON(unit_map[cpu] == UINT_MAX);
  1412. /* we're done parsing the input, undefine BUG macro and dump config */
  1413. #undef PCPU_SETUP_BUG_ON
  1414. pcpu_dump_alloc_info(KERN_DEBUG, ai);
  1415. pcpu_nr_groups = ai->nr_groups;
  1416. pcpu_group_offsets = group_offsets;
  1417. pcpu_group_sizes = group_sizes;
  1418. pcpu_unit_map = unit_map;
  1419. pcpu_unit_offsets = unit_off;
  1420. /* determine basic parameters */
  1421. pcpu_unit_pages = ai->unit_size >> PAGE_SHIFT;
  1422. pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT;
  1423. pcpu_atom_size = ai->atom_size;
  1424. pcpu_chunk_struct_size = sizeof(struct pcpu_chunk) +
  1425. BITS_TO_LONGS(pcpu_unit_pages) * sizeof(unsigned long);
  1426. /*
  1427. * Allocate chunk slots. The additional last slot is for
  1428. * empty chunks.
  1429. */
  1430. pcpu_nr_slots = __pcpu_size_to_slot(pcpu_unit_size) + 2;
  1431. pcpu_slot = memblock_virt_alloc(
  1432. pcpu_nr_slots * sizeof(pcpu_slot[0]), 0);
  1433. for (i = 0; i < pcpu_nr_slots; i++)
  1434. INIT_LIST_HEAD(&pcpu_slot[i]);
  1435. /*
  1436. * Initialize static chunk. If reserved_size is zero, the
  1437. * static chunk covers static area + dynamic allocation area
  1438. * in the first chunk. If reserved_size is not zero, it
  1439. * covers static area + reserved area (mostly used for module
  1440. * static percpu allocation).
  1441. */
  1442. schunk = memblock_virt_alloc(pcpu_chunk_struct_size, 0);
  1443. INIT_LIST_HEAD(&schunk->list);
  1444. INIT_LIST_HEAD(&schunk->map_extend_list);
  1445. schunk->base_addr = base_addr;
  1446. schunk->map = smap;
  1447. schunk->map_alloc = ARRAY_SIZE(smap);
  1448. schunk->immutable = true;
  1449. bitmap_fill(schunk->populated, pcpu_unit_pages);
  1450. schunk->nr_populated = pcpu_unit_pages;
  1451. if (ai->reserved_size) {
  1452. schunk->free_size = ai->reserved_size;
  1453. pcpu_reserved_chunk = schunk;
  1454. pcpu_reserved_chunk_limit = ai->static_size + ai->reserved_size;
  1455. } else {
  1456. schunk->free_size = dyn_size;
  1457. dyn_size = 0; /* dynamic area covered */
  1458. }
  1459. schunk->contig_hint = schunk->free_size;
  1460. schunk->map[0] = 1;
  1461. schunk->map[1] = ai->static_size;
  1462. schunk->map_used = 1;
  1463. if (schunk->free_size)
  1464. schunk->map[++schunk->map_used] = 1 | (ai->static_size + schunk->free_size);
  1465. else
  1466. schunk->map[1] |= 1;
  1467. /* init dynamic chunk if necessary */
  1468. if (dyn_size) {
  1469. dchunk = memblock_virt_alloc(pcpu_chunk_struct_size, 0);
  1470. INIT_LIST_HEAD(&dchunk->list);
  1471. INIT_LIST_HEAD(&dchunk->map_extend_list);
  1472. dchunk->base_addr = base_addr;
  1473. dchunk->map = dmap;
  1474. dchunk->map_alloc = ARRAY_SIZE(dmap);
  1475. dchunk->immutable = true;
  1476. bitmap_fill(dchunk->populated, pcpu_unit_pages);
  1477. dchunk->nr_populated = pcpu_unit_pages;
  1478. dchunk->contig_hint = dchunk->free_size = dyn_size;
  1479. dchunk->map[0] = 1;
  1480. dchunk->map[1] = pcpu_reserved_chunk_limit;
  1481. dchunk->map[2] = (pcpu_reserved_chunk_limit + dchunk->free_size) | 1;
  1482. dchunk->map_used = 2;
  1483. }
  1484. /* link the first chunk in */
  1485. pcpu_first_chunk = dchunk ?: schunk;
  1486. pcpu_nr_empty_pop_pages +=
  1487. pcpu_count_occupied_pages(pcpu_first_chunk, 1);
  1488. pcpu_chunk_relocate(pcpu_first_chunk, -1);
  1489. /* we're done */
  1490. pcpu_base_addr = base_addr;
  1491. return 0;
  1492. }
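/*
 * Illustrative sketch, not part of the original file: layout of one unit of
 * the first chunk as described in the comment above, and how it is split
 * between the static chunk (schunk) and the dynamic chunk (dchunk) when a
 * reserved area is configured:
 *
 *	base_addr
 *	|<- static_size ->|<- reserved_size ->|<-- dyn_size -->|<- unused ->|
 *	|  kernel static  |  module percpu    |    dynamic     |            |
 *	|  percpu area    |  reservations     |  allocations   |            |
 *	|<------- served by schunk ---------->|<-- by dchunk ->|            |
 *	|<--------------------------- unit_size --------------------------->|
 *
 * With reserved_size == 0, no dchunk is created and schunk serves the
 * static and dynamic areas directly.
 */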
  1493. #ifdef CONFIG_SMP
  1494. const char * const pcpu_fc_names[PCPU_FC_NR] __initconst = {
  1495. [PCPU_FC_AUTO] = "auto",
  1496. [PCPU_FC_EMBED] = "embed",
  1497. [PCPU_FC_PAGE] = "page",
  1498. };
  1499. enum pcpu_fc pcpu_chosen_fc __initdata = PCPU_FC_AUTO;
  1500. static int __init percpu_alloc_setup(char *str)
  1501. {
  1502. if (!str)
  1503. return -EINVAL;
  1504. if (0)
  1505. /* nada */;
  1506. #ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK
  1507. else if (!strcmp(str, "embed"))
  1508. pcpu_chosen_fc = PCPU_FC_EMBED;
  1509. #endif
  1510. #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
  1511. else if (!strcmp(str, "page"))
  1512. pcpu_chosen_fc = PCPU_FC_PAGE;
  1513. #endif
  1514. else
  1515. pr_warning("PERCPU: unknown allocator %s specified\n", str);
  1516. return 0;
  1517. }
  1518. early_param("percpu_alloc", percpu_alloc_setup);
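/*
 * Usage example (illustrative): the first chunk allocator can be chosen on
 * the kernel command line, e.g.
 *
 *	percpu_alloc=embed
 *	percpu_alloc=page
 *
 * An unknown value only triggers the warning above and leaves
 * pcpu_chosen_fc at PCPU_FC_AUTO.
 */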
  1519. /*
  1520. * pcpu_embed_first_chunk() is used by the generic percpu setup.
  1521. * Build it if needed by the arch config or the generic setup is going
  1522. * to be used.
  1523. */
  1524. #if defined(CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK) || \
  1525. !defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
  1526. #define BUILD_EMBED_FIRST_CHUNK
  1527. #endif
  1528. /* build pcpu_page_first_chunk() iff needed by the arch config */
  1529. #if defined(CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK)
  1530. #define BUILD_PAGE_FIRST_CHUNK
  1531. #endif
  1532. /* pcpu_build_alloc_info() is used by both embed and page first chunk */
  1533. #if defined(BUILD_EMBED_FIRST_CHUNK) || defined(BUILD_PAGE_FIRST_CHUNK)
  1534. /**
  1535. * pcpu_build_alloc_info - build alloc_info considering distances between CPUs
  1536. * @reserved_size: the size of reserved percpu area in bytes
  1537. * @dyn_size: minimum free size for dynamic allocation in bytes
  1538. * @atom_size: allocation atom size
  1539. * @cpu_distance_fn: callback to determine distance between cpus, optional
  1540. *
  1541. * This function determines grouping of units, their mappings to cpus
  1542. * and other parameters considering needed percpu size, allocation
  1543. * atom size and distances between CPUs.
  1544. *
1545. * Groups are always multiples of atom size and CPUs which are of
  1546. * LOCAL_DISTANCE both ways are grouped together and share space for
  1547. * units in the same group. The returned configuration is guaranteed
1548. * to have CPUs on different nodes in different groups and >=75% usage
  1549. * of allocated virtual address space.
  1550. *
  1551. * RETURNS:
  1552. * On success, pointer to the new allocation_info is returned. On
  1553. * failure, ERR_PTR value is returned.
  1554. */
  1555. static struct pcpu_alloc_info * __init pcpu_build_alloc_info(
  1556. size_t reserved_size, size_t dyn_size,
  1557. size_t atom_size,
  1558. pcpu_fc_cpu_distance_fn_t cpu_distance_fn)
  1559. {
  1560. static int group_map[NR_CPUS] __initdata;
  1561. static int group_cnt[NR_CPUS] __initdata;
  1562. const size_t static_size = __per_cpu_end - __per_cpu_start;
  1563. int nr_groups = 1, nr_units = 0;
  1564. size_t size_sum, min_unit_size, alloc_size;
  1565. int upa, max_upa, uninitialized_var(best_upa); /* units_per_alloc */
  1566. int last_allocs, group, unit;
  1567. unsigned int cpu, tcpu;
  1568. struct pcpu_alloc_info *ai;
  1569. unsigned int *cpu_map;
  1570. /* this function may be called multiple times */
  1571. memset(group_map, 0, sizeof(group_map));
  1572. memset(group_cnt, 0, sizeof(group_cnt));
  1573. /* calculate size_sum and ensure dyn_size is enough for early alloc */
  1574. size_sum = PFN_ALIGN(static_size + reserved_size +
  1575. max_t(size_t, dyn_size, PERCPU_DYNAMIC_EARLY_SIZE));
  1576. dyn_size = size_sum - static_size - reserved_size;
  1577. /*
  1578. * Determine min_unit_size, alloc_size and max_upa such that
  1579. * alloc_size is multiple of atom_size and is the smallest
  1580. * which can accommodate 4k aligned segments which are equal to
  1581. * or larger than min_unit_size.
  1582. */
  1583. min_unit_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE);
  1584. alloc_size = roundup(min_unit_size, atom_size);
  1585. upa = alloc_size / min_unit_size;
  1586. while (alloc_size % upa || ((alloc_size / upa) & ~PAGE_MASK))
  1587. upa--;
  1588. max_upa = upa;
  1589. /* group cpus according to their proximity */
  1590. for_each_possible_cpu(cpu) {
  1591. group = 0;
  1592. next_group:
  1593. for_each_possible_cpu(tcpu) {
  1594. if (cpu == tcpu)
  1595. break;
  1596. if (group_map[tcpu] == group && cpu_distance_fn &&
  1597. (cpu_distance_fn(cpu, tcpu) > LOCAL_DISTANCE ||
  1598. cpu_distance_fn(tcpu, cpu) > LOCAL_DISTANCE)) {
  1599. group++;
  1600. nr_groups = max(nr_groups, group + 1);
  1601. goto next_group;
  1602. }
  1603. }
  1604. group_map[cpu] = group;
  1605. group_cnt[group]++;
  1606. }
  1607. /*
  1608. * Expand unit size until address space usage goes over 75%
  1609. * and then as much as possible without using more address
  1610. * space.
  1611. */
  1612. last_allocs = INT_MAX;
  1613. for (upa = max_upa; upa; upa--) {
  1614. int allocs = 0, wasted = 0;
  1615. if (alloc_size % upa || ((alloc_size / upa) & ~PAGE_MASK))
  1616. continue;
  1617. for (group = 0; group < nr_groups; group++) {
  1618. int this_allocs = DIV_ROUND_UP(group_cnt[group], upa);
  1619. allocs += this_allocs;
  1620. wasted += this_allocs * upa - group_cnt[group];
  1621. }
  1622. /*
  1623. * Don't accept if wastage is over 1/3. The
  1624. * greater-than comparison ensures upa==1 always
  1625. * passes the following check.
  1626. */
  1627. if (wasted > num_possible_cpus() / 3)
  1628. continue;
  1629. /* and then don't consume more memory */
  1630. if (allocs > last_allocs)
  1631. break;
  1632. last_allocs = allocs;
  1633. best_upa = upa;
  1634. }
  1635. upa = best_upa;
  1636. /* allocate and fill alloc_info */
  1637. for (group = 0; group < nr_groups; group++)
  1638. nr_units += roundup(group_cnt[group], upa);
  1639. ai = pcpu_alloc_alloc_info(nr_groups, nr_units);
  1640. if (!ai)
  1641. return ERR_PTR(-ENOMEM);
  1642. cpu_map = ai->groups[0].cpu_map;
  1643. for (group = 0; group < nr_groups; group++) {
  1644. ai->groups[group].cpu_map = cpu_map;
  1645. cpu_map += roundup(group_cnt[group], upa);
  1646. }
  1647. ai->static_size = static_size;
  1648. ai->reserved_size = reserved_size;
  1649. ai->dyn_size = dyn_size;
  1650. ai->unit_size = alloc_size / upa;
  1651. ai->atom_size = atom_size;
  1652. ai->alloc_size = alloc_size;
  1653. for (group = 0, unit = 0; group_cnt[group]; group++) {
  1654. struct pcpu_group_info *gi = &ai->groups[group];
  1655. /*
  1656. * Initialize base_offset as if all groups are located
  1657. * back-to-back. The caller should update this to
  1658. * reflect actual allocation.
  1659. */
  1660. gi->base_offset = unit * ai->unit_size;
  1661. for_each_possible_cpu(cpu)
  1662. if (group_map[cpu] == group)
  1663. gi->cpu_map[gi->nr_units++] = cpu;
  1664. gi->nr_units = roundup(gi->nr_units, upa);
  1665. unit += gi->nr_units;
  1666. }
  1667. BUG_ON(unit != nr_units);
  1668. return ai;
  1669. }
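/*
 * Worked example with illustrative numbers: suppose static + reserved +
 * dynamic round up to size_sum = 192K and atom_size = 2M.  Then
 * min_unit_size = 192K (already above PCPU_MIN_UNIT_SIZE), alloc_size = 2M
 * and the initial upa = 2M / 192K = 10.  The first loop in
 * pcpu_build_alloc_info() decrements upa until alloc_size divides evenly
 * and the resulting unit is page aligned: upa 10 and 9 fail the
 * divisibility test, upa 8 gives 2M / 8 = 256K which is page aligned, so
 * max_upa = 8 and each unit would be 256K.  The second loop may still pick
 * a smaller upa if packing eight units per allocation would leave more
 * than num_possible_cpus() / 3 units unused for the given group sizes.
 */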
  1670. #endif /* BUILD_EMBED_FIRST_CHUNK || BUILD_PAGE_FIRST_CHUNK */
  1671. #if defined(BUILD_EMBED_FIRST_CHUNK)
  1672. /**
  1673. * pcpu_embed_first_chunk - embed the first percpu chunk into bootmem
  1674. * @reserved_size: the size of reserved percpu area in bytes
  1675. * @dyn_size: minimum free size for dynamic allocation in bytes
  1676. * @atom_size: allocation atom size
  1677. * @cpu_distance_fn: callback to determine distance between cpus, optional
  1678. * @alloc_fn: function to allocate percpu page
  1679. * @free_fn: function to free percpu page
  1680. *
  1681. * This is a helper to ease setting up embedded first percpu chunk and
  1682. * can be called where pcpu_setup_first_chunk() is expected.
  1683. *
1684. * If this function is used to set up the first chunk, it is allocated
  1685. * by calling @alloc_fn and used as-is without being mapped into
  1686. * vmalloc area. Allocations are always whole multiples of @atom_size
  1687. * aligned to @atom_size.
  1688. *
1689. * This enables the first chunk to piggyback on the linear physical
1690. * mapping, which often uses larger page sizes. Please note that this
  1691. * can result in very sparse cpu->unit mapping on NUMA machines thus
  1692. * requiring large vmalloc address space. Don't use this allocator if
  1693. * vmalloc space is not orders of magnitude larger than distances
1694. * between node memory addresses (i.e. 32-bit NUMA machines).
  1695. *
  1696. * @dyn_size specifies the minimum dynamic area size.
  1697. *
  1698. * If the needed size is smaller than the minimum or specified unit
  1699. * size, the leftover is returned using @free_fn.
  1700. *
  1701. * RETURNS:
  1702. * 0 on success, -errno on failure.
  1703. */
  1704. int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
  1705. size_t atom_size,
  1706. pcpu_fc_cpu_distance_fn_t cpu_distance_fn,
  1707. pcpu_fc_alloc_fn_t alloc_fn,
  1708. pcpu_fc_free_fn_t free_fn)
  1709. {
  1710. void *base = (void *)ULONG_MAX;
  1711. void **areas = NULL;
  1712. struct pcpu_alloc_info *ai;
  1713. size_t size_sum, areas_size, max_distance;
  1714. int group, i, rc;
  1715. ai = pcpu_build_alloc_info(reserved_size, dyn_size, atom_size,
  1716. cpu_distance_fn);
  1717. if (IS_ERR(ai))
  1718. return PTR_ERR(ai);
  1719. size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
  1720. areas_size = PFN_ALIGN(ai->nr_groups * sizeof(void *));
  1721. areas = memblock_virt_alloc_nopanic(areas_size, 0);
  1722. if (!areas) {
  1723. rc = -ENOMEM;
  1724. goto out_free;
  1725. }
  1726. /* allocate, copy and determine base address */
  1727. for (group = 0; group < ai->nr_groups; group++) {
  1728. struct pcpu_group_info *gi = &ai->groups[group];
  1729. unsigned int cpu = NR_CPUS;
  1730. void *ptr;
  1731. for (i = 0; i < gi->nr_units && cpu == NR_CPUS; i++)
  1732. cpu = gi->cpu_map[i];
  1733. BUG_ON(cpu == NR_CPUS);
  1734. /* allocate space for the whole group */
  1735. ptr = alloc_fn(cpu, gi->nr_units * ai->unit_size, atom_size);
  1736. if (!ptr) {
  1737. rc = -ENOMEM;
  1738. goto out_free_areas;
  1739. }
  1740. /* kmemleak tracks the percpu allocations separately */
  1741. kmemleak_free(ptr);
  1742. areas[group] = ptr;
  1743. base = min(ptr, base);
  1744. }
  1745. /*
  1746. * Copy data and free unused parts. This should happen after all
  1747. * allocations are complete; otherwise, we may end up with
  1748. * overlapping groups.
  1749. */
  1750. for (group = 0; group < ai->nr_groups; group++) {
  1751. struct pcpu_group_info *gi = &ai->groups[group];
  1752. void *ptr = areas[group];
  1753. for (i = 0; i < gi->nr_units; i++, ptr += ai->unit_size) {
  1754. if (gi->cpu_map[i] == NR_CPUS) {
  1755. /* unused unit, free whole */
  1756. free_fn(ptr, ai->unit_size);
  1757. continue;
  1758. }
  1759. /* copy and return the unused part */
  1760. memcpy(ptr, __per_cpu_load, ai->static_size);
  1761. free_fn(ptr + size_sum, ai->unit_size - size_sum);
  1762. }
  1763. }
  1764. /* base address is now known, determine group base offsets */
  1765. max_distance = 0;
  1766. for (group = 0; group < ai->nr_groups; group++) {
  1767. ai->groups[group].base_offset = areas[group] - base;
  1768. max_distance = max_t(size_t, max_distance,
  1769. ai->groups[group].base_offset);
  1770. }
  1771. max_distance += ai->unit_size;
  1772. /* warn if maximum distance is further than 75% of vmalloc space */
  1773. if (max_distance > VMALLOC_TOTAL * 3 / 4) {
  1774. pr_warning("PERCPU: max_distance=0x%zx too large for vmalloc "
  1775. "space 0x%lx\n", max_distance,
  1776. VMALLOC_TOTAL);
  1777. #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
  1778. /* and fail if we have fallback */
  1779. rc = -EINVAL;
  1780. goto out_free;
  1781. #endif
  1782. }
  1783. pr_info("PERCPU: Embedded %zu pages/cpu @%p s%zu r%zu d%zu u%zu\n",
  1784. PFN_DOWN(size_sum), base, ai->static_size, ai->reserved_size,
  1785. ai->dyn_size, ai->unit_size);
  1786. rc = pcpu_setup_first_chunk(ai, base);
  1787. goto out_free;
  1788. out_free_areas:
  1789. for (group = 0; group < ai->nr_groups; group++)
  1790. if (areas[group])
  1791. free_fn(areas[group],
  1792. ai->groups[group].nr_units * ai->unit_size);
  1793. out_free:
  1794. pcpu_free_alloc_info(ai);
  1795. if (areas)
  1796. memblock_free_early(__pa(areas), areas_size);
  1797. return rc;
  1798. }
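/*
 * Illustrative sketch, not taken from any particular architecture: an arch
 * setup_per_cpu_areas() built around the embed helper typically passes a
 * distance callback so CPUs on different NUMA nodes end up in different
 * groups.  The my_* names below are hypothetical; a real NUMA arch would
 * also allocate from the cpu's own node rather than reusing the simple
 * memblock allocation shown here.
 *
 *	static int __init my_cpu_distance(unsigned int from, unsigned int to)
 *	{
 *		return cpu_to_node(from) == cpu_to_node(to) ?
 *			LOCAL_DISTANCE : REMOTE_DISTANCE;
 *	}
 *
 *	static void * __init my_pcpu_alloc(unsigned int cpu, size_t size,
 *					   size_t align)
 *	{
 *		return memblock_virt_alloc_from_nopanic(size, align,
 *							__pa(MAX_DMA_ADDRESS));
 *	}
 *
 *	static void __init my_pcpu_free(void *ptr, size_t size)
 *	{
 *		memblock_free_early(__pa(ptr), size);
 *	}
 *
 *	rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
 *				    PERCPU_DYNAMIC_RESERVE, PMD_SIZE,
 *				    my_cpu_distance, my_pcpu_alloc,
 *				    my_pcpu_free);
 */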
  1799. #endif /* BUILD_EMBED_FIRST_CHUNK */
  1800. #ifdef BUILD_PAGE_FIRST_CHUNK
  1801. /**
  1802. * pcpu_page_first_chunk - map the first chunk using PAGE_SIZE pages
  1803. * @reserved_size: the size of reserved percpu area in bytes
  1804. * @alloc_fn: function to allocate percpu page, always called with PAGE_SIZE
  1805. * @free_fn: function to free percpu page, always called with PAGE_SIZE
  1806. * @populate_pte_fn: function to populate pte
  1807. *
  1808. * This is a helper to ease setting up page-remapped first percpu
  1809. * chunk and can be called where pcpu_setup_first_chunk() is expected.
  1810. *
  1811. * This is the basic allocator. Static percpu area is allocated
  1812. * page-by-page into vmalloc area.
  1813. *
  1814. * RETURNS:
  1815. * 0 on success, -errno on failure.
  1816. */
  1817. int __init pcpu_page_first_chunk(size_t reserved_size,
  1818. pcpu_fc_alloc_fn_t alloc_fn,
  1819. pcpu_fc_free_fn_t free_fn,
  1820. pcpu_fc_populate_pte_fn_t populate_pte_fn)
  1821. {
  1822. static struct vm_struct vm;
  1823. struct pcpu_alloc_info *ai;
  1824. char psize_str[16];
  1825. int unit_pages;
  1826. size_t pages_size;
  1827. struct page **pages;
  1828. int unit, i, j, rc;
  1829. snprintf(psize_str, sizeof(psize_str), "%luK", PAGE_SIZE >> 10);
  1830. ai = pcpu_build_alloc_info(reserved_size, 0, PAGE_SIZE, NULL);
  1831. if (IS_ERR(ai))
  1832. return PTR_ERR(ai);
  1833. BUG_ON(ai->nr_groups != 1);
  1834. BUG_ON(ai->groups[0].nr_units != num_possible_cpus());
  1835. unit_pages = ai->unit_size >> PAGE_SHIFT;
  1836. /* unaligned allocations can't be freed, round up to page size */
  1837. pages_size = PFN_ALIGN(unit_pages * num_possible_cpus() *
  1838. sizeof(pages[0]));
  1839. pages = memblock_virt_alloc(pages_size, 0);
  1840. /* allocate pages */
  1841. j = 0;
  1842. for (unit = 0; unit < num_possible_cpus(); unit++)
  1843. for (i = 0; i < unit_pages; i++) {
  1844. unsigned int cpu = ai->groups[0].cpu_map[unit];
  1845. void *ptr;
  1846. ptr = alloc_fn(cpu, PAGE_SIZE, PAGE_SIZE);
  1847. if (!ptr) {
  1848. pr_warning("PERCPU: failed to allocate %s page "
  1849. "for cpu%u\n", psize_str, cpu);
  1850. goto enomem;
  1851. }
  1852. /* kmemleak tracks the percpu allocations separately */
  1853. kmemleak_free(ptr);
  1854. pages[j++] = virt_to_page(ptr);
  1855. }
  1856. /* allocate vm area, map the pages and copy static data */
  1857. vm.flags = VM_ALLOC;
  1858. vm.size = num_possible_cpus() * ai->unit_size;
  1859. vm_area_register_early(&vm, PAGE_SIZE);
  1860. for (unit = 0; unit < num_possible_cpus(); unit++) {
  1861. unsigned long unit_addr =
  1862. (unsigned long)vm.addr + unit * ai->unit_size;
  1863. for (i = 0; i < unit_pages; i++)
  1864. populate_pte_fn(unit_addr + (i << PAGE_SHIFT));
  1865. /* pte already populated, the following shouldn't fail */
  1866. rc = __pcpu_map_pages(unit_addr, &pages[unit * unit_pages],
  1867. unit_pages);
  1868. if (rc < 0)
  1869. panic("failed to map percpu area, err=%d\n", rc);
  1870. /*
  1871. * FIXME: Archs with virtual cache should flush local
  1872. * cache for the linear mapping here - something
  1873. * equivalent to flush_cache_vmap() on the local cpu.
  1874. * flush_cache_vmap() can't be used as most supporting
  1875. * data structures are not set up yet.
  1876. */
  1877. /* copy static data */
  1878. memcpy((void *)unit_addr, __per_cpu_load, ai->static_size);
  1879. }
  1880. /* we're ready, commit */
  1881. pr_info("PERCPU: %d %s pages/cpu @%p s%zu r%zu d%zu\n",
  1882. unit_pages, psize_str, vm.addr, ai->static_size,
  1883. ai->reserved_size, ai->dyn_size);
  1884. rc = pcpu_setup_first_chunk(ai, vm.addr);
  1885. goto out_free_ar;
  1886. enomem:
  1887. while (--j >= 0)
  1888. free_fn(page_address(pages[j]), PAGE_SIZE);
  1889. rc = -ENOMEM;
  1890. out_free_ar:
  1891. memblock_free_early(__pa(pages), pages_size);
  1892. pcpu_free_alloc_info(ai);
  1893. return rc;
  1894. }
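/*
 * Illustrative sketch with hypothetical callbacks: an arch that cannot use
 * the embed helper wires up the page-by-page allocator roughly like this
 * from its setup_per_cpu_areas().  my_alloc, my_free and my_populate_pte
 * stand in for arch specific helpers; my_populate_pte(addr) must make sure
 * the kernel page tables cover @addr before __pcpu_map_pages() runs.
 *
 *	rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE,
 *				   my_alloc, my_free, my_populate_pte);
 *	if (rc < 0)
 *		panic("cannot initialize percpu area (err=%d)", rc);
 *
 *	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
 *	for_each_possible_cpu(cpu)
 *		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
 */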
  1895. #endif /* BUILD_PAGE_FIRST_CHUNK */
  1896. #ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
  1897. /*
  1898. * Generic SMP percpu area setup.
  1899. *
  1900. * The embedding helper is used because its behavior closely resembles
  1901. * the original non-dynamic generic percpu area setup. This is
  1902. * important because many archs have addressing restrictions and might
  1903. * fail if the percpu area is located far away from the previous
  1904. * location. As an added bonus, in non-NUMA cases, embedding is
1905. * generally a good idea TLB-wise because the percpu area can piggyback
  1906. * on the physical linear memory mapping which uses large page
  1907. * mappings on applicable archs.
  1908. */
  1909. unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
  1910. EXPORT_SYMBOL(__per_cpu_offset);
  1911. static void * __init pcpu_dfl_fc_alloc(unsigned int cpu, size_t size,
  1912. size_t align)
  1913. {
  1914. return memblock_virt_alloc_from_nopanic(
  1915. size, align, __pa(MAX_DMA_ADDRESS));
  1916. }
  1917. static void __init pcpu_dfl_fc_free(void *ptr, size_t size)
  1918. {
  1919. memblock_free_early(__pa(ptr), size);
  1920. }
  1921. void __init setup_per_cpu_areas(void)
  1922. {
  1923. unsigned long delta;
  1924. unsigned int cpu;
  1925. int rc;
  1926. /*
  1927. * Always reserve area for module percpu variables. That's
  1928. * what the legacy allocator did.
  1929. */
  1930. rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
  1931. PERCPU_DYNAMIC_RESERVE, PAGE_SIZE, NULL,
  1932. pcpu_dfl_fc_alloc, pcpu_dfl_fc_free);
  1933. if (rc < 0)
  1934. panic("Failed to initialize percpu areas.");
  1935. delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
  1936. for_each_possible_cpu(cpu)
  1937. __per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
  1938. }
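/*
 * For illustration: once __per_cpu_offset[] is filled in above, the generic
 * per-cpu accessors resolve a static percpu variable by adding the cpu's
 * offset to its link-time address, conceptually:
 *
 *	DEFINE_PER_CPU(int, my_counter);	(hypothetical variable)
 *
 *	int *p = per_cpu_ptr(&my_counter, cpu);
 *	(equivalent to SHIFT_PERCPU_PTR(&my_counter, __per_cpu_offset[cpu]))
 */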
  1939. #endif /* CONFIG_HAVE_SETUP_PER_CPU_AREA */
  1940. #else /* CONFIG_SMP */
  1941. /*
  1942. * UP percpu area setup.
  1943. *
1944. * UP always uses the km-based percpu allocator with identity mapping.
  1945. * Static percpu variables are indistinguishable from the usual static
  1946. * variables and don't require any special preparation.
  1947. */
  1948. void __init setup_per_cpu_areas(void)
  1949. {
  1950. const size_t unit_size =
  1951. roundup_pow_of_two(max_t(size_t, PCPU_MIN_UNIT_SIZE,
  1952. PERCPU_DYNAMIC_RESERVE));
  1953. struct pcpu_alloc_info *ai;
  1954. void *fc;
  1955. ai = pcpu_alloc_alloc_info(1, 1);
  1956. fc = memblock_virt_alloc_from_nopanic(unit_size,
  1957. PAGE_SIZE,
  1958. __pa(MAX_DMA_ADDRESS));
  1959. if (!ai || !fc)
  1960. panic("Failed to allocate memory for percpu areas.");
  1961. /* kmemleak tracks the percpu allocations separately */
  1962. kmemleak_free(fc);
  1963. ai->dyn_size = unit_size;
  1964. ai->unit_size = unit_size;
  1965. ai->atom_size = unit_size;
  1966. ai->alloc_size = unit_size;
  1967. ai->groups[0].nr_units = 1;
  1968. ai->groups[0].cpu_map[0] = 0;
  1969. if (pcpu_setup_first_chunk(ai, fc) < 0)
  1970. panic("Failed to initialize percpu areas.");
  1971. }
  1972. #endif /* CONFIG_SMP */
  1973. /*
1974. * First and reserved chunks are initialized with temporary allocation
1975. * maps in initdata so that they can be used before slab is online.
  1976. * This function is called after slab is brought up and replaces those
  1977. * with properly allocated maps.
  1978. */
  1979. void __init percpu_init_late(void)
  1980. {
  1981. struct pcpu_chunk *target_chunks[] =
  1982. { pcpu_first_chunk, pcpu_reserved_chunk, NULL };
  1983. struct pcpu_chunk *chunk;
  1984. unsigned long flags;
  1985. int i;
  1986. for (i = 0; (chunk = target_chunks[i]); i++) {
  1987. int *map;
  1988. const size_t size = PERCPU_DYNAMIC_EARLY_SLOTS * sizeof(map[0]);
  1989. BUILD_BUG_ON(size > PAGE_SIZE);
  1990. map = pcpu_mem_zalloc(size);
  1991. BUG_ON(!map);
  1992. spin_lock_irqsave(&pcpu_lock, flags);
  1993. memcpy(map, chunk->map, size);
  1994. chunk->map = map;
  1995. spin_unlock_irqrestore(&pcpu_lock, flags);
  1996. }
  1997. }
  1998. /*
1999. * The percpu allocator is initialized early during boot, when neither slab
2000. * nor workqueue is available. Plug async management until everything is up
  2001. * and running.
  2002. */
  2003. static int __init percpu_enable_async(void)
  2004. {
  2005. pcpu_async_enabled = true;
  2006. return 0;
  2007. }
  2008. subsys_initcall(percpu_enable_async);