/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.	June 2001.
 * Copyright (C) 2001 Peter Bergner.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/poison.h>
#include <linux/pfn.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/memblock.h>

#include <asm-generic/sections.h>
#include <linux/io.h>

#include "internal.h"

static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
static struct memblock_region memblock_physmem_init_regions[INIT_PHYSMEM_REGIONS] __initdata_memblock;
#endif

struct memblock memblock __initdata_memblock = {
	.memory.regions		= memblock_memory_init_regions,
	.memory.cnt		= 1,	/* empty dummy entry */
	.memory.max		= INIT_MEMBLOCK_REGIONS,

	.reserved.regions	= memblock_reserved_init_regions,
	.reserved.cnt		= 1,	/* empty dummy entry */
	.reserved.max		= INIT_MEMBLOCK_REGIONS,

#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
	.physmem.regions	= memblock_physmem_init_regions,
	.physmem.cnt		= 1,	/* empty dummy entry */
	.physmem.max		= INIT_PHYSMEM_REGIONS,
#endif

	.bottom_up		= false,
	.current_limit		= MEMBLOCK_ALLOC_ANYWHERE,
};
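
/*
 * Note: each region array above starts life with .cnt = 1 and a single
 * zero-sized "dummy" entry.  This spares every iteration and merge loop
 * an empty-array special case; memblock_add_range() simply overwrites
 * the dummy on the first real registration, and memblock_remove_region()
 * restores it when the last region goes away.
 */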

int memblock_debug __initdata_memblock;
#ifdef CONFIG_MOVABLE_NODE
bool movable_node_enabled __initdata_memblock = false;
#endif
static int memblock_can_resize __initdata_memblock;
static int memblock_memory_in_slab __initdata_memblock = 0;
static int memblock_reserved_in_slab __initdata_memblock = 0;

/* inline so we don't get a warning when pr_debug is compiled out */
static __init_memblock const char *
memblock_type_name(struct memblock_type *type)
{
	if (type == &memblock.memory)
		return "memory";
	else if (type == &memblock.reserved)
		return "reserved";
	else
		return "unknown";
}

/* adjust *@size so that (@base + *@size) doesn't overflow, return new size */
static inline phys_addr_t memblock_cap_size(phys_addr_t base, phys_addr_t *size)
{
	return *size = min(*size, (phys_addr_t)ULLONG_MAX - base);
}
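
/*
 * Worked example (illustrative): with a 64-bit phys_addr_t, capping a
 * region with @base 0xfffffffffffff000 and *@size 0x2000 reduces *@size
 * to 0xfff, so that @base + *@size equals ULLONG_MAX instead of wrapping
 * around zero.
 */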

/*
 * Address comparison utilities
 */
static unsigned long __init_memblock memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
				       phys_addr_t base2, phys_addr_t size2)
{
	return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
}

static long __init_memblock memblock_overlaps_region(struct memblock_type *type,
					phys_addr_t base, phys_addr_t size)
{
	unsigned long i;

	for (i = 0; i < type->cnt; i++) {
		phys_addr_t rgnbase = type->regions[i].base;
		phys_addr_t rgnsize = type->regions[i].size;
		if (memblock_addrs_overlap(base, size, rgnbase, rgnsize))
			break;
	}

	return (i < type->cnt) ? i : -1;
}

/**
 * __memblock_find_range_bottom_up - find free area utility in bottom-up
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Utility called from memblock_find_in_range_node(), find free area bottom-up.
 *
 * RETURNS:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock
__memblock_find_range_bottom_up(phys_addr_t start, phys_addr_t end,
				phys_addr_t size, phys_addr_t align, int nid)
{
	phys_addr_t this_start, this_end, cand;
	u64 i;

	for_each_free_mem_range(i, nid, &this_start, &this_end, NULL) {
		this_start = clamp(this_start, start, end);
		this_end = clamp(this_end, start, end);

		cand = round_up(this_start, align);
		if (cand < this_end && this_end - cand >= size)
			return cand;
	}

	return 0;
}

/**
 * __memblock_find_range_top_down - find free area utility, in top-down
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Utility called from memblock_find_in_range_node(), find free area top-down.
 *
 * RETURNS:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock
__memblock_find_range_top_down(phys_addr_t start, phys_addr_t end,
			       phys_addr_t size, phys_addr_t align, int nid)
{
	phys_addr_t this_start, this_end, cand;
	u64 i;

	for_each_free_mem_range_reverse(i, nid, &this_start, &this_end, NULL) {
		this_start = clamp(this_start, start, end);
		this_end = clamp(this_end, start, end);

		if (this_end < size)
			continue;

		cand = round_down(this_end - size, align);
		if (cand >= this_start)
			return cand;
	}

	return 0;
}

/**
 * memblock_find_in_range_node - find free area in given range and node
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Find @size free area aligned to @align in the specified range and node.
 *
 * When allocation direction is bottom-up, @start should be greater than
 * the end of the kernel image; otherwise, it will be trimmed.  The reason
 * is that we want the bottom-up allocation to land just above the kernel
 * image, so that it is highly likely that the allocated memory and the
 * kernel will reside in the same node.
 *
 * If bottom-up allocation fails, the search falls back to top-down.
 *
 * RETURNS:
 * Found address on success, 0 on failure.
 */
phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
					phys_addr_t align, phys_addr_t start,
					phys_addr_t end, int nid)
{
	phys_addr_t kernel_end, ret;

	/* pump up @end */
	if (end == MEMBLOCK_ALLOC_ACCESSIBLE)
		end = memblock.current_limit;

	/* avoid allocating the first page */
	start = max_t(phys_addr_t, start, PAGE_SIZE);
	end = max(start, end);
	kernel_end = __pa_symbol(_end);

	/*
	 * try bottom-up allocation only when bottom-up mode
	 * is set and @end is above the kernel image.
	 */
	if (memblock_bottom_up() && end > kernel_end) {
		phys_addr_t bottom_up_start;

		/* make sure we will allocate above the kernel */
		bottom_up_start = max(start, kernel_end);

		/* ok, try bottom-up allocation first */
		ret = __memblock_find_range_bottom_up(bottom_up_start, end,
						      size, align, nid);
		if (ret)
			return ret;
		/*
		 * we always limit bottom-up allocation above the kernel,
		 * but top-down allocation doesn't have the limit, so
		 * retrying top-down allocation may succeed when bottom-up
		 * allocation failed.
		 *
		 * bottom-up allocation is expected to fail very rarely,
		 * so we use WARN_ONCE() here to see the stack trace if
		 * that happens.
		 */
		WARN_ONCE(1, "memblock: bottom-up allocation failed, memory hotunplug may be affected\n");
	}

	return __memblock_find_range_top_down(start, end, size, align, nid);
}
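
/*
 * Illustrative use: under the default top-down policy,
 *
 *	memblock_find_in_range_node(SZ_1M, SZ_1M, 0,
 *				    MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE)
 *
 * returns the highest free, 1MB-aligned megabyte below
 * memblock.current_limit; after memblock_set_bottom_up(true) the search
 * would instead start just above the kernel image.
 */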

/**
 * memblock_find_in_range - find free area in given range
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
 * @size: size of free area to find
 * @align: alignment of free area to find
 *
 * Find @size free area aligned to @align in the specified range.
 *
 * RETURNS:
 * Found address on success, 0 on failure.
 */
phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start,
					phys_addr_t end, phys_addr_t size,
					phys_addr_t align)
{
	return memblock_find_in_range_node(size, align, start, end,
					   NUMA_NO_NODE);
}

static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r)
{
	type->total_size -= type->regions[r].size;
	memmove(&type->regions[r], &type->regions[r + 1],
		(type->cnt - (r + 1)) * sizeof(type->regions[r]));
	type->cnt--;

	/* Special case for empty arrays */
	if (type->cnt == 0) {
		WARN_ON(type->total_size != 0);
		type->cnt = 1;
		type->regions[0].base = 0;
		type->regions[0].size = 0;
		type->regions[0].flags = 0;
		memblock_set_region_node(&type->regions[0], MAX_NUMNODES);
	}
}

#ifdef CONFIG_ARCH_DISCARD_MEMBLOCK

phys_addr_t __init_memblock get_allocated_memblock_reserved_regions_info(
					phys_addr_t *addr)
{
	if (memblock.reserved.regions == memblock_reserved_init_regions)
		return 0;

	*addr = __pa(memblock.reserved.regions);

	return PAGE_ALIGN(sizeof(struct memblock_region) *
			  memblock.reserved.max);
}

phys_addr_t __init_memblock get_allocated_memblock_memory_regions_info(
					phys_addr_t *addr)
{
	if (memblock.memory.regions == memblock_memory_init_regions)
		return 0;

	*addr = __pa(memblock.memory.regions);

	return PAGE_ALIGN(sizeof(struct memblock_region) *
			  memblock.memory.max);
}

#endif

/**
 * memblock_double_array - double the size of the memblock regions array
 * @type: memblock type of the regions array being doubled
 * @new_area_start: starting address of memory range to avoid overlap with
 * @new_area_size: size of memory range to avoid overlap with
 *
 * Double the size of the @type regions array.  If memblock is being used to
 * allocate memory for a new reserved regions array and there is a previously
 * allocated memory range [@new_area_start,@new_area_start+@new_area_size]
 * waiting to be reserved, ensure the memory used by the new array does
 * not overlap.
 *
 * RETURNS:
 * 0 on success, -1 on failure.
 */
static int __init_memblock memblock_double_array(struct memblock_type *type,
						phys_addr_t new_area_start,
						phys_addr_t new_area_size)
{
	struct memblock_region *new_array, *old_array;
	phys_addr_t old_alloc_size, new_alloc_size;
	phys_addr_t old_size, new_size, addr;
	int use_slab = slab_is_available();
	int *in_slab;

	/* We don't allow resizing until we know about the reserved regions
	 * of memory that aren't suitable for allocation
	 */
	if (!memblock_can_resize)
		return -1;

	/* Calculate new doubled size */
	old_size = type->max * sizeof(struct memblock_region);
	new_size = old_size << 1;
	/*
	 * We need to allocate the new one aligned to PAGE_SIZE,
	 * so that we can free it completely later.
	 */
	old_alloc_size = PAGE_ALIGN(old_size);
	new_alloc_size = PAGE_ALIGN(new_size);

	/* Retrieve the slab flag */
	if (type == &memblock.memory)
		in_slab = &memblock_memory_in_slab;
	else
		in_slab = &memblock_reserved_in_slab;

	/* Try to find some space for it.
	 *
	 * WARNING: We assume that either slab_is_available() and we use it or
	 * we use MEMBLOCK for allocations. That means that this is unsafe to
	 * use when bootmem is currently active (unless bootmem itself is
	 * implemented on top of MEMBLOCK which isn't the case yet)
	 *
	 * This should however not be an issue for now, as we currently only
	 * call into MEMBLOCK while it's still active, or much later when slab
	 * is active for memory hotplug operations
	 */
	if (use_slab) {
		new_array = kmalloc(new_size, GFP_KERNEL);
		addr = new_array ? __pa(new_array) : 0;
	} else {
		/* only exclude range when trying to double reserved.regions */
		if (type != &memblock.reserved)
			new_area_start = new_area_size = 0;

		addr = memblock_find_in_range(new_area_start + new_area_size,
						memblock.current_limit,
						new_alloc_size, PAGE_SIZE);
		if (!addr && new_area_size)
			addr = memblock_find_in_range(0,
				min(new_area_start, memblock.current_limit),
				new_alloc_size, PAGE_SIZE);

		new_array = addr ? __va(addr) : NULL;
	}
	if (!addr) {
		pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n",
		       memblock_type_name(type), type->max, type->max * 2);
		return -1;
	}

	memblock_dbg("memblock: %s is doubled to %ld at [%#010llx-%#010llx]",
			memblock_type_name(type), type->max * 2, (u64)addr,
			(u64)addr + new_size - 1);

	/*
	 * Found space, we now need to move the array over before we add the
	 * reserved region since it may be our reserved array itself that is
	 * full.
	 */
	memcpy(new_array, type->regions, old_size);
	memset(new_array + type->max, 0, old_size);
	old_array = type->regions;
	type->regions = new_array;
	type->max <<= 1;

	/* Free old array. We needn't free it if the array is the static one */
	if (*in_slab)
		kfree(old_array);
	else if (old_array != memblock_memory_init_regions &&
		 old_array != memblock_reserved_init_regions)
		memblock_free(__pa(old_array), old_alloc_size);

	/*
	 * Reserve the new array if that comes from the memblock.  Otherwise,
	 * we needn't do it.
	 */
	if (!use_slab)
		BUG_ON(memblock_reserve(addr, new_alloc_size));

	/* Update slab flag */
	*in_slab = use_slab;

	return 0;
}

/**
 * memblock_merge_regions - merge neighboring compatible regions
 * @type: memblock type to scan
 *
 * Scan @type and merge neighboring compatible regions.
 */
static void __init_memblock memblock_merge_regions(struct memblock_type *type)
{
	int i = 0;

	/* cnt never goes below 1 */
	while (i < type->cnt - 1) {
		struct memblock_region *this = &type->regions[i];
		struct memblock_region *next = &type->regions[i + 1];

		if (this->base + this->size != next->base ||
		    memblock_get_region_node(this) !=
		    memblock_get_region_node(next) ||
		    this->flags != next->flags) {
			BUG_ON(this->base + this->size > next->base);
			i++;
			continue;
		}

		this->size += next->size;
		/* move forward from next + 1, index of which is i + 2 */
		memmove(next, next + 1, (type->cnt - (i + 2)) * sizeof(*next));
		type->cnt--;
	}
}
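
/*
 * Example (illustrative): two regions [0x1000-0x2000) and [0x2000-0x3000)
 * with identical nid and flags collapse into one region [0x1000-0x3000);
 * a hole between them, or differing flags, keeps them separate.
 */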

/**
 * memblock_insert_region - insert new memblock region
 * @type: memblock type to insert into
 * @idx: index for the insertion point
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: node id of the new region
 * @flags: flags of the new region
 *
 * Insert new memblock region [@base,@base+@size) into @type at @idx.
 * @type must already have extra room to accommodate the new region.
 */
static void __init_memblock memblock_insert_region(struct memblock_type *type,
						   int idx, phys_addr_t base,
						   phys_addr_t size,
						   int nid, unsigned long flags)
{
	struct memblock_region *rgn = &type->regions[idx];

	BUG_ON(type->cnt >= type->max);
	memmove(rgn + 1, rgn, (type->cnt - idx) * sizeof(*rgn));
	rgn->base = base;
	rgn->size = size;
	rgn->flags = flags;
	memblock_set_region_node(rgn, nid);
	type->cnt++;
	type->total_size += size;
}

/**
 * memblock_add_range - add new memblock region
 * @type: memblock type to add new region into
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: nid of the new region
 * @flags: flags of the new region
 *
 * Add new memblock region [@base,@base+@size) into @type.  The new region
 * is allowed to overlap with existing ones - overlaps don't affect already
 * existing regions.  @type is guaranteed to be minimal (all neighbouring
 * compatible regions are merged) after the addition.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_add_range(struct memblock_type *type,
				phys_addr_t base, phys_addr_t size,
				int nid, unsigned long flags)
{
	bool insert = false;
	phys_addr_t obase = base;
	phys_addr_t end = base + memblock_cap_size(base, &size);
	int i, nr_new;

	if (!size)
		return 0;

	/* special case for empty array */
	if (type->regions[0].size == 0) {
		WARN_ON(type->cnt != 1 || type->total_size);
		type->regions[0].base = base;
		type->regions[0].size = size;
		type->regions[0].flags = flags;
		memblock_set_region_node(&type->regions[0], nid);
		type->total_size = size;
		return 0;
	}
repeat:
	/*
	 * The following is executed twice.  Once with %false @insert and
	 * then with %true.  The first counts the number of regions needed
	 * to accommodate the new area.  The second actually inserts them.
	 */
	base = obase;
	nr_new = 0;

	for (i = 0; i < type->cnt; i++) {
		struct memblock_region *rgn = &type->regions[i];
		phys_addr_t rbase = rgn->base;
		phys_addr_t rend = rbase + rgn->size;

		if (rbase >= end)
			break;
		if (rend <= base)
			continue;
		/*
		 * @rgn overlaps.  If it separates the lower part of new
		 * area, insert that portion.
		 */
		if (rbase > base) {
			nr_new++;
			if (insert)
				memblock_insert_region(type, i++, base,
						       rbase - base, nid,
						       flags);
		}
		/* area below @rend is dealt with, forget about it */
		base = min(rend, end);
	}

	/* insert the remaining portion */
	if (base < end) {
		nr_new++;
		if (insert)
			memblock_insert_region(type, i, base, end - base,
					       nid, flags);
	}

	/*
	 * If this was the first round, resize array and repeat for actual
	 * insertions; otherwise, merge and return.
	 */
	if (!insert) {
		while (type->cnt + nr_new > type->max)
			if (memblock_double_array(type, obase, size) < 0)
				return -ENOMEM;
		insert = true;
		goto repeat;
	} else {
		memblock_merge_regions(type);
		return 0;
	}
}
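
/*
 * Example (illustrative): if @type already holds [0x1000-0x2000) and
 * [0x1800-0x3000) is added, the first pass counts one new region for the
 * uncovered tail [0x2000-0x3000), the second pass inserts it, and
 * memblock_merge_regions() leaves a single region [0x1000-0x3000).
 */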

int __init_memblock memblock_add_node(phys_addr_t base, phys_addr_t size,
				       int nid)
{
	return memblock_add_range(&memblock.memory, base, size, nid, 0);
}

int __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
{
	return memblock_add_range(&memblock.memory, base, size,
				   MAX_NUMNODES, 0);
}

/**
 * memblock_isolate_range - isolate given range into disjoint memblocks
 * @type: memblock type to isolate range for
 * @base: base of range to isolate
 * @size: size of range to isolate
 * @start_rgn: out parameter for the start of isolated region
 * @end_rgn: out parameter for the end of isolated region
 *
 * Walk @type and ensure that regions don't cross the boundaries defined by
 * [@base,@base+@size).  Crossing regions are split at the boundaries,
 * which may create at most two more regions.  The index of the first
 * region inside the range is returned in *@start_rgn and end in *@end_rgn.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
static int __init_memblock memblock_isolate_range(struct memblock_type *type,
					phys_addr_t base, phys_addr_t size,
					int *start_rgn, int *end_rgn)
{
	phys_addr_t end = base + memblock_cap_size(base, &size);
	int i;

	*start_rgn = *end_rgn = 0;

	if (!size)
		return 0;

	/* we'll create at most two more regions */
	while (type->cnt + 2 > type->max)
		if (memblock_double_array(type, base, size) < 0)
			return -ENOMEM;

	for (i = 0; i < type->cnt; i++) {
		struct memblock_region *rgn = &type->regions[i];
		phys_addr_t rbase = rgn->base;
		phys_addr_t rend = rbase + rgn->size;

		if (rbase >= end)
			break;
		if (rend <= base)
			continue;

		if (rbase < base) {
			/*
			 * @rgn intersects from below.  Split and continue
			 * to process the next region - the new top half.
			 */
			rgn->base = base;
			rgn->size -= base - rbase;
			type->total_size -= base - rbase;
			memblock_insert_region(type, i, rbase, base - rbase,
					       memblock_get_region_node(rgn),
					       rgn->flags);
		} else if (rend > end) {
			/*
			 * @rgn intersects from above.  Split and redo the
			 * current region - the new bottom half.
			 */
			rgn->base = end;
			rgn->size -= end - rbase;
			type->total_size -= end - rbase;
			memblock_insert_region(type, i--, rbase, end - rbase,
					       memblock_get_region_node(rgn),
					       rgn->flags);
		} else {
			/* @rgn is fully contained, record it */
			if (!*end_rgn)
				*start_rgn = i;
			*end_rgn = i + 1;
		}
	}

	return 0;
}
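
/*
 * Example (illustrative): with a single region [0x0-0x4000), isolating
 * [0x1000-0x3000) leaves three regions [0x0-0x1000), [0x1000-0x3000) and
 * [0x3000-0x4000), with *start_rgn = 1 and *end_rgn = 2, so callers can
 * walk exactly the regions inside [start_rgn, end_rgn).
 */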

int __init_memblock memblock_remove_range(struct memblock_type *type,
					  phys_addr_t base, phys_addr_t size)
{
	int start_rgn, end_rgn;
	int i, ret;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = end_rgn - 1; i >= start_rgn; i--)
		memblock_remove_region(type, i);
	return 0;
}

int __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
{
	/* drop kmemleak's tracking of the range so a later re-add can't overlap */
	kmemleak_free_part(__va(base), size);
	return memblock_remove_range(&memblock.memory, base, size);
}

int __init_memblock memblock_free(phys_addr_t base, phys_addr_t size)
{
	memblock_dbg("   memblock_free: [%#016llx-%#016llx] %pF\n",
		     (unsigned long long)base,
		     (unsigned long long)base + size - 1,
		     (void *)_RET_IP_);

	kmemleak_free_part(__va(base), size);
	return memblock_remove_range(&memblock.reserved, base, size);
}

static int __init_memblock memblock_reserve_region(phys_addr_t base,
						   phys_addr_t size,
						   int nid,
						   unsigned long flags)
{
	struct memblock_type *_rgn = &memblock.reserved;

	memblock_dbg("memblock_reserve: [%#016llx-%#016llx] flags %#02lx %pF\n",
		     (unsigned long long)base,
		     (unsigned long long)base + size - 1,
		     flags, (void *)_RET_IP_);

	return memblock_add_range(_rgn, base, size, nid, flags);
}

int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
{
	return memblock_reserve_region(base, size, MAX_NUMNODES, 0);
}
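
/*
 * Typical use (illustrative; initrd_start_phys and initrd_size are
 * placeholder names): early arch code keeps the allocator away from the
 * initrd with
 *
 *	memblock_reserve(initrd_start_phys, initrd_size);
 */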

/**
 * memblock_mark_hotplug - Mark hotpluggable memory with flag MEMBLOCK_HOTPLUG.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * This function isolates region [@base, @base + @size) and marks it with
 * flag MEMBLOCK_HOTPLUG.
 *
 * Return 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_hotplug(phys_addr_t base, phys_addr_t size)
{
	struct memblock_type *type = &memblock.memory;
	int i, ret, start_rgn, end_rgn;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = start_rgn; i < end_rgn; i++)
		memblock_set_region_flags(&type->regions[i], MEMBLOCK_HOTPLUG);

	memblock_merge_regions(type);
	return 0;
}

/**
 * memblock_clear_hotplug - Clear flag MEMBLOCK_HOTPLUG for a specified region.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * This function isolates region [@base, @base + @size) and clears flag
 * MEMBLOCK_HOTPLUG for the isolated regions.
 *
 * Return 0 on success, -errno on failure.
 */
int __init_memblock memblock_clear_hotplug(phys_addr_t base, phys_addr_t size)
{
	struct memblock_type *type = &memblock.memory;
	int i, ret, start_rgn, end_rgn;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = start_rgn; i < end_rgn; i++)
		memblock_clear_region_flags(&type->regions[i],
					    MEMBLOCK_HOTPLUG);

	memblock_merge_regions(type);
	return 0;
}

/**
 * __next_mem_range - next function for for_each_free_mem_range() etc.
 * @idx: pointer to u64 loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @type_a: pointer to memblock_type from where the range is taken
 * @type_b: pointer to memblock_type which excludes memory from being taken
 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @out_nid: ptr to int for nid of the range, can be %NULL
 *
 * Find the first area from *@idx which matches @nid, fill the out
 * parameters, and update *@idx for the next iteration.  The lower 32bit of
 * *@idx contains index into type_a and the upper 32bit indexes the
 * areas before each region in type_b.  For example, if type_b regions
 * look like the following,
 *
 *	0:[0-16), 1:[32-48), 2:[128-130)
 *
 * The upper 32bit indexes the following regions.
 *
 *	0:[0-0), 1:[16-32), 2:[48-128), 3:[130-MAX)
 *
 * As both region arrays are sorted, the function advances the two indices
 * in lockstep and returns each intersection.
 */
void __init_memblock __next_mem_range(u64 *idx, int nid,
				      struct memblock_type *type_a,
				      struct memblock_type *type_b,
				      phys_addr_t *out_start,
				      phys_addr_t *out_end, int *out_nid)
{
	int idx_a = *idx & 0xffffffff;
	int idx_b = *idx >> 32;

	if (WARN_ONCE(nid == MAX_NUMNODES,
	"Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
		nid = NUMA_NO_NODE;

	for (; idx_a < type_a->cnt; idx_a++) {
		struct memblock_region *m = &type_a->regions[idx_a];

		phys_addr_t m_start = m->base;
		phys_addr_t m_end = m->base + m->size;
		int	    m_nid = memblock_get_region_node(m);

		/* only memory regions are associated with nodes, check it */
		if (nid != NUMA_NO_NODE && nid != m_nid)
			continue;

		/* skip hotpluggable memory regions if needed */
		if (movable_node_is_enabled() && memblock_is_hotpluggable(m))
			continue;

		if (!type_b) {
			if (out_start)
				*out_start = m_start;
			if (out_end)
				*out_end = m_end;
			if (out_nid)
				*out_nid = m_nid;
			idx_a++;
			*idx = (u32)idx_a | (u64)idx_b << 32;
			return;
		}

		/* scan areas before each reservation */
		for (; idx_b < type_b->cnt + 1; idx_b++) {
			struct memblock_region *r;
			phys_addr_t r_start;
			phys_addr_t r_end;

			r = &type_b->regions[idx_b];
			r_start = idx_b ? r[-1].base + r[-1].size : 0;
			r_end = idx_b < type_b->cnt ?
				r->base : ULLONG_MAX;

			/*
			 * if idx_b advanced past idx_a,
			 * break out to advance idx_a
			 */
			if (r_start >= m_end)
				break;
			/* if the two regions intersect, we're done */
			if (m_start < r_end) {
				if (out_start)
					*out_start =
						max(m_start, r_start);
				if (out_end)
					*out_end = min(m_end, r_end);
				if (out_nid)
					*out_nid = m_nid;
				/*
				 * The region which ends first is
				 * advanced for the next iteration.
				 */
				if (m_end <= r_end)
					idx_a++;
				else
					idx_b++;
				*idx = (u32)idx_a | (u64)idx_b << 32;
				return;
			}
		}
	}

	/* signal end of iteration */
	*idx = ULLONG_MAX;
}
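
/*
 * Typical use (illustrative) through the wrapper macro, which walks
 * memory that is not covered by reserved regions:
 *
 *	u64 i;
 *	phys_addr_t start, end;
 *
 *	for_each_free_mem_range(i, NUMA_NO_NODE, &start, &end, NULL)
 *		pr_debug("free range: [%pa-%pa]\n", &start, &end);
 */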

/**
 * __next_mem_range_rev - generic next function for for_each_*_range_rev()
 *
 * Finds the next range from type_a which is not marked as unsuitable
 * in type_b.
 *
 * @idx: pointer to u64 loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @type_a: pointer to memblock_type from where the range is taken
 * @type_b: pointer to memblock_type which excludes memory from being taken
 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @out_nid: ptr to int for nid of the range, can be %NULL
 *
 * Reverse of __next_mem_range().
 */
void __init_memblock __next_mem_range_rev(u64 *idx, int nid,
					  struct memblock_type *type_a,
					  struct memblock_type *type_b,
					  phys_addr_t *out_start,
					  phys_addr_t *out_end, int *out_nid)
{
	int idx_a = *idx & 0xffffffff;
	int idx_b = *idx >> 32;

	if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
		nid = NUMA_NO_NODE;

	if (*idx == (u64)ULLONG_MAX) {
		idx_a = type_a->cnt - 1;
		/* @type_b may be %NULL, in which case nothing is excluded */
		idx_b = type_b ? type_b->cnt : 0;
	}
	for (; idx_a >= 0; idx_a--) {
		struct memblock_region *m = &type_a->regions[idx_a];

		phys_addr_t m_start = m->base;
		phys_addr_t m_end = m->base + m->size;
		int m_nid = memblock_get_region_node(m);

		/* only memory regions are associated with nodes, check it */
		if (nid != NUMA_NO_NODE && nid != m_nid)
			continue;

		/* skip hotpluggable memory regions if needed */
		if (movable_node_is_enabled() && memblock_is_hotpluggable(m))
			continue;

		if (!type_b) {
			if (out_start)
				*out_start = m_start;
			if (out_end)
				*out_end = m_end;
			if (out_nid)
				*out_nid = m_nid;
			idx_a--;	/* reverse iteration: step down, not up */
			*idx = (u32)idx_a | (u64)idx_b << 32;
			return;
		}

		/* scan areas before each reservation */
		for (; idx_b >= 0; idx_b--) {
			struct memblock_region *r;
			phys_addr_t r_start;
			phys_addr_t r_end;

			r = &type_b->regions[idx_b];
			r_start = idx_b ? r[-1].base + r[-1].size : 0;
			r_end = idx_b < type_b->cnt ?
				r->base : ULLONG_MAX;
			/*
			 * if idx_b advanced past idx_a,
			 * break out to advance idx_a
			 */

			if (r_end <= m_start)
				break;
			/* if the two regions intersect, we're done */
			if (m_end > r_start) {
				if (out_start)
					*out_start = max(m_start, r_start);
				if (out_end)
					*out_end = min(m_end, r_end);
				if (out_nid)
					*out_nid = m_nid;
				if (m_start >= r_start)
					idx_a--;
				else
					idx_b--;
				*idx = (u32)idx_a | (u64)idx_b << 32;
				return;
			}
		}
	}
	/* signal end of iteration */
	*idx = ULLONG_MAX;
}

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
/*
 * Common iterator interface used to define for_each_mem_pfn_range().
 */
void __init_memblock __next_mem_pfn_range(int *idx, int nid,
					  unsigned long *out_start_pfn,
					  unsigned long *out_end_pfn, int *out_nid)
{
	struct memblock_type *type = &memblock.memory;
	struct memblock_region *r;

	while (++*idx < type->cnt) {
		r = &type->regions[*idx];

		if (PFN_UP(r->base) >= PFN_DOWN(r->base + r->size))
			continue;
		if (nid == MAX_NUMNODES || nid == r->nid)
			break;
	}
	if (*idx >= type->cnt) {
		*idx = -1;
		return;
	}

	if (out_start_pfn)
		*out_start_pfn = PFN_UP(r->base);
	if (out_end_pfn)
		*out_end_pfn = PFN_DOWN(r->base + r->size);
	if (out_nid)
		*out_nid = r->nid;
}

/**
 * memblock_set_node - set node ID on memblock regions
 * @base: base of area to set node ID for
 * @size: size of area to set node ID for
 * @type: memblock type to set node ID for
 * @nid: node ID to set
 *
 * Set the nid of memblock @type regions in [@base,@base+@size) to @nid.
 * Regions which cross the area boundaries are split as necessary.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_set_node(phys_addr_t base, phys_addr_t size,
				      struct memblock_type *type, int nid)
{
	int start_rgn, end_rgn;
	int i, ret;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = start_rgn; i < end_rgn; i++)
		memblock_set_region_node(&type->regions[i], nid);

	memblock_merge_regions(type);
	return 0;
}
#endif	/* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

static phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size,
					phys_addr_t align, phys_addr_t start,
					phys_addr_t end, int nid)
{
	phys_addr_t found;

	if (!align)
		align = SMP_CACHE_BYTES;

	found = memblock_find_in_range_node(size, align, start, end, nid);
	if (found && !memblock_reserve(found, size)) {
		/*
		 * The min_count is set to 0 so that memblock allocations are
		 * never reported as leaks.
		 */
		kmemleak_alloc(__va(found), size, 0, 0);
		return found;
	}
	return 0;
}

phys_addr_t __init memblock_alloc_range(phys_addr_t size, phys_addr_t align,
					phys_addr_t start, phys_addr_t end)
{
	return memblock_alloc_range_nid(size, align, start, end, NUMA_NO_NODE);
}

static phys_addr_t __init memblock_alloc_base_nid(phys_addr_t size,
					phys_addr_t align, phys_addr_t max_addr,
					int nid)
{
	return memblock_alloc_range_nid(size, align, 0, max_addr, nid);
}

phys_addr_t __init memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid)
{
	return memblock_alloc_base_nid(size, align, MEMBLOCK_ALLOC_ACCESSIBLE, nid);
}

phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
	return memblock_alloc_base_nid(size, align, max_addr, NUMA_NO_NODE);
}

phys_addr_t __init memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
	phys_addr_t alloc;

	alloc = __memblock_alloc_base(size, align, max_addr);

	if (alloc == 0)
		panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n",
		      (unsigned long long) size, (unsigned long long) max_addr);

	return alloc;
}

phys_addr_t __init memblock_alloc(phys_addr_t size, phys_addr_t align)
{
	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
}

phys_addr_t __init memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid)
{
	phys_addr_t res = memblock_alloc_nid(size, align, nid);

	if (res)
		return res;
	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
}
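
/*
 * Typical use (illustrative): a physically-addressed early allocation that
 * prefers node @nid but falls back to any node:
 *
 *	phys_addr_t pa = memblock_alloc_try_nid(size, SMP_CACHE_BYTES, nid);
 */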

/**
 * memblock_virt_alloc_internal - allocate boot memory block
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region to allocate (phys address)
 * @max_addr: the upper bound of the memory region to allocate (phys address)
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * The @min_addr limit is dropped if it can not be satisfied and the allocation
 * will fall back to memory below @min_addr.  Also, allocation may fall back
 * to any node in the system if the specified node can not
 * hold the requested memory.
 *
 * The allocation is performed from memory region limited by
 * memblock.current_limit if @max_addr == %BOOTMEM_ALLOC_ACCESSIBLE.
 *
 * The memory block is aligned on SMP_CACHE_BYTES if @align == 0.
 *
 * The phys address of the allocated boot memory block is converted to virtual
 * and the allocated memory is reset to 0.
 *
 * In addition, the function sets min_count to 0 via kmemleak_alloc() for the
 * allocated boot memory block, so that it is never reported as a leak.
 *
 * RETURNS:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
static void * __init memblock_virt_alloc_internal(
				phys_addr_t size, phys_addr_t align,
				phys_addr_t min_addr, phys_addr_t max_addr,
				int nid)
{
	phys_addr_t alloc;
	void *ptr;

	if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
		nid = NUMA_NO_NODE;

	/*
	 * Detect any accidental use of these APIs after slab is ready, as at
	 * this moment memblock may be deinitialized already and its
	 * internal data may be destroyed (after execution of free_all_bootmem)
	 */
	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, nid);

	if (!align)
		align = SMP_CACHE_BYTES;

	if (max_addr > memblock.current_limit)
		max_addr = memblock.current_limit;

again:
	alloc = memblock_find_in_range_node(size, align, min_addr, max_addr,
					    nid);
	if (alloc)
		goto done;

	if (nid != NUMA_NO_NODE) {
		alloc = memblock_find_in_range_node(size, align, min_addr,
						    max_addr, NUMA_NO_NODE);
		if (alloc)
			goto done;
	}

	if (min_addr) {
		min_addr = 0;
		goto again;
	} else {
		goto error;
	}

done:
	memblock_reserve(alloc, size);
	ptr = phys_to_virt(alloc);
	memset(ptr, 0, size);

	/*
	 * The min_count is set to 0 so that bootmem allocated blocks
	 * are never reported as leaks. This is because many of these blocks
	 * are only referred via the physical address which is not
	 * looked up by kmemleak.
	 */
	kmemleak_alloc(ptr, size, 0, 0);

	return ptr;

error:
	return NULL;
}

/**
 * memblock_virt_alloc_try_nid_nopanic - allocate boot memory block
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region from where the allocation
 *	  is preferred (phys address)
 * @max_addr: the upper bound of the memory region from where the allocation
 *	      is preferred (phys address), or %BOOTMEM_ALLOC_ACCESSIBLE to
 *	      allocate only from memory limited by memblock.current_limit value
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Public wrapper of memblock_virt_alloc_internal() which provides
 * additional debug information (including caller info), if enabled.
 *
 * RETURNS:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
void * __init memblock_virt_alloc_try_nid_nopanic(
				phys_addr_t size, phys_addr_t align,
				phys_addr_t min_addr, phys_addr_t max_addr,
				int nid)
{
	memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=0x%llx max_addr=0x%llx %pF\n",
		     __func__, (u64)size, (u64)align, nid, (u64)min_addr,
		     (u64)max_addr, (void *)_RET_IP_);
	return memblock_virt_alloc_internal(size, align, min_addr,
					    max_addr, nid);
}

/**
 * memblock_virt_alloc_try_nid - allocate boot memory block with panicking
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region from where the allocation
 *	  is preferred (phys address)
 * @max_addr: the upper bound of the memory region from where the allocation
 *	      is preferred (phys address), or %BOOTMEM_ALLOC_ACCESSIBLE to
 *	      allocate only from memory limited by memblock.current_limit value
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Public panicking version of memblock_virt_alloc_try_nid_nopanic()
 * which provides debug information (including caller info), if enabled,
 * and panics if the request can not be satisfied.
 *
 * RETURNS:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
void * __init memblock_virt_alloc_try_nid(
			phys_addr_t size, phys_addr_t align,
			phys_addr_t min_addr, phys_addr_t max_addr,
			int nid)
{
	void *ptr;

	memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=0x%llx max_addr=0x%llx %pF\n",
		     __func__, (u64)size, (u64)align, nid, (u64)min_addr,
		     (u64)max_addr, (void *)_RET_IP_);
	ptr = memblock_virt_alloc_internal(size, align,
					   min_addr, max_addr, nid);
	if (ptr)
		return ptr;

	panic("%s: Failed to allocate %llu bytes align=0x%llx nid=%d from=0x%llx max_addr=0x%llx\n",
	      __func__, (u64)size, (u64)align, nid, (u64)min_addr,
	      (u64)max_addr);
	return NULL;
}
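
/*
 * Typical use (illustrative; table, table_size and nid are placeholder
 * names): allocating a zeroed, preferably node-local table early in boot,
 * where failure is fatal anyway:
 *
 *	table = memblock_virt_alloc_try_nid(table_size, SMP_CACHE_BYTES,
 *					    0, BOOTMEM_ALLOC_ACCESSIBLE, nid);
 */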

/**
 * __memblock_free_early - free boot memory block
 * @base: phys starting address of the boot memory block
 * @size: size of the boot memory block in bytes
 *
 * Free boot memory block previously allocated by memblock_virt_alloc_xx() API.
 * The freed memory will not be released to the buddy allocator.
 */
void __init __memblock_free_early(phys_addr_t base, phys_addr_t size)
{
	memblock_dbg("%s: [%#016llx-%#016llx] %pF\n",
		     __func__, (u64)base, (u64)base + size - 1,
		     (void *)_RET_IP_);
	kmemleak_free_part(__va(base), size);
	memblock_remove_range(&memblock.reserved, base, size);
}

/*
 * __memblock_free_late - free bootmem block pages directly to buddy allocator
 * @base: phys starting address of the boot memory block
 * @size: size of the boot memory block in bytes
 *
 * This is only useful when the bootmem allocator has already been torn
 * down, but we are still initializing the system.  Pages are released directly
 * to the buddy allocator, no bootmem metadata is updated because it is gone.
 */
void __init __memblock_free_late(phys_addr_t base, phys_addr_t size)
{
	u64 cursor, end;

	memblock_dbg("%s: [%#016llx-%#016llx] %pF\n",
		     __func__, (u64)base, (u64)base + size - 1,
		     (void *)_RET_IP_);
	kmemleak_free_part(__va(base), size);
	cursor = PFN_UP(base);
	end = PFN_DOWN(base + size);

	for (; cursor < end; cursor++) {
		__free_pages_bootmem(pfn_to_page(cursor), 0);
		totalram_pages++;
	}
}

/*
 * Remaining API functions
 */

phys_addr_t __init memblock_phys_mem_size(void)
{
	return memblock.memory.total_size;
}

phys_addr_t __init memblock_mem_size(unsigned long limit_pfn)
{
	unsigned long pages = 0;
	struct memblock_region *r;
	unsigned long start_pfn, end_pfn;

	for_each_memblock(memory, r) {
		start_pfn = memblock_region_memory_base_pfn(r);
		end_pfn = memblock_region_memory_end_pfn(r);
		start_pfn = min_t(unsigned long, start_pfn, limit_pfn);
		end_pfn = min_t(unsigned long, end_pfn, limit_pfn);
		pages += end_pfn - start_pfn;
	}

	return PFN_PHYS(pages);
}

/* lowest address */
phys_addr_t __init_memblock memblock_start_of_DRAM(void)
{
	return memblock.memory.regions[0].base;
}

phys_addr_t __init_memblock memblock_end_of_DRAM(void)
{
	int idx = memblock.memory.cnt - 1;

	return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size);
}

void __init memblock_enforce_memory_limit(phys_addr_t limit)
{
	phys_addr_t max_addr = (phys_addr_t)ULLONG_MAX;
	struct memblock_region *r;

	if (!limit)
		return;

	/* find out max address */
	for_each_memblock(memory, r) {
		if (limit <= r->size) {
			max_addr = r->base + limit;
			break;
		}
		limit -= r->size;
	}

	/* truncate both memory and reserved regions */
	memblock_remove_range(&memblock.memory, max_addr,
			      (phys_addr_t)ULLONG_MAX);
	memblock_remove_range(&memblock.reserved, max_addr,
			      (phys_addr_t)ULLONG_MAX);
}

static int __init_memblock memblock_search(struct memblock_type *type, phys_addr_t addr)
{
	unsigned int left = 0, right = type->cnt;

	do {
		unsigned int mid = (right + left) / 2;

		if (addr < type->regions[mid].base)
			right = mid;
		else if (addr >= (type->regions[mid].base +
				  type->regions[mid].size))
			left = mid + 1;
		else
			return mid;
	} while (left < right);
	return -1;
}
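
/*
 * Note: the binary search above is valid because regions within a
 * memblock_type are kept sorted by base address and never overlap, so
 * lookups cost O(log n) in the number of regions.
 */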

int __init memblock_is_reserved(phys_addr_t addr)
{
	return memblock_search(&memblock.reserved, addr) != -1;
}

int __init_memblock memblock_is_memory(phys_addr_t addr)
{
	return memblock_search(&memblock.memory, addr) != -1;
}

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
int __init_memblock memblock_search_pfn_nid(unsigned long pfn,
			 unsigned long *start_pfn, unsigned long *end_pfn)
{
	struct memblock_type *type = &memblock.memory;
	int mid = memblock_search(type, PFN_PHYS(pfn));

	if (mid == -1)
		return -1;

	*start_pfn = PFN_DOWN(type->regions[mid].base);
	*end_pfn = PFN_DOWN(type->regions[mid].base + type->regions[mid].size);

	return type->regions[mid].nid;
}
#endif

/**
 * memblock_is_region_memory - check if a region is a subset of memory
 * @base: base of region to check
 * @size: size of region to check
 *
 * Check if the region [@base, @base+@size) is a subset of a memory block.
 *
 * RETURNS:
 * 0 if false, non-zero if true
 */
int __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
{
	int idx = memblock_search(&memblock.memory, base);
	phys_addr_t end = base + memblock_cap_size(base, &size);

	if (idx == -1)
		return 0;
	return memblock.memory.regions[idx].base <= base &&
		(memblock.memory.regions[idx].base +
		 memblock.memory.regions[idx].size) >= end;
}

/**
 * memblock_is_region_reserved - check if a region intersects reserved memory
 * @base: base of region to check
 * @size: size of region to check
 *
 * Check if the region [@base, @base+@size) intersects a reserved memory block.
 *
 * RETURNS:
 * 0 if false, non-zero if true
 */
int __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
{
	memblock_cap_size(base, &size);
	return memblock_overlaps_region(&memblock.reserved, base, size) >= 0;
}

void __init_memblock memblock_trim_memory(phys_addr_t align)
{
	phys_addr_t start, end, orig_start, orig_end;
	struct memblock_region *r;

	for_each_memblock(memory, r) {
		orig_start = r->base;
		orig_end = r->base + r->size;
		start = round_up(orig_start, align);
		end = round_down(orig_end, align);

		if (start == orig_start && end == orig_end)
			continue;

		if (start < end) {
			r->base = start;
			r->size = end - start;
		} else {
			memblock_remove_region(&memblock.memory,
					       r - memblock.memory.regions);
			r--;
		}
	}
}

void __init_memblock memblock_set_current_limit(phys_addr_t limit)
{
	memblock.current_limit = limit;
}

phys_addr_t __init_memblock memblock_get_current_limit(void)
{
	return memblock.current_limit;
}

static void __init_memblock memblock_dump(struct memblock_type *type, char *name)
{
	unsigned long long base, size;
	unsigned long flags;
	int i;

	pr_info(" %s.cnt = 0x%lx\n", name, type->cnt);

	for (i = 0; i < type->cnt; i++) {
		struct memblock_region *rgn = &type->regions[i];
		char nid_buf[32] = "";

		base = rgn->base;
		size = rgn->size;
		flags = rgn->flags;
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
		if (memblock_get_region_node(rgn) != MAX_NUMNODES)
			snprintf(nid_buf, sizeof(nid_buf), " on node %d",
				 memblock_get_region_node(rgn));
#endif
		pr_info(" %s[%#x]\t[%#016llx-%#016llx], %#llx bytes%s flags: %#lx\n",
			name, i, base, base + size - 1, size, nid_buf, flags);
	}
}

void __init_memblock __memblock_dump_all(void)
{
	pr_info("MEMBLOCK configuration:\n");
	pr_info(" memory size = %#llx reserved size = %#llx\n",
		(unsigned long long)memblock.memory.total_size,
		(unsigned long long)memblock.reserved.total_size);

	memblock_dump(&memblock.memory, "memory");
	memblock_dump(&memblock.reserved, "reserved");
}

void __init memblock_allow_resize(void)
{
	memblock_can_resize = 1;
}

static int __init early_memblock(char *p)
{
	if (p && strstr(p, "debug"))
		memblock_debug = 1;
	return 0;
}
early_param("memblock", early_memblock);
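
/*
 * Booting with "memblock=debug" on the kernel command line sets
 * memblock_debug above and enables the memblock_dbg() messages used
 * throughout this file.
 */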

#if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_ARCH_DISCARD_MEMBLOCK)

static int memblock_debug_show(struct seq_file *m, void *private)
{
	struct memblock_type *type = m->private;
	struct memblock_region *reg;
	int i;

	for (i = 0; i < type->cnt; i++) {
		reg = &type->regions[i];
		seq_printf(m, "%4d: ", i);
		if (sizeof(phys_addr_t) == 4)
			seq_printf(m, "0x%08lx..0x%08lx\n",
				   (unsigned long)reg->base,
				   (unsigned long)(reg->base + reg->size - 1));
		else
			seq_printf(m, "0x%016llx..0x%016llx\n",
				   (unsigned long long)reg->base,
				   (unsigned long long)(reg->base + reg->size - 1));
	}
	return 0;
}

static int memblock_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, memblock_debug_show, inode->i_private);
}

static const struct file_operations memblock_debug_fops = {
	.open = memblock_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int __init memblock_init_debugfs(void)
{
	struct dentry *root = debugfs_create_dir("memblock", NULL);

	if (!root)
		return -ENXIO;
	debugfs_create_file("memory", S_IRUGO, root, &memblock.memory, &memblock_debug_fops);
	debugfs_create_file("reserved", S_IRUGO, root, &memblock.reserved, &memblock_debug_fops);
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
	debugfs_create_file("physmem", S_IRUGO, root, &memblock.physmem, &memblock_debug_fops);
#endif

	return 0;
}
__initcall(memblock_init_debugfs);

#endif /* CONFIG_DEBUG_FS */