/* m4u_mva.c — M4U MVA (Multimedia Virtual Address) block allocator */
  1. #include <linux/spinlock.h>
  2. #include "m4u_priv.h"
  3. /* ((va&0xfff)+size+0xfff)>>12 */
  4. #define mva_pageOffset(mva) ((mva)&0xfff)
  5. #define MVA_BLOCK_SIZE_ORDER 20 /* 1M */
  6. #define MVA_MAX_BLOCK_NR 4095 /* 4GB */
  7. #define MVA_BLOCK_SIZE (1<<MVA_BLOCK_SIZE_ORDER) /* 0x40000 */
  8. #define MVA_BLOCK_ALIGN_MASK (MVA_BLOCK_SIZE-1) /* 0x3ffff */
  9. #define MVA_BLOCK_NR_MASK (MVA_MAX_BLOCK_NR) /* 0xfff */
  10. #define MVA_BUSY_MASK (1<<15) /* 0x8000 */
  11. #define MVA_IS_BUSY(index) ((mvaGraph[index]&MVA_BUSY_MASK) != 0)
  12. #define MVA_SET_BUSY(index) (mvaGraph[index] |= MVA_BUSY_MASK)
  13. #define MVA_SET_FREE(index) (mvaGraph[index] & (~MVA_BUSY_MASK))
  14. #define MVA_GET_NR(index) (mvaGraph[index] & MVA_BLOCK_NR_MASK)
  15. #define MVAGRAPH_INDEX(mva) (mva>>MVA_BLOCK_SIZE_ORDER)
  16. static short mvaGraph[MVA_MAX_BLOCK_NR + 1];
  17. static void *mvaInfoGraph[MVA_MAX_BLOCK_NR + 1];
  18. static DEFINE_SPINLOCK(gMvaGraph_lock);
  19. void m4u_mvaGraph_init(void *priv_reserve)
  20. {
  21. unsigned long irq_flags;
  22. spin_lock_irqsave(&gMvaGraph_lock, irq_flags);
  23. memset(mvaGraph, 0, sizeof(short) * (MVA_MAX_BLOCK_NR + 1));
  24. memset(mvaInfoGraph, 0, sizeof(void *) * (MVA_MAX_BLOCK_NR + 1));
  25. mvaGraph[0] = 1 | MVA_BUSY_MASK;
  26. mvaInfoGraph[0] = priv_reserve;
  27. mvaGraph[1] = MVA_MAX_BLOCK_NR;
  28. mvaInfoGraph[1] = priv_reserve;
  29. mvaGraph[MVA_MAX_BLOCK_NR] = MVA_MAX_BLOCK_NR;
  30. mvaInfoGraph[MVA_MAX_BLOCK_NR] = priv_reserve;
  31. spin_unlock_irqrestore(&gMvaGraph_lock, irq_flags);
  32. }
  33. void m4u_mvaGraph_dump_raw(void)
  34. {
  35. int i;
  36. unsigned long irq_flags;
  37. spin_lock_irqsave(&gMvaGraph_lock, irq_flags);
  38. M4ULOG_HIGH("[M4U_K] dump raw data of mvaGraph:============>\n");
  39. for (i = 0; i < MVA_MAX_BLOCK_NR + 1; i++)
  40. M4ULOG_HIGH("0x%4x: 0x%08x\n", i, mvaGraph[i]);
  41. spin_unlock_irqrestore(&gMvaGraph_lock, irq_flags);
  42. }
  43. void m4u_mvaGraph_dump(void)
  44. {
  45. unsigned int addr = 0, size = 0;
  46. short index = 1, nr = 0;
  47. int i, max_bit, is_busy;
  48. short frag[12] = { 0 };
  49. short nr_free = 0, nr_alloc = 0;
  50. unsigned long irq_flags;
  51. M4ULOG_HIGH("[M4U_K] mva allocation info dump:====================>\n");
  52. M4ULOG_HIGH("start size blocknum busy\n");
  53. spin_lock_irqsave(&gMvaGraph_lock, irq_flags);
  54. for (index = 1; index < MVA_MAX_BLOCK_NR + 1; index += nr) {
  55. addr = index << MVA_BLOCK_SIZE_ORDER;
  56. nr = MVA_GET_NR(index);
  57. size = nr << MVA_BLOCK_SIZE_ORDER;
  58. if (MVA_IS_BUSY(index)) {
  59. is_busy = 1;
  60. nr_alloc += nr;
  61. } else { /* mva region is free */
  62. is_busy = 0;
  63. nr_free += nr;
  64. max_bit = 0;
  65. for (i = 0; i < 12; i++) {
  66. if (nr & (1 << i))
  67. max_bit = i;
  68. }
  69. frag[max_bit]++;
  70. }
  71. M4ULOG_HIGH("0x%08x 0x%08x %4d %d\n", addr, size, nr, is_busy);
  72. }
  73. spin_unlock_irqrestore(&gMvaGraph_lock, irq_flags);
  74. M4ULOG_HIGH("\n");
  75. M4ULOG_HIGH("[M4U_K] mva alloc summary: (unit: blocks)========================>\n");
  76. M4ULOG_HIGH("free: %d , alloc: %d, total: %d\n", nr_free, nr_alloc, nr_free + nr_alloc);
  77. M4ULOG_HIGH("[M4U_K] free region fragments in 2^x blocks unit:===============\n");
  78. M4ULOG_HIGH(" 0 1 2 3 4 5 6 7 8 9 10 11\n");
  79. M4ULOG_HIGH("%4d %4d %4d %4d %4d %4d %4d %4d %4d %4d %4d %4d\n",
  80. frag[0], frag[1], frag[2], frag[3], frag[4], frag[5], frag[6],
  81. frag[7], frag[8], frag[9], frag[10], frag[11]);
  82. M4ULOG_HIGH("[M4U_K] mva alloc dump done=========================<\n");
  83. }
  84. void *mva_get_priv_ext(unsigned int mva)
  85. {
  86. void *priv = NULL;
  87. int index;
  88. unsigned long irq_flags;
  89. index = MVAGRAPH_INDEX(mva);
  90. if (index == 0 || index > MVA_MAX_BLOCK_NR) {
  91. M4UMSG("mvaGraph index is 0. mva=0x%x\n", mva);
  92. return NULL;
  93. }
  94. spin_lock_irqsave(&gMvaGraph_lock, irq_flags);
  95. /* find prev head/tail of this region */
  96. while (mvaGraph[index] == 0)
  97. index--;
  98. if (MVA_IS_BUSY(index))
  99. priv = mvaInfoGraph[index];
  100. spin_unlock_irqrestore(&gMvaGraph_lock, irq_flags);
  101. return priv;
  102. }
  103. int mva_foreach_priv(mva_buf_fn_t *fn, void *data)
  104. {
  105. short index = 1, nr = 0;
  106. unsigned int mva;
  107. void *priv;
  108. unsigned long irq_flags;
  109. int ret;
  110. spin_lock_irqsave(&gMvaGraph_lock, irq_flags);
  111. for (index = 1; index < MVA_MAX_BLOCK_NR + 1; index += nr) {
  112. mva = index << MVA_BLOCK_SIZE_ORDER;
  113. nr = MVA_GET_NR(index);
  114. if (MVA_IS_BUSY(index)) {
  115. priv = mvaInfoGraph[index];
  116. ret = fn(priv, mva, mva + nr * MVA_BLOCK_SIZE, data);
  117. if (ret)
  118. break;
  119. }
  120. }
  121. spin_unlock_irqrestore(&gMvaGraph_lock, irq_flags);
  122. return 0;
  123. }
  124. unsigned int get_first_valid_mva(void)
  125. {
  126. short index = 1, nr = 0;
  127. unsigned int mva;
  128. void *priv;
  129. unsigned long irq_flags;
  130. spin_lock_irqsave(&gMvaGraph_lock, irq_flags);
  131. for (index = 1; index < MVA_MAX_BLOCK_NR + 1; index += nr) {
  132. mva = index << MVA_BLOCK_SIZE_ORDER;
  133. nr = MVA_GET_NR(index);
  134. if (MVA_IS_BUSY(index)) {
  135. priv = mvaInfoGraph[index];
  136. break;
  137. }
  138. }
  139. spin_unlock_irqrestore(&gMvaGraph_lock, irq_flags);
  140. return mva;
  141. }
  142. void *mva_get_priv(unsigned int mva)
  143. {
  144. void *priv = NULL;
  145. int index;
  146. unsigned long irq_flags;
  147. index = MVAGRAPH_INDEX(mva);
  148. if (index == 0 || index > MVA_MAX_BLOCK_NR) {
  149. M4UMSG("mvaGraph index is 0. mva=0x%x\n", mva);
  150. return NULL;
  151. }
  152. spin_lock_irqsave(&gMvaGraph_lock, irq_flags);
  153. if (MVA_IS_BUSY(index))
  154. priv = mvaInfoGraph[index];
  155. spin_unlock_irqrestore(&gMvaGraph_lock, irq_flags);
  156. return priv;
  157. }
/*
 * Allocate an MVA region large enough to map [va, va+size), first-fit.
 * Returns the assigned mva (block-aligned region start plus va's page
 * offset) or 0 on failure. priv is recorded as the region owner cookie.
 */
unsigned int m4u_do_mva_alloc(unsigned long va, unsigned int size, void *priv)
{
	short s, end;
	short new_start, new_end;
	short nr = 0;
	unsigned int mvaRegionStart;
	unsigned long startRequire, endRequire, sizeRequire;
	unsigned long irq_flags;

	if (size == 0)
		return 0;
	/* ----------------------------------------------------- */
	/* calculate mva block number */
	startRequire = va & (~M4U_PAGE_MASK);
	endRequire = (va + size - 1) | M4U_PAGE_MASK;
	sizeRequire = endRequire - startRequire + 1;
	nr = (sizeRequire + MVA_BLOCK_ALIGN_MASK) >> MVA_BLOCK_SIZE_ORDER;
	/* (sizeRequire>>MVA_BLOCK_SIZE_ORDER) + ((sizeRequire&MVA_BLOCK_ALIGN_MASK)!=0); */
	spin_lock_irqsave(&gMvaGraph_lock, irq_flags);
	/* ----------------------------------------------- */
	/* find first match free region */
	/* busy entries carry MVA_BUSY_MASK (bit 15), which makes the signed
	 * short negative, so they always compare < nr and get skipped; the
	 * scan stops at the first FREE region with at least nr blocks */
	for (s = 1; (s < (MVA_MAX_BLOCK_NR + 1)) && (mvaGraph[s] < nr); s += (mvaGraph[s] & MVA_BLOCK_NR_MASK))
		;
	if (s > MVA_MAX_BLOCK_NR) {
		spin_unlock_irqrestore(&gMvaGraph_lock, irq_flags);
		M4UMSG("mva_alloc error: no available MVA region for %d blocks!\n", nr);
		MMProfileLogEx(M4U_MMP_Events[M4U_MMP_M4U_ERROR], MMProfileFlagPulse, size, s);
		return 0;
	}
	/* ----------------------------------------------- */
	/* alloc a mva region */
	end = s + mvaGraph[s] - 1;
	if (unlikely(nr == mvaGraph[s])) {
		/* exact fit: mark the whole region busy, size fields kept */
		MVA_SET_BUSY(s);
		MVA_SET_BUSY(end);
		mvaInfoGraph[s] = priv;
		mvaInfoGraph[end] = priv;
	} else {
		/* split: [s .. s+nr-1] becomes busy, the remainder stays
		 * free; the write ORDER below matters because new_start may
		 * alias end (then mvaGraph[end] must get the free size) */
		new_end = s + nr - 1;
		new_start = new_end + 1;
		/* note: new_start may equals to end */
		mvaGraph[new_start] = (mvaGraph[s] - nr);
		mvaGraph[new_end] = nr | MVA_BUSY_MASK;
		mvaGraph[s] = mvaGraph[new_end];
		mvaGraph[end] = mvaGraph[new_start];
		mvaInfoGraph[s] = priv;
		mvaInfoGraph[new_end] = priv;
	}
	spin_unlock_irqrestore(&gMvaGraph_lock, irq_flags);
	mvaRegionStart = (unsigned int)s;
	return (mvaRegionStart << MVA_BLOCK_SIZE_ORDER) + mva_pageOffset(va);
}
/*
 * Reserve the FIXED MVA range covering [mva, mva+size) by carving it out
 * of the free region that contains it. Returns mva on success, 0 when
 * the range is invalid or already in use.
 */
unsigned int m4u_do_mva_alloc_fix(unsigned int mva, unsigned int size, void *priv)
{
	short nr = 0;
	unsigned int startRequire, endRequire, sizeRequire;
	unsigned long irq_flags;
	short startIdx = mva >> MVA_BLOCK_SIZE_ORDER;
	short endIdx;
	short region_start, region_end;

	if (size == 0)
		return 0;
	if (startIdx == 0 || startIdx > MVA_MAX_BLOCK_NR) {
		M4UMSG("mvaGraph index is 0. index=0x%x\n", startIdx);
		return 0;
	}
	/* ----------------------------------------------------- */
	/* calculate mva block number */
	startRequire = mva & (~MVA_BLOCK_ALIGN_MASK);
	endRequire = (mva + size - 1) | MVA_BLOCK_ALIGN_MASK;
	sizeRequire = endRequire - startRequire + 1;
	nr = (sizeRequire + MVA_BLOCK_ALIGN_MASK) >> MVA_BLOCK_SIZE_ORDER;
	/* (sizeRequire>>MVA_BLOCK_SIZE_ORDER) + ((sizeRequire&MVA_BLOCK_ALIGN_MASK)!=0); */
	spin_lock_irqsave(&gMvaGraph_lock, irq_flags);
	region_start = startIdx;
	/* find prev head of this region */
	/* interior blocks hold 0; walk back to the entry that carries the
	 * containing region's size/busy info */
	while (mvaGraph[region_start] == 0)
		region_start--;
	/* fail if that region is busy, or too small to hold
	 * [startIdx .. startIdx+nr-1] */
	if (MVA_IS_BUSY(region_start) || (MVA_GET_NR(region_start) < nr + startIdx - region_start)) {
		M4UMSG("mva is inuse index=0x%x, mvaGraph=0x%x\n", region_start, mvaGraph[region_start]);
		mva = 0;
		goto out;
	}
	/* carveout startIdx~startIdx+nr-1 out of region_start */
	endIdx = startIdx + nr - 1;
	region_end = region_start + MVA_GET_NR(region_start) - 1;
	if (startIdx == region_start && endIdx == region_end) {
		/* exact fit: the whole free region becomes busy */
		MVA_SET_BUSY(startIdx);
		MVA_SET_BUSY(endIdx);
	} else if (startIdx == region_start) {
		/* carve from the front; a free remainder follows */
		mvaGraph[startIdx] = nr | MVA_BUSY_MASK;
		mvaGraph[endIdx] = mvaGraph[startIdx];
		mvaGraph[endIdx + 1] = region_end - endIdx;
		mvaGraph[region_end] = mvaGraph[endIdx + 1];
	} else if (endIdx == region_end) {
		/* carve from the back; a free remainder precedes */
		mvaGraph[region_start] = startIdx - region_start;
		mvaGraph[startIdx - 1] = mvaGraph[region_start];
		mvaGraph[startIdx] = nr | MVA_BUSY_MASK;
		mvaGraph[endIdx] = mvaGraph[startIdx];
	} else {
		/* carve from the middle; free remainders on both sides */
		mvaGraph[region_start] = startIdx - region_start;
		mvaGraph[startIdx - 1] = mvaGraph[region_start];
		mvaGraph[startIdx] = nr | MVA_BUSY_MASK;
		mvaGraph[endIdx] = mvaGraph[startIdx];
		mvaGraph[endIdx + 1] = region_end - endIdx;
		mvaGraph[region_end] = mvaGraph[endIdx + 1];
	}
	mvaInfoGraph[startIdx] = priv;
	mvaInfoGraph[endIdx] = priv;
out:
	spin_unlock_irqrestore(&gMvaGraph_lock, irq_flags);
	return mva;
}
  270. #define RightWrong(x) ((x) ? "correct" : "error")
  271. int m4u_do_mva_free(unsigned int mva, unsigned int size)
  272. {
  273. short startIdx = mva >> MVA_BLOCK_SIZE_ORDER;
  274. short nr = mvaGraph[startIdx] & MVA_BLOCK_NR_MASK;
  275. short endIdx = startIdx + nr - 1;
  276. unsigned int startRequire, endRequire, sizeRequire;
  277. short nrRequire;
  278. unsigned long irq_flags;
  279. spin_lock_irqsave(&gMvaGraph_lock, irq_flags);
  280. /* -------------------------------- */
  281. /* check the input arguments */
  282. /* right condition: startIdx is not NULL && region is busy && right module && right size */
  283. startRequire = mva & (unsigned int)(~M4U_PAGE_MASK);
  284. endRequire = (mva + size - 1) | (unsigned int)M4U_PAGE_MASK;
  285. sizeRequire = endRequire - startRequire + 1;
  286. nrRequire = (sizeRequire + MVA_BLOCK_ALIGN_MASK) >> MVA_BLOCK_SIZE_ORDER;
  287. /* (sizeRequire>>MVA_BLOCK_SIZE_ORDER) + ((sizeRequire&MVA_BLOCK_ALIGN_MASK)!=0); */
  288. if (!(startIdx != 0 /* startIdx is not NULL */
  289. && MVA_IS_BUSY(startIdx)
  290. && (nr == nrRequire))) {
  291. spin_unlock_irqrestore(&gMvaGraph_lock, irq_flags);
  292. M4UMSG("error to free mva========================>\n");
  293. M4UMSG("BufSize=%d(unit:0x%xBytes) (expect %d) [%s]\n",
  294. nrRequire, MVA_BLOCK_SIZE, nr, RightWrong(nrRequire == nr));
  295. M4UMSG("mva=0x%x, (IsBusy?)=%d (expect %d) [%s]\n",
  296. mva, MVA_IS_BUSY(startIdx), 1, RightWrong(MVA_IS_BUSY(startIdx)));
  297. m4u_mvaGraph_dump();
  298. /* m4u_mvaGraph_dump_raw(); */
  299. return -1;
  300. }
  301. mvaInfoGraph[startIdx] = NULL;
  302. mvaInfoGraph[endIdx] = NULL;
  303. /* -------------------------------- */
  304. /* merge with followed region */
  305. if ((endIdx + 1 <= MVA_MAX_BLOCK_NR) && (!MVA_IS_BUSY(endIdx + 1))) {
  306. nr += mvaGraph[endIdx + 1];
  307. mvaGraph[endIdx] = 0;
  308. mvaGraph[endIdx + 1] = 0;
  309. }
  310. /* -------------------------------- */
  311. /* merge with previous region */
  312. if ((startIdx - 1 > 0) && (!MVA_IS_BUSY(startIdx - 1))) {
  313. int pre_nr = mvaGraph[startIdx - 1];
  314. mvaGraph[startIdx] = 0;
  315. mvaGraph[startIdx - 1] = 0;
  316. startIdx -= pre_nr;
  317. nr += pre_nr;
  318. }
  319. /* -------------------------------- */
  320. /* set region flags */
  321. mvaGraph[startIdx] = nr;
  322. mvaGraph[startIdx + nr - 1] = nr;
  323. spin_unlock_irqrestore(&gMvaGraph_lock, irq_flags);
  324. return 0;
  325. }