ccci_bm.c

#include <linux/skbuff.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <mt-plat/mt_ccci_common.h>
#include "ccci_config.h"
#include "ccci_bm.h"

#ifdef CCCI_BM_TRACE
#define CREATE_TRACE_POINTS
#include "ccci_bm_events.h"
#endif

#define REQ_MAGIC_HEADER 0xF111F111
#define REQ_MAGIC_FOOTER 0xF222F222
#define SKB_MAGIC_HEADER 0xF333F333
#define SKB_MAGIC_FOOTER 0xF444F444
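
/*
 * Buffer manager globals: one pre-allocated request pool plus three sk_buff
 * pools sized for 4KB, 1.5KB and 16-byte payloads. Each pool structure is
 * fenced by magic header/footer words so CCCI_MEM_BM_DEBUG can detect
 * corruption, and the pre-filled skb pools are topped up asynchronously via
 * pool_reload_work_queue.
 */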
struct ccci_req_queue req_pool;
struct ccci_skb_queue skb_pool_4K;
struct ccci_skb_queue skb_pool_1_5K;
struct ccci_skb_queue skb_pool_16;
struct workqueue_struct *pool_reload_work_queue;

#ifdef CCCI_BM_TRACE
struct timer_list ccci_bm_stat_timer;
void ccci_bm_stat_timer_func(unsigned long data)
{
	trace_ccci_bm(req_pool.count, skb_pool_4K.skb_list.qlen, skb_pool_1_5K.skb_list.qlen,
		      skb_pool_16.skb_list.qlen);
	mod_timer(&ccci_bm_stat_timer, jiffies + HZ / 2);
}
#endif

#ifdef CCCI_WP_DEBUG
#include <mt-plat/hw_watchpoint.h>
static struct wp_event wp_event;
static atomic_t hwp_enable = ATOMIC_INIT(0);
static int my_wp_handler(phys_addr_t addr)
{
	CCCI_INF_MSG(-1, BM, "[ccci/WP_LCH_DEBUG] access from 0x%p, call bug\n", (void *)addr);
	dump_stack();
	/*BUG();*/
	/* re-enable the watchpoint, since the auto-disable is not working */
	del_hw_watchpoint(&wp_event);
#if 0
	wp_err = add_hw_watchpoint(&wp_event);
	if (wp_err != 0)
		/* error */
		CCCI_INF_MSG(-1, BM, "[mydebug]watchpoint init fail\n");
	else
		/* success */
		CCCI_INF_MSG(-1, BM, "[mydebug]watchpoint init done\n");
#endif
	return 0;
}

/*
static void disable_watchpoint(void)
{
	if (atomic_read(&hwp_enable)) {
		del_hw_watchpoint(&wp_event);
		atomic_set(&hwp_enable, 0);
	}
}
*/

static void enable_watchpoint(void *address)
{
	int wp_err;

	if (atomic_read(&hwp_enable) == 0) {
		init_wp_event(&wp_event, (phys_addr_t) address, (phys_addr_t) address,
			      WP_EVENT_TYPE_WRITE, my_wp_handler);
		atomic_set(&hwp_enable, 1);
		wp_err = add_hw_watchpoint(&wp_event);
		if (wp_err)
			CCCI_INF_MSG(-1, BM, "[mydebug]watchpoint init fail,addr=%p\n", address);
	}
}
#endif

#ifdef CCCI_MEM_BM_DEBUG
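/*
 * Debug helper: returns 1 when the sk_buff pointer about to be freed actually
 * falls inside one of the pool control structures (skb_pool_16/1_5K/4K or
 * req_pool), i.e. a bogus pointer that would corrupt the buffer manager.
 */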
static int ccci_skb_addr_checker(struct sk_buff *newsk)
{
	unsigned long skb_addr_value;
	unsigned long queue16_addr_value;
	unsigned long queue1_5k_addr_value;
	unsigned long queue4k_addr_value;
	unsigned long req_pool_addr_value;

	skb_addr_value = (unsigned long)newsk;
	queue16_addr_value = (unsigned long)&skb_pool_16;
	queue1_5k_addr_value = (unsigned long)&skb_pool_1_5K;
	queue4k_addr_value = (unsigned long)&skb_pool_4K;
	req_pool_addr_value = (unsigned long)&req_pool;
	if ((skb_addr_value >= queue16_addr_value
	     && skb_addr_value < queue16_addr_value + sizeof(struct ccci_skb_queue))
	    || (skb_addr_value >= queue1_5k_addr_value
		&& skb_addr_value < queue1_5k_addr_value + sizeof(struct ccci_skb_queue))
	    || (skb_addr_value >= queue4k_addr_value
		&& skb_addr_value < queue4k_addr_value + sizeof(struct ccci_skb_queue))
	    || (skb_addr_value >= req_pool_addr_value
		&& skb_addr_value < req_pool_addr_value + sizeof(struct ccci_req_queue))) {
		CCCI_INF_MSG(-1, BM, "Free wrong skb=%lx pointer in skb pool!\n", skb_addr_value);
		CCCI_INF_MSG(-1, BM, "skb=%lx, skb_pool_16=%lx, skb_pool_1_5K=%lx, skb_pool_4K=%lx, req_pool=%lx!\n",
			     skb_addr_value, queue16_addr_value, queue1_5k_addr_value,
			     queue4k_addr_value, req_pool_addr_value);
		return 1;
	}
	return 0;
}
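
/*
 * Debug helper: verifies the magic header/footer words of the request pool
 * and all three skb pools; on a mismatch it dumps the corrupted structure
 * and the current call stack.
 */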
void ccci_magic_checker(void)
{
	if (req_pool.magic_header != REQ_MAGIC_HEADER || req_pool.magic_footer != REQ_MAGIC_FOOTER) {
		CCCI_INF_MSG(-1, BM, "req_pool magic error!\n");
		ccci_mem_dump(-1, &req_pool, sizeof(struct ccci_req_queue));
		dump_stack();
	}
	if (skb_pool_16.magic_header != SKB_MAGIC_HEADER || skb_pool_16.magic_footer != SKB_MAGIC_FOOTER) {
		CCCI_INF_MSG(-1, BM, "skb_pool_16 magic error!\n");
		ccci_mem_dump(-1, &skb_pool_16, sizeof(struct ccci_skb_queue));
		dump_stack();
	}
	if (skb_pool_1_5K.magic_header != SKB_MAGIC_HEADER || skb_pool_1_5K.magic_footer != SKB_MAGIC_FOOTER) {
		CCCI_INF_MSG(-1, BM, "skb_pool_1_5K magic error!\n");
		ccci_mem_dump(-1, &skb_pool_1_5K, sizeof(struct ccci_skb_queue));
		dump_stack();
	}
	if (skb_pool_4K.magic_header != SKB_MAGIC_HEADER || skb_pool_4K.magic_footer != SKB_MAGIC_FOOTER) {
		CCCI_INF_MSG(-1, BM, "skb_pool_4K magic error!\n");
		ccci_mem_dump(-1, &skb_pool_4K, sizeof(struct ccci_skb_queue));
		dump_stack();
	}
}
#endif
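
/*
 * Takes the first free request off the request pool under its spinlock;
 * returns NULL if the pool is empty. When CCCI_MEM_BM_DEBUG is enabled the
 * pool's magic words are checked first.
 */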
static struct ccci_request *ccci_req_dequeue(struct ccci_req_queue *queue)
{
	unsigned long flags;
	struct ccci_request *result = NULL;

#ifdef CCCI_MEM_BM_DEBUG
	if (queue->magic_header != REQ_MAGIC_HEADER || queue->magic_footer != REQ_MAGIC_FOOTER) {
		ccci_mem_dump(-1, queue, sizeof(struct ccci_req_queue));
		dump_stack();
	}
#endif
	spin_lock_irqsave(&queue->req_lock, flags);
	if (list_empty(&queue->req_list))
		goto out;
	result = list_first_entry(&queue->req_list, struct ccci_request, entry);
	if (result) {
		queue->count--;
		list_del(&result->entry);
	}
out:
	spin_unlock_irqrestore(&queue->req_lock, flags);
	return result;
}
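
/*
 * Re-initializes a request and puts it back on the tail of the request pool
 * under the pool spinlock.
 */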
static void ccci_req_enqueue(struct ccci_req_queue *queue, struct ccci_request *req)
{
	unsigned long flags;

	spin_lock_irqsave(&queue->req_lock, flags);
	ccci_request_struct_init(req);
	list_add_tail(&req->entry, &queue->req_list);
	queue->count++;
	spin_unlock_irqrestore(&queue->req_lock, flags);
}
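
/*
 * One-time setup of the request pool: installs the magic guard words and
 * pre-allocates BM_POOL_SIZE request structures with GFP_KERNEL.
 */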
static void ccci_req_queue_init(struct ccci_req_queue *queue)
{
	int i;

	queue->magic_header = REQ_MAGIC_HEADER;
	queue->magic_footer = REQ_MAGIC_FOOTER;
	queue->max_len = BM_POOL_SIZE;
	INIT_LIST_HEAD(&queue->req_list);
	queue->count = 0;
	for (i = 0; i < queue->max_len; i++) {
		struct ccci_request *req = kmalloc(sizeof(struct ccci_request), GFP_KERNEL);

		if (!req)	/* skip the slot if the init-time allocation fails */
			continue;
		ccci_request_struct_init(req);
		list_add_tail(&req->entry, &queue->req_list);
		queue->count++;
	}
	spin_lock_init(&queue->req_lock);
	init_waitqueue_head(&queue->req_wq);
}
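
/*
 * Picks the smallest pre-filled pool whose skbs can hold @size bytes
 * (16B, 1.5KB or 4KB) and dequeues from it; may return NULL if that pool is
 * momentarily empty.
 */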
static inline struct sk_buff *__alloc_skb_from_pool(int size)
{
	struct sk_buff *skb = NULL;

	if (size > SKB_1_5K)
		skb = ccci_skb_dequeue(&skb_pool_4K);
	else if (size > SKB_16)
		skb = ccci_skb_dequeue(&skb_pool_1_5K);
	else if (size > 0)
		skb = ccci_skb_dequeue(&skb_pool_16);
	return skb;
}
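
/*
 * Allocates a fresh skb of the matching size class directly from the kernel
 * using the caller-supplied gfp_mask; logs the caller on failure.
 */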
static inline struct sk_buff *__alloc_skb_from_kernel(int size, gfp_t gfp_mask)
{
	struct sk_buff *skb = NULL;

	if (size > SKB_1_5K)
		skb = __dev_alloc_skb(SKB_4K, gfp_mask);
	else if (size > SKB_16)
		skb = __dev_alloc_skb(SKB_1_5K, gfp_mask);
	else if (size > 0)
		skb = __dev_alloc_skb(SKB_16, gfp_mask);
	if (!skb)
		CCCI_ERR_MSG(-1, BM, "%ps alloc skb from kernel fail, size=%d\n", __builtin_return_address(0), size);
	return skb;
}
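
/*
 * Pops one skb from a pool. For pre-filled pools, once the queue length
 * drops below max_len / RELOAD_TH the pool's reload work is scheduled on
 * pool_reload_work_queue so the pool is topped up from the kernel in the
 * background.
 */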
struct sk_buff *ccci_skb_dequeue(struct ccci_skb_queue *queue)
{
	unsigned long flags;
	struct sk_buff *result;

#ifdef CCCI_MEM_BM_DEBUG
	if (queue->magic_header != SKB_MAGIC_HEADER || queue->magic_footer != SKB_MAGIC_FOOTER) {
		CCCI_ERR_MSG(-1, BM,
			     "ccci_skb_dequeue: queue=%lx, skb_pool_16=%lx, skb_pool_1_5K=%lx, skb_pool_4K=%lx, req_pool=%lx!\n",
			     (unsigned long)queue, (unsigned long)&skb_pool_16, (unsigned long)&skb_pool_1_5K,
			     (unsigned long)&skb_pool_4K, (unsigned long)&req_pool);
		ccci_mem_dump(-1, queue, sizeof(struct ccci_skb_queue));
		dump_stack();
	}
#endif
	spin_lock_irqsave(&queue->skb_list.lock, flags);
	result = __skb_dequeue(&queue->skb_list);
	if (queue->pre_filled && queue->skb_list.qlen < queue->max_len / RELOAD_TH)
		queue_work(pool_reload_work_queue, &queue->reload_work);
	spin_unlock_irqrestore(&queue->skb_list.lock, flags);
	return result;
}
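
/*
 * Pushes an skb back onto a pool. If the pool is already at max_len the skb
 * is handed to dev_kfree_skb_any() instead (after an address sanity check
 * when CCCI_MEM_BM_DEBUG is enabled), so a pool never grows past its
 * configured size.
 */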
void ccci_skb_enqueue(struct ccci_skb_queue *queue, struct sk_buff *newsk)
{
	unsigned long flags;

	spin_lock_irqsave(&queue->skb_list.lock, flags);
	if (queue->skb_list.qlen < queue->max_len) {
		__skb_queue_tail(&queue->skb_list, newsk);
		if (queue->skb_list.qlen > queue->max_history)
			queue->max_history = queue->skb_list.qlen;
	} else {
#if 0
		if (queue->pre_filled) {
			CCCI_ERR_MSG(0, BM, "skb queue too long, max=%d\n", queue->max_len);
#else
		if (1) {
#endif
#ifdef CCCI_MEM_BM_DEBUG
			if (ccci_skb_addr_checker(newsk)) {
				CCCI_INF_MSG(-1, BM, "ccci_skb_enqueue:ccci_skb_addr_checker failed!\n");
				ccci_mem_dump(-1, queue, sizeof(struct ccci_skb_queue));
				dump_stack();
			}
#endif
			dev_kfree_skb_any(newsk);
		} else {
			__skb_queue_tail(&queue->skb_list, newsk);
		}
	}
	spin_unlock_irqrestore(&queue->skb_list.lock, flags);
}
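
/*
 * Initializes one skb pool: installs the magic guard words, optionally
 * pre-fills it with max_len skbs of @skb_size from the kernel, and (with
 * CCCI_WP_DEBUG) arms a hardware watchpoint on skb_pool_16's footer to
 * catch whoever overwrites it.
 */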
void ccci_skb_queue_init(struct ccci_skb_queue *queue, unsigned int skb_size, unsigned int max_len,
			 char fill_now)
{
	int i;

	queue->magic_header = SKB_MAGIC_HEADER;
	queue->magic_footer = SKB_MAGIC_FOOTER;
#ifdef CCCI_WP_DEBUG
	if (((unsigned long)queue) == ((unsigned long)(&skb_pool_16))) {
		CCCI_INF_MSG(-1, BM, "ccci_skb_queue_init: add hwp skb_pool_16.magic_footer=%p!\n",
			     &queue->magic_footer);
		enable_watchpoint(&queue->magic_footer);
	}
#endif
	skb_queue_head_init(&queue->skb_list);
	queue->max_len = max_len;
	if (fill_now) {
		for (i = 0; i < queue->max_len; i++) {
			struct sk_buff *skb = __alloc_skb_from_kernel(skb_size, GFP_KERNEL);

			if (skb != NULL)
				skb_queue_tail(&queue->skb_list, skb);
		}
		queue->pre_filled = 1;
	} else {
		queue->pre_filled = 0;
	}
	queue->max_history = 0;
}

/*
 * May return NULL, so the caller should check. The network path should
 * always use blocking allocation, as we do not want it to consume our own
 * pool.
 */
struct sk_buff *ccci_alloc_skb(int size, char from_pool, char blocking)
{
	int count = 0;
	struct sk_buff *skb = NULL;

#ifdef CCCI_MEM_BM_DEBUG
	ccci_magic_checker();
#endif
	if (size > SKB_4K || size < 0)
		goto err_exit;
	if (from_pool) {
slow_retry:
		skb = __alloc_skb_from_pool(size);
		if (unlikely(!skb && blocking)) {
			CCCI_INF_MSG(-1, BM, "skb pool is empty! size=%d (%d)\n", size, count++);
			msleep(100);
			goto slow_retry;
		}
	} else {
		if (blocking) {
			skb = __alloc_skb_from_kernel(size, GFP_KERNEL);
		} else {
fast_retry:
			skb = __alloc_skb_from_kernel(size, GFP_ATOMIC);
			if (!skb && count++ < 20)
				goto fast_retry;
		}
	}
err_exit:
	if (unlikely(!skb))
		CCCI_ERR_MSG(-1, BM, "%ps alloc skb fail, size=%d\n", __builtin_return_address(0), size);
	else
		CCCI_DBG_MSG(-1, BM, "%ps alloc skb %p, size=%d\n", __builtin_return_address(0), skb, size);
	return skb;
}
EXPORT_SYMBOL(ccci_alloc_skb);
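
/*
 * Illustrative only (not part of the original file): a minimal sketch of the
 * intended alloc/free pairing, assuming a caller that needs a 1.5KB buffer
 * from the pre-filled pools and can tolerate blocking:
 *
 *	struct sk_buff *skb = ccci_alloc_skb(SKB_1_5K, 1, 1);
 *
 *	if (skb) {
 *		// ... fill and consume skb->data ...
 *		ccci_free_skb(skb, RECYCLE);	// reset and return it to the pool
 *	}
 */
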
void ccci_free_skb(struct sk_buff *skb, DATA_POLICY policy)
{
	CCCI_DBG_MSG(-1, BM, "%ps free skb %p, policy=%d, len=%d\n", __builtin_return_address(0),
		     skb, policy, skb_size(skb));
	switch (policy) {
	case RECYCLE:
		/* 1. reset sk_buff (take __alloc_skb as ref.) */
		skb->data = skb->head;
		skb->len = 0;
		skb_reset_tail_pointer(skb);
		/* 2. enqueue */
		if (skb_size(skb) < SKB_1_5K)
			ccci_skb_enqueue(&skb_pool_16, skb);
		else if (skb_size(skb) < SKB_4K)
			ccci_skb_enqueue(&skb_pool_1_5K, skb);
		else
			ccci_skb_enqueue(&skb_pool_4K, skb);
		break;
	case FREE:
#ifdef CCCI_MEM_BM_DEBUG
		if (ccci_skb_addr_checker(skb)) {
			CCCI_INF_MSG(-1, BM, "ccci_skb_addr_checker failed\n");
			dump_stack();
		}
#endif
		dev_kfree_skb_any(skb);
		break;
	case NOOP:
	default:
		break;
	}
}
EXPORT_SYMBOL(ccci_free_skb);
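
/*
 * Workqueue handlers that refill the pre-filled pools back up to their
 * target sizes (SKB_POOL_SIZE_4K/1_5K/16) with GFP_KERNEL allocations;
 * scheduled from ccci_skb_dequeue() when a pool runs low.
 */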
static void __4K_reload_work(struct work_struct *work)
{
	struct sk_buff *skb;

	CCCI_DBG_MSG(-1, BM, "refill 4KB skb pool\n");
	while (skb_pool_4K.skb_list.qlen < SKB_POOL_SIZE_4K) {
		skb = __alloc_skb_from_kernel(SKB_4K, GFP_KERNEL);
		if (skb)
			skb_queue_tail(&skb_pool_4K.skb_list, skb);
		else
			CCCI_ERR_MSG(-1, BM, "fail to reload 4KB pool\n");
	}
}

static void __1_5K_reload_work(struct work_struct *work)
{
	struct sk_buff *skb;

	CCCI_DBG_MSG(-1, BM, "refill 1.5KB skb pool\n");
	while (skb_pool_1_5K.skb_list.qlen < SKB_POOL_SIZE_1_5K) {
		skb = __alloc_skb_from_kernel(SKB_1_5K, GFP_KERNEL);
		if (skb)
			skb_queue_tail(&skb_pool_1_5K.skb_list, skb);
		else
			CCCI_ERR_MSG(-1, BM, "fail to reload 1.5KB pool\n");
	}
}

static void __16_reload_work(struct work_struct *work)
{
	struct sk_buff *skb;

	CCCI_DBG_MSG(-1, BM, "refill 16B skb pool\n");
	while (skb_pool_16.skb_list.qlen < SKB_POOL_SIZE_16) {
		skb = __alloc_skb_from_kernel(SKB_16, GFP_KERNEL);
		if (skb)
			skb_queue_tail(&skb_pool_16.skb_list, skb);
		else
			CCCI_ERR_MSG(-1, BM, "fail to reload 16B pool\n");
	}
}

/*
 * A write operation may block at 3 stages:
 * 1. ccci_alloc_req
 * 2. waiting until the queue has an available slot (threshold check)
 * 3. waiting until the SDIO transfer is complete --> abandoned, see the reason below.
 * The 1st is controlled by @blk1; the 2nd and 3rd are controlled by @blk2, waiting on @wq.
 * NULL is returned if no skb is available, even when blk1=1.
 *
 * We removed the wait_queue_head_t in ccci_request, so users can NOT wait for a certain request
 * to be completed. This is because a request will be recycled and its state reset, so if a request
 * is completed and then used again, the poor guy waiting for it may never see the state
 * transition (FLYING->IDLE/COMPLETE->FLYING) and will wait forever.
 */
struct ccci_request *ccci_alloc_req(DIRECTION dir, int size, char blk1, char blk2)
{
	struct ccci_request *req = NULL;

#ifdef CCCI_MEM_BM_DEBUG
	ccci_magic_checker();
#endif
retry:
	req = ccci_req_dequeue(&req_pool);
	if (req) {
		if (size > 0) {
			req->skb = ccci_alloc_skb(size, 1, blk1);
			req->policy = RECYCLE;
			if (req->skb)
				CCCI_DBG_MSG(-1, BM, "alloc ok, req=%p skb=%p, len=%d\n", req, req->skb,
					     skb_size(req->skb));
		} else {
			req->skb = NULL;
			req->policy = NOOP;
		}
		req->blocking = blk2;
	} else {
		if (blk1) {
			wait_event_interruptible(req_pool.req_wq, (req_pool.count > 0));
			goto retry;
		}
		CCCI_INF_MSG(-1, BM, "fail to alloc req for %ps, no retry\n", __builtin_return_address(0));
	}
	if (unlikely(size > 0 && req && !req->skb)) {
		CCCI_ERR_MSG(-1, BM, "fail to alloc skb for %ps, size=%d\n", __builtin_return_address(0), size);
		req->policy = NOOP;
		ccci_free_req(req);
		req = NULL;
	}
	return req;
}
EXPORT_SYMBOL(ccci_alloc_req);
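
/*
 * Illustrative only (not part of the original file): a minimal sketch of how
 * a sender might pair ccci_alloc_req() with ccci_free_req(), assuming the
 * DIRECTION enum provides an OUT value and the payload fits in CCCI_MTU
 * bytes. blk1=1 blocks until a request/skb is available; blk2=1 marks the
 * request as blocking for the later queue-threshold check:
 *
 *	struct ccci_request *req = ccci_alloc_req(OUT, CCCI_MTU, 1, 1);
 *
 *	if (req) {
 *		// ... build the payload in req->skb, hand req to the HIF queue ...
 *		// whoever ends up owning the request releases it with:
 *		ccci_free_req(req);	// frees/recycles req->skb per req->policy
 *	}
 */
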
void ccci_free_req(struct ccci_request *req)
{
	CCCI_DBG_MSG(-1, BM, "%ps free req=%p, policy=%d, skb=%p\n", __builtin_return_address(0),
		     req, req->policy, req->skb);
	if (req->skb) {
		ccci_free_skb(req->skb, req->policy);
		req->skb = NULL;
	}
	if (req->entry.next != LIST_POISON1 || req->entry.prev != LIST_POISON2) {
		CCCI_ERR_MSG(-1, BM, "req %p entry not deleted yet, from %ps\n", req, __builtin_return_address(0));
		list_del(&req->entry);
	}
	ccci_req_enqueue(&req_pool, req);
	wake_up_all(&req_pool.req_wq);
}
EXPORT_SYMBOL(ccci_free_req);
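
/*
 * Hex-dumps @len bytes starting at @start_addr, 16 bytes (4 words) per line;
 * a partial tail is zero-padded into a temporary buffer before printing.
 */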
void ccci_mem_dump(int md_id, void *start_addr, int len)
{
	unsigned int *curr_p = (unsigned int *)start_addr;
	unsigned char *curr_ch_p;
	int _16_fix_num = len / 16;
	int tail_num = len % 16;
	char buf[16];
	int i, j;

	if (NULL == curr_p) {
		CCCI_INF_MSG(md_id, BM, "NULL pointer to dump!\n");
		return;
	}
	if (0 == len) {
		CCCI_INF_MSG(md_id, BM, "No need to dump\n");
		return;
	}
	CCCI_EXP_INF_MSG(md_id, BM, "Base: %p\n", start_addr);
	/* Fix section */
	for (i = 0; i < _16_fix_num; i++) {
		CCCI_DUMP_MSG2(md_id, BM, "%03X: %08X %08X %08X %08X\n",
			       i * 16, *curr_p, *(curr_p + 1), *(curr_p + 2), *(curr_p + 3));
		curr_p += 4;
	}
	/* Tail section */
	if (tail_num > 0) {
		curr_ch_p = (unsigned char *)curr_p;
		for (j = 0; j < tail_num; j++) {
			buf[j] = *curr_ch_p;
			curr_ch_p++;
		}
		for (; j < 16; j++)
			buf[j] = 0;
		curr_p = (unsigned int *)buf;
		CCCI_DUMP_MSG2(md_id, BM, "%03X: %08X %08X %08X %08X\n",
			       i * 16, *curr_p, *(curr_p + 1), *(curr_p + 2), *(curr_p + 3));
	}
}
EXPORT_SYMBOL(ccci_mem_dump);
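
/*
 * Compact variant of ccci_mem_dump(): prints 80 bytes (20 words) per line,
 * with the same zero-padded handling of a partial tail.
 */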
void ccci_cmpt_mem_dump(int md_id, void *start_addr, int len)
{
#define DUMP_LEN 80
	unsigned int *curr_p = (unsigned int *)start_addr;
	unsigned char *curr_ch_p;
	int fix_num = len / DUMP_LEN;
	int tail_num = len % DUMP_LEN;
	char buf[DUMP_LEN];
	int i, j;

	if (NULL == curr_p) {
		CCCI_INF_MSG(md_id, BM, "NULL pointer to dump!\n");
		return;
	}
	if (0 == len) {
		CCCI_INF_MSG(md_id, BM, "No need to dump\n");
		return;
	}
	/* Fix section */
	for (i = 0; i < fix_num; i++) {
		CCCI_INF_MSG(md_id, BM, "%03X: %X %X %X %X %X %X %X %X %X %X %X %X %X %X %X %X %X %X %X %X\n",
			     i * DUMP_LEN,
			     *curr_p, *(curr_p + 1), *(curr_p + 2), *(curr_p + 3),
			     *(curr_p + 4), *(curr_p + 5), *(curr_p + 6), *(curr_p + 7),
			     *(curr_p + 8), *(curr_p + 9), *(curr_p + 10), *(curr_p + 11),
			     *(curr_p + 12), *(curr_p + 13), *(curr_p + 14), *(curr_p + 15),
			     *(curr_p + 16), *(curr_p + 17), *(curr_p + 18), *(curr_p + 19));
		curr_p += DUMP_LEN / 4;
	}
	/* Tail section */
	if (tail_num > 0) {
		curr_ch_p = (unsigned char *)curr_p;
		for (j = 0; j < tail_num; j++) {
			buf[j] = *curr_ch_p;
			curr_ch_p++;
		}
		for (; j < DUMP_LEN; j++)
			buf[j] = 0;
		curr_p = (unsigned int *)buf;
		CCCI_INF_MSG(md_id, BM, "%03X: %X %X %X %X %X %X %X %X %X %X %X %X %X %X %X %X %X %X %X %X\n",
			     i * DUMP_LEN,
			     *curr_p, *(curr_p + 1), *(curr_p + 2), *(curr_p + 3),
			     *(curr_p + 4), *(curr_p + 5), *(curr_p + 6), *(curr_p + 7),
			     *(curr_p + 8), *(curr_p + 9), *(curr_p + 10), *(curr_p + 11),
			     *(curr_p + 12), *(curr_p + 13), *(curr_p + 14), *(curr_p + 15),
			     *(curr_p + 16), *(curr_p + 17), *(curr_p + 18), *(curr_p + 19));
	}
}
EXPORT_SYMBOL(ccci_cmpt_mem_dump);

void ccci_dump_req(struct ccci_request *req)
{
	ccci_mem_dump(-1, req->skb->data, req->skb->len > 32 ? 32 : req->skb->len);
}
EXPORT_SYMBOL(ccci_dump_req);
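
/*
 * Module-level init of the buffer manager: builds the request pool, the
 * three pre-filled skb pools and their reload works on a dedicated
 * unbound, high-priority workqueue, and (with CCCI_BM_TRACE) starts the
 * periodic pool-statistics timer.
 */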
int ccci_subsys_bm_init(void)
{
	/* init ccci_request */
	ccci_req_queue_init(&req_pool);
	CCCI_INF_MSG(-1, BM, "MTU=%d/%d, pool size %d/%d/%d/%d\n", CCCI_MTU, CCCI_NET_MTU,
		     SKB_POOL_SIZE_4K, SKB_POOL_SIZE_1_5K, SKB_POOL_SIZE_16, req_pool.max_len);
	/* init skb pool */
	ccci_skb_queue_init(&skb_pool_4K, SKB_4K, SKB_POOL_SIZE_4K, 1);
	ccci_skb_queue_init(&skb_pool_1_5K, SKB_1_5K, SKB_POOL_SIZE_1_5K, 1);
	ccci_skb_queue_init(&skb_pool_16, SKB_16, SKB_POOL_SIZE_16, 1);
	/* init pool reload work */
	pool_reload_work_queue = alloc_workqueue("pool_reload_work", WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_HIGHPRI, 1);
	INIT_WORK(&skb_pool_4K.reload_work, __4K_reload_work);
	INIT_WORK(&skb_pool_1_5K.reload_work, __1_5K_reload_work);
	INIT_WORK(&skb_pool_16.reload_work, __16_reload_work);
#ifdef CCCI_BM_TRACE
	init_timer(&ccci_bm_stat_timer);
	ccci_bm_stat_timer.function = ccci_bm_stat_timer_func;
	mod_timer(&ccci_bm_stat_timer, jiffies + 10 * HZ);
#endif
	return 0;
}