modem_cldma.h

#ifndef __MODEM_CD_H__
#define __MODEM_CD_H__

#include <linux/wakelock.h>
#include <linux/dmapool.h>
#include <linux/timer.h>
#include <linux/hrtimer.h>
#include <linux/skbuff.h>
#include <mt-plat/mt_ccci_common.h>
#include "ccci_config.h"
#include "ccci_bm.h"

/*
 * Hardcoded: the max queue number must be kept in sync with the port array in port_cfg.c
 * and the macros in ccci_core.h. The numbers below must match MAX_TXQ/RXQ_NUM in
 * ccci_core.h and the bitmasks in modem_cldma.c.
 */
#define CLDMA_TXQ_NUM 8
#define CLDMA_RXQ_NUM 8
#define NET_TXQ_NUM 3
#define NET_RXQ_NUM 3
#define NORMAL_TXQ_NUM 6
#define NORMAL_RXQ_NUM 6
#define MAX_BD_NUM (MAX_SKB_FRAGS + 1)
#define TRAFFIC_MONITOR_INTERVAL 10 /* seconds */
#define SKB_RX_QUEUE_MAX_LEN 200000

/*
 * CLDMA feature options:
 * CHECKSUM_SIZE: 0 to disable the checksum function, non-zero for the number of checksum bytes.
 * CLDMA_NO_TX_IRQ: mask all TX interrupts; collect TX_DONE skbs when an RX interrupt arrives or TX is busy.
 * ENABLE_CLDMA_TIMER: use a timer to detect whether a TX packet was actually sent; not usable if TX interrupts are masked.
 * CLDMA_NET_TX_BD: use BDs to support scatter/gather I/O for net devices.
 */
#define CHECKSUM_SIZE 0 /* 12 */
/* #define CLDMA_NO_TX_IRQ */
#ifndef CLDMA_NO_TX_IRQ
/* #define ENABLE_CLDMA_TIMER */
#endif
#define CLDMA_NET_TX_BD
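
/*
 * Illustrative sketch only (not part of this header): with ENABLE_CLDMA_TIMER defined,
 * the Tx path could arm the per-queue timeout_timer (declared below in struct
 * md_cd_queue) when a packet is handed to CLDMA, and disarm it on TX_DONE, so a packet
 * that is never sent fires the timer callback. The 5-second timeout is an arbitrary
 * placeholder for this sketch.
 */
#if 0
#ifdef ENABLE_CLDMA_TIMER
	/* on Tx submit: start watching this queue */
	mod_timer(&queue->timeout_timer, jiffies + 5 * HZ);
	/* on TX_DONE: the packet was sent, stop watching */
	del_timer(&queue->timeout_timer);
#endif
#endif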
struct cldma_request {
	void *gpd; /* virtual address for CPU */
	dma_addr_t gpd_addr; /* physical address for DMA */
	struct sk_buff *skb;
	dma_addr_t data_buffer_ptr_saved;
	struct list_head entry;
	struct list_head bd;
	/* inherited from struct ccci_request */
	DATA_POLICY policy;
	unsigned char ioc_override; /* bit7: override or not; bit0: IOC setting */
};

typedef enum {
	RING_GPD = 0,
	RING_GPD_BD = 1,
	RING_SPD = 2,
} CLDMA_RING_TYPE;

struct md_cd_queue;

/*
 * In an ideal design, all read/write pointers would be members of cldma_ring, which together
 * with the buffer itself and the Tx/Rx functions would form a complete ring buffer object.
 * But that would change too much of the original code, so we dropped it. Here cldma_ring is
 * quite light and most ring buffer operations still live in the queue struct.
 */
struct cldma_ring {
	struct list_head gpd_ring; /* ring of struct cldma_request */
	int length; /* number of struct cldma_request */
	int pkt_size; /* size of each packet in the ring */
	CLDMA_RING_TYPE type;

	int (*handle_tx_request)(struct md_cd_queue *queue, struct cldma_request *req,
		struct sk_buff *skb, DATA_POLICY policy, unsigned int ioc_override);
	int (*handle_rx_done)(struct md_cd_queue *queue, int budget, int blocking, int *result, int *rxbytes);
	int (*handle_tx_done)(struct md_cd_queue *queue, int budget, int blocking, int *result);
	int (*handle_rx_refill)(struct md_cd_queue *queue);
};

static inline struct cldma_request *cldma_ring_step_forward(struct cldma_ring *ring, struct cldma_request *req)
{
	struct cldma_request *next_req;

	if (req->entry.next == &ring->gpd_ring)
		next_req = list_first_entry(&ring->gpd_ring, struct cldma_request, entry);
	else
		next_req = list_entry(req->entry.next, struct cldma_request, entry);
	return next_req;
}
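
/*
 * Illustrative usage sketch only (not part of this header): cldma_ring_step_forward()
 * skips the list head, so repeatedly stepping from any request visits every request in
 * the ring exactly once before returning to the starting point. The walker function
 * below is hypothetical.
 */
#if 0
static void cldma_ring_walk_sketch(struct cldma_ring *ring, struct cldma_request *start)
{
	struct cldma_request *req = start;

	do {
		/* inspect req->gpd / req->skb here */
		req = cldma_ring_step_forward(ring, req);
	} while (req != start);
}
#endif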
struct md_cd_queue {
	unsigned char index;
	struct ccci_modem *modem;
	struct ccci_port *napi_port;
	/*
	 * What we have here is not a typical ring buffer, as we have three players:
	 * for Tx: sender thread -> CLDMA -> tx_done thread
	 *  - sender thread: sets the HWO bit, req->skb and gpd->buffer, when req->skb==NULL
	 *  - tx_done thread: frees the skb only when req->skb!=NULL && HWO==0
	 *  - CLDMA: sends the skb only when gpd->buffer!=NULL && HWO==1
	 * for Rx: refill thread -> CLDMA -> rx_done thread
	 *  - refill thread: sets the HWO bit, req->skb and gpd->buffer, when req->skb==NULL
	 *  - rx_done thread: frees the skb only when req->skb!=NULL && HWO==0
	 *  - CLDMA: sends the skb only when gpd->buffer!=NULL && HWO==1
	 *
	 * For Tx, although only the sender thread is the "writer"--the one who sets the HWO bit--both
	 * the tx_done thread and CLDMA only read this bit. BUT, besides the HWO bit, the sender thread
	 * also shares req->skb with the tx_done thread and gpd->buffer with CLDMA, so it must set the
	 * HWO bit after setting gpd->buffer and before setting req->skb.
	 *
	 * For Rx, only the refill thread is the "writer"--the one who sets the HWO bit--and both the
	 * rx_done thread and CLDMA only read this bit. Besides the HWO bit, the refill thread also
	 * shares req->skb with the rx_done thread and gpd->buffer with CLDMA, so it too must set the
	 * HWO bit after setting gpd->buffer and before setting req->skb.
	 *
	 * So in an ideal world, we could use the HWO bit as a barrier and avoid using a lock.
	 * However, there may be multiple sender threads on top of each Tx queue and they must be
	 * serialized, so we still need a Tx lock.
	 *
	 * BUT, consider this sequence: the sender or refiller has set HWO=1 but has not set req->skb
	 * yet; CLDMA finishes this GPD, and TX_DONE or RX_DONE sees HWO==0 but req->skb==NULL, so this
	 * skb is never handled. Therefore, as a conclusion: use the lock!
	 *
	 * Be aware that for Tx this lock also protects TX_IRQ, TX_FULL, the budget, and sequence
	 * number usage.
	 */
	struct cldma_ring *tr_ring;
	struct cldma_request *tr_done;
	int budget; /* same as the ring buffer size by default */
	struct cldma_request *rx_refill; /* only for Rx */
	struct cldma_request *tx_xmit; /* only for Tx */
	wait_queue_head_t req_wq; /* only for Tx */
	spinlock_t ring_lock;
	struct ccci_skb_queue skb_list; /* only for network Rx */
	struct workqueue_struct *worker;
	struct work_struct cldma_rx_work;
	struct delayed_work cldma_tx_work;
	struct workqueue_struct *refill_worker; /* only for Rx */
	struct work_struct cldma_refill_work; /* only for Rx */
	wait_queue_head_t rx_wq;
	struct task_struct *rx_thread;
#ifdef ENABLE_CLDMA_TIMER
	struct timer_list timeout_timer;
	unsigned long long timeout_start;
	unsigned long long timeout_end;
#endif
	u16 debug_id;
	DIRECTION dir;
	unsigned int busy_count;
};
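
/*
 * Illustrative sketch only (not part of this header): the ordering described in the comment
 * above, done under ring_lock on the Tx side. The helpers cldma_set_gpd_buffer() and
 * cldma_set_hwo() are hypothetical placeholders for whatever writes the GPD fields in the
 * driver.
 */
#if 0
static void cldma_tx_publish_sketch(struct md_cd_queue *queue,
				    struct cldma_request *req, struct sk_buff *skb)
{
	unsigned long flags;

	spin_lock_irqsave(&queue->ring_lock, flags);
	/* 1. hand the data buffer to CLDMA first */
	cldma_set_gpd_buffer(req->gpd, skb);
	/* 2. then set HWO so CLDMA may start the transfer */
	cldma_set_hwo(req->gpd);
	/* 3. only now publish the skb to the tx_done thread */
	req->skb = skb;
	spin_unlock_irqrestore(&queue->ring_lock, flags);
}
#endif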
#define QUEUE_LEN(a) (sizeof(a)/sizeof(struct md_cd_queue))

struct md_cd_ctrl {
	struct ccci_modem *modem;
	struct md_cd_queue txq[CLDMA_TXQ_NUM];
	struct md_cd_queue rxq[CLDMA_RXQ_NUM];
	unsigned short txq_active;
	unsigned short rxq_active;
#ifdef NO_START_ON_SUSPEND_RESUME
	unsigned short txq_started;
#endif
	struct mutex ccif_wdt_mutex;
	atomic_t reset_on_going;
	atomic_t wdt_enabled;
	atomic_t ccif_irq_enabled;
	char trm_wakelock_name[32];
	struct wake_lock trm_wake_lock;
	char peer_wakelock_name[32];
	struct wake_lock peer_wake_lock;
	struct work_struct ccif_work;
	struct timer_list bus_timeout_timer;
	spinlock_t cldma_timeout_lock; /* this lock is used to protect CLDMA, not only for timeout checking */
	struct work_struct cldma_irq_work;
	struct workqueue_struct *cldma_irq_worker;
	int channel_id; /* CCIF channel */
	struct work_struct wdt_work;
#if TRAFFIC_MONITOR_INTERVAL
	unsigned tx_traffic_monitor[CLDMA_TXQ_NUM];
	unsigned rx_traffic_monitor[CLDMA_RXQ_NUM];
	unsigned tx_pre_traffic_monitor[CLDMA_TXQ_NUM];
	unsigned long long tx_done_last_start_time[CLDMA_TXQ_NUM];
	unsigned int tx_done_last_count[CLDMA_TXQ_NUM];
	struct timer_list traffic_monitor;
	unsigned long traffic_stamp;
#endif
	struct dma_pool *gpd_dmapool; /* here we assume T/R GPD/BD/SPD have the same size */
	struct cldma_ring net_tx_ring[NET_TXQ_NUM];
	struct cldma_ring net_rx_ring[NET_RXQ_NUM];
	struct cldma_ring normal_tx_ring[NORMAL_TXQ_NUM];
	struct cldma_ring normal_rx_ring[NORMAL_RXQ_NUM];
	void __iomem *cldma_ap_ao_base;
	void __iomem *cldma_md_ao_base;
	void __iomem *cldma_ap_pdn_base;
	void __iomem *cldma_md_pdn_base;
	void __iomem *md_rgu_base;
	void __iomem *l1_rgu_base;
	void __iomem *md_boot_slave_Vector;
	void __iomem *md_boot_slave_Key;
	void __iomem *md_boot_slave_En;
	void __iomem *md_global_con0;
	void __iomem *ap_ccif_base;
	void __iomem *md_ccif_base;
#ifdef MD_PEER_WAKEUP
	void __iomem *md_peer_wakeup;
#endif
	void __iomem *md_bus_status;
	void __iomem *md_pc_monitor;
	void __iomem *md_topsm_status;
	void __iomem *md_ost_status;
	void __iomem *md_pll;
	/* struct md_pll_reg md_pll_base; struct moved to platform part */
	struct md_pll_reg *md_pll_base;
	struct tasklet_struct ccif_irq_task;
	unsigned int cldma_irq_id;
	unsigned int ap_ccif_irq_id;
	unsigned int md_wdt_irq_id;
	unsigned int ap2md_bus_timeout_irq_id;
	unsigned long cldma_irq_flags;
	unsigned long ap_ccif_irq_flags;
	unsigned long md_wdt_irq_flags;
	unsigned long ap2md_bus_timeout_irq_flags;
	struct md_hw_info *hw_info;
};
struct cldma_tgpd {
	u8 gpd_flags;
	u8 gpd_checksum;
	u16 debug_id;
	u32 next_gpd_ptr;
	u32 data_buff_bd_ptr;
	u16 data_buff_len;
	u8 desc_ext_len;
	u8 non_used; /* debug: 1 for Tx in; 2 for Tx done */
} __packed;

struct cldma_rgpd {
	u8 gpd_flags;
	u8 gpd_checksum;
	u16 data_allow_len;
	u32 next_gpd_ptr;
	u32 data_buff_bd_ptr;
	u16 data_buff_len;
	u16 debug_id;
} __packed;

struct cldma_tbd {
	u8 bd_flags;
	u8 bd_checksum;
	u16 reserved;
	u32 next_bd_ptr;
	u32 data_buff_ptr;
	u16 data_buff_len;
	u8 desc_ext_len;
	u8 non_used;
} __packed;

struct cldma_rbd {
	u8 bd_flags;
	u8 bd_checksum;
	u16 data_allow_len;
	u32 next_bd_ptr;
	u32 data_buff_ptr;
	u16 data_buff_len;
	u16 reserved;
} __packed;

struct cldma_rspd {
	u8 spd_flags;
	u8 spd_checksum;
	u16 data_allow_len;
	u32 next_spd_ptr;
	u32 data_buff_ptr;
	u16 data_buff_len;
	u8 reserve_len;
	u8 spd_flags2;
} __packed;
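
/*
 * Illustrative sketch only (not part of this header): allocating one Tx GPD from the shared
 * gpd_dmapool and chaining it after a previous descriptor via the 32-bit next_gpd_ptr field,
 * which holds the DMA (bus) address of the next GPD. The function name is hypothetical.
 */
#if 0
static struct cldma_request *cldma_alloc_tgpd_sketch(struct md_cd_ctrl *md_ctrl,
						     struct cldma_request *prev)
{
	struct cldma_request *req;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return NULL;
	req->gpd = dma_pool_alloc(md_ctrl->gpd_dmapool, GFP_KERNEL, &req->gpd_addr);
	if (!req->gpd) {
		kfree(req);
		return NULL;
	}
	memset(req->gpd, 0, sizeof(struct cldma_tgpd));
	if (prev) {
		/* link the previous descriptor to the new one by DMA address */
		((struct cldma_tgpd *)prev->gpd)->next_gpd_ptr = (u32)req->gpd_addr;
	}
	return req;
}
#endif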
typedef enum {
	UNDER_BUDGET,
	REACH_BUDGET,
	PORT_REFUSE,
	NO_SKB,
	NO_REQ
} RX_COLLECT_RESULT;

enum {
	CCCI_TRACE_TX_IRQ = 0,
	CCCI_TRACE_RX_IRQ = 1,
};

static inline void md_cd_queue_struct_init(struct md_cd_queue *queue, struct ccci_modem *md,
	DIRECTION dir, unsigned char index)
{
	queue->dir = dir;
	queue->index = index;
	queue->modem = md;
	queue->napi_port = NULL;
	queue->tr_ring = NULL;
	queue->tr_done = NULL;
	queue->tx_xmit = NULL;
	init_waitqueue_head(&queue->req_wq);
	spin_lock_init(&queue->ring_lock);
	queue->debug_id = 0;
	queue->busy_count = 0;
}
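
/*
 * Illustrative usage sketch only (not part of this header): initializing every Tx/Rx queue of
 * a md_cd_ctrl instance during probe. It assumes the DIRECTION enum from ccci_core.h provides
 * OUT for Tx and IN for Rx; the function name is hypothetical.
 */
#if 0
static void md_cd_queues_init_sketch(struct md_cd_ctrl *md_ctrl, struct ccci_modem *md)
{
	int i;

	for (i = 0; i < CLDMA_TXQ_NUM; i++)
		md_cd_queue_struct_init(&md_ctrl->txq[i], md, OUT, i);
	for (i = 0; i < CLDMA_RXQ_NUM; i++)
		md_cd_queue_struct_init(&md_ctrl->rxq[i], md, IN, i);
}
#endif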
#ifndef CONFIG_MTK_ECCCI_C2K
#ifdef CONFIG_MTK_SVLTE_SUPPORT
extern void c2k_reset_modem(void);
#endif
#endif

extern void mt_irq_dump_status(int irq);
extern unsigned int ccci_get_md_debug_mode(struct ccci_modem *md);
extern u32 mt_irq_get_pending(unsigned int irq);

/* used for throttling feature - start */
extern unsigned long ccci_modem_boot_count[];
/* used for throttling feature - end */

#define GF_PORT_LIST_MAX 128
extern int gf_port_list_reg[GF_PORT_LIST_MAX];
extern int gf_port_list_unreg[GF_PORT_LIST_MAX];
extern int ccci_ipc_set_garbage_filter(struct ccci_modem *md, int reg);

#ifdef TEST_MESSAGE_FOR_BRINGUP
extern int ccci_sysmsg_echo_test(int, int);
extern int ccci_sysmsg_echo_test_l1core(int, int);
#endif

#endif /* __MODEM_CD_H__ */