/*****************************************************************************
 *
 * Filename:
 * ---------
 * ccmni.c
 *
 * Project:
 * --------
 *
 *
 * Description:
 * ------------
 * Cross Chip Modem Network Interface
 *
 * Author:
 * -------
 * Anny.Hu(mtk80401)
 *
 ****************************************************************************/
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/ipv6.h>
#include <net/ipv6.h>
#include <net/sch_generic.h>
#include <linux/skbuff.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/version.h>
#include <linux/sockios.h>
#include <linux/device.h>
#include <linux/debugfs.h>
#include "ccmni.h"

ccmni_ctl_block_t *ccmni_ctl_blk[MAX_MD_NUM];
unsigned int ccmni_debug_level;
unsigned long long net_rx_delay[4];
/********************internal function*********************/
static int get_ccmni_idx_from_ch(int md_id, int ch)
{
    ccmni_ctl_block_t *ctlb = ccmni_ctl_blk[md_id];
    int dir = ch & (1 << 16);
    unsigned int i, ch_num;

    ch_num = ch & 0xFFFF;
    /* TX ACK queue */
    if (dir && (ch_num >= CCCI_CCMNI1_DL_ACK) && (ch_num <= CCCI_CCMNI3_DL_ACK))
        return (int)(ch_num - CCCI_CCMNI1_DL_ACK);

    for (i = 0; i < ctlb->ccci_ops->ccmni_num; i++) {
        if (ctlb->ccmni_inst[i]) {
            if ((ctlb->ccmni_inst[i]->ch.rx == ch_num) || (ctlb->ccmni_inst[i]->ch.tx == ch_num))
                return i;
        } else {
            CCMNI_ERR_MSG(md_id, "invalid ccmni instance(ccmni%d): ch=0x%x\n", i, ch);
        }
    }

    CCMNI_ERR_MSG(md_id, "invalid ccmni rx channel(0x%x)\n", ch);
    return -1;
}
static void ccmni_make_etherframe(void *_eth_hdr, unsigned char *mac_addr, unsigned int packet_type)
{
    struct ethhdr *eth_hdr = _eth_hdr;

    memcpy(eth_hdr->h_dest, mac_addr, sizeof(eth_hdr->h_dest));
    memset(eth_hdr->h_source, 0, sizeof(eth_hdr->h_source));
    if (packet_type == 0x60)
        eth_hdr->h_proto = cpu_to_be16(ETH_P_IPV6);
    else
        eth_hdr->h_proto = cpu_to_be16(ETH_P_IP);
}
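
/*
 * is_ack_skb() decides whether an uplink packet is a small TCP control/ACK
 * segment: the IP datagram must fit in a 128-byte slot (minus the CCCI
 * header) and carry a TCP header with no payload and no SYN/FIN/RST flag.
 * Such packets are steered to the dedicated DL-ACK channel / fast TX queue
 * (see ccmni_select_queue() and ccmni_start_xmit()).
 */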
static inline int is_ack_skb(int md_id, struct sk_buff *skb)
{
    u32 packet_type;
    struct tcphdr *tcph;
    int ret = 0;

    packet_type = skb->data[0] & 0xF0;
    if (packet_type == IPV6_VERSION) {
        struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
        u32 total_len = sizeof(struct ipv6hdr) + ntohs(iph->payload_len);

        if (total_len <= 128 - sizeof(struct ccci_header)) {
            u8 nexthdr = iph->nexthdr;
            __be16 frag_off;
            u32 l4_off = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr, &frag_off);

            tcph = (struct tcphdr *)(skb->data + l4_off);
            if (nexthdr == IPPROTO_TCP && !tcph->syn && !tcph->fin &&
                !tcph->rst && ((total_len - l4_off) == (tcph->doff << 2)))
                ret = 1;
            if (unlikely(ccmni_debug_level & CCMNI_DBG_LEVEL_ACK_SKB)) {
                CCMNI_INF_MSG(md_id,
                    "[SKB] ack=%d: proto=%d syn=%d fin=%d rst=%d ack=%d tot_len=%d l4_off=%d doff=%d\n",
                    ret, nexthdr, tcph->syn, tcph->fin, tcph->rst,
                    tcph->ack, total_len, l4_off, tcph->doff);
            }
        } else {
            if (unlikely(ccmni_debug_level & CCMNI_DBG_LEVEL_ACK_SKB))
                CCMNI_INF_MSG(md_id, "[SKB] ack=%d: tot_len=%d\n", ret, total_len);
        }
    } else if (packet_type == IPV4_VERSION) {
        struct iphdr *iph = (struct iphdr *)skb->data;

        if (ntohs(iph->tot_len) <= 128 - sizeof(struct ccci_header)) {
            tcph = (struct tcphdr *)(skb->data + (iph->ihl << 2));
            if (iph->protocol == IPPROTO_TCP && !tcph->syn && !tcph->fin &&
                !tcph->rst && (ntohs(iph->tot_len) == (iph->ihl << 2) + (tcph->doff << 2)))
                ret = 1;
            if (unlikely(ccmni_debug_level & CCMNI_DBG_LEVEL_ACK_SKB)) {
                CCMNI_INF_MSG(md_id,
                    "[SKB] ack=%d: proto=%d syn=%d fin=%d rst=%d ack=%d tot_len=%d ihl=%d doff=%d\n",
                    ret, iph->protocol, tcph->syn, tcph->fin, tcph->rst,
                    tcph->ack, ntohs(iph->tot_len), iph->ihl, tcph->doff);
            }
        } else {
            if (unlikely(ccmni_debug_level & CCMNI_DBG_LEVEL_ACK_SKB))
                CCMNI_INF_MSG(md_id, "[SKB] ack=%d: tot_len=%d\n", ret, ntohs(iph->tot_len));
        }
    }
    return ret;
}
/********************internal debug function*********************/
#if 1
#if 0
static void ccmni_dbg_skb_addr(int md_id, bool tx, struct sk_buff *skb, int idx)
{
    CCMNI_INF_MSG(md_id, "[SKB][%s] idx=%d addr=%p len=%d data_len=%d, L2_addr=%p L3_addr=%p L4_addr=%p\n",
        tx ? "TX" : "RX", idx,
        (void *)skb->data, skb->len, skb->data_len, (void *)skb_mac_header(skb),
        (void *)skb_network_header(skb), (void *)skb_transport_header(skb));
}
#endif

static void ccmni_dbg_eth_header(int md_id, bool tx, struct ethhdr *ethh)
{
    if (ethh != NULL) {
        CCMNI_INF_MSG(md_id,
            "[SKB][%s] ethhdr: proto=0x%04x dest_mac=%02x:%02x:%02x:%02x:%02x:%02x src_mac=%02x:%02x:%02x:%02x:%02x:%02x\n",
            tx ? "TX" : "RX", ethh->h_proto, ethh->h_dest[0], ethh->h_dest[1], ethh->h_dest[2],
            ethh->h_dest[3], ethh->h_dest[4], ethh->h_dest[5], ethh->h_source[0], ethh->h_source[1],
            ethh->h_source[2], ethh->h_source[3], ethh->h_source[4], ethh->h_source[5]);
    }
}

static void ccmni_dbg_ip_header(int md_id, bool tx, struct iphdr *iph)
{
    if (iph != NULL) {
        CCMNI_INF_MSG(md_id,
            "[SKB][%s] iphdr: ihl=0x%02x ver=0x%02x tos=0x%02x tot_len=0x%04x id=0x%04x frag_off=0x%04x ttl=0x%02x proto=0x%02x check=0x%04x saddr=0x%08x daddr=0x%08x\n",
            tx ? "TX" : "RX", iph->ihl, iph->version, iph->tos, iph->tot_len, iph->id,
            iph->frag_off, iph->ttl, iph->protocol, iph->check, iph->saddr, iph->daddr);
    }
}
static void ccmni_dbg_tcp_header(int md_id, bool tx, struct tcphdr *tcph)
{
    if (tcph != NULL) {
        CCMNI_INF_MSG(md_id,
            "[SKB][%s] tcp_hdr: src=0x%04x dest=0x%04x seq=0x%08x ack_seq=0x%08x urg=%d ack=%d psh=%d rst=%d syn=%d fin=%d\n",
            tx ? "TX" : "RX", ntohs(tcph->source), ntohs(tcph->dest), tcph->seq, tcph->ack_seq,
            tcph->urg, tcph->ack, tcph->psh, tcph->rst, tcph->syn, tcph->fin);
    }
}
static void ccmni_dbg_skb_header(int md_id, bool tx, struct sk_buff *skb)
{
    struct ethhdr *ethh = NULL;
    struct iphdr *iph = NULL;
    struct ipv6hdr *ipv6h = NULL;
    struct tcphdr *tcph = NULL;
    u8 nexthdr;
    __be16 frag_off;
    u32 l4_off;

    if (!tx) {
        ethh = (struct ethhdr *)(skb->data - ETH_HLEN);
        ccmni_dbg_eth_header(md_id, tx, ethh);
    }

    if (skb->protocol == htons(ETH_P_IP)) {
        iph = (struct iphdr *)skb->data;
        ccmni_dbg_ip_header(md_id, tx, iph);
        if (iph->protocol == IPPROTO_TCP) {
            tcph = (struct tcphdr *)(skb->data + (iph->ihl << 2));
            ccmni_dbg_tcp_header(md_id, tx, tcph);
        }
    } else if (skb->protocol == htons(ETH_P_IPV6)) {
        ipv6h = (struct ipv6hdr *)skb->data;
        nexthdr = ipv6h->nexthdr;
        if (nexthdr == IPPROTO_TCP) {
            l4_off = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr, &frag_off);
            tcph = (struct tcphdr *)(skb->data + l4_off);
            ccmni_dbg_tcp_header(md_id, tx, tcph);
        }
    }
}
#endif
/* ccmni debug sys file create */
int ccmni_debug_file_init(int md_id)
{
    int result = -1;
    char fname[16];
    struct dentry *dentry1, *dentry2, *dentry3;

    CCMNI_INF_MSG(md_id, "ccmni_debug_file_init\n");

    dentry1 = debugfs_create_dir("ccmni", NULL);
    if (!dentry1) {
        CCMNI_ERR_MSG(md_id, "create /proc/ccmni fail\n");
        return -ENOENT;
    }

    snprintf(fname, 16, "md%d", (md_id + 1));
    dentry2 = debugfs_create_dir(fname, dentry1);
    if (!dentry2) {
        CCMNI_ERR_MSG(md_id, "create /proc/ccmni/md%d fail\n", (md_id + 1));
        return -ENOENT;
    }

    dentry3 = debugfs_create_u32("debug_level", 0600, dentry2, &ccmni_debug_level);
    result = PTR_ERR(dentry3);
    if (IS_ERR(dentry3) && result != -ENODEV) {
        CCMNI_ERR_MSG(md_id, "create /proc/ccmni/md%d/debug_level fail: %d\n", (md_id + 1), result);
        return -ENOENT;
    }

    return 0;
}

/********************netdev register function********************/
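/*
 * ndo_select_queue hook: on the interfaces that have a dedicated ACK channel
 * (CCMNI1/CCMNI2), pure TCP ACKs detected by is_ack_skb() are mapped to
 * CCMNI_TXQ_FAST; everything else goes to CCMNI_TXQ_NORMAL.
 */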
static u16 ccmni_select_queue(struct net_device *dev, struct sk_buff *skb,
                              void *accel_priv, select_queue_fallback_t fallback)
{
    ccmni_instance_t *ccmni = (ccmni_instance_t *)netdev_priv(dev);

    if (ccmni->ch.rx == CCCI_CCMNI1_RX || ccmni->ch.rx == CCCI_CCMNI2_RX) {
        if (is_ack_skb(ccmni->md_id, skb))
            return CCMNI_TXQ_FAST;
        else
            return CCMNI_TXQ_NORMAL;
    } else
        return CCMNI_TXQ_NORMAL;
}
static int ccmni_open(struct net_device *dev)
{
    ccmni_instance_t *ccmni = (ccmni_instance_t *)netdev_priv(dev);
    ccmni_ctl_block_t *ccmni_ctl = ccmni_ctl_blk[ccmni->md_id];
    ccmni_instance_t *ccmni_tmp = NULL;

    if (unlikely(ccmni_ctl == NULL)) {
        CCMNI_ERR_MSG(ccmni->md_id, "%s_Open: MD%d ctlb is NULL\n", dev->name, ccmni->md_id);
        return -1;
    }

    if (ccmni_ctl->ccci_ops->md_ability & MODEM_CAP_CCMNI_MQ)
        netif_tx_start_all_queues(dev);
    else
        netif_start_queue(dev);

    if (unlikely(ccmni_ctl->ccci_ops->md_ability & MODEM_CAP_NAPI)) {
        napi_enable(&ccmni->napi);
        napi_schedule(&ccmni->napi);
    }

    atomic_inc(&ccmni->usage);
    ccmni_tmp = ccmni_ctl->ccmni_inst[ccmni->index];
    if (ccmni != ccmni_tmp)
        atomic_inc(&ccmni_tmp->usage);

    CCMNI_INF_MSG(ccmni->md_id, "%s_Open: cnt=(%d,%d), md_ab=0x%X\n",
        dev->name, atomic_read(&ccmni->usage),
        atomic_read(&ccmni_tmp->usage), ccmni_ctl->ccci_ops->md_ability);
    return 0;
}
static int ccmni_close(struct net_device *dev)
{
    ccmni_instance_t *ccmni = (ccmni_instance_t *)netdev_priv(dev);
    ccmni_ctl_block_t *ccmni_ctl = ccmni_ctl_blk[ccmni->md_id];
    ccmni_instance_t *ccmni_tmp = NULL;

    if (unlikely(ccmni_ctl == NULL)) {
        CCMNI_ERR_MSG(ccmni->md_id, "%s_Close: MD%d ctlb is NULL\n", dev->name, ccmni->md_id);
        return -1;
    }

    atomic_dec(&ccmni->usage);
    ccmni_tmp = ccmni_ctl->ccmni_inst[ccmni->index];
    if (ccmni != ccmni_tmp)
        atomic_dec(&ccmni_tmp->usage);

    if (ccmni_ctl->ccci_ops->md_ability & MODEM_CAP_CCMNI_MQ)
        netif_tx_disable(dev);
    else
        netif_stop_queue(dev);

    if (unlikely(ccmni_ctl->ccci_ops->md_ability & MODEM_CAP_NAPI))
        napi_disable(&ccmni->napi);

    CCMNI_INF_MSG(ccmni->md_id, "%s_Close: cnt=(%d, %d)\n",
        dev->name, atomic_read(&ccmni->usage), atomic_read(&ccmni_tmp->usage));
    return 0;
}
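
/*
 * ndo_start_xmit hook: validates packet length and CCCI headroom, picks the
 * uplink CCCI channel (the DL-ACK channel for pure TCP ACKs when the modem
 * advertises MODEM_CAP_DATA_ACK_DVD), then hands the skb to the CCCI layer
 * via ccci_ops->send_pkt(). CCMNI_ERR_TX_BUSY makes the stack retry by
 * returning NETDEV_TX_BUSY; other errors drop the skb and count tx_dropped.
 */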
static int ccmni_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
    int ret;
    int skb_len = skb->len;
    int tx_ch, ccci_tx_ch;
    ccmni_instance_t *ccmni = (ccmni_instance_t *)netdev_priv(dev);
    ccmni_ctl_block_t *ctlb = ccmni_ctl_blk[ccmni->md_id];
    unsigned int is_ack = 0;

    /* dev->mtu may have been changed by the upper layer */
    if (unlikely(skb->len > dev->mtu)) {
        CCMNI_ERR_MSG(ccmni->md_id, "CCMNI%d write fail: len(0x%x)>MTU(0x%x, 0x%x)\n",
            ccmni->index, skb->len, CCMNI_MTU, dev->mtu);
        dev_kfree_skb(skb);
        dev->stats.tx_dropped++;
        return NETDEV_TX_OK;
    }

    if (unlikely(skb_headroom(skb) < sizeof(struct ccci_header))) {
        CCMNI_ERR_MSG(ccmni->md_id, "CCMNI%d write fail: header room(%d) < ccci_header(%d)\n",
            ccmni->index, skb_headroom(skb), dev->hard_header_len);
        dev_kfree_skb(skb);
        dev->stats.tx_dropped++;
        return NETDEV_TX_OK;
    }

    ccci_tx_ch = tx_ch = ccmni->ch.tx;
    if (ctlb->ccci_ops->md_ability & MODEM_CAP_DATA_ACK_DVD) {
        if (ccmni->ch.rx == CCCI_CCMNI1_RX || ccmni->ch.rx == CCCI_CCMNI2_RX) {
            is_ack = is_ack_skb(ccmni->md_id, skb);
            if (is_ack)
                ccci_tx_ch = (ccmni->ch.tx == CCCI_CCMNI1_TX) ? CCCI_CCMNI1_DL_ACK : CCCI_CCMNI2_DL_ACK;
            else
                ccci_tx_ch = ccmni->ch.tx;
        }
    }

    if (unlikely(ccmni_debug_level & CCMNI_DBG_LEVEL_TX)) {
        CCMNI_INF_MSG(ccmni->md_id, "[TX]CCMNI%d head_len=%d len=%d ack=%d tx_ch=%d\n",
            ccmni->index, skb_headroom(skb), skb->len, is_ack, ccci_tx_ch);
    }

    if (unlikely(ccmni_debug_level & CCMNI_DBG_LEVEL_TX_SKB))
        ccmni_dbg_skb_header(ccmni->md_id, true, skb);

    ret = ctlb->ccci_ops->send_pkt(ccmni->md_id, ccci_tx_ch, skb);
    if (ret == CCMNI_ERR_MD_NO_READY || ret == CCMNI_ERR_TX_INVAL) {
        dev_kfree_skb(skb);
        dev->stats.tx_dropped++;
        ccmni->tx_busy_cnt = 0;
        CCMNI_ERR_MSG(ccmni->md_id, "[TX]CCMNI%d send pkt fail: %d\n", ccmni->index, ret);
        return NETDEV_TX_OK;
    } else if (ret == CCMNI_ERR_TX_BUSY) {
        goto tx_busy;
    }

    dev->stats.tx_packets++;
    dev->stats.tx_bytes += skb_len;
    if (ccmni->tx_busy_cnt > 10) {
        CCMNI_ERR_MSG(ccmni->md_id, "[TX]CCMNI%d TX busy: tx_pkt=%ld retry %ld times done\n",
            ccmni->index, dev->stats.tx_packets, ccmni->tx_busy_cnt);
    }
    ccmni->tx_busy_cnt = 0;
    return NETDEV_TX_OK;

tx_busy:
    if (unlikely(!(ctlb->ccci_ops->md_ability & MODEM_CAP_TXBUSY_STOP))) {
        if ((ccmni->tx_busy_cnt++) % 100 == 0)
            CCMNI_ERR_MSG(ccmni->md_id, "[TX]CCMNI%d TX busy: retry_times=%ld\n",
                ccmni->index, ccmni->tx_busy_cnt);
    } else {
        ccmni->tx_busy_cnt++;
    }
    return NETDEV_TX_BUSY;
}
static int ccmni_change_mtu(struct net_device *dev, int new_mtu)
{
    ccmni_instance_t *ccmni = (ccmni_instance_t *)netdev_priv(dev);

    if (new_mtu > CCMNI_MTU)
        return -EINVAL;

    dev->mtu = new_mtu;
    CCMNI_INF_MSG(ccmni->md_id, "CCMNI%d change mtu_siz=%d\n", ccmni->index, new_mtu);
    return 0;
}
static void ccmni_tx_timeout(struct net_device *dev)
{
    ccmni_instance_t *ccmni = (ccmni_instance_t *)netdev_priv(dev);
    ccmni_ctl_block_t *ccmni_ctl = ccmni_ctl_blk[ccmni->md_id];

    CCMNI_INF_MSG(ccmni->md_id, "ccmni%d_tx_timeout: usage_cnt=%d, timeout=%ds\n",
        ccmni->index, atomic_read(&ccmni->usage), (ccmni->dev->watchdog_timeo / HZ));

    dev->stats.tx_errors++;
    if (atomic_read(&ccmni->usage) > 0) {
        if (ccmni_ctl->ccci_ops->md_ability & MODEM_CAP_CCMNI_MQ)
            netif_tx_wake_all_queues(dev);
        else
            netif_wake_queue(dev);
    }
}
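
/*
 * Private ioctls used by user space (e.g. RILD):
 *   SIOCSTXQSTATE - start/stop the TX queues of an interface without
 *     bringing it down; the low nibble of ifru_ivalue selects stop(0)/start,
 *     bits 15..8 identify the caller and bits 31..16 carry an optional
 *     watchdog timeout (in seconds) used while the queue is stopped.
 *   SIOCCCMNICFG - switch a ccmni interface to another modem for IRAT by
 *     copying that modem's ccmni instance into this netdev's private data.
 */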
static int ccmni_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
    int md_id, md_id_irat, usage_cnt;
    ccmni_instance_t *ccmni_irat;
    ccmni_instance_t *ccmni = (ccmni_instance_t *)netdev_priv(dev);
    ccmni_instance_t *ccmni_tmp = NULL;
    ccmni_ctl_block_t *ctlb = NULL;
    ccmni_ctl_block_t *ctlb_irat = NULL;
    unsigned int timeout = 0;

    switch (cmd) {
    case SIOCSTXQSTATE:
        /* ifru_ivalue[3~0]: start/stop; ifru_ivalue[7~4]: reserved; */
        /* ifru_ivalue[15~8]: user id, bit8=rild, bit9=thermal */
        /* ifru_ivalue[31~16]: watchdog timeout value */
        ctlb = ccmni_ctl_blk[ccmni->md_id];
        if ((ifr->ifr_ifru.ifru_ivalue & 0xF) == 0) {
            if (atomic_read(&ccmni->usage) > 0) {
                atomic_dec(&ccmni->usage);
                if (ctlb->ccci_ops->md_ability & MODEM_CAP_CCMNI_MQ)
                    netif_tx_disable(dev);
                else
                    netif_stop_queue(dev);
                /* stopping the queue won't stop the Tx watchdog (ndo_tx_timeout) */
                timeout = (ifr->ifr_ifru.ifru_ivalue & 0xFFFF0000) >> 16;
                if (timeout == 0)
                    dev->watchdog_timeo = 60 * HZ;
                else
                    dev->watchdog_timeo = timeout * HZ;
                ccmni_tmp = ctlb->ccmni_inst[ccmni->index];
                if (ccmni_tmp != ccmni) { /* iRAT ccmni */
                    usage_cnt = atomic_read(&ccmni->usage);
                    atomic_set(&ccmni_tmp->usage, usage_cnt);
                }
            }
        } else {
            if (atomic_read(&ccmni->usage) <= 0) {
                if (netif_running(dev)) {
                    if (ctlb->ccci_ops->md_ability & MODEM_CAP_CCMNI_MQ)
                        netif_tx_wake_all_queues(dev);
                    else
                        netif_wake_queue(dev);
                }
                dev->watchdog_timeo = CCMNI_NETDEV_WDT_TO;
                atomic_inc(&ccmni->usage);
                ccmni_tmp = ctlb->ccmni_inst[ccmni->index];
                if (ccmni_tmp != ccmni) { /* iRAT ccmni */
                    usage_cnt = atomic_read(&ccmni->usage);
                    atomic_set(&ccmni_tmp->usage, usage_cnt);
                }
            }
        }
        if (likely(ccmni_tmp != NULL)) {
            CCMNI_INF_MSG(ccmni->md_id, "SIOCSTXQSTATE: %s_state=0x%x, cnt=(%d, %d)\n",
                dev->name, ifr->ifr_ifru.ifru_ivalue, atomic_read(&ccmni->usage),
                atomic_read(&ccmni_tmp->usage));
        } else {
            CCMNI_INF_MSG(ccmni->md_id, "SIOCSTXQSTATE: %s_state=0x%x, cnt=%d\n",
                dev->name, ifr->ifr_ifru.ifru_ivalue, atomic_read(&ccmni->usage));
        }
        break;

    case SIOCCCMNICFG:
        md_id_irat = ifr->ifr_ifru.ifru_ivalue;
        md_id = ccmni->md_id;
        if (md_id_irat < 0 || md_id_irat >= MAX_MD_NUM) {
            CCMNI_ERR_MSG(md_id, "SIOCSCCMNICFG: %s invalid md_id(%d)\n",
                dev->name, (ifr->ifr_ifru.ifru_ivalue + 1));
            return -EINVAL;
        }
        if (dev != ccmni->dev) {
            CCMNI_INF_MSG(md_id, "SIOCCCMNICFG: %s iRAT on MD%d, diff dev(%s->%s)\n",
                dev->name, (ifr->ifr_ifru.ifru_ivalue + 1), ccmni->dev->name, dev->name);
            ccmni->dev = dev;
            atomic_set(&ccmni->usage, 0);
            ccmni->tx_busy_cnt = 0;
            break;
        }
        if (md_id_irat == ccmni->md_id) {
            CCMNI_INF_MSG(md_id, "SIOCCCMNICFG: %s iRAT on the same MD%d, cnt=%d\n",
                dev->name, (ifr->ifr_ifru.ifru_ivalue + 1), atomic_read(&ccmni->usage));
            break;
        }
        ctlb_irat = ccmni_ctl_blk[md_id_irat];
        if (ccmni->index >= ctlb_irat->ccci_ops->ccmni_num) {
            CCMNI_ERR_MSG(md_id, "SIOCSCCMNICFG: %s iRAT fail, ccmni_idx(%d) > md%d_ccmni_num(%d)\n",
                dev->name, ccmni->index, md_id, ctlb_irat->ccci_ops->ccmni_num);
            break;
        }

        ccmni_irat = ctlb_irat->ccmni_inst[ccmni->index];
        usage_cnt = atomic_read(&ccmni->usage);
        atomic_set(&ccmni_irat->usage, usage_cnt);
        memcpy(netdev_priv(dev), ccmni_irat, sizeof(ccmni_instance_t));

        ctlb = ccmni_ctl_blk[md_id];
        ccmni_tmp = ctlb->ccmni_inst[ccmni->index];
        atomic_set(&ccmni_tmp->usage, usage_cnt);
        ccmni_tmp->tx_busy_cnt = ccmni->tx_busy_cnt;

        CCMNI_INF_MSG(md_id,
            "SIOCCCMNICFG: %s iRAT MD%d->MD%d, dev_cnt=%d, md_cnt=%d, md_irat_cnt=%d\n",
            dev->name, (md_id + 1), (ifr->ifr_ifru.ifru_ivalue + 1), atomic_read(&ccmni->usage),
            atomic_read(&ccmni_tmp->usage), atomic_read(&ccmni_irat->usage));
        break;

    default:
        CCMNI_ERR_MSG(ccmni->md_id, "%s: unknown ioctl cmd=%x\n", dev->name, cmd);
        break;
    }

    return 0;
}
static const struct net_device_ops ccmni_netdev_ops = {
    .ndo_open = ccmni_open,
    .ndo_stop = ccmni_close,
    .ndo_start_xmit = ccmni_start_xmit,
    .ndo_tx_timeout = ccmni_tx_timeout,
    .ndo_do_ioctl = ccmni_ioctl,
    .ndo_change_mtu = ccmni_change_mtu,
    .ndo_select_queue = ccmni_select_queue,
};
static int ccmni_napi_poll(struct napi_struct *napi, int budget)
{
    ccmni_instance_t *ccmni = (ccmni_instance_t *)netdev_priv(napi->dev);
    int md_id = ccmni->md_id;
    ccmni_ctl_block_t *ctlb = ccmni_ctl_blk[md_id];

    del_timer(&ccmni->timer);

    if (ctlb->ccci_ops->napi_poll)
        return ctlb->ccci_ops->napi_poll(md_id, ccmni->ch.rx, napi, budget);
    else
        return 0;
}

static void ccmni_napi_poll_timeout(unsigned long data)
{
    ccmni_instance_t *ccmni = (ccmni_instance_t *)data;

    CCMNI_ERR_MSG(ccmni->md_id, "CCMNI%d lost NAPI polling\n", ccmni->index);
}
/********************ccmni driver register ccci function********************/
static inline int ccmni_inst_init(int md_id, ccmni_instance_t *ccmni, struct net_device *dev)
{
    ccmni_ctl_block_t *ctlb = ccmni_ctl_blk[md_id];
    struct ccmni_ch channel;
    int ret = 0;

    ret = ctlb->ccci_ops->get_ccmni_ch(md_id, ccmni->index, &channel);
    if (ret) {
        CCMNI_ERR_MSG(md_id, "get ccmni%d channel fail\n", ccmni->index);
        return ret;
    }

    ccmni->dev = dev;
    ccmni->ctlb = ctlb;
    ccmni->md_id = md_id;

    /* ccmni tx/rx channel setting */
    ccmni->ch.rx = channel.rx;
    ccmni->ch.rx_ack = channel.rx_ack;
    ccmni->ch.tx = channel.tx;
    ccmni->ch.tx_ack = channel.tx_ack;

    /* register napi device */
    if (dev && (ctlb->ccci_ops->md_ability & MODEM_CAP_NAPI)) {
        init_timer(&ccmni->timer);
        ccmni->timer.function = ccmni_napi_poll_timeout;
        ccmni->timer.data = (unsigned long)ccmni;
        netif_napi_add(dev, &ccmni->napi, ccmni_napi_poll, ctlb->ccci_ops->napi_poll_weigh);
    }

    atomic_set(&ccmni->usage, 0);
    spin_lock_init(&ccmni->spinlock);

    return ret;
}
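
/*
 * ccmni_init() is called by the CCCI layer for each modem. It allocates the
 * control block, copies the ccci_ops callbacks, creates one net_device per
 * ccmni channel (with two TX queues when MODEM_CAP_CCMNI_MQ is set) and
 * registers it. When MODEM_CAP_CCMNI_IRAT is set, no second set of
 * net_devices is created; this modem's ccmni instances are attached to the
 * net_devices of the IRAT source modem instead.
 */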
static int ccmni_init(int md_id, ccmni_ccci_ops_t *ccci_info)
{
    int i = 0, j = 0, ret = 0;
    ccmni_ctl_block_t *ctlb = NULL;
    ccmni_ctl_block_t *ctlb_irat_src = NULL;
    ccmni_instance_t *ccmni = NULL;
    ccmni_instance_t *ccmni_irat_src = NULL;
    struct net_device *dev = NULL;

    if (unlikely(ccci_info->md_ability & MODEM_CAP_CCMNI_DISABLE)) {
        CCMNI_ERR_MSG(md_id, "no need init ccmni: md_ability=0x%08X\n", ccci_info->md_ability);
        return 0;
    }

    ctlb = kzalloc(sizeof(ccmni_ctl_block_t), GFP_KERNEL);
    if (unlikely(ctlb == NULL)) {
        CCMNI_ERR_MSG(md_id, "alloc ccmni ctl struct fail\n");
        return -ENOMEM;
    }

    ctlb->ccci_ops = kzalloc(sizeof(ccmni_ccci_ops_t), GFP_KERNEL);
    if (unlikely(ctlb->ccci_ops == NULL)) {
        CCMNI_ERR_MSG(md_id, "alloc ccmni_ccci_ops struct fail\n");
        ret = -ENOMEM;
        goto alloc_mem_fail;
    }

    ccmni_ctl_blk[md_id] = ctlb;
    memcpy(ctlb->ccci_ops, ccci_info, sizeof(ccmni_ccci_ops_t));
    CCMNI_INF_MSG(md_id,
        "ccmni_init: ccmni_num=%d, md_ability=0x%08x, irat_en=%08x, irat_md_id=%d, send_pkt=%p, get_ccmni_ch=%p, name=%s\n",
        ctlb->ccci_ops->ccmni_num, ctlb->ccci_ops->md_ability,
        (ctlb->ccci_ops->md_ability & MODEM_CAP_CCMNI_IRAT),
        ctlb->ccci_ops->irat_md_id, ctlb->ccci_ops->send_pkt,
        ctlb->ccci_ops->get_ccmni_ch, ctlb->ccci_ops->name);

    ccmni_debug_file_init(md_id);

    if (((ctlb->ccci_ops->md_ability & MODEM_CAP_CCMNI_IRAT) == 0) ||
        ((ctlb->ccci_ops->md_ability & MODEM_CAP_WORLD_PHONE) != 0)) {
        for (i = 0; i < ctlb->ccci_ops->ccmni_num; i++) {
            /* allocate netdev */
            if (ctlb->ccci_ops->md_ability & MODEM_CAP_CCMNI_MQ)
                /* alloc multiple tx queues: 2 txq and 1 rxq */
                dev = alloc_etherdev_mqs(sizeof(ccmni_instance_t), 2, 1);
            else
                dev = alloc_etherdev(sizeof(ccmni_instance_t));
            if (unlikely(dev == NULL)) {
                CCMNI_ERR_MSG(md_id, "alloc netdev fail\n");
                ret = -ENOMEM;
                goto alloc_netdev_fail;
            }

            /* init net device */
            dev->header_ops = NULL;
            dev->mtu = CCMNI_MTU;
            dev->tx_queue_len = CCMNI_TX_QUEUE;
            dev->watchdog_timeo = CCMNI_NETDEV_WDT_TO;
            dev->flags = (IFF_NOARP | IFF_BROADCAST) & /* ccmni is a pure IP device */
                         (~IFF_MULTICAST);             /* ccmni is P2P */
            dev->features = NETIF_F_VLAN_CHALLENGED; /* no VLAN support */
            if (ctlb->ccci_ops->md_ability & MODEM_CAP_SGIO) {
                dev->features |= NETIF_F_SG;
                dev->hw_features |= NETIF_F_SG;
            }
            dev->addr_len = ETH_ALEN; /* ethernet address length */
            dev->destructor = free_netdev;
            dev->hard_header_len += sizeof(struct ccci_header); /* reserve Tx CCCI header room */
            dev->netdev_ops = &ccmni_netdev_ops;
            random_ether_addr((u8 *)dev->dev_addr);

            sprintf(dev->name, "%s%d", ctlb->ccci_ops->name, i);
            CCMNI_INF_MSG(md_id, "register netdev name: %s\n", dev->name);

            /* init private structure of netdev */
            ccmni = netdev_priv(dev);
            ccmni->index = i;
            ret = ccmni_inst_init(md_id, ccmni, dev);
            if (ret) {
                CCMNI_ERR_MSG(md_id, "initial ccmni instance fail\n");
                goto alloc_netdev_fail;
            }
            ctlb->ccmni_inst[i] = ccmni;

            /* register net device */
            ret = register_netdev(dev);
            if (ret) {
                CCMNI_ERR_MSG(md_id, "CCMNI%d register netdev fail: %d\n", i, ret);
                goto alloc_netdev_fail;
            }

            CCMNI_DBG_MSG(ccmni->md_id, "CCMNI%d=%p, ctlb=%p, ctlb_ops=%p, dev=%p\n",
                i, ccmni, ccmni->ctlb, ccmni->ctlb->ccci_ops, ccmni->dev);
        }
    }

    if ((ctlb->ccci_ops->md_ability & MODEM_CAP_CCMNI_IRAT) != 0) {
        if (ctlb->ccci_ops->irat_md_id < 0 || ctlb->ccci_ops->irat_md_id >= MAX_MD_NUM) {
            CCMNI_ERR_MSG(md_id, "md%d IRAT fail because invalid irat md(%d)\n",
                md_id, ctlb->ccci_ops->irat_md_id);
            ret = -EINVAL;
            goto alloc_mem_fail;
        }

        ctlb_irat_src = ccmni_ctl_blk[ctlb->ccci_ops->irat_md_id];
        if (!ctlb_irat_src) {
            CCMNI_ERR_MSG(md_id, "md%d IRAT fail because irat md%d ctlb is NULL\n",
                md_id, ctlb->ccci_ops->irat_md_id);
            ret = -EINVAL;
            goto alloc_mem_fail;
        }

        if (unlikely(ctlb->ccci_ops->ccmni_num > ctlb_irat_src->ccci_ops->ccmni_num)) {
            CCMNI_ERR_MSG(md_id, "IRAT fail because number of src(%d) and dest(%d) ccmni isn't equal\n",
                ctlb_irat_src->ccci_ops->ccmni_num, ctlb->ccci_ops->ccmni_num);
            ret = -EINVAL;
            goto alloc_mem_fail;
        }

        for (i = 0; i < ctlb->ccci_ops->ccmni_num; i++) {
            if ((ctlb->ccci_ops->md_ability & MODEM_CAP_WORLD_PHONE) != 0)
                ccmni = ctlb->ccmni_inst[i];
            else
                ccmni = kzalloc(sizeof(ccmni_instance_t), GFP_KERNEL);
            if (unlikely(ccmni == NULL)) {
                CCMNI_ERR_MSG(md_id, "alloc ccmni instance fail\n");
                ret = -ENOMEM;
                goto alloc_mem_fail;
            }

            ccmni_irat_src = kzalloc(sizeof(ccmni_instance_t), GFP_KERNEL);
            if (unlikely(ccmni_irat_src == NULL)) {
                CCMNI_ERR_MSG(md_id, "alloc ccmni_irat instance fail\n");
                kfree(ccmni);
                ret = -ENOMEM;
                goto alloc_mem_fail;
            }

            /* initialize irat ccmni instance */
            ccmni->index = i;
            dev = ctlb_irat_src->ccmni_inst[i]->dev;
            if ((ctlb->ccci_ops->md_ability & MODEM_CAP_WORLD_PHONE) != 0)
                ccmni->dev = dev;
            else {
                ret = ccmni_inst_init(md_id, ccmni, dev);
                if (ret) {
                    CCMNI_ERR_MSG(md_id, "initial ccmni instance fail\n");
                    kfree(ccmni);
                    kfree(ccmni_irat_src);
                    goto alloc_mem_fail;
                }
                ctlb->ccmni_inst[i] = ccmni;
            }

            /* snapshot the irat source ccmni instance */
            memcpy(ccmni_irat_src, ctlb_irat_src->ccmni_inst[i], sizeof(ccmni_instance_t));
            ctlb_irat_src->ccmni_inst[i] = ccmni_irat_src;

            CCMNI_DBG_MSG(ccmni->md_id, "[IRAT]CCMNI%d=%p, ctlb=%p, ctlb_ops=%p, dev=%p\n",
                i, ccmni, ccmni->ctlb, ccmni->ctlb->ccci_ops, ccmni->dev);
        }
    }

    snprintf(ctlb->wakelock_name, sizeof(ctlb->wakelock_name), "ccmni_md%d", (md_id + 1));
    wake_lock_init(&ctlb->ccmni_wakelock, WAKE_LOCK_SUSPEND, ctlb->wakelock_name);

    return 0;

alloc_netdev_fail:
    if (dev) {
        free_netdev(dev);
        ctlb->ccmni_inst[i] = NULL;
    }
    for (j = i - 1; j >= 0; j--) {
        ccmni = ctlb->ccmni_inst[j];
        unregister_netdev(ccmni->dev);
        /* free_netdev(ccmni->dev); */
        ctlb->ccmni_inst[j] = NULL;
    }

alloc_mem_fail:
    kfree(ctlb->ccci_ops);
    kfree(ctlb);
    ccmni_ctl_blk[md_id] = NULL;
    return ret;
}
static void ccmni_exit(int md_id)
{
    int i = 0;
    ccmni_ctl_block_t *ctlb = NULL;
    ccmni_instance_t *ccmni = NULL;

    CCMNI_INF_MSG(md_id, "ccmni_exit\n");

    ctlb = ccmni_ctl_blk[md_id];
    if (ctlb) {
        if (ctlb->ccci_ops == NULL)
            goto ccmni_exit_ret;

        for (i = 0; i < ctlb->ccci_ops->ccmni_num; i++) {
            ccmni = ctlb->ccmni_inst[i];
            if (ccmni) {
                CCMNI_INF_MSG(md_id, "ccmni_exit: unregister ccmni%d dev\n", i);
                unregister_netdev(ccmni->dev);
                /* free_netdev(ccmni->dev); */
                ctlb->ccmni_inst[i] = NULL;
            }
        }

        kfree(ctlb->ccci_ops);

ccmni_exit_ret:
        kfree(ctlb);
        ccmni_ctl_blk[md_id] = NULL;
    }
}
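
/*
 * Downlink path: the CCCI layer hands over a stripped IP packet. The callback
 * looks up the ccmni instance from the RX channel, builds a fake ethernet
 * header in the skb headroom (the modem delivers raw IP), and injects the
 * skb into the stack with netif_receive_skb() in NAPI mode, or with
 * netif_rx()/netif_rx_ni() otherwise.
 */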
static int ccmni_rx_callback(int md_id, int rx_ch, struct sk_buff *skb, void *priv_data)
{
    ccmni_ctl_block_t *ctlb = ccmni_ctl_blk[md_id];
    /* struct ccci_header *ccci_h = (struct ccci_header *)skb->data; */
    ccmni_instance_t *ccmni = NULL;
    struct net_device *dev = NULL;
    int pkt_type, skb_len, ccmni_idx;

    if (unlikely(ctlb == NULL || ctlb->ccci_ops == NULL)) {
        CCMNI_ERR_MSG(md_id, "invalid CCMNI ctrl/ops struct for RX_CH(%d)\n", rx_ch);
        dev_kfree_skb(skb);
        return -1;
    }

    ccmni_idx = get_ccmni_idx_from_ch(md_id, rx_ch);
    if (unlikely(ccmni_idx < 0)) {
        CCMNI_ERR_MSG(md_id, "CCMNI rx(%d) skb ch error\n", rx_ch);
        dev_kfree_skb(skb);
        return -1;
    }

    ccmni = ctlb->ccmni_inst[ccmni_idx];
    dev = ccmni->dev;

    /* skb_pull(skb, sizeof(struct ccci_header)); */
    pkt_type = skb->data[0] & 0xF0;
    ccmni_make_etherframe(skb->data - ETH_HLEN, dev->dev_addr, pkt_type);
    skb_set_mac_header(skb, -ETH_HLEN);
    skb->dev = dev;
    if (pkt_type == 0x60)
        skb->protocol = htons(ETH_P_IPV6);
    else
        skb->protocol = htons(ETH_P_IP);
    skb->ip_summed = CHECKSUM_NONE;
    skb_len = skb->len;

    if (unlikely(ccmni_debug_level & CCMNI_DBG_LEVEL_RX))
        CCMNI_INF_MSG(md_id, "[RX]CCMNI%d(rx_ch=%d) recv data_len=%d\n", ccmni_idx, rx_ch, skb->len);

    if (unlikely(ccmni_debug_level & CCMNI_DBG_LEVEL_RX_SKB))
        ccmni_dbg_skb_header(ccmni->md_id, false, skb);

#if defined(NETDEV_TRACE) && defined(NETDEV_DL_TRACE)
    skb->mark &= 0x0FFFFFFF;
    skb->mark |= (0x1 << 28);
#endif

#if defined(CCCI_SKB_TRACE)
    struct iphdr *iph = (struct iphdr *)skb->data;

    net_rx_delay[2] = iph->id;
    net_rx_delay[3] = sched_clock();
#endif

    if (likely(ctlb->ccci_ops->md_ability & MODEM_CAP_NAPI)) {
        netif_receive_skb(skb);
    } else {
        if (!in_interrupt())
            netif_rx_ni(skb);
        else
            netif_rx(skb);
    }
    dev->stats.rx_packets++;
    dev->stats.rx_bytes += skb_len;

#if defined(CCCI_SKB_TRACE)
    net_rx_delay[3] = sched_clock() - net_rx_delay[3];
    net_rx_delay[0] = dev->stats.rx_bytes - 40 * dev->stats.rx_packets;
    net_rx_delay[1] = dev->stats.tx_bytes - 40 * dev->stats.tx_packets;
#endif

    wake_lock_timeout(&ctlb->ccmni_wakelock, HZ);
    return 0;
}
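
/*
 * Modem/queue state notifications from the CCCI layer: READY and
 * EXCEPTION/RESET toggle the carrier, RX_IRQ kicks NAPI polling, and
 * TX_IRQ/TX_FULL wake or stop the TX queue matching the reported channel
 * (the fast queue for the DL-ACK channels, the normal queue otherwise).
 */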
static void ccmni_md_state_callback(int md_id, int rx_ch, MD_STATE state)
{
    ccmni_ctl_block_t *ctlb = ccmni_ctl_blk[md_id];
    ccmni_instance_t *ccmni = NULL;
    int ccmni_idx = 0;
    unsigned int ch_num = rx_ch & 0xFFFF;
    struct netdev_queue *net_queue = NULL;

    if (unlikely(ctlb == NULL)) {
        CCMNI_ERR_MSG(md_id, "invalid ccmni ctrl struct when rx_ch=%d md_sta=%d\n", rx_ch, state);
        return;
    }

    ccmni_idx = get_ccmni_idx_from_ch(md_id, rx_ch);
    if (unlikely(ccmni_idx < 0)) {
        CCMNI_ERR_MSG(md_id, "get error ccmni index when md_sta=%d\n", state);
        return;
    }

    ccmni = ctlb->ccmni_inst[ccmni_idx];
    if ((state != TX_IRQ) && (state != TX_FULL) && (atomic_read(&ccmni->usage) > 0)) {
        CCMNI_INF_MSG(md_id, "md_state_cb: CCMNI%d, md_sta=%d, usage=%d\n",
            ccmni_idx, state, atomic_read(&ccmni->usage));
    }

    switch (state) {
    case READY:
        netif_carrier_on(ccmni->dev);
        ccmni->tx_seq_num[0] = 0;
        ccmni->tx_seq_num[1] = 0;
        ccmni->rx_seq_num = 0;
        break;

    case EXCEPTION:
    case RESET:
        netif_carrier_off(ccmni->dev);
        break;

    case RX_IRQ:
        mod_timer(&ccmni->timer, jiffies + HZ);
        napi_schedule(&ccmni->napi);
        wake_lock_timeout(&ctlb->ccmni_wakelock, HZ);
        break;

    case TX_IRQ:
        if (netif_running(ccmni->dev) && atomic_read(&ccmni->usage) > 0) {
            if (likely(ctlb->ccci_ops->md_ability & MODEM_CAP_CCMNI_MQ)) {
                if ((ch_num == CCCI_CCMNI1_DL_ACK) || (ch_num == CCCI_CCMNI2_DL_ACK))
                    net_queue = netdev_get_tx_queue(ccmni->dev, CCMNI_TXQ_FAST);
                else
                    net_queue = netdev_get_tx_queue(ccmni->dev, CCMNI_TXQ_NORMAL);
                if (netif_tx_queue_stopped(net_queue))
                    netif_tx_wake_queue(net_queue);
            } else {
                if (netif_queue_stopped(ccmni->dev))
                    netif_wake_queue(ccmni->dev);
            }
            CCMNI_INF_MSG(md_id, "md_state_cb: %s, md_sta=TX_IRQ, ch=0x%x, usage=%d\n",
                ccmni->dev->name, rx_ch, atomic_read(&ccmni->usage));
        }
        break;

    case TX_FULL:
        if (atomic_read(&ccmni->usage) > 0) {
            if (ctlb->ccci_ops->md_ability & MODEM_CAP_CCMNI_MQ) {
                if ((ch_num == CCCI_CCMNI1_DL_ACK) || (ch_num == CCCI_CCMNI2_DL_ACK))
                    net_queue = netdev_get_tx_queue(ccmni->dev, CCMNI_TXQ_FAST);
                else
                    net_queue = netdev_get_tx_queue(ccmni->dev, CCMNI_TXQ_NORMAL);
                netif_tx_stop_queue(net_queue);
            } else
                netif_stop_queue(ccmni->dev);
            CCMNI_INF_MSG(md_id, "md_state_cb: %s, md_sta=TX_FULL, ch=0x%x, usage=%d\n",
                ccmni->dev->name, rx_ch, atomic_read(&ccmni->usage));
        }
        break;

    default:
        break;
    }
}
static void ccmni_dump(int md_id, int rx_ch, unsigned int flag)
{
    ccmni_ctl_block_t *ctlb = ccmni_ctl_blk[md_id];
    ccmni_instance_t *ccmni = NULL;
    ccmni_instance_t *ccmni_tmp = NULL;
    int ccmni_idx = 0;
    struct net_device *dev = NULL;
    struct netdev_queue *dev_queue = NULL;

    if (ctlb == NULL)
        return;

    ccmni_idx = get_ccmni_idx_from_ch(md_id, rx_ch);
    if (unlikely(ccmni_idx < 0)) {
        CCMNI_ERR_MSG(md_id, "CCMNI rx(%d) skb ch error\n", rx_ch);
        return;
    }

    ccmni_tmp = ctlb->ccmni_inst[ccmni_idx];
    if (unlikely(ccmni_tmp == NULL))
        return;

    if ((ccmni_tmp->dev->stats.rx_packets == 0) && (ccmni_tmp->dev->stats.tx_packets == 0))
        return;

    dev = ccmni_tmp->dev;
    /* ccmni diff from ccmni_tmp for MD IRAT */
    ccmni = (ccmni_instance_t *)netdev_priv(dev);
    dev_queue = netdev_get_tx_queue(dev, 0);
    CCMNI_INF_MSG(md_id,
        "%s(%d,%d), irat_md=MD%d, rx=(%ld,%ld), tx=(%ld,%ld), txq_len=%d, tx_busy=%ld, dev_sta=(0x%lx,0x%lx,0x%x)\n",
        dev->name, atomic_read(&ccmni->usage), atomic_read(&ccmni_tmp->usage), (ccmni->md_id + 1),
        dev->stats.rx_packets, dev->stats.rx_bytes, dev->stats.tx_packets, dev->stats.tx_bytes,
        dev->qdisc->q.qlen, ccmni->tx_busy_cnt, dev->state, dev_queue->state, dev->flags);
}
static void ccmni_dump_rx_status(int md_id, int rx_ch, unsigned long long *status)
{
    memcpy(status, net_rx_delay, sizeof(net_rx_delay));
}

struct ccmni_dev_ops ccmni_ops = {
    .skb_alloc_size = 1600,
    .init = &ccmni_init,
    .rx_callback = &ccmni_rx_callback,
    .md_state_callback = &ccmni_md_state_callback,
    .exit = ccmni_exit,
    .dump = ccmni_dump,
    .dump_rx_status = ccmni_dump_rx_status,
};