#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/ipv6.h>
#include <net/ipv6.h>
#include <linux/skbuff.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/version.h>
#include <linux/sockios.h>
#include <mt-plat/mt_ccci_common.h>
#include "ccci_config.h"
#include "ccci_core.h"
#include "ccci_bm.h"
#include "port_net.h"
#ifdef PORT_NET_TRACE
#define CREATE_TRACE_POINTS
#include "port_net_events.h"
#endif

/* pick the exception Tx queue while the modem is in exception mode, the normal queue otherwise */
#define NET_DAT_TXQ_INDEX(p) ((p)->modem->md_state == EXCEPTION ? (p)->txq_exp_index : (p)->txq_index)
#define NET_ACK_TXQ_INDEX(p) ((p)->modem->md_state == EXCEPTION ? (p)->txq_exp_index : ((p)->txq_exp_index & 0x0F))

#ifdef CCMNI_U
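/*
 * Map a CCMNI interface index to its CCCI Rx/Tx channel pair.
 * Dedicated ack channels are not used in this mapping, so both
 * rx_ack and tx_ack are set to the invalid marker 0xFF.
 */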
int ccci_get_ccmni_channel(int md_id, int ccmni_idx, struct ccmni_ch *channel)
{
	int ret = 0;

	switch (ccmni_idx) {
	case 0:
		channel->rx = CCCI_CCMNI1_RX;
		channel->rx_ack = 0xFF;
		channel->tx = CCCI_CCMNI1_TX;
		channel->tx_ack = 0xFF;
		break;
	case 1:
		channel->rx = CCCI_CCMNI2_RX;
		channel->rx_ack = 0xFF;
		channel->tx = CCCI_CCMNI2_TX;
		channel->tx_ack = 0xFF;
		break;
	case 2:
		channel->rx = CCCI_CCMNI3_RX;
		channel->rx_ack = 0xFF;
		channel->tx = CCCI_CCMNI3_TX;
		channel->tx_ack = 0xFF;
		break;
	case 3:
		channel->rx = CCCI_CCMNI4_RX;
		channel->rx_ack = 0xFF;
		channel->tx = CCCI_CCMNI4_TX;
		channel->tx_ack = 0xFF;
		break;
	case 4:
		channel->rx = CCCI_CCMNI5_RX;
		channel->rx_ack = 0xFF;
		channel->tx = CCCI_CCMNI5_TX;
		channel->tx_ack = 0xFF;
		break;
	case 5:
		channel->rx = CCCI_CCMNI6_RX;
		channel->rx_ack = 0xFF;
		channel->tx = CCCI_CCMNI6_TX;
		channel->tx_ack = 0xFF;
		break;
	case 6:
		channel->rx = CCCI_CCMNI7_RX;
		channel->rx_ack = 0xFF;
		channel->tx = CCCI_CCMNI7_TX;
		channel->tx_ack = 0xFF;
		break;
	case 7:
		channel->rx = CCCI_CCMNI8_RX;
		channel->rx_ack = 0xFF;
		channel->tx = CCCI_CCMNI8_TX;
		channel->tx_ack = 0xFF;
		break;
	default:
		CCCI_ERR_MSG(md_id, NET, "invalid ccmni index=%d\n", ccmni_idx);
		ret = -1;
		break;
	}
	return ret;
}
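
/*
 * Tx entry point called by the common CCMNI driver (ccmni_ops.send_pkt).
 * Builds the CCCI header in front of the IP payload, picks the data or
 * ack Tx queue, and hands the skb to the modem's send_request().
 */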
int ccmni_send_pkt(int md_id, int tx_ch, void *data)
{
	struct ccci_modem *md = ccci_get_modem_by_id(md_id);
	struct ccci_port *port = NULL;
	struct ccci_header *ccci_h;
	struct sk_buff *skb = (struct sk_buff *)data;
	int tx_ch_to_port, tx_queue;
	int ret;
#ifdef PORT_NET_TRACE
	unsigned long long send_time = 0;
	unsigned long long get_port_time = 0;
	unsigned long long total_time = sched_clock();
#endif

	if (!md)
		return CCMNI_ERR_TX_INVAL;
	if (unlikely(md->md_state != READY))
		return CCMNI_ERR_MD_NO_READY;
	/* DL ack packets are owned by the port of the corresponding data Tx channel */
	if (tx_ch == CCCI_CCMNI1_DL_ACK)
		tx_ch_to_port = CCCI_CCMNI1_TX;
	else if (tx_ch == CCCI_CCMNI2_DL_ACK)
		tx_ch_to_port = CCCI_CCMNI2_TX;
	else if (tx_ch == CCCI_CCMNI3_DL_ACK)
		tx_ch_to_port = CCCI_CCMNI3_TX;
	else
		tx_ch_to_port = tx_ch;
#ifdef PORT_NET_TRACE
	get_port_time = sched_clock();
#endif
	port = md->ops->get_port_by_channel(md, tx_ch_to_port);
#ifdef PORT_NET_TRACE
	get_port_time = sched_clock() - get_port_time;
#endif
	if (!port) {
		CCCI_ERR_MSG(0, NET, "port==NULL\n");
		return CCMNI_ERR_TX_INVAL;
	}
	if (tx_ch == CCCI_CCMNI1_DL_ACK || tx_ch == CCCI_CCMNI2_DL_ACK || tx_ch == CCCI_CCMNI3_DL_ACK)
		tx_queue = NET_ACK_TXQ_INDEX(port);
	else
		tx_queue = NET_DAT_TXQ_INDEX(port);
	ccci_h = (struct ccci_header *)skb_push(skb, sizeof(struct ccci_header));
	ccci_h->channel = tx_ch;
	ccci_h->data[0] = 0;
	ccci_h->data[1] = skb->len; /* skb->len already includes the CCCI header after skb_push */
	ccci_h->reserved = 0;
	CCCI_DBG_MSG(md_id, NET, "port %s send txq=%d: %08X, %08X, %08X, %08X\n", port->name, tx_queue,
		     ccci_h->data[0], ccci_h->data[1], ccci_h->channel, ccci_h->reserved);
#ifdef PORT_NET_TRACE
	send_time = sched_clock();
#endif
	ret = port->modem->ops->send_request(port->modem, tx_queue, NULL, skb);
#ifdef PORT_NET_TRACE
	send_time = sched_clock() - send_time;
#endif
	if (ret) {
		/* undo the header push; the next retry will reserve the header again */
		skb_pull(skb, sizeof(struct ccci_header));
		ret = CCMNI_ERR_TX_BUSY;
	} else {
		ret = CCMNI_ERR_TX_OK;
	}
#ifdef PORT_NET_TRACE
	if (ret == CCMNI_ERR_TX_OK) {
		total_time = sched_clock() - total_time;
		trace_port_net_tx(md_id, tx_queue, tx_ch, (unsigned int)get_port_time, (unsigned int)send_time,
				  (unsigned int)total_time);
	} else {
		trace_port_net_error(port->modem->index, tx_queue, port->tx_ch, port->tx_busy_count, __LINE__);
	}
#endif
	return ret;
}
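
/* NAPI poll stub for the CCMNI_U path: nothing is polled here, so report no work done */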
int ccmni_napi_poll(int md_id, int rx_ch, struct napi_struct *napi, int weight)
{
	return 0;
}
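
/*
 * Callback tables handed to the common CCMNI driver: one for the
 * eight-interface main modem ("ccmni") and one for the three-interface
 * C2K modem ("cc3mni"), whose abilities depend on the IRAT and
 * world-phone configuration.
 */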
struct ccmni_ccci_ops eccci_ccmni_ops = {
	.ccmni_ver = CCMNI_DRV_V0,
	.ccmni_num = 8,
	.name = "ccmni",
	.md_ability = MODEM_CAP_DATA_ACK_DVD,
	.irat_md_id = -1,
	.napi_poll_weigh = NAPI_POLL_WEIGHT,
	.send_pkt = ccmni_send_pkt,
	.napi_poll = ccmni_napi_poll,
	.get_ccmni_ch = ccci_get_ccmni_channel,
};

struct ccmni_ccci_ops eccci_cc3mni_ops = {
	.ccmni_ver = CCMNI_DRV_V0,
	.ccmni_num = 3,
	.name = "cc3mni",
#if defined CONFIG_MTK_IRAT_SUPPORT
#if defined CONFIG_MTK_C2K_SLOT2_SUPPORT
	.md_ability = MODEM_CAP_CCMNI_IRAT | MODEM_CAP_TXBUSY_STOP | MODEM_CAP_WORLD_PHONE,
#else
	.md_ability = MODEM_CAP_CCMNI_IRAT | MODEM_CAP_TXBUSY_STOP,
#endif
	.irat_md_id = MD_SYS1,
#else
	.md_ability = MODEM_CAP_TXBUSY_STOP,
	.irat_md_id = -1,
#endif
	.napi_poll_weigh = 0,
	.send_pkt = ccmni_send_pkt,
	.napi_poll = ccmni_napi_poll,
	.get_ccmni_ch = ccci_get_ccmni_channel,
};
#endif

#define IPV4_VERSION 0x40
#define IPV6_VERSION 0x60
#define SIOCSTXQSTATE (SIOCDEVPRIVATE + 0)
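
/*
 * Per-port state for the built-in (non-CCMNI_U) network device: the
 * net_device itself, its NAPI context, optional Tx/Rx sequence counters,
 * and a timer that flags a lost NAPI poll.
 */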
struct netdev_entity {
	struct napi_struct napi;
	struct net_device *ndev;
#ifndef FEATURE_SEQ_CHECK_EN
	unsigned int rx_seq_num;
	unsigned int tx_seq_num;
#endif
	struct timer_list polling_timer;
};
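
/*
 * ndo_open/ndo_stop: track the port usage count, start/stop the Tx
 * queue, and enable/disable NAPI on NAPI-capable modems.
 */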
static int ccmni_open(struct net_device *dev)
{
	struct ccci_port *port = *((struct ccci_port **)netdev_priv(dev));
	struct netdev_entity *nent = (struct netdev_entity *)port->private_data;

	atomic_inc(&port->usage_cnt);
	CCCI_INF_MSG(port->modem->index, NET, "port %s open %d cap=0x%X\n", port->name, atomic_read(&port->usage_cnt),
		     port->modem->capability);
	netif_start_queue(dev);
	if (likely(port->modem->capability & MODEM_CAP_NAPI)) {
		napi_enable(&nent->napi);
		napi_schedule(&nent->napi);
	}
	return 0;
}

static int ccmni_close(struct net_device *dev)
{
	struct ccci_port *port = *((struct ccci_port **)netdev_priv(dev));

	atomic_dec(&port->usage_cnt);
	CCCI_INF_MSG(port->modem->index, NET, "port %s close %d\n", port->name, atomic_read(&port->usage_cnt));
	netif_stop_queue(dev);
	if (likely(port->modem->capability & MODEM_CAP_NAPI))
		napi_disable(&((struct netdev_entity *)port->private_data)->napi);
	return 0;
}
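
/*
 * Heuristic TCP pure-ack detector: a packet counts as an ack if it fits
 * in a 128-byte slot (minus the CCCI header), is TCP, carries no
 * SYN/FIN/RST flag, and has no payload beyond the TCP header. Such
 * packets are steered to the dedicated ack Tx queue.
 */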
static inline int skb_is_ack(struct sk_buff *skb)
{
	u32 packet_type;
	struct tcphdr *tcph;

	packet_type = skb->data[0] & 0xF0;
	if (packet_type == IPV6_VERSION) {
		struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
		u32 total_len = sizeof(struct ipv6hdr) + ntohs(iph->payload_len);

		if (total_len <= 128 - sizeof(struct ccci_header)) {
			u8 nexthdr = iph->nexthdr;
			__be16 frag_off;
			int l4_off = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr, &frag_off);

			if (l4_off < 0) /* malformed extension header chain */
				return 0;
			tcph = (struct tcphdr *)(skb->data + l4_off);
			if (nexthdr == IPPROTO_TCP &&
			    !tcph->syn && !tcph->fin && !tcph->rst && ((total_len - l4_off) == (tcph->doff << 2)))
				return 1;
		}
	} else if (packet_type == IPV4_VERSION) {
		struct iphdr *iph = (struct iphdr *)skb->data;

		if (ntohs(iph->tot_len) <= 128 - sizeof(struct ccci_header)) {
			tcph = (struct tcphdr *)(skb->data + (iph->ihl << 2));
			if (iph->protocol == IPPROTO_TCP &&
			    !tcph->syn && !tcph->fin && !tcph->rst &&
			    (ntohs(iph->tot_len) == (iph->ihl << 2) + (tcph->doff << 2)))
				return 1;
		}
	}
	return 0;
}
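
/*
 * ndo_start_xmit: validate length and headroom, classify the packet so
 * TCP acks on ccmni0/ccmni1 ride the ack queue, prepend the CCCI header,
 * and submit to the modem. On a full queue the header push is undone and
 * NETDEV_TX_BUSY asks the stack to retry.
 */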
static int ccmni_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ccci_port *port = *((struct ccci_port **)netdev_priv(dev));
	struct ccci_header *ccci_h;
	int ret;
	int skb_len = skb->len;
	static int tx_busy_retry_cnt;
	int tx_queue, tx_channel;
#ifndef FEATURE_SEQ_CHECK_EN
	struct netdev_entity *nent = (struct netdev_entity *)port->private_data;
#endif
#ifdef PORT_NET_TRACE
	unsigned long long send_time = 0;
	unsigned long long total_time = sched_clock();
#endif

#ifndef FEATURE_SEQ_CHECK_EN
	CCCI_DBG_MSG(port->modem->index, NET, "write on %s, len=%d/%d, curr_seq=%d\n",
		     port->name, skb_headroom(skb), skb->len, nent->tx_seq_num);
#else
	CCCI_DBG_MSG(port->modem->index, NET, "write on %s, len=%d/%d\n", port->name, skb_headroom(skb), skb->len);
#endif
	if (unlikely(skb->len > CCCI_NET_MTU)) {
		CCCI_ERR_MSG(port->modem->index, NET, "exceeds MTU(%d) with %d/%d\n", CCCI_NET_MTU, dev->mtu, skb->len);
		dev_kfree_skb(skb);
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}
	if (unlikely(skb_headroom(skb) < sizeof(struct ccci_header))) {
		CCCI_ERR_MSG(port->modem->index, NET, "not enough header room on %s, len=%d header=%d hard_header=%d\n",
			     port->name, skb->len, skb_headroom(skb), dev->hard_header_len);
		dev_kfree_skb(skb);
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}
	if (unlikely(port->modem->md_state != READY)) {
		dev_kfree_skb(skb);
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}
	if (likely((port->rx_ch == CCCI_CCMNI1_RX) || (port->rx_ch == CCCI_CCMNI2_RX))) {
		/* ack steering is only used on ccmni0 and ccmni1 */
		if (unlikely(skb_is_ack(skb))) {
			tx_channel = port->tx_ch == CCCI_CCMNI1_TX ? CCCI_CCMNI1_DL_ACK : CCCI_CCMNI2_DL_ACK;
			tx_queue = NET_ACK_TXQ_INDEX(port);
		} else {
			tx_channel = port->tx_ch;
			tx_queue = NET_DAT_TXQ_INDEX(port);
		}
	} else {
		tx_channel = port->tx_ch;
		tx_queue = NET_DAT_TXQ_INDEX(port);
	}
	ccci_h = (struct ccci_header *)skb_push(skb, sizeof(struct ccci_header));
	ccci_h->channel = tx_channel;
	ccci_h->data[0] = 0;
	ccci_h->data[1] = skb->len; /* skb->len already includes the CCCI header after skb_push */
#ifndef FEATURE_SEQ_CHECK_EN
	ccci_h->reserved = nent->tx_seq_num++;
#else
	ccci_h->reserved = 0;
#endif
#ifdef PORT_NET_TRACE
	send_time = sched_clock();
#endif
	ret = port->modem->ops->send_request(port->modem, tx_queue, NULL, skb);
#ifdef PORT_NET_TRACE
	send_time = sched_clock() - send_time;
#endif
	if (ret) {
		/* undo the header push; the next retry will reserve the header again */
		skb_pull(skb, sizeof(struct ccci_header));
		goto tx_busy;
	}
	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb_len;
	tx_busy_retry_cnt = 0;
#ifdef PORT_NET_TRACE
	total_time = sched_clock() - total_time;
	trace_port_net_tx(port->modem->index, tx_queue, port->tx_ch, 0,
			  (unsigned int)send_time, (unsigned int)total_time);
#endif
	return NETDEV_TX_OK;

tx_busy:
	if (unlikely(!(port->modem->capability & MODEM_CAP_TXBUSY_STOP))) {
		if (tx_busy_retry_cnt % 20000 == 0)
			CCCI_INF_MSG(port->modem->index, NET, "%s TX busy: retry_times=%d\n", port->name,
				     tx_busy_retry_cnt);
		tx_busy_retry_cnt++;
	} else {
		port->tx_busy_count++;
	}
#ifdef PORT_NET_TRACE
	trace_port_net_error(port->modem->index, tx_queue, port->tx_ch, port->tx_busy_count, __LINE__);
#endif
	return NETDEV_TX_BUSY;
}

static void ccmni_tx_timeout(struct net_device *dev)
{
	struct ccci_port *port = *((struct ccci_port **)netdev_priv(dev));

	dev->stats.tx_errors++;
	if (atomic_read(&port->usage_cnt) > 0)
		netif_wake_queue(dev);
}
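
/*
 * Private ioctl SIOCSTXQSTATE lets user space (RILD or thermal, per the
 * bit layout below) stop or restart the Tx queue. The watchdog timeout
 * is stretched while the queue is stopped so ndo_tx_timeout does not
 * fire spuriously.
 */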
static int ccmni_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct ccci_port *port = *((struct ccci_port **)netdev_priv(dev));
	unsigned int timeout = 0;

	switch (cmd) {
	case SIOCSTXQSTATE:
		/* ifru_ivalue[3~0]: start/stop; ifru_ivalue[7~4]: reserved; */
		/* ifru_ivalue[15~8]: user id, bit8=rild, bit9=thermal */
		/* ifru_ivalue[31~16]: watchdog timeout value */
		if ((ifr->ifr_ifru.ifru_ivalue & 0xF) == 0) {
			if (atomic_read(&port->usage_cnt) > 0) {
				atomic_dec(&port->usage_cnt);
				netif_stop_queue(dev);
				/* stopping the queue does not stop the Tx watchdog (ndo_tx_timeout) */
				timeout = (ifr->ifr_ifru.ifru_ivalue & 0xFFFF0000) >> 16;
				if (timeout == 0)
					dev->watchdog_timeo = 60 * HZ;
				else
					dev->watchdog_timeo = timeout * HZ;
			}
		} else {
			if (atomic_read(&port->usage_cnt) <= 0) {
				if (netif_running(dev) && netif_queue_stopped(dev))
					netif_wake_queue(dev);
				dev->watchdog_timeo = 1 * HZ;
				atomic_inc(&port->usage_cnt);
			}
		}
		CCCI_INF_MSG(port->modem->index, NET, "SIOCSTXQSTATE request=%d on %s %d\n", ifr->ifr_ifru.ifru_ivalue,
			     port->name, atomic_read(&port->usage_cnt));
		break;
	default:
		CCCI_INF_MSG(port->modem->index, NET, "unknown ioctl cmd=%d on %s\n", cmd, port->name);
		break;
	}
	return 0;
}

struct net_device_stats *ccmni_get_stats(struct net_device *dev)
{
	return &dev->stats;
}

static const struct net_device_ops ccmni_netdev_ops = {
	.ndo_open = ccmni_open,
	.ndo_stop = ccmni_close,
	.ndo_start_xmit = ccmni_start_xmit,
	.ndo_tx_timeout = ccmni_tx_timeout,
	.ndo_do_ioctl = ccmni_ioctl,
	.ndo_get_stats = ccmni_get_stats,
};

#ifndef CCMNI_U
static int port_net_poll(struct napi_struct *napi, int budget)
{
	struct ccci_port *port = *((struct ccci_port **)netdev_priv(napi->dev));
	struct netdev_entity *nent = (struct netdev_entity *)port->private_data;

	del_timer(&nent->polling_timer);
	return port->modem->ops->napi_poll(port->modem, PORT_RXQ_INDEX(port), napi, budget);
}

static void napi_polling_timer_func(unsigned long data)
{
	struct ccci_port *port = (struct ccci_port *)data;

	CCCI_ERR_MSG(port->modem->index, NET, "lost NAPI polling on %s\n", port->name);
}

/* fake an Ethernet header in the headroom so the stack accepts frames from this pure-IP device */
static void ccmni_make_etherframe(void *_eth_hdr, unsigned char *mac_addr, unsigned int packet_type)
{
	struct ethhdr *eth_hdr = _eth_hdr;

	memcpy(eth_hdr->h_dest, mac_addr, sizeof(eth_hdr->h_dest));
	memset(eth_hdr->h_source, 0, sizeof(eth_hdr->h_source));
	if (packet_type == IPV6_VERSION)
		eth_hdr->h_proto = cpu_to_be16(ETH_P_IPV6);
	else
		eth_hdr->h_proto = cpu_to_be16(ETH_P_IP);
}
#endif
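
/*
 * Port init. With CCMNI_U the common CCMNI driver owns the net devices
 * and is registered once, via the ccmni1 port of each modem; otherwise
 * this port allocates and registers its own ether-style net_device.
 */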
static int port_net_init(struct ccci_port *port)
{
#ifdef CCMNI_U
	if (port->rx_ch == CCCI_CCMNI1_RX) {
#if defined CONFIG_MTK_IRAT_SUPPORT
		CCCI_NOTICE_MSG(port->modem->index, NET, "clear MODEM_CAP_SGIO flag\n");
		port->modem->capability &= ~MODEM_CAP_SGIO;
#endif
		eccci_ccmni_ops.md_ability |= port->modem->capability;
		if (port->modem->index == MD_SYS1)
			ccmni_ops.init(port->modem->index, &eccci_ccmni_ops);
		else if (port->modem->index == MD_SYS3)
			ccmni_ops.init(port->modem->index, &eccci_cc3mni_ops);
	}
	return 0;
#else
	struct ccci_port **temp;
	struct net_device *dev = NULL;
	struct netdev_entity *nent = NULL;

	CCCI_DBG_MSG(port->modem->index, NET, "network port is initializing\n");
	dev = alloc_etherdev(sizeof(struct ccci_port *));
	if (!dev)
		return -ENOMEM;
	dev->header_ops = NULL;
	dev->mtu = CCCI_NET_MTU;
	dev->tx_queue_len = 1000;
	dev->watchdog_timeo = 1 * HZ;
	dev->flags = IFF_NOARP & /* ccmni is a pure IP device */
		     (~IFF_BROADCAST & ~IFF_MULTICAST); /* ccmni is P2P */
	dev->features = NETIF_F_VLAN_CHALLENGED; /* VLAN is not supported */
#ifndef CONFIG_MTK_IRAT_SUPPORT
	if (port->modem->capability & MODEM_CAP_SGIO) {
		dev->features |= NETIF_F_SG;
		dev->hw_features |= NETIF_F_SG;
	}
#endif
	dev->addr_len = ETH_ALEN; /* Ethernet address size */
	dev->destructor = free_netdev;
	dev->hard_header_len += sizeof(struct ccci_header); /* reserve Tx CCCI header room */
	dev->netdev_ops = &ccmni_netdev_ops;
	temp = netdev_priv(dev);
	*temp = port;
	snprintf(dev->name, IFNAMSIZ, "%s", port->name);
	random_ether_addr((u8 *)dev->dev_addr);
	nent = kzalloc(sizeof(struct netdev_entity), GFP_KERNEL);
	if (!nent) {
		free_netdev(dev);
		return -ENOMEM;
	}
	nent->ndev = dev;
	if (likely(port->modem->capability & MODEM_CAP_NAPI))
		netif_napi_add(dev, &nent->napi, port_net_poll, NAPI_POLL_WEIGHT);
	port->private_data = nent;
	init_timer(&nent->polling_timer);
	nent->polling_timer.function = napi_polling_timer_func;
	nent->polling_timer.data = (unsigned long)port;
	register_netdev(dev);
	CCCI_DBG_MSG(port->modem->index, NET, "network device %s hard_header_len=%d\n", dev->name,
		     dev->hard_header_len);
	return 0;
#endif
}
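
/*
 * Rx path. With CCMNI_U the skb (CCCI header stripped) goes to the
 * common CCMNI rx_callback; otherwise a fake Ethernet header is
 * prepended and the packet is delivered through NAPI/GRO or netif_rx,
 * depending on the modem's capability.
 */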
static int port_net_recv_skb(struct ccci_port *port, struct sk_buff *skb)
{
#ifdef CCMNI_U
	struct ccci_header *ccci_h = (struct ccci_header *)skb->data;
#ifdef PORT_NET_TRACE
	unsigned long long rx_cb_time;
	unsigned long long total_time = sched_clock();
#endif

	skb_pull(skb, sizeof(struct ccci_header));
	CCCI_DBG_MSG(port->modem->index, NET, "[RX]: 0x%08X, 0x%08X, 0x%08X, 0x%08X\n",
		     ccci_h->data[0], ccci_h->data[1], ccci_h->channel, ccci_h->reserved);
#ifdef PORT_NET_TRACE
	rx_cb_time = sched_clock();
#endif
	ccmni_ops.rx_callback(port->modem->index, ccci_h->channel, skb, NULL);
#ifdef PORT_NET_TRACE
	rx_cb_time = sched_clock() - rx_cb_time;
	total_time = sched_clock() - total_time;
	trace_port_net_rx(port->modem->index, PORT_RXQ_INDEX(port), port->rx_ch, (unsigned int)rx_cb_time,
			  (unsigned int)total_time);
#endif
	return 0;
#else
	struct netdev_entity *nent = (struct netdev_entity *)port->private_data;
	struct ccci_modem *md = port->modem;
	struct net_device *dev = nent->ndev;
	unsigned int packet_type;
	int skb_len = skb->len;
#ifndef FEATURE_SEQ_CHECK_EN
	struct ccci_header *ccci_h = (struct ccci_header *)skb->data;
#endif
#ifdef PORT_NET_TRACE
	unsigned long long rx_cb_time;
	unsigned long long total_time = sched_clock();
#endif

#ifndef FEATURE_SEQ_CHECK_EN
	CCCI_DBG_MSG(md->index, NET, "recv on %s, curr_seq=%d\n", port->name, ccci_h->reserved);
	if (unlikely(nent->rx_seq_num != 0 && (ccci_h->reserved - nent->rx_seq_num) != 1)) {
		CCCI_ERR_MSG(md->index, NET, "possible packet lost on %s %d->%d\n",
			     port->name, nent->rx_seq_num, ccci_h->reserved);
	}
	nent->rx_seq_num = ccci_h->reserved;
#else
	CCCI_DBG_MSG(md->index, NET, "recv on %s\n", port->name);
#endif
	skb_pull(skb, sizeof(struct ccci_header));
	packet_type = skb->data[0] & 0xF0;
	ccmni_make_etherframe(skb->data - ETH_HLEN, dev->dev_addr, packet_type);
	skb_set_mac_header(skb, -ETH_HLEN);
	skb->dev = dev;
	if (packet_type == IPV6_VERSION) {
		skb->protocol = htons(ETH_P_IPV6);
	} else {
		skb->protocol = htons(ETH_P_IP);
#ifdef CCCI_SKB_TRACE
		md->netif_rx_profile[2] = ((struct iphdr *)skb->data)->id;
		skb->mark &= 0x0FFFFFFF;
		skb->mark |= (0x5 << 28);
#endif
	}
	skb->ip_summed = CHECKSUM_NONE;
#ifdef CCCI_SKB_TRACE
	md->netif_rx_profile[3] = sched_clock();
#endif
#ifdef PORT_NET_TRACE
	rx_cb_time = sched_clock();
#endif
	if (likely(md->capability & MODEM_CAP_NAPI)) {
#ifdef ENABLE_GRO
		napi_gro_receive(&nent->napi, skb);
#else
		netif_receive_skb(skb);
#endif
	} else {
		if (!in_interrupt())
			netif_rx_ni(skb);
		else
			netif_rx(skb);
	}
#ifdef CCCI_SKB_TRACE
	md->netif_rx_profile[3] = sched_clock() - md->netif_rx_profile[3];
#endif
#ifdef PORT_NET_TRACE
	rx_cb_time = sched_clock() - rx_cb_time;
#endif
	dev->stats.rx_packets++;
	dev->stats.rx_bytes += skb_len;
#ifdef CCCI_SKB_TRACE
	md->netif_rx_profile[0] = dev->stats.rx_bytes - 40 * dev->stats.rx_packets;
	md->netif_rx_profile[1] = dev->stats.tx_bytes - 40 * dev->stats.tx_packets;
#endif
	wake_lock_timeout(&port->rx_wakelock, HZ);
#ifdef PORT_NET_TRACE
	total_time = sched_clock() - total_time;
	trace_port_net_rx(md->index, PORT_RXQ_INDEX(port), port->rx_ch, (unsigned int)rx_cb_time,
			  (unsigned int)total_time);
#endif
	return 0;
#endif
}
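
/*
 * Forward modem/queue state changes to the data path: throttle or wake
 * the Tx queue on TX_FULL/TX_IRQ, schedule NAPI on RX_IRQ (non-CCMNI_U
 * build), and toggle the carrier on READY vs. EXCEPTION/RESET.
 */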
static void port_net_md_state_notice(struct ccci_port *port, MD_STATE state)
{
#ifdef CCMNI_U
	if (((state == TX_IRQ) && ((port->flags & PORT_F_RX_FULLED) == 0)) ||
	    ((state == TX_FULL) && (port->flags & PORT_F_RX_FULLED)))
		return;
	ccmni_ops.md_state_callback(port->modem->index, port->rx_ch, state);
	switch (state) {
	case TX_IRQ:
		port->flags &= ~PORT_F_RX_FULLED;
		break;
	case TX_FULL:
		port->flags |= PORT_F_RX_FULLED; /* for convenience in the traffic log */
		break;
	default:
		break;
	}
#else
	struct netdev_entity *nent = (struct netdev_entity *)port->private_data;
	struct net_device *dev = nent->ndev;

	if (((state == TX_IRQ) && ((port->flags & PORT_F_RX_FULLED) == 0)) ||
	    ((state == TX_FULL) && (port->flags & PORT_F_RX_FULLED)))
		return;
	switch (state) {
	case RX_IRQ:
		mod_timer(&nent->polling_timer, jiffies + HZ);
		napi_schedule(&nent->napi);
		wake_lock_timeout(&port->rx_wakelock, HZ);
		break;
	case TX_IRQ:
		if (netif_running(dev) && netif_queue_stopped(dev) && atomic_read(&port->usage_cnt) > 0)
			netif_wake_queue(dev);
		port->flags &= ~PORT_F_RX_FULLED;
		break;
	case TX_FULL:
		netif_stop_queue(dev);
		port->flags |= PORT_F_RX_FULLED; /* for convenience in the traffic log */
		break;
	case READY:
		netif_carrier_on(dev);
		break;
	case EXCEPTION:
	case RESET:
		netif_carrier_off(dev);
#ifndef FEATURE_SEQ_CHECK_EN
		nent->tx_seq_num = 0;
		nent->rx_seq_num = 0;
#endif
		break;
	default:
		break;
	}
#endif
}
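
/* Dump CCMNI state for debugging; effective only when CCMNI_U provides a dump hook */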
void port_net_md_dump_info(struct ccci_port *port, unsigned int flag)
{
#ifdef CCMNI_U
	if (port == NULL) {
		CCCI_ERR_MSG(0, NET, "port_net_md_dump_info: port==NULL\n");
		return;
	}
	if (port->modem == NULL) {
		CCCI_ERR_MSG(0, NET, "port_net_md_dump_info: port->modem==NULL\n");
		return;
	}
	if (ccmni_ops.dump == NULL) {
		CCCI_ERR_MSG(0, NET, "port_net_md_dump_info: ccmni_ops.dump==NULL\n");
		return;
	}
	ccmni_ops.dump(port->modem->index, port->rx_ch, 0);
#endif
}

struct ccci_port_ops net_port_ops = {
	.init = &port_net_init,
	.recv_skb = &port_net_recv_skb,
	.md_state_notice = &port_net_md_state_notice,
	.dump_info = &port_net_md_dump_info,
};