ccmni_net.c 23 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893
  1. /*****************************************************************************
  2. *
  3. * Filename:
  4. * ---------
  5. * ccmni.c
  6. *
  7. * Project:
  8. * --------
  9. * YuSu
  10. *
  11. * Description:
  12. * ------------
  13. * MT6516 Cross Chip Modem Network Interface
  14. *
  15. * Author:
  16. * -------
  17. * TL Lau (mtk02008)
  18. *
  19. ****************************************************************************/
  20. #include <linux/kernel.h>
  21. #include <linux/module.h>
  22. #include <linux/device.h>
  23. #include <linux/netdevice.h>
  24. #include <linux/etherdevice.h>
  25. #include <linux/skbuff.h>
  26. #include <linux/bitops.h>
  27. #include <linux/wakelock.h>
  28. #include <linux/spinlock.h>
  29. #include <linux/interrupt.h>
  30. #include <linux/delay.h>
  31. #include <linux/wait.h>
  32. #include <linux/dma-mapping.h>
  33. #include <linux/dma-mapping.h>
  34. #include <linux/bitops.h>
  35. #include <linux/timer.h>
  36. #include <ccmni_pfp.h>
  37. #include <ccci_tty.h>
  38. #include <ccci.h>
  39. #define CCMNI_TX_QUEUE 8
  40. #define CCMNI_UART_OFFSET 2
/* Per-port state for one CCMNI network interface. */
struct ccmni_instance_t {
int channel;                    /* CCMNI port index (0..CCMNI_V1_PORT_NUM-1) */
int m_md_id;                    /* owning modem id */
int uart_rx;                    /* CCCI logical channel ids bound to this port */
int uart_rx_ack;
int uart_tx;
int uart_tx_ack;
int ready;                      /* cleared while waiting for a TX ACK from the modem */
int net_if_off;                 /* 1 when carrier has been dropped (modem down) */
unsigned long flags;            /* CCMNI_SEND_PENDING / CCMNI_RECV_ACK_PENDING bits */
struct timer_list timer;        /* retry timer for sends that got no CCIF channel */
unsigned long send_len;         /* bytes queued while CCMNI_SEND_PENDING is set */
struct net_device *dev;
struct wake_lock wake_lock;
spinlock_t spinlock;            /* guards shared_mem rings and the state above */
struct shared_mem_tty_t *shared_mem;    /* TX/RX ring buffers shared with the modem */
int shared_mem_phys_addr;
unsigned char write_buffer[CCMNI_MTU + 4];      /* staging area for pfp_frame() output */
unsigned char read_buffer[CCCI1_CCMNI_BUF_SIZE];        /* linearized copy of the RX ring */
unsigned char decode_buffer[CCCI1_CCMNI_BUF_SIZE];      /* pfp_unframe() packet output */
unsigned char mac_addr[ETH_ALEN];
struct tasklet_struct tasklet;  /* runs ccmni_read() */
void *owner;                    /* points back to the struct ccmni_v1_ctl_block_t */
};
/* Per-modem control block holding every CCMNI port instance. */
struct ccmni_v1_ctl_block_t {
int m_md_id;                    /* modem id this block belongs to */
int ccci_is_ready;              /* set once the modem has booted, cleared on stop/exception */
struct ccmni_instance_t *ccmni_instance[CCMNI_V1_PORT_NUM];
struct wake_lock ccmni_wake_lock;       /* held briefly after RX so packets get processed */
char wakelock_name[16];
struct MD_CALL_BACK_QUEUE ccmni_notifier;       /* modem state-change callback entry */
};
  73. static void ccmni_read(unsigned long arg);
  74. /* static DECLARE_TASKLET (ccmni_read_tasklet, ccmni_read, 0); */
  75. static void reset_ccmni_instance_buffer(struct ccmni_instance_t *ccmni_instance)
  76. {
  77. unsigned long flags;
  78. spin_lock_irqsave(&ccmni_instance->spinlock, flags);
  79. ccci_reset_buffers(ccmni_instance->shared_mem, CCCI1_CCMNI_BUF_SIZE);
  80. spin_unlock_irqrestore(&ccmni_instance->spinlock, flags);
  81. }
  82. int ccmni_v1_ipo_h_restore(int md_id)
  83. {
  84. int i;
  85. struct ccmni_v1_ctl_block_t *ctlb;
  86. ctlb = ccmni_ctl_block[md_id];
  87. for (i = 0; i < CCMNI_V1_PORT_NUM; i++)
  88. ccci_reset_buffers(ctlb->ccmni_instance[i]->shared_mem,
  89. CCCI1_CCMNI_BUF_SIZE);
  90. return 0;
  91. }
  92. static void restore_ccmni_instance(struct ccmni_instance_t *ccmni_instance)
  93. {
  94. unsigned long flags;
  95. spin_lock_irqsave(&ccmni_instance->spinlock, flags);
  96. if (ccmni_instance->net_if_off) {
  97. ccmni_instance->net_if_off = 0;
  98. netif_carrier_on(ccmni_instance->dev);
  99. }
  100. spin_unlock_irqrestore(&ccmni_instance->spinlock, flags);
  101. }
  102. static void stop_ccmni_instance(struct ccmni_instance_t *ccmni_instance)
  103. {
  104. unsigned long flags;
  105. spin_lock_irqsave(&ccmni_instance->spinlock, flags);
  106. if (ccmni_instance->net_if_off == 0) {
  107. ccmni_instance->net_if_off = 1;
  108. del_timer(&ccmni_instance->timer);
  109. netif_carrier_off(ccmni_instance->dev);
  110. }
  111. spin_unlock_irqrestore(&ccmni_instance->spinlock, flags);
  112. }
  113. static void ccmni_notifier_call(struct MD_CALL_BACK_QUEUE *notifier,
  114. unsigned long val)
  115. {
  116. int i;
  117. struct ccmni_v1_ctl_block_t *ctl_b =
  118. container_of(notifier, struct ccmni_v1_ctl_block_t,
  119. ccmni_notifier);
  120. struct ccmni_instance_t *instance;
  121. switch (val) {
  122. case CCCI_MD_EXCEPTION:
  123. ctl_b->ccci_is_ready = 0;
  124. for (i = 0; i < CCMNI_V1_PORT_NUM; i++) {
  125. instance = ctl_b->ccmni_instance[i];
  126. if (instance)
  127. stop_ccmni_instance(instance);
  128. }
  129. break;
  130. case CCCI_MD_STOP:
  131. for (i = 0; i < CCMNI_V1_PORT_NUM; i++) {
  132. instance = ctl_b->ccmni_instance[i];
  133. if (instance)
  134. stop_ccmni_instance(instance);
  135. }
  136. break;
  137. case CCCI_MD_RESET:
  138. ctl_b->ccci_is_ready = 0;
  139. for (i = 0; i < CCMNI_V1_PORT_NUM; i++) {
  140. instance = ctl_b->ccmni_instance[i];
  141. if (instance)
  142. reset_ccmni_instance_buffer(instance);
  143. }
  144. break;
  145. case CCCI_MD_BOOTUP:
  146. if (ctl_b->ccci_is_ready == 0) {
  147. ctl_b->ccci_is_ready = 1;
  148. for (i = 0; i < CCMNI_V1_PORT_NUM; i++) {
  149. instance = ctl_b->ccmni_instance[i];
  150. if (instance)
  151. restore_ccmni_instance(instance);
  152. }
  153. }
  154. break;
  155. default:
  156. break;
  157. }
  158. }
  159. static void timer_func(unsigned long data)
  160. {
  161. struct ccmni_instance_t *ccmni = (struct ccmni_instance_t *) data;
  162. int contin = 0;
  163. int ret = 0;
  164. struct ccci_msg_t msg;
  165. struct ccmni_v1_ctl_block_t *ctl_b = (struct ccmni_v1_ctl_block_t *) ccmni->owner;
  166. int md_id = ctl_b->m_md_id;
  167. spin_lock_bh(&ccmni->spinlock);
  168. if (ctl_b->ccci_is_ready == 0)
  169. goto out;
  170. if (test_bit(CCMNI_RECV_ACK_PENDING, &ccmni->flags)) {
  171. msg.magic = 0;
  172. msg.id = CCMNI_CHANNEL_OFFSET + ccmni->channel;
  173. msg.channel = ccmni->uart_rx_ack;
  174. msg.reserved = 0;
  175. ret = ccci_message_send(md_id, &msg, 1);
  176. if (ret == -CCCI_ERR_CCIF_NO_PHYSICAL_CHANNEL)
  177. contin = 1;
  178. else
  179. clear_bit(CCMNI_RECV_ACK_PENDING, &ccmni->flags);
  180. }
  181. if (test_bit(CCMNI_SEND_PENDING, &ccmni->flags)) {
  182. msg.addr = 0;
  183. msg.len = ccmni->send_len;
  184. msg.channel = ccmni->uart_tx;
  185. msg.reserved = 0;
  186. ret = ccci_message_send(md_id, &msg, 1);
  187. if (ret == -CCCI_ERR_CCIF_NO_PHYSICAL_CHANNEL)
  188. contin = 1;
  189. else {
  190. clear_bit(CCMNI_SEND_PENDING, &ccmni->flags);
  191. ccmni->send_len = 0;
  192. }
  193. }
  194. out:
  195. spin_unlock_bh(&ccmni->spinlock);
  196. if (contin)
  197. mod_timer(&ccmni->timer, jiffies + 2);
  198. return;
  199. }
  200. static void ccmni_make_etherframe(void *_eth_hdr, u8 *mac_addr,
  201. int packet_type)
  202. {
  203. struct ethhdr *eth_hdr = _eth_hdr;
  204. memcpy(eth_hdr->h_dest, mac_addr, sizeof(eth_hdr->h_dest));
  205. memset(eth_hdr->h_source, 0, sizeof(eth_hdr->h_source));
  206. if (packet_type == IPV6_VERSION)
  207. eth_hdr->h_proto = cpu_to_be16(ETH_P_IPV6);
  208. else
  209. eth_hdr->h_proto = cpu_to_be16(ETH_P_IP);
  210. }
/*
 * Decode 'length' framed bytes from ccmni->read_buffer into complete IP
 * packets with pfp_unframe(), wrap each packet in a fake ethernet header
 * and hand it to the network stack via netif_rx().
 *
 * Returns the number of input bytes actually consumed, i.e. the bytes
 * belonging to complete packets; the caller advances the shared-memory
 * read pointer by this amount so a trailing partial frame is re-parsed
 * on the next pass.
 */
static int ccmni_receive(struct ccmni_instance_t *ccmni, int length)
{
int counter, ret;
struct packet_info_t packet_info;
struct complete_ippkt_t *packet;
struct complete_ippkt_t *processed_packet;
struct sk_buff *skb;
struct complete_ippkt_t last_packet = { 0 };
int offset_put_pkt = 0;         /* write offset into decode_buffer */
int offset_parse_frame = 0;     /* read offset into read_buffer */
int packet_type;
struct ccmni_v1_ctl_block_t *ctl_b = (struct ccmni_v1_ctl_block_t *) ccmni->owner;
int md_id = ctl_b->m_md_id;
CCCI_CCMNI_MSG(md_id, "CCMNI%d_receive() invoke pfp_unframe()\n",
ccmni->channel);
do {
packet_info = pfp_unframe(ccmni->decode_buffer + offset_put_pkt,
CCCI1_CCMNI_BUF_SIZE - offset_put_pkt,
ccmni->read_buffer +
offset_parse_frame, length,
ccmni->channel);
packet = packet_info.pkt_list;
CCCI_CCMNI_MSG(md_id,
"CCMNI%d num_complete_pkt=%d after pfp_unframe\n",
ccmni->channel,
packet_info.num_complete_packets);
for (counter = 0; counter < packet_info.num_complete_packets;
counter++) {
skb = dev_alloc_skb(packet->pkt_size);
if (skb) {
/* High nibble of the first byte is the IP version (4 or 6). */
packet_type = packet->pkt_data[0] & 0xF0;
memcpy(skb_put(skb, packet->pkt_size),
packet->pkt_data, packet->pkt_size);
/* The fake ethernet header lives in the skb headroom. */
ccmni_make_etherframe(skb->data - ETH_HLEN,
ccmni->dev->dev_addr,
packet_type);
skb_set_mac_header(skb, -ETH_HLEN);
skb->dev = ccmni->dev;
if (packet_type == IPV6_VERSION)
skb->protocol = htons(ETH_P_IPV6);
else
skb->protocol = htons(ETH_P_IP);
skb->ip_summed = CHECKSUM_NONE;
ret = netif_rx(skb);
CCCI_CCMNI_MSG(md_id,
"CCMNI%d invoke netif_rx()=%d\n",
ccmni->channel, ret);
ccmni->dev->stats.rx_packets++;
ccmni->dev->stats.rx_bytes += packet->pkt_size;
CCCI_CCMNI_MSG(md_id,
"CCMNI%d rx_pkts=%ld, stats_rx_bytes=%ld\n",
ccmni->channel,
ccmni->dev->stats.rx_packets,
ccmni->dev->stats.rx_bytes);
} else {
/* Allocation failed: the packet is dropped but still counted
 * as consumed below, so it will not be re-parsed. */
CCCI_DBG_MSG(md_id, "net",
"CCMNI%d Socket buffer allocate fail\n",
ccmni->channel);
}
processed_packet = packet;
last_packet = *processed_packet;
packet = packet->next;
/* Only clear the entry_used flag as 0 */
release_one_used_complete_ippkt_entry(processed_packet);
};
/* pfp_unframe() must be invoked again when it ran out of
 * complete_ippkt entries before exhausting the input; resume
 * decoding right after the last emitted packet.
 */
if (packet_info.try_decode_again == 1) {
offset_put_pkt +=
(last_packet.pkt_data - ccmni->decode_buffer +
last_packet.pkt_size);
offset_parse_frame += packet_info.consumed_length;
}
} while (packet_info.try_decode_again == 1);
offset_parse_frame += packet_info.consumed_length;
return offset_parse_frame;
}
/*
 * Tasklet body: drain the RX ring shared with the modem.
 *
 * Copies the readable span (handling ring wrap-around) into
 * ccmni->read_buffer, lets ccmni_receive() decode it, then advances the
 * shared read pointer by only the bytes actually consumed - a trailing
 * partial frame stays in the ring for the next pass.  Finally an ack is
 * sent to the modem; if no CCIF channel is free the ack is deferred to
 * timer_func() via the CCMNI_RECV_ACK_PENDING flag.
 */
static void ccmni_read(unsigned long arg)
{
int part, size;
int ret;
int read, write, consumed;
unsigned char *string;
struct ccmni_instance_t *ccmni = (struct ccmni_instance_t *) arg;
struct ccci_msg_t msg;
struct ccmni_v1_ctl_block_t *ctl_b = (struct ccmni_v1_ctl_block_t *) ccmni->owner;
int md_id = ctl_b->m_md_id;
char *rx_buffer;
spin_lock_bh(&ccmni->spinlock);
if (ctl_b->ccci_is_ready == 0) {
CCCI_DBG_MSG(md_id, "net",
"CCMNI%d_read fail when modem not ready\n",
ccmni->channel);
goto out;
}
string = ccmni->read_buffer;
read = ccmni->shared_mem->rx_control.read;
write = ccmni->shared_mem->rx_control.write;
size = write - read;
part = 0;
rx_buffer = ccmni->shared_mem->buffer;
if (size < 0)
size += ccmni->shared_mem->rx_control.length;
/* Ring wrapped: copy the tail segment first, then restart at offset 0. */
if (read > write) {
part = ccmni->shared_mem->rx_control.length - read;
memcpy(string, &rx_buffer[read], part);
size -= part;
string += part;
read = 0;
}
memcpy(string, &rx_buffer[read], size);
CCCI_CCMNI_MSG(md_id, "CCMNI%d_receive[Before]: size=%d, read=%d\n",
ccmni->channel, (size + part), read);
consumed = ccmni_receive(ccmni, size + part);
CCCI_CCMNI_MSG(md_id, "CCMNI%d_receive[After]: consume=%d\n",
ccmni->channel, consumed);
/* Calculate the new position of the read pointer. */
/* Take into consideration the number of bytes actually consumed; */
/* i.e. number of bytes taken up by complete IP packets. */
read += size;
if (read >= ccmni->shared_mem->rx_control.length)
read -= ccmni->shared_mem->rx_control.length;
if (consumed < (size + part)) {
/* Rewind over the unconsumed (partial-frame) bytes. */
read -= ((size + part) - consumed);
if (read < 0)
read += ccmni->shared_mem->rx_control.length;
}
ccmni->shared_mem->rx_control.read = read;
/* Send an acknowledgment back to modem side. */
CCCI_CCMNI_MSG(md_id, "CCMNI%d_read to write mailbox(ch%d, tty%d)\n",
ccmni->channel, ccmni->uart_rx_ack,
CCMNI_CHANNEL_OFFSET + ccmni->channel);
msg.magic = 0xFFFFFFFF;
msg.id = CCMNI_CHANNEL_OFFSET + ccmni->channel;
msg.channel = ccmni->uart_rx_ack;
msg.reserved = 0;
ret = ccci_message_send(md_id, &msg, 1);
if (ret == -CCCI_ERR_CCIF_NO_PHYSICAL_CHANNEL) {
/* CCIF busy: let the retry timer deliver the ack. */
set_bit(CCMNI_RECV_ACK_PENDING, &ccmni->flags);
mod_timer(&ccmni->timer, jiffies);
} else if (ret == sizeof(struct ccci_msg_t))
clear_bit(CCMNI_RECV_ACK_PENDING, &ccmni->flags);
out:
spin_unlock_bh(&ccmni->spinlock);
CCCI_CCMNI_MSG(md_id, "CCMNI%d_read invoke wake_lock_timeout(1s)\n",
ccmni->channel);
/* Hold a wake lock for 1s so the packets are processed before suspend. */
wake_lock_timeout(&ctl_b->ccmni_wake_lock, HZ);
}
  361. /* will be called when modem sends us something. */
  362. /* we will then copy it to the tty's buffer. */
  363. /* this is essentially the "read" fops. */
  364. static void ccmni_callback(void *private)
  365. {
  366. struct logic_channel_info_t *ch_info = (struct logic_channel_info_t *) private;
  367. struct ccmni_instance_t *ccmni = (struct ccmni_instance_t *) (ch_info->m_owner);
  368. struct ccci_msg_t msg;
  369. while (get_logic_ch_data(ch_info, &msg)) {
  370. switch (msg.channel) {
  371. case CCCI_CCMNI1_TX_ACK:
  372. case CCCI_CCMNI2_TX_ACK:
  373. case CCCI_CCMNI3_TX_ACK:
  374. /* this should be in an interrupt, */
  375. /* so no locking required... */
  376. ccmni->ready = 1;
  377. netif_wake_queue(ccmni->dev);
  378. break;
  379. case CCCI_CCMNI1_RX:
  380. case CCCI_CCMNI2_RX:
  381. case CCCI_CCMNI3_RX:
  382. /* ccmni_read_tasklet2.data = (unsigned long) private_data; */
  383. /* tasklet_schedule(&ccmni_read_tasklet); */
  384. tasklet_schedule(&ccmni->tasklet);
  385. break;
  386. default:
  387. break;
  388. }
  389. }
  390. }
/*
 * Copy one framed packet (frame_list[0] produced by pfp_frame()) into the
 * TX ring shared with the modem, then notify the modem with a CCCI
 * message.  If no CCIF channel is free the notification is deferred to
 * timer_func() via CCMNI_SEND_PENDING.
 *
 * NOTE(review): if the frame is larger than the free ring space it is
 * silently truncated to 'size' bytes - presumably the caller's space
 * check in ccmni_start_xmit() prevents this; confirm.
 * Caller holds ccmni->spinlock (bh).
 */
static void ccmni_write(struct ccmni_instance_t *ccmni, struct frame_info_t *frame_info)
{
int size, over, total;
int ret;
unsigned read, write, length, len;
unsigned tmp_write;
unsigned char *ptr;
struct ccci_msg_t msg;
char *tx_buffer;
struct ccmni_v1_ctl_block_t *ctl_b = (struct ccmni_v1_ctl_block_t *) ccmni->owner;
int md_id = ctl_b->m_md_id;
size = 0;
ptr = (unsigned char *)frame_info->frame_list[0].frame_data;
len = frame_info->frame_list[0].frame_size;
read = ccmni->shared_mem->tx_control.read;
write = ccmni->shared_mem->tx_control.write;
length = ccmni->shared_mem->tx_control.length;
over = length - write;  /* contiguous bytes until the ring end */
/* TX ring sits right after the RX region in the shared buffer. */
tx_buffer = ccmni->shared_mem->buffer + length;
/* Free space in the ring. */
if (read == write) {
size = length;
} else if (read < write) {
size = length - write;
size += read;
} else {
size = read - write;
}
if (len > size) {
len = size;
total = size;
}
/* NOTE(review): this overwrites the 'total = size' above; both end up
 * equal to the (possibly clamped) len, so behavior is unchanged. */
total = len;
/* Wrap-around: fill up to the ring end, then continue at offset 0. */
if (over < len) {
memcpy(&tx_buffer[write], (void *)ptr, over);
len -= over;
ptr += over;
write = 0;
}
memcpy(&tx_buffer[write], (void *)ptr, len);
/* wait memory updated */
mb();
tmp_write = write + len;
if (tmp_write >= length)
tmp_write -= length;
ccmni->shared_mem->tx_control.write = tmp_write;
/* ccmni->ready = 0; */
len = total;
msg.addr = 0;
msg.len = len;
msg.channel = ccmni->uart_tx;
msg.reserved = 0;
ret = ccci_message_send(md_id, &msg, 1);
if (ret == -CCCI_ERR_CCIF_NO_PHYSICAL_CHANNEL) {
/* CCIF busy: accumulate the length and let the timer notify. */
set_bit(CCMNI_SEND_PENDING, &ccmni->flags);
ccmni->send_len += len;
mod_timer(&ccmni->timer, jiffies);
} else if (ret == sizeof(struct ccci_msg_t))
clear_bit(CCMNI_SEND_PENDING, &ccmni->flags);
}
/*
 * ndo_start_xmit: frame one IP packet with pfp_frame() and copy it into
 * the TX ring shared with the modem.
 *
 * Returns NETDEV_TX_OK when the packet was queued (or dropped for being
 * oversized), and NETDEV_TX_BUSY - with the queue stopped - when the
 * modem is not ready or the ring lacks room for skb->len + 4 framing
 * bytes; in the busy case the skb is NOT freed so the stack retries it.
 */
static int ccmni_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
int ret = NETDEV_TX_OK;
int size;
unsigned int read, write, length;
struct frame_info_t frame_info;
struct ccmni_instance_t *ccmni = netdev_priv(dev);
struct ccmni_v1_ctl_block_t *ctl_b = (struct ccmni_v1_ctl_block_t *) ccmni->owner;
int md_id = ctl_b->m_md_id;
spin_lock_bh(&ccmni->spinlock);
if (ctl_b->ccci_is_ready == 0) {
CCCI_DBG_MSG(md_id, "net",
"CCMNI%d transfer data fail when modem not ready\n",
ccmni->channel);
ret = NETDEV_TX_BUSY;
goto _ccmni_start_xmit_busy;
}
read = ccmni->shared_mem->tx_control.read;
write = ccmni->shared_mem->tx_control.write;
length = ccmni->shared_mem->tx_control.length;
/* Free space left in the TX ring. */
size = read - write;
CCCI_CCMNI_MSG(md_id,
"CCMNI%d_start_xmit: skb_len=%d, size=%d, ccmni_ready=%d\n",
ccmni->channel, skb->len, size, ccmni->ready);
if (size <= 0)
size += length;
if (skb->len > CCMNI_MTU) {
/* Sanity check; this should not happen! */
/* Digest and return OK. */
CCCI_DBG_MSG(md_id, "net",
"CCMNI%d packet size exceed 1500 bytes: size=%d\n",
ccmni->channel, skb->len);
dev->stats.tx_dropped++;
goto _ccmni_start_xmit_exit;
}
/* Keep one byte unused so a full ring is distinguishable from empty. */
if (size >= 1)
size -= 1;
else
CCCI_DBG_MSG(md_id, "net", "CCMNI%d size is Zero(1)\n",
ccmni->channel);
if (size < (skb->len + 4)) {
/* The TX buffer is full, or its not ready yet, */
/* we should stop the net queue for the moment. */
CCCI_DBG_MSG(md_id, "net",
"CCMNI%d TX busy and stop queue: size=%d, skb->len=%d\n",
ccmni->channel, size, skb->len);
CCCI_DBG_MSG(md_id, "net", " TX read = %d write = %d\n",
ccmni->shared_mem->tx_control.read,
ccmni->shared_mem->tx_control.write);
CCCI_DBG_MSG(md_id, "net", " RX read = %d write = %d\n",
ccmni->shared_mem->rx_control.read,
ccmni->shared_mem->rx_control.write);
netif_stop_queue(ccmni->dev);
/* Set CCMNI ready to ZERO, and wait for the ACK from modem side. */
ccmni->ready = 0;
ret = NETDEV_TX_BUSY;
goto _ccmni_start_xmit_busy;
}
frame_info =
pfp_frame(ccmni->write_buffer, skb->data, skb->len, FRAME_START,
ccmni->channel);
ccmni_write(ccmni, &frame_info);
dev->stats.tx_packets++;
dev->stats.tx_bytes += skb->len;
_ccmni_start_xmit_exit:
dev_kfree_skb(skb);
_ccmni_start_xmit_busy:
spin_unlock_bh(&ccmni->spinlock);
return ret;
}
  521. static int ccmni_open(struct net_device *dev)
  522. {
  523. struct ccmni_instance_t *ccmni = netdev_priv(dev);
  524. struct ccmni_v1_ctl_block_t *ctl_b = (struct ccmni_v1_ctl_block_t *) ccmni->owner;
  525. int md_id = ctl_b->m_md_id;
  526. CCCI_MSG_INF(md_id, "net", "CCMNI%d open\n", ccmni->channel);
  527. if (ctl_b->ccci_is_ready == 0) {
  528. CCCI_MSG_INF(md_id, "net",
  529. "CCMNI%d open fail when modem not ready\n",
  530. ccmni->channel);
  531. return -EIO;
  532. }
  533. netif_start_queue(dev);
  534. return 0;
  535. }
  536. static int ccmni_close(struct net_device *dev)
  537. {
  538. struct ccmni_instance_t *ccmni = netdev_priv(dev);
  539. struct ccmni_v1_ctl_block_t *ctl_b = (struct ccmni_v1_ctl_block_t *) ccmni->owner;
  540. CCCI_MSG_INF(ctl_b->m_md_id, "net", "CCMNI%d close\n", ccmni->channel);
  541. netif_stop_queue(dev);
  542. return 0;
  543. }
  544. static int ccmni_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
  545. {
  546. /* No implementation at this moment. */
  547. /* This is a place holder. */
  548. return 0;
  549. }
  550. static void ccmni_tx_timeout(struct net_device *dev)
  551. {
  552. /* No implementation at this moment. */
  553. /* This is a place holder. */
  554. dev->stats.tx_errors++;
  555. netif_wake_queue(dev);
  556. }
/* net_device callbacks shared by all CCMNI interfaces. */
static const struct net_device_ops ccmni_netdev_ops = {
.ndo_open = ccmni_open,
.ndo_stop = ccmni_close,
.ndo_start_xmit = ccmni_start_xmit,
.ndo_do_ioctl = ccmni_net_ioctl,
.ndo_tx_timeout = ccmni_tx_timeout,
};
  564. static void ccmni_setup(struct net_device *dev)
  565. {
  566. int retry = 10;
  567. ether_setup(dev);
  568. dev->header_ops = NULL;
  569. dev->netdev_ops = &ccmni_netdev_ops;
  570. dev->flags = IFF_NOARP & (~IFF_BROADCAST & ~IFF_MULTICAST);
  571. dev->mtu = CCMNI_MTU;
  572. dev->tx_queue_len = CCMNI_TX_QUEUE;
  573. dev->addr_len = ETH_ALEN;
  574. dev->destructor = free_netdev;
  575. while (retry-- > 0) {
  576. random_ether_addr((u8 *) dev->dev_addr);
  577. if (is_mac_addr_duplicate((u8 *) dev->dev_addr))
  578. continue;
  579. else
  580. break;
  581. }
  582. }
  583. static int ccmni_create_instance(int md_id, int channel)
  584. {
  585. int ret, size;
  586. int uart_rx, uart_rx_ack;
  587. int uart_tx, uart_tx_ack;
  588. struct ccmni_instance_t *ccmni;
  589. struct net_device *dev = NULL;
  590. struct ccmni_v1_ctl_block_t *ctl_b =
  591. (struct ccmni_v1_ctl_block_t *) ccmni_ctl_block[md_id];
  592. /* Network device creation and registration. */
  593. dev = alloc_netdev(sizeof(struct ccmni_instance_t), "eth%d", NET_NAME_UNKNOWN, ccmni_setup);
  594. if (dev == NULL) {
  595. CCCI_MSG_INF(md_id, "net", "CCMNI%d allocate netdev fail!\n",
  596. channel);
  597. return -ENOMEM;
  598. }
  599. ccmni = netdev_priv(dev);
  600. ccmni->dev = dev;
  601. ccmni->channel = channel;
  602. ccmni->owner = ccmni_ctl_block[md_id];
  603. if (md_id == MD_SYS1) {
  604. sprintf(dev->name, "ccmni%d", channel);
  605. } else {
  606. sprintf(dev->name, "cc%dmni%d", md_id + 1, channel);
  607. /* sprintf(dev->name, "ccmni%d", channel); */
  608. }
  609. ret = register_netdev(dev);
  610. if (ret != 0) {
  611. CCCI_MSG_INF(md_id, "net", "CCMNI%d register netdev fail: %d\n",
  612. ccmni->channel, ret);
  613. goto _ccmni_create_instance_exit;
  614. }
  615. /* CCCI channel registration. */
  616. ccci_uart_base_req(md_id, CCMNI_UART_OFFSET + ccmni->channel,
  617. (int *)&ccmni->shared_mem, &ccmni->shared_mem_phys_addr, &size);
  618. if (ccmni->shared_mem == NULL) {
  619. CCCI_MSG_INF(md_id, "net", "CCMNI%d allocate memory fail\n",
  620. ccmni->channel);
  621. unregister_netdev(dev);
  622. ret = -ENOMEM;
  623. goto _ccmni_create_instance_exit;
  624. }
  625. CCCI_CCMNI_MSG(md_id, "0x%08X:0x%08X:%d\n",
  626. (unsigned int)ccmni->shared_mem,
  627. (unsigned int)ccmni->shared_mem_phys_addr, size);
  628. ccmni->shared_mem->tx_control.length = CCCI1_CCMNI_BUF_SIZE;
  629. ccmni->shared_mem->tx_control.read = 0;
  630. ccmni->shared_mem->tx_control.write = 0;
  631. ccmni->shared_mem->rx_control.length = CCCI1_CCMNI_BUF_SIZE;
  632. ccmni->shared_mem->rx_control.read = 0;
  633. ccmni->shared_mem->rx_control.write = 0;
  634. switch (ccmni->channel) {
  635. case 0:
  636. uart_rx = CCCI_CCMNI1_RX;
  637. uart_rx_ack = CCCI_CCMNI1_RX_ACK;
  638. uart_tx = CCCI_CCMNI1_TX;
  639. uart_tx_ack = CCCI_CCMNI1_TX_ACK;
  640. break;
  641. case 1:
  642. uart_rx = CCCI_CCMNI2_RX;
  643. uart_rx_ack = CCCI_CCMNI2_RX_ACK;
  644. uart_tx = CCCI_CCMNI2_TX;
  645. uart_tx_ack = CCCI_CCMNI2_TX_ACK;
  646. break;
  647. case 2:
  648. uart_rx = CCCI_CCMNI3_RX;
  649. uart_rx_ack = CCCI_CCMNI3_RX_ACK;
  650. uart_tx = CCCI_CCMNI3_TX;
  651. uart_tx_ack = CCCI_CCMNI3_TX_ACK;
  652. break;
  653. default:
  654. CCCI_MSG_INF(md_id, "net",
  655. "[Error]CCMNI%d Invalid ccmni number\n",
  656. ccmni->channel);
  657. unregister_netdev(dev);
  658. ret = -ENOSYS;
  659. goto _ccmni_create_instance_exit;
  660. }
  661. ccmni->m_md_id = md_id;
  662. ccmni->uart_rx = uart_rx;
  663. ccmni->uart_rx_ack = uart_rx_ack;
  664. ccmni->uart_tx = uart_tx;
  665. ccmni->uart_tx_ack = uart_tx_ack;
  666. /* Register this ccmni instance to the ccci driver. */
  667. /* pass it the notification handler. */
  668. register_to_logic_ch(md_id, uart_rx, ccmni_callback, (void *)ccmni);
  669. register_to_logic_ch(md_id, uart_tx_ack, ccmni_callback, (void *)ccmni);
  670. /* Initialize the spinlock. */
  671. spin_lock_init(&ccmni->spinlock);
  672. setup_timer(&ccmni->timer, timer_func, (unsigned long)ccmni);
  673. /* Initialize the tasklet. */
  674. tasklet_init(&ccmni->tasklet, ccmni_read, (unsigned long)ccmni);
  675. ctl_b->ccmni_instance[channel] = ccmni;
  676. ccmni->ready = 1;
  677. ccmni->net_if_off = 0;
  678. return ret;
  679. _ccmni_create_instance_exit:
  680. free_netdev(dev);
  681. kfree(ccmni);
  682. ctl_b->ccmni_instance[channel] = NULL;
  683. return ret;
  684. }
/*
 * Undo ccmni_create_instance() for one port: detach from the CCCI
 * logical channels, drop the shared-memory references (the memory itself
 * is owned by the CCCI layer) and unregister the net device.  The device
 * memory is released by its destructor (free_netdev) on unregister,
 * hence the commented-out explicit free below.
 */
static void ccmni_destroy_instance(int md_id, int channel)
{
struct ccmni_v1_ctl_block_t *ctl_b =
(struct ccmni_v1_ctl_block_t *) ccmni_ctl_block[md_id];
struct ccmni_instance_t *ccmni = ctl_b->ccmni_instance[channel];
if (ccmni != NULL) {
ccmni->ready = 0;
un_register_to_logic_ch(md_id, ccmni->uart_rx);
un_register_to_logic_ch(md_id, ccmni->uart_tx_ack);
if (ccmni->shared_mem != NULL) {
ccmni->shared_mem = NULL;
ccmni->shared_mem_phys_addr = 0;
}
if (ccmni->dev != NULL) {
unregister_netdev(ccmni->dev);
/* free_netdev(ccmni->dev); */
}
/* tasklet_kill(&ccmni->tasklet); */
ctl_b->ccmni_instance[channel] = NULL;
}
}
  706. int ccmni_v1_init(int md_id)
  707. {
  708. int count, ret, curr;
  709. struct ccmni_v1_ctl_block_t *ctl_b;
  710. /* Create control block structure */
  711. ctl_b = kmalloc(sizeof(struct ccmni_v1_ctl_block_t), GFP_KERNEL);
  712. if (ctl_b == NULL)
  713. return -CCCI_ERR_GET_MEM_FAIL;
  714. memset(ctl_b, 0, sizeof(struct ccmni_v1_ctl_block_t));
  715. ccmni_ctl_block[md_id] = ctl_b;
  716. /* Init ctl_b */
  717. ctl_b->m_md_id = md_id;
  718. ctl_b->ccmni_notifier.call = ccmni_notifier_call;
  719. ctl_b->ccmni_notifier.next = NULL;
  720. for (count = 0; count < CCMNI_V1_PORT_NUM; count++) {
  721. ret = ccmni_create_instance(md_id, count);
  722. if (ret != 0) {
  723. CCCI_MSG_INF(md_id, "net",
  724. "CCMNI%d create instance fail: %d\n",
  725. count, ret);
  726. goto _CCMNI_INSTANCE_CREATE_FAIL;
  727. }
  728. }
  729. ret = md_register_call_chain(md_id, &ctl_b->ccmni_notifier);
  730. if (ret) {
  731. CCCI_MSG_INF(md_id, "net", "md_register_call_chain fail: %d\n",
  732. ret);
  733. goto _CCMNI_INSTANCE_CREATE_FAIL;
  734. }
  735. snprintf(ctl_b->wakelock_name, sizeof(ctl_b->wakelock_name),
  736. "ccci%d_net_v1", (md_id + 1));
  737. wake_lock_init(&ctl_b->ccmni_wake_lock, WAKE_LOCK_SUSPEND,
  738. ctl_b->wakelock_name);
  739. return ret;
  740. _CCMNI_INSTANCE_CREATE_FAIL:
  741. for (curr = 0; curr <= count; curr++)
  742. ccmni_destroy_instance(md_id, curr);
  743. kfree(ctl_b);
  744. ccmni_ctl_block[md_id] = NULL;
  745. return ret;
  746. }
  747. void ccmni_v1_exit(int md_id)
  748. {
  749. int count;
  750. struct ccmni_v1_ctl_block_t *ctl_b =
  751. (struct ccmni_v1_ctl_block_t *) ccmni_ctl_block[md_id];
  752. if (ctl_b) {
  753. for (count = 0; count < CCMNI_V1_PORT_NUM; count++)
  754. ccmni_destroy_instance(md_id, count);
  755. md_unregister_call_chain(md_id, &ctl_b->ccmni_notifier);
  756. wake_lock_destroy(&ctl_b->ccmni_wake_lock);
  757. kfree(ctl_b);
  758. ccmni_ctl_block[md_id] = NULL;
  759. }
  760. }