/*****************************************************************************
 *
 * Filename:
 * ---------
 * ccmni_v2_net.c
 *
 * Project:
 * --------
 *
 * Description:
 * ------------
 *
 * Author:
 * -------
 *
 ****************************************************************************/
#include <ccci.h>
#include <linux/sockios.h>

#define CCMNI_DBG_INFO 1
#define SIOCSTXQSTATE (SIOCDEVPRIVATE + 0)

struct ccmni_v2_instance_t {
	int channel;
	int m_md_id;
	int uart_rx;
	int uart_rx_ack;
	int uart_tx;
	int uart_tx_ack;
	int ready;
	int net_if_off;
	int log_count;
	unsigned long flags;
	struct timer_list timer;
	unsigned long send_len;
	struct net_device *dev;
	struct wake_lock wake_lock;
	spinlock_t spinlock;
	atomic_t usage;
	struct shared_mem_ccmni_t *shared_mem;
	int shared_mem_phys_addr;
	unsigned char mac_addr[ETH_ALEN];
	struct tasklet_struct tasklet;
	void *owner;
};

struct ccmni_v2_ctl_block_t {
	int m_md_id;
	int ccci_is_ready;
	struct ccmni_v2_instance_t *ccmni_v2_instance[CCMNI_V2_PORT_NUM];
	struct wake_lock ccmni_wake_lock;
	char wakelock_name[16];
	struct MD_CALL_BACK_QUEUE ccmni_notifier;
};

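/*
 * Each CCMNI port shares a ring-buffer control block with the modem:
 * read_out is the next slot the AP will consume, avai_out the next slot
 * the modem will fill, and avai_in the last slot the AP has recycled
 * back to the modem. All indices wrap at q_length, which the masking
 * below (q_length - 1) requires to be a power of two. For example, with
 * q_length = 16, read_out = 14 and avai_out = 2, ccmni_v2_read() sees
 * (2 - 14 + 16) = 4 packets pending.
 */
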
static void ccmni_v2_read(unsigned long arg);

static void ccmni_make_etherframe(void *_eth_hdr, u8 *mac_addr,
				  int packet_type)
{
	struct ethhdr *eth_hdr = _eth_hdr;

	memcpy(eth_hdr->h_dest, mac_addr, sizeof(eth_hdr->h_dest));
	memset(eth_hdr->h_source, 0, sizeof(eth_hdr->h_source));
	if (packet_type == IPV6_VERSION)
		eth_hdr->h_proto = cpu_to_be16(ETH_P_IPV6);
	else
		eth_hdr->h_proto = cpu_to_be16(ETH_P_IP);
}

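/*
 * Translate a modem-side physical buffer address into an AP-side virtual
 * address. The virt/phys pair returned by ccmni_v2_ul_base_req() fixes a
 * constant linear offset, which evidently applies to the whole shared
 * region, and get_md2_ap_phy_addr_fixed() undoes the fixed MD-to-AP
 * remapping that was subtracted when the pointer was stored (see the
 * ring-buffer init below). Like the rest of this file, the arithmetic
 * assumes 32-bit pointers.
 */
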
static unsigned char *ccmni_v2_phys_to_virt(int md_id, unsigned char *addr_phy)
{
	int ccmni_rx_base_phy;
	int ccmni_rx_base_virt;
	int p_to_v_offset;

	ccmni_v2_ul_base_req(md_id, &ccmni_rx_base_virt, &ccmni_rx_base_phy);
	p_to_v_offset = (unsigned char *)(ccmni_rx_base_virt) -
			(unsigned char *)(ccmni_rx_base_phy);
	return p_to_v_offset + addr_phy + get_md2_ap_phy_addr_fixed();
}

void ccmni_v2_dump(int md_id)
{
#if 0
	int i = 0, port = 0;
	struct ccmni_v2_instance_t *ccmni;
	struct ccmni_v2_ctl_block_t *ctl_b =
		(struct ccmni_v2_ctl_block_t *)ccmni_ctl_block[md_id];

	CCCI_MSG_INF(md_id, "ctl", "ccmni v2 dump start\n");
	for (port = 0; port < CCMNI_V2_PORT_NUM; port++) {
		ccmni = ctl_b->ccmni_v2_instance[port];
		CCCI_MSG_INF(md_id, "ctl",
			     "Port%d RX CONTROL: read_out=%d, avai_out=%d, avai_in=%d, q_len=%d\n",
			     port, ccmni->shared_mem->rx_control.read_out,
			     ccmni->shared_mem->rx_control.avai_out,
			     ccmni->shared_mem->rx_control.avai_in,
			     ccmni->shared_mem->rx_control.q_length);
		CCCI_MSG_INF(md_id, "ctl", "Port%d RX ringbuff:\n", port);
		for (i = 0; i < CCMNI_CTRL_Q_RX_SIZE; i++) {
			if (ccmni->shared_mem->q_rx_ringbuff[i].ptr != NULL
			    && ccmni->shared_mem->q_rx_ringbuff[i].len != 0)
				CCCI_MSG_INF(md_id, "ctl",
					     "[%d]: ptr=%08X len=%d\n", i,
					     (int)(ccmni->shared_mem->q_rx_ringbuff[i].ptr),
					     ccmni->shared_mem->q_rx_ringbuff[i].len);
		}
		CCCI_MSG_INF(md_id, "ctl",
			     "Port%d TX CONTROL: read_out=%d, avai_out=%d, avai_in=%d, q_len=%d\n",
			     port, ccmni->shared_mem->tx_control.read_out,
			     ccmni->shared_mem->tx_control.avai_out,
			     ccmni->shared_mem->tx_control.avai_in,
			     ccmni->shared_mem->tx_control.q_length);
		CCCI_MSG_INF(md_id, "ctl", "Port%d TX ringbuff:\n", port);
		for (i = 0; i < CCMNI_CTRL_Q_TX_SIZE; i++) {
			if (ccmni->shared_mem->q_tx_ringbuff[i].ptr != NULL
			    && ccmni->shared_mem->q_tx_ringbuff[i].len != 0)
				CCCI_MSG_INF(md_id, "ctl",
					     "[%d]: ptr=%08X len=%d\n", i,
					     (int)(ccmni->shared_mem->q_tx_ringbuff[i].ptr),
					     ccmni->shared_mem->q_tx_ringbuff[i].len);
		}
	}
	CCCI_MSG_INF(md_id, "ctl", "ccmni v2 dump end\n");
#endif
}

static void ccmni_v2_reset_buffers(struct ccmni_v2_instance_t *ccmni)
{
	int *ccmni_rx_base_phy;
	int *ccmni_rx_base_virt;
	unsigned char *ptr_virt;
	int md_id;
	int count;
#if CCMNI_DBG_INFO
	struct dbg_info_ccmni_t *dbg_info;
#endif

	if (ccmni == NULL) {
		CCCI_MSG("[Error]CCMNI V2 get NULL pointer for buffer reset\n");
		return;
	}
	md_id = ccmni->m_md_id;
	/* DL -- RX */
	ccmni->shared_mem->rx_control.read_out = 0;
	ccmni->shared_mem->rx_control.avai_out = 0;
	ccmni->shared_mem->rx_control.avai_in = CCMNI_CTRL_Q_RX_SIZE_DEFAULT - 1;
	ccmni->shared_mem->rx_control.q_length = CCMNI_CTRL_Q_RX_SIZE;
	/* UL -- TX */
	memset(&ccmni->shared_mem->tx_control, 0,
	       sizeof(struct buffer_control_ccmni_t));
	memset(ccmni->shared_mem->q_tx_ringbuff, 0,
	       sizeof(struct q_ringbuf_ccmni_t) * CCMNI_CTRL_Q_TX_SIZE);
	memset(ccmni->shared_mem->q_rx_ringbuff, 0,
	       ccmni->shared_mem->rx_control.q_length *
	       sizeof(struct q_ringbuf_ccmni_t));
	ccmni_v2_dl_base_req(md_id, &ccmni_rx_base_virt, &ccmni_rx_base_phy);
	/* Each channel has 100 RX buffers by default */
	for (count = 0; count < CCMNI_CTRL_Q_RX_SIZE_DEFAULT; count++) {
		ccmni->shared_mem->q_rx_ringbuff[count].ptr =
			(CCMNI_CTRL_Q_RX_SIZE_DEFAULT * ccmni->channel + count) *
			CCMNI_SINGLE_BUFF_SIZE +
			(unsigned char *)ccmni_rx_base_phy +
			CCMNI_BUFF_HEADER_SIZE + CCMNI_BUFF_DBG_INFO_SIZE -
			get_md2_ap_phy_addr_fixed();
		ptr_virt = ccmni_v2_phys_to_virt(md_id,
				(unsigned char *)(ccmni->shared_mem->q_rx_ringbuff[count].ptr));
		/* Buffer header and footer init. */
		/* Assumes int is 32 bit; may need further modification! */
		*((int *)(ptr_virt - CCMNI_BUFF_HEADER_SIZE)) = CCMNI_BUFF_HEADER;
		*((int *)(ptr_virt + CCMNI_BUFF_DATA_FIELD_SIZE)) = CCMNI_BUFF_FOOTER;
#if CCMNI_DBG_INFO
		/* debug info */
		dbg_info = (struct dbg_info_ccmni_t *)(ptr_virt -
				CCMNI_BUFF_HEADER_SIZE - CCMNI_BUFF_DBG_INFO_SIZE);
		dbg_info->port = ccmni->channel;
		dbg_info->avai_in_no = count;
#endif
	}
	CCCI_MSG("ccmni_v2_reset_buffers\n");
}

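/*
 * Inferred layout of a single RX buffer slot, from the constants used
 * above: a debug-info block, a 32-bit header magic, the data field of
 * CCMNI_BUFF_DATA_FIELD_SIZE bytes, then a 32-bit footer magic. The
 * stored .ptr points at the data field; everything else is reached by
 * fixed offsets:
 *
 *   [dbg info][CCMNI_BUFF_HEADER][data ... CCMNI_DATA_END][CCMNI_BUFF_FOOTER]
 *              ^ ptr - HEADER_SIZE ^ ptr                   ^ ptr + DATA_FIELD_SIZE
 */
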
int ccmni_v2_ipo_h_restore(int md_id)
{
	struct ccmni_v2_ctl_block_t *ctlb;
	int i;

	ctlb = ccmni_ctl_block[md_id];
	for (i = 0; i < CCMNI_V2_PORT_NUM; i++)
		ccmni_v2_reset_buffers(ctlb->ccmni_v2_instance[i]);
	return 0;
}

static void reset_ccmni_v2_instance_buffer(struct ccmni_v2_instance_t *ccmni_v2_instance)
{
	unsigned long flags;

	spin_lock_irqsave(&ccmni_v2_instance->spinlock, flags);
	ccmni_v2_reset_buffers(ccmni_v2_instance);
	spin_unlock_irqrestore(&ccmni_v2_instance->spinlock, flags);
}

static void stop_ccmni_v2_instance(struct ccmni_v2_instance_t *ccmni_v2_instance)
{
	unsigned long flags;

	spin_lock_irqsave(&ccmni_v2_instance->spinlock, flags);
	if (ccmni_v2_instance->net_if_off == 0) {
		ccmni_v2_instance->net_if_off = 1;
		netif_carrier_off(ccmni_v2_instance->dev);
		del_timer(&ccmni_v2_instance->timer);
	}
	spin_unlock_irqrestore(&ccmni_v2_instance->spinlock, flags);
}

static void restore_ccmni_v2_instance(struct ccmni_v2_instance_t *ccmni_v2_instance)
{
	unsigned long flags;

	spin_lock_irqsave(&ccmni_v2_instance->spinlock, flags);
	if (ccmni_v2_instance->net_if_off) {
		ccmni_v2_instance->net_if_off = 0;
		netif_carrier_on(ccmni_v2_instance->dev);
	}
	spin_unlock_irqrestore(&ccmni_v2_instance->spinlock, flags);
}

static void ccmni_v2_notifier_call(struct MD_CALL_BACK_QUEUE *notifier,
				   unsigned long val)
{
	int i = 0;
	struct ccmni_v2_ctl_block_t *ctl_b =
		container_of(notifier, struct ccmni_v2_ctl_block_t, ccmni_notifier);
	struct ccmni_v2_instance_t *instance;

	switch (val) {
	case CCCI_MD_EXCEPTION:
		ctl_b->ccci_is_ready = 0;
		for (i = 0; i < CCMNI_V2_PORT_NUM; i++) {
			instance = ctl_b->ccmni_v2_instance[i];
			if (instance)
				stop_ccmni_v2_instance(instance);
		}
		break;
	case CCCI_MD_STOP:
		for (i = 0; i < CCMNI_V2_PORT_NUM; i++) {
			instance = ctl_b->ccmni_v2_instance[i];
			if (instance)
				stop_ccmni_v2_instance(instance);
		}
		break;
	case CCCI_MD_RESET:
		ctl_b->ccci_is_ready = 0;
		for (i = 0; i < CCMNI_V2_PORT_NUM; i++) {
			instance = ctl_b->ccmni_v2_instance[i];
			if (instance)
				reset_ccmni_v2_instance_buffer(instance);
		}
		break;
	case CCCI_MD_BOOTUP:
		if (ctl_b->ccci_is_ready == 0) {
			ctl_b->ccci_is_ready = 1;
			for (i = 0; i < CCMNI_V2_PORT_NUM; i++) {
				instance = ctl_b->ccmni_v2_instance[i];
				if (instance)
					restore_ccmni_v2_instance(instance);
			}
		}
		break;
	default:
		break;
	}
}

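/*
 * Deferred-work timer. ccci_message_send() can fail with
 * -CCCI_ERR_CCIF_NO_PHYSICAL_CHANNEL when all CCIF mailboxes are busy;
 * the RX ack or TX notification is then flagged as pending and this
 * timer retries it a couple of jiffies later, rearming itself until the
 * message goes out. A pending RX pass is simply handed back to the
 * tasklet.
 */
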
static void timer_func(unsigned long data)
{
	struct ccmni_v2_instance_t *ccmni = (struct ccmni_v2_instance_t *)data;
	int contin = 0;
	int ret = 0;
	struct ccci_msg_t msg;
	struct ccmni_v2_ctl_block_t *ctl_b = (struct ccmni_v2_ctl_block_t *)ccmni->owner;
	int md_id;

	/* Check the owner pointer before using it (the original code read
	 * ctl_b->m_md_id first and only tested for NULL afterwards). */
	if (ctl_b == NULL)
		return;
	md_id = ctl_b->m_md_id;
	spin_lock_bh(&ccmni->spinlock);
	if (test_bit(CCMNI_RECV_ACK_PENDING, &ccmni->flags)) {
		msg.magic = 0;
		msg.id = CCMNI_CHANNEL_OFFSET + ccmni->channel;
		msg.channel = ccmni->uart_rx_ack;
		msg.reserved = 0;
		ret = ccci_message_send(md_id, &msg, 1);
		if (ret == -CCCI_ERR_CCIF_NO_PHYSICAL_CHANNEL)
			contin = 1;
		else
			clear_bit(CCMNI_RECV_ACK_PENDING, &ccmni->flags);
	}
	if (test_bit(CCMNI_SEND_PENDING, &ccmni->flags)) {
		msg.magic = 0;
		msg.id = ccmni->send_len;
		msg.channel = ccmni->uart_tx;
		msg.reserved = 0;
		ret = ccci_message_send(md_id, &msg, 1);
		if (ret == -CCCI_ERR_CCIF_NO_PHYSICAL_CHANNEL)
			contin = 1;
		else {
			clear_bit(CCMNI_SEND_PENDING, &ccmni->flags);
			ccmni->send_len = 0;
		}
	}
	if (test_bit(CCMNI_RECV_PENDING, &ccmni->flags))
		tasklet_schedule(&ccmni->tasklet);
	spin_unlock_bh(&ccmni->spinlock);
	if (contin)
		mod_timer(&ccmni->timer, jiffies + 2);
}

static int ccmni_v2_check_info(int md_id, int ch,
			       const unsigned char *ccmni_ptr, int ccmni_len)
{
	int ret = 0;

	if ((ccmni_ptr == NULL) || (ccmni_len <= 0)) {
		CCCI_MSG_INF(md_id, "net",
			     "CCMNI%d_check_info() invalid ptr or len!\n", ch);
		ret = -CCCI_ERR_INVALID_PARAM;
		goto check_info_error;
	}
	/* Check header and footer */
	if ((*(int *)(ccmni_ptr - CCMNI_BUFF_HEADER_SIZE) != CCMNI_BUFF_HEADER)
	    || (*(int *)(ccmni_ptr + CCMNI_BUFF_DATA_FIELD_SIZE) !=
		CCMNI_BUFF_FOOTER)) {
		CCCI_MSG_INF(md_id, "net",
			     "CCMNI%d_check_info() check header and footer error\n",
			     ch);
		ret = -CCCI_ERR_MEM_CHECK_FAIL;
		goto check_info_error;
	}
	/* Check end byte */
	if (*(unsigned char *)
	    ((unsigned int)(ccmni_ptr + ccmni_len + 3) & 0xfffffffc) !=
	    CCMNI_DATA_END) {
		CCCI_MSG_INF(md_id, "net",
			     "CCMNI%d_check_info() check end byte error\n", ch);
		ret = -CCCI_ERR_MEM_CHECK_FAIL;
		goto check_info_error;
	}
	ret = 0;
check_info_error:
	return ret;
}

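/*
 * Note on the end-byte check above: (addr + 3) & ~3 rounds addr up to
 * the next 4-byte boundary, so the CCMNI_DATA_END marker is expected at
 * the first word-aligned address at or after ccmni_ptr + ccmni_len. For
 * example, data ending at 0x...1001 is checked at 0x...1004, while data
 * ending exactly at 0x...1000 is checked in place.
 */
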
static int ccmni_v2_receive(struct ccmni_v2_instance_t *ccmni,
			    const unsigned char *ccmni_ptr, int ccmni_len)
{
	int packet_type, ret = 0;
	struct sk_buff *skb;
	struct ccmni_v2_ctl_block_t *ctl_b;
	int md_id;

	/* Validate the arguments before dereferencing them (the original
	 * code logged through ccmni and then carried on using the bad
	 * pointers even after detecting the error). */
	if (ccmni == NULL)
		return -1;
	ctl_b = (struct ccmni_v2_ctl_block_t *)ccmni->owner;
	md_id = ctl_b->m_md_id;
	if ((ccmni_ptr == NULL) || (ccmni_len <= 0)) {
		CCCI_MSG_INF(md_id, "net",
			     "CCMNI%d_receive: invalid private data\n",
			     ccmni->channel);
		return -1;
	}
	skb = dev_alloc_skb(ccmni_len);
	if (skb) {
		packet_type = ccmni_ptr[0] & 0xF0;
		memcpy(skb_put(skb, ccmni_len), ccmni_ptr, ccmni_len);
		ccmni_make_etherframe(skb->data - ETH_HLEN,
				      ccmni->dev->dev_addr, packet_type);
		skb_set_mac_header(skb, -ETH_HLEN);
		skb->dev = ccmni->dev;
		if (packet_type == IPV6_VERSION)
			skb->protocol = htons(ETH_P_IPV6);
		else
			skb->protocol = htons(ETH_P_IP);
		/* skb->ip_summed = CHECKSUM_UNNECESSARY; */
		skb->ip_summed = CHECKSUM_NONE;
		ret = netif_rx(skb);
		CCCI_CCMNI_MSG(md_id, "CCMNI%d invoke netif_rx()=%d\n",
			       ccmni->channel, ret);
		ccmni->dev->stats.rx_packets++;
		ccmni->dev->stats.rx_bytes += ccmni_len;
		CCCI_CCMNI_MSG(md_id,
			       "CCMNI%d rx_pkts=%ld, stats_rx_bytes=%ld\n",
			       ccmni->channel, ccmni->dev->stats.rx_packets,
			       ccmni->dev->stats.rx_bytes);
		ret = 0;
	} else {
		CCCI_MSG_INF(md_id, "net",
			     "CCMNI%d socket buffer allocation fail\n",
			     ccmni->channel);
		ret = -CCCI_ERR_MEM_CHECK_FAIL;
	}
	return ret;
}

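/*
 * Tasklet body for the RX path. Walks the shared ring from read_out to
 * avai_out, translating each physical buffer pointer, validating the
 * header/footer/end-byte markers, and pushing the payload into the
 * network stack; each consumed slot is recycled to the modem by
 * advancing avai_in. When done, it acks the modem on the uart_rx_ack
 * channel (retried via the timer if no CCIF channel is free).
 */
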
static void ccmni_v2_read(unsigned long arg)
{
	int ret;
	int read_out, avai_out, avai_in, q_length;
	int packet_cnt, packet_cnt_save, consumed;
	int rx_buf_res_left_cnt;
#if CCMNI_DBG_INFO
	struct dbg_info_ccmni_t *dbg_info;
#endif
	struct ccmni_v2_instance_t *ccmni = (struct ccmni_v2_instance_t *)arg;
	unsigned char *ccmni_ptr;
	unsigned int ccmni_len, q_idx;
	struct ccmni_v2_ctl_block_t *ctl_b;
	int md_id;
	struct ccci_msg_t msg;

	/* Check the private data before dereferencing it (the original code
	 * read ccmni->owner and ccmni->channel before the NULL check). */
	if (ccmni == NULL) {
		CCCI_MSG("[Error]CCMNI_read: invalid private data\n");
		return;
	}
	ctl_b = (struct ccmni_v2_ctl_block_t *)ccmni->owner;
	md_id = ctl_b->m_md_id;
	spin_lock_bh(&ccmni->spinlock);
	if (ctl_b->ccci_is_ready == 0) {
		CCCI_DBG_MSG(md_id, "net",
			     "CCMNI%d_read fail when modem not ready\n",
			     ccmni->channel);
		goto out;
	}
	read_out = ccmni->shared_mem->rx_control.read_out;
	avai_out = ccmni->shared_mem->rx_control.avai_out;
	avai_in = ccmni->shared_mem->rx_control.avai_in;
	q_length = ccmni->shared_mem->rx_control.q_length;
	if ((read_out < 0) || (avai_out < 0) || (avai_in < 0) || (q_length < 0)) {
		CCCI_DBG_MSG(md_id, "net",
			     "CCMNI%d_read fail: avai_out=%d, read_out=%d, avai_in=%d, q_length=%d\n",
			     ccmni->channel, avai_out, read_out, avai_in,
			     q_length);
		goto out;
	}
	if ((read_out >= q_length) || (avai_out >= q_length)
	    || (avai_in >= q_length)) {
		CCCI_DBG_MSG(md_id, "net",
			     "CCMNI%d_read fail: avai_out=%d, read_out=%d, avai_in=%d, q_length=%d\n",
			     ccmni->channel, avai_out, read_out, avai_in,
			     q_length);
		goto out;
	}
	/* Number of packets waiting to be processed */
	packet_cnt = avai_out >= read_out ?
		     (avai_out - read_out) : (avai_out - read_out + q_length);
	packet_cnt_save = packet_cnt;
	rx_buf_res_left_cnt = avai_in >= avai_out ?
			      (avai_in - avai_out) : (avai_in - avai_out + q_length);
	if (packet_cnt <= 0) {
		CCCI_DBG_MSG(md_id, "net",
			     "CCMNI%d_read fail: nothing to read, avai_out=%d, read_out=%d, q_length=%d\n",
			     ccmni->channel, avai_out, read_out, q_length);
		goto out;
	}
	q_idx = read_out;
	CCCI_CCMNI_MSG(md_id,
		       "CCMNI%d_receive[Before]: avai_out=%d, read_out=%d, avai_in=%d, packet_cnt=%d\n",
		       ccmni->channel, avai_out, read_out, avai_in, packet_cnt);
	consumed = 0;
	for (; packet_cnt > 0; packet_cnt--) {
		q_idx &= q_length - 1;
		ccmni_ptr = ccmni_v2_phys_to_virt(md_id,
				(unsigned char *)(ccmni->shared_mem->q_rx_ringbuff[q_idx].ptr));
		ccmni_len = ccmni->shared_mem->q_rx_ringbuff[q_idx].len;
#if CCMNI_DBG_INFO
		/* DBG info */
		dbg_info = (struct dbg_info_ccmni_t *)(ccmni_ptr -
				CCMNI_BUFF_HEADER_SIZE - CCMNI_BUFF_DBG_INFO_SIZE);
#endif
		if (-CCCI_ERR_MEM_CHECK_FAIL ==
		    ccmni_v2_check_info(md_id, ccmni->channel, ccmni_ptr,
					ccmni_len)) {
			CCCI_DBG_MSG(md_id, "net",
				     "CCMNI%d_read: check info error, read_out=%d\n",
				     ccmni->channel, read_out);
#if CCMNI_DBG_INFO
			/* dbg_info->port = ccmni->channel; */
			dbg_info->avai_in_no = q_idx;
			/* dbg_info->avai_out_no = q_idx; */
			dbg_info->read_out_no = q_idx;
#endif
			avai_in++;
			avai_in &= q_length - 1;
			ccmni->shared_mem->q_rx_ringbuff[avai_in].ptr =
				ccmni->shared_mem->q_rx_ringbuff[q_idx].ptr;
			ccmni_ptr = ccmni_v2_phys_to_virt(md_id,
					(unsigned char *)(ccmni->shared_mem->q_rx_ringbuff[avai_in].ptr));
#if CCMNI_DBG_INFO
			dbg_info = (struct dbg_info_ccmni_t *)(ccmni_ptr -
					CCMNI_BUFF_HEADER_SIZE - CCMNI_BUFF_DBG_INFO_SIZE);
			dbg_info->avai_in_no = avai_in;
#endif
			q_idx++;
			consumed++;
			continue;
		}
		ret = ccmni_v2_receive(ccmni, ccmni_ptr, ccmni_len);
		if (0 == ret) {
#if CCMNI_DBG_INFO
			/* dbg_info->port = ccmni->channel; */
			dbg_info->avai_in_no = q_idx;
			/* dbg_info->avai_out_no = q_idx; */
			dbg_info->read_out_no = q_idx;
#endif
			avai_in++;
			avai_in &= q_length - 1;
			ccmni->shared_mem->q_rx_ringbuff[avai_in].ptr =
				ccmni->shared_mem->q_rx_ringbuff[q_idx].ptr;
			ccmni_ptr = ccmni_v2_phys_to_virt(md_id,
					(unsigned char *)(ccmni->shared_mem->q_rx_ringbuff[avai_in].ptr));
#if CCMNI_DBG_INFO
			dbg_info = (struct dbg_info_ccmni_t *)(ccmni_ptr -
					CCMNI_BUFF_HEADER_SIZE - CCMNI_BUFF_DBG_INFO_SIZE);
			dbg_info->avai_in_no = avai_in;
#endif
			q_idx++;
			consumed++;
		} else if (-CCCI_ERR_MEM_CHECK_FAIL == ret) {
			/* If dev_alloc_skb() failed, retrying right away would
			 * likely fail again, so set up the timer and retry
			 * later. */
			set_bit(CCMNI_RECV_PENDING, &ccmni->flags);
			CCCI_DBG_MSG(md_id, "net",
				     "CCMNI%d_read: no sk_buff, retrying, read_out=%d, avai_out=%d\n",
				     ccmni->channel, q_idx, avai_out);
			mod_timer(&ccmni->timer, jiffies + msecs_to_jiffies(10));
			break;
		}
	}
	read_out = (q_idx & (q_length - 1));
	CCCI_CCMNI_MSG(md_id, "CCMNI%d_receive[After]: consumed=%d\n",
		       ccmni->channel, consumed);
	if (consumed > packet_cnt_save) {
		/* Sanity check; this should not happen! */
		CCCI_DBG_MSG(md_id, "net",
			     "CCMNI%d_read fail: consumed more than packet_cnt, consumed=%d, packet_cnt=%d\n",
			     ccmni->channel, consumed, packet_cnt_save);
		/* Should we ignore all data in the buffer? -- haow.wang */
		ccmni->shared_mem->rx_control.read_out = avai_out;
		ccmni->shared_mem->rx_control.avai_in = avai_in;
		goto out;
	}
	ccmni->shared_mem->rx_control.read_out = read_out;
	ccmni->shared_mem->rx_control.avai_in = avai_in;
	CCCI_CCMNI_MSG(md_id, "CCMNI%d_read to write mailbox(ch%d, tty%d)\n",
		       ccmni->channel, ccmni->uart_rx_ack,
		       CCMNI_CHANNEL_OFFSET + ccmni->channel);
	msg.magic = 0xFFFFFFFF;
	msg.id = CCMNI_CHANNEL_OFFSET + ccmni->channel;
	msg.channel = ccmni->uart_rx_ack;
	msg.reserved = 0;
	ret = ccci_message_send(md_id, &msg, 1);
	if (ret == -CCCI_ERR_CCIF_NO_PHYSICAL_CHANNEL) {
		set_bit(CCMNI_RECV_ACK_PENDING, &ccmni->flags);
		mod_timer(&ccmni->timer, jiffies);
	} else if (ret == sizeof(struct ccci_msg_t))
		clear_bit(CCMNI_RECV_ACK_PENDING, &ccmni->flags);
out:
	spin_unlock_bh(&ccmni->spinlock);
	CCCI_CCMNI_MSG(md_id, "CCMNI%d_read invoke wake_lock_timeout(1s)\n",
		       ccmni->channel);
	wake_lock_timeout(&ctl_b->ccmni_wake_lock, HZ);
}

/*
 * Called when the modem sends us something: TX ACKs re-enable the
 * transmit queue, RX indications hand the heavy lifting to the tasklet.
 */
static void ccmni_v2_callback(void *private_data)
{
	struct logic_channel_info_t *ch_info = (struct logic_channel_info_t *)private_data;
	struct ccmni_v2_instance_t *ccmni = (struct ccmni_v2_instance_t *)(ch_info->m_owner);
	struct ccci_msg_t msg;

	while (get_logic_ch_data(ch_info, &msg)) {
		switch (msg.channel) {
		case CCCI_CCMNI1_TX_ACK:
		case CCCI_CCMNI2_TX_ACK:
		case CCCI_CCMNI3_TX_ACK:
			/* This runs in interrupt context, so no locking
			 * required. */
			ccmni->ready = 1;
			if (atomic_read(&ccmni->usage) > 0)
				netif_wake_queue(ccmni->dev);
			break;
		case CCCI_CCMNI1_RX:
		case CCCI_CCMNI2_RX:
		case CCCI_CCMNI3_RX:
			tasklet_schedule(&ccmni->tasklet);
			break;
		default:
			break;
		}
	}
}

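/*
 * TX-side ring note (mirroring the RX arithmetic described above): the
 * transmit path below always writes at q_idx = avai_out and treats
 * q_idx == avai_in as "ring full", stopping the queue until a TX ACK
 * arrives through ccmni_v2_callback(). This looks like the usual
 * one-slot-unused full/empty disambiguation for ring buffers.
 */
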
/* start_xmit is called when there is one packet to transmit. */
static int ccmni_v2_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	int ret = NETDEV_TX_OK;
	int result = 0;
	int read_out, avai_in, avai_out, q_length, q_idx;
#if CCMNI_DBG_INFO
	struct dbg_info_ccmni_t *dbg_info;
#endif
	unsigned char *ccmni_ptr;
	struct ccmni_v2_instance_t *ccmni = netdev_priv(dev);
	struct ccmni_v2_ctl_block_t *ctl_b = (struct ccmni_v2_ctl_block_t *)(ccmni->owner);
	int md_id = ctl_b->m_md_id;
	struct ccci_msg_t msg;

	spin_lock_bh(&ccmni->spinlock);
	if (ctl_b->ccci_is_ready == 0) {
		CCCI_DBG_MSG(md_id, "net",
			     "CCMNI%d transfer data fail when modem not ready\n",
			     ccmni->channel);
		ret = NETDEV_TX_BUSY;
		goto _ccmni_start_xmit_busy;
	}
	read_out = ccmni->shared_mem->tx_control.read_out;
	avai_in = ccmni->shared_mem->tx_control.avai_in;
	avai_out = ccmni->shared_mem->tx_control.avai_out;
	q_length = ccmni->shared_mem->tx_control.q_length;
	if ((read_out < 0) || (avai_out < 0) || (avai_in < 0) || (q_length < 0)) {
		CCCI_DBG_MSG(md_id, "net",
			     "CCMNI%d TX fail: avai_out=%d, read_out=%d, avai_in=%d, q_length=%d\n",
			     ccmni->channel, avai_out, read_out, avai_in,
			     q_length);
		/* Return busy so the stack keeps ownership of the skb (the
		 * original fell through with NETDEV_TX_OK and leaked it). */
		ret = NETDEV_TX_BUSY;
		goto _ccmni_start_xmit_busy;
	}
	if ((read_out >= q_length) || (avai_out >= q_length)
	    || (avai_in >= q_length)) {
		CCCI_DBG_MSG(md_id, "net",
			     "CCMNI%d TX fail: avai_out=%d, read_out=%d, avai_in=%d, q_length=%d\n",
			     ccmni->channel, avai_out, read_out, avai_in,
			     q_length);
		ret = NETDEV_TX_BUSY;
		goto _ccmni_start_xmit_busy;
	}
	/* Choose Q index */
	q_idx = avai_out;
	ccmni_ptr = ccmni->shared_mem->q_tx_ringbuff[q_idx].ptr;
	/* Too much data waiting to be read out, or Q not initialized yet
	 * (ccmni_ptr is NULL before init? -- haow.wang) */
	if ((q_idx == avai_in) || (ccmni_ptr == NULL)) {
		if (ccmni->log_count >= 1000) {
			CCCI_DBG_MSG(md_id, "net",
				     "CCMNI%d TX busy and stop queue: q_idx=%d, skb->len=%d\n",
				     ccmni->channel, q_idx, skb->len);
			CCCI_DBG_MSG(md_id, "net",
				     " TX read_out=%d avai_out=%d avai_in=%d\n",
				     ccmni->shared_mem->tx_control.read_out,
				     ccmni->shared_mem->tx_control.avai_out,
				     ccmni->shared_mem->tx_control.avai_in);
			CCCI_DBG_MSG(md_id, "net",
				     " RX read_out=%d avai_out=%d avai_in=%d\n",
				     ccmni->shared_mem->rx_control.read_out,
				     ccmni->shared_mem->rx_control.avai_out,
				     ccmni->shared_mem->rx_control.avai_in);
			ccmni->log_count = 1;
		} else {
			ccmni->log_count++;
		}
		netif_stop_queue(ccmni->dev);
		/* Clear ccmni->ready and wait for the ACK from the modem
		 * side. */
		ccmni->ready = 0;
		ret = NETDEV_TX_BUSY;
		goto _ccmni_start_xmit_busy;
	}
	ccmni_ptr = ccmni_v2_phys_to_virt(md_id,
			(unsigned char *)(ccmni->shared_mem->q_tx_ringbuff[q_idx].ptr));
	CCCI_CCMNI_MSG(md_id,
		       "CCMNI%d_start_xmit: skb_len=%d, ccmni_ready=%d\n",
		       ccmni->channel, skb->len, ccmni->ready);
	if (skb->len > CCMNI_MTU) {
		/* Sanity check; this should not happen! Digest and return
		 * OK. */
		CCCI_DBG_MSG(md_id, "net",
			     "CCMNI%d packet size exceed 1500 bytes: size=%d\n",
			     ccmni->channel, skb->len);
		dev->stats.tx_dropped++;
		goto _ccmni_start_xmit_exit;
	}
#if CCMNI_DBG_INFO
	/* DBG info */
	dbg_info = (struct dbg_info_ccmni_t *)(ccmni_ptr -
			CCMNI_BUFF_HEADER_SIZE - CCMNI_BUFF_DBG_INFO_SIZE);
	dbg_info->avai_out_no = q_idx;
#endif
	memcpy(ccmni_ptr, skb->data, skb->len);
	ccmni->shared_mem->q_tx_ringbuff[q_idx].len = skb->len;
	/* End byte */
	*(unsigned char *)(ccmni_ptr + skb->len) = CCMNI_DATA_END;
	/* Make sure the data is in memory before the index update becomes
	 * visible. */
	mb();
	/* Update avai_out after the data buffer is filled */
	q_idx++;
	ccmni->shared_mem->tx_control.avai_out = (q_idx & (q_length - 1));
	mb();
	msg.addr = 0;
	msg.len = skb->len;
	msg.channel = ccmni->uart_tx;
	msg.reserved = 0;
	result = ccci_message_send(md_id, &msg, 1);
	if (result == -CCCI_ERR_CCIF_NO_PHYSICAL_CHANNEL) {
		set_bit(CCMNI_SEND_PENDING, &ccmni->flags);
		ccmni->send_len += skb->len;
		mod_timer(&ccmni->timer, jiffies);
	} else if (result == sizeof(struct ccci_msg_t))
		clear_bit(CCMNI_SEND_PENDING, &ccmni->flags);
	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;
_ccmni_start_xmit_exit:
	dev_kfree_skb(skb);
_ccmni_start_xmit_busy:
	spin_unlock_bh(&ccmni->spinlock);
	return ret;
}

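/*
 * Note on the exit labels above: on NETDEV_TX_BUSY the skb is
 * deliberately not freed, since the network stack retains ownership and
 * will requeue it. Only packets the driver actually consumed (sent, or
 * dropped for being oversized) go through dev_kfree_skb().
 */
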
static int ccmni_v2_open(struct net_device *dev)
{
	struct ccmni_v2_instance_t *ccmni = netdev_priv(dev);
	struct ccmni_v2_ctl_block_t *ctl_b = (struct ccmni_v2_ctl_block_t *)ccmni->owner;
	int md_id = ctl_b->m_md_id;

	atomic_inc(&ccmni->usage);
	CCCI_MSG_INF(md_id, "net", "CCMNI%d open: usage=%d\n",
		     ccmni->channel, atomic_read(&ccmni->usage));
	if (ctl_b->ccci_is_ready == 0) {
		CCCI_MSG_INF(md_id, "net",
			     "CCMNI%d open fail when modem not ready\n",
			     ccmni->channel);
		/* Balance the usage count on failure (the original leaked
		 * one reference per failed open). */
		atomic_dec(&ccmni->usage);
		return -EIO;
	}
	netif_start_queue(dev);
	return 0;
}

static int ccmni_v2_close(struct net_device *dev)
{
	struct ccmni_v2_instance_t *ccmni = netdev_priv(dev);
	struct ccmni_v2_ctl_block_t *ctl_b = (struct ccmni_v2_ctl_block_t *)ccmni->owner;

	atomic_dec(&ccmni->usage);
	CCCI_MSG_INF(ctl_b->m_md_id, "net", "CCMNI%d close: usage=%d\n",
		     ccmni->channel, atomic_read(&ccmni->usage));
	netif_stop_queue(dev);
	return 0;
}

static int ccmni_v2_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct ccmni_v2_instance_t *ccmni = netdev_priv(dev);
	struct ccmni_v2_ctl_block_t *ctl_b = (struct ccmni_v2_ctl_block_t *)ccmni->owner;

	switch (cmd) {
	case SIOCSTXQSTATE:
		/* ifru_ivalue[3:0]: start/stop; ifru_ivalue[7:4]: reserved; */
		/* ifru_ivalue[15:8]: user id, bit8=rild, bit9=thermal; */
		/* ifru_ivalue[31:16]: watchdog timeout value */
		if ((ifr->ifr_ifru.ifru_ivalue & 0xF) == 0) {
			if (atomic_read(&ccmni->usage) > 0) {
				atomic_dec(&ccmni->usage);
				netif_stop_queue(dev);
				dev->watchdog_timeo = 60 * HZ;
			}
		} else {
			if (atomic_read(&ccmni->usage) <= 0) {
				if (netif_running(dev) && netif_queue_stopped(dev))
					netif_wake_queue(dev);
				dev->watchdog_timeo = 1 * HZ;
				atomic_inc(&ccmni->usage);
			}
		}
		CCCI_MSG_INF(ctl_b->m_md_id, "net",
			     "SIOCSTXQSTATE request=%d on CCMNI%d usage=%d\n",
			     ifr->ifr_ifru.ifru_ivalue, ccmni->channel,
			     atomic_read(&ccmni->usage));
		break;
	default:
		CCCI_MSG_INF(ctl_b->m_md_id, "net",
			     "unknown ioctl cmd=%d on CCMNI%d\n",
			     cmd, ccmni->channel);
		break;
	}
	return 0;
}

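/*
 * A minimal user-space sketch of driving SIOCSTXQSTATE, assuming an
 * AF_INET socket and the interface name "ccmni0" (both hypothetical
 * illustrations, not part of this driver):
 *
 *   struct ifreq ifr;
 *   int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *   memset(&ifr, 0, sizeof(ifr));
 *   strncpy(ifr.ifr_name, "ccmni0", IFNAMSIZ - 1);
 *   ifr.ifr_ifru.ifru_ivalue = 0x101;       // bit8 = rild, bit[3:0] = start
 *   ioctl(fd, SIOCDEVPRIVATE + 0, &ifr);    // SIOCSTXQSTATE
 */
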
static void ccmni_v2_tx_timeout(struct net_device *dev)
{
	struct ccmni_v2_instance_t *ccmni = netdev_priv(dev);

	dev->stats.tx_errors++;
	if (atomic_read(&ccmni->usage) > 0)
		netif_wake_queue(dev);
}

static const struct net_device_ops ccmni_v2_netdev_ops = {
	.ndo_open	= ccmni_v2_open,
	.ndo_stop	= ccmni_v2_close,
	.ndo_start_xmit	= ccmni_v2_start_xmit,
	.ndo_do_ioctl	= ccmni_v2_net_ioctl,
	.ndo_tx_timeout	= ccmni_v2_tx_timeout,
};

static void ccmni_v2_setup(struct net_device *dev)
{
	struct ccmni_v2_instance_t *ccmni = netdev_priv(dev);
	int retry = 10;

	ether_setup(dev);
	dev->header_ops = NULL;
	dev->netdev_ops = &ccmni_v2_netdev_ops;
	dev->flags = IFF_NOARP & (~IFF_BROADCAST & ~IFF_MULTICAST);
	dev->mtu = CCMNI_MTU;
	dev->tx_queue_len = CCMNI_TX_QUEUE;
	dev->addr_len = ETH_ALEN;
	dev->destructor = free_netdev;
	while (retry-- > 0) {
		random_ether_addr((u8 *)dev->dev_addr);
		if (is_mac_addr_duplicate((u8 *)dev->dev_addr))
			continue;
		else
			break;
	}
	CCCI_CCMNI_MSG(ccmni->m_md_id,
		       "CCMNI%d_setup: features=0x%08x, flags=0x%08x\n",
		       ccmni->channel, (unsigned int)(dev->features),
		       dev->flags);
}

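/*
 * The setup hook above fabricates a random MAC address for each
 * interface; the modem link itself carries raw IP, so the address only
 * has to be unique on the AP side (ccmni_make_etherframe() synthesizes
 * the Ethernet header on RX). is_mac_addr_duplicate() is retried up to
 * ten times; after that, the last candidate is kept even if it collided.
 */
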
static int ccmni_v2_create_instance(int md_id, int channel)
{
	int ret, size, count;
	int uart_rx, uart_rx_ack;
	int uart_tx, uart_tx_ack;
	struct ccmni_v2_instance_t *ccmni;
	struct net_device *dev = NULL;
	int *ccmni_rx_base_phy;
	int *ccmni_rx_base_virt;
	unsigned char *ptr_virt;
#if CCMNI_DBG_INFO
	struct dbg_info_ccmni_t *dbg_info;
#endif
	struct ccmni_v2_ctl_block_t *ctl_b =
		(struct ccmni_v2_ctl_block_t *)ccmni_ctl_block[md_id];

	/* Network device creation and registration. */
	dev = alloc_netdev(sizeof(struct ccmni_v2_instance_t), "eth%d",
			   NET_NAME_UNKNOWN, ccmni_v2_setup);
	if (dev == NULL) {
		CCCI_MSG_INF(md_id, "net", "CCMNI%d allocate netdev fail!\n",
			     channel);
		return -ENOMEM;
	}
	ccmni = netdev_priv(dev);
	ccmni->dev = dev;
	ccmni->channel = channel;
	ccmni->owner = ccmni_ctl_block[md_id];
	if (md_id == MD_SYS1)
		sprintf(dev->name, "ccmni%d", channel);
	else
		sprintf(dev->name, "cc%dmni%d", md_id + 1, channel);
	ret = register_netdev(dev);
	if (ret != 0) {
		CCCI_MSG_INF(md_id, "net", "CCMNI%d register netdev fail: %d\n",
			     ccmni->channel, ret);
		goto _ccmni_create_instance_free;
	}
	ccci_ccmni_v2_ctl_mem_base_req(md_id, ccmni->channel,
			(int *)&ccmni->shared_mem,
			&ccmni->shared_mem_phys_addr, &size);
	if (ccmni->shared_mem == NULL) {
		CCCI_MSG_INF(md_id, "net", "CCMNI%d allocate memory fail\n",
			     ccmni->channel);
		ret = -ENOMEM;
		goto _ccmni_create_instance_unregister;
	}
	CCCI_CCMNI_MSG(md_id, "0x%08X:0x%08X:%d\n",
		       (unsigned int)ccmni->shared_mem,
		       (unsigned int)ccmni->shared_mem_phys_addr, size);
	ccmni->shared_mem->rx_control.read_out = 0;
	ccmni->shared_mem->rx_control.avai_out = 0;
	ccmni->shared_mem->rx_control.avai_in = CCMNI_CTRL_Q_RX_SIZE_DEFAULT - 1;
	ccmni->shared_mem->rx_control.q_length = CCMNI_CTRL_Q_RX_SIZE;
	memset(ccmni->shared_mem->q_rx_ringbuff, 0,
	       ccmni->shared_mem->rx_control.q_length *
	       sizeof(struct q_ringbuf_ccmni_t));
	ccmni_v2_dl_base_req(md_id, &ccmni_rx_base_virt, &ccmni_rx_base_phy);
	if (ccmni_rx_base_virt == NULL || ccmni_rx_base_phy == NULL) {
		CCCI_MSG_INF(md_id, "net", "CCMNI%d allocate memory fail\n",
			     ccmni->channel);
		ret = -ENOMEM;
		goto _ccmni_create_instance_unregister;
	}
	switch (ccmni->channel) {
	case 0:
		uart_rx = CCCI_CCMNI1_RX;
		uart_rx_ack = CCCI_CCMNI1_RX_ACK;
		uart_tx = CCCI_CCMNI1_TX;
		uart_tx_ack = CCCI_CCMNI1_TX_ACK;
		break;
	case 1:
		uart_rx = CCCI_CCMNI2_RX;
		uart_rx_ack = CCCI_CCMNI2_RX_ACK;
		uart_tx = CCCI_CCMNI2_TX;
		uart_tx_ack = CCCI_CCMNI2_TX_ACK;
		break;
	case 2:
		uart_rx = CCCI_CCMNI3_RX;
		uart_rx_ack = CCCI_CCMNI3_RX_ACK;
		uart_tx = CCCI_CCMNI3_TX;
		uart_tx_ack = CCCI_CCMNI3_TX_ACK;
		break;
	default:
		CCCI_MSG_INF(md_id, "net",
			     "[Error]CCMNI%d Invalid ccmni number\n",
			     ccmni->channel);
		ret = -ENOSYS;
		goto _ccmni_create_instance_unregister;
	}
	ccmni->m_md_id = md_id;
	/* Each channel has 100 RX buffers by default */
	for (count = 0; count < CCMNI_CTRL_Q_RX_SIZE_DEFAULT; count++) {
		ccmni->shared_mem->q_rx_ringbuff[count].ptr =
			(CCMNI_CTRL_Q_RX_SIZE_DEFAULT * ccmni->channel + count) *
			CCMNI_SINGLE_BUFF_SIZE +
			(unsigned char *)ccmni_rx_base_phy +
			CCMNI_BUFF_HEADER_SIZE + CCMNI_BUFF_DBG_INFO_SIZE -
			get_md2_ap_phy_addr_fixed();
		ptr_virt = ccmni_v2_phys_to_virt(md_id,
				(unsigned char *)(ccmni->shared_mem->q_rx_ringbuff[count].ptr));
		/* Buffer header and footer init. */
		/* Assumes int is 32 bit; may need further modification! */
		*((int *)(ptr_virt - CCMNI_BUFF_HEADER_SIZE)) = CCMNI_BUFF_HEADER;
		*((int *)(ptr_virt + CCMNI_BUFF_DATA_FIELD_SIZE)) = CCMNI_BUFF_FOOTER;
#if CCMNI_DBG_INFO
		/* debug info */
		dbg_info = (struct dbg_info_ccmni_t *)(ptr_virt -
				CCMNI_BUFF_HEADER_SIZE - CCMNI_BUFF_DBG_INFO_SIZE);
		dbg_info->port = ccmni->channel;
		dbg_info->avai_in_no = count;
#endif
	}
	ccmni->uart_rx = uart_rx;
	ccmni->uart_rx_ack = uart_rx_ack;
	ccmni->uart_tx = uart_tx;
	ccmni->uart_tx_ack = uart_tx_ack;
	/* Register this ccmni instance to the ccci driver and pass it the
	 * notification handler. */
	register_to_logic_ch(md_id, uart_rx, ccmni_v2_callback, (void *)ccmni);
	register_to_logic_ch(md_id, uart_tx_ack, ccmni_v2_callback, (void *)ccmni);
	spin_lock_init(&ccmni->spinlock);
	setup_timer(&ccmni->timer, timer_func, (unsigned long)ccmni);
	tasklet_init(&ccmni->tasklet, ccmni_v2_read, (unsigned long)ccmni);
	ctl_b->ccmni_v2_instance[channel] = ccmni;
	ccmni->ready = 1;
	ccmni->net_if_off = 0;
	ccmni->log_count = 1;
	return ret;

_ccmni_create_instance_unregister:
	/* Once registered, unregister_netdev() already frees the device
	 * through dev->destructor == free_netdev; the original error path
	 * additionally called free_netdev() and kfree() on the netdev
	 * private area, a double free. */
	unregister_netdev(dev);
	ctl_b->ccmni_v2_instance[channel] = NULL;
	return ret;
_ccmni_create_instance_free:
	free_netdev(dev);
	ctl_b->ccmni_v2_instance[channel] = NULL;
	return ret;
}

static void ccmni_v2_destroy_instance(int md_id, int channel)
{
	struct ccmni_v2_ctl_block_t *ctl_b =
		(struct ccmni_v2_ctl_block_t *)ccmni_ctl_block[md_id];
	struct ccmni_v2_instance_t *ccmni = ctl_b->ccmni_v2_instance[channel];

	if (ccmni != NULL) {
		ccmni->ready = 0;
		un_register_to_logic_ch(md_id, ccmni->uart_rx);
		un_register_to_logic_ch(md_id, ccmni->uart_tx_ack);
		if (ccmni->shared_mem != NULL) {
			ccmni->shared_mem = NULL;
			ccmni->shared_mem_phys_addr = 0;
		}
		if (ccmni->dev != NULL)
			unregister_netdev(ccmni->dev);
		/* tasklet_kill(&ccmni->tasklet); */
		ctl_b->ccmni_v2_instance[channel] = NULL;
	}
}

int ccmni_v2_init(int md_id)
{
	int count, ret, curr;
	struct ccmni_v2_ctl_block_t *ctl_b;

	/* Create the control block structure */
	ctl_b = kmalloc(sizeof(struct ccmni_v2_ctl_block_t), GFP_KERNEL);
	if (ctl_b == NULL)
		return -CCCI_ERR_GET_MEM_FAIL;
	memset(ctl_b, 0, sizeof(struct ccmni_v2_ctl_block_t));
	ccmni_ctl_block[md_id] = ctl_b;
	/* Init ctl_b */
	ctl_b->m_md_id = md_id;
	ctl_b->ccmni_notifier.call = ccmni_v2_notifier_call;
	ctl_b->ccmni_notifier.next = NULL;
	for (count = 0; count < CCMNI_V2_PORT_NUM; count++) {
		ret = ccmni_v2_create_instance(md_id, count);
		if (ret != 0) {
			CCCI_MSG_INF(md_id, "net",
				     "CCMNI%d create instance fail: %d\n",
				     count, ret);
			goto _CCMNI_INSTANCE_CREATE_FAIL;
		}
	}
	ret = md_register_call_chain(md_id, &ctl_b->ccmni_notifier);
	if (ret) {
		CCCI_MSG_INF(md_id, "net", "md_register_call_chain fail: %d\n",
			     ret);
		goto _CCMNI_INSTANCE_CREATE_FAIL;
	}
	snprintf(ctl_b->wakelock_name, sizeof(ctl_b->wakelock_name),
		 "ccci%d_net_v2", (md_id + 1));
	wake_lock_init(&ctl_b->ccmni_wake_lock, WAKE_LOCK_SUSPEND,
		       ctl_b->wakelock_name);
	return ret;

_CCMNI_INSTANCE_CREATE_FAIL:
	/* Tear down the instances created so far. The original loop ran
	 * curr <= count, which walks one past the array end when
	 * md_register_call_chain() fails with count == CCMNI_V2_PORT_NUM;
	 * a failed instance slot is already NULL, so curr < count covers
	 * every case. */
	for (curr = 0; curr < count; curr++)
		ccmni_v2_destroy_instance(md_id, curr);
	kfree(ctl_b);
	ccmni_ctl_block[md_id] = NULL;
	return ret;
}

void ccmni_v2_exit(int md_id)
{
	int count;
	struct ccmni_v2_ctl_block_t *ctl_b =
		(struct ccmni_v2_ctl_block_t *)ccmni_ctl_block[md_id];

	if (ctl_b) {
		for (count = 0; count < CCMNI_V2_PORT_NUM; count++)
			ccmni_v2_destroy_instance(md_id, count);
		md_unregister_call_chain(md_id, &ctl_b->ccmni_notifier);
		wake_lock_destroy(&ctl_b->ccmni_wake_lock);
		/* Release the control block, mirroring the init error path
		 * (the original left it allocated on exit). */
		kfree(ctl_b);
		ccmni_ctl_block[md_id] = NULL;
	}
}