/* ccci_fs_main.c */
/*****************************************************************************
 *
 * Filename:
 * ---------
 * ccci_fs.c
 *
 * Project:
 * --------
 * ALPS
 *
 * Description:
 * ------------
 * MT65XX CCCI FS Proxy Driver
 *
 ****************************************************************************/
#include <linux/cdev.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/fs.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/wait.h>
#include <linux/wakelock.h>
#include <ccci.h>
  31. #define CCCI_FS_DEVNAME "ccci_fs"
  32. /* enable fs_tx or fs_rx log */
  33. unsigned int fs_tx_debug_enable[MAX_MD_NUM] = { 0 };
  34. unsigned int fs_rx_debug_enable[MAX_MD_NUM] = { 0 };
  35. struct fs_ctl_block_t {
  36. unsigned int fs_md_id;
  37. spinlock_t fs_spinlock;
  38. dev_t fs_dev_num;
  39. struct cdev fs_cdev;
  40. struct fs_stream_buffer_t *fs_buffers;
  41. int fs_buffers_phys_addr;
  42. struct kfifo fs_fifo;
  43. int reset_handle;
  44. wait_queue_head_t fs_waitq;
  45. struct wake_lock fs_wake_lock;
  46. char fs_wakelock_name[16];
  47. int fs_smem_size;
  48. };
  49. static struct fs_ctl_block_t *fs_ctl_block[MAX_MD_NUM];
  50. /* will be called when modem sends us something. */
  51. /* we will then copy it to the tty's buffer. */
  52. /* this is essentially the "read" fops. */
  53. static void ccci_fs_callback(void *private)
  54. {
  55. unsigned long flag;
  56. struct logic_channel_info_t *ch_info = (struct logic_channel_info_t *) private;
  57. struct ccci_msg_t msg;
  58. struct fs_ctl_block_t *ctl_b = (struct fs_ctl_block_t *) ch_info->m_owner;
  59. spin_lock_irqsave(&ctl_b->fs_spinlock, flag);
  60. while (get_logic_ch_data(ch_info, &msg)) {
  61. if (msg.channel == CCCI_FS_RX) {
  62. if (fs_rx_debug_enable[ctl_b->fs_md_id]) {
  63. CCCI_DBG_MSG(ctl_b->fs_md_id, "fs ",
  64. "fs_callback: %08X %08X %08X\n",
  65. msg.data0, msg.data1,
  66. msg.reserved);
  67. }
  68. if (kfifo_in
  69. (&ctl_b->fs_fifo, (unsigned char *)&msg.reserved,
  70. sizeof(msg.reserved)) == sizeof(msg.reserved)) {
  71. wake_up_interruptible(&ctl_b->fs_waitq);
  72. wake_lock_timeout(&ctl_b->fs_wake_lock, HZ / 2);
  73. } else {
  74. CCCI_DBG_MSG(ctl_b->fs_md_id, "fs ",
  75. "[Error]Unable to put new request into fifo\n");
  76. }
  77. }
  78. }
  79. spin_unlock_irqrestore(&ctl_b->fs_spinlock, flag);
  80. }
  81. static int ccci_fs_get_index(int md_id)
  82. {
  83. int ret;
  84. unsigned long flag;
  85. struct fs_ctl_block_t *ctl_b;
  86. if (unlikely(fs_ctl_block[md_id] == NULL)) {
  87. CCCI_MSG_INF(md_id, "fs ",
  88. "fs_get_index: fata error, fs_ctl_b is NULL\n");
  89. return -EPERM;
  90. }
  91. ctl_b = fs_ctl_block[md_id];
  92. CCCI_FS_MSG(md_id, "get_fs_index++\n");
  93. if (wait_event_interruptible
  94. (ctl_b->fs_waitq, kfifo_len(&ctl_b->fs_fifo) != 0) != 0) {
  95. if (fs_rx_debug_enable[md_id])
  96. CCCI_MSG_INF(md_id, "fs ",
  97. "fs_get_index: Interrupted by syscall.signal_pend\n");
  98. return -ERESTARTSYS;
  99. }
  100. spin_lock_irqsave(&ctl_b->fs_spinlock, flag);
  101. if (kfifo_out(&ctl_b->fs_fifo, (unsigned char *)&ret, sizeof(int)) !=
  102. sizeof(int)) {
  103. spin_unlock_irqrestore(&ctl_b->fs_spinlock, flag);
  104. CCCI_MSG_INF(md_id, "fs ", "get fs index fail from fifo\n");
  105. return -EFAULT;
  106. }
  107. spin_unlock_irqrestore(&ctl_b->fs_spinlock, flag);
  108. if (fs_rx_debug_enable[md_id])
  109. CCCI_MSG_INF(md_id, "fs ", "fs_index=%d\n", ret);
  110. CCCI_FS_MSG(md_id, "get_fs_index--\n");
  111. return ret;
  112. }
  113. static int ccci_fs_send(int md_id, unsigned long arg)
  114. {
  115. void __user *argp;
  116. struct ccci_msg_t msg;
  117. struct fs_stream_msg_t message;
  118. int ret = 0;
  119. int xmit_retry = 0;
  120. struct fs_ctl_block_t *ctl_b;
  121. CCCI_FS_MSG(md_id, "ccci_fs_send++\n");
  122. if (unlikely(fs_ctl_block[md_id] == NULL)) {
  123. CCCI_MSG_INF(md_id, "fs ",
  124. "fs_get_index: fatal error, fs_ctl_b is NULL\n");
  125. return -EPERM;
  126. }
  127. ctl_b = fs_ctl_block[md_id];
  128. argp = (void __user *)arg;
  129. if (copy_from_user((void *)&message, argp, sizeof(struct fs_stream_msg_t))) {
  130. CCCI_MSG_INF(md_id, "fs ",
  131. "ccci_fs_send: copy_from_user fail!\n");
  132. return -EFAULT;
  133. }
  134. msg.data0 =
  135. ctl_b->fs_buffers_phys_addr - get_md2_ap_phy_addr_fixed() +
  136. (sizeof(struct fs_stream_buffer_t) * message.index);
  137. msg.data1 = message.length + 4;
  138. msg.channel = CCCI_FS_TX;
  139. msg.reserved = message.index;
  140. if (fs_tx_debug_enable[md_id]) {
  141. CCCI_MSG_INF(md_id, "fs ", "fs_send: %08X %08X %08X\n",
  142. msg.data0, msg.data1, msg.reserved);
  143. }
  144. mb(); /* wait write done */
  145. do {
  146. ret = ccci_message_send(md_id, &msg, 1);
  147. if (ret == sizeof(struct ccci_msg_t))
  148. break;
  149. if (ret == -CCCI_ERR_CCIF_NO_PHYSICAL_CHANNEL) {
  150. xmit_retry++;
  151. msleep(20);
  152. if ((xmit_retry & 0xF) == 0) {
  153. CCCI_MSG_INF(md_id, "fs ",
  154. "fs_chr has retried %d times\n",
  155. xmit_retry);
  156. }
  157. } else {
  158. break;
  159. }
  160. } while (1);
  161. if (ret != sizeof(struct ccci_msg_t)) {
  162. CCCI_MSG_INF(md_id, "fs ",
  163. "ccci_fs_send fail <ret=%d>: %08X, %08X, %08X\n",
  164. ret, msg.data0, msg.data1, msg.reserved);
  165. return ret;
  166. }
  167. CCCI_FS_MSG(md_id, "ccci_fs_send--\n");
  168. return 0;
  169. }
  170. static int ccci_fs_mmap(struct file *file, struct vm_area_struct *vma)
  171. {
  172. unsigned long off, start, len;
  173. struct fs_ctl_block_t *ctl_b;
  174. int md_id;
  175. ctl_b = (struct fs_ctl_block_t *) file->private_data;
  176. md_id = ctl_b->fs_md_id;
  177. CCCI_FS_MSG(md_id, "mmap++\n");
  178. if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT)) {
  179. CCCI_MSG_INF(md_id, "fs ",
  180. "ccci_fs_mmap: vm_pgoff too large\n");
  181. return -EINVAL;
  182. }
  183. off = vma->vm_pgoff << PAGE_SHIFT;
  184. start = (unsigned long)ctl_b->fs_buffers_phys_addr;
  185. len = PAGE_ALIGN((start & ~PAGE_MASK) + ctl_b->fs_smem_size);
  186. if ((vma->vm_end - vma->vm_start + off) > len) {
  187. CCCI_MSG_INF(md_id, "fs ",
  188. "ccci_fs_mmap: memory require over ccci_fs_smem size\n");
  189. return -1; /* mmap return -1 when fail */
  190. }
  191. off += start & PAGE_MASK;
  192. vma->vm_pgoff = off >> PAGE_SHIFT;
  193. vma->vm_flags |= VM_IO;
  194. vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
  195. CCCI_FS_MSG(md_id, "mmap--\n");
  196. return remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT,
  197. vma->vm_end - vma->vm_start, vma->vm_page_prot);
  198. }
  199. static long ccci_fs_ioctl(struct file *file, unsigned int cmd,
  200. unsigned long arg)
  201. {
  202. int ret;
  203. int md_id;
  204. struct fs_ctl_block_t *ctl_b;
  205. ctl_b = (struct fs_ctl_block_t *) file->private_data;
  206. md_id = ctl_b->fs_md_id;
  207. switch (cmd) {
  208. case CCCI_FS_IOCTL_GET_INDEX:
  209. ret = ccci_fs_get_index(md_id);
  210. break;
  211. case CCCI_FS_IOCTL_SEND:
  212. ret = ccci_fs_send(md_id, arg);
  213. break;
  214. default:
  215. CCCI_MSG_INF(md_id, "fs ",
  216. "ccci_fs_ioctl: [Error]unknown ioctl:%d\n", cmd);
  217. ret = -ENOIOCTLCMD;
  218. break;
  219. }
  220. return ret;
  221. }
  222. /* clear kfifo invalid data which may not be processed before close operation */
  223. void ccci_fs_resetfifo(int md_id)
  224. {
  225. struct fs_ctl_block_t *ctl_b = fs_ctl_block[md_id];
  226. unsigned long flag;
  227. CCCI_MSG("ccci_fs_resetfifo\n");
  228. /* Reset FS KFIFO */
  229. spin_lock_irqsave(&ctl_b->fs_spinlock, flag);
  230. kfifo_reset(&ctl_b->fs_fifo);
  231. spin_unlock_irqrestore(&ctl_b->fs_spinlock, flag);
  232. }
  233. static int ccci_fs_open(struct inode *inode, struct file *file)
  234. {
  235. int md_id;
  236. int major;
  237. struct fs_ctl_block_t *ctl_b;
  238. major = imajor(inode);
  239. md_id = get_md_id_by_dev_major(major);
  240. if (md_id < 0) {
  241. CCCI_MSG("FS open fail: invalid major id:%d\n", major);
  242. return -1;
  243. }
  244. CCCI_MSG_INF(md_id, "fs ", "FS open by %s\n", current->comm);
  245. ctl_b = fs_ctl_block[md_id];
  246. file->private_data = ctl_b;
  247. nonseekable_open(inode, file);
  248. /* modem reset registration. */
  249. ctl_b->reset_handle = ccci_reset_register(md_id, "CCCI_FS");
  250. if (ctl_b->reset_handle < 0)
  251. CCCI_ERR_INF(md_id, "fs ", "ctl_b->reset_handle %d < 0\n", ctl_b->reset_handle);
  252. return 0;
  253. }
  254. static int ccci_fs_release(struct inode *inode, struct file *file)
  255. {
  256. int md_id;
  257. int major;
  258. struct fs_ctl_block_t *ctl_b;
  259. /* unsigned long flag; */
  260. major = imajor(inode);
  261. md_id = get_md_id_by_dev_major(major);
  262. if (md_id < 0) {
  263. CCCI_MSG("FS release fail: invalid major id:%d\n", major);
  264. return -1;
  265. }
  266. CCCI_MSG_INF(md_id, "fs ", "FS release by %s\n", current->comm);
  267. ctl_b = fs_ctl_block[md_id];
  268. memset(ctl_b->fs_buffers, 0, ctl_b->fs_smem_size);
  269. ccci_user_ready_to_reset(md_id, ctl_b->reset_handle);
  270. /* CR: 1260702 */
  271. /* clear kfifo invalid data which may not be processed before close operation */
  272. /* spin_lock_irqsave(&ctl_b->fs_spinlock,flag); */
  273. /* kfifo_reset(&ctl_b->fs_fifo); */
  274. /* spin_unlock_irqrestore(&ctl_b->fs_spinlock,flag); */
  275. return 0;
  276. }
  277. static int ccci_fs_start(int md_id)
  278. {
  279. struct fs_ctl_block_t *ctl_b;
  280. unsigned long flag;
  281. if (unlikely(fs_ctl_block[md_id] == NULL)) {
  282. CCCI_MSG_INF(md_id, "fs ",
  283. "ccci_fs_start: fatal error, fs_ctl_b is NULL\n");
  284. return -CCCI_ERR_FATAL_ERR;
  285. }
  286. ctl_b = fs_ctl_block[md_id];
  287. if (0 !=
  288. kfifo_alloc(&ctl_b->fs_fifo, sizeof(unsigned) * CCCI_FS_MAX_BUFFERS,
  289. GFP_KERNEL)) {
  290. CCCI_MSG_INF(md_id, "fs ", "ccci_fs_start: kfifo alloc fail\n");
  291. return -CCCI_ERR_ALLOCATE_MEMORY_FAIL;
  292. }
  293. /* Reset FS KFIFO */
  294. spin_lock_irqsave(&ctl_b->fs_spinlock, flag);
  295. kfifo_reset(&ctl_b->fs_fifo);
  296. spin_unlock_irqrestore(&ctl_b->fs_spinlock, flag);
  297. /* modem related channel registration. */
  298. ccci_fs_base_req(md_id, (int *)&ctl_b->fs_buffers, &ctl_b->fs_buffers_phys_addr,
  299. &ctl_b->fs_smem_size);
  300. register_to_logic_ch(md_id, CCCI_FS_RX, ccci_fs_callback, ctl_b);
  301. return 0;
  302. }
  303. static void ccci_fs_stop(int md_id)
  304. {
  305. struct fs_ctl_block_t *ctl_b;
  306. if (unlikely(fs_ctl_block[md_id] == NULL)) {
  307. CCCI_MSG_INF(md_id, "fs ",
  308. "ccci_fs_stop: fatal error, fs_ctl_b is NULL\n");
  309. return;
  310. }
  311. ctl_b = fs_ctl_block[md_id];
  312. if (ctl_b->fs_buffers != NULL) {
  313. kfifo_free(&ctl_b->fs_fifo);
  314. un_register_to_logic_ch(md_id, CCCI_FS_RX);
  315. ctl_b->fs_buffers = NULL;
  316. ctl_b->fs_buffers_phys_addr = 0;
  317. }
  318. }
  319. static const struct file_operations fs_fops = {
  320. .owner = THIS_MODULE,
  321. .unlocked_ioctl = ccci_fs_ioctl,
  322. .open = ccci_fs_open,
  323. .mmap = ccci_fs_mmap,
  324. .release = ccci_fs_release,
  325. };
  326. int __init ccci_fs_init(int md_id)
  327. {
  328. int ret;
  329. int major, minor;
  330. struct fs_ctl_block_t *ctl_b;
  331. ret = get_dev_id_by_md_id(md_id, "fs", &major, &minor);
  332. if (ret < 0) {
  333. CCCI_MSG("ccci_fs_init: get md device number failed(%d)\n",
  334. ret);
  335. return ret;
  336. }
  337. /* Allocate fs ctrl struct memory */
  338. ctl_b = kmalloc(sizeof(struct fs_ctl_block_t), GFP_KERNEL);
  339. if (ctl_b == NULL)
  340. return -CCCI_ERR_GET_MEM_FAIL;
  341. memset(ctl_b, 0, sizeof(struct fs_ctl_block_t));
  342. fs_ctl_block[md_id] = ctl_b;
  343. /* Init ctl_b */
  344. ctl_b->fs_md_id = md_id;
  345. spin_lock_init(&ctl_b->fs_spinlock);
  346. init_waitqueue_head(&ctl_b->fs_waitq);
  347. ctl_b->fs_dev_num = MKDEV(major, minor);
  348. snprintf(ctl_b->fs_wakelock_name, sizeof(ctl_b->fs_wakelock_name),
  349. "ccci%d_fs", (md_id + 1));
  350. wake_lock_init(&ctl_b->fs_wake_lock, WAKE_LOCK_SUSPEND,
  351. ctl_b->fs_wakelock_name);
  352. ret =
  353. register_chrdev_region(ctl_b->fs_dev_num, 1,
  354. ctl_b->fs_wakelock_name);
  355. if (ret) {
  356. CCCI_MSG_INF(md_id, "fs ",
  357. "ccci_fs_init: Register char device failed(%d)\n",
  358. ret);
  359. goto _REG_CHR_REGION_FAIL;
  360. }
  361. cdev_init(&ctl_b->fs_cdev, &fs_fops);
  362. ctl_b->fs_cdev.owner = THIS_MODULE;
  363. ctl_b->fs_cdev.ops = &fs_fops;
  364. ret = cdev_add(&ctl_b->fs_cdev, ctl_b->fs_dev_num, 1);
  365. if (ret) {
  366. CCCI_MSG_INF(md_id, "fs ", "cdev_add fail(%d)\n", ret);
  367. unregister_chrdev_region(ctl_b->fs_dev_num, 1);
  368. goto _REG_CHR_REGION_FAIL;
  369. }
  370. ret = ccci_fs_start(md_id);
  371. if (ret) {
  372. CCCI_MSG_INF(md_id, "fs ", "FS initialize fail\n");
  373. goto _CCCI_FS_START_FAIL;
  374. }
  375. CCCI_FS_MSG(md_id, "Init complete, device major number = %d\n",
  376. MAJOR(ctl_b->fs_dev_num));
  377. return 0;
  378. _CCCI_FS_START_FAIL:
  379. cdev_del(&ctl_b->fs_cdev);
  380. unregister_chrdev_region(ctl_b->fs_dev_num, 1);
  381. _REG_CHR_REGION_FAIL:
  382. kfree(ctl_b);
  383. fs_ctl_block[md_id] = NULL;
  384. return ret;
  385. }
  386. void __exit ccci_fs_exit(int md_id)
  387. {
  388. struct fs_ctl_block_t *ctl_b = fs_ctl_block[md_id];
  389. if (unlikely(ctl_b == NULL)) {
  390. CCCI_MSG_INF(md_id, "fs ", "ccci_fs_exit: fs_ctl_b is NULL\n");
  391. return;
  392. }
  393. ccci_fs_stop(md_id);
  394. cdev_del(&ctl_b->fs_cdev);
  395. unregister_chrdev_region(ctl_b->fs_dev_num, 1);
  396. wake_lock_destroy(&ctl_b->fs_wake_lock);
  397. kfree(ctl_b);
  398. fs_ctl_block[md_id] = NULL;
  399. }