sync_serial.c 44 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557
  1. /*
  2. * Simple synchronous serial port driver for ETRAX FS and Artpec-3.
  3. *
  4. * Copyright (c) 2005 Axis Communications AB
  5. *
  6. * Author: Mikael Starvik
  7. *
  8. */
  9. #include <linux/module.h>
  10. #include <linux/kernel.h>
  11. #include <linux/types.h>
  12. #include <linux/errno.h>
  13. #include <linux/major.h>
  14. #include <linux/sched.h>
  15. #include <linux/mutex.h>
  16. #include <linux/interrupt.h>
  17. #include <linux/poll.h>
  18. #include <linux/init.h>
  19. #include <linux/timer.h>
  20. #include <linux/spinlock.h>
  21. #include <linux/wait.h>
  22. #include <asm/io.h>
  23. #include <dma.h>
  24. #include <pinmux.h>
  25. #include <hwregs/reg_rdwr.h>
  26. #include <hwregs/sser_defs.h>
  27. #include <hwregs/dma_defs.h>
  28. #include <hwregs/dma.h>
  29. #include <hwregs/intr_vect_defs.h>
  30. #include <hwregs/intr_vect.h>
  31. #include <hwregs/reg_map.h>
  32. #include <asm/sync_serial.h>
  33. /* The receiver is a bit tricky because of the continuous stream of data.*/
  34. /* */
  35. /* Three DMA descriptors are linked together. Each DMA descriptor is */
  36. /* responsible for port->bufchunk of a common buffer. */
  37. /* */
  38. /* +---------------------------------------------+ */
  39. /* | +----------+ +----------+ +----------+ | */
  40. /* +-> | Descr[0] |-->| Descr[1] |-->| Descr[2] |-+ */
  41. /* +----------+ +----------+ +----------+ */
  42. /* | | | */
  43. /* v v v */
  44. /* +-------------------------------------+ */
  45. /* | BUFFER | */
  46. /* +-------------------------------------+ */
  47. /* |<- data_avail ->| */
  48. /* readp writep */
  49. /* */
  50. /* If the application keeps up the pace readp will be right after writep.*/
  51. /* If the application can't keep the pace we have to throw away data. */
  52. /* The idea is that readp should be ready with the data pointed out by */
  53. /* Descr[i] when the DMA has filled in Descr[i+1]. */
  54. /* Otherwise we will discard */
  55. /* the rest of the data pointed out by Descr1 and set readp to the start */
  56. /* of Descr2 */
  57. #define SYNC_SERIAL_MAJOR 125
  58. /* IN_BUFFER_SIZE should be a multiple of 6 to make sure that 24 bit */
  59. /* words can be handled */
  60. #define IN_BUFFER_SIZE 12288
  61. #define IN_DESCR_SIZE 256
  62. #define NBR_IN_DESCR (IN_BUFFER_SIZE/IN_DESCR_SIZE)
  63. #define OUT_BUFFER_SIZE 1024*8
  64. #define NBR_OUT_DESCR 8
  65. #define DEFAULT_FRAME_RATE 0
  66. #define DEFAULT_WORD_RATE 7
  67. /* NOTE: Enabling some debug will likely cause overrun or underrun,
  68. * especially if manual mode is use.
  69. */
  70. #define DEBUG(x)
  71. #define DEBUGREAD(x)
  72. #define DEBUGWRITE(x)
  73. #define DEBUGPOLL(x)
  74. #define DEBUGRXINT(x)
  75. #define DEBUGTXINT(x)
  76. #define DEBUGTRDMA(x)
  77. #define DEBUGOUTBUF(x)
/* Per-port driver state: register instances, RX/TX buffers and the DMA
 * descriptor rings that feed them. */
typedef struct sync_port
{
	reg_scope_instances regi_sser;		/* sser register block */
	reg_scope_instances regi_dmain;		/* input (RX) DMA register block */
	reg_scope_instances regi_dmaout;	/* output (TX) DMA register block */
	char started; /* 1 if port has been started */
	char port_nbr; /* Port 0 or 1 */
	char busy; /* 1 if port is busy */
	char enabled; /* 1 if port is enabled */
	char use_dma; /* 1 if port uses dma */
	char tr_running;	/* nonzero while a transmit is in progress */
	char init_irqs;		/* 1 until IRQs/DMA channels are requested at first open */
	int output;		/* nonzero if port transmits (set by SSP_MODE ioctl) */
	int input;		/* nonzero if port receives (set by SSP_MODE ioctl) */
	/* Next byte to be read by application */
	volatile unsigned char *volatile readp;
	/* Next byte to be written by etrax */
	volatile unsigned char *volatile writep;
	unsigned int in_buffer_size;	/* size of flip[], i.e. IN_BUFFER_SIZE */
	unsigned int inbufchunk;	/* bytes per RX descriptor (IN_DESCR_SIZE) */
	unsigned char out_buffer[OUT_BUFFER_SIZE] __attribute__ ((aligned(32)));
	unsigned char in_buffer[IN_BUFFER_SIZE]__attribute__ ((aligned(32)));
	unsigned char flip[IN_BUFFER_SIZE] __attribute__ ((aligned(32)));
	struct dma_descr_data* next_rx_desc;	/* RX descriptor the DMA fills next */
	struct dma_descr_data* prev_rx_desc;	/* RX descriptor holding end-of-list */
	/* Pointer to the first available descriptor in the ring,
	 * unless active_tr_descr == catch_tr_descr and a dma
	 * transfer is active */
	struct dma_descr_data *active_tr_descr;
	/* Pointer to the first allocated descriptor in the ring */
	struct dma_descr_data *catch_tr_descr;
	/* Pointer to the descriptor with the current end-of-list */
	struct dma_descr_data *prev_tr_descr;
	int full;	/* nonzero when the TX descriptor ring is full */
	/* Pointer to the first byte being read by DMA
	 * or current position in out_buffer if not using DMA. */
	unsigned char *out_rd_ptr;
	/* Number of bytes currently locked for being read by DMA */
	int out_buf_count;
	dma_descr_data in_descr[NBR_IN_DESCR] __attribute__ ((__aligned__(16)));
	dma_descr_context in_context __attribute__ ((__aligned__(32)));
	dma_descr_data out_descr[NBR_OUT_DESCR]
		__attribute__ ((__aligned__(16)));
	dma_descr_context out_context __attribute__ ((__aligned__(32)));
	wait_queue_head_t out_wait_q;	/* writers sleep here for TX space */
	wait_queue_head_t in_wait_q;	/* readers sleep here for RX data */
	spinlock_t lock;
} sync_port;
/* Serializes open() paths (protects busy counting and one-time IRQ setup). */
static DEFINE_MUTEX(sync_serial_mutex);

/* Forward declarations. */
static int etrax_sync_serial_init(void);
static void initialize_port(int portnbr);
static inline int sync_data_avail(struct sync_port *port);
static int sync_serial_open(struct inode *, struct file*);
static int sync_serial_release(struct inode*, struct file*);
static unsigned int sync_serial_poll(struct file *filp, poll_table *wait);
static int sync_serial_ioctl(struct file *,
	unsigned int cmd, unsigned long arg);
static ssize_t sync_serial_write(struct file * file, const char * buf,
	size_t count, loff_t *ppos);
static ssize_t sync_serial_read(struct file *file, char *buf,
	size_t count, loff_t *ppos);

/* SYNC_SER_DMA is defined when at least one configured port uses DMA. */
#if (defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT0) && \
	defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL0_DMA)) || \
    (defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT1) && \
	defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL1_DMA))
#define SYNC_SER_DMA
#endif

static void send_word(sync_port* port);
static void start_dma_out(struct sync_port *port, const char *data, int count);
static void start_dma_in(sync_port* port);
#ifdef SYNC_SER_DMA
static irqreturn_t tr_interrupt(int irq, void *dev_id);
static irqreturn_t rx_interrupt(int irq, void *dev_id);
#endif

/* SYNC_SER_MANUAL is defined when at least one configured port runs
 * interrupt-driven without DMA. */
#if (defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT0) && \
	!defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL0_DMA)) || \
    (defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT1) && \
	!defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL1_DMA))
#define SYNC_SER_MANUAL
#endif
#ifdef SYNC_SER_MANUAL
static irqreturn_t manual_interrupt(int irq, void *dev_id);
#endif

/* Per-chip register-instance and DMA-channel assignments for port 0. */
#ifdef CONFIG_ETRAXFS /* ETRAX FS */
#define OUT_DMA_NBR 4
#define IN_DMA_NBR 5
#define PINMUX_SSER pinmux_sser0
#define SYNCSER_INST regi_sser0
#define SYNCSER_INTR_VECT SSER0_INTR_VECT
#define OUT_DMA_INST regi_dma4
#define IN_DMA_INST regi_dma5
#define DMA_OUT_INTR_VECT DMA4_INTR_VECT
#define DMA_IN_INTR_VECT DMA5_INTR_VECT
#define REQ_DMA_SYNCSER dma_sser0
#else /* Artpec-3 */
#define OUT_DMA_NBR 6
#define IN_DMA_NBR 7
#define PINMUX_SSER pinmux_sser
#define SYNCSER_INST regi_sser
#define SYNCSER_INTR_VECT SSER_INTR_VECT
#define OUT_DMA_INST regi_dma6
#define IN_DMA_INST regi_dma7
#define DMA_OUT_INTR_VECT DMA6_INTR_VECT
#define DMA_IN_INTR_VECT DMA7_INTR_VECT
#define REQ_DMA_SYNCSER dma_sser
#endif
/* The ports */
static struct sync_port ports[]=
{
	{
		.regi_sser = SYNCSER_INST,
		.regi_dmaout = OUT_DMA_INST,
		.regi_dmain = IN_DMA_INST,
#if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL0_DMA)
		.use_dma = 1,
#else
		.use_dma = 0,
#endif
	}
#ifdef CONFIG_ETRAXFS
	,
	{
		/* Port 1 only exists on ETRAX FS. */
		.regi_sser = regi_sser1,
		.regi_dmaout = regi_dma6,
		.regi_dmain = regi_dma7,
#if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL1_DMA)
		.use_dma = 1,
#else
		.use_dma = 0,
#endif
	}
#endif
};

#define NBR_PORTS ARRAY_SIZE(ports)

static const struct file_operations sync_serial_fops = {
	.owner = THIS_MODULE,
	.write = sync_serial_write,
	.read = sync_serial_read,
	.poll = sync_serial_poll,
	.unlocked_ioctl = sync_serial_ioctl,
	.open = sync_serial_open,
	.release = sync_serial_release,
	.llseek = noop_llseek,
};
  222. static int __init etrax_sync_serial_init(void)
  223. {
  224. ports[0].enabled = 0;
  225. #ifdef CONFIG_ETRAXFS
  226. ports[1].enabled = 0;
  227. #endif
  228. if (register_chrdev(SYNC_SERIAL_MAJOR, "sync serial",
  229. &sync_serial_fops) < 0) {
  230. printk(KERN_WARNING
  231. "Unable to get major for synchronous serial port\n");
  232. return -EBUSY;
  233. }
  234. /* Initialize Ports */
  235. #if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT0)
  236. if (crisv32_pinmux_alloc_fixed(PINMUX_SSER)) {
  237. printk(KERN_WARNING
  238. "Unable to alloc pins for synchronous serial port 0\n");
  239. return -EIO;
  240. }
  241. ports[0].enabled = 1;
  242. initialize_port(0);
  243. #endif
  244. #if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT1)
  245. if (crisv32_pinmux_alloc_fixed(pinmux_sser1)) {
  246. printk(KERN_WARNING
  247. "Unable to alloc pins for synchronous serial port 0\n");
  248. return -EIO;
  249. }
  250. ports[1].enabled = 1;
  251. initialize_port(1);
  252. #endif
  253. #ifdef CONFIG_ETRAXFS
  254. printk(KERN_INFO "ETRAX FS synchronous serial port driver\n");
  255. #else
  256. printk(KERN_INFO "Artpec-3 synchronous serial port driver\n");
  257. #endif
  258. return 0;
  259. }
/* One-time setup of a port: software state, default sser register
 * configuration (8-bit words, MSB first, internal clock), and the TX DMA
 * descriptor ring.  Register-write order follows the hardware docs; do
 * not reorder the REG_WR calls. */
static void __init initialize_port(int portnbr)
{
	int __attribute__((unused)) i;
	struct sync_port *port = &ports[portnbr];
	reg_sser_rw_cfg cfg = {0};
	reg_sser_rw_frm_cfg frm_cfg = {0};
	reg_sser_rw_tr_cfg tr_cfg = {0};
	reg_sser_rw_rec_cfg rec_cfg = {0};

	DEBUG(printk(KERN_DEBUG "Init sync serial port %d\n", portnbr));

	/* Software state: IRQs not yet requested, TX buffer empty,
	 * default direction is output-only. */
	port->port_nbr = portnbr;
	port->init_irqs = 1;
	port->out_rd_ptr = port->out_buffer;
	port->out_buf_count = 0;
	port->output = 1;
	port->input = 0;

	/* RX flip buffer starts empty: read == write pointer. */
	port->readp = port->flip;
	port->writep = port->flip;
	port->in_buffer_size = IN_BUFFER_SIZE;
	port->inbufchunk = IN_DESCR_SIZE;
	port->next_rx_desc = &port->in_descr[0];
	port->prev_rx_desc = &port->in_descr[NBR_IN_DESCR-1];
	port->prev_rx_desc->eol = 1;

	init_waitqueue_head(&port->out_wait_q);
	init_waitqueue_head(&port->in_wait_q);
	spin_lock_init(&port->lock);

	/* General config: internally generated output clock. */
	cfg.out_clk_src = regk_sser_intern_clk;
	cfg.out_clk_pol = regk_sser_pos;
	cfg.clk_od_mode = regk_sser_no;
	cfg.clk_dir = regk_sser_out;
	cfg.gate_clk = regk_sser_no;
	cfg.base_freq = regk_sser_f29_493;
	cfg.clk_div = 256;
	REG_WR(sser, port->regi_sser, rw_cfg, cfg);

	/* Frame config: edge-type sync, frame pin driven by us. */
	frm_cfg.wordrate = DEFAULT_WORD_RATE;
	frm_cfg.type = regk_sser_edge;
	frm_cfg.frame_pin_dir = regk_sser_out;
	frm_cfg.frame_pin_use = regk_sser_frm;
	frm_cfg.status_pin_dir = regk_sser_in;
	frm_cfg.status_pin_use = regk_sser_hold;
	frm_cfg.out_on = regk_sser_tr;
	frm_cfg.tr_delay = 1;
	REG_WR(sser, port->regi_sser, rw_frm_cfg, frm_cfg);

	/* Transmit config: 8-bit samples (sample_size is size-1), MSB first. */
	tr_cfg.urun_stop = regk_sser_no;
	tr_cfg.sample_size = 7;
	tr_cfg.sh_dir = regk_sser_msbfirst;
	tr_cfg.use_dma = port->use_dma ? regk_sser_yes : regk_sser_no;
#if 0
	tr_cfg.rate_ctrl = regk_sser_bulk;
	tr_cfg.data_pin_use = regk_sser_dout;
#else
	tr_cfg.rate_ctrl = regk_sser_iso;
	tr_cfg.data_pin_use = regk_sser_dout;
#endif
	tr_cfg.bulk_wspace = 1;
	REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg);

	/* Receive config mirrors the transmit settings. */
	rec_cfg.sample_size = 7;
	rec_cfg.sh_dir = regk_sser_msbfirst;
	rec_cfg.use_dma = port->use_dma ? regk_sser_yes : regk_sser_no;
	rec_cfg.fifo_thr = regk_sser_inf;
	REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg);

#ifdef SYNC_SER_DMA
	/* Setup the descriptor ring for dma out/transmit. */
	for (i = 0; i < NBR_OUT_DESCR; i++) {
		port->out_descr[i].wait = 0;
		port->out_descr[i].intr = 1;
		port->out_descr[i].eol = 0;
		port->out_descr[i].out_eop = 0;
		/* DMA follows physical addresses. */
		port->out_descr[i].next =
			(dma_descr_data *)virt_to_phys(&port->out_descr[i+1]);
	}

	/* Create a ring from the list. */
	port->out_descr[NBR_OUT_DESCR-1].next =
		(dma_descr_data *)virt_to_phys(&port->out_descr[0]);

	/* Setup context for traversing the ring. */
	port->active_tr_descr = &port->out_descr[0];
	port->prev_tr_descr = &port->out_descr[NBR_OUT_DESCR-1];
	port->catch_tr_descr = &port->out_descr[0];
#endif
}
  339. static inline int sync_data_avail(struct sync_port *port)
  340. {
  341. int avail;
  342. unsigned char *start;
  343. unsigned char *end;
  344. start = (unsigned char*)port->readp; /* cast away volatile */
  345. end = (unsigned char*)port->writep; /* cast away volatile */
  346. /* 0123456789 0123456789
  347. * ----- - -----
  348. * ^rp ^wp ^wp ^rp
  349. */
  350. if (end >= start)
  351. avail = end - start;
  352. else
  353. avail = port->in_buffer_size - (start - end);
  354. return avail;
  355. }
  356. static inline int sync_data_avail_to_end(struct sync_port *port)
  357. {
  358. int avail;
  359. unsigned char *start;
  360. unsigned char *end;
  361. start = (unsigned char*)port->readp; /* cast away volatile */
  362. end = (unsigned char*)port->writep; /* cast away volatile */
  363. /* 0123456789 0123456789
  364. * ----- -----
  365. * ^rp ^wp ^wp ^rp
  366. */
  367. if (end >= start)
  368. avail = end - start;
  369. else
  370. avail = port->flip + port->in_buffer_size - start;
  371. return avail;
  372. }
  373. static int sync_serial_open(struct inode *inode, struct file *file)
  374. {
  375. int dev = iminor(inode);
  376. int ret = -EBUSY;
  377. sync_port *port;
  378. reg_dma_rw_cfg cfg = {.en = regk_dma_yes};
  379. reg_dma_rw_intr_mask intr_mask = {.data = regk_dma_yes};
  380. mutex_lock(&sync_serial_mutex);
  381. DEBUG(printk(KERN_DEBUG "Open sync serial port %d\n", dev));
  382. if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled)
  383. {
  384. DEBUG(printk(KERN_DEBUG "Invalid minor %d\n", dev));
  385. ret = -ENODEV;
  386. goto out;
  387. }
  388. port = &ports[dev];
  389. /* Allow open this device twice (assuming one reader and one writer) */
  390. if (port->busy == 2)
  391. {
  392. DEBUG(printk(KERN_DEBUG "Device is busy.. \n"));
  393. goto out;
  394. }
  395. if (port->init_irqs) {
  396. if (port->use_dma) {
  397. if (port == &ports[0]) {
  398. #ifdef SYNC_SER_DMA
  399. if (request_irq(DMA_OUT_INTR_VECT,
  400. tr_interrupt,
  401. 0,
  402. "synchronous serial 0 dma tr",
  403. &ports[0])) {
  404. printk(KERN_CRIT "Can't allocate sync serial port 0 IRQ");
  405. goto out;
  406. } else if (request_irq(DMA_IN_INTR_VECT,
  407. rx_interrupt,
  408. 0,
  409. "synchronous serial 1 dma rx",
  410. &ports[0])) {
  411. free_irq(DMA_OUT_INTR_VECT, &port[0]);
  412. printk(KERN_CRIT "Can't allocate sync serial port 0 IRQ");
  413. goto out;
  414. } else if (crisv32_request_dma(OUT_DMA_NBR,
  415. "synchronous serial 0 dma tr",
  416. DMA_VERBOSE_ON_ERROR,
  417. 0,
  418. REQ_DMA_SYNCSER)) {
  419. free_irq(DMA_OUT_INTR_VECT, &port[0]);
  420. free_irq(DMA_IN_INTR_VECT, &port[0]);
  421. printk(KERN_CRIT "Can't allocate sync serial port 0 TX DMA channel");
  422. goto out;
  423. } else if (crisv32_request_dma(IN_DMA_NBR,
  424. "synchronous serial 0 dma rec",
  425. DMA_VERBOSE_ON_ERROR,
  426. 0,
  427. REQ_DMA_SYNCSER)) {
  428. crisv32_free_dma(OUT_DMA_NBR);
  429. free_irq(DMA_OUT_INTR_VECT, &port[0]);
  430. free_irq(DMA_IN_INTR_VECT, &port[0]);
  431. printk(KERN_CRIT "Can't allocate sync serial port 1 RX DMA channel");
  432. goto out;
  433. }
  434. #endif
  435. }
  436. #ifdef CONFIG_ETRAXFS
  437. else if (port == &ports[1]) {
  438. #ifdef SYNC_SER_DMA
  439. if (request_irq(DMA6_INTR_VECT,
  440. tr_interrupt,
  441. 0,
  442. "synchronous serial 1 dma tr",
  443. &ports[1])) {
  444. printk(KERN_CRIT "Can't allocate sync serial port 1 IRQ");
  445. goto out;
  446. } else if (request_irq(DMA7_INTR_VECT,
  447. rx_interrupt,
  448. 0,
  449. "synchronous serial 1 dma rx",
  450. &ports[1])) {
  451. free_irq(DMA6_INTR_VECT, &ports[1]);
  452. printk(KERN_CRIT "Can't allocate sync serial port 3 IRQ");
  453. goto out;
  454. } else if (crisv32_request_dma(
  455. SYNC_SER1_TX_DMA_NBR,
  456. "synchronous serial 1 dma tr",
  457. DMA_VERBOSE_ON_ERROR,
  458. 0,
  459. dma_sser1)) {
  460. free_irq(DMA6_INTR_VECT, &ports[1]);
  461. free_irq(DMA7_INTR_VECT, &ports[1]);
  462. printk(KERN_CRIT "Can't allocate sync serial port 3 TX DMA channel");
  463. goto out;
  464. } else if (crisv32_request_dma(
  465. SYNC_SER1_RX_DMA_NBR,
  466. "synchronous serial 3 dma rec",
  467. DMA_VERBOSE_ON_ERROR,
  468. 0,
  469. dma_sser1)) {
  470. crisv32_free_dma(SYNC_SER1_TX_DMA_NBR);
  471. free_irq(DMA6_INTR_VECT, &ports[1]);
  472. free_irq(DMA7_INTR_VECT, &ports[1]);
  473. printk(KERN_CRIT "Can't allocate sync serial port 3 RX DMA channel");
  474. goto out;
  475. }
  476. #endif
  477. }
  478. #endif
  479. /* Enable DMAs */
  480. REG_WR(dma, port->regi_dmain, rw_cfg, cfg);
  481. REG_WR(dma, port->regi_dmaout, rw_cfg, cfg);
  482. /* Enable DMA IRQs */
  483. REG_WR(dma, port->regi_dmain, rw_intr_mask, intr_mask);
  484. REG_WR(dma, port->regi_dmaout, rw_intr_mask, intr_mask);
  485. /* Set up wordsize = 1 for DMAs. */
  486. DMA_WR_CMD (port->regi_dmain, regk_dma_set_w_size1);
  487. DMA_WR_CMD (port->regi_dmaout, regk_dma_set_w_size1);
  488. start_dma_in(port);
  489. port->init_irqs = 0;
  490. } else { /* !port->use_dma */
  491. #ifdef SYNC_SER_MANUAL
  492. if (port == &ports[0]) {
  493. if (request_irq(SYNCSER_INTR_VECT,
  494. manual_interrupt,
  495. 0,
  496. "synchronous serial manual irq",
  497. &ports[0])) {
  498. printk("Can't allocate sync serial manual irq");
  499. goto out;
  500. }
  501. }
  502. #ifdef CONFIG_ETRAXFS
  503. else if (port == &ports[1]) {
  504. if (request_irq(SSER1_INTR_VECT,
  505. manual_interrupt,
  506. 0,
  507. "synchronous serial manual irq",
  508. &ports[1])) {
  509. printk(KERN_CRIT "Can't allocate sync serial manual irq");
  510. goto out;
  511. }
  512. }
  513. #endif
  514. port->init_irqs = 0;
  515. #else
  516. panic("sync_serial: Manual mode not supported.\n");
  517. #endif /* SYNC_SER_MANUAL */
  518. }
  519. } /* port->init_irqs */
  520. port->busy++;
  521. ret = 0;
  522. out:
  523. mutex_unlock(&sync_serial_mutex);
  524. return ret;
  525. }
  526. static int sync_serial_release(struct inode *inode, struct file *file)
  527. {
  528. int dev = iminor(inode);
  529. sync_port *port;
  530. if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled)
  531. {
  532. DEBUG(printk("Invalid minor %d\n", dev));
  533. return -ENODEV;
  534. }
  535. port = &ports[dev];
  536. if (port->busy)
  537. port->busy--;
  538. if (!port->busy)
  539. /* XXX */ ;
  540. return 0;
  541. }
/* poll(2)/select(2) entry point.
 * The first poll lazily starts the port hardware (enables the sser core
 * and, if the port is configured for input, the receiver).  Reports
 * POLLOUT when TX descriptors/buffer space are available and POLLIN when
 * at least one inbufchunk of received data is ready.
 * NOTE(review): no minor validation here — relies on open() having
 * rejected invalid minors. */
static unsigned int sync_serial_poll(struct file *file, poll_table *wait)
{
	int dev = iminor(file_inode(file));
	unsigned int mask = 0;
	sync_port *port;
	DEBUGPOLL( static unsigned int prev_mask = 0; );

	port = &ports[dev];

	/* Start the port on first poll: enable the core and the receiver
	 * (rec_en mirrors the configured input direction). */
	if (!port->started) {
		reg_sser_rw_cfg cfg = REG_RD(sser, port->regi_sser, rw_cfg);
		reg_sser_rw_rec_cfg rec_cfg =
			REG_RD(sser, port->regi_sser, rw_rec_cfg);
		cfg.en = regk_sser_yes;
		rec_cfg.rec_en = port->input;
		REG_WR(sser, port->regi_sser, rw_cfg, cfg);
		REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg);
		port->started = 1;
	}

	poll_wait(file, &port->out_wait_q, wait);
	poll_wait(file, &port->in_wait_q, wait);

	/* No active transfer, descriptors are available */
	if (port->output && !port->tr_running)
		mask |= POLLOUT | POLLWRNORM;

	/* Descriptor and buffer space available. */
	if (port->output &&
	    port->active_tr_descr != port->catch_tr_descr &&
	    port->out_buf_count < OUT_BUFFER_SIZE)
		mask |= POLLOUT | POLLWRNORM;

	/* At least an inbufchunk of data */
	if (port->input && sync_data_avail(port) >= port->inbufchunk)
		mask |= POLLIN | POLLRDNORM;

	DEBUGPOLL(if (mask != prev_mask)
		printk("sync_serial_poll: mask 0x%08X %s %s\n", mask,
			mask&POLLOUT?"POLLOUT":"", mask&POLLIN?"POLLIN":"");
		prev_mask = mask;
	);
	return mask;
}
  579. static int sync_serial_ioctl(struct file *file,
  580. unsigned int cmd, unsigned long arg)
  581. {
  582. int return_val = 0;
  583. int dma_w_size = regk_dma_set_w_size1;
  584. int dev = iminor(file_inode(file));
  585. sync_port *port;
  586. reg_sser_rw_tr_cfg tr_cfg;
  587. reg_sser_rw_rec_cfg rec_cfg;
  588. reg_sser_rw_frm_cfg frm_cfg;
  589. reg_sser_rw_cfg gen_cfg;
  590. reg_sser_rw_intr_mask intr_mask;
  591. if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled)
  592. {
  593. DEBUG(printk("Invalid minor %d\n", dev));
  594. return -1;
  595. }
  596. port = &ports[dev];
  597. spin_lock_irq(&port->lock);
  598. tr_cfg = REG_RD(sser, port->regi_sser, rw_tr_cfg);
  599. rec_cfg = REG_RD(sser, port->regi_sser, rw_rec_cfg);
  600. frm_cfg = REG_RD(sser, port->regi_sser, rw_frm_cfg);
  601. gen_cfg = REG_RD(sser, port->regi_sser, rw_cfg);
  602. intr_mask = REG_RD(sser, port->regi_sser, rw_intr_mask);
  603. switch(cmd)
  604. {
  605. case SSP_SPEED:
  606. if (GET_SPEED(arg) == CODEC)
  607. {
  608. unsigned int freq;
  609. gen_cfg.base_freq = regk_sser_f32;
  610. /* Clock divider will internally be
  611. * gen_cfg.clk_div + 1.
  612. */
  613. freq = GET_FREQ(arg);
  614. switch (freq) {
  615. case FREQ_32kHz:
  616. case FREQ_64kHz:
  617. case FREQ_128kHz:
  618. case FREQ_256kHz:
  619. gen_cfg.clk_div = 125 *
  620. (1 << (freq - FREQ_256kHz)) - 1;
  621. break;
  622. case FREQ_512kHz:
  623. gen_cfg.clk_div = 62;
  624. break;
  625. case FREQ_1MHz:
  626. case FREQ_2MHz:
  627. case FREQ_4MHz:
  628. gen_cfg.clk_div = 8 * (1 << freq) - 1;
  629. break;
  630. }
  631. } else {
  632. gen_cfg.base_freq = regk_sser_f29_493;
  633. switch (GET_SPEED(arg)) {
  634. case SSP150:
  635. gen_cfg.clk_div = 29493000 / (150 * 8) - 1;
  636. break;
  637. case SSP300:
  638. gen_cfg.clk_div = 29493000 / (300 * 8) - 1;
  639. break;
  640. case SSP600:
  641. gen_cfg.clk_div = 29493000 / (600 * 8) - 1;
  642. break;
  643. case SSP1200:
  644. gen_cfg.clk_div = 29493000 / (1200 * 8) - 1;
  645. break;
  646. case SSP2400:
  647. gen_cfg.clk_div = 29493000 / (2400 * 8) - 1;
  648. break;
  649. case SSP4800:
  650. gen_cfg.clk_div = 29493000 / (4800 * 8) - 1;
  651. break;
  652. case SSP9600:
  653. gen_cfg.clk_div = 29493000 / (9600 * 8) - 1;
  654. break;
  655. case SSP19200:
  656. gen_cfg.clk_div = 29493000 / (19200 * 8) - 1;
  657. break;
  658. case SSP28800:
  659. gen_cfg.clk_div = 29493000 / (28800 * 8) - 1;
  660. break;
  661. case SSP57600:
  662. gen_cfg.clk_div = 29493000 / (57600 * 8) - 1;
  663. break;
  664. case SSP115200:
  665. gen_cfg.clk_div = 29493000 / (115200 * 8) - 1;
  666. break;
  667. case SSP230400:
  668. gen_cfg.clk_div = 29493000 / (230400 * 8) - 1;
  669. break;
  670. case SSP460800:
  671. gen_cfg.clk_div = 29493000 / (460800 * 8) - 1;
  672. break;
  673. case SSP921600:
  674. gen_cfg.clk_div = 29493000 / (921600 * 8) - 1;
  675. break;
  676. case SSP3125000:
  677. gen_cfg.base_freq = regk_sser_f100;
  678. gen_cfg.clk_div = 100000000 / (3125000 * 8) - 1;
  679. break;
  680. }
  681. }
  682. frm_cfg.wordrate = GET_WORD_RATE(arg);
  683. break;
  684. case SSP_MODE:
  685. switch(arg)
  686. {
  687. case MASTER_OUTPUT:
  688. port->output = 1;
  689. port->input = 0;
  690. frm_cfg.out_on = regk_sser_tr;
  691. frm_cfg.frame_pin_dir = regk_sser_out;
  692. gen_cfg.clk_dir = regk_sser_out;
  693. break;
  694. case SLAVE_OUTPUT:
  695. port->output = 1;
  696. port->input = 0;
  697. frm_cfg.frame_pin_dir = regk_sser_in;
  698. gen_cfg.clk_dir = regk_sser_in;
  699. break;
  700. case MASTER_INPUT:
  701. port->output = 0;
  702. port->input = 1;
  703. frm_cfg.frame_pin_dir = regk_sser_out;
  704. frm_cfg.out_on = regk_sser_intern_tb;
  705. gen_cfg.clk_dir = regk_sser_out;
  706. break;
  707. case SLAVE_INPUT:
  708. port->output = 0;
  709. port->input = 1;
  710. frm_cfg.frame_pin_dir = regk_sser_in;
  711. gen_cfg.clk_dir = regk_sser_in;
  712. break;
  713. case MASTER_BIDIR:
  714. port->output = 1;
  715. port->input = 1;
  716. frm_cfg.frame_pin_dir = regk_sser_out;
  717. frm_cfg.out_on = regk_sser_intern_tb;
  718. gen_cfg.clk_dir = regk_sser_out;
  719. break;
  720. case SLAVE_BIDIR:
  721. port->output = 1;
  722. port->input = 1;
  723. frm_cfg.frame_pin_dir = regk_sser_in;
  724. gen_cfg.clk_dir = regk_sser_in;
  725. break;
  726. default:
  727. spin_unlock_irq(&port->lock);
  728. return -EINVAL;
  729. }
  730. if (!port->use_dma || (arg == MASTER_OUTPUT || arg == SLAVE_OUTPUT))
  731. intr_mask.rdav = regk_sser_yes;
  732. break;
  733. case SSP_FRAME_SYNC:
  734. if (arg & NORMAL_SYNC) {
  735. frm_cfg.rec_delay = 1;
  736. frm_cfg.tr_delay = 1;
  737. }
  738. else if (arg & EARLY_SYNC)
  739. frm_cfg.rec_delay = frm_cfg.tr_delay = 0;
  740. else if (arg & SECOND_WORD_SYNC) {
  741. frm_cfg.rec_delay = 7;
  742. frm_cfg.tr_delay = 1;
  743. }
  744. tr_cfg.bulk_wspace = frm_cfg.tr_delay;
  745. frm_cfg.early_wend = regk_sser_yes;
  746. if (arg & BIT_SYNC)
  747. frm_cfg.type = regk_sser_edge;
  748. else if (arg & WORD_SYNC)
  749. frm_cfg.type = regk_sser_level;
  750. else if (arg & EXTENDED_SYNC)
  751. frm_cfg.early_wend = regk_sser_no;
  752. if (arg & SYNC_ON)
  753. frm_cfg.frame_pin_use = regk_sser_frm;
  754. else if (arg & SYNC_OFF)
  755. frm_cfg.frame_pin_use = regk_sser_gio0;
  756. dma_w_size = regk_dma_set_w_size2;
  757. if (arg & WORD_SIZE_8) {
  758. rec_cfg.sample_size = tr_cfg.sample_size = 7;
  759. dma_w_size = regk_dma_set_w_size1;
  760. } else if (arg & WORD_SIZE_12)
  761. rec_cfg.sample_size = tr_cfg.sample_size = 11;
  762. else if (arg & WORD_SIZE_16)
  763. rec_cfg.sample_size = tr_cfg.sample_size = 15;
  764. else if (arg & WORD_SIZE_24)
  765. rec_cfg.sample_size = tr_cfg.sample_size = 23;
  766. else if (arg & WORD_SIZE_32)
  767. rec_cfg.sample_size = tr_cfg.sample_size = 31;
  768. if (arg & BIT_ORDER_MSB)
  769. rec_cfg.sh_dir = tr_cfg.sh_dir = regk_sser_msbfirst;
  770. else if (arg & BIT_ORDER_LSB)
  771. rec_cfg.sh_dir = tr_cfg.sh_dir = regk_sser_lsbfirst;
  772. if (arg & FLOW_CONTROL_ENABLE) {
  773. frm_cfg.status_pin_use = regk_sser_frm;
  774. rec_cfg.fifo_thr = regk_sser_thr16;
  775. } else if (arg & FLOW_CONTROL_DISABLE) {
  776. frm_cfg.status_pin_use = regk_sser_gio0;
  777. rec_cfg.fifo_thr = regk_sser_inf;
  778. }
  779. if (arg & CLOCK_NOT_GATED)
  780. gen_cfg.gate_clk = regk_sser_no;
  781. else if (arg & CLOCK_GATED)
  782. gen_cfg.gate_clk = regk_sser_yes;
  783. break;
  784. case SSP_IPOLARITY:
  785. /* NOTE!! negedge is considered NORMAL */
  786. if (arg & CLOCK_NORMAL)
  787. rec_cfg.clk_pol = regk_sser_neg;
  788. else if (arg & CLOCK_INVERT)
  789. rec_cfg.clk_pol = regk_sser_pos;
  790. if (arg & FRAME_NORMAL)
  791. frm_cfg.level = regk_sser_pos_hi;
  792. else if (arg & FRAME_INVERT)
  793. frm_cfg.level = regk_sser_neg_lo;
  794. if (arg & STATUS_NORMAL)
  795. gen_cfg.hold_pol = regk_sser_pos;
  796. else if (arg & STATUS_INVERT)
  797. gen_cfg.hold_pol = regk_sser_neg;
  798. break;
  799. case SSP_OPOLARITY:
  800. if (arg & CLOCK_NORMAL)
  801. gen_cfg.out_clk_pol = regk_sser_pos;
  802. else if (arg & CLOCK_INVERT)
  803. gen_cfg.out_clk_pol = regk_sser_neg;
  804. if (arg & FRAME_NORMAL)
  805. frm_cfg.level = regk_sser_pos_hi;
  806. else if (arg & FRAME_INVERT)
  807. frm_cfg.level = regk_sser_neg_lo;
  808. if (arg & STATUS_NORMAL)
  809. gen_cfg.hold_pol = regk_sser_pos;
  810. else if (arg & STATUS_INVERT)
  811. gen_cfg.hold_pol = regk_sser_neg;
  812. break;
  813. case SSP_SPI:
  814. rec_cfg.fifo_thr = regk_sser_inf;
  815. rec_cfg.sh_dir = tr_cfg.sh_dir = regk_sser_msbfirst;
  816. rec_cfg.sample_size = tr_cfg.sample_size = 7;
  817. frm_cfg.frame_pin_use = regk_sser_frm;
  818. frm_cfg.type = regk_sser_level;
  819. frm_cfg.tr_delay = 1;
  820. frm_cfg.level = regk_sser_neg_lo;
  821. if (arg & SPI_SLAVE)
  822. {
  823. rec_cfg.clk_pol = regk_sser_neg;
  824. gen_cfg.clk_dir = regk_sser_in;
  825. port->input = 1;
  826. port->output = 0;
  827. }
  828. else
  829. {
  830. gen_cfg.out_clk_pol = regk_sser_pos;
  831. port->input = 0;
  832. port->output = 1;
  833. gen_cfg.clk_dir = regk_sser_out;
  834. }
  835. break;
  836. case SSP_INBUFCHUNK:
  837. break;
  838. default:
  839. return_val = -1;
  840. }
  841. if (port->started) {
  842. rec_cfg.rec_en = port->input;
  843. gen_cfg.en = (port->output | port->input);
  844. }
  845. REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg);
  846. REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg);
  847. REG_WR(sser, port->regi_sser, rw_frm_cfg, frm_cfg);
  848. REG_WR(sser, port->regi_sser, rw_intr_mask, intr_mask);
  849. REG_WR(sser, port->regi_sser, rw_cfg, gen_cfg);
  850. if (cmd == SSP_FRAME_SYNC && (arg & (WORD_SIZE_8 | WORD_SIZE_12 |
  851. WORD_SIZE_16 | WORD_SIZE_24 | WORD_SIZE_32))) {
  852. int en = gen_cfg.en;
  853. gen_cfg.en = 0;
  854. REG_WR(sser, port->regi_sser, rw_cfg, gen_cfg);
  855. /* ##### Should DMA be stoped before we change dma size? */
  856. DMA_WR_CMD(port->regi_dmain, dma_w_size);
  857. DMA_WR_CMD(port->regi_dmaout, dma_w_size);
  858. gen_cfg.en = en;
  859. REG_WR(sser, port->regi_sser, rw_cfg, gen_cfg);
  860. }
  861. spin_unlock_irq(&port->lock);
  862. return return_val;
  863. }
  864. static long sync_serial_ioctl(struct file *file,
  865. unsigned int cmd, unsigned long arg)
  866. {
  867. long ret;
  868. mutex_lock(&sync_serial_mutex);
  869. ret = sync_serial_ioctl_unlocked(file, cmd, arg);
  870. mutex_unlock(&sync_serial_mutex);
  871. return ret;
  872. }
/* NOTE: sync_serial_write does not support concurrency */
/*
 * Copy user data into the transmit ring buffer and make sure
 * transmission is running (via DMA or the trdy interrupt).
 *
 * The copy is truncated at the physical end of the ring buffer rather
 * than wrapped; user space is expected to retry with the remainder.
 * Returns the number of bytes accepted (possibly < count), -ENODEV for
 * a bad minor, -EAGAIN when no buffer/descriptor space is available,
 * -EFAULT on a faulting user pointer, or -EINTR if the blocking wait
 * was interrupted by a signal.
 */
static ssize_t sync_serial_write(struct file *file, const char *buf,
	size_t count, loff_t *ppos)
{
	int dev = iminor(file_inode(file));
	DECLARE_WAITQUEUE(wait, current);
	struct sync_port *port;
	int trunc_count;		/* Bytes actually accepted this call */
	unsigned long flags;
	int bytes_free;
	int out_buf_count;
	unsigned char *rd_ptr;		/* First allocated byte in the buffer */
	unsigned char *wr_ptr;		/* First free byte in the buffer */
	unsigned char *buf_stop_ptr;	/* Last byte + 1 */

	if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled) {
		DEBUG(printk("Invalid minor %d\n", dev));
		return -ENODEV;
	}
	port = &ports[dev];

	/* |<-               OUT_BUFFER_SIZE                   ->|
	 *           |<-  out_buf_count  ->|
	 *                                 |<- trunc_count ->| ...->|
	 *  ______________________________________________________
	 * |  free   |       data          |         free         |
	 * |_________|_____________________|______________________|
	 *           ^ rd_ptr              ^ wr_ptr
	 */
	DEBUGWRITE(printk(KERN_DEBUG "W d%d c %lu a: %p c: %p\n",
			  port->port_nbr, count, port->active_tr_descr,
			  port->catch_tr_descr));

	/* Read variables that may be updated by interrupts */
	spin_lock_irqsave(&port->lock, flags);
	rd_ptr = port->out_rd_ptr;
	out_buf_count = port->out_buf_count;
	spin_unlock_irqrestore(&port->lock, flags);

	/* Check if resources are available */
	/* With DMA, "full" means the writer has caught up with the oldest
	 * in-flight (catch) descriptor; without DMA it is a plain byte
	 * count check. */
	if (port->tr_running &&
	    ((port->use_dma && port->active_tr_descr == port->catch_tr_descr) ||
	     out_buf_count >= OUT_BUFFER_SIZE)) {
		DEBUGWRITE(printk(KERN_DEBUG "sser%d full\n", dev));
		return -EAGAIN;
	}

	buf_stop_ptr = port->out_buffer + OUT_BUFFER_SIZE;

	/* Determine pointer to the first free byte, before copying. */
	wr_ptr = rd_ptr + out_buf_count;
	if (wr_ptr >= buf_stop_ptr)
		wr_ptr -= OUT_BUFFER_SIZE;

	/* If we wrap the ring buffer, let the user space program handle it by
	 * truncating the data. This could be more elegant, small buffer
	 * fragments may occur.
	 */
	bytes_free = OUT_BUFFER_SIZE - out_buf_count;
	if (wr_ptr + bytes_free > buf_stop_ptr)
		bytes_free = buf_stop_ptr - wr_ptr;
	trunc_count = (count < bytes_free) ? count : bytes_free;

	if (copy_from_user(wr_ptr, buf, trunc_count))
		return -EFAULT;

	DEBUGOUTBUF(printk(KERN_DEBUG "%-4d + %-4d = %-4d %p %p %p\n",
			   out_buf_count, trunc_count,
			   port->out_buf_count, port->out_buffer,
			   wr_ptr, buf_stop_ptr));

	/* Make sure transmitter/receiver is running */
	if (!port->started) {
		reg_sser_rw_cfg cfg = REG_RD(sser, port->regi_sser, rw_cfg);
		reg_sser_rw_rec_cfg rec_cfg = REG_RD(sser, port->regi_sser, rw_rec_cfg);
		cfg.en = regk_sser_yes;
		rec_cfg.rec_en = port->input;
		REG_WR(sser, port->regi_sser, rw_cfg, cfg);
		REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg);
		port->started = 1;
	}

	/* Setup wait if blocking */
	/* Queue ourselves before handing the data to the DMA/ISR so a
	 * completion that fires between unlock and schedule() is not
	 * lost. */
	if (!(file->f_flags & O_NONBLOCK)) {
		add_wait_queue(&port->out_wait_q, &wait);
		set_current_state(TASK_INTERRUPTIBLE);
	}

	spin_lock_irqsave(&port->lock, flags);
	port->out_buf_count += trunc_count;
	if (port->use_dma) {
		start_dma_out(port, wr_ptr, trunc_count);
	} else if (!port->tr_running) {
		reg_sser_rw_intr_mask intr_mask;
		intr_mask = REG_RD(sser, port->regi_sser, rw_intr_mask);
		/* Start sender by writing data */
		send_word(port);
		/* and enable transmitter ready IRQ */
		intr_mask.trdy = 1;
		REG_WR(sser, port->regi_sser, rw_intr_mask, intr_mask);
	}
	spin_unlock_irqrestore(&port->lock, flags);

	/* Exit if non blocking */
	if (file->f_flags & O_NONBLOCK) {
		DEBUGWRITE(printk(KERN_DEBUG "w d%d c %lu %08x\n",
				  port->port_nbr, trunc_count,
				  REG_RD_INT(dma, port->regi_dmaout, r_intr)));
		return trunc_count;
	}

	schedule();
	remove_wait_queue(&port->out_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	DEBUGWRITE(printk(KERN_DEBUG "w d%d c %lu\n",
			  port->port_nbr, trunc_count));
	return trunc_count;
}
  978. static ssize_t sync_serial_read(struct file * file, char * buf,
  979. size_t count, loff_t *ppos)
  980. {
  981. int dev = iminor(file_inode(file));
  982. int avail;
  983. sync_port *port;
  984. unsigned char* start;
  985. unsigned char* end;
  986. unsigned long flags;
  987. if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled)
  988. {
  989. DEBUG(printk("Invalid minor %d\n", dev));
  990. return -ENODEV;
  991. }
  992. port = &ports[dev];
  993. DEBUGREAD(printk("R%d c %d ri %lu wi %lu /%lu\n", dev, count, port->readp - port->flip, port->writep - port->flip, port->in_buffer_size));
  994. if (!port->started)
  995. {
  996. reg_sser_rw_cfg cfg = REG_RD(sser, port->regi_sser, rw_cfg);
  997. reg_sser_rw_tr_cfg tr_cfg = REG_RD(sser, port->regi_sser, rw_tr_cfg);
  998. reg_sser_rw_rec_cfg rec_cfg = REG_RD(sser, port->regi_sser, rw_rec_cfg);
  999. cfg.en = regk_sser_yes;
  1000. tr_cfg.tr_en = regk_sser_yes;
  1001. rec_cfg.rec_en = regk_sser_yes;
  1002. REG_WR(sser, port->regi_sser, rw_cfg, cfg);
  1003. REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg);
  1004. REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg);
  1005. port->started = 1;
  1006. }
  1007. /* Calculate number of available bytes */
  1008. /* Save pointers to avoid that they are modified by interrupt */
  1009. spin_lock_irqsave(&port->lock, flags);
  1010. start = (unsigned char*)port->readp; /* cast away volatile */
  1011. end = (unsigned char*)port->writep; /* cast away volatile */
  1012. spin_unlock_irqrestore(&port->lock, flags);
  1013. while ((start == end) && !port->full) /* No data */
  1014. {
  1015. DEBUGREAD(printk(KERN_DEBUG "&"));
  1016. if (file->f_flags & O_NONBLOCK)
  1017. return -EAGAIN;
  1018. wait_event_interruptible(port->in_wait_q,
  1019. !(start == end && !port->full));
  1020. if (signal_pending(current))
  1021. return -EINTR;
  1022. spin_lock_irqsave(&port->lock, flags);
  1023. start = (unsigned char*)port->readp; /* cast away volatile */
  1024. end = (unsigned char*)port->writep; /* cast away volatile */
  1025. spin_unlock_irqrestore(&port->lock, flags);
  1026. }
  1027. /* Lazy read, never return wrapped data. */
  1028. if (port->full)
  1029. avail = port->in_buffer_size;
  1030. else if (end > start)
  1031. avail = end - start;
  1032. else
  1033. avail = port->flip + port->in_buffer_size - start;
  1034. count = count > avail ? avail : count;
  1035. if (copy_to_user(buf, start, count))
  1036. return -EFAULT;
  1037. /* Disable interrupts while updating readp */
  1038. spin_lock_irqsave(&port->lock, flags);
  1039. port->readp += count;
  1040. if (port->readp >= port->flip + port->in_buffer_size) /* Wrap? */
  1041. port->readp = port->flip;
  1042. port->full = 0;
  1043. spin_unlock_irqrestore(&port->lock, flags);
  1044. DEBUGREAD(printk("r %d\n", count));
  1045. return count;
  1046. }
/*
 * Manual-mode (non-DMA) transmit: pop one sample from the output ring
 * buffer and write it to the transmitter data register, wrapping the
 * read pointer at the end of the buffer.  Called from sync_serial_write
 * (with the port lock held) and from the trdy interrupt.
 *
 * NOTE(review): SSP_FRAME_SYNC stores sample_size as bits-1 (7, 11, 15,
 * 23, 31) while these cases test 8, 12, 16, 24, 32 - verify which
 * encoding a read-back of tr_cfg.sample_size actually yields.
 */
static void send_word(sync_port* port)
{
	reg_sser_rw_tr_cfg tr_cfg = REG_RD(sser, port->regi_sser, rw_tr_cfg);
	reg_sser_rw_tr_data tr_data = {0};

	switch(tr_cfg.sample_size)
	{
	case 8:
		/* One buffer byte per sample. */
		port->out_buf_count--;
		tr_data.data = *port->out_rd_ptr++;
		REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
		if (port->out_rd_ptr >= port->out_buffer + OUT_BUFFER_SIZE)
			port->out_rd_ptr = port->out_buffer;
		break;
	case 12:
	{
		/* 12-bit sample packed big-endian into two buffer bytes. */
		int data = (*port->out_rd_ptr++) << 8;
		data |= *port->out_rd_ptr++;
		port->out_buf_count -= 2;
		tr_data.data = data;
		REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
		if (port->out_rd_ptr >= port->out_buffer + OUT_BUFFER_SIZE)
			port->out_rd_ptr = port->out_buffer;
	}
	break;
	case 16:
		port->out_buf_count -= 2;
		tr_data.data = *(unsigned short *)port->out_rd_ptr;
		REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
		port->out_rd_ptr += 2;
		if (port->out_rd_ptr >= port->out_buffer + OUT_BUFFER_SIZE)
			port->out_rd_ptr = port->out_buffer;
		break;
	case 24:
		/* 24-bit sample written as a 16-bit word followed by one
		 * byte. */
		port->out_buf_count -= 3;
		tr_data.data = *(unsigned short *)port->out_rd_ptr;
		REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
		port->out_rd_ptr += 2;
		tr_data.data = *port->out_rd_ptr++;
		REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
		if (port->out_rd_ptr >= port->out_buffer + OUT_BUFFER_SIZE)
			port->out_rd_ptr = port->out_buffer;
		break;
	case 32:
		/* 32-bit sample written as two 16-bit words. */
		port->out_buf_count -= 4;
		tr_data.data = *(unsigned short *)port->out_rd_ptr;
		REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
		port->out_rd_ptr += 2;
		tr_data.data = *(unsigned short *)port->out_rd_ptr;
		REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
		port->out_rd_ptr += 2;
		if (port->out_rd_ptr >= port->out_buffer + OUT_BUFFER_SIZE)
			port->out_rd_ptr = port->out_buffer;
		break;
	}
}
/*
 * Queue <count> bytes at <data> (a pointer into out_buffer) on the
 * transmit descriptor ring and (re)start the output DMA channel.
 * Called with the port lock held from sync_serial_write.
 */
static void start_dma_out(struct sync_port *port,
	const char *data, int count)
{
	/* Fill the next free descriptor; the DMA engine works on physical
	 * addresses. */
	port->active_tr_descr->buf = (char *) virt_to_phys((char *) data);
	port->active_tr_descr->after = port->active_tr_descr->buf + count;
	port->active_tr_descr->intr = 1;

	/* Move the end-of-list marker from the previous descriptor to this
	 * one, so the DMA stops here until more data is queued. */
	port->active_tr_descr->eol = 1;
	port->prev_tr_descr->eol = 0;

	DEBUGTRDMA(printk(KERN_DEBUG "Inserting eolr:%p eol@:%p\n",
		port->prev_tr_descr, port->active_tr_descr));
	port->prev_tr_descr = port->active_tr_descr;
	port->active_tr_descr = phys_to_virt((int) port->active_tr_descr->next);

	if (!port->tr_running) {
		reg_sser_rw_tr_cfg tr_cfg = REG_RD(sser, port->regi_sser,
			rw_tr_cfg);

		/* First transfer: point a fresh DMA context at the
		 * descriptor just filled and enable the transmitter. */
		port->out_context.next = 0;
		port->out_context.saved_data =
			(dma_descr_data *)virt_to_phys(port->prev_tr_descr);
		port->out_context.saved_data_buf = port->prev_tr_descr->buf;

		DMA_START_CONTEXT(port->regi_dmaout,
			virt_to_phys((char *)&port->out_context));

		tr_cfg.tr_en = regk_sser_yes;
		REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg);
		DEBUGTRDMA(printk(KERN_DEBUG "dma s\n"););
	} else {
		/* DMA already set up: tell it more data is available. */
		DMA_CONTINUE_DATA(port->regi_dmaout);
		DEBUGTRDMA(printk(KERN_DEBUG "dma c\n"););
	}

	port->tr_running = 1;
}
  1132. static void start_dma_in(sync_port *port)
  1133. {
  1134. int i;
  1135. char *buf;
  1136. port->writep = port->flip;
  1137. if (port->writep > port->flip + port->in_buffer_size) {
  1138. panic("Offset too large in sync serial driver\n");
  1139. return;
  1140. }
  1141. buf = (char*)virt_to_phys(port->in_buffer);
  1142. for (i = 0; i < NBR_IN_DESCR; i++) {
  1143. port->in_descr[i].buf = buf;
  1144. port->in_descr[i].after = buf + port->inbufchunk;
  1145. port->in_descr[i].intr = 1;
  1146. port->in_descr[i].next = (dma_descr_data*)virt_to_phys(&port->in_descr[i+1]);
  1147. port->in_descr[i].buf = buf;
  1148. buf += port->inbufchunk;
  1149. }
  1150. /* Link the last descriptor to the first */
  1151. port->in_descr[i-1].next = (dma_descr_data*)virt_to_phys(&port->in_descr[0]);
  1152. port->in_descr[i-1].eol = regk_sser_yes;
  1153. port->next_rx_desc = &port->in_descr[0];
  1154. port->prev_rx_desc = &port->in_descr[NBR_IN_DESCR - 1];
  1155. port->in_context.saved_data = (dma_descr_data*)virt_to_phys(&port->in_descr[0]);
  1156. port->in_context.saved_data_buf = port->in_descr[0].buf;
  1157. DMA_START_CONTEXT(port->regi_dmain, virt_to_phys(&port->in_context));
  1158. }
  1159. #ifdef SYNC_SER_DMA
/*
 * Transmit-DMA interrupt handler; one IRQ line shared by all ports.
 *
 * Two cases per pending port:
 *  - a descriptor completed but the DMA has not reached end-of-list:
 *    account for the bytes sent and step the catch (oldest in-flight)
 *    descriptor and ring read pointer forward;
 *  - the DMA hit the EOL descriptor: drain the remaining descriptors,
 *    reset the ring state and disable the transmitter until the next
 *    write restarts it.
 */
static irqreturn_t tr_interrupt(int irq, void *dev_id)
{
	reg_dma_r_masked_intr masked;
	reg_dma_rw_ack_intr ack_intr = {.data = regk_dma_yes};
	reg_dma_rw_stat stat;
	int i;
	int found = 0;
	int stop_sser = 0;

	for (i = 0; i < NBR_PORTS; i++) {
		sync_port *port = &ports[i];
		if (!port->enabled || !port->use_dma)
			continue;

		/* IRQ active for the port? */
		masked = REG_RD(dma, port->regi_dmaout, r_masked_intr);

		if (!masked.data)
			continue;

		found = 1;

		/* Check if we should stop the DMA transfer */
		stat = REG_RD(dma, port->regi_dmaout, rw_stat);

		if (stat.list_state == regk_dma_data_at_eol)
			stop_sser = 1;

		/* Clear IRQ */
		REG_WR(dma, port->regi_dmaout, rw_ack_intr, ack_intr);

		if (!stop_sser) {
			/* The DMA has completed a descriptor, EOL was not
			 * encountered, so step relevant descriptor and
			 * datapointers forward. */
			int sent;
			sent = port->catch_tr_descr->after -
				port->catch_tr_descr->buf;
			DEBUGTXINT(printk(KERN_DEBUG "%-4d - %-4d = %-4d\t"
					  "in descr %p (ac: %p)\n",
					  port->out_buf_count, sent,
					  port->out_buf_count - sent,
					  port->catch_tr_descr,
					  port->active_tr_descr););
			port->out_buf_count -= sent;
			port->catch_tr_descr =
				phys_to_virt((int) port->catch_tr_descr->next);
			port->out_rd_ptr =
				phys_to_virt((int) port->catch_tr_descr->buf);
		} else {
			/* NOTE(review): this 'i' shadows the port-loop
			 * counter; harmless today (the outer body ends after
			 * this branch) but rename on the next refactor. */
			int i, sent;
			/* EOL handler.
			 * Note that if an EOL was encountered during the irq
			 * locked section of sync_ser_write the DMA will be
			 * restarted and the eol flag will be cleared.
			 * The remaining descriptors will be traversed by
			 * the descriptor interrupts as usual.
			 */
			i = 0;
			/* Drain every descriptor up to (not including) the
			 * one carrying the eol flag. */
			while (!port->catch_tr_descr->eol) {
				sent = port->catch_tr_descr->after -
					port->catch_tr_descr->buf;
				DEBUGOUTBUF(printk(KERN_DEBUG
					"traversing descr %p -%d (%d)\n",
					port->catch_tr_descr,
					sent,
					port->out_buf_count));
				port->out_buf_count -= sent;
				port->catch_tr_descr = phys_to_virt(
					(int)port->catch_tr_descr->next);
				i++;
				if (i >= NBR_OUT_DESCR) {
					/* TODO: Reset and recover */
					panic("sync_serial: missing eol");
				}
			}
			/* Account for the final (eol) descriptor as well. */
			sent = port->catch_tr_descr->after -
				port->catch_tr_descr->buf;
			DEBUGOUTBUF(printk(KERN_DEBUG
				"eol at descr %p -%d (%d)\n",
				port->catch_tr_descr,
				sent,
				port->out_buf_count));

			port->out_buf_count -= sent;

			/* Update read pointer to first free byte, we
			 * may already be writing data there. */
			port->out_rd_ptr =
				phys_to_virt((int) port->catch_tr_descr->after);
			if (port->out_rd_ptr > port->out_buffer +
					OUT_BUFFER_SIZE)
				port->out_rd_ptr = port->out_buffer;

			reg_sser_rw_tr_cfg tr_cfg =
				REG_RD(sser, port->regi_sser, rw_tr_cfg);
			DEBUGTXINT(printk(KERN_DEBUG
				"tr_int DMA stop %d, set catch @ %p\n",
				port->out_buf_count,
				port->active_tr_descr));
			if (port->out_buf_count != 0)
				printk(KERN_CRIT "sync_ser: buffer not "
					"empty after eol.\n");

			/* Ring drained: park catch at active and turn the
			 * transmitter off until the next write. */
			port->catch_tr_descr = port->active_tr_descr;
			port->tr_running = 0;
			tr_cfg.tr_en = regk_sser_no;
			REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg);
		}
		/* wake up the waiting process */
		wake_up_interruptible(&port->out_wait_q);
	}
	return IRQ_RETVAL(found);
} /* tr_interrupt */
/*
 * Receive-DMA interrupt handler; one IRQ line shared by all ports.
 *
 * For every completed input descriptor, copy its chunk into the flip
 * buffer (splitting the copy when it straddles the buffer end), move
 * the end-of-list marker one descriptor forward and let the DMA
 * continue.
 */
static irqreturn_t rx_interrupt(int irq, void *dev_id)
{
	reg_dma_r_masked_intr masked;
	reg_dma_rw_ack_intr ack_intr = {.data = regk_dma_yes};
	int i;
	int found = 0;

	for (i = 0; i < NBR_PORTS; i++)
	{
		sync_port *port = &ports[i];

		if (!port->enabled || !port->use_dma )
			continue;

		masked = REG_RD(dma, port->regi_dmain, r_masked_intr);

		if (masked.data) /* Descriptor interrupt */
		{
			found = 1;
			/* Consume every descriptor the DMA has finished;
			 * rw_data points at the descriptor currently in
			 * use. */
			while (REG_RD(dma, port->regi_dmain, rw_data) !=
			       virt_to_phys(port->next_rx_desc)) {
				DEBUGRXINT(printk(KERN_DEBUG "!"));
				if (port->writep + port->inbufchunk > port->flip + port->in_buffer_size) {
					/* Chunk wraps: split the copy at the
					 * end of the flip buffer. */
					int first_size = port->flip + port->in_buffer_size - port->writep;
					memcpy((char*)port->writep, phys_to_virt((unsigned)port->next_rx_desc->buf), first_size);
					memcpy(port->flip, phys_to_virt((unsigned)port->next_rx_desc->buf+first_size), port->inbufchunk - first_size);
					port->writep = port->flip + port->inbufchunk - first_size;
				} else {
					memcpy((char*)port->writep,
					       phys_to_virt((unsigned)port->next_rx_desc->buf),
					       port->inbufchunk);
					port->writep += port->inbufchunk;
					if (port->writep >= port->flip + port->in_buffer_size)
						port->writep = port->flip;
				}
				if (port->writep == port->readp)
				{
					/* Caught up with the reader: flag the
					 * overrun. */
					port->full = 1;
				}

				/* Advance the end-of-list marker so the ring
				 * keeps running. */
				port->next_rx_desc->eol = 1;
				port->prev_rx_desc->eol = 0;
				/* Cache bug workaround */
				flush_dma_descr(port->prev_rx_desc, 0);
				port->prev_rx_desc = port->next_rx_desc;
				port->next_rx_desc = phys_to_virt((unsigned)port->next_rx_desc->next);
				/* Cache bug workaround */
				flush_dma_descr(port->prev_rx_desc, 1);
				/* wake up the waiting process */
				wake_up_interruptible(&port->in_wait_q);
				DMA_CONTINUE(port->regi_dmain);
				REG_WR(dma, port->regi_dmain, rw_ack_intr, ack_intr);

			}
		}
	}
	return IRQ_RETVAL(found);
} /* rx_interrupt */
  1314. #endif /* SYNC_SER_DMA */
  1315. #ifdef SYNC_SER_MANUAL
/*
 * Interrupt handler for ports running in manual (non-DMA) mode.
 *
 * On rdav: unpack one received sample into the flip buffer (discarding
 * the oldest data on overrun) and wake readers once a full chunk is
 * available.  On trdy: send the next word via send_word(), or mask the
 * trdy IRQ and wake writers when the output buffer is empty.
 */
static irqreturn_t manual_interrupt(int irq, void *dev_id)
{
	int i;
	int found = 0;
	reg_sser_r_masked_intr masked;

	for (i = 0; i < NBR_PORTS; i++)
	{
		sync_port *port = &ports[i];

		if (!port->enabled || port->use_dma)
		{
			continue;
		}

		masked = REG_RD(sser, port->regi_sser, r_masked_intr);
		if (masked.rdav)	/* Data received? */
		{
			reg_sser_rw_rec_cfg rec_cfg = REG_RD(sser, port->regi_sser, rw_rec_cfg);
			reg_sser_r_rec_data data = REG_RD(sser, port->regi_sser, r_rec_data);
			found = 1;
			/* Read data */
			/* Store the sample according to the configured
			 * sample size. */
			switch(rec_cfg.sample_size)
			{
			case 8:
				*port->writep++ = data.data & 0xff;
				break;
			case 12:
				*port->writep = (data.data & 0x0ff0) >> 4;
				*(port->writep + 1) = data.data & 0x0f;
				port->writep+=2;
				break;
			case 16:
				*(unsigned short*)port->writep = data.data;
				port->writep+=2;
				break;
			case 24:
				/* NOTE(review): this stores 4 bytes but only
				 * advances writep by 3; near the end of the
				 * flip buffer the extra byte may land past
				 * the wrap boundary - verify. */
				*(unsigned int*)port->writep = data.data;
				port->writep+=3;
				break;
			case 32:
				*(unsigned int*)port->writep = data.data;
				port->writep+=4;
				break;
			}

			if (port->writep >= port->flip + port->in_buffer_size) /* Wrap? */
				port->writep = port->flip;
			if (port->writep == port->readp) {
				/* receive buffer overrun, discard oldest data
				 */
				port->readp++;
				if (port->readp >= port->flip + port->in_buffer_size) /* Wrap? */
					port->readp = port->flip;
			}
			if (sync_data_avail(port) >= port->inbufchunk)
				wake_up_interruptible(&port->in_wait_q); /* Wake up application */
		}
		if (masked.trdy)	/* Transmitter ready? */
		{
			found = 1;
			if (port->out_buf_count > 0)	/* More data to send */
				send_word(port);
			else	/* transmission finished */
			{
				reg_sser_rw_intr_mask intr_mask;
				intr_mask = REG_RD(sser, port->regi_sser, rw_intr_mask);
				/* Mask the trdy IRQ until there is data to
				 * send again. */
				intr_mask.trdy = 0;
				REG_WR(sser, port->regi_sser, rw_intr_mask, intr_mask);
				wake_up_interruptible(&port->out_wait_q); /* Wake up application */
			}
		}
	}
	return IRQ_RETVAL(found);
}
  1387. #endif
  1388. module_init(etrax_sync_serial_init);