/*
 * i2c Support for Atmel's AT91 Two-Wire Interface (TWI)
 *
 * Copyright (C) 2011 Weinmann Medical GmbH
 * Author: Nikolaus Voss <n.voss@weinmann.de>
 *
 * Evolved from original work by:
 * Copyright (C) 2004 Rick Bronson
 * Converted to 2.6 by Andrew Victor <andrew@sanpeople.com>
 *
 * Borrowed heavily from original work by:
 * Copyright (C) 2000 Philip Edelbrock <phil@stimpy.netroedge.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/platform_data/dma-atmel.h>
#define DEFAULT_TWI_CLK_HZ		100000		/* max 400 Kbits/s */
#define AT91_I2C_TIMEOUT	msecs_to_jiffies(100)	/* transfer timeout */
#define AT91_I2C_DMA_THRESHOLD	8			/* enable DMA if transfer size is bigger than this threshold */

/* AT91 TWI register definitions */
#define	AT91_TWI_CR		0x0000	/* Control Register */
#define	AT91_TWI_START		0x0001	/* Send a Start Condition */
#define	AT91_TWI_STOP		0x0002	/* Send a Stop Condition */
#define	AT91_TWI_MSEN		0x0004	/* Master Transfer Enable */
#define	AT91_TWI_SVDIS		0x0020	/* Slave Transfer Disable */
#define	AT91_TWI_QUICK		0x0040	/* SMBus quick command */
#define	AT91_TWI_SWRST		0x0080	/* Software Reset */

#define	AT91_TWI_MMR		0x0004	/* Master Mode Register */
#define	AT91_TWI_IADRSZ_1	0x0100	/* Internal Device Address Size */
#define	AT91_TWI_MREAD		0x1000	/* Master Read Direction */

#define	AT91_TWI_IADR		0x000c	/* Internal Address Register */

#define	AT91_TWI_CWGR		0x0010	/* Clock Waveform Generator Reg */

#define	AT91_TWI_SR		0x0020	/* Status Register */
#define	AT91_TWI_TXCOMP		0x0001	/* Transmission Complete */
#define	AT91_TWI_RXRDY		0x0002	/* Receive Holding Register Ready */
#define	AT91_TWI_TXRDY		0x0004	/* Transmit Holding Register Ready */
#define	AT91_TWI_OVRE		0x0040	/* Overrun Error */
#define	AT91_TWI_UNRE		0x0080	/* Underrun Error */
#define	AT91_TWI_NACK		0x0100	/* Not Acknowledged */

/* All interrupt sources this driver ever enables or needs to mask. */
#define	AT91_TWI_INT_MASK \
	(AT91_TWI_TXCOMP | AT91_TWI_RXRDY | AT91_TWI_TXRDY | AT91_TWI_NACK)

#define	AT91_TWI_IER		0x0024	/* Interrupt Enable Register */
#define	AT91_TWI_IDR		0x0028	/* Interrupt Disable Register */
#define	AT91_TWI_IMR		0x002c	/* Interrupt Mask Register */
#define	AT91_TWI_RHR		0x0030	/* Receive Holding Register */
#define	AT91_TWI_THR		0x0034	/* Transmit Holding Register */
/* Per-SoC configuration, selected via id_table / of_match data. */
struct at91_twi_pdata {
	unsigned clk_max_div;		/* max CKDIV value the IP accepts */
	unsigned clk_offset;		/* SoC-specific offset in clock formula */
	bool has_unre_flag;		/* SR has a valid UNRE (underrun) bit */
	bool has_dma_support;		/* IP can be fed by the DMA controller */
	struct at_dma_slave dma_slave;	/* legacy (non-DT) DMA slave config */
};

/* State of an in-flight DMA transfer. */
struct at91_twi_dma {
	struct dma_chan *chan_rx;	/* dmaengine channel for reads */
	struct dma_chan *chan_tx;	/* dmaengine channel for writes */
	struct scatterlist sg;		/* single-entry sg list for the buffer */
	struct dma_async_tx_descriptor *data_desc;
	enum dma_data_direction direction;
	bool buf_mapped;		/* dev->buf is currently DMA-mapped */
	bool xfer_in_progress;		/* descriptor submitted, not completed */
};

/* Driver instance state, one per TWI controller. */
struct at91_twi_dev {
	struct device *dev;
	void __iomem *base;		/* mapped register window */
	struct completion cmd_complete;	/* signalled from the IRQ handler */
	struct clk *clk;
	u8 *buf;			/* current position in the msg buffer */
	size_t buf_len;			/* bytes still to transfer */
	struct i2c_msg *msg;		/* message currently being processed */
	int irq;
	unsigned imr;			/* saved interrupt mask (irq_save/restore) */
	unsigned transfer_status;	/* accumulated SR bits seen during xfer */
	struct i2c_adapter adapter;
	unsigned twi_cwgr_reg;		/* precomputed clock waveform register */
	struct at91_twi_pdata *pdata;
	bool use_dma;
	bool recv_len_abort;		/* invalid SMBus block length received */
	struct at91_twi_dma dma;
};
  97. static unsigned at91_twi_read(struct at91_twi_dev *dev, unsigned reg)
  98. {
  99. return readl_relaxed(dev->base + reg);
  100. }
  101. static void at91_twi_write(struct at91_twi_dev *dev, unsigned reg, unsigned val)
  102. {
  103. writel_relaxed(val, dev->base + reg);
  104. }
  105. static void at91_disable_twi_interrupts(struct at91_twi_dev *dev)
  106. {
  107. at91_twi_write(dev, AT91_TWI_IDR, AT91_TWI_INT_MASK);
  108. }
/*
 * Save the currently enabled interrupt sources in dev->imr and mask them.
 * The IMR value must be read before disabling, hence the statement order.
 * Paired with at91_twi_irq_restore().
 */
static void at91_twi_irq_save(struct at91_twi_dev *dev)
{
	dev->imr = at91_twi_read(dev, AT91_TWI_IMR) & AT91_TWI_INT_MASK;
	at91_disable_twi_interrupts(dev);
}
/* Re-enable the interrupt sources previously saved by at91_twi_irq_save(). */
static void at91_twi_irq_restore(struct at91_twi_dev *dev)
{
	at91_twi_write(dev, AT91_TWI_IER, dev->imr);
}
/*
 * Reset the controller to a known state: interrupts masked, software
 * reset, master mode enabled, slave mode disabled, and the precomputed
 * clock waveform programmed. Also used to recover after a timeout.
 */
static void at91_init_twi_bus(struct at91_twi_dev *dev)
{
	at91_disable_twi_interrupts(dev);
	at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_SWRST);
	at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_MSEN);
	at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_SVDIS);
	at91_twi_write(dev, AT91_TWI_CWGR, dev->twi_cwgr_reg);
}
/*
 * Calculate symmetric clock as stated in datasheet:
 * twi_clk = F_MAIN / (2 * (cdiv * (1 << ckdiv) + offset))
 *
 * The result is only stored in dev->twi_cwgr_reg; it is written to the
 * hardware by at91_init_twi_bus().
 */
static void at91_calc_twi_clock(struct at91_twi_dev *dev, int twi_clk)
{
	int ckdiv, cdiv, div;
	struct at91_twi_pdata *pdata = dev->pdata;
	int offset = pdata->clk_offset;
	int max_ckdiv = pdata->clk_max_div;

	/* Total divider needed to not exceed the requested bus rate. */
	div = max(0, (int)DIV_ROUND_UP(clk_get_rate(dev->clk),
				       2 * twi_clk) - offset);
	/* Split into a power-of-two prescaler (ckdiv) and an 8-bit cdiv. */
	ckdiv = fls(div >> 8);
	cdiv = div >> ckdiv;

	if (ckdiv > max_ckdiv) {
		/* Clamp to the slowest clock the hardware can generate. */
		dev_warn(dev->dev, "%d exceeds ckdiv max value which is %d.\n",
			 ckdiv, max_ckdiv);
		ckdiv = max_ckdiv;
		cdiv = 255;
	}

	/* Same CHDIV and CLDIV value -> symmetric waveform. */
	dev->twi_cwgr_reg = (ckdiv << 16) | (cdiv << 8) | cdiv;
	dev_dbg(dev->dev, "cdiv %d ckdiv %d\n", cdiv, ckdiv);
}
/*
 * Abort any DMA transfer in progress and unmap the buffer. Safe to call
 * whether or not DMA was actually started (the flags guard each step).
 * TWI interrupts are masked around the teardown to avoid racing the ISR.
 */
static void at91_twi_dma_cleanup(struct at91_twi_dev *dev)
{
	struct at91_twi_dma *dma = &dev->dma;

	at91_twi_irq_save(dev);

	if (dma->xfer_in_progress) {
		if (dma->direction == DMA_FROM_DEVICE)
			dmaengine_terminate_all(dma->chan_rx);
		else
			dmaengine_terminate_all(dma->chan_tx);
		dma->xfer_in_progress = false;
	}
	if (dma->buf_mapped) {
		dma_unmap_single(dev->dev, sg_dma_address(&dma->sg),
				 dev->buf_len, dma->direction);
		dma->buf_mapped = false;
	}

	at91_twi_irq_restore(dev);
}
/*
 * PIO transmit path: push one byte from dev->buf into THR and advance the
 * cursor. Called once to prime the transfer and then from the ISR on each
 * TXRDY interrupt.
 */
static void at91_twi_write_next_byte(struct at91_twi_dev *dev)
{
	if (dev->buf_len <= 0)
		return;

	at91_twi_write(dev, AT91_TWI_THR, *dev->buf);

	/* send stop when last byte has been written */
	if (--dev->buf_len == 0)
		at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP);

	dev_dbg(dev->dev, "wrote 0x%x, to go %d\n", *dev->buf, dev->buf_len);

	++dev->buf;
}
/*
 * dmaengine completion callback for a TX transfer: unmap the buffer,
 * enable the TXCOMP interrupt and issue the STOP condition.
 */
static void at91_twi_write_data_dma_callback(void *data)
{
	struct at91_twi_dev *dev = (struct at91_twi_dev *)data;

	dma_unmap_single(dev->dev, sg_dma_address(&dev->dma.sg),
			 dev->buf_len, DMA_TO_DEVICE);

	/*
	 * When this callback is called, THR/TX FIFO is likely not to be empty
	 * yet. So we have to wait for TXCOMP or NACK bits to be set into the
	 * Status Register to be sure that the STOP bit has been sent and the
	 * transfer is completed. The NACK interrupt has already been enabled,
	 * we just have to enable TXCOMP one.
	 */
	at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_TXCOMP);
	at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP);
}
  193. static void at91_twi_write_data_dma(struct at91_twi_dev *dev)
  194. {
  195. dma_addr_t dma_addr;
  196. struct dma_async_tx_descriptor *txdesc;
  197. struct at91_twi_dma *dma = &dev->dma;
  198. struct dma_chan *chan_tx = dma->chan_tx;
  199. if (dev->buf_len <= 0)
  200. return;
  201. dma->direction = DMA_TO_DEVICE;
  202. at91_twi_irq_save(dev);
  203. dma_addr = dma_map_single(dev->dev, dev->buf, dev->buf_len,
  204. DMA_TO_DEVICE);
  205. if (dma_mapping_error(dev->dev, dma_addr)) {
  206. dev_err(dev->dev, "dma map failed\n");
  207. return;
  208. }
  209. dma->buf_mapped = true;
  210. at91_twi_irq_restore(dev);
  211. sg_dma_len(&dma->sg) = dev->buf_len;
  212. sg_dma_address(&dma->sg) = dma_addr;
  213. txdesc = dmaengine_prep_slave_sg(chan_tx, &dma->sg, 1, DMA_MEM_TO_DEV,
  214. DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
  215. if (!txdesc) {
  216. dev_err(dev->dev, "dma prep slave sg failed\n");
  217. goto error;
  218. }
  219. txdesc->callback = at91_twi_write_data_dma_callback;
  220. txdesc->callback_param = dev;
  221. dma->xfer_in_progress = true;
  222. dmaengine_submit(txdesc);
  223. dma_async_issue_pending(chan_tx);
  224. return;
  225. error:
  226. at91_twi_dma_cleanup(dev);
  227. }
/*
 * PIO receive path: pull one byte from RHR into dev->buf. Called from the
 * ISR on each RXRDY interrupt. Also implements the SMBus block-read
 * protocol: the first received byte is the payload length, which extends
 * dev->buf_len on the fly; an out-of-range length aborts the transfer by
 * reading one final byte with STOP set (recv_len_abort).
 */
static void at91_twi_read_next_byte(struct at91_twi_dev *dev)
{
	if (dev->buf_len <= 0)
		return;

	*dev->buf = at91_twi_read(dev, AT91_TWI_RHR) & 0xff;
	--dev->buf_len;

	/* return if aborting, we only needed to read RHR to clear RXRDY*/
	if (dev->recv_len_abort)
		return;

	/* handle I2C_SMBUS_BLOCK_DATA */
	if (unlikely(dev->msg->flags & I2C_M_RECV_LEN)) {
		/* ensure length byte is a valid value */
		if (*dev->buf <= I2C_SMBUS_BLOCK_MAX && *dev->buf > 0) {
			dev->msg->flags &= ~I2C_M_RECV_LEN;
			dev->buf_len += *dev->buf;
			dev->msg->len = dev->buf_len + 1;
			dev_dbg(dev->dev, "received block length %d\n",
				dev->buf_len);
		} else {
			/* abort and send the stop by reading one more byte */
			dev->recv_len_abort = true;
			dev->buf_len = 1;
		}
	}

	/* send stop if second but last byte has been read */
	if (dev->buf_len == 1)
		at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP);

	dev_dbg(dev->dev, "read 0x%x, to go %d\n", *dev->buf, dev->buf_len);

	++dev->buf;
}
/*
 * dmaengine completion callback for an RX transfer: unmap the buffer and
 * hand the final two bytes over to the PIO path (RXRDY interrupts), so the
 * STOP condition can be issued at the right moment (see comment in
 * at91_do_twi_transfer()).
 */
static void at91_twi_read_data_dma_callback(void *data)
{
	struct at91_twi_dev *dev = (struct at91_twi_dev *)data;

	dma_unmap_single(dev->dev, sg_dma_address(&dev->dma.sg),
			 dev->buf_len, DMA_FROM_DEVICE);

	/* The last two bytes have to be read without using dma */
	dev->buf += dev->buf_len - 2;
	dev->buf_len = 2;
	at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_RXRDY | AT91_TWI_TXCOMP);
}
  268. static void at91_twi_read_data_dma(struct at91_twi_dev *dev)
  269. {
  270. dma_addr_t dma_addr;
  271. struct dma_async_tx_descriptor *rxdesc;
  272. struct at91_twi_dma *dma = &dev->dma;
  273. struct dma_chan *chan_rx = dma->chan_rx;
  274. dma->direction = DMA_FROM_DEVICE;
  275. /* Keep in mind that we won't use dma to read the last two bytes */
  276. at91_twi_irq_save(dev);
  277. dma_addr = dma_map_single(dev->dev, dev->buf, dev->buf_len - 2,
  278. DMA_FROM_DEVICE);
  279. if (dma_mapping_error(dev->dev, dma_addr)) {
  280. dev_err(dev->dev, "dma map failed\n");
  281. return;
  282. }
  283. dma->buf_mapped = true;
  284. at91_twi_irq_restore(dev);
  285. dma->sg.dma_address = dma_addr;
  286. sg_dma_len(&dma->sg) = dev->buf_len - 2;
  287. rxdesc = dmaengine_prep_slave_sg(chan_rx, &dma->sg, 1, DMA_DEV_TO_MEM,
  288. DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
  289. if (!rxdesc) {
  290. dev_err(dev->dev, "dma prep slave sg failed\n");
  291. goto error;
  292. }
  293. rxdesc->callback = at91_twi_read_data_dma_callback;
  294. rxdesc->callback_param = dev;
  295. dma->xfer_in_progress = true;
  296. dmaengine_submit(rxdesc);
  297. dma_async_issue_pending(dma->chan_rx);
  298. return;
  299. error:
  300. at91_twi_dma_cleanup(dev);
  301. }
/*
 * TWI interrupt handler. Only bits that are both set in SR and currently
 * enabled in IMR are acted upon; RXRDY/TXRDY drive the PIO byte pumps,
 * and TXCOMP or NACK ends the transfer by completing cmd_complete.
 * The raw status is accumulated into transfer_status so the waiting
 * thread can inspect error bits (NACK/OVRE/UNRE) afterwards.
 */
static irqreturn_t atmel_twi_interrupt(int irq, void *dev_id)
{
	struct at91_twi_dev *dev = dev_id;
	const unsigned status = at91_twi_read(dev, AT91_TWI_SR);
	const unsigned irqstatus = status & at91_twi_read(dev, AT91_TWI_IMR);

	if (!irqstatus)
		return IRQ_NONE;
	else if (irqstatus & AT91_TWI_RXRDY)
		at91_twi_read_next_byte(dev);
	else if (irqstatus & AT91_TWI_TXRDY)
		at91_twi_write_next_byte(dev);

	/* catch error flags */
	dev->transfer_status |= status;

	if (irqstatus & (AT91_TWI_TXCOMP | AT91_TWI_NACK)) {
		at91_disable_twi_interrupts(dev);
		complete(&dev->cmd_complete);
	}

	return IRQ_HANDLED;
}
/*
 * Run the transfer described by dev->msg / dev->buf / dev->buf_len and
 * wait for it to finish or time out.
 *
 * Returns 0 on success, -ETIMEDOUT if the controller never completed
 * (the bus is re-initialized in that case), -EREMOTEIO on NACK, -EIO on
 * overrun/underrun and -EPROTO on an invalid SMBus block length.
 */
static int at91_do_twi_transfer(struct at91_twi_dev *dev)
{
	int ret;
	bool has_unre_flag = dev->pdata->has_unre_flag;

	/*
	 * WARNING: the TXCOMP bit in the Status Register is NOT a clear on
	 * read flag but shows the state of the transmission at the time the
	 * Status Register is read. According to the programmer datasheet,
	 * TXCOMP is set when both holding register and internal shifter are
	 * empty and STOP condition has been sent.
	 * Consequently, we should enable NACK interrupt rather than TXCOMP to
	 * detect transmission failure.
	 *
	 * Besides, the TXCOMP bit is already set before the i2c transaction
	 * has been started. For read transactions, this bit is cleared when
	 * writing the START bit into the Control Register. So the
	 * corresponding interrupt can safely be enabled just after.
	 * However for write transactions managed by the CPU, we first write
	 * into THR, so TXCOMP is cleared. Then we can safely enable TXCOMP
	 * interrupt. If TXCOMP interrupt were enabled before writing into THR,
	 * the interrupt handler would be called immediately and the i2c command
	 * would be reported as completed.
	 * Also when a write transaction is managed by the DMA controller,
	 * enabling the TXCOMP interrupt in this function may lead to a race
	 * condition since we don't know whether the TXCOMP interrupt is enabled
	 * before or after the DMA has started to write into THR. So the TXCOMP
	 * interrupt is enabled later by at91_twi_write_data_dma_callback().
	 * Immediately after in that DMA callback, we still need to send the
	 * STOP condition manually writing the corresponding bit into the
	 * Control Register.
	 */

	dev_dbg(dev->dev, "transfer: %s %d bytes.\n",
		(dev->msg->flags & I2C_M_RD) ? "read" : "write", dev->buf_len);

	reinit_completion(&dev->cmd_complete);
	dev->transfer_status = 0;

	if (!dev->buf_len) {
		/* Zero-length message: SMBus Quick command. */
		at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_QUICK);
		at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_TXCOMP);
	} else if (dev->msg->flags & I2C_M_RD) {
		unsigned start_flags = AT91_TWI_START;

		/* Drain a stale byte left over from a previous transfer. */
		if (at91_twi_read(dev, AT91_TWI_SR) & AT91_TWI_RXRDY) {
			dev_err(dev->dev, "RXRDY still set!");
			at91_twi_read(dev, AT91_TWI_RHR);
		}

		/* if only one byte is to be read, immediately stop transfer */
		if (dev->buf_len <= 1 && !(dev->msg->flags & I2C_M_RECV_LEN))
			start_flags |= AT91_TWI_STOP;
		at91_twi_write(dev, AT91_TWI_CR, start_flags);
		/*
		 * When using dma, the last byte has to be read manually in
		 * order to not send the stop command too late and then
		 * to receive extra data. In practice, there are some issues
		 * if you use the dma to read n-1 bytes because of latency.
		 * Reading n-2 bytes with dma and the two last ones manually
		 * seems to be the best solution.
		 */
		if (dev->use_dma && (dev->buf_len > AT91_I2C_DMA_THRESHOLD)) {
			at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_NACK);
			at91_twi_read_data_dma(dev);
		} else {
			at91_twi_write(dev, AT91_TWI_IER,
				       AT91_TWI_TXCOMP |
				       AT91_TWI_NACK |
				       AT91_TWI_RXRDY);
		}
	} else {
		if (dev->use_dma && (dev->buf_len > AT91_I2C_DMA_THRESHOLD)) {
			at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_NACK);
			at91_twi_write_data_dma(dev);
		} else {
			/* Prime THR first, then enable TXCOMP (see above). */
			at91_twi_write_next_byte(dev);
			at91_twi_write(dev, AT91_TWI_IER,
				       AT91_TWI_TXCOMP |
				       AT91_TWI_NACK |
				       AT91_TWI_TXRDY);
		}
	}

	ret = wait_for_completion_timeout(&dev->cmd_complete,
					  dev->adapter.timeout);
	if (ret == 0) {
		dev_err(dev->dev, "controller timed out\n");
		at91_init_twi_bus(dev);
		ret = -ETIMEDOUT;
		goto error;
	}
	if (dev->transfer_status & AT91_TWI_NACK) {
		dev_dbg(dev->dev, "received nack\n");
		ret = -EREMOTEIO;
		goto error;
	}
	if (dev->transfer_status & AT91_TWI_OVRE) {
		dev_err(dev->dev, "overrun while reading\n");
		ret = -EIO;
		goto error;
	}
	if (has_unre_flag && dev->transfer_status & AT91_TWI_UNRE) {
		dev_err(dev->dev, "underrun while writing\n");
		ret = -EIO;
		goto error;
	}
	if (dev->recv_len_abort) {
		dev_err(dev->dev, "invalid smbus block length recvd\n");
		ret = -EPROTO;
		goto error;
	}

	dev_dbg(dev->dev, "transfer complete\n");

	return 0;

error:
	at91_twi_dma_cleanup(dev);
	return ret;
}
  432. static int at91_twi_xfer(struct i2c_adapter *adap, struct i2c_msg *msg, int num)
  433. {
  434. struct at91_twi_dev *dev = i2c_get_adapdata(adap);
  435. int ret;
  436. unsigned int_addr_flag = 0;
  437. struct i2c_msg *m_start = msg;
  438. dev_dbg(&adap->dev, "at91_xfer: processing %d messages:\n", num);
  439. /*
  440. * The hardware can handle at most two messages concatenated by a
  441. * repeated start via it's internal address feature.
  442. */
  443. if (num > 2) {
  444. dev_err(dev->dev,
  445. "cannot handle more than two concatenated messages.\n");
  446. return 0;
  447. } else if (num == 2) {
  448. int internal_address = 0;
  449. int i;
  450. if (msg->flags & I2C_M_RD) {
  451. dev_err(dev->dev, "first transfer must be write.\n");
  452. return -EINVAL;
  453. }
  454. if (msg->len > 3) {
  455. dev_err(dev->dev, "first message size must be <= 3.\n");
  456. return -EINVAL;
  457. }
  458. /* 1st msg is put into the internal address, start with 2nd */
  459. m_start = &msg[1];
  460. for (i = 0; i < msg->len; ++i) {
  461. const unsigned addr = msg->buf[msg->len - 1 - i];
  462. internal_address |= addr << (8 * i);
  463. int_addr_flag += AT91_TWI_IADRSZ_1;
  464. }
  465. at91_twi_write(dev, AT91_TWI_IADR, internal_address);
  466. }
  467. at91_twi_write(dev, AT91_TWI_MMR, (m_start->addr << 16) | int_addr_flag
  468. | ((m_start->flags & I2C_M_RD) ? AT91_TWI_MREAD : 0));
  469. dev->buf_len = m_start->len;
  470. dev->buf = m_start->buf;
  471. dev->msg = m_start;
  472. dev->recv_len_abort = false;
  473. ret = at91_do_twi_transfer(dev);
  474. return (ret < 0) ? ret : num;
  475. }
  476. static u32 at91_twi_func(struct i2c_adapter *adapter)
  477. {
  478. return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL
  479. | I2C_FUNC_SMBUS_READ_BLOCK_DATA;
  480. }
  481. static struct i2c_algorithm at91_twi_algorithm = {
  482. .master_xfer = at91_twi_xfer,
  483. .functionality = at91_twi_func,
  484. };
/*
 * Per-SoC configuration tables. clk_max_div/clk_offset feed
 * at91_calc_twi_clock(); none of the non-DT SoCs below support DMA.
 * Not const: pdata->dma_slave may be written through dev->pdata.
 */
static struct at91_twi_pdata at91rm9200_config = {
	.clk_max_div = 5,
	.clk_offset = 3,
	.has_unre_flag = true,		/* only the RM9200 reports underrun */
	.has_dma_support = false,
};

static struct at91_twi_pdata at91sam9261_config = {
	.clk_max_div = 5,
	.clk_offset = 4,
	.has_unre_flag = false,
	.has_dma_support = false,
};

static struct at91_twi_pdata at91sam9260_config = {
	.clk_max_div = 7,
	.clk_offset = 4,
	.has_unre_flag = false,
	.has_dma_support = false,
};

static struct at91_twi_pdata at91sam9g20_config = {
	.clk_max_div = 7,
	.clk_offset = 4,
	.has_unre_flag = false,
	.has_dma_support = false,
};

static struct at91_twi_pdata at91sam9g10_config = {
	.clk_max_div = 7,
	.clk_offset = 4,
	.has_unre_flag = false,
	.has_dma_support = false,
};
/* Legacy (non-DT) platform-device name -> per-SoC config mapping. */
static const struct platform_device_id at91_twi_devtypes[] = {
	{
		.name = "i2c-at91rm9200",
		.driver_data = (unsigned long) &at91rm9200_config,
	}, {
		.name = "i2c-at91sam9261",
		.driver_data = (unsigned long) &at91sam9261_config,
	}, {
		.name = "i2c-at91sam9260",
		.driver_data = (unsigned long) &at91sam9260_config,
	}, {
		.name = "i2c-at91sam9g20",
		.driver_data = (unsigned long) &at91sam9g20_config,
	}, {
		.name = "i2c-at91sam9g10",
		.driver_data = (unsigned long) &at91sam9g10_config,
	}, {
		/* sentinel */
	}
};
#if defined(CONFIG_OF)
/* The 9x5 family is the only DT-era SoC here with DMA-capable TWI. */
static struct at91_twi_pdata at91sam9x5_config = {
	.clk_max_div = 7,
	.clk_offset = 4,
	.has_unre_flag = false,
	.has_dma_support = true,
};

/* Device-tree compatible string -> per-SoC config mapping. */
static const struct of_device_id atmel_twi_dt_ids[] = {
	{
		.compatible = "atmel,at91rm9200-i2c",
		.data = &at91rm9200_config,
	} , {
		.compatible = "atmel,at91sam9260-i2c",
		.data = &at91sam9260_config,
	} , {
		.compatible = "atmel,at91sam9261-i2c",
		.data = &at91sam9261_config,
	} , {
		.compatible = "atmel,at91sam9g20-i2c",
		.data = &at91sam9g20_config,
	} , {
		.compatible = "atmel,at91sam9g10-i2c",
		.data = &at91sam9g10_config,
	}, {
		.compatible = "atmel,at91sam9x5-i2c",
		.data = &at91sam9x5_config,
	}, {
		/* sentinel */
	}
};
MODULE_DEVICE_TABLE(of, atmel_twi_dt_ids);
#endif
  567. static bool filter(struct dma_chan *chan, void *pdata)
  568. {
  569. struct at91_twi_pdata *sl_pdata = pdata;
  570. struct at_dma_slave *sl;
  571. if (!sl_pdata)
  572. return false;
  573. sl = &sl_pdata->dma_slave;
  574. if (sl && (sl->dma_dev == chan->device->dev)) {
  575. chan->private = sl;
  576. return true;
  577. } else {
  578. return false;
  579. }
  580. }
  581. static int at91_twi_configure_dma(struct at91_twi_dev *dev, u32 phy_addr)
  582. {
  583. int ret = 0;
  584. struct at91_twi_pdata *pdata = dev->pdata;
  585. struct dma_slave_config slave_config;
  586. struct at91_twi_dma *dma = &dev->dma;
  587. dma_cap_mask_t mask;
  588. memset(&slave_config, 0, sizeof(slave_config));
  589. slave_config.src_addr = (dma_addr_t)phy_addr + AT91_TWI_RHR;
  590. slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
  591. slave_config.src_maxburst = 1;
  592. slave_config.dst_addr = (dma_addr_t)phy_addr + AT91_TWI_THR;
  593. slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
  594. slave_config.dst_maxburst = 1;
  595. slave_config.device_fc = false;
  596. dma_cap_zero(mask);
  597. dma_cap_set(DMA_SLAVE, mask);
  598. dma->chan_tx = dma_request_slave_channel_compat(mask, filter, pdata,
  599. dev->dev, "tx");
  600. if (!dma->chan_tx) {
  601. dev_err(dev->dev, "can't get a DMA channel for tx\n");
  602. ret = -EBUSY;
  603. goto error;
  604. }
  605. dma->chan_rx = dma_request_slave_channel_compat(mask, filter, pdata,
  606. dev->dev, "rx");
  607. if (!dma->chan_rx) {
  608. dev_err(dev->dev, "can't get a DMA channel for rx\n");
  609. ret = -EBUSY;
  610. goto error;
  611. }
  612. slave_config.direction = DMA_MEM_TO_DEV;
  613. if (dmaengine_slave_config(dma->chan_tx, &slave_config)) {
  614. dev_err(dev->dev, "failed to configure tx channel\n");
  615. ret = -EINVAL;
  616. goto error;
  617. }
  618. slave_config.direction = DMA_DEV_TO_MEM;
  619. if (dmaengine_slave_config(dma->chan_rx, &slave_config)) {
  620. dev_err(dev->dev, "failed to configure rx channel\n");
  621. ret = -EINVAL;
  622. goto error;
  623. }
  624. sg_init_table(&dma->sg, 1);
  625. dma->buf_mapped = false;
  626. dma->xfer_in_progress = false;
  627. dev_info(dev->dev, "using %s (tx) and %s (rx) for DMA transfers\n",
  628. dma_chan_name(dma->chan_tx), dma_chan_name(dma->chan_rx));
  629. return ret;
  630. error:
  631. dev_info(dev->dev, "can't use DMA\n");
  632. if (dma->chan_rx)
  633. dma_release_channel(dma->chan_rx);
  634. if (dma->chan_tx)
  635. dma_release_channel(dma->chan_tx);
  636. return ret;
  637. }
  638. static struct at91_twi_pdata *at91_twi_get_driver_data(
  639. struct platform_device *pdev)
  640. {
  641. if (pdev->dev.of_node) {
  642. const struct of_device_id *match;
  643. match = of_match_node(atmel_twi_dt_ids, pdev->dev.of_node);
  644. if (!match)
  645. return NULL;
  646. return (struct at91_twi_pdata *)match->data;
  647. }
  648. return (struct at91_twi_pdata *) platform_get_device_id(pdev)->driver_data;
  649. }
  650. static int at91_twi_probe(struct platform_device *pdev)
  651. {
  652. struct at91_twi_dev *dev;
  653. struct resource *mem;
  654. int rc;
  655. u32 phy_addr;
  656. u32 bus_clk_rate;
  657. dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
  658. if (!dev)
  659. return -ENOMEM;
  660. init_completion(&dev->cmd_complete);
  661. dev->dev = &pdev->dev;
  662. mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  663. if (!mem)
  664. return -ENODEV;
  665. phy_addr = mem->start;
  666. dev->pdata = at91_twi_get_driver_data(pdev);
  667. if (!dev->pdata)
  668. return -ENODEV;
  669. dev->base = devm_ioremap_resource(&pdev->dev, mem);
  670. if (IS_ERR(dev->base))
  671. return PTR_ERR(dev->base);
  672. dev->irq = platform_get_irq(pdev, 0);
  673. if (dev->irq < 0)
  674. return dev->irq;
  675. rc = devm_request_irq(&pdev->dev, dev->irq, atmel_twi_interrupt, 0,
  676. dev_name(dev->dev), dev);
  677. if (rc) {
  678. dev_err(dev->dev, "Cannot get irq %d: %d\n", dev->irq, rc);
  679. return rc;
  680. }
  681. platform_set_drvdata(pdev, dev);
  682. dev->clk = devm_clk_get(dev->dev, NULL);
  683. if (IS_ERR(dev->clk)) {
  684. dev_err(dev->dev, "no clock defined\n");
  685. return -ENODEV;
  686. }
  687. clk_prepare_enable(dev->clk);
  688. if (dev->pdata->has_dma_support) {
  689. if (at91_twi_configure_dma(dev, phy_addr) == 0)
  690. dev->use_dma = true;
  691. }
  692. rc = of_property_read_u32(dev->dev->of_node, "clock-frequency",
  693. &bus_clk_rate);
  694. if (rc)
  695. bus_clk_rate = DEFAULT_TWI_CLK_HZ;
  696. at91_calc_twi_clock(dev, bus_clk_rate);
  697. at91_init_twi_bus(dev);
  698. snprintf(dev->adapter.name, sizeof(dev->adapter.name), "AT91");
  699. i2c_set_adapdata(&dev->adapter, dev);
  700. dev->adapter.owner = THIS_MODULE;
  701. dev->adapter.class = I2C_CLASS_DEPRECATED;
  702. dev->adapter.algo = &at91_twi_algorithm;
  703. dev->adapter.dev.parent = dev->dev;
  704. dev->adapter.nr = pdev->id;
  705. dev->adapter.timeout = AT91_I2C_TIMEOUT;
  706. dev->adapter.dev.of_node = pdev->dev.of_node;
  707. rc = i2c_add_numbered_adapter(&dev->adapter);
  708. if (rc) {
  709. dev_err(dev->dev, "Adapter %s registration failed\n",
  710. dev->adapter.name);
  711. clk_disable_unprepare(dev->clk);
  712. return rc;
  713. }
  714. dev_info(dev->dev, "AT91 i2c bus driver.\n");
  715. return 0;
  716. }
  717. static int at91_twi_remove(struct platform_device *pdev)
  718. {
  719. struct at91_twi_dev *dev = platform_get_drvdata(pdev);
  720. i2c_del_adapter(&dev->adapter);
  721. clk_disable_unprepare(dev->clk);
  722. return 0;
  723. }
  724. #ifdef CONFIG_PM
  725. static int at91_twi_runtime_suspend(struct device *dev)
  726. {
  727. struct at91_twi_dev *twi_dev = dev_get_drvdata(dev);
  728. clk_disable(twi_dev->clk);
  729. return 0;
  730. }
  731. static int at91_twi_runtime_resume(struct device *dev)
  732. {
  733. struct at91_twi_dev *twi_dev = dev_get_drvdata(dev);
  734. return clk_enable(twi_dev->clk);
  735. }
/* Runtime-PM-only ops; no system sleep hooks are provided here. */
static const struct dev_pm_ops at91_twi_pm = {
	.runtime_suspend	= at91_twi_runtime_suspend,
	.runtime_resume		= at91_twi_runtime_resume,
};

#define at91_twi_pm_ops (&at91_twi_pm)
#else
#define at91_twi_pm_ops NULL
#endif
static struct platform_driver at91_twi_driver = {
	.probe		= at91_twi_probe,
	.remove		= at91_twi_remove,
	.id_table	= at91_twi_devtypes,	/* legacy (non-DT) matching */
	.driver		= {
		.name	= "at91_i2c",
		.owner	= THIS_MODULE,
		.of_match_table = of_match_ptr(atmel_twi_dt_ids),
		.pm	= at91_twi_pm_ops,
	},
};
static int __init at91_twi_init(void)
{
	return platform_driver_register(&at91_twi_driver);
}

static void __exit at91_twi_exit(void)
{
	platform_driver_unregister(&at91_twi_driver);
}

/* subsys_initcall: register early so dependent devices can probe. */
subsys_initcall(at91_twi_init);
module_exit(at91_twi_exit);

MODULE_AUTHOR("Nikolaus Voss <n.voss@weinmann.de>");
MODULE_DESCRIPTION("I2C (TWI) driver for Atmel AT91");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:at91_i2c");