/*
 * spi.c — MediaTek SPI bus controller driver.
 */
  1. #include <linux/init.h>
  2. #include <linux/module.h>
  3. #include <linux/device.h>
  4. #include <linux/ioport.h>
  5. #include <linux/errno.h>
  6. #include <linux/spi/spi.h>
  7. #include <linux/workqueue.h>
  8. #include <linux/dma-mapping.h>
  9. #include <linux/platform_device.h>
  10. #include <linux/interrupt.h>
  11. #include <linux/irqreturn.h>
  12. #include <linux/types.h>
  13. #include <linux/delay.h>
  14. #if !defined(CONFIG_MTK_CLKMGR)
  15. #include <linux/clk.h>
  16. #endif /* !defined(CONFIG_MTK_CLKMGR) */
  17. #include <linux/err.h>
  18. #include <linux/io.h>
  19. #include <linux/spinlock.h>
  20. #include <linux/sched.h>
  21. #include <linux/wakelock.h>
  22. #ifdef CONFIG_OF
  23. #include <linux/of.h>
  24. #include <linux/of_irq.h>
  25. #include <linux/of_address.h>
  26. #endif
  27. /*#include <mach/irqs.h>*/
  28. #include <mt_spi.h>
  29. #include "mt_spi_hal.h"
  30. /*#include <mach/mt_gpio.h>*/
  31. #if defined(CONFIG_MTK_CLKMGR)
  32. /* mt_clkmgr.h will be removed after CCF porting is finished. */
  33. #include <mach/mt_clkmgr.h>
  34. #endif /* defined(CONFIG_MTK_CLKMGR) */
  35. #if (defined(CONFIG_MTK_FPGA))
  36. #define CONFIG_MT_SPI_FPGA_ENABLE
  37. #endif
  38. /*auto select transfer mode*/
  39. /*#define SPI_AUTO_SELECT_MODE*/
  40. #ifdef SPI_AUTO_SELECT_MODE
  41. #define SPI_DATA_SIZE 16
  42. #endif
  43. /*open base log out*/
  44. /*#define SPI_DEBUG*/
  45. #define SPI_DEBUG
  46. /*open verbose log out*/
  47. /*#define SPI_VERBOSE*/
  48. #define SPI_VERBOSE
  49. #define IDLE 0
  50. #define INPROGRESS 1
  51. #define PAUSED 2
  52. #define PACKET_SIZE 0x400
  53. #define SPI_FIFO_SIZE 32
  54. /* #define FIFO_TX 0
  55. #define FIFO_RX 1 */
  56. enum spi_fifo {
  57. FIFO_TX,
  58. FIFO_RX,
  59. FIFO_ALL
  60. };
  61. #define INVALID_DMA_ADDRESS 0xffffffff
  62. /*open time record debug, log can't affect transfer*/
  63. /* #define SPI_REC_DEBUG */
  64. u32 pad_macro;
  65. static void enable_clk(struct mt_spi_t *ms)
  66. {
  67. #if (!defined(CONFIG_MT_SPI_FPGA_ENABLE))
  68. #if defined(CONFIG_MTK_CLKMGR)
  69. enable_clock(MT_CG_PERI_SPI0, "spi");
  70. #else
  71. int ret;
  72. /* clk_prepare_enable(ms->clk_main); */
  73. ret = clk_enable(ms->clk_main);
  74. #endif
  75. #endif
  76. }
/* Exported wrapper: lets other drivers gate the SPI clock on. */
void mt_spi_enable_clk(struct mt_spi_t *ms)
{
	enable_clk(ms);
}
/*
 * Disable the SPI controller bus clock (no-op on FPGA builds).
 * Mirror of enable_clk(): legacy clock-manager gate or CCF clk_disable.
 */
static void disable_clk(struct mt_spi_t *ms)
{
#if (!defined(CONFIG_MT_SPI_FPGA_ENABLE))
#if defined(CONFIG_MTK_CLKMGR)
	disable_clock(MT_CG_PERI_SPI0, "spi");
#else
	/* clk_disable_unprepare(ms->clk_main); */
	clk_disable(ms->clk_main);
#endif
#endif
}
/* Exported wrapper: lets other drivers gate the SPI clock off. */
void mt_spi_disable_clk(struct mt_spi_t *ms)
{
	disable_clk(ms);
}
  96. #ifdef SPI_DEBUG
  97. /*#define SPI_DBG(fmt, args...) printk(KERN_ALERT "mt-spi.c:%5d: <%s>" fmt, __LINE__, __func__, ##args )*/
  98. #define SPI_DBG(fmt, args...) pr_debug("mt-spi.c:%5d: <%s>" fmt, __LINE__, __func__, ##args)
  99. #ifdef SPI_VERBOSE
  100. #define SPI_INFO(dev, fmt, args...) dev_alert(dev, "spi.c:%5d: <%s>" fmt, __LINE__, __func__, ##args)
/*
 * Debug aid: dump the main SPI controller registers through SPI_DBG.
 * Only compiled in when both SPI_DEBUG and SPI_VERBOSE are defined.
 */
static void spi_dump_reg(struct mt_spi_t *ms)
{
	SPI_DBG("||*****************************************||\n");
	SPI_DBG("cfg0:0x%.8x\n", spi_readl(ms, SPI_CFG0_REG));
	SPI_DBG("cfg1:0x%.8x\n", spi_readl(ms, SPI_CFG1_REG));
	SPI_DBG("cmd :0x%.8x\n", spi_readl(ms, SPI_CMD_REG));
	/* SPI_DBG("spi_tx_data_reg:0x%x\n", spi_readl(ms, SPI_TX_DATA_REG));
	SPI_DBG("spi_rx_data_reg:0x%x\n", spi_readl(ms, SPI_RX_DATA_REG));*/
	SPI_DBG("tx_s:0x%.8x\n", spi_readl(ms, SPI_TX_SRC_REG));
	SPI_DBG("rx_d:0x%.8x\n", spi_readl(ms, SPI_RX_DST_REG));
	SPI_DBG("sta1:0x%.8x\n", spi_readl(ms, SPI_STATUS1_REG));
	/* NOTE(review): STATUS1 is read a second time here and the log label
	 * is empty — possibly this was meant to read SPI_STATUS0_REG as
	 * "sta0"; confirm against mt_spi_hal.h before changing. */
	SPI_DBG(":0x%.8x\n", spi_readl(ms, SPI_STATUS1_REG));
	SPI_DBG("||*****************************************||\n");
}
  115. #else
  116. #define SPI_INFO(dev, fmt, args...)
  117. static void spi_dump_reg(struct mt_spi_t *ms)
  118. {
  119. }
  120. #endif
  121. #else
  122. #define SPI_DBG(fmt, args...)
  123. #define SPI_INFO(dev, fmt, args...)
  124. #endif
  125. #ifdef SPI_REC_DEBUG
  126. /*EVB the clock frequency is 130MHz, may be is reason that ldvt env.
  127. but it is 98500KHz on phone.
  128. */
  129. #define SPI_CLOCK_PERIED 100000 /*kHz */
  130. #include <linux/syscalls.h> /*getpid() */
  131. #define SPI_REC_MSG_MAX 500
  132. #define SPI_REC_NUM 20
  133. #define SPI_REC_STR_LEN 256
  134. static u32 spi_speed;
  135. static char msg_rec[SPI_REC_MSG_MAX][SPI_REC_STR_LEN];
  136. static int rec_count;
  137. static int rec_count_tmp = 1;
  138. static int rec_msg_count;
  139. static atomic_t rec_log_count = ATOMIC_INIT(0);
  140. static unsigned long long rec_msg_time[SPI_REC_MSG_MAX];
  141. static unsigned long long rec_time;
  142. /*should coding in file kernel/arch/arm/kernel/irq.c.
  143. extern unsigned long long spi_rec_t0;
  144. spi_rec_t0 = sched_clock();
  145. */
  146. unsigned long long spi_rec_t0; /* record interrupt act */
  147. DEFINE_SPINLOCK(msg_rec_lock);
  148. /*static unsigned long long t_rec[4]; */
  149. /*
  150. the function invoke time averrage 2us.
  151. */
  152. static inline void spi_rec_time(const char *str)
  153. {
  154. unsigned long flags;
  155. char tmp[64];
  156. spin_lock_irqsave(&msg_rec_lock, flags);
  157. if (strncmp(str, "msgs", 4) == 0) {
  158. rec_msg_count++;
  159. if (rec_msg_count >= SPI_REC_MSG_MAX)
  160. rec_msg_count = 0;
  161. rec_msg_time[rec_msg_count] = sched_clock();
  162. msg_rec[rec_msg_count][0] = '\0';
  163. sprintf(tmp, "%s,pid:%4d;", str, sys_getpid());
  164. strcat(msg_rec[rec_msg_count], tmp);
  165. } else if (strncmp(str, "msgn", 4) == 0) {
  166. rec_count++;
  167. if (rec_count >= SPI_REC_MSG_MAX)
  168. rec_count = 0;
  169. rec_time = sched_clock();
  170. sprintf(tmp, "%s:%8lld;", str, rec_time - rec_msg_time[rec_count]);
  171. strcat(msg_rec[rec_count], tmp);
  172. /*if want to spi interrupt action, cancle the comment*/
  173. } /*else if(strncmp(str, "irqs", 4) == 0){
  174. sprintf(tmp,"%s,%5lld:%8lld;",str,
  175. sched_clock() - spi_rec_t0,sched_clock() - rec_time);
  176. if((strlen(tmp) + strlen(msg_rec[rec_count])) < (SPI_REC_STR_LEN - 64)){
  177. strcat(msg_rec[rec_count],tmp);
  178. }else{
  179. strcat(msg_rec[rec_count],"#");
  180. }
  181. } */
  182. else {
  183. sprintf(tmp, "%s:%8lld;", str, sched_clock() - rec_time);
  184. if ((strlen(tmp) + strlen(msg_rec[rec_count])) < (SPI_REC_STR_LEN - 64))
  185. strcat(msg_rec[rec_count], tmp);
  186. else
  187. strcat(msg_rec[rec_count], "@");
  188. }
  189. spin_unlock_irqrestore(&msg_rec_lock, flags);
  190. }
  191. void mt_spi_workqueue_handler(void *data)
  192. {
  193. int i = 0;
  194. for (i = 0; i < SPI_REC_NUM; i++) {
  195. SPI_DBG("spi-rec%3d-%3d:%s\n", rec_count, rec_count_tmp, msg_rec[rec_count_tmp]);
  196. msg_rec[rec_count_tmp][0] = '\0';
  197. rec_count_tmp++;
  198. if (rec_count_tmp >= SPI_REC_MSG_MAX)
  199. rec_count_tmp = 0;
  200. }
  201. atomic_dec(&rec_log_count);
  202. }
  203. DECLARE_WORK(mt_spi_workqueue, mt_spi_workqueue_handler);
  204. #else
  205. static inline void spi_rec_time(const char *str)
  206. {
  207. }
  208. #endif
  209. #if 0
  210. #if !defined(CONFIG_MTK_LEGACY)
  211. struct pinctrl *pinctrl_spi;
  212. struct pinctrl_state *pins_spi_default;
  213. struct pinctrl_state *pins_spi1_cs_set, *pins_spi1_cs_clear, *pins_spi1_clk_set, *pins_spi1_clk_clear;
  214. struct pinctrl_state *pins_spi1_miso_set, *pins_spi1_miso_clear, *pins_spi1_mosi_set, *pins_spi1_mosi_clear;
  215. static int spi_get_gpio_info(struct platform_device *pdev)
  216. {
  217. int ret;
  218. /*SPI_DBG("spi_get_gpio_info pin ctrl===================\n");*/
  219. pinctrl_spi = devm_pinctrl_get(&pdev->dev);
  220. if (IS_ERR(pinctrl_spi)) {
  221. ret = PTR_ERR(pinctrl_spi);
  222. dev_err(&pdev->dev, "fwq Cannot find touch pinctrl1!\n");
  223. return ret;
  224. }
  225. pins_spi_default = pinctrl_lookup_state(pinctrl_spi, "spi1_gpio_def");
  226. if (IS_ERR(pins_spi_default)) {
  227. ret = PTR_ERR(pins_spi_default);
  228. dev_err(&pdev->dev, "fwq Cannot find touch pinctrl pins_spi_default!\n");
  229. return ret;
  230. }
  231. pins_spi1_cs_set = pinctrl_lookup_state(pinctrl_spi, "spi1_cs_set");
  232. if (IS_ERR(pins_spi1_cs_set)) {
  233. ret = PTR_ERR(pins_spi1_cs_set);
  234. dev_err(&pdev->dev, "fwq Cannot find touch pinctrl pins_spi1_cs_set!\n");
  235. return ret;
  236. }
  237. pins_spi1_cs_clear = pinctrl_lookup_state(pinctrl_spi, "spi1_cs_clr");
  238. if (IS_ERR(pins_spi1_cs_clear)) {
  239. ret = PTR_ERR(pins_spi1_cs_clear);
  240. dev_err(&pdev->dev, "fwq Cannot find touch pinctrl pins_spi1_cs_clear!\n");
  241. return ret;
  242. }
  243. pins_spi1_clk_set = pinctrl_lookup_state(pinctrl_spi, "spi1_clk_set");
  244. if (IS_ERR(pins_spi1_clk_set)) {
  245. ret = PTR_ERR(pins_spi1_clk_set);
  246. dev_err(&pdev->dev, "fwq Cannot find touch pinctrl pins_spi1_clk_set!\n");
  247. return ret;
  248. }
  249. pins_spi1_clk_clear = pinctrl_lookup_state(pinctrl_spi, "spi1_clk_clr");
  250. if (IS_ERR(pins_spi1_clk_clear)) {
  251. ret = PTR_ERR(pins_spi1_clk_clear);
  252. dev_err(&pdev->dev, "fwq Cannot find touch pinctrl pins_spi1_clk_clear!\n");
  253. return ret;
  254. }
  255. pins_spi1_miso_set = pinctrl_lookup_state(pinctrl_spi, "spi1_miso_set");
  256. if (IS_ERR(pins_spi1_miso_set)) {
  257. ret = PTR_ERR(pins_spi1_miso_set);
  258. dev_err(&pdev->dev, "fwq Cannot find touch pinctrl pins_spi1_miso_set!\n");
  259. return ret;
  260. }
  261. pins_spi1_miso_clear = pinctrl_lookup_state(pinctrl_spi, "spi1_miso_clr");
  262. if (IS_ERR(pins_spi1_miso_clear)) {
  263. ret = PTR_ERR(pins_spi1_miso_clear);
  264. dev_err(&pdev->dev, "fwq Cannot find touch pinctrl pins_spi1_miso_clear!\n");
  265. return ret;
  266. }
  267. pins_spi1_mosi_set = pinctrl_lookup_state(pinctrl_spi, "spi1_mosi_set");
  268. if (IS_ERR(pins_spi1_mosi_set)) {
  269. ret = PTR_ERR(pins_spi1_mosi_set);
  270. dev_err(&pdev->dev, "fwq Cannot find touch pinctrl pins_spi1_mosi_set!\n");
  271. return ret;
  272. }
  273. pins_spi1_mosi_clear = pinctrl_lookup_state(pinctrl_spi, "spi1_mosi_clr");
  274. if (IS_ERR(pins_spi1_mosi_clear)) {
  275. ret = PTR_ERR(pins_spi1_mosi_clear);
  276. dev_err(&pdev->dev, "fwq Cannot find touch pinctrl pins_spi1_mosi_clear!\n");
  277. return ret;
  278. }
  279. return 0;
  280. }
  281. static int spi_set_gpio_info(struct platform_device *pdev , int spi_pin_mode)
  282. {
  283. spi_get_gpio_info(pdev);
  284. if (spi_pin_mode == 1) {
  285. pinctrl_select_state(pinctrl_spi, pins_spi1_cs_set);
  286. pinctrl_select_state(pinctrl_spi, pins_spi1_clk_set);
  287. pinctrl_select_state(pinctrl_spi, pins_spi1_miso_set);
  288. pinctrl_select_state(pinctrl_spi, pins_spi1_mosi_set);
  289. /*SPI_DBG("spi_get_gpio_info pin ctrl set 1111===================\n");
  290. SPI_DBG("spi_get_gpio_info pin gpio65 mode= %d\n", mt_get_gpio_mode(65|0x80000000));
  291. SPI_DBG("spi_get_gpio_info pin gpio66 mode= %d\n", mt_get_gpio_mode(66|0x80000000));
  292. SPI_DBG("spi_get_gpio_info pin gpio67 mode= %d\n", mt_get_gpio_mode(67|0x80000000));
  293. SPI_DBG("spi_get_gpio_info pin gpio68 mode= %d\n", mt_get_gpio_mode(68|0x80000000));*/
  294. } else {
  295. pinctrl_select_state(pinctrl_spi, pins_spi1_cs_clear);
  296. pinctrl_select_state(pinctrl_spi, pins_spi1_clk_clear);
  297. pinctrl_select_state(pinctrl_spi, pins_spi1_miso_clear);
  298. pinctrl_select_state(pinctrl_spi, pins_spi1_mosi_clear);
  299. }
  300. return 0;
  301. }
  302. #endif
  303. #endif
/*
 * Switch the SPI pads to their functional (SPI) pinmux mode.
 * All implementations are commented out, so this is currently a no-op
 * placeholder kept for its call sites.
 */
static void spi_gpio_set(struct mt_spi_t *ms)
{
	/* mt_set_gpio_mode(GPIO_SPI_CS_PIN, GPIO_SPI_CS_PIN_M_SPI_CS_N);
	mt_set_gpio_mode(GPIO_SPI_SCK_PIN, GPIO_SPI_SCK_PIN_M_SPI_SCK);
	mt_set_gpio_mode(GPIO_SPI_MISO_PIN, GPIO_SPI_MISO_PIN_M_SPI_MISO);
	mt_set_gpio_mode(GPIO_SPI_MOSI_PIN, GPIO_SPI_MOSI_PIN_M_SPI_MOSI);*/
	/*spi_set_gpio_info(1);*/
}
/*
 * Return the SPI pads to GPIO mode to save power when idle.
 * All implementations are commented out, so this is currently a no-op
 * placeholder kept for its call sites (see mt_spi_msg_done).
 */
static void spi_gpio_reset(struct mt_spi_t *ms)
{
	/*set dir pull to save power */
	/*
	mt_set_gpio_mode(GPIO_SPI_CS_PIN, GPIO_SPI_CS_PIN_M_GPIO);
	mt_set_gpio_mode(GPIO_SPI_SCK_PIN, GPIO_SPI_SCK_PIN_M_GPIO);
	mt_set_gpio_mode(GPIO_SPI_MISO_PIN, GPIO_SPI_MISO_PIN_M_GPIO);
	mt_set_gpio_mode(GPIO_SPI_MOSI_PIN, GPIO_SPI_MOSI_PIN_M_GPIO); */
	/*spi_set_gpio_info(0);*/
}
  322. static int is_pause_mode(struct spi_message *msg)
  323. {
  324. struct mt_chip_conf *conf;
  325. conf = (struct mt_chip_conf *)msg->state;
  326. return conf->pause;
  327. }
  328. static int is_fifo_read(struct spi_message *msg)
  329. {
  330. struct mt_chip_conf *conf;
  331. u32 value = 0;
  332. conf = (struct mt_chip_conf *)msg->state;
  333. value = (conf->com_mod == FIFO_TRANSFER) || (conf->com_mod == OTHER1);
  334. return value;
  335. }
  336. static int is_interrupt_enable(struct mt_spi_t *ms)
  337. {
  338. u32 cmd;
  339. cmd = spi_readl(ms, SPI_CMD_REG);
  340. return (cmd >> SPI_CMD_FINISH_IE_OFFSET) & 1;
  341. }
  342. static inline void set_pause_bit(struct mt_spi_t *ms)
  343. {
  344. u32 reg_val;
  345. reg_val = spi_readl(ms, SPI_CMD_REG);
  346. reg_val |= 1 << SPI_CMD_PAUSE_EN_OFFSET;
  347. spi_writel(ms, SPI_CMD_REG, reg_val);
  348. }
  349. static inline void clear_pause_bit(struct mt_spi_t *ms)
  350. {
  351. u32 reg_val;
  352. reg_val = spi_readl(ms, SPI_CMD_REG);
  353. reg_val &= ~SPI_CMD_PAUSE_EN_MASK;
  354. spi_writel(ms, SPI_CMD_REG, reg_val);
  355. }
  356. static inline void clear_resume_bit(struct mt_spi_t *ms)
  357. {
  358. u32 reg_val;
  359. reg_val = spi_readl(ms, SPI_CMD_REG);
  360. reg_val &= ~SPI_CMD_RESUME_MASK;
  361. spi_writel(ms, SPI_CMD_REG, reg_val);
  362. }
  363. static inline void spi_disable_dma(struct mt_spi_t *ms)
  364. {
  365. u32 cmd;
  366. cmd = spi_readl(ms, SPI_CMD_REG);
  367. cmd &= ~SPI_CMD_TX_DMA_MASK;
  368. cmd &= ~SPI_CMD_RX_DMA_MASK;
  369. spi_writel(ms, SPI_CMD_REG, cmd);
  370. }
  371. /*
  372. static inline void spi_clear_fifo(struct mt_spi_t *ms, enum spi_fifo fifo)
  373. {
  374. u32 volatile reg_val;
  375. int i;
  376. for(i = 0; i < SPI_FIFO_SIZE/4; i++){
  377. #if 1
  378. if ( fifo == FIFO_TX )
  379. spi_writel( ms, SPI_TX_DATA_REG, 0x0 );
  380. else if( fifo == FIFO_RX )
  381. reg_val = spi_readl ( ms, SPI_RX_DATA_REG );
  382. else if(fifo == FIFO_ALL){
  383. spi_writel( ms, SPI_TX_DATA_REG, 0x0 );
  384. spi_writel( ms, SPI_RX_DATA_REG, 0x0 ); //clear data
  385. reg_val = spi_readl( ms, SPI_RX_DATA_REG );
  386. }else{
  387. SPI_DBG("The parameter is not right.\n");
  388. }
  389. SPI_DBG("SPI_STATUS1_REG:0x%.8x\n", spi_readl(ms, SPI_STATUS1_REG));
  390. #endif
  391. }
  392. }
  393. */
/*
 * Program the DMA source/destination registers and set the TX/RX DMA
 * enable bits in SPI_CMD for ms->cur_transfer.
 *
 * mode selects which directions use DMA: DMA_TRANSFER/OTHER1 arm TX,
 * DMA_TRANSFER/OTHER2 arm RX.  The hardware expects 4-byte-aligned DMA
 * addresses; misaligned buffers are only warned about, not rejected.
 */
static inline void spi_enable_dma(struct mt_spi_t *ms, u8 mode)
{
	u32 cmd;

	cmd = spi_readl(ms, SPI_CMD_REG);
#define SPI_4B_ALIGN 0x4
	/*set up the DMA bus address */
	if ((mode == DMA_TRANSFER) || (mode == OTHER1)) {
		/* a TX direction exists if there is a tx_buf, or an already
		 * valid (mapped) tx_dma address */
		if ((ms->cur_transfer->tx_buf != NULL)
		    || ((ms->cur_transfer->tx_dma != INVALID_DMA_ADDRESS) && (ms->cur_transfer->tx_dma != 0))) {
			if (ms->cur_transfer->tx_dma & (SPI_4B_ALIGN - 1)) {
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
				dev_err(&ms->pdev->dev,
					"Warning!Tx_DMA address should be 4Byte alignment,buf:%p,dma:%llx\n",
					ms->cur_transfer->tx_buf, ms->cur_transfer->tx_dma);
#else
				dev_err(&ms->pdev->dev,
					"Warning!Tx_DMA address should be 4Byte alignment,buf:%p,dma:%x\n",
					ms->cur_transfer->tx_buf, ms->cur_transfer->tx_dma);
#endif
			}
			spi_writel(ms, SPI_TX_SRC_REG, cpu_to_le32(ms->cur_transfer->tx_dma));
			cmd |= 1 << SPI_CMD_TX_DMA_OFFSET;
		}
	}
	if ((mode == DMA_TRANSFER) || (mode == OTHER2)) {
		if ((ms->cur_transfer->rx_buf != NULL)
		    || ((ms->cur_transfer->rx_dma != INVALID_DMA_ADDRESS) && (ms->cur_transfer->rx_dma != 0))) {
			if (ms->cur_transfer->rx_dma & (SPI_4B_ALIGN - 1)) {
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
				dev_err(&ms->pdev->dev,
					"Warning!Rx_DMA address should be 4Byte alignment,buf:%p,dma:%llx\n",
					ms->cur_transfer->rx_buf, ms->cur_transfer->rx_dma);
#else
				dev_err(&ms->pdev->dev,
					"Warning!Rx_DMA address should be 4Byte alignment,buf:%p,dma:%x\n",
					ms->cur_transfer->rx_buf, ms->cur_transfer->rx_dma);
#endif
			}
			spi_writel(ms, SPI_RX_DST_REG, cpu_to_le32(ms->cur_transfer->rx_dma));
			cmd |= 1 << SPI_CMD_RX_DMA_OFFSET;
		}
	}
	/* addresses must reach the registers before the enable bits [SMP] */
	mb();
	spi_writel(ms, SPI_CMD_REG, cmd);
}
  439. static inline int spi_setup_packet(struct mt_spi_t *ms)
  440. {
  441. u32 packet_size, packet_loop, cfg1;
  442. /*set transfer packet and loop */
  443. if (ms->cur_transfer->len < PACKET_SIZE)
  444. packet_size = ms->cur_transfer->len;
  445. else
  446. packet_size = PACKET_SIZE;
  447. if (ms->cur_transfer->len % packet_size) {
  448. packet_loop = ms->cur_transfer->len / packet_size + 1;
  449. /*parameter not correct, there will be more data transfer,notice user to change */
  450. dev_err(&ms->pdev->dev, "ERROR!!The lens must be a multiple of %d, your len %u\n\n", PACKET_SIZE,
  451. ms->cur_transfer->len);
  452. return -EINVAL;
  453. }
  454. packet_loop = (ms->cur_transfer->len) / packet_size;
  455. SPI_DBG("The packet_size:0x%x packet_loop:0x%x\n", packet_size, packet_loop);
  456. cfg1 = spi_readl(ms, SPI_CFG1_REG);
  457. cfg1 &= ~(SPI_CFG1_PACKET_LENGTH_MASK + SPI_CFG1_PACKET_LOOP_MASK);
  458. cfg1 |= (packet_size - 1) << SPI_CFG1_PACKET_LENGTH_OFFSET;
  459. cfg1 |= (packet_loop - 1) << SPI_CFG1_PACKET_LOOP_OFFSET;
  460. spi_writel(ms, SPI_CFG1_REG, cfg1);
  461. return 0;
  462. }
  463. /*
  464. static int spi_is_busy(struct mt_spi_t *ms)
  465. {
  466. u32 reg_val;
  467. unsigned long flags;
  468. spin_lock_irqsave(&ms->spin_lock, flags);
  469. reg_val = spi_readl(ms, SPI_STATUS1_REG);
  470. spin_unlock_irqrestore(&ms->spin_lock, flags);
  471. if ( reg_val & 0x1) {
  472. SPI_DBG("is not busy.\n");
  473. return 0;
  474. }else {
  475. SPI_DBG("is busy.\n");
  476. return 1;
  477. }
  478. }
  479. */
  480. static inline void spi_start_transfer(struct mt_spi_t *ms)
  481. {
  482. u32 reg_val;
  483. reg_val = spi_readl(ms, SPI_CMD_REG);
  484. reg_val |= 1 << SPI_CMD_ACT_OFFSET;
  485. /*All register must be prepared before setting the start bit [SMP] */
  486. mb();
  487. spi_writel(ms, SPI_CMD_REG, reg_val);
  488. }
  489. static inline void spi_resume_transfer(struct mt_spi_t *ms)
  490. {
  491. u32 reg_val;
  492. reg_val = spi_readl(ms, SPI_CMD_REG);
  493. reg_val &= ~SPI_CMD_RESUME_MASK;
  494. reg_val |= 1 << SPI_CMD_RESUME_OFFSET;
  495. /*All register must be prepared before setting the start bit [SMP] */
  496. mb();
  497. spi_writel(ms, SPI_CMD_REG, reg_val);
  498. }
  499. static void reset_spi(struct mt_spi_t *ms)
  500. {
  501. u32 reg_val;
  502. /*set the software reset bit in SPI_CMD_REG. */
  503. reg_val = spi_readl(ms, SPI_CMD_REG);
  504. reg_val &= ~SPI_CMD_RST_MASK;
  505. reg_val |= 1 << SPI_CMD_RST_OFFSET;
  506. spi_writel(ms, SPI_CMD_REG, reg_val);
  507. reg_val = spi_readl(ms, SPI_CMD_REG);
  508. reg_val &= ~SPI_CMD_RST_MASK;
  509. spi_writel(ms, SPI_CMD_REG, reg_val);
  510. }
  511. static inline int is_last_xfer(struct spi_message *msg, struct spi_transfer *xfer)
  512. {
  513. return msg->transfers.prev == &xfer->transfer_list;
  514. }
/*
 * Map xfer's tx/rx buffers for DMA according to mode (TX for
 * DMA_TRANSFER/OTHER1, RX for DMA_TRANSFER/OTHER2).  Unmapped
 * directions are left at INVALID_DMA_ADDRESS.
 *
 * Returns 0 on success or -ENOMEM on a mapping failure; a failed RX
 * mapping also unwinds the TX mapping so nothing leaks.
 */
static int transfer_dma_mapping(struct mt_spi_t *ms, u8 mode, struct spi_transfer *xfer)
{
	struct device *dev = &ms->pdev->dev;

	xfer->tx_dma = xfer->rx_dma = INVALID_DMA_ADDRESS;
	if ((mode == DMA_TRANSFER) || (mode == OTHER1)) {
		if (xfer->tx_buf) {
			xfer->tx_dma = dma_map_single(dev, (void *)xfer->tx_buf, xfer->len, DMA_TO_DEVICE);
			if (dma_mapping_error(dev, xfer->tx_dma)) {
				dev_err(&ms->pdev->dev, "dma mapping tx_buf error.\n");
				return -ENOMEM;
			}
		}
	}
	if ((mode == DMA_TRANSFER) || (mode == OTHER2)) {
		if (xfer->rx_buf) {
			xfer->rx_dma = dma_map_single(dev, xfer->rx_buf, xfer->len, DMA_FROM_DEVICE);
			if (dma_mapping_error(dev, xfer->rx_dma)) {
				/* undo the tx mapping so it does not leak */
				if (xfer->tx_buf)
					dma_unmap_single(dev, xfer->tx_dma, xfer->len, DMA_TO_DEVICE);
				dev_err(&ms->pdev->dev, "dma mapping rx_buf error.\n");
				return -ENOMEM;
			}
		}
	}
	if (mode != FIFO_TRANSFER)
		SPI_DBG("Transfer_dma_mapping success.\n");
#if 0
	/*cross 1K test, non 4byte alignment */
	{
#define SPI_CROSS_ALIGN_OFFSET 1009
		u8 *p;
		SPI_DBG("Transfer dma addr:Tx:0x%x, Rx:0x%x, before\n", xfer->tx_dma, xfer->rx_dma);
		SPI_DBG("Transfer buf addr:Tx:0x%x, Rx:0x%x, before\n", xfer->tx_buf, xfer->rx_buf);
		xfer->len = 32;
		p = (u8 *) xfer->tx_dma;
		xfer->tx_dma = (u32 *) (p + SPI_CROSS_ALIGN_OFFSET);
		p = (u8 *) xfer->rx_dma;
		xfer->rx_dma = (u32 *) (p + SPI_CROSS_ALIGN_OFFSET);
		p = (u8 *) xfer->tx_buf;
		xfer->tx_buf = (u32 *) (p + SPI_CROSS_ALIGN_OFFSET);
		p = (u8 *) xfer->rx_buf;
		xfer->rx_buf = (u32 *) (p + SPI_CROSS_ALIGN_OFFSET);
		SPI_DBG("Transfer dma addr:Tx:0x%x, Rx:0x%x\n", xfer->tx_dma, xfer->rx_dma);
		SPI_DBG("Transfer buf addr:Tx:0x%x, Rx:0x%x\n", xfer->tx_buf, xfer->rx_buf);
	}
#endif
	return 0;
}
  563. static void transfer_dma_unmapping(struct mt_spi_t *ms, struct spi_transfer *xfer)
  564. {
  565. struct device *dev = &ms->pdev->dev;
  566. if ((xfer->tx_dma != INVALID_DMA_ADDRESS) && (xfer->tx_dma != 0)) {
  567. dma_unmap_single(dev, xfer->tx_dma, xfer->len, DMA_TO_DEVICE);
  568. xfer->tx_dma = INVALID_DMA_ADDRESS;
  569. }
  570. if ((xfer->rx_dma != INVALID_DMA_ADDRESS) && (xfer->rx_dma != 0)) {
  571. dma_unmap_single(dev, xfer->rx_dma, xfer->len, DMA_FROM_DEVICE);
  572. xfer->rx_dma = INVALID_DMA_ADDRESS;
  573. }
  574. }
  575. static void mt_spi_msg_done(struct mt_spi_t *ms, struct spi_message *msg, int status);
  576. static void mt_spi_next_message(struct mt_spi_t *ms);
  577. static int mt_do_spi_setup(struct mt_spi_t *ms, struct mt_chip_conf *chip_config);
  578. static int mt_spi_next_xfer(struct mt_spi_t *ms, struct spi_message *msg)
  579. {
  580. struct spi_transfer *xfer;
  581. struct mt_chip_conf *chip_config = (struct mt_chip_conf *)msg->state;
  582. u8 mode, cnt, i;
  583. int ret = 0;
  584. char xfer_rec[64];
  585. #ifdef SPI_AUTO_SELECT_MODE
  586. u32 reg_val = 0;
  587. #endif
  588. if (unlikely(!ms)) {
  589. dev_err(&msg->spi->dev, "master wrapper is invalid\n");
  590. ret = -EINVAL;
  591. goto fail;
  592. }
  593. if (unlikely(!msg)) {
  594. dev_err(&msg->spi->dev, "msg is invalid\n");
  595. ret = -EINVAL;
  596. goto fail;
  597. }
  598. if (unlikely(!msg->state)) {
  599. dev_err(&msg->spi->dev, "msg config is invalid\n");
  600. ret = -EINVAL;
  601. goto fail;
  602. }
  603. if (unlikely(!is_interrupt_enable(ms))) {
  604. dev_err(&msg->spi->dev, "interrupt is disable\n");
  605. ret = -EINVAL;
  606. goto fail;
  607. }
  608. #ifdef SPI_AUTO_SELECT_MODE
  609. if (ms->cur_transfer.len > SPI_DATA_SIZE) {
  610. chip_config->com_mod = DMA_TRANSFER;
  611. SPI_DBG("SPI auto select DMA mode\n");
  612. reg_val = spi_readl(ms, SPI_CMD_REG);
  613. reg_val |= ((1 << SPI_CMD_TX_DMA_OFFSET) | (1 << SPI_CMD_RX_DMA_OFFSET));
  614. spi_writel(ms, SPI_CMD_REG, reg_val);
  615. SPI_DBG("SPI auto select CMD = 0x%x\n", spi_readl(ms, SPI_CMD_REG));
  616. } else {
  617. SPI_DBG("SPI auto select do nothing\n");
  618. }
  619. #endif
  620. mode = chip_config->com_mod;
  621. xfer = ms->cur_transfer;
  622. SPI_DBG("start xfer 0x%p, mode %d, len %u\n", xfer, mode, xfer->len);
  623. if ((mode == FIFO_TRANSFER) || (mode == OTHER1) || (mode == OTHER2)) {
  624. if (xfer->len > SPI_FIFO_SIZE) {
  625. ret = -EINVAL;
  626. dev_err(&msg->spi->dev, "xfer len is invalid over fifo size\n");
  627. goto fail;
  628. }
  629. }
  630. if (is_last_xfer(msg, xfer)) {
  631. SPI_DBG("The last xfer.\n");
  632. ms->next_transfer = NULL;
  633. clear_pause_bit(ms);
  634. } else {
  635. SPI_DBG("Not the last xfer.\n");
  636. ms->next_transfer = list_entry(xfer->transfer_list.next, struct spi_transfer, transfer_list);
  637. }
  638. /*disable DMA */
  639. spi_disable_dma(ms);
  640. /*spi_clear_fifo(ms, FIFO_ALL); */
  641. ret = spi_setup_packet(ms);
  642. if (ret < 0)
  643. goto fail;
  644. /*Using FIFO to send data */
  645. if ((mode == FIFO_TRANSFER) || (mode == OTHER2)) {
  646. cnt = (xfer->len % 4) ? (xfer->len / 4 + 1) : (xfer->len / 4);
  647. for (i = 0; i < cnt; i++) {
  648. spi_writel(ms, SPI_TX_DATA_REG, *((u32 *) xfer->tx_buf + i));
  649. SPI_INFO(&msg->spi->dev, "tx_buf data is:%x\n", *((u32 *) xfer->tx_buf + i));
  650. SPI_INFO(&msg->spi->dev, "tx_buf addr is:%p\n", (u32 *) xfer->tx_buf + i);
  651. }
  652. }
  653. /*Using DMA to send data */
  654. if ((mode == DMA_TRANSFER) || (mode == OTHER1) || (mode == OTHER2))
  655. spi_enable_dma(ms, mode);
  656. #ifdef SPI_VERBOSE
  657. spi_dump_reg(ms); /*Dump register before transfer */
  658. #endif
  659. if (ms->running == PAUSED) {
  660. SPI_DBG("pause status to resume.\n");
  661. spi_resume_transfer(ms);
  662. } else if (ms->running == IDLE) {
  663. SPI_DBG("The xfer start\n");
  664. /*if there is only one transfer, pause bit should not be set. */
  665. if (is_pause_mode(msg) && !is_last_xfer(msg, xfer)) {
  666. SPI_DBG("set pause mode.\n");
  667. set_pause_bit(ms);
  668. }
  669. /*All register must be prepared before setting the start bit [SMP] */
  670. spi_start_transfer(ms);
  671. } else {
  672. dev_err(&msg->spi->dev, "Wrong status\n");
  673. ret = -1;
  674. goto fail;
  675. }
  676. sprintf(xfer_rec, "xfer,%3d", xfer->len);
  677. spi_rec_time(xfer_rec);
  678. ms->running = INPROGRESS;
  679. /*exit pause mode */
  680. if (is_pause_mode(msg) && is_last_xfer(msg, xfer))
  681. clear_resume_bit(ms);
  682. return 0;
  683. fail:
  684. list_for_each_entry(xfer, &msg->transfers, transfer_list) {
  685. if ((!msg->is_dma_mapped))
  686. transfer_dma_unmapping(ms, xfer);
  687. }
  688. ms->running = IDLE;
  689. mt_spi_msg_done(ms, msg, ret);
  690. return ret;
  691. }
/*
 * Complete one message: unlink it from the driver queue, record status,
 * quiesce DMA, run the caller's completion callback and reset the driver
 * state.  If more messages are queued the next one is started; otherwise
 * the pads are returned to GPIO mode, the clock is gated and the wakelock
 * released.
 */
static void mt_spi_msg_done(struct mt_spi_t *ms, struct spi_message *msg, int status)
{
	list_del(&msg->queue);
	msg->status = status;
	SPI_DBG("msg:%p complete(%d): %u bytes transferred\n", msg, status, msg->actual_length);
	spi_disable_dma(ms);
	/*spi_clear_fifo(ms, FIFO_ALL); */
	msg->complete(msg->context);
	ms->running = IDLE;
	ms->cur_transfer = NULL;
	ms->next_transfer = NULL;
	/*disable_clk(); */
	spi_rec_time("msge");
#ifdef SPI_REC_DEBUG
	/* flush the trace ring once every SPI_REC_NUM completed records */
	if (!(rec_count % SPI_REC_NUM)) {
		atomic_inc(&rec_log_count);
		schedule_work(&mt_spi_workqueue);
	}
	/* if(atomic_read(&rec_log_count) > 0){
	schedule_work(&mt_spi_workqueue);
	tasklet_schedule(&spi_tasklet);
	SPI_OUT_REC_TIME;
	rec_count = 0;
	} */
#endif
	/* continue if needed */
	if (list_empty(&ms->queue)) {
		SPI_DBG("All msg is completion.\n\n");
		/*clock and gpio reset */
		spi_gpio_reset(ms);
		/*disable_clk(); */
		disable_clk(ms);
		/*schedule_work(&mt_spi_msgdone_workqueue);//disable clock */
		wake_unlock(&ms->wk_lock);
	} else
		mt_spi_next_message(ms);
}
/*
 * mt_spi_next_message - start processing the message at the head of
 * ms->queue: apply its chip configuration to the HW and launch its
 * first transfer.
 *
 * Precondition: ms->queue is non-empty (callers check before calling)
 * -- list_entry() on an empty list would return a bogus pointer.
 */
static void mt_spi_next_message(struct mt_spi_t *ms)
{
	struct spi_message *msg;
	struct mt_chip_conf *chip_config;
	char msg_addr[64];
	msg = list_entry(ms->queue.next, struct spi_message, queue);
	/* Per-message chip config was stashed in msg->state by mt_spi_transfer(). */
	chip_config = (struct mt_chip_conf *)msg->state;
#ifdef SPI_REC_DEBUG
	spi_speed = SPI_CLOCK_PERIED / (chip_config->low_time + chip_config->high_time);
	sprintf(msg_addr, "msgn,%4dKHz", spi_speed);
#else
	sprintf(msg_addr, "msgn");
#endif
	spi_rec_time(msg_addr);
	SPI_DBG("start transfer message:0x%p\n", msg);
	/* First transfer of the message becomes the current one. */
	ms->cur_transfer = list_entry(msg->transfers.next, struct spi_transfer, transfer_list);
	/*clock and gpio set */
	/* spi_gpio_set(ms);
	   enable_clk();
	   t_rec[0] = sched_clock();
	   spi_rec_time("clke");
	   t_rec[1] = sched_clock();
	   printk(KERN_ALERT"clke rec consume time%lld",t_rec[1] - t_rec[0]);*/
	/* Reset controller, program chip config, then kick the transfer. */
	reset_spi(ms);
	mt_do_spi_setup(ms, chip_config);
	mt_spi_next_xfer(ms, msg);
}
  756. static int mt_spi_transfer(struct spi_device *spidev, struct spi_message *msg)
  757. {
  758. struct spi_master *master;
  759. struct mt_spi_t *ms;
  760. struct spi_transfer *xfer;
  761. struct mt_chip_conf *chip_config;
  762. unsigned long flags;
  763. char msg_addr[64];
  764. master = spidev->master;
  765. ms = spi_master_get_devdata(master);
  766. /*wake_lock ( &ms->wk_lock ); */
  767. SPI_DBG("enter,start add msg:0x%p\n", msg);
  768. if (unlikely(!msg)) {
  769. dev_err(&spidev->dev, "msg is NULL pointer.\n");
  770. msg->status = -EINVAL;
  771. goto out;
  772. }
  773. if (unlikely(list_empty(&msg->transfers))) {
  774. dev_err(&spidev->dev, "the message is NULL.\n");
  775. msg->status = -EINVAL;
  776. msg->actual_length = 0;
  777. goto out;
  778. }
  779. /*if device don't config chip, set default */
  780. if (master->setup(spidev)) {
  781. dev_err(&spidev->dev, "set up error.\n");
  782. msg->status = -EINVAL;
  783. msg->actual_length = 0;
  784. goto out;
  785. }
  786. sprintf(msg_addr, "msgs:%p", msg);
  787. spi_rec_time(msg_addr);
  788. chip_config = (struct mt_chip_conf *)spidev->controller_data;
  789. msg->state = chip_config;
  790. list_for_each_entry(xfer, &msg->transfers, transfer_list) {
  791. if (!((xfer->tx_buf || xfer->rx_buf) && xfer->len)) {
  792. dev_err(&spidev->dev, "missing tx %p or rx %p buf, len%d\n", xfer->tx_buf, xfer->rx_buf,
  793. xfer->len);
  794. msg->status = -EINVAL;
  795. goto out;
  796. }
  797. /*
  798. * DMA map early, for performance (empties dcache ASAP) and
  799. * better fault reporting.
  800. *
  801. * NOTE that if dma_unmap_single() ever starts to do work on
  802. * platforms supported by this driver, we would need to clean
  803. * up mappings for previously-mapped transfers.
  804. */
  805. if ((!msg->is_dma_mapped)) {
  806. if (transfer_dma_mapping(ms, chip_config->com_mod, xfer) < 0)
  807. return -ENOMEM;
  808. }
  809. }
  810. #ifdef SPI_VERBOSE
  811. /* list_for_each_entry(xfer, &msg->transfers, transfer_list) {
  812. SPI_INFO(&spidev->dev,"xfer %p: len %04u tx %p/%08x rx %p/%08x\n",
  813. xfer, xfer->len,xfer->tx_buf, xfer->tx_dma, xfer->rx_buf, xfer->rx_dma);
  814. }*/
  815. #endif
  816. msg->status = -EINPROGRESS;
  817. msg->actual_length = 0;
  818. spin_lock_irqsave(&ms->lock, flags);
  819. list_add_tail(&msg->queue, &ms->queue);
  820. SPI_DBG("add msg %p to queue\n", msg);
  821. if (!ms->cur_transfer) {
  822. wake_lock(&ms->wk_lock);
  823. spi_gpio_set(ms);
  824. /*enable_clk(); */
  825. enable_clk(ms);
  826. mt_spi_next_message(ms);
  827. }
  828. spin_unlock_irqrestore(&ms->lock, flags);
  829. return 0;
  830. out:
  831. return -1;
  832. }
  833. static irqreturn_t mt_spi_interrupt(int irq, void *dev_id)
  834. {
  835. struct mt_spi_t *ms = (struct mt_spi_t *)dev_id;
  836. struct spi_message *msg;
  837. struct spi_transfer *xfer;
  838. struct mt_chip_conf *chip_config;
  839. unsigned long flags;
  840. u32 reg_val, cnt;
  841. u8 mode, i;
  842. spi_rec_time("irqs");
  843. spin_lock_irqsave(&ms->lock, flags);
  844. xfer = ms->cur_transfer;
  845. msg = list_entry(ms->queue.next, struct spi_message, queue);
  846. /*Clear interrupt status first by reading the register */
  847. reg_val = spi_readl(ms, SPI_STATUS0_REG);
  848. SPI_DBG("xfer:0x%p interrupt status:%x\n", xfer, reg_val & 0x3);
  849. if (unlikely(!msg)) {
  850. SPI_DBG("msg in interrupt %d is NULL pointer.\n", reg_val & 0x3);
  851. goto out;
  852. }
  853. if (unlikely(!xfer)) {
  854. SPI_DBG("xfer in interrupt %d is NULL pointer.\n", reg_val & 0x3);
  855. goto out;
  856. }
  857. chip_config = (struct mt_chip_conf *)msg->state;
  858. mode = chip_config->com_mod;
  859. /*clear the interrupt status bits by reading the register */
  860. /*
  861. reg_val = spi_readl(ms,SPI_STATUS0_REG);
  862. SPI_DBG("xfer:0x%p interrupt status:%x\n",xfer,reg_val&0x3); */
  863. if ((reg_val & 0x03) == 0)
  864. goto out;
  865. if (!msg->is_dma_mapped)
  866. transfer_dma_unmapping(ms, ms->cur_transfer);
  867. if (is_pause_mode(msg)) {
  868. if (ms->running == INPROGRESS)
  869. ms->running = PAUSED;
  870. else
  871. dev_err(&msg->spi->dev, "Wrong spi status.\n");
  872. } else
  873. ms->running = IDLE;
  874. if (is_fifo_read(msg) && xfer->rx_buf) {
  875. cnt = (xfer->len % 4) ? (xfer->len / 4 + 1) : (xfer->len / 4);
  876. for (i = 0; i < cnt; i++) {
  877. reg_val = spi_readl(ms, SPI_RX_DATA_REG); /*get the data from rx */
  878. SPI_INFO(&msg->spi->dev, "SPI_RX_DATA_REG:0x%x", reg_val);
  879. *((u32 *) xfer->rx_buf + i) = reg_val;
  880. }
  881. }
  882. msg->actual_length += xfer->len;
  883. if (is_last_xfer(msg, xfer)) {
  884. mt_spi_msg_done(ms, msg, 0);
  885. } else {
  886. ms->cur_transfer = ms->next_transfer;
  887. mt_spi_next_xfer(ms, msg);
  888. }
  889. spin_unlock_irqrestore(&ms->lock, flags);
  890. return IRQ_HANDLED;
  891. out:
  892. spin_unlock_irqrestore(&ms->lock, flags);
  893. SPI_DBG("return IRQ_NONE.\n");
  894. return IRQ_NONE;
  895. }
  896. /* Write chip configuration to HW register */
  897. static int mt_do_spi_setup(struct mt_spi_t *ms, struct mt_chip_conf *chip_config)
  898. {
  899. u32 reg_val;
  900. #ifdef SPI_VERBOSE
  901. u32 speed;
  902. #define SPI_MODULE_CLOCK 134300
  903. speed = SPI_MODULE_CLOCK / (chip_config->low_time + chip_config->high_time);
  904. SPI_DBG("mode:%d, speed:%d KHz,CPOL%d,CPHA%d\n", chip_config->com_mod, speed, chip_config->cpol,
  905. chip_config->cpha);
  906. #endif
  907. /*clear RST bits */
  908. /* reg_val = spi_readl ( ms, SPI_CMD_REG );
  909. reg_val &= ~ SPI_CMD_RST_MASK;
  910. spi_writel ( ms, SPI_CMD_REG, reg_val ); */
  911. /*set the timing */
  912. reg_val = spi_readl(ms, SPI_CFG0_REG);
  913. reg_val &= ~(SPI_CFG0_CS_HOLD_MASK | SPI_CFG0_CS_SETUP_MASK);
  914. reg_val |= ((chip_config->holdtime - 1) << SPI_CFG0_CS_HOLD_OFFSET);
  915. reg_val |= ((chip_config->setuptime - 1) << SPI_CFG0_CS_SETUP_OFFSET);
  916. spi_writel(ms, SPI_CFG0_REG, reg_val);
  917. reg_val = spi_readl(ms, SPI_CFG1_REG);
  918. reg_val &= ~(SPI_CFG1_CS_IDLE_MASK);
  919. reg_val |= ((chip_config->cs_idletime - 1) << SPI_CFG1_CS_IDLE_OFFSET);
  920. reg_val &= ~(SPI_CFG1_GET_TICK_DLY_MASK);
  921. reg_val |= ((chip_config->tckdly) << SPI_CFG1_GET_TICK_DLY_OFFSET);
  922. spi_writel(ms, SPI_CFG1_REG, reg_val);
  923. /*set the mlsbx and mlsbtx */
  924. reg_val = spi_readl(ms, SPI_CMD_REG);
  925. reg_val &= ~(SPI_CMD_TX_ENDIAN_MASK | SPI_CMD_RX_ENDIAN_MASK);
  926. reg_val &= ~(SPI_CMD_TXMSBF_MASK | SPI_CMD_RXMSBF_MASK);
  927. reg_val &= ~(SPI_CMD_CPHA_MASK | SPI_CMD_CPOL_MASK);
  928. reg_val |= (chip_config->tx_mlsb << SPI_CMD_TXMSBF_OFFSET);
  929. reg_val |= (chip_config->rx_mlsb << SPI_CMD_RXMSBF_OFFSET);
  930. reg_val |= (chip_config->tx_endian << SPI_CMD_TX_ENDIAN_OFFSET);
  931. reg_val |= (chip_config->rx_endian << SPI_CMD_RX_ENDIAN_OFFSET);
  932. reg_val |= (chip_config->sample_sel << SPI_CMD_SAMPLE_SEL_OFFSET);
  933. reg_val |= (chip_config->cs_pol << SPI_CMD_CS_POL_OFFSET);
  934. reg_val |= (chip_config->cpha << SPI_CMD_CPHA_OFFSET);
  935. reg_val |= (chip_config->cpol << SPI_CMD_CPOL_OFFSET);
  936. spi_writel(ms, SPI_CMD_REG, reg_val);
  937. /*set pause mode */
  938. reg_val = spi_readl(ms, SPI_CMD_REG);
  939. reg_val &= ~SPI_CMD_PAUSE_EN_MASK;
  940. reg_val &= ~SPI_CMD_PAUSE_IE_MASK;
  941. /*if ( chip_config->com_mod == DMA_TRANSFER ) */
  942. reg_val |= (chip_config->pause << SPI_CMD_PAUSE_IE_OFFSET);
  943. spi_writel(ms, SPI_CMD_REG, reg_val);
  944. /*set finish interrupt always enable */
  945. reg_val = spi_readl(ms, SPI_CMD_REG);
  946. reg_val &= ~SPI_CMD_FINISH_IE_MASK;
  947. /*reg_val |= ( chip_config->finish_intr << SPI_CMD_FINISH_IE_OFFSET ); */
  948. reg_val |= (1 << SPI_CMD_FINISH_IE_OFFSET);
  949. spi_writel(ms, SPI_CMD_REG, reg_val);
  950. /*set the communication of mode */
  951. reg_val = spi_readl(ms, SPI_CMD_REG);
  952. reg_val &= ~SPI_CMD_TX_DMA_MASK;
  953. reg_val &= ~SPI_CMD_RX_DMA_MASK;
  954. spi_writel(ms, SPI_CMD_REG, reg_val);
  955. /*set deassert mode */
  956. reg_val = spi_readl(ms, SPI_CMD_REG);
  957. reg_val &= ~SPI_CMD_DEASSERT_MASK;
  958. reg_val |= (chip_config->deassert << SPI_CMD_DEASSERT_OFFSET);
  959. spi_writel(ms, SPI_CMD_REG, reg_val);
  960. /* spi_writel(ms, SPI_PAD_SEL_REG, pad_macro);*/
  961. /*
  962. #if defined(GPIO_SPI_CS_PIN) && defined(GPIO_SPI_SCK_PIN)
  963. &&defined(GPIO_SPI_MISO_PIN) && defined(GPIO_SPI_MOSI_PIN)
  964. spi_writel(ms, SPI_PAD_SEL_REG, 0);
  965. #elif defined(GPIO_SPI2_CS_PIN) && defined(GPIO_SPI2_SCK_PIN)
  966. &&defined(GPIO_SPI2_MISO_PIN) && defined(GPIO_SPI2_MOSI_PIN)
  967. spi_writel(ms, SPI_PAD_SEL_REG, 1);
  968. #endif
  969. */
  970. /*set the timing */
  971. reg_val = spi_readl(ms, SPI_CFG2_REG);
  972. reg_val &= ~(SPI_CFG0_SCK_HIGH_MASK | SPI_CFG0_SCK_LOW_MASK);
  973. reg_val |= ((chip_config->high_time - 1) << SPI_CFG0_SCK_HIGH_OFFSET);
  974. reg_val |= ((chip_config->low_time - 1) << SPI_CFG0_SCK_LOW_OFFSET);
  975. spi_writel(ms, SPI_CFG2_REG, reg_val);
  976. #if 0
  977. /*set ultra high priority */
  978. reg_val &= ~SPI_ULTRA_HIGH_EN_MASK;
  979. reg_val |= chip_config->ulthigh << SPI_ULTRA_HIGH_EN_OFFSET;
  980. reg_val &= ~SPI_ULTRA_HIGH_THRESH_MASK;
  981. reg_val |= (chip_config->ulthgh_thrsh << SPI_ULTRA_HIGH_THRESH_OFFSET);
  982. spi_writel(ms, SPI_ULTRA_HIGH_REG, reg_val);
  983. #endif
  984. return 0;
  985. }
  986. static int mt_spi_setup(struct spi_device *spidev)
  987. {
  988. struct spi_master *master;
  989. struct mt_spi_t *ms;
  990. struct mt_chip_conf *chip_config = NULL;
  991. master = spidev->master;
  992. ms = spi_master_get_devdata(master);
  993. if (!spidev)
  994. dev_err(&spidev->dev, "spi device %s: error.\n", dev_name(&spidev->dev));
  995. if (spidev->chip_select >= master->num_chipselect) {
  996. dev_err(&spidev->dev, "spi device chip select excesses the number of master's chipselect number.\n");
  997. return -EINVAL;
  998. }
  999. chip_config = (struct mt_chip_conf *)spidev->controller_data;
  1000. if (!chip_config) {
  1001. chip_config = kzalloc(sizeof(struct mt_chip_conf), GFP_KERNEL);
  1002. if (!chip_config) {
  1003. dev_err(&spidev->dev, " spidev %s: can not get enough memory.\n", dev_name(&spidev->dev));
  1004. return -ENOMEM;
  1005. }
  1006. SPI_DBG("device %s: set default at chip's runtime state\n", dev_name(&spidev->dev));
  1007. chip_config->setuptime = 3;
  1008. chip_config->holdtime = 3;
  1009. chip_config->high_time = 10;
  1010. chip_config->low_time = 10;
  1011. chip_config->cs_idletime = 2;
  1012. chip_config->ulthgh_thrsh = 0;
  1013. chip_config->cpol = 0;
  1014. chip_config->cpha = 1;
  1015. chip_config->rx_mlsb = 1;
  1016. chip_config->tx_mlsb = 1;
  1017. chip_config->tx_endian = 0;
  1018. chip_config->rx_endian = 0;
  1019. chip_config->com_mod = DMA_TRANSFER;
  1020. chip_config->pause = 0;
  1021. chip_config->finish_intr = 1;
  1022. chip_config->deassert = 0;
  1023. chip_config->ulthigh = 0;
  1024. chip_config->tckdly = 0;
  1025. spidev->controller_data = chip_config;
  1026. }
  1027. SPI_INFO(&spidev->dev, "set up chip config,mode:%d\n", chip_config->com_mod);
  1028. /* #ifdef SPI_REC_DEBUG
  1029. pi_speed = 134300/(chip_config->low_time + chip_config->high_time);
  1030. #endif */
  1031. /*check chip configuration valid */
  1032. ms->config = chip_config;
  1033. if (!((chip_config->pause == PAUSE_MODE_ENABLE) || (chip_config->pause == PAUSE_MODE_DISABLE)) ||
  1034. !((chip_config->cpol == SPI_CPOL_0) || (chip_config->cpol == SPI_CPOL_1)) ||
  1035. !((chip_config->cpha == SPI_CPHA_0) || (chip_config->cpha == SPI_CPHA_1)) ||
  1036. !((chip_config->tx_mlsb == SPI_LSB) || (chip_config->tx_mlsb == SPI_MSB)) ||
  1037. !((chip_config->com_mod == FIFO_TRANSFER) || (chip_config->com_mod == DMA_TRANSFER) ||
  1038. (chip_config->com_mod == OTHER1) || (chip_config->com_mod == OTHER2))) {
  1039. return -EINVAL;
  1040. }
  1041. return 0;
  1042. }
  1043. static void mt_spi_cleanup(struct spi_device *spidev)
  1044. {
  1045. struct spi_master *master;
  1046. struct mt_spi_t *ms;
  1047. master = spidev->master;
  1048. ms = spi_master_get_devdata(master);
  1049. SPI_DBG("Calling mt_spi_cleanup.\n");
  1050. spidev->controller_data = NULL;
  1051. spidev->master = NULL;
  1052. }
  1053. static int __init mt_spi_probe(struct platform_device *pdev)
  1054. {
  1055. int ret = 0;
  1056. int irq;
  1057. struct resource *regs;
  1058. struct spi_master *master;
  1059. struct mt_spi_t *ms;
  1060. #ifdef CONFIG_OF
  1061. void __iomem *spi_base;
  1062. /*unsigned int pin[4];
  1063. unsigned int pin_mode[4];
  1064. unsigned int if_config = 1;
  1065. unsigned int i;*/
  1066. #endif
  1067. master = spi_alloc_master(&pdev->dev, sizeof(struct mt_spi_t));
  1068. ms = spi_master_get_devdata(master);
  1069. regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  1070. if (!regs) {
  1071. dev_err(&pdev->dev, "get resource regs NULL.\n");
  1072. return -ENXIO;
  1073. }
  1074. irq = platform_get_irq(pdev, 0);
  1075. if (irq < 0) {
  1076. dev_err(&pdev->dev, "platform_get_irq error. get invalid irq\n");
  1077. return irq;
  1078. }
  1079. if (!request_mem_region(regs->start, resource_size(regs), pdev->name)) {
  1080. dev_err(&pdev->dev, "SPI register memory region failed");
  1081. return -ENOMEM;
  1082. }
  1083. #ifdef CONFIG_OF
  1084. spi_base = of_iomap(pdev->dev.of_node, 0);
  1085. if (!spi_base) {
  1086. dev_err(&pdev->dev, "SPI iomap failed\n");
  1087. return -ENODEV;
  1088. }
  1089. if (of_property_read_u32(pdev->dev.of_node, "cell-index", &pdev->id)) {
  1090. dev_err(&pdev->dev, "SPI get cell-index failed\n");
  1091. return -ENODEV;
  1092. }
  1093. if (!pdev->dev.dma_mask)
  1094. pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
  1095. SPI_DBG("SPI reg: 0x%p irq: %d id: %d\n", spi_base, irq, pdev->id);
  1096. if (pdev->dev.of_node) {
  1097. #if !defined(CONFIG_MTK_CLKMGR)
  1098. /*
  1099. if(of_property_read_u32_index(pdev->dev.of_node,"spi-cs",0,&pin[0]) ||
  1100. of_property_read_u32_index(pdev->dev.of_node,"spi-clk",0,&pin[1]) ||
  1101. of_property_read_u32_index(pdev->dev.of_node,"spi-mo",0,&pin[2]) ||
  1102. of_property_read_u32_index(pdev->dev.of_node,"spi-mi",0,&pin[3]))
  1103. {
  1104. if_config=0;
  1105. dev_err(&pdev->dev, "SPI get spi pin failed\n");
  1106. }
  1107. if(of_property_read_u32_index(pdev->dev.of_node,"spi-cs",1,&pin_mode[0]) ||
  1108. of_property_read_u32_index(pdev->dev.of_node,"spi-clk",1,&pin_mode[1]) ||
  1109. of_property_read_u32_index(pdev->dev.of_node,"spi-mo",1,&pin_mode[2]) ||
  1110. of_property_read_u32_index(pdev->dev.of_node,"spi-mi",1,&pin_mode[3]))
  1111. {
  1112. dev_err(&pdev->dev, "SPI get spi pin mode failed\n");
  1113. }
  1114. if(if_config == 1)
  1115. {
  1116. for(i=0;i<4;i++)
  1117. {
  1118. #ifndef CONFIG_MTK_FPGA
  1119. mt_set_gpio_out((pin[i]|0x80000000),GPIO_OUT_ONE);
  1120. mt_set_gpio_mode((pin[i]|0x80000000), pin_mode[i]);
  1121. #endif
  1122. }
  1123. }
  1124. if (of_property_read_u32(pdev->dev.of_node, "spi-padmacro",
  1125. &pad_macro)) {
  1126. dev_err(&pdev->dev, "SPI get pad macro fail failed\n");
  1127. return -ENODEV;
  1128. }*/
  1129. ms->clk_main = devm_clk_get(&pdev->dev, "spi-main");
  1130. if (IS_ERR(ms->clk_main)) {
  1131. dev_err(&pdev->dev, "cannot get spi1 main clock or dma clock. main clk err : %ld .\n",
  1132. PTR_ERR(ms->clk_main));
  1133. return PTR_ERR(ms->clk_main);
  1134. }
  1135. #endif
  1136. }
  1137. #if 0
  1138. #if !defined(CONFIG_MTK_LEGACY)
  1139. spi_set_gpio_info(pdev, 1);
  1140. #endif
  1141. #endif
  1142. #endif
  1143. /*master = spi_alloc_master(&pdev->dev, sizeof(struct mt_spi_t)); */
  1144. if (!master) {
  1145. dev_err(&pdev->dev, " device %s: alloc spi master fail.\n", dev_name(&pdev->dev));
  1146. goto out;
  1147. }
  1148. /*hardware can only connect 1 slave.if you want to multiple, using gpio CS */
  1149. master->num_chipselect = 2;
  1150. master->mode_bits = (SPI_CPOL | SPI_CPHA);
  1151. master->bus_num = pdev->id;
  1152. master->setup = mt_spi_setup;
  1153. master->transfer = mt_spi_transfer;
  1154. master->cleanup = mt_spi_cleanup;
  1155. platform_set_drvdata(pdev, master);
  1156. /*ms = spi_master_get_devdata(master); */
  1157. #ifdef CONFIG_OF
  1158. ms->regs = spi_base;
  1159. #else
  1160. ms->regs = ioremap(regs->start, resource_size(regs));
  1161. #endif
  1162. ms->pdev = pdev;
  1163. ms->irq = irq;
  1164. ms->running = IDLE;
  1165. ms->cur_transfer = NULL;
  1166. ms->next_transfer = NULL;
  1167. wake_lock_init(&ms->wk_lock, WAKE_LOCK_SUSPEND, "spi_wakelock");
  1168. spin_lock_init(&ms->lock);
  1169. INIT_LIST_HEAD(&ms->queue);
  1170. SPI_INFO(&pdev->dev, "Controller at 0x%p (irq %d)\n", ms->regs, irq);
  1171. #ifdef CONFIG_OF
  1172. ret = request_irq(irq, mt_spi_interrupt, IRQF_TRIGGER_NONE, dev_name(&pdev->dev), ms);
  1173. #else
  1174. ret = request_irq(irq, mt_spi_interrupt, IRQF_TRIGGER_LOW, dev_name(&pdev->dev), ms);
  1175. #endif
  1176. if (ret) {
  1177. dev_err(&pdev->dev, "registering interrupt handler fails.\n");
  1178. goto out;
  1179. }
  1180. spi_master_set_devdata(master, ms);
  1181. #if !defined(CONFIG_MTK_CLKMGR)
  1182. /*
  1183. * prepare the clock source
  1184. */
  1185. ret = clk_prepare(ms->clk_main);
  1186. #endif
  1187. /*
  1188. * enable clk before access spi register
  1189. */
  1190. enable_clk(ms);
  1191. reset_spi(ms);
  1192. /*
  1193. * disable clk when finishing access spi register
  1194. */
  1195. disable_clk(ms);
  1196. ret = spi_register_master(master);
  1197. if (ret) {
  1198. dev_err(&pdev->dev, "spi_register_master fails.\n");
  1199. goto out_free;
  1200. } else {
  1201. SPI_DBG("spi register master success.\n");
  1202. return 0;
  1203. }
  1204. out_free:
  1205. free_irq(irq, ms);
  1206. out:
  1207. spi_master_put(master);
  1208. return ret;
  1209. }
  1210. static int __exit mt_spi_remove(struct platform_device *pdev)
  1211. {
  1212. struct mt_spi_t *ms;
  1213. struct spi_message *msg;
  1214. struct spi_master *master = platform_get_drvdata(pdev);
  1215. if (!master) {
  1216. dev_err(&pdev->dev, "master %s: is invalid.\n", dev_name(&pdev->dev));
  1217. return -EINVAL;
  1218. }
  1219. ms = spi_master_get_devdata(master);
  1220. list_for_each_entry(msg, &ms->queue, queue) {
  1221. msg->status = -ESHUTDOWN;
  1222. msg->complete(msg->context);
  1223. }
  1224. ms->cur_transfer = NULL;
  1225. ms->running = IDLE;
  1226. reset_spi(ms);
  1227. free_irq(ms->irq, master);
  1228. spi_unregister_master(master);
  1229. return 0;
  1230. }
  1231. #ifdef CONFIG_PM
/*
 * mt_spi_suspend - platform PM suspend hook: release the prepared clock.
 *
 * NOTE(review): the comment below says we should wait for an in-flight
 * interrupt/transfer to finish, but no such wait is implemented --
 * confirm suspend cannot race an active transfer.
 */
static int mt_spi_suspend(struct platform_device *pdev, pm_message_t message)
{
	/* if interrupt is enabled,
	 * then wait for interrupt complete. */
	struct mt_spi_t *ms;
	struct spi_master *master = platform_get_drvdata(pdev);
	ms = spi_master_get_devdata(master);
#if !defined(CONFIG_MTK_CLKMGR)
	/*
	 * unprepare the clock source
	 */
	clk_unprepare(ms->clk_main);
	SPI_DBG("spi mt_spi_suspend clk_unpreparer success.\n");
#endif
	return 0;
}
  1248. static int mt_spi_resume(struct platform_device *pdev)
  1249. {
  1250. #if !defined(CONFIG_MTK_CLKMGR)
  1251. int ret;
  1252. #endif
  1253. struct mt_spi_t *ms;
  1254. struct spi_master *master = platform_get_drvdata(pdev);
  1255. ms = spi_master_get_devdata(master);
  1256. #if !defined(CONFIG_MTK_CLKMGR)
  1257. /*
  1258. * prepare the clock source
  1259. */
  1260. ret = clk_prepare(ms->clk_main);
  1261. SPI_DBG("spi mt_spi_resume clk_prepare success.\n");
  1262. #endif
  1263. return 0;
  1264. }
  1265. #else
  1266. #define mt_spi_suspend NULL
  1267. #define mt_spi_resume NULL
  1268. #endif
/* Devicetree match table: MediaTek SoCs handled by this controller driver. */
static const struct of_device_id mt_spi_of_match[] = {
	/*{.compatible = "mediatek,SPI1",},*/
	{.compatible = "mediatek,mt6735-spi",},
	{.compatible = "mediatek,mt6735m-spi",},
	{.compatible = "mediatek,mt6753-spi",},
	{.compatible = "mediatek,mt6797-spi",},
	{},	/* sentinel */
};
MODULE_DEVICE_TABLE(of, mt_spi_of_match);
/*
 * Platform driver glue.
 *
 * NOTE(review): .probe points at an __init function; with deferred
 * probing the probe could run after init memory is discarded -- confirm
 * mt_spi_probe should not be __init.
 */
struct platform_driver mt_spi_driver = {
	.driver = {
		.name = "mt-spi",
		.owner = THIS_MODULE,
		.of_match_table = mt_spi_of_match,
	},
	.probe = mt_spi_probe,
	.suspend = mt_spi_suspend,
	.resume = mt_spi_resume,
	.remove = __exit_p(mt_spi_remove),
};
  1289. static int __init mt_spi_init(void)
  1290. {
  1291. int ret;
  1292. ret = platform_driver_register(&mt_spi_driver);
  1293. return ret;
  1294. }
  1295. static void __init mt_spi_exit(void)
  1296. {
  1297. platform_driver_unregister(&mt_spi_driver);
  1298. }
  1299. module_init(mt_spi_init);
  1300. module_exit(mt_spi_exit);
  1301. MODULE_DESCRIPTION("mt SPI Controller driver");
  1302. MODULE_AUTHOR("Ranran Lu <ranran.lu@mediatek.com>");
  1303. MODULE_LICENSE("GPL");
  1304. MODULE_ALIAS("platform: mt_spi");