xd.c 54 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926192719281929193019311932193319341935193619371938193919401941194219431944194519461947194819491950195119521953195419551956195719581959196019611962196319641965196619671968196919701971197219731974197519761977197819791980198119821983198419851986198719881989199019911992199319941995199619971998199920002001200220032004200520062007200820092010201120122013201420152016201720182019202020212022202320242025202620272028202920302031203220332034203520362037203820392040204120422043204420452046204720482049205020512052205320542055205620572058205920602061206220632064206520662067206820692070207120722073207420752076207720782079208020812082208320842085208620872088208920902091209220932094
  1. /* Driver for Realtek PCI-Express card reader
  2. *
  3. * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
  4. *
  5. * This program is free software; you can redistribute it and/or modify it
  6. * under the terms of the GNU General Public License as published by the
  7. * Free Software Foundation; either version 2, or (at your option) any
  8. * later version.
  9. *
  10. * This program is distributed in the hope that it will be useful, but
  11. * WITHOUT ANY WARRANTY; without even the implied warranty of
  12. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  13. * General Public License for more details.
  14. *
  15. * You should have received a copy of the GNU General Public License along
  16. * with this program; if not, see <http://www.gnu.org/licenses/>.
  17. *
  18. * Author:
  19. * Wei WANG (wei_wang@realsil.com.cn)
  20. * Micky Ching (micky_ching@realsil.com.cn)
  21. */
  22. #include <linux/blkdev.h>
  23. #include <linux/kthread.h>
  24. #include <linux/sched.h>
  25. #include <linux/vmalloc.h>
  26. #include "rtsx.h"
  27. #include "rtsx_transport.h"
  28. #include "rtsx_scsi.h"
  29. #include "rtsx_card.h"
  30. #include "xd.h"
  31. static int xd_build_l2p_tbl(struct rtsx_chip *chip, int zone_no);
  32. static int xd_init_page(struct rtsx_chip *chip, u32 phy_blk, u16 logoff,
  33. u8 start_page, u8 end_page);
  34. static inline void xd_set_err_code(struct rtsx_chip *chip, u8 err_code)
  35. {
  36. struct xd_info *xd_card = &(chip->xd_card);
  37. xd_card->err_code = err_code;
  38. }
  39. static inline int xd_check_err_code(struct rtsx_chip *chip, u8 err_code)
  40. {
  41. struct xd_info *xd_card = &(chip->xd_card);
  42. return (xd_card->err_code == err_code);
  43. }
  44. static int xd_set_init_para(struct rtsx_chip *chip)
  45. {
  46. struct xd_info *xd_card = &(chip->xd_card);
  47. int retval;
  48. if (chip->asic_code)
  49. xd_card->xd_clock = 47;
  50. else
  51. xd_card->xd_clock = CLK_50;
  52. retval = switch_clock(chip, xd_card->xd_clock);
  53. if (retval != STATUS_SUCCESS)
  54. TRACE_RET(chip, STATUS_FAIL);
  55. return STATUS_SUCCESS;
  56. }
  57. static int xd_switch_clock(struct rtsx_chip *chip)
  58. {
  59. struct xd_info *xd_card = &(chip->xd_card);
  60. int retval;
  61. retval = select_card(chip, XD_CARD);
  62. if (retval != STATUS_SUCCESS)
  63. TRACE_RET(chip, STATUS_FAIL);
  64. retval = switch_clock(chip, xd_card->xd_clock);
  65. if (retval != STATUS_SUCCESS)
  66. TRACE_RET(chip, STATUS_FAIL);
  67. return STATUS_SUCCESS;
  68. }
/*
 * Send an ID-read command (e.g. READ_ID or READ_xD_ID) to the card and
 * optionally copy up to four returned ID bytes into id_buf.
 * Returns STATUS_SUCCESS or STATUS_FAIL on a transfer error.
 */
static int xd_read_id(struct rtsx_chip *chip, u8 id_cmd, u8 *id_buf, u8 buf_len)
{
	int retval, i;
	u8 *ptr;

	rtsx_init_cmd(chip);

	/* Latch the command byte, kick off the transfer, wait for the end flag. */
	rtsx_add_cmd(chip, WRITE_REG_CMD, XD_DAT, 0xFF, id_cmd);
	rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, 0xFF,
		     XD_TRANSFER_START | XD_READ_ID);
	rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER, XD_TRANSFER_END,
		     XD_TRANSFER_END);

	/* Queue reads for the four ID bytes starting at XD_ADDRESS1. */
	for (i = 0; i < 4; i++)
		rtsx_add_cmd(chip, READ_REG_CMD, (u16)(XD_ADDRESS1 + i), 0, 0);

	retval = rtsx_send_cmd(chip, XD_CARD, 20);
	if (retval < 0)
		TRACE_RET(chip, STATUS_FAIL);

	/* Skip the leading status byte of the command result buffer. */
	ptr = rtsx_get_cmd_data(chip) + 1;
	if (id_buf && buf_len) {
		if (buf_len > 4)
			buf_len = 4;	/* only four ID bytes were read back */
		memcpy(id_buf, ptr, buf_len);
	}

	return STATUS_SUCCESS;
}
/*
 * Queue the register writes that latch a physical page/block address
 * into the xD address registers.  XD_RW_ADDR writes a zero byte to
 * XD_ADDRESS0 and uses xd_card->addr_cycle in XD_CFG; XD_ERASE_ADDR
 * starts the address at XD_ADDRESS0 and uses one fewer address cycle.
 * Must be called between rtsx_init_cmd() and rtsx_send_cmd().
 */
static void xd_assign_phy_addr(struct rtsx_chip *chip, u32 addr, u8 mode)
{
	struct xd_info *xd_card = &(chip->xd_card);

	switch (mode) {
	case XD_RW_ADDR:
		rtsx_add_cmd(chip, WRITE_REG_CMD, XD_ADDRESS0, 0xFF, 0);
		rtsx_add_cmd(chip, WRITE_REG_CMD, XD_ADDRESS1, 0xFF, (u8)addr);
		rtsx_add_cmd(chip, WRITE_REG_CMD, XD_ADDRESS2,
			     0xFF, (u8)(addr >> 8));
		rtsx_add_cmd(chip, WRITE_REG_CMD, XD_ADDRESS3,
			     0xFF, (u8)(addr >> 16));
		rtsx_add_cmd(chip, WRITE_REG_CMD, XD_CFG, 0xFF,
			     xd_card->addr_cycle | XD_CALC_ECC |
			     XD_BA_NO_TRANSFORM);
		break;

	case XD_ERASE_ADDR:
		rtsx_add_cmd(chip, WRITE_REG_CMD, XD_ADDRESS0, 0xFF, (u8)addr);
		rtsx_add_cmd(chip, WRITE_REG_CMD, XD_ADDRESS1,
			     0xFF, (u8)(addr >> 8));
		rtsx_add_cmd(chip, WRITE_REG_CMD, XD_ADDRESS2,
			     0xFF, (u8)(addr >> 16));
		rtsx_add_cmd(chip, WRITE_REG_CMD, XD_CFG, 0xFF,
			     (xd_card->addr_cycle - 1) | XD_CALC_ECC |
			     XD_BA_NO_TRANSFORM);
		break;

	default:
		/* Unknown addressing mode: queue nothing. */
		break;
	}
}
/*
 * Read the redundant (spare) area of the page at page_addr and copy up
 * to 11 bytes into buf: 6 status bytes, 4 reserved bytes and 1 parity
 * byte.  Returns STATUS_SUCCESS or STATUS_FAIL on a transfer error.
 */
static int xd_read_redundant(struct rtsx_chip *chip, u32 page_addr,
			     u8 *buf, int buf_len)
{
	int retval, i;

	rtsx_init_cmd(chip);

	xd_assign_phy_addr(chip, page_addr, XD_RW_ADDR);
	rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER,
		     0xFF, XD_TRANSFER_START | XD_READ_REDUNDANT);
	rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER,
		     XD_TRANSFER_END, XD_TRANSFER_END);

	/* 6 page-status bytes + 4 reserved bytes + 1 parity byte = 11. */
	for (i = 0; i < 6; i++)
		rtsx_add_cmd(chip, READ_REG_CMD, (u16)(XD_PAGE_STATUS + i),
			     0, 0);
	for (i = 0; i < 4; i++)
		rtsx_add_cmd(chip, READ_REG_CMD, (u16)(XD_RESERVED0 + i),
			     0, 0);
	rtsx_add_cmd(chip, READ_REG_CMD, XD_PARITY, 0, 0);

	retval = rtsx_send_cmd(chip, XD_CARD, 500);
	if (retval < 0)
		TRACE_RET(chip, STATUS_FAIL);

	if (buf && buf_len) {
		/* Skip the leading status byte of the result buffer. */
		u8 *ptr = rtsx_get_cmd_data(chip) + 1;

		if (buf_len > 11)
			buf_len = 11;	/* only 11 bytes were queued above */
		memcpy(buf, ptr, buf_len);
	}

	return STATUS_SUCCESS;
}
  148. static int xd_read_data_from_ppb(struct rtsx_chip *chip, int offset,
  149. u8 *buf, int buf_len)
  150. {
  151. int retval, i;
  152. if (!buf || (buf_len < 0))
  153. TRACE_RET(chip, STATUS_FAIL);
  154. rtsx_init_cmd(chip);
  155. for (i = 0; i < buf_len; i++)
  156. rtsx_add_cmd(chip, READ_REG_CMD, PPBUF_BASE2 + offset + i,
  157. 0, 0);
  158. retval = rtsx_send_cmd(chip, 0, 250);
  159. if (retval < 0) {
  160. rtsx_clear_xd_error(chip);
  161. TRACE_RET(chip, STATUS_FAIL);
  162. }
  163. memcpy(buf, rtsx_get_cmd_data(chip), buf_len);
  164. return STATUS_SUCCESS;
  165. }
/*
 * Read one page believed to hold CIS data into buf (at least 10 bytes),
 * applying single-bit ECC correction when the hardware reports a
 * correctable error.  The page is transferred into the ping-pong
 * buffer; data is taken from offset 0 for an ECC1 result or offset 256
 * for an ECC2 result.  Returns STATUS_FAIL on transfer error, bad page
 * status, or an uncorrectable ECC error in both halves.
 */
static int xd_read_cis(struct rtsx_chip *chip, u32 page_addr, u8 *buf,
		       int buf_len)
{
	int retval;
	u8 reg;

	if (!buf || (buf_len < 10))
		TRACE_RET(chip, STATUS_FAIL);

	rtsx_init_cmd(chip);

	xd_assign_phy_addr(chip, page_addr, XD_RW_ADDR);
	/* Route page data to the ping-pong buffer and read one page. */
	rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE,
		     0x01, PINGPONG_BUFFER);
	rtsx_add_cmd(chip, WRITE_REG_CMD, XD_PAGE_CNT, 0xFF, 1);
	rtsx_add_cmd(chip, WRITE_REG_CMD, XD_CHK_DATA_STATUS,
		     XD_AUTO_CHK_DATA_STATUS, XD_AUTO_CHK_DATA_STATUS);
	rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, 0xFF,
		     XD_TRANSFER_START | XD_READ_PAGES);
	rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER, XD_TRANSFER_END,
		     XD_TRANSFER_END);

	retval = rtsx_send_cmd(chip, XD_CARD, 250);
	if (retval == -ETIMEDOUT) {
		rtsx_clear_xd_error(chip);
		TRACE_RET(chip, STATUS_FAIL);
	}

	/* The page itself must be marked good. */
	RTSX_READ_REG(chip, XD_PAGE_STATUS, &reg);
	if (reg != XD_GPG) {
		rtsx_clear_xd_error(chip);
		TRACE_RET(chip, STATUS_FAIL);
	}

	RTSX_READ_REG(chip, XD_CTL, &reg);
	/* "No ECC1 error, or an ECC1 error that is correctable". */
	if (!(reg & XD_ECC1_ERROR) || !(reg & XD_ECC1_UNCORRECTABLE)) {
		retval = xd_read_data_from_ppb(chip, 0, buf, buf_len);
		if (retval != STATUS_SUCCESS)
			TRACE_RET(chip, STATUS_FAIL);
		if (reg & XD_ECC1_ERROR) {
			/* Correctable single-bit error: flip the bad bit. */
			u8 ecc_bit, ecc_byte;

			RTSX_READ_REG(chip, XD_ECC_BIT1, &ecc_bit);
			RTSX_READ_REG(chip, XD_ECC_BYTE1, &ecc_byte);
			dev_dbg(rtsx_dev(chip), "ECC_BIT1 = 0x%x, ECC_BYTE1 = 0x%x\n",
				ecc_bit, ecc_byte);
			if (ecc_byte < buf_len) {
				dev_dbg(rtsx_dev(chip), "Before correct: 0x%x\n",
					buf[ecc_byte]);
				buf[ecc_byte] ^= (1 << ecc_bit);
				dev_dbg(rtsx_dev(chip), "After correct: 0x%x\n",
					buf[ecc_byte]);
			}
		}
	} else if (!(reg & XD_ECC2_ERROR) || !(reg & XD_ECC2_UNCORRECTABLE)) {
		/* First half bad; fall back to the second 256-byte half. */
		rtsx_clear_xd_error(chip);
		retval = xd_read_data_from_ppb(chip, 256, buf, buf_len);
		if (retval != STATUS_SUCCESS)
			TRACE_RET(chip, STATUS_FAIL);
		if (reg & XD_ECC2_ERROR) {
			/* Correctable single-bit error: flip the bad bit. */
			u8 ecc_bit, ecc_byte;

			RTSX_READ_REG(chip, XD_ECC_BIT2, &ecc_bit);
			RTSX_READ_REG(chip, XD_ECC_BYTE2, &ecc_byte);
			dev_dbg(rtsx_dev(chip), "ECC_BIT2 = 0x%x, ECC_BYTE2 = 0x%x\n",
				ecc_bit, ecc_byte);
			if (ecc_byte < buf_len) {
				dev_dbg(rtsx_dev(chip), "Before correct: 0x%x\n",
					buf[ecc_byte]);
				buf[ecc_byte] ^= (1 << ecc_bit);
				dev_dbg(rtsx_dev(chip), "After correct: 0x%x\n",
					buf[ecc_byte]);
			}
		}
	} else {
		/* Both halves uncorrectable. */
		rtsx_clear_xd_error(chip);
		TRACE_RET(chip, STATUS_FAIL);
	}

	return STATUS_SUCCESS;
}
/*
 * Queue the pull-control register values that park the xD bus
 * (pull-downs on data/control lines, pull-up on card detect).
 * RTS5208 uses named per-pin values; RTS5288 QFN uses raw magic values.
 * Must be called between rtsx_init_cmd() and rtsx_send_cmd().
 */
static void xd_fill_pull_ctl_disable(struct rtsx_chip *chip)
{
	if (CHECK_PID(chip, 0x5208)) {
		rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL1, 0xFF,
			     XD_D3_PD | XD_D2_PD | XD_D1_PD | XD_D0_PD);
		rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL2, 0xFF,
			     XD_D7_PD | XD_D6_PD | XD_D5_PD | XD_D4_PD);
		rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL3, 0xFF,
			     XD_WP_PD | XD_CE_PD | XD_CLE_PD | XD_CD_PU);
		rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL4, 0xFF,
			     XD_RDY_PD | XD_WE_PD | XD_RE_PD | XD_ALE_PD);
		rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL5, 0xFF,
			     MS_INS_PU | SD_WP_PD | SD_CD_PU | SD_CMD_PD);
		rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL6, 0xFF,
			     MS_D5_PD | MS_D4_PD);
	} else if (CHECK_PID(chip, 0x5288)) {
		if (CHECK_BARO_PKG(chip, QFN)) {
			rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL1,
				     0xFF, 0x55);
			rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL2,
				     0xFF, 0x55);
			rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL3,
				     0xFF, 0x4B);
			rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL4,
				     0xFF, 0x69);
		}
	}
}
/*
 * Queue the stage-1 (pre-power-on) pull-control values for the
 * RTS5288 "Barossa" QFN package.  No-op for other packages.
 * Must be called between rtsx_init_cmd() and rtsx_send_cmd().
 */
static void xd_fill_pull_ctl_stage1_barossa(struct rtsx_chip *chip)
{
	if (CHECK_BARO_PKG(chip, QFN)) {
		rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL1, 0xFF, 0x55);
		rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL2, 0xFF, 0x55);
		rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL3, 0xFF, 0x4B);
		rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL4, 0xFF, 0x55);
	}
}
/*
 * Queue the pull-control register values for active xD operation
 * (pull-ups on CE/RDY/WE/RE for RTS5208; raw values for RTS5288 QFN).
 * Must be called between rtsx_init_cmd() and rtsx_send_cmd().
 */
static void xd_fill_pull_ctl_enable(struct rtsx_chip *chip)
{
	if (CHECK_PID(chip, 0x5208)) {
		rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL1, 0xFF,
			     XD_D3_PD | XD_D2_PD | XD_D1_PD | XD_D0_PD);
		rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL2, 0xFF,
			     XD_D7_PD | XD_D6_PD | XD_D5_PD | XD_D4_PD);
		rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL3, 0xFF,
			     XD_WP_PD | XD_CE_PU | XD_CLE_PD | XD_CD_PU);
		rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL4, 0xFF,
			     XD_RDY_PU | XD_WE_PU | XD_RE_PU | XD_ALE_PD);
		rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL5, 0xFF,
			     MS_INS_PU | SD_WP_PD | SD_CD_PU | SD_CMD_PD);
		rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL6, 0xFF,
			     MS_D5_PD | MS_D4_PD);
	} else if (CHECK_PID(chip, 0x5288)) {
		if (CHECK_BARO_PKG(chip, QFN)) {
			rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL1,
				     0xFF, 0x55);
			rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL2,
				     0xFF, 0x55);
			rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL3,
				     0xFF, 0x53);
			rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL4,
				     0xFF, 0xA9);
		}
	}
}
/*
 * Immediately write (not queue) the "disabled" pull-control values —
 * same settings as xd_fill_pull_ctl_disable() but applied via direct
 * register writes.  Always returns STATUS_SUCCESS unless a
 * RTSX_WRITE_REG fails (the macro returns on error internally).
 */
static int xd_pull_ctl_disable(struct rtsx_chip *chip)
{
	if (CHECK_PID(chip, 0x5208)) {
		RTSX_WRITE_REG(chip, CARD_PULL_CTL1, 0xFF,
			       XD_D3_PD | XD_D2_PD | XD_D1_PD | XD_D0_PD);
		RTSX_WRITE_REG(chip, CARD_PULL_CTL2, 0xFF,
			       XD_D7_PD | XD_D6_PD | XD_D5_PD | XD_D4_PD);
		RTSX_WRITE_REG(chip, CARD_PULL_CTL3, 0xFF,
			       XD_WP_PD | XD_CE_PD | XD_CLE_PD | XD_CD_PU);
		RTSX_WRITE_REG(chip, CARD_PULL_CTL4, 0xFF,
			       XD_RDY_PD | XD_WE_PD | XD_RE_PD | XD_ALE_PD);
		RTSX_WRITE_REG(chip, CARD_PULL_CTL5, 0xFF,
			       MS_INS_PU | SD_WP_PD | SD_CD_PU | SD_CMD_PD);
		RTSX_WRITE_REG(chip, CARD_PULL_CTL6, 0xFF, MS_D5_PD | MS_D4_PD);
	} else if (CHECK_PID(chip, 0x5288)) {
		if (CHECK_BARO_PKG(chip, QFN)) {
			RTSX_WRITE_REG(chip, CARD_PULL_CTL1, 0xFF, 0x55);
			RTSX_WRITE_REG(chip, CARD_PULL_CTL2, 0xFF, 0x55);
			RTSX_WRITE_REG(chip, CARD_PULL_CTL3, 0xFF, 0x4B);
			RTSX_WRITE_REG(chip, CARD_PULL_CTL4, 0xFF, 0x69);
		}
	}
	return STATUS_SUCCESS;
}
/*
 * Full power-on reset and identification of an xD card:
 *  1. select the card and park the bus (pull-control disable values);
 *  2. power-cycle the socket (skipped in ft2_fast_mode) with
 *     over-current checking when SUPPORT_OCP is built in;
 *  3. enable outputs and the interface clock;
 *  4. probe up to four timing settings until READ_ID returns a known
 *     device code, then confirm the timing by re-reading the ID 10x;
 *  5. verify the xD ID code and locate the CIS block in the first 24
 *     physical blocks.
 * On success fills in xd_card geometry/capacity and the LUN capacity
 * table; returns STATUS_FAIL on any unrecoverable step.
 */
static int reset_xd(struct rtsx_chip *chip)
{
	struct xd_info *xd_card = &(chip->xd_card);
	int retval, i, j;
	u8 *ptr, id_buf[4], redunt[11];

	retval = select_card(chip, XD_CARD);
	if (retval != STATUS_SUCCESS)
		TRACE_RET(chip, STATUS_FAIL);

	rtsx_init_cmd(chip);

	rtsx_add_cmd(chip, WRITE_REG_CMD, XD_CHK_DATA_STATUS, 0xFF,
		     XD_PGSTS_NOT_FF);
	if (chip->asic_code) {
		if (!CHECK_PID(chip, 0x5288))
			xd_fill_pull_ctl_disable(chip);
		else
			xd_fill_pull_ctl_stage1_barossa(chip);
	} else {
		/* FPGA build: single pull-control register. */
		rtsx_add_cmd(chip, WRITE_REG_CMD, FPGA_PULL_CTL, 0xFF,
			     (FPGA_XD_PULL_CTL_EN1 & FPGA_XD_PULL_CTL_EN3) |
			     0x20);
	}
	if (!chip->ft2_fast_mode)
		rtsx_add_cmd(chip, WRITE_REG_CMD, XD_INIT,
			     XD_NO_AUTO_PWR_OFF, 0);
	rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_OE, XD_OUTPUT_EN, 0);

	retval = rtsx_send_cmd(chip, XD_CARD, 100);
	if (retval < 0)
		TRACE_RET(chip, STATUS_FAIL);

	if (!chip->ft2_fast_mode) {
		/* Full power cycle: off, wait, re-program pulls, on. */
		retval = card_power_off(chip, XD_CARD);
		if (retval != STATUS_SUCCESS)
			TRACE_RET(chip, STATUS_FAIL);

		wait_timeout(250);

		rtsx_init_cmd(chip);

		if (chip->asic_code) {
			xd_fill_pull_ctl_enable(chip);
		} else {
			rtsx_add_cmd(chip, WRITE_REG_CMD, FPGA_PULL_CTL, 0xFF,
				     (FPGA_XD_PULL_CTL_EN1 &
				      FPGA_XD_PULL_CTL_EN2) |
				     0x20);
		}

		retval = rtsx_send_cmd(chip, XD_CARD, 100);
		if (retval < 0)
			TRACE_RET(chip, STATUS_FAIL);

		retval = card_power_on(chip, XD_CARD);
		if (retval != STATUS_SUCCESS)
			TRACE_RET(chip, STATUS_FAIL);

#ifdef SUPPORT_OCP
		wait_timeout(50);
		/* Abort if the over-current protection tripped. */
		if (chip->ocp_stat & (SD_OC_NOW | SD_OC_EVER)) {
			dev_dbg(rtsx_dev(chip), "Over current, OCPSTAT is 0x%x\n",
				chip->ocp_stat);
			TRACE_RET(chip, STATUS_FAIL);
		}
#endif
	}

	rtsx_init_cmd(chip);

	if (chip->ft2_fast_mode) {
		/* Fast mode skipped the power cycle; program pulls here. */
		if (chip->asic_code) {
			xd_fill_pull_ctl_enable(chip);
		} else {
			rtsx_add_cmd(chip, WRITE_REG_CMD, FPGA_PULL_CTL, 0xFF,
				     (FPGA_XD_PULL_CTL_EN1 &
				      FPGA_XD_PULL_CTL_EN2) |
				     0x20);
		}
	}

	rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_OE, XD_OUTPUT_EN, XD_OUTPUT_EN);
	rtsx_add_cmd(chip, WRITE_REG_CMD, XD_CTL, XD_CE_DISEN, XD_CE_DISEN);

	retval = rtsx_send_cmd(chip, XD_CARD, 100);
	if (retval < 0)
		TRACE_RET(chip, STATUS_FAIL);

	if (!chip->ft2_fast_mode)
		wait_timeout(200);

	retval = xd_set_init_para(chip);
	if (retval != STATUS_SUCCESS)
		TRACE_RET(chip, STATUS_FAIL);

	/* Read ID to check if the timing setting is right */
	for (i = 0; i < 4; i++) {
		rtsx_init_cmd(chip);

		/* Each iteration tries progressively slower R/W timing. */
		rtsx_add_cmd(chip, WRITE_REG_CMD, XD_DTCTL, 0xFF,
			     XD_TIME_SETUP_STEP * 3 +
			     XD_TIME_RW_STEP * (2 + i) + XD_TIME_RWN_STEP * i);
		rtsx_add_cmd(chip, WRITE_REG_CMD, XD_CATCTL, 0xFF,
			     XD_TIME_SETUP_STEP * 3 +
			     XD_TIME_RW_STEP * (4 + i) +
			     XD_TIME_RWN_STEP * (3 + i));

		rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, 0xFF,
			     XD_TRANSFER_START | XD_RESET);
		rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER,
			     XD_TRANSFER_END, XD_TRANSFER_END);

		rtsx_add_cmd(chip, READ_REG_CMD, XD_DAT, 0, 0);
		rtsx_add_cmd(chip, READ_REG_CMD, XD_CTL, 0, 0);

		retval = rtsx_send_cmd(chip, XD_CARD, 100);
		if (retval < 0)
			TRACE_RET(chip, STATUS_FAIL);

		ptr = rtsx_get_cmd_data(chip) + 1;

		dev_dbg(rtsx_dev(chip), "XD_DAT: 0x%x, XD_CTL: 0x%x\n",
			ptr[0], ptr[1]);

		/* Card not ready at this timing — try the next one. */
		if (((ptr[0] & READY_FLAG) != READY_STATE) ||
		    !(ptr[1] & XD_RDY))
			continue;

		retval = xd_read_id(chip, READ_ID, id_buf, 4);
		if (retval != STATUS_SUCCESS)
			TRACE_RET(chip, STATUS_FAIL);

		dev_dbg(rtsx_dev(chip), "READ_ID: 0x%x 0x%x 0x%x 0x%x\n",
			id_buf[0], id_buf[1], id_buf[2], id_buf[3]);

		xd_card->device_code = id_buf[1];

		/* Check if the xD card is supported */
		switch (xd_card->device_code) {
		case XD_4M_X8_512_1:
		case XD_4M_X8_512_2:
			xd_card->block_shift = 4;
			xd_card->page_off = 0x0F;
			xd_card->addr_cycle = 3;
			xd_card->zone_cnt = 1;
			xd_card->capacity = 8000;
			XD_SET_4MB(xd_card);
			break;
		case XD_8M_X8_512:
			xd_card->block_shift = 4;
			xd_card->page_off = 0x0F;
			xd_card->addr_cycle = 3;
			xd_card->zone_cnt = 1;
			xd_card->capacity = 16000;
			break;
		case XD_16M_X8_512:
			XD_PAGE_512(xd_card);
			xd_card->addr_cycle = 3;
			xd_card->zone_cnt = 1;
			xd_card->capacity = 32000;
			break;
		case XD_32M_X8_512:
			XD_PAGE_512(xd_card);
			xd_card->addr_cycle = 3;
			xd_card->zone_cnt = 2;
			xd_card->capacity = 64000;
			break;
		case XD_64M_X8_512:
			XD_PAGE_512(xd_card);
			xd_card->addr_cycle = 4;
			xd_card->zone_cnt = 4;
			xd_card->capacity = 128000;
			break;
		case XD_128M_X8_512:
			XD_PAGE_512(xd_card);
			xd_card->addr_cycle = 4;
			xd_card->zone_cnt = 8;
			xd_card->capacity = 256000;
			break;
		case XD_256M_X8_512:
			XD_PAGE_512(xd_card);
			xd_card->addr_cycle = 4;
			xd_card->zone_cnt = 16;
			xd_card->capacity = 512000;
			break;
		case XD_512M_X8:
			XD_PAGE_512(xd_card);
			xd_card->addr_cycle = 4;
			xd_card->zone_cnt = 32;
			xd_card->capacity = 1024000;
			break;
		case xD_1G_X8_512:
			XD_PAGE_512(xd_card);
			xd_card->addr_cycle = 4;
			xd_card->zone_cnt = 64;
			xd_card->capacity = 2048000;
			break;
		case xD_2G_X8_512:
			XD_PAGE_512(xd_card);
			xd_card->addr_cycle = 4;
			xd_card->zone_cnt = 128;
			xd_card->capacity = 4096000;
			break;
		default:
			/* Unknown device code: try the next timing. */
			continue;
		}

		/* Confirm timing setting */
		for (j = 0; j < 10; j++) {
			retval = xd_read_id(chip, READ_ID, id_buf, 4);
			if (retval != STATUS_SUCCESS)
				TRACE_RET(chip, STATUS_FAIL);

			/* Any mismatch means the timing is unstable. */
			if (id_buf[1] != xd_card->device_code)
				break;
		}

		/* j == 10: the device code was stable 10 times — done. */
		if (j == 10)
			break;
	}

	if (i == 4) {
		/* No timing produced a stable, supported ID. */
		xd_card->block_shift = 0;
		xd_card->page_off = 0;
		xd_card->addr_cycle = 0;
		xd_card->capacity = 0;

		TRACE_RET(chip, STATUS_FAIL);
	}

	retval = xd_read_id(chip, READ_xD_ID, id_buf, 4);
	if (retval != STATUS_SUCCESS)
		TRACE_RET(chip, STATUS_FAIL);
	dev_dbg(rtsx_dev(chip), "READ_xD_ID: 0x%x 0x%x 0x%x 0x%x\n",
		id_buf[0], id_buf[1], id_buf[2], id_buf[3]);
	if (id_buf[2] != XD_ID_CODE)
		TRACE_RET(chip, STATUS_FAIL);

	/* Search CIS block */
	for (i = 0; i < 24; i++) {
		u32 page_addr;

		if (detect_card_cd(chip, XD_CARD) != STATUS_SUCCESS)
			TRACE_RET(chip, STATUS_FAIL);

		page_addr = (u32)i << xd_card->block_shift;

		/* Up to three attempts to read the redundant area. */
		for (j = 0; j < 3; j++) {
			retval = xd_read_redundant(chip, page_addr, redunt, 11);
			if (retval == STATUS_SUCCESS)
				break;
		}
		if (j == 3)
			continue;

		if (redunt[BLOCK_STATUS] != XD_GBLK)
			continue;

		j = 0;
		if (redunt[PAGE_STATUS] != XD_GPG) {
			/* Page 0 bad: scan pages 1..8 for a good page. */
			for (j = 1; j <= 8; j++) {
				retval = xd_read_redundant(chip, page_addr + j,
							   redunt, 11);
				if (retval == STATUS_SUCCESS) {
					if (redunt[PAGE_STATUS] == XD_GPG)
						break;
				}
			}

			/* No good page in this block: give up the search. */
			if (j == 9)
				break;
		}

		/* Check CIS data */
		if ((redunt[BLOCK_STATUS] == XD_GBLK) &&
		    (redunt[PARITY] & XD_BA1_ALL0)) {
			u8 buf[10];

			page_addr += j;

			retval = xd_read_cis(chip, page_addr, buf, 10);
			if (retval != STATUS_SUCCESS)
				TRACE_RET(chip, STATUS_FAIL);

			/* Compare against the fixed 10-byte CIS signature. */
			if ((buf[0] == 0x01) && (buf[1] == 0x03) &&
			    (buf[2] == 0xD9)
			    && (buf[3] == 0x01) && (buf[4] == 0xFF)
			    && (buf[5] == 0x18) && (buf[6] == 0x02)
			    && (buf[7] == 0xDF) && (buf[8] == 0x01)
			    && (buf[9] == 0x20)) {
				xd_card->cis_block = (u16)i;
			}
		}

		break;
	}

	dev_dbg(rtsx_dev(chip), "CIS block: 0x%x\n", xd_card->cis_block);
	if (xd_card->cis_block == 0xFFFF)
		TRACE_RET(chip, STATUS_FAIL);

	chip->capacity[chip->card2lun[XD_CARD]] = xd_card->capacity;

	return STATUS_SUCCESS;
}
  579. static int xd_check_data_blank(u8 *redunt)
  580. {
  581. int i;
  582. for (i = 0; i < 6; i++) {
  583. if (redunt[PAGE_STATUS + i] != 0xFF)
  584. return 0;
  585. }
  586. if ((redunt[PARITY] & (XD_ECC1_ALL1 | XD_ECC2_ALL1))
  587. != (XD_ECC1_ALL1 | XD_ECC2_ALL1))
  588. return 0;
  589. for (i = 0; i < 4; i++) {
  590. if (redunt[RESERVED0 + i] != 0xFF)
  591. return 0;
  592. }
  593. return 1;
  594. }
  595. static u16 xd_load_log_block_addr(u8 *redunt)
  596. {
  597. u16 addr = 0xFFFF;
  598. if (redunt[PARITY] & XD_BA1_BA2_EQL)
  599. addr = ((u16)redunt[BLOCK_ADDR1_H] << 8) |
  600. redunt[BLOCK_ADDR1_L];
  601. else if (redunt[PARITY] & XD_BA1_VALID)
  602. addr = ((u16)redunt[BLOCK_ADDR1_H] << 8) |
  603. redunt[BLOCK_ADDR1_L];
  604. else if (redunt[PARITY] & XD_BA2_VALID)
  605. addr = ((u16)redunt[BLOCK_ADDR2_H] << 8) |
  606. redunt[BLOCK_ADDR2_L];
  607. return addr;
  608. }
  609. static int xd_init_l2p_tbl(struct rtsx_chip *chip)
  610. {
  611. struct xd_info *xd_card = &(chip->xd_card);
  612. int size, i;
  613. dev_dbg(rtsx_dev(chip), "xd_init_l2p_tbl: zone_cnt = %d\n",
  614. xd_card->zone_cnt);
  615. if (xd_card->zone_cnt < 1)
  616. TRACE_RET(chip, STATUS_FAIL);
  617. size = xd_card->zone_cnt * sizeof(struct zone_entry);
  618. dev_dbg(rtsx_dev(chip), "Buffer size for l2p table is %d\n", size);
  619. xd_card->zone = vmalloc(size);
  620. if (!xd_card->zone)
  621. TRACE_RET(chip, STATUS_ERROR);
  622. for (i = 0; i < xd_card->zone_cnt; i++) {
  623. xd_card->zone[i].build_flag = 0;
  624. xd_card->zone[i].l2p_table = NULL;
  625. xd_card->zone[i].free_table = NULL;
  626. xd_card->zone[i].get_index = 0;
  627. xd_card->zone[i].set_index = 0;
  628. xd_card->zone[i].unused_blk_cnt = 0;
  629. }
  630. return STATUS_SUCCESS;
  631. }
  632. static inline void free_zone(struct zone_entry *zone)
  633. {
  634. if (!zone)
  635. return;
  636. zone->build_flag = 0;
  637. zone->set_index = 0;
  638. zone->get_index = 0;
  639. zone->unused_blk_cnt = 0;
  640. if (zone->l2p_table) {
  641. vfree(zone->l2p_table);
  642. zone->l2p_table = NULL;
  643. }
  644. if (zone->free_table) {
  645. vfree(zone->free_table);
  646. zone->free_table = NULL;
  647. }
  648. }
/*
 * Push a physical block onto its zone's free-block ring buffer.
 * The zone is derived from the top bits of phy_blk (phy_blk >> 10);
 * only the low 10 bits are stored.  Builds the zone's L2P table on
 * demand, and tears the zone down if set_index is out of range.
 */
static void xd_set_unused_block(struct rtsx_chip *chip, u32 phy_blk)
{
	struct xd_info *xd_card = &(chip->xd_card);
	struct zone_entry *zone;
	int zone_no;

	zone_no = (int)phy_blk >> 10;
	if (zone_no >= xd_card->zone_cnt) {
		dev_dbg(rtsx_dev(chip), "Set unused block to invalid zone (zone_no = %d, zone_cnt = %d)\n",
			zone_no, xd_card->zone_cnt);
		return;
	}
	zone = &(xd_card->zone[zone_no]);

	/* Lazily build the zone's tables on first use. */
	if (zone->free_table == NULL) {
		if (xd_build_l2p_tbl(chip, zone_no) != STATUS_SUCCESS)
			return;
	}

	if ((zone->set_index >= XD_FREE_TABLE_CNT)
	    || (zone->set_index < 0)) {
		free_zone(zone);
		dev_dbg(rtsx_dev(chip), "Set unused block fail, invalid set_index\n");
		return;
	}

	dev_dbg(rtsx_dev(chip), "Set unused block to index %d\n",
		zone->set_index);

	/* Store only the in-zone offset; wrap the ring index. */
	zone->free_table[zone->set_index++] = (u16) (phy_blk & 0x3ff);
	if (zone->set_index >= XD_FREE_TABLE_CNT)
		zone->set_index = 0;
	zone->unused_blk_cnt++;
}
/*
 * Pop a physical block from a zone's free-block ring buffer and return
 * its full physical number (zone_no << 10 | in-zone offset), or
 * BLK_NOT_FOUND when the zone is invalid, empty, or its indices are
 * corrupt (in which case the zone is freed for a later rebuild).
 */
static u32 xd_get_unused_block(struct rtsx_chip *chip, int zone_no)
{
	struct xd_info *xd_card = &(chip->xd_card);
	struct zone_entry *zone;
	u32 phy_blk;

	if (zone_no >= xd_card->zone_cnt) {
		dev_dbg(rtsx_dev(chip), "Get unused block from invalid zone (zone_no = %d, zone_cnt = %d)\n",
			zone_no, xd_card->zone_cnt);
		return BLK_NOT_FOUND;
	}
	zone = &(xd_card->zone[zone_no]);

	/* Empty ring: count exhausted or read index caught up to write. */
	if ((zone->unused_blk_cnt == 0) ||
	    (zone->set_index == zone->get_index)) {
		free_zone(zone);
		dev_dbg(rtsx_dev(chip), "Get unused block fail, no unused block available\n");
		return BLK_NOT_FOUND;
	}
	if ((zone->get_index >= XD_FREE_TABLE_CNT) || (zone->get_index < 0)) {
		free_zone(zone);
		dev_dbg(rtsx_dev(chip), "Get unused block fail, invalid get_index\n");
		return BLK_NOT_FOUND;
	}

	dev_dbg(rtsx_dev(chip), "Get unused block from index %d\n",
		zone->get_index);

	/* Consume the slot, mark it empty, and wrap the ring index. */
	phy_blk = zone->free_table[zone->get_index];
	zone->free_table[zone->get_index++] = 0xFFFF;
	if (zone->get_index >= XD_FREE_TABLE_CNT)
		zone->get_index = 0;
	zone->unused_blk_cnt--;

	/* Re-attach the zone number to form the full physical block. */
	phy_blk += ((u32)(zone_no) << 10);
	return phy_blk;
}
  710. static void xd_set_l2p_tbl(struct rtsx_chip *chip,
  711. int zone_no, u16 log_off, u16 phy_off)
  712. {
  713. struct xd_info *xd_card = &(chip->xd_card);
  714. struct zone_entry *zone;
  715. zone = &(xd_card->zone[zone_no]);
  716. zone->l2p_table[log_off] = phy_off;
  717. }
/*
 * xd_get_l2p_tbl - look up (or lazily allocate) the physical block for a
 * logical offset in a zone.
 * @chip:    chip instance
 * @zone_no: zone to search
 * @log_off: logical offset inside the zone
 *
 * If no mapping exists yet (entry == 0xFFFF), any pending delayed write
 * is flushed first, then unused blocks are popped from the zone's free
 * list and initialised until one succeeds; that block becomes the new
 * mapping.  Returns the full physical block number, or BLK_NOT_FOUND.
 */
static u32 xd_get_l2p_tbl(struct rtsx_chip *chip, int zone_no, u16 log_off)
{
	struct xd_info *xd_card = &(chip->xd_card);
	struct zone_entry *zone;
	int retval;

	zone = &(xd_card->zone[zone_no]);
	if (zone->l2p_table[log_off] == 0xFFFF) {
		u32 phy_blk = 0;
		int i;

#ifdef XD_DELAY_WRITE
		/* Flush a pending delayed write before consuming free blocks. */
		retval = xd_delay_write(chip);
		if (retval != STATUS_SUCCESS) {
			dev_dbg(rtsx_dev(chip), "In xd_get_l2p_tbl, delay write fail!\n");
			return BLK_NOT_FOUND;
		}
#endif
		if (zone->unused_blk_cnt <= 0) {
			dev_dbg(rtsx_dev(chip), "No unused block!\n");
			return BLK_NOT_FOUND;
		}

		/* Try each available free block until one initialises cleanly. */
		for (i = 0; i < zone->unused_blk_cnt; i++) {
			phy_blk = xd_get_unused_block(chip, zone_no);
			if (phy_blk == BLK_NOT_FOUND) {
				dev_dbg(rtsx_dev(chip), "No unused block available!\n");
				return BLK_NOT_FOUND;
			}

			retval = xd_init_page(chip, phy_blk, log_off,
					      0, xd_card->page_off + 1);
			if (retval == STATUS_SUCCESS)
				break;
		}
		/* Loop exhausted without a successful init. */
		if (i >= zone->unused_blk_cnt) {
			dev_dbg(rtsx_dev(chip), "No good unused block available!\n");
			return BLK_NOT_FOUND;
		}

		xd_set_l2p_tbl(chip, zone_no, log_off, (u16)(phy_blk & 0x3FF));
		return phy_blk;
	}

	/* Mapping exists: fold the zone number back into the block number. */
	return (u32)zone->l2p_table[log_off] + ((u32)(zone_no) << 10);
}
  758. int reset_xd_card(struct rtsx_chip *chip)
  759. {
  760. struct xd_info *xd_card = &(chip->xd_card);
  761. int retval;
  762. memset(xd_card, 0, sizeof(struct xd_info));
  763. xd_card->block_shift = 0;
  764. xd_card->page_off = 0;
  765. xd_card->addr_cycle = 0;
  766. xd_card->capacity = 0;
  767. xd_card->zone_cnt = 0;
  768. xd_card->cis_block = 0xFFFF;
  769. xd_card->delay_write.delay_write_flag = 0;
  770. retval = enable_card_clock(chip, XD_CARD);
  771. if (retval != STATUS_SUCCESS)
  772. TRACE_RET(chip, STATUS_FAIL);
  773. retval = reset_xd(chip);
  774. if (retval != STATUS_SUCCESS)
  775. TRACE_RET(chip, STATUS_FAIL);
  776. retval = xd_init_l2p_tbl(chip);
  777. if (retval != STATUS_SUCCESS)
  778. TRACE_RET(chip, STATUS_FAIL);
  779. return STATUS_SUCCESS;
  780. }
/*
 * xd_mark_bad_block - stamp "later bad block" marks into a block's
 * redundant (spare) area.
 * @chip:    chip instance
 * @phy_blk: physical block to mark
 *
 * Loads the spare-area registers (page status good, block status
 * XD_LATER_BBLK, address and reserved bytes 0xFF) and writes the
 * redundant area of every page in the block.  On a transfer failure the
 * error code is chosen from XD_DAT (program error vs timeout).
 *
 * Returns STATUS_SUCCESS or STATUS_FAIL.
 */
static int xd_mark_bad_block(struct rtsx_chip *chip, u32 phy_blk)
{
	struct xd_info *xd_card = &(chip->xd_card);
	int retval;
	u32 page_addr;
	u8 reg = 0;

	dev_dbg(rtsx_dev(chip), "mark block 0x%x as bad block\n", phy_blk);

	if (phy_blk == BLK_NOT_FOUND)
		TRACE_RET(chip, STATUS_FAIL);

	rtsx_init_cmd(chip);

	/* Spare-area pattern: page "good", block "later bad", rest 0xFF. */
	rtsx_add_cmd(chip, WRITE_REG_CMD, XD_PAGE_STATUS, 0xFF, XD_GPG);
	rtsx_add_cmd(chip, WRITE_REG_CMD, XD_BLOCK_STATUS, 0xFF, XD_LATER_BBLK);
	rtsx_add_cmd(chip, WRITE_REG_CMD, XD_BLOCK_ADDR1_H, 0xFF, 0xFF);
	rtsx_add_cmd(chip, WRITE_REG_CMD, XD_BLOCK_ADDR1_L, 0xFF, 0xFF);
	rtsx_add_cmd(chip, WRITE_REG_CMD, XD_BLOCK_ADDR2_H, 0xFF, 0xFF);
	rtsx_add_cmd(chip, WRITE_REG_CMD, XD_BLOCK_ADDR2_L, 0xFF, 0xFF);
	rtsx_add_cmd(chip, WRITE_REG_CMD, XD_RESERVED0, 0xFF, 0xFF);
	rtsx_add_cmd(chip, WRITE_REG_CMD, XD_RESERVED1, 0xFF, 0xFF);
	rtsx_add_cmd(chip, WRITE_REG_CMD, XD_RESERVED2, 0xFF, 0xFF);
	rtsx_add_cmd(chip, WRITE_REG_CMD, XD_RESERVED3, 0xFF, 0xFF);

	page_addr = phy_blk << xd_card->block_shift;
	xd_assign_phy_addr(chip, page_addr, XD_RW_ADDR);

	/* Cover all pages of the block in one redundant-area write. */
	rtsx_add_cmd(chip, WRITE_REG_CMD, XD_PAGE_CNT, 0xFF,
		     xd_card->page_off + 1);

	rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, 0xFF,
		     XD_TRANSFER_START | XD_WRITE_REDUNDANT);
	rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER,
		     XD_TRANSFER_END, XD_TRANSFER_END);

	retval = rtsx_send_cmd(chip, XD_CARD, 500);
	if (retval < 0) {
		rtsx_clear_xd_error(chip);
		rtsx_read_register(chip, XD_DAT, &reg);
		if (reg & PROGRAM_ERROR)
			xd_set_err_code(chip, XD_PRG_ERROR);
		else
			xd_set_err_code(chip, XD_TO_ERROR);
		TRACE_RET(chip, STATUS_FAIL);
	}

	return STATUS_SUCCESS;
}
/*
 * xd_init_page - write the logical-block address into the redundant
 * area of a range of pages in a physical block.
 * @chip:       chip instance
 * @phy_blk:    physical block to initialise
 * @logoff:     logical offset to record in the spare area
 * @start_page: first page to write (inclusive)
 * @end_page:   page count bound (pages written: end_page - start_page)
 *
 * Marks the block/pages good, stores @logoff in the block-address
 * registers and issues a redundant-area write with block-address
 * transformation enabled.  A program error marks the block bad.
 *
 * Returns STATUS_SUCCESS or STATUS_FAIL.
 */
static int xd_init_page(struct rtsx_chip *chip, u32 phy_blk,
			u16 logoff, u8 start_page, u8 end_page)
{
	struct xd_info *xd_card = &(chip->xd_card);
	int retval;
	u32 page_addr;
	u8 reg = 0;

	dev_dbg(rtsx_dev(chip), "Init block 0x%x\n", phy_blk);

	if (start_page > end_page)
		TRACE_RET(chip, STATUS_FAIL);
	if (phy_blk == BLK_NOT_FOUND)
		TRACE_RET(chip, STATUS_FAIL);

	rtsx_init_cmd(chip);

	/* Spare area: good page/block status plus the logical address. */
	rtsx_add_cmd(chip, WRITE_REG_CMD, XD_PAGE_STATUS, 0xFF, 0xFF);
	rtsx_add_cmd(chip, WRITE_REG_CMD, XD_BLOCK_STATUS, 0xFF, 0xFF);
	rtsx_add_cmd(chip, WRITE_REG_CMD, XD_BLOCK_ADDR1_H,
		     0xFF, (u8)(logoff >> 8));
	rtsx_add_cmd(chip, WRITE_REG_CMD, XD_BLOCK_ADDR1_L, 0xFF, (u8)logoff);

	page_addr = (phy_blk << xd_card->block_shift) + start_page;
	xd_assign_phy_addr(chip, page_addr, XD_RW_ADDR);

	/* Let the controller transform the block address into the spare area. */
	rtsx_add_cmd(chip, WRITE_REG_CMD, XD_CFG,
		     XD_BA_TRANSFORM, XD_BA_TRANSFORM);

	rtsx_add_cmd(chip, WRITE_REG_CMD, XD_PAGE_CNT,
		     0xFF, (end_page - start_page));

	rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER,
		     0xFF, XD_TRANSFER_START | XD_WRITE_REDUNDANT);
	rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER,
		     XD_TRANSFER_END, XD_TRANSFER_END);

	retval = rtsx_send_cmd(chip, XD_CARD, 500);
	if (retval < 0) {
		rtsx_clear_xd_error(chip);
		rtsx_read_register(chip, XD_DAT, &reg);
		if (reg & PROGRAM_ERROR) {
			/* The flash rejected the program: retire the block. */
			xd_mark_bad_block(chip, phy_blk);
			xd_set_err_code(chip, XD_PRG_ERROR);
		} else {
			xd_set_err_code(chip, XD_TO_ERROR);
		}
		TRACE_RET(chip, STATUS_FAIL);
	}

	return STATUS_SUCCESS;
}
/*
 * xd_copy_page - copy a range of pages from one physical block to
 * another via the ping-pong buffer.
 * @chip:       chip instance
 * @old_blk:    source physical block
 * @new_blk:    destination physical block
 * @start_page: first page to copy (inclusive)
 * @end_page:   last page bound (exclusive)
 *
 * Each page is read into the ping-pong buffer and written back out.
 * Uncorrectable ECC errors on the source mark the old block bad (the
 * copy continues with the page status forced "bad page"); a program
 * error on the destination marks the new block bad and aborts.
 *
 * Returns STATUS_SUCCESS or STATUS_FAIL.
 */
static int xd_copy_page(struct rtsx_chip *chip, u32 old_blk, u32 new_blk,
			u8 start_page, u8 end_page)
{
	struct xd_info *xd_card = &(chip->xd_card);
	u32 old_page, new_page;
	u8 i, reg = 0;
	int retval;

	dev_dbg(rtsx_dev(chip), "Copy page from block 0x%x to block 0x%x\n",
		old_blk, new_blk);

	if (start_page > end_page)
		TRACE_RET(chip, STATUS_FAIL);

	if ((old_blk == BLK_NOT_FOUND) || (new_blk == BLK_NOT_FOUND))
		TRACE_RET(chip, STATUS_FAIL);

	old_page = (old_blk << xd_card->block_shift) + start_page;
	new_page = (new_blk << xd_card->block_shift) + start_page;

	XD_CLR_BAD_NEWBLK(xd_card);

	/* Route page data through the internal ping-pong buffer. */
	RTSX_WRITE_REG(chip, CARD_DATA_SOURCE, 0x01, PINGPONG_BUFFER);

	for (i = start_page; i < end_page; i++) {
		/* Bail out early if the card was pulled mid-copy. */
		if (detect_card_cd(chip, XD_CARD) != STATUS_SUCCESS) {
			rtsx_clear_xd_error(chip);
			xd_set_err_code(chip, XD_NO_CARD);
			TRACE_RET(chip, STATUS_FAIL);
		}

		/* Read one source page into the ping-pong buffer. */
		rtsx_init_cmd(chip);

		xd_assign_phy_addr(chip, old_page, XD_RW_ADDR);

		rtsx_add_cmd(chip, WRITE_REG_CMD, XD_PAGE_CNT, 0xFF, 1);
		rtsx_add_cmd(chip, WRITE_REG_CMD, XD_CHK_DATA_STATUS,
			     XD_AUTO_CHK_DATA_STATUS, 0);
		rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, 0xFF,
			     XD_TRANSFER_START | XD_READ_PAGES);
		rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER,
			     XD_TRANSFER_END, XD_TRANSFER_END);

		retval = rtsx_send_cmd(chip, XD_CARD, 500);
		if (retval < 0) {
			rtsx_clear_xd_error(chip);
			reg = 0;
			rtsx_read_register(chip, XD_CTL, &reg);
			if (reg & (XD_ECC1_ERROR | XD_ECC2_ERROR)) {
				wait_timeout(100);

				if (detect_card_cd(chip,
					XD_CARD) != STATUS_SUCCESS) {
					xd_set_err_code(chip, XD_NO_CARD);
					TRACE_RET(chip, STATUS_FAIL);
				}

				/*
				 * Uncorrectable ECC on either half of the
				 * page: keep copying, but flag the page bad
				 * and remember to retire the old block.
				 */
				if (((reg & (XD_ECC1_ERROR | XD_ECC1_UNCORRECTABLE)) ==
						(XD_ECC1_ERROR | XD_ECC1_UNCORRECTABLE))
					|| ((reg & (XD_ECC2_ERROR | XD_ECC2_UNCORRECTABLE)) ==
						(XD_ECC2_ERROR | XD_ECC2_UNCORRECTABLE))) {
					rtsx_write_register(chip,
							XD_PAGE_STATUS, 0xFF,
							XD_BPG);
					rtsx_write_register(chip,
							XD_BLOCK_STATUS, 0xFF,
							XD_GBLK);
					XD_SET_BAD_OLDBLK(xd_card);
					dev_dbg(rtsx_dev(chip), "old block 0x%x ecc error\n",
						old_blk);
				}
			} else {
				xd_set_err_code(chip, XD_TO_ERROR);
				TRACE_RET(chip, STATUS_FAIL);
			}
		}

		if (XD_CHK_BAD_OLDBLK(xd_card))
			rtsx_clear_xd_error(chip);

		/* Write the buffered page out to the destination block. */
		rtsx_init_cmd(chip);

		xd_assign_phy_addr(chip, new_page, XD_RW_ADDR);
		rtsx_add_cmd(chip, WRITE_REG_CMD, XD_PAGE_CNT, 0xFF, 1);
		rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, 0xFF,
			     XD_TRANSFER_START | XD_WRITE_PAGES);
		rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER,
			     XD_TRANSFER_END, XD_TRANSFER_END);

		retval = rtsx_send_cmd(chip, XD_CARD, 300);
		if (retval < 0) {
			rtsx_clear_xd_error(chip);
			reg = 0;
			rtsx_read_register(chip, XD_DAT, &reg);
			if (reg & PROGRAM_ERROR) {
				xd_mark_bad_block(chip, new_blk);
				xd_set_err_code(chip, XD_PRG_ERROR);
				XD_SET_BAD_NEWBLK(xd_card);
			} else {
				xd_set_err_code(chip, XD_TO_ERROR);
			}
			TRACE_RET(chip, STATUS_FAIL);
		}

		old_page++;
		new_page++;
	}

	return STATUS_SUCCESS;
}
  954. static int xd_reset_cmd(struct rtsx_chip *chip)
  955. {
  956. int retval;
  957. u8 *ptr;
  958. rtsx_init_cmd(chip);
  959. rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER,
  960. 0xFF, XD_TRANSFER_START | XD_RESET);
  961. rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER,
  962. XD_TRANSFER_END, XD_TRANSFER_END);
  963. rtsx_add_cmd(chip, READ_REG_CMD, XD_DAT, 0, 0);
  964. rtsx_add_cmd(chip, READ_REG_CMD, XD_CTL, 0, 0);
  965. retval = rtsx_send_cmd(chip, XD_CARD, 100);
  966. if (retval < 0)
  967. TRACE_RET(chip, STATUS_FAIL);
  968. ptr = rtsx_get_cmd_data(chip) + 1;
  969. if (((ptr[0] & READY_FLAG) == READY_STATE) && (ptr[1] & XD_RDY))
  970. return STATUS_SUCCESS;
  971. TRACE_RET(chip, STATUS_FAIL);
  972. }
/*
 * xd_erase_block - erase one physical block, retrying up to three times.
 * @chip:    chip instance
 * @phy_blk: physical block to erase
 *
 * On a transfer failure: a program error retires the block immediately;
 * otherwise the card is reset and the erase retried.  After a
 * successful transfer the returned status byte is still checked for
 * PROGRAM_ERROR.  Exhausting all retries also retires the block.
 *
 * Returns STATUS_SUCCESS or STATUS_FAIL.
 */
static int xd_erase_block(struct rtsx_chip *chip, u32 phy_blk)
{
	struct xd_info *xd_card = &(chip->xd_card);
	u32 page_addr;
	u8 reg = 0, *ptr;
	int i, retval;

	if (phy_blk == BLK_NOT_FOUND)
		TRACE_RET(chip, STATUS_FAIL);

	page_addr = phy_blk << xd_card->block_shift;

	/* Up to three erase attempts. */
	for (i = 0; i < 3; i++) {
		rtsx_init_cmd(chip);

		xd_assign_phy_addr(chip, page_addr, XD_ERASE_ADDR);

		rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, 0xFF,
			     XD_TRANSFER_START | XD_ERASE);
		rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER,
			     XD_TRANSFER_END, XD_TRANSFER_END);
		rtsx_add_cmd(chip, READ_REG_CMD, XD_DAT, 0, 0);

		retval = rtsx_send_cmd(chip, XD_CARD, 250);
		if (retval < 0) {
			rtsx_clear_xd_error(chip);
			rtsx_read_register(chip, XD_DAT, &reg);
			if (reg & PROGRAM_ERROR) {
				/* Erase was rejected: block is bad for good. */
				xd_mark_bad_block(chip, phy_blk);
				xd_set_err_code(chip, XD_PRG_ERROR);
				TRACE_RET(chip, STATUS_FAIL);
			} else {
				xd_set_err_code(chip, XD_ERASE_FAIL);
			}
			/* Reset the card before retrying the erase. */
			retval = xd_reset_cmd(chip);
			if (retval != STATUS_SUCCESS)
				TRACE_RET(chip, STATUS_FAIL);
			continue;
		}

		/* Transfer completed: verify the returned status byte. */
		ptr = rtsx_get_cmd_data(chip) + 1;
		if (*ptr & PROGRAM_ERROR) {
			xd_mark_bad_block(chip, phy_blk);
			xd_set_err_code(chip, XD_PRG_ERROR);
			TRACE_RET(chip, STATUS_FAIL);
		}

		return STATUS_SUCCESS;
	}

	/* All retries failed: retire the block. */
	xd_mark_bad_block(chip, phy_blk);
	xd_set_err_code(chip, XD_ERASE_FAIL);
	TRACE_RET(chip, STATUS_FAIL);
}
/*
 * xd_build_l2p_tbl - scan one zone of the card and build its
 * logical-to-physical table and free-block list.
 * @chip:    chip instance
 * @zone_no: zone (1024 physical blocks) to build
 *
 * Every physical block's redundant area is read; bad blocks are
 * skipped, blank blocks are queued as free, and blocks with a valid
 * logical address populate the L2P table.  Duplicate logical addresses
 * are resolved by comparing the last page of each candidate, erasing
 * the stale copy.  Finally, if there are not enough spare blocks left
 * the card is write-protected.
 *
 * Returns STATUS_SUCCESS or STATUS_FAIL.
 */
static int xd_build_l2p_tbl(struct rtsx_chip *chip, int zone_no)
{
	struct xd_info *xd_card = &(chip->xd_card);
	struct zone_entry *zone;
	int retval;
	u32 start, end, i;
	u16 max_logoff, cur_fst_page_logoff;
	u16 cur_lst_page_logoff, ent_lst_page_logoff;
	u8 redunt[11];

	dev_dbg(rtsx_dev(chip), "xd_build_l2p_tbl: %d\n", zone_no);

	if (xd_card->zone == NULL) {
		retval = xd_init_l2p_tbl(chip);
		if (retval != STATUS_SUCCESS)
			return retval;
	}

	if (xd_card->zone[zone_no].build_flag) {
		dev_dbg(rtsx_dev(chip), "l2p table of zone %d has been built\n",
			zone_no);
		return STATUS_SUCCESS;
	}

	zone = &(xd_card->zone[zone_no]);

	/* 2000 bytes = 1000 u16 entries, one per logical block in a zone. */
	if (zone->l2p_table == NULL) {
		zone->l2p_table = vmalloc(2000);
		if (zone->l2p_table == NULL)
			TRACE_GOTO(chip, Build_Fail);
	}
	memset((u8 *)(zone->l2p_table), 0xff, 2000);

	if (zone->free_table == NULL) {
		zone->free_table = vmalloc(XD_FREE_TABLE_CNT * 2);
		if (zone->free_table == NULL)
			TRACE_GOTO(chip, Build_Fail);
	}
	memset((u8 *)(zone->free_table), 0xff, XD_FREE_TABLE_CNT * 2);

	/*
	 * Zone 0 starts after the CIS block and may be a 4MB card with
	 * fewer blocks; other zones span a full 1024-block range.
	 */
	if (zone_no == 0) {
		if (xd_card->cis_block == 0xFFFF)
			start = 0;
		else
			start = xd_card->cis_block + 1;
		if (XD_CHK_4MB(xd_card)) {
			end = 0x200;
			max_logoff = 499;
		} else {
			end = 0x400;
			max_logoff = 999;
		}
	} else {
		start = (u32)(zone_no) << 10;
		end = (u32)(zone_no + 1) << 10;
		max_logoff = 999;
	}

	dev_dbg(rtsx_dev(chip), "start block 0x%x, end block 0x%x\n",
		start, end);

	zone->set_index = zone->get_index = 0;
	zone->unused_blk_cnt = 0;

	for (i = start; i < end; i++) {
		u32 page_addr = i << xd_card->block_shift;
		u32 phy_block;

		retval = xd_read_redundant(chip, page_addr, redunt, 11);
		if (retval != STATUS_SUCCESS)
			continue;

		if (redunt[BLOCK_STATUS] != 0xFF) {
			dev_dbg(rtsx_dev(chip), "bad block\n");
			continue;
		}

		if (xd_check_data_blank(redunt)) {
			dev_dbg(rtsx_dev(chip), "blank block\n");
			xd_set_unused_block(chip, i);
			continue;
		}

		cur_fst_page_logoff = xd_load_log_block_addr(redunt);
		/* Invalid logical address: erase and recycle the block. */
		if ((cur_fst_page_logoff == 0xFFFF) ||
			(cur_fst_page_logoff > max_logoff)) {
			retval = xd_erase_block(chip, i);
			if (retval == STATUS_SUCCESS)
				xd_set_unused_block(chip, i);
			continue;
		}

		/* Logical block 0 with a bad page status: MBR is suspect. */
		if ((zone_no == 0) && (cur_fst_page_logoff == 0) &&
			(redunt[PAGE_STATUS] != XD_GPG))
			XD_SET_MBR_FAIL(xd_card);

		if (zone->l2p_table[cur_fst_page_logoff] == 0xFFFF) {
			zone->l2p_table[cur_fst_page_logoff] = (u16)(i & 0x3FF);
			continue;
		}

		/*
		 * Duplicate logical address: compare the last page of the
		 * already-mapped block against this one to decide which
		 * copy is current.
		 */
		phy_block = zone->l2p_table[cur_fst_page_logoff] +
			((u32)((zone_no) << 10));

		page_addr = ((i + 1) << xd_card->block_shift) - 1;

		retval = xd_read_redundant(chip, page_addr, redunt, 11);
		if (retval != STATUS_SUCCESS)
			continue;

		cur_lst_page_logoff = xd_load_log_block_addr(redunt);
		if (cur_lst_page_logoff == cur_fst_page_logoff) {
			int m;

			page_addr = ((phy_block + 1) <<
				xd_card->block_shift) - 1;

			for (m = 0; m < 3; m++) {
				retval = xd_read_redundant(chip, page_addr,
							redunt, 11);
				if (retval == STATUS_SUCCESS)
					break;
			}

			/* Old copy unreadable: take the new block, erase old. */
			if (m == 3) {
				zone->l2p_table[cur_fst_page_logoff] =
					(u16)(i & 0x3FF);
				retval = xd_erase_block(chip, phy_block);
				if (retval == STATUS_SUCCESS)
					xd_set_unused_block(chip, phy_block);
				continue;
			}

			ent_lst_page_logoff = xd_load_log_block_addr(redunt);
			if (ent_lst_page_logoff != cur_fst_page_logoff) {
				zone->l2p_table[cur_fst_page_logoff] =
					(u16)(i & 0x3FF);
				retval = xd_erase_block(chip, phy_block);
				if (retval == STATUS_SUCCESS)
					xd_set_unused_block(chip, phy_block);
				continue;
			} else {
				retval = xd_erase_block(chip, i);
				if (retval == STATUS_SUCCESS)
					xd_set_unused_block(chip, i);
			}
		} else {
			retval = xd_erase_block(chip, i);
			if (retval == STATUS_SUCCESS)
				xd_set_unused_block(chip, i);
		}
	}

	/* Count logical entries left unmapped. */
	if (XD_CHK_4MB(xd_card))
		end = 500;
	else
		end = 1000;

	i = 0;
	for (start = 0; start < end; start++) {
		if (zone->l2p_table[start] == 0xFFFF)
			i++;
	}

	dev_dbg(rtsx_dev(chip), "Block count %d, invalid L2P entry %d\n",
		end, i);
	dev_dbg(rtsx_dev(chip), "Total unused block: %d\n",
		zone->unused_blk_cnt);

	/* Too few spares to absorb the unmapped entries: go read-only. */
	if ((zone->unused_blk_cnt - i) < 1)
		chip->card_wp |= XD_CARD;

	zone->build_flag = 1;

	return STATUS_SUCCESS;

Build_Fail:
	if (zone->l2p_table) {
		vfree(zone->l2p_table);
		zone->l2p_table = NULL;
	}
	if (zone->free_table) {
		vfree(zone->free_table);
		zone->free_table = NULL;
	}

	return STATUS_FAIL;
}
  1174. static int xd_send_cmd(struct rtsx_chip *chip, u8 cmd)
  1175. {
  1176. int retval;
  1177. rtsx_init_cmd(chip);
  1178. rtsx_add_cmd(chip, WRITE_REG_CMD, XD_DAT, 0xFF, cmd);
  1179. rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, 0xFF,
  1180. XD_TRANSFER_START | XD_SET_CMD);
  1181. rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER,
  1182. XD_TRANSFER_END, XD_TRANSFER_END);
  1183. retval = rtsx_send_cmd(chip, XD_CARD, 200);
  1184. if (retval < 0)
  1185. TRACE_RET(chip, STATUS_FAIL);
  1186. return STATUS_SUCCESS;
  1187. }
/*
 * xd_read_multiple_pages - DMA a run of pages from the card into a
 * scatter-gather buffer.
 * @chip:       chip instance
 * @phy_blk:    physical block to read from
 * @log_blk:    logical block (used for zone/offset bookkeeping)
 * @start_page: first page (inclusive)
 * @end_page:   page bound (exclusive)
 * @buf:        scatter-gather list buffer
 * @index:      in/out sg index cursor
 * @offset:     in/out offset within the current sg entry
 *
 * On an uncorrectable ECC failure the data is salvaged by copying the
 * block to a fresh one, remapping it in the L2P table and retiring the
 * old block.
 *
 * Returns STATUS_SUCCESS or STATUS_FAIL.
 */
static int xd_read_multiple_pages(struct rtsx_chip *chip, u32 phy_blk,
				u32 log_blk, u8 start_page, u8 end_page,
				u8 *buf, unsigned int *index,
				unsigned int *offset)
{
	struct xd_info *xd_card = &(chip->xd_card);
	u32 page_addr, new_blk;
	u16 log_off;
	u8 reg_val, page_cnt;
	int zone_no, retval, i;

	if (start_page > end_page)
		TRACE_RET(chip, STATUS_FAIL);

	page_cnt = end_page - start_page;
	zone_no = (int)(log_blk / 1000);
	log_off = (u16)(log_blk % 1000);

	/*
	 * In-zone offset 0x3FF is not a real mapping; probe blocks until a
	 * redundant-area read succeeds (card-removal is checked each try).
	 */
	if ((phy_blk & 0x3FF) == 0x3FF) {
		for (i = 0; i < 256; i++) {
			page_addr = ((u32)i) << xd_card->block_shift;

			retval = xd_read_redundant(chip, page_addr, NULL, 0);
			if (retval == STATUS_SUCCESS)
				break;

			if (detect_card_cd(chip, XD_CARD) != STATUS_SUCCESS) {
				xd_set_err_code(chip, XD_NO_CARD);
				TRACE_RET(chip, STATUS_FAIL);
			}
		}
	}

	page_addr = (phy_blk << xd_card->block_shift) + start_page;

	rtsx_init_cmd(chip);

	xd_assign_phy_addr(chip, page_addr, XD_RW_ADDR);

	rtsx_add_cmd(chip, WRITE_REG_CMD, XD_CFG, XD_PPB_TO_SIE, XD_PPB_TO_SIE);
	rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01, RING_BUFFER);
	rtsx_add_cmd(chip, WRITE_REG_CMD, XD_PAGE_CNT, 0xFF, page_cnt);
	rtsx_add_cmd(chip, WRITE_REG_CMD, XD_CHK_DATA_STATUS,
		XD_AUTO_CHK_DATA_STATUS, XD_AUTO_CHK_DATA_STATUS);

	trans_dma_enable(chip->srb->sc_data_direction, chip,
			page_cnt * 512, DMA_512);

	rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, 0xFF,
		XD_TRANSFER_START | XD_READ_PAGES);
	rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER,
		XD_TRANSFER_END | XD_PPB_EMPTY, XD_TRANSFER_END | XD_PPB_EMPTY);

	rtsx_send_cmd_no_wait(chip);

	retval = rtsx_transfer_data_partial(chip, XD_CARD, buf, page_cnt * 512,
					scsi_sg_count(chip->srb),
					index, offset, DMA_FROM_DEVICE,
					chip->xd_timeout);
	if (retval < 0) {
		rtsx_clear_xd_error(chip);

		if (retval == -ETIMEDOUT) {
			xd_set_err_code(chip, XD_TO_ERROR);
			TRACE_RET(chip, STATUS_FAIL);
		} else {
			TRACE_GOTO(chip, Fail);
		}
	}

	return STATUS_SUCCESS;

Fail:
	RTSX_READ_REG(chip, XD_PAGE_STATUS, &reg_val);

	if (reg_val != XD_GPG)
		xd_set_err_code(chip, XD_PRG_ERROR);

	RTSX_READ_REG(chip, XD_CTL, &reg_val);

	/* Uncorrectable ECC: rescue the block onto a fresh one. */
	if (((reg_val & (XD_ECC1_ERROR | XD_ECC1_UNCORRECTABLE))
				== (XD_ECC1_ERROR | XD_ECC1_UNCORRECTABLE))
		|| ((reg_val & (XD_ECC2_ERROR | XD_ECC2_UNCORRECTABLE))
			== (XD_ECC2_ERROR | XD_ECC2_UNCORRECTABLE))) {
		wait_timeout(100);

		if (detect_card_cd(chip, XD_CARD) != STATUS_SUCCESS) {
			xd_set_err_code(chip, XD_NO_CARD);
			TRACE_RET(chip, STATUS_FAIL);
		}

		xd_set_err_code(chip, XD_ECC_ERROR);

		new_blk = xd_get_unused_block(chip, zone_no);
		if (new_blk == NO_NEW_BLK) {
			XD_CLR_BAD_OLDBLK(xd_card);
			TRACE_RET(chip, STATUS_FAIL);
		}

		retval = xd_copy_page(chip, phy_blk, new_blk, 0,
				xd_card->page_off + 1);
		if (retval != STATUS_SUCCESS) {
			/* Copy failed: recycle the new block unless it went bad. */
			if (!XD_CHK_BAD_NEWBLK(xd_card)) {
				retval = xd_erase_block(chip, new_blk);
				if (retval == STATUS_SUCCESS)
					xd_set_unused_block(chip, new_blk);
			} else {
				XD_CLR_BAD_NEWBLK(xd_card);
			}
			XD_CLR_BAD_OLDBLK(xd_card);
			TRACE_RET(chip, STATUS_FAIL);
		}

		/* Remap, then retire the failing old block. */
		xd_set_l2p_tbl(chip, zone_no, log_off, (u16)(new_blk & 0x3FF));
		xd_erase_block(chip, phy_blk);
		xd_mark_bad_block(chip, phy_blk);
		XD_CLR_BAD_OLDBLK(xd_card);
	}

	TRACE_RET(chip, STATUS_FAIL);
}
/*
 * xd_finish_write - complete a (possibly delayed) write by filling the
 * tail pages of the new block and retiring/recycling the old block.
 * @chip:     chip instance
 * @old_blk:  previous physical block (BLK_NOT_FOUND if none existed)
 * @new_blk:  freshly written physical block
 * @log_blk:  logical block being written
 * @page_off: first page of the new block not yet written
 *
 * With no old block, the remaining pages are just initialised.
 * Otherwise the tail is copied from the old block, which is then
 * erased and either recycled or, if it was flagged bad during the
 * copy, marked bad.  Finally the L2P table is pointed at the new block.
 *
 * Returns STATUS_SUCCESS or STATUS_FAIL.
 */
static int xd_finish_write(struct rtsx_chip *chip,
		u32 old_blk, u32 new_blk, u32 log_blk, u8 page_off)
{
	struct xd_info *xd_card = &(chip->xd_card);
	int retval, zone_no;
	u16 log_off;

	dev_dbg(rtsx_dev(chip), "xd_finish_write, old_blk = 0x%x, new_blk = 0x%x, log_blk = 0x%x\n",
		old_blk, new_blk, log_blk);

	if (page_off > xd_card->page_off)
		TRACE_RET(chip, STATUS_FAIL);

	zone_no = (int)(log_blk / 1000);
	log_off = (u16)(log_blk % 1000);

	if (old_blk == BLK_NOT_FOUND) {
		/* No prior data: initialise the untouched tail pages. */
		retval = xd_init_page(chip, new_blk, log_off,
				page_off, xd_card->page_off + 1);
		if (retval != STATUS_SUCCESS) {
			retval = xd_erase_block(chip, new_blk);
			if (retval == STATUS_SUCCESS)
				xd_set_unused_block(chip, new_blk);
			TRACE_RET(chip, STATUS_FAIL);
		}
	} else {
		/* Carry the unwritten tail over from the old block. */
		retval = xd_copy_page(chip, old_blk, new_blk,
				page_off, xd_card->page_off + 1);
		if (retval != STATUS_SUCCESS) {
			if (!XD_CHK_BAD_NEWBLK(xd_card)) {
				retval = xd_erase_block(chip, new_blk);
				if (retval == STATUS_SUCCESS)
					xd_set_unused_block(chip, new_blk);
			}
			XD_CLR_BAD_NEWBLK(xd_card);
			TRACE_RET(chip, STATUS_FAIL);
		}

		retval = xd_erase_block(chip, old_blk);
		if (retval == STATUS_SUCCESS) {
			if (XD_CHK_BAD_OLDBLK(xd_card)) {
				/* Copy saw ECC damage: retire the old block. */
				xd_mark_bad_block(chip, old_blk);
				XD_CLR_BAD_OLDBLK(xd_card);
			} else {
				xd_set_unused_block(chip, old_blk);
			}
		} else {
			/* Erase failure of the stale block is non-fatal. */
			xd_set_err_code(chip, XD_NO_ERROR);
			XD_CLR_BAD_OLDBLK(xd_card);
		}
	}

	xd_set_l2p_tbl(chip, zone_no, log_off, (u16)(new_blk & 0x3FF));

	return STATUS_SUCCESS;
}
  1333. static int xd_prepare_write(struct rtsx_chip *chip,
  1334. u32 old_blk, u32 new_blk, u32 log_blk, u8 page_off)
  1335. {
  1336. int retval;
  1337. dev_dbg(rtsx_dev(chip), "%s, old_blk = 0x%x, new_blk = 0x%x, log_blk = 0x%x, page_off = %d\n",
  1338. __func__, old_blk, new_blk, log_blk, (int)page_off);
  1339. if (page_off) {
  1340. retval = xd_copy_page(chip, old_blk, new_blk, 0, page_off);
  1341. if (retval != STATUS_SUCCESS)
  1342. TRACE_RET(chip, STATUS_FAIL);
  1343. }
  1344. return STATUS_SUCCESS;
  1345. }
/*
 * xd_write_multiple_pages - DMA a run of pages from a scatter-gather
 * buffer into a new physical block.
 * @chip:       chip instance
 * @old_blk:    previous physical block (erased/recycled when the block
 *              write completes)
 * @new_blk:    destination physical block
 * @log_blk:    logical block being written
 * @start_page: first page (inclusive)
 * @end_page:   page bound (exclusive)
 * @buf:        scatter-gather list buffer
 * @index:      in/out sg index cursor
 * @offset:     in/out offset within the current sg entry
 *
 * When @end_page reaches the end of the block, the delayed-write flag
 * is cleared, the old block is erased (recycled or retired) and the
 * L2P table is updated to point at the new block.  A program error
 * marks the new block bad.
 *
 * Returns STATUS_SUCCESS or STATUS_FAIL.
 */
static int xd_write_multiple_pages(struct rtsx_chip *chip, u32 old_blk,
				u32 new_blk, u32 log_blk, u8 start_page,
				u8 end_page, u8 *buf, unsigned int *index,
				unsigned int *offset)
{
	struct xd_info *xd_card = &(chip->xd_card);
	u32 page_addr;
	int zone_no, retval;
	u16 log_off;
	u8 page_cnt, reg_val;

	dev_dbg(rtsx_dev(chip), "%s, old_blk = 0x%x, new_blk = 0x%x, log_blk = 0x%x\n",
		__func__, old_blk, new_blk, log_blk);

	if (start_page > end_page)
		TRACE_RET(chip, STATUS_FAIL);

	page_cnt = end_page - start_page;
	zone_no = (int)(log_blk / 1000);
	log_off = (u16)(log_blk % 1000);

	page_addr = (new_blk << xd_card->block_shift) + start_page;

	/* Put the card into read mode before setting up the write. */
	retval = xd_send_cmd(chip, READ1_1);
	if (retval != STATUS_SUCCESS)
		TRACE_RET(chip, STATUS_FAIL);

	rtsx_init_cmd(chip);

	/* Spare area: logical address plus good block/page status. */
	rtsx_add_cmd(chip, WRITE_REG_CMD, XD_BLOCK_ADDR1_H,
		0xFF, (u8)(log_off >> 8));
	rtsx_add_cmd(chip, WRITE_REG_CMD, XD_BLOCK_ADDR1_L, 0xFF, (u8)log_off);
	rtsx_add_cmd(chip, WRITE_REG_CMD, XD_BLOCK_STATUS, 0xFF, XD_GBLK);
	rtsx_add_cmd(chip, WRITE_REG_CMD, XD_PAGE_STATUS, 0xFF, XD_GPG);

	xd_assign_phy_addr(chip, page_addr, XD_RW_ADDR);

	rtsx_add_cmd(chip, WRITE_REG_CMD, XD_CFG, XD_BA_TRANSFORM,
		XD_BA_TRANSFORM);
	rtsx_add_cmd(chip, WRITE_REG_CMD, XD_PAGE_CNT, 0xFF, page_cnt);
	rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01, RING_BUFFER);

	trans_dma_enable(chip->srb->sc_data_direction, chip,
			page_cnt * 512, DMA_512);

	rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER,
		0xFF, XD_TRANSFER_START | XD_WRITE_PAGES);
	rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER,
		XD_TRANSFER_END, XD_TRANSFER_END);

	rtsx_send_cmd_no_wait(chip);

	retval = rtsx_transfer_data_partial(chip, XD_CARD, buf, page_cnt * 512,
					scsi_sg_count(chip->srb),
					index, offset, DMA_TO_DEVICE, chip->xd_timeout);
	if (retval < 0) {
		rtsx_clear_xd_error(chip);

		if (retval == -ETIMEDOUT) {
			xd_set_err_code(chip, XD_TO_ERROR);
			TRACE_RET(chip, STATUS_FAIL);
		} else {
			TRACE_GOTO(chip, Fail);
		}
	}

	/* Block complete: retire the old block and remap. */
	if (end_page == (xd_card->page_off + 1)) {
		xd_card->delay_write.delay_write_flag = 0;

		if (old_blk != BLK_NOT_FOUND) {
			retval = xd_erase_block(chip, old_blk);
			if (retval == STATUS_SUCCESS) {
				if (XD_CHK_BAD_OLDBLK(xd_card)) {
					xd_mark_bad_block(chip, old_blk);
					XD_CLR_BAD_OLDBLK(xd_card);
				} else {
					xd_set_unused_block(chip, old_blk);
				}
			} else {
				/* Erase failure of the stale copy is non-fatal. */
				xd_set_err_code(chip, XD_NO_ERROR);
				XD_CLR_BAD_OLDBLK(xd_card);
			}
		}
		xd_set_l2p_tbl(chip, zone_no, log_off, (u16)(new_blk & 0x3FF));
	}

	return STATUS_SUCCESS;

Fail:
	RTSX_READ_REG(chip, XD_DAT, &reg_val);
	if (reg_val & PROGRAM_ERROR) {
		xd_set_err_code(chip, XD_PRG_ERROR);
		xd_mark_bad_block(chip, new_blk);
	}

	TRACE_RET(chip, STATUS_FAIL);
}
  1424. #ifdef XD_DELAY_WRITE
  1425. int xd_delay_write(struct rtsx_chip *chip)
  1426. {
  1427. struct xd_info *xd_card = &(chip->xd_card);
  1428. struct xd_delay_write_tag *delay_write = &(xd_card->delay_write);
  1429. int retval;
  1430. if (delay_write->delay_write_flag) {
  1431. dev_dbg(rtsx_dev(chip), "xd_delay_write\n");
  1432. retval = xd_switch_clock(chip);
  1433. if (retval != STATUS_SUCCESS)
  1434. TRACE_RET(chip, STATUS_FAIL);
  1435. delay_write->delay_write_flag = 0;
  1436. retval = xd_finish_write(chip,
  1437. delay_write->old_phyblock,
  1438. delay_write->new_phyblock,
  1439. delay_write->logblock, delay_write->pageoff);
  1440. if (retval != STATUS_SUCCESS)
  1441. TRACE_RET(chip, STATUS_FAIL);
  1442. }
  1443. return STATUS_SUCCESS;
  1444. }
  1445. #endif
  1446. int xd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip,
  1447. u32 start_sector, u16 sector_cnt)
  1448. {
  1449. struct xd_info *xd_card = &(chip->xd_card);
  1450. unsigned int lun = SCSI_LUN(srb);
  1451. #ifdef XD_DELAY_WRITE
  1452. struct xd_delay_write_tag *delay_write = &(xd_card->delay_write);
  1453. #endif
  1454. int retval, zone_no;
  1455. unsigned int index = 0, offset = 0;
  1456. u32 log_blk, old_blk = 0, new_blk = 0;
  1457. u16 log_off, total_sec_cnt = sector_cnt;
  1458. u8 start_page, end_page = 0, page_cnt;
  1459. u8 *ptr;
  1460. xd_set_err_code(chip, XD_NO_ERROR);
  1461. xd_card->cleanup_counter = 0;
  1462. dev_dbg(rtsx_dev(chip), "xd_rw: scsi_sg_count = %d\n",
  1463. scsi_sg_count(srb));
  1464. ptr = (u8 *)scsi_sglist(srb);
  1465. retval = xd_switch_clock(chip);
  1466. if (retval != STATUS_SUCCESS)
  1467. TRACE_RET(chip, STATUS_FAIL);
  1468. if (detect_card_cd(chip, XD_CARD) != STATUS_SUCCESS) {
  1469. chip->card_fail |= XD_CARD;
  1470. set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
  1471. TRACE_RET(chip, STATUS_FAIL);
  1472. }
  1473. log_blk = start_sector >> xd_card->block_shift;
  1474. start_page = (u8)start_sector & xd_card->page_off;
  1475. zone_no = (int)(log_blk / 1000);
  1476. log_off = (u16)(log_blk % 1000);
  1477. if (xd_card->zone[zone_no].build_flag == 0) {
  1478. retval = xd_build_l2p_tbl(chip, zone_no);
  1479. if (retval != STATUS_SUCCESS) {
  1480. chip->card_fail |= XD_CARD;
  1481. set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
  1482. TRACE_RET(chip, STATUS_FAIL);
  1483. }
  1484. }
  1485. if (srb->sc_data_direction == DMA_TO_DEVICE) {
  1486. #ifdef XD_DELAY_WRITE
  1487. if (delay_write->delay_write_flag &&
  1488. (delay_write->logblock == log_blk) &&
  1489. (start_page > delay_write->pageoff)) {
  1490. delay_write->delay_write_flag = 0;
  1491. if (delay_write->old_phyblock != BLK_NOT_FOUND) {
  1492. retval = xd_copy_page(chip,
  1493. delay_write->old_phyblock,
  1494. delay_write->new_phyblock,
  1495. delay_write->pageoff, start_page);
  1496. if (retval != STATUS_SUCCESS) {
  1497. set_sense_type(chip, lun,
  1498. SENSE_TYPE_MEDIA_WRITE_ERR);
  1499. TRACE_RET(chip, STATUS_FAIL);
  1500. }
  1501. }
  1502. old_blk = delay_write->old_phyblock;
  1503. new_blk = delay_write->new_phyblock;
  1504. } else if (delay_write->delay_write_flag &&
  1505. (delay_write->logblock == log_blk) &&
  1506. (start_page == delay_write->pageoff)) {
  1507. delay_write->delay_write_flag = 0;
  1508. old_blk = delay_write->old_phyblock;
  1509. new_blk = delay_write->new_phyblock;
  1510. } else {
  1511. retval = xd_delay_write(chip);
  1512. if (retval != STATUS_SUCCESS) {
  1513. set_sense_type(chip, lun,
  1514. SENSE_TYPE_MEDIA_WRITE_ERR);
  1515. TRACE_RET(chip, STATUS_FAIL);
  1516. }
  1517. #endif
  1518. old_blk = xd_get_l2p_tbl(chip, zone_no, log_off);
  1519. new_blk = xd_get_unused_block(chip, zone_no);
  1520. if ((old_blk == BLK_NOT_FOUND) ||
  1521. (new_blk == BLK_NOT_FOUND)) {
  1522. set_sense_type(chip, lun,
  1523. SENSE_TYPE_MEDIA_WRITE_ERR);
  1524. TRACE_RET(chip, STATUS_FAIL);
  1525. }
  1526. retval = xd_prepare_write(chip, old_blk, new_blk,
  1527. log_blk, start_page);
  1528. if (retval != STATUS_SUCCESS) {
  1529. if (detect_card_cd(chip, XD_CARD) !=
  1530. STATUS_SUCCESS) {
  1531. set_sense_type(chip, lun,
  1532. SENSE_TYPE_MEDIA_NOT_PRESENT);
  1533. TRACE_RET(chip, STATUS_FAIL);
  1534. }
  1535. set_sense_type(chip, lun,
  1536. SENSE_TYPE_MEDIA_WRITE_ERR);
  1537. TRACE_RET(chip, STATUS_FAIL);
  1538. }
  1539. #ifdef XD_DELAY_WRITE
  1540. }
  1541. #endif
  1542. } else {
  1543. #ifdef XD_DELAY_WRITE
  1544. retval = xd_delay_write(chip);
  1545. if (retval != STATUS_SUCCESS) {
  1546. if (detect_card_cd(chip, XD_CARD) != STATUS_SUCCESS) {
  1547. set_sense_type(chip, lun,
  1548. SENSE_TYPE_MEDIA_NOT_PRESENT);
  1549. TRACE_RET(chip, STATUS_FAIL);
  1550. }
  1551. set_sense_type(chip, lun,
  1552. SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
  1553. TRACE_RET(chip, STATUS_FAIL);
  1554. }
  1555. #endif
  1556. old_blk = xd_get_l2p_tbl(chip, zone_no, log_off);
  1557. if (old_blk == BLK_NOT_FOUND) {
  1558. set_sense_type(chip, lun,
  1559. SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
  1560. TRACE_RET(chip, STATUS_FAIL);
  1561. }
  1562. }
  1563. dev_dbg(rtsx_dev(chip), "old_blk = 0x%x\n", old_blk);
  1564. while (total_sec_cnt) {
  1565. if (detect_card_cd(chip, XD_CARD) != STATUS_SUCCESS) {
  1566. chip->card_fail |= XD_CARD;
  1567. set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
  1568. TRACE_RET(chip, STATUS_FAIL);
  1569. }
  1570. if ((start_page + total_sec_cnt) > (xd_card->page_off + 1))
  1571. end_page = xd_card->page_off + 1;
  1572. else
  1573. end_page = start_page + (u8)total_sec_cnt;
  1574. page_cnt = end_page - start_page;
  1575. if (srb->sc_data_direction == DMA_FROM_DEVICE) {
  1576. retval = xd_read_multiple_pages(chip, old_blk, log_blk,
  1577. start_page, end_page, ptr,
  1578. &index, &offset);
  1579. if (retval != STATUS_SUCCESS) {
  1580. set_sense_type(chip, lun,
  1581. SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
  1582. TRACE_RET(chip, STATUS_FAIL);
  1583. }
  1584. } else {
  1585. retval = xd_write_multiple_pages(chip, old_blk,
  1586. new_blk, log_blk,
  1587. start_page, end_page, ptr,
  1588. &index, &offset);
  1589. if (retval != STATUS_SUCCESS) {
  1590. set_sense_type(chip, lun,
  1591. SENSE_TYPE_MEDIA_WRITE_ERR);
  1592. TRACE_RET(chip, STATUS_FAIL);
  1593. }
  1594. }
  1595. total_sec_cnt -= page_cnt;
  1596. if (scsi_sg_count(srb) == 0)
  1597. ptr += page_cnt * 512;
  1598. if (total_sec_cnt == 0)
  1599. break;
  1600. log_blk++;
  1601. zone_no = (int)(log_blk / 1000);
  1602. log_off = (u16)(log_blk % 1000);
  1603. if (xd_card->zone[zone_no].build_flag == 0) {
  1604. retval = xd_build_l2p_tbl(chip, zone_no);
  1605. if (retval != STATUS_SUCCESS) {
  1606. chip->card_fail |= XD_CARD;
  1607. set_sense_type(chip, lun,
  1608. SENSE_TYPE_MEDIA_NOT_PRESENT);
  1609. TRACE_RET(chip, STATUS_FAIL);
  1610. }
  1611. }
  1612. old_blk = xd_get_l2p_tbl(chip, zone_no, log_off);
  1613. if (old_blk == BLK_NOT_FOUND) {
  1614. if (srb->sc_data_direction == DMA_FROM_DEVICE)
  1615. set_sense_type(chip, lun,
  1616. SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
  1617. else
  1618. set_sense_type(chip, lun,
  1619. SENSE_TYPE_MEDIA_WRITE_ERR);
  1620. TRACE_RET(chip, STATUS_FAIL);
  1621. }
  1622. if (srb->sc_data_direction == DMA_TO_DEVICE) {
  1623. new_blk = xd_get_unused_block(chip, zone_no);
  1624. if (new_blk == BLK_NOT_FOUND) {
  1625. set_sense_type(chip, lun,
  1626. SENSE_TYPE_MEDIA_WRITE_ERR);
  1627. TRACE_RET(chip, STATUS_FAIL);
  1628. }
  1629. }
  1630. start_page = 0;
  1631. }
  1632. if ((srb->sc_data_direction == DMA_TO_DEVICE) &&
  1633. (end_page != (xd_card->page_off + 1))) {
  1634. #ifdef XD_DELAY_WRITE
  1635. delay_write->delay_write_flag = 1;
  1636. delay_write->old_phyblock = old_blk;
  1637. delay_write->new_phyblock = new_blk;
  1638. delay_write->logblock = log_blk;
  1639. delay_write->pageoff = end_page;
  1640. #else
  1641. if (detect_card_cd(chip, XD_CARD) != STATUS_SUCCESS) {
  1642. chip->card_fail |= XD_CARD;
  1643. set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
  1644. TRACE_RET(chip, STATUS_FAIL);
  1645. }
  1646. retval = xd_finish_write(chip, old_blk, new_blk,
  1647. log_blk, end_page);
  1648. if (retval != STATUS_SUCCESS) {
  1649. if (detect_card_cd(chip, XD_CARD) != STATUS_SUCCESS) {
  1650. set_sense_type(chip, lun,
  1651. SENSE_TYPE_MEDIA_NOT_PRESENT);
  1652. TRACE_RET(chip, STATUS_FAIL);
  1653. }
  1654. set_sense_type(chip, lun, SENSE_TYPE_MEDIA_WRITE_ERR);
  1655. TRACE_RET(chip, STATUS_FAIL);
  1656. }
  1657. #endif
  1658. }
  1659. scsi_set_resid(srb, 0);
  1660. return STATUS_SUCCESS;
  1661. }
  1662. void xd_free_l2p_tbl(struct rtsx_chip *chip)
  1663. {
  1664. struct xd_info *xd_card = &(chip->xd_card);
  1665. int i = 0;
  1666. if (xd_card->zone != NULL) {
  1667. for (i = 0; i < xd_card->zone_cnt; i++) {
  1668. if (xd_card->zone[i].l2p_table != NULL) {
  1669. vfree(xd_card->zone[i].l2p_table);
  1670. xd_card->zone[i].l2p_table = NULL;
  1671. }
  1672. if (xd_card->zone[i].free_table != NULL) {
  1673. vfree(xd_card->zone[i].free_table);
  1674. xd_card->zone[i].free_table = NULL;
  1675. }
  1676. }
  1677. vfree(xd_card->zone);
  1678. xd_card->zone = NULL;
  1679. }
  1680. }
  1681. void xd_cleanup_work(struct rtsx_chip *chip)
  1682. {
  1683. #ifdef XD_DELAY_WRITE
  1684. struct xd_info *xd_card = &(chip->xd_card);
  1685. if (xd_card->delay_write.delay_write_flag) {
  1686. dev_dbg(rtsx_dev(chip), "xD: delay write\n");
  1687. xd_delay_write(chip);
  1688. xd_card->cleanup_counter = 0;
  1689. }
  1690. #endif
  1691. }
  1692. int xd_power_off_card3v3(struct rtsx_chip *chip)
  1693. {
  1694. int retval;
  1695. retval = disable_card_clock(chip, XD_CARD);
  1696. if (retval != STATUS_SUCCESS)
  1697. TRACE_RET(chip, STATUS_FAIL);
  1698. RTSX_WRITE_REG(chip, CARD_OE, XD_OUTPUT_EN, 0);
  1699. if (!chip->ft2_fast_mode) {
  1700. retval = card_power_off(chip, XD_CARD);
  1701. if (retval != STATUS_SUCCESS)
  1702. TRACE_RET(chip, STATUS_FAIL);
  1703. wait_timeout(50);
  1704. }
  1705. if (chip->asic_code) {
  1706. retval = xd_pull_ctl_disable(chip);
  1707. if (retval != STATUS_SUCCESS)
  1708. TRACE_RET(chip, STATUS_FAIL);
  1709. } else {
  1710. RTSX_WRITE_REG(chip, FPGA_PULL_CTL, 0xFF, 0xDF);
  1711. }
  1712. return STATUS_SUCCESS;
  1713. }
  1714. int release_xd_card(struct rtsx_chip *chip)
  1715. {
  1716. struct xd_info *xd_card = &(chip->xd_card);
  1717. int retval;
  1718. chip->card_ready &= ~XD_CARD;
  1719. chip->card_fail &= ~XD_CARD;
  1720. chip->card_wp &= ~XD_CARD;
  1721. xd_card->delay_write.delay_write_flag = 0;
  1722. xd_free_l2p_tbl(chip);
  1723. retval = xd_power_off_card3v3(chip);
  1724. if (retval != STATUS_SUCCESS)
  1725. TRACE_RET(chip, STATUS_FAIL);
  1726. return STATUS_SUCCESS;
  1727. }