be_cmds.c 38 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443
  1. /**
  2. * Copyright (C) 2005 - 2014 Emulex
  3. * All rights reserved.
  4. *
  5. * This program is free software; you can redistribute it and/or
  6. * modify it under the terms of the GNU General Public License version 2
  7. * as published by the Free Software Foundation. The full GNU General
  8. * Public License is included in this distribution in the file called COPYING.
  9. *
  10. * Contact Information:
  11. * linux-drivers@emulex.com
  12. *
  13. * Emulex
  14. * 3333 Susan Street
  15. * Costa Mesa, CA 92626
  16. */
  17. #include <scsi/iscsi_proto.h>
  18. #include "be_main.h"
  19. #include "be.h"
  20. #include "be_mgmt.h"
/*
 * beiscsi_pci_soft_reset()- Issue a soft reset to the adapter
 * @phba: driver private structure
 *
 * Asserts the soft-reset bit in the PCI BAR and polls for it to
 * deassert, then brings the MPU IRAM online and repeats the
 * reset/poll sequence.
 *
 * return
 * Success: 0
 * Failure: -EIO when either reset bit fails to deassert
 **/
int beiscsi_pci_soft_reset(struct beiscsi_hba *phba)
{
	u32 sreset;
	u8 *pci_reset_offset = 0;
	u8 *pci_online0_offset = 0;
	u8 *pci_online1_offset = 0;
	u32 pconline0 = 0;
	u32 pconline1 = 0;
	u32 i;

	pci_reset_offset = (u8 *)phba->pci_va + BE2_SOFT_RESET;
	pci_online0_offset = (u8 *)phba->pci_va + BE2_PCI_ONLINE0;
	pci_online1_offset = (u8 *)phba->pci_va + BE2_PCI_ONLINE1;

	/* Assert the soft-reset bit */
	sreset = readl((void *)pci_reset_offset);
	sreset |= BE2_SET_RESET;
	writel(sreset, (void *)pci_reset_offset);

	/* Poll up to 65 * 100ms for HW to clear the reset bit */
	i = 0;
	while (sreset & BE2_SET_RESET) {
		if (i > 64)
			break;
		msleep(100);
		sreset = readl((void *)pci_reset_offset);
		i++;
	}

	if (sreset & BE2_SET_RESET) {
		printk(KERN_ERR DRV_NAME
		       " Soft Reset did not deassert\n");
		return -EIO;
	}

	/* Bring the MPU IRAM online; pconline0 intentionally stays 0 */
	pconline1 = BE2_MPU_IRAM_ONLINE;
	writel(pconline0, (void *)pci_online0_offset);
	writel(pconline1, (void *)pci_online1_offset);

	/* Second reset after the IRAM-online writes; 1ms poll step here */
	sreset |= BE2_SET_RESET;
	writel(sreset, (void *)pci_reset_offset);

	i = 0;
	while (sreset & BE2_SET_RESET) {
		if (i > 64)
			break;
		msleep(1);
		sreset = readl((void *)pci_reset_offset);
		i++;
	}
	if (sreset & BE2_SET_RESET) {
		printk(KERN_ERR DRV_NAME
		       " MPU Online Soft Reset did not deassert\n");
		return -EIO;
	}
	return 0;
}
/*
 * be_chk_reset_complete()- Wait for adapter reset completion
 * @phba: driver private structure
 *
 * After a fixed 5s settle delay, polls the MPU_EP_SEMAPHORE register
 * (up to ~1000 * 60ms) until bit 31 is set or the low 16 bits read
 * 0xC000.
 *
 * return
 * Success: 0
 * Failure: -EIO when bit 31 is set or the poll budget is exhausted
 **/
int be_chk_reset_complete(struct beiscsi_hba *phba)
{
	unsigned int num_loop;
	u8 *mpu_sem = 0;
	u32 status;

	num_loop = 1000;
	mpu_sem = (u8 *)phba->csr_va + MPU_EP_SEMAPHORE;
	msleep(5000);

	while (num_loop) {
		status = readl((void *)mpu_sem);

		/* exit on bit 31, or when the low word reads 0xC000 */
		if ((status & 0x80000000) || (status & 0x0000FFFF) == 0xC000)
			break;
		msleep(60);
		num_loop--;
	}

	/* bit 31 set or loop budget exhausted => reset did not complete */
	if ((status & 0x80000000) || (!num_loop)) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BC_%d : Failed in be_chk_reset_complete"
			    "status = 0x%x\n", status);
		return -EIO;
	}

	return 0;
}
  92. void be_mcc_notify(struct beiscsi_hba *phba)
  93. {
  94. struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
  95. u32 val = 0;
  96. val |= mccq->id & DB_MCCQ_RING_ID_MASK;
  97. val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
  98. iowrite32(val, phba->db_va + DB_MCCQ_OFFSET);
  99. }
  100. unsigned int alloc_mcc_tag(struct beiscsi_hba *phba)
  101. {
  102. unsigned int tag = 0;
  103. if (phba->ctrl.mcc_tag_available) {
  104. tag = phba->ctrl.mcc_tag[phba->ctrl.mcc_alloc_index];
  105. phba->ctrl.mcc_tag[phba->ctrl.mcc_alloc_index] = 0;
  106. phba->ctrl.mcc_numtag[tag] = 0;
  107. }
  108. if (tag) {
  109. phba->ctrl.mcc_tag_available--;
  110. if (phba->ctrl.mcc_alloc_index == (MAX_MCC_CMD - 1))
  111. phba->ctrl.mcc_alloc_index = 0;
  112. else
  113. phba->ctrl.mcc_alloc_index++;
  114. }
  115. return tag;
  116. }
/*
 * beiscsi_mccq_compl()- Wait for completion of MBX
 * @phba: Driver private structure
 * @tag: Tag for the MBX Command
 * @wrb: the WRB used for the MBX Command
 * @mbx_cmd_mem: ptr to memory allocated for MBX Cmd
 *
 * Waits for MBX completion with the passed TAG.
 *
 * return
 * Success: 0
 * Failure: Non-Zero
 **/
int beiscsi_mccq_compl(struct beiscsi_hba *phba,
		uint32_t tag, struct be_mcc_wrb **wrb,
		struct be_dma_mem *mbx_cmd_mem)
{
	int rc = 0;
	uint32_t mcc_tag_response;
	uint16_t status = 0, addl_status = 0, wrb_num = 0;
	struct be_mcc_wrb *temp_wrb;
	struct be_cmd_req_hdr *mbx_hdr;
	struct be_cmd_resp_hdr *mbx_resp_hdr;
	struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;

	/* Adapter already in error state: just drop the tag and bail. */
	if (beiscsi_error(phba)) {
		free_mcc_tag(&phba->ctrl, tag);
		return -EPERM;
	}

	/* Set MBX Tag state to Active */
	spin_lock(&phba->ctrl.mbox_lock);
	phba->ctrl.ptag_state[tag].tag_state = MCC_TAG_STATE_RUNNING;
	spin_unlock(&phba->ctrl.mbox_lock);

	/* wait for the mccq completion (ISR wakes us via mcc_wait[tag]) */
	rc = wait_event_interruptible_timeout(
				phba->ctrl.mcc_wait[tag],
				phba->ctrl.mcc_numtag[tag],
				msecs_to_jiffies(
				BEISCSI_HOST_MBX_TIMEOUT));

	if (rc <= 0) {
		struct be_dma_mem *tag_mem;

		/* Set MBX Tag state to timeout: be_mcc_compl_process_isr()
		 * will free the tag (and any DMA memory recorded below)
		 * when the late completion eventually arrives from FW.
		 */
		spin_lock(&phba->ctrl.mbox_lock);
		phba->ctrl.ptag_state[tag].tag_state = MCC_TAG_STATE_TIMEOUT;
		spin_unlock(&phba->ctrl.mbox_lock);

		/* Store resource addr to be freed later */
		tag_mem = &phba->ctrl.ptag_state[tag].tag_mem_state;
		if (mbx_cmd_mem) {
			tag_mem->size = mbx_cmd_mem->size;
			tag_mem->va = mbx_cmd_mem->va;
			tag_mem->dma = mbx_cmd_mem->dma;
		} else
			tag_mem->size = 0;

		beiscsi_log(phba, KERN_ERR,
			    BEISCSI_LOG_INIT | BEISCSI_LOG_EH |
			    BEISCSI_LOG_CONFIG,
			    "BC_%d : MBX Cmd Completion timed out\n");
		return -EBUSY;
	} else {
		rc = 0;
		/* Set MBX Tag state to completed */
		spin_lock(&phba->ctrl.mbox_lock);
		phba->ctrl.ptag_state[tag].tag_state = MCC_TAG_STATE_COMPLETED;
		spin_unlock(&phba->ctrl.mbox_lock);
	}

	/* mcc_numtag[tag] layout (packed by the ISR):
	 * [31] = valid, [23:16] = wrb index, [15:8] = extd_status,
	 * [7:0] = compl_status
	 */
	mcc_tag_response = phba->ctrl.mcc_numtag[tag];
	status = (mcc_tag_response & CQE_STATUS_MASK);
	addl_status = ((mcc_tag_response & CQE_STATUS_ADDL_MASK) >>
		       CQE_STATUS_ADDL_SHIFT);

	if (mbx_cmd_mem) {
		/* non-embedded: the request header is in the caller's DMA buf */
		mbx_hdr = (struct be_cmd_req_hdr *)mbx_cmd_mem->va;
	} else {
		/* embedded: recover the originating WRB from the MCCQ */
		wrb_num = (mcc_tag_response & CQE_STATUS_WRB_MASK) >>
			  CQE_STATUS_WRB_SHIFT;
		temp_wrb = (struct be_mcc_wrb *)queue_get_wrb(mccq, wrb_num);
		mbx_hdr = embedded_payload(temp_wrb);

		if (wrb)
			*wrb = temp_wrb;
	}

	if (status || addl_status) {
		beiscsi_log(phba, KERN_WARNING,
			    BEISCSI_LOG_INIT | BEISCSI_LOG_EH |
			    BEISCSI_LOG_CONFIG,
			    "BC_%d : MBX Cmd Failed for "
			    "Subsys : %d Opcode : %d with "
			    "Status : %d and Extd_Status : %d\n",
			    mbx_hdr->subsystem,
			    mbx_hdr->opcode,
			    status, addl_status);

		if (status == MCC_STATUS_INSUFFICIENT_BUFFER) {
			mbx_resp_hdr = (struct be_cmd_resp_hdr *) mbx_hdr;
			beiscsi_log(phba, KERN_WARNING,
				    BEISCSI_LOG_INIT | BEISCSI_LOG_EH |
				    BEISCSI_LOG_CONFIG,
				    "BC_%d : Insufficent Buffer Error "
				    "Resp_Len : %d Actual_Resp_Len : %d\n",
				    mbx_resp_hdr->response_length,
				    mbx_resp_hdr->actual_resp_len);

			/* caller may retry with a bigger buffer */
			rc = -EAGAIN;
			goto release_mcc_tag;
		}
		rc = -EIO;
	}

release_mcc_tag:
	/* Release the MCC entry */
	free_mcc_tag(&phba->ctrl, tag);

	return rc;
}
  224. void free_mcc_tag(struct be_ctrl_info *ctrl, unsigned int tag)
  225. {
  226. spin_lock(&ctrl->mbox_lock);
  227. tag = tag & 0x000000FF;
  228. ctrl->mcc_tag[ctrl->mcc_free_index] = tag;
  229. if (ctrl->mcc_free_index == (MAX_MCC_CMD - 1))
  230. ctrl->mcc_free_index = 0;
  231. else
  232. ctrl->mcc_free_index++;
  233. ctrl->mcc_tag_available++;
  234. spin_unlock(&ctrl->mbox_lock);
  235. }
  236. bool is_link_state_evt(u32 trailer)
  237. {
  238. return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
  239. ASYNC_TRAILER_EVENT_CODE_MASK) ==
  240. ASYNC_EVENT_CODE_LINK_STATE);
  241. }
  242. static bool is_iscsi_evt(u32 trailer)
  243. {
  244. return ((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
  245. ASYNC_TRAILER_EVENT_CODE_MASK) ==
  246. ASYNC_EVENT_CODE_ISCSI;
  247. }
  248. static int iscsi_evt_type(u32 trailer)
  249. {
  250. return (trailer >> ASYNC_TRAILER_EVENT_TYPE_SHIFT) &
  251. ASYNC_TRAILER_EVENT_TYPE_MASK;
  252. }
  253. static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
  254. {
  255. if (compl->flags != 0) {
  256. compl->flags = le32_to_cpu(compl->flags);
  257. WARN_ON((compl->flags & CQE_FLAGS_VALID_MASK) == 0);
  258. return true;
  259. } else
  260. return false;
  261. }
  262. static inline void be_mcc_compl_use(struct be_mcc_compl *compl)
  263. {
  264. compl->flags = 0;
  265. }
/*
 * be_mcc_compl_process()- Check the MBX completion status
 * @ctrl: Function specific MBX data structure
 * @compl: Completion status of MBX Command
 *
 * Check for the MBX completion status when BMBX method used
 *
 * return
 * Success: Zero
 * Failure: Non-Zero
 **/
static int be_mcc_compl_process(struct be_ctrl_info *ctrl,
				struct be_mcc_compl *compl)
{
	u16 compl_status, extd_status;
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
	struct be_cmd_req_hdr *hdr = embedded_payload(wrb);
	struct be_cmd_resp_hdr *resp_hdr;

	/* completion words arrive little-endian; convert in place first */
	be_dws_le_to_cpu(compl, 4);

	compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
					CQE_STATUS_COMPL_MASK;
	if (compl_status != MCC_STATUS_SUCCESS) {
		extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
						CQE_STATUS_EXTD_MASK;

		beiscsi_log(phba, KERN_ERR,
			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
			    "BC_%d : error in cmd completion: "
			    "Subsystem : %d Opcode : %d "
			    "status(compl/extd)=%d/%d\n",
			    hdr->subsystem, hdr->opcode,
			    compl_status, extd_status);

		if (compl_status == MCC_STATUS_INSUFFICIENT_BUFFER) {
			/* a non-zero response_length is treated as success
			 * despite the insufficient-buffer status
			 */
			resp_hdr = (struct be_cmd_resp_hdr *) hdr;
			if (resp_hdr->response_length)
				return 0;
		}
		return -EBUSY;
	}
	return 0;
}
/*
 * be_mcc_compl_process_isr()- Decode an MCC completion (ISR path)
 * @ctrl: Function specific MBX data structure
 * @compl: completion entry from the MCC CQ
 *
 * Packs the completion into ctrl->mcc_numtag[tag] and either wakes the
 * waiting thread (RUNNING state) or, when the waiter already timed out
 * (TIMEOUT state), frees the deferred DMA memory and the tag itself.
 *
 * return
 * Success: 0
 */
int be_mcc_compl_process_isr(struct be_ctrl_info *ctrl,
				    struct be_mcc_compl *compl)
{
	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
	u16 compl_status, extd_status;
	unsigned short tag;

	be_dws_le_to_cpu(compl, 4);

	compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
					CQE_STATUS_COMPL_MASK;
	/* The ctrl.mcc_numtag[tag] is filled with
	 * [31] = valid, [30:24] = Rsvd, [23:16] = wrb, [15:8] = extd_status,
	 * [7:0] = compl_status
	 */
	tag = (compl->tag0 & 0x000000FF);
	extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
					CQE_STATUS_EXTD_MASK;

	ctrl->mcc_numtag[tag] = 0x80000000;
	ctrl->mcc_numtag[tag] |= (compl->tag0 & 0x00FF0000);
	ctrl->mcc_numtag[tag] |= (extd_status & 0x000000FF) << 8;
	ctrl->mcc_numtag[tag] |= (compl_status & 0x000000FF);

	if (ctrl->ptag_state[tag].tag_state == MCC_TAG_STATE_RUNNING) {
		/* normal case: a thread is blocked in beiscsi_mccq_compl() */
		wake_up_interruptible(&ctrl->mcc_wait[tag]);
	} else if (ctrl->ptag_state[tag].tag_state == MCC_TAG_STATE_TIMEOUT) {
		/* late completion: the waiter gave up; clean up on its behalf */
		struct be_dma_mem *tag_mem;
		tag_mem = &ctrl->ptag_state[tag].tag_mem_state;

		beiscsi_log(phba, KERN_WARNING,
			    BEISCSI_LOG_MBOX | BEISCSI_LOG_INIT |
			    BEISCSI_LOG_CONFIG,
			    "BC_%d : MBX Completion for timeout Command "
			    "from FW\n");
		/* Check if memory needs to be freed */
		if (tag_mem->size)
			pci_free_consistent(ctrl->pdev, tag_mem->size,
					    tag_mem->va, tag_mem->dma);

		/* Change tag state */
		spin_lock(&phba->ctrl.mbox_lock);
		ctrl->ptag_state[tag].tag_state = MCC_TAG_STATE_COMPLETED;
		spin_unlock(&phba->ctrl.mbox_lock);

		/* Free MCC Tag */
		free_mcc_tag(ctrl, tag);
	}

	return 0;
}
  350. static struct be_mcc_compl *be_mcc_compl_get(struct beiscsi_hba *phba)
  351. {
  352. struct be_queue_info *mcc_cq = &phba->ctrl.mcc_obj.cq;
  353. struct be_mcc_compl *compl = queue_tail_node(mcc_cq);
  354. if (be_mcc_compl_is_new(compl)) {
  355. queue_tail_inc(mcc_cq);
  356. return compl;
  357. }
  358. return NULL;
  359. }
  360. /**
  361. * be2iscsi_fail_session(): Closing session with appropriate error
  362. * @cls_session: ptr to session
  363. *
  364. * Depending on adapter state appropriate error flag is passed.
  365. **/
  366. void be2iscsi_fail_session(struct iscsi_cls_session *cls_session)
  367. {
  368. struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
  369. struct beiscsi_hba *phba = iscsi_host_priv(shost);
  370. uint32_t iscsi_err_flag;
  371. if (phba->state & BE_ADAPTER_STATE_SHUTDOWN)
  372. iscsi_err_flag = ISCSI_ERR_INVALID_HOST;
  373. else
  374. iscsi_err_flag = ISCSI_ERR_CONN_FAILED;
  375. iscsi_session_failure(cls_session->dd_data, ISCSI_ERR_CONN_FAILED);
  376. }
/*
 * beiscsi_async_link_state_process()- Handle an async link-state event
 * @phba: driver private structure
 * @evt: link-state event posted by FW
 *
 * Updates phba->state for link up/down and, on link down, fails every
 * active session on the host.
 */
void beiscsi_async_link_state_process(struct beiscsi_hba *phba,
		struct be_async_event_link_state *evt)
{
	/* physical link down, or logical-link event with a PHY fault */
	if ((evt->port_link_status == ASYNC_EVENT_LINK_DOWN) ||
	    ((evt->port_link_status & ASYNC_EVENT_LOGICAL) &&
	     (evt->port_fault != BEISCSI_PHY_LINK_FAULT_NONE))) {
		phba->state = BE_ADAPTER_LINK_DOWN;

		beiscsi_log(phba, KERN_ERR,
			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_INIT,
			    "BC_%d : Link Down on Port %d\n",
			    evt->physical_port);

		iscsi_host_for_each_session(phba->shost,
					    be2iscsi_fail_session);
	} else if ((evt->port_link_status & ASYNC_EVENT_LINK_UP) ||
		    ((evt->port_link_status & ASYNC_EVENT_LOGICAL) &&
		     (evt->port_fault == BEISCSI_PHY_LINK_FAULT_NONE))) {
		/* link up: BE_ADAPTER_CHECK_BOOT is set as well */
		phba->state = BE_ADAPTER_LINK_UP | BE_ADAPTER_CHECK_BOOT;

		beiscsi_log(phba, KERN_ERR,
			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_INIT,
			    "BC_%d : Link UP on Port %d\n",
			    evt->physical_port);
	}
}
/*
 * beiscsi_process_mcc()- Drain the MCC completion queue
 * @phba: driver private structure
 *
 * Under mcc_cq_lock, consumes all new CQ entries: async entries are
 * dispatched to their event handlers, command completions are processed
 * and accounted; the CQ doorbell is rung once for all consumed entries.
 *
 * return
 * Success: 0
 * Failure: Non-Zero status from be_mcc_compl_process()
 */
int beiscsi_process_mcc(struct beiscsi_hba *phba)
{
	struct be_mcc_compl *compl;
	int num = 0, status = 0;
	struct be_ctrl_info *ctrl = &phba->ctrl;

	spin_lock_bh(&phba->ctrl.mcc_cq_lock);
	while ((compl = be_mcc_compl_get(phba))) {
		if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
			/* Interpret flags as an async trailer */
			if (is_link_state_evt(compl->flags))
				/* Interpret compl as a async link evt */
				beiscsi_async_link_state_process(phba,
				   (struct be_async_event_link_state *) compl);
			else if (is_iscsi_evt(compl->flags)) {
				switch (iscsi_evt_type(compl->flags)) {
				case ASYNC_EVENT_NEW_ISCSI_TGT_DISC:
				case ASYNC_EVENT_NEW_ISCSI_CONN:
				case ASYNC_EVENT_NEW_TCP_CONN:
					phba->state |= BE_ADAPTER_CHECK_BOOT;
					beiscsi_log(phba, KERN_ERR,
						    BEISCSI_LOG_CONFIG |
						    BEISCSI_LOG_MBOX,
						    "BC_%d : Async iscsi Event,"
						    " flags handled = 0x%08x\n",
						    compl->flags);
					break;
				default:
					beiscsi_log(phba, KERN_ERR,
						    BEISCSI_LOG_CONFIG |
						    BEISCSI_LOG_MBOX,
						    "BC_%d : Unsupported Async"
						    " Event, flags = 0x%08x\n",
						    compl->flags);
				}
			} else
				beiscsi_log(phba, KERN_ERR,
					    BEISCSI_LOG_CONFIG |
					    BEISCSI_LOG_MBOX,
					    "BC_%d : Unsupported Async Event, flags"
					    " = 0x%08x\n", compl->flags);
		} else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
			/* command completion: process and release the slot */
			status = be_mcc_compl_process(ctrl, compl);
			atomic_dec(&phba->ctrl.mcc_obj.q.used);
		}
		be_mcc_compl_use(compl);
		num++;
	}

	if (num)
		hwi_ring_cq_db(phba, phba->ctrl.mcc_obj.cq.id, num, 1, 0);

	spin_unlock_bh(&phba->ctrl.mcc_cq_lock);
	return status;
}
  452. /*
  453. * be_mcc_wait_compl()- Wait for MBX completion
  454. * @phba: driver private structure
  455. *
  456. * Wait till no more pending mcc requests are present
  457. *
  458. * return
  459. * Success: 0
  460. * Failure: Non-Zero
  461. *
  462. **/
  463. static int be_mcc_wait_compl(struct beiscsi_hba *phba)
  464. {
  465. int i, status;
  466. for (i = 0; i < mcc_timeout; i++) {
  467. if (beiscsi_error(phba))
  468. return -EIO;
  469. status = beiscsi_process_mcc(phba);
  470. if (status)
  471. return status;
  472. if (atomic_read(&phba->ctrl.mcc_obj.q.used) == 0)
  473. break;
  474. udelay(100);
  475. }
  476. if (i == mcc_timeout) {
  477. beiscsi_log(phba, KERN_ERR,
  478. BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
  479. "BC_%d : FW Timed Out\n");
  480. phba->fw_timeout = true;
  481. beiscsi_ue_detect(phba);
  482. return -EBUSY;
  483. }
  484. return 0;
  485. }
  486. /*
  487. * be_mcc_notify_wait()- Notify and wait for Compl
  488. * @phba: driver private structure
  489. *
  490. * Notify MCC requests and wait for completion
  491. *
  492. * return
  493. * Success: 0
  494. * Failure: Non-Zero
  495. **/
  496. int be_mcc_notify_wait(struct beiscsi_hba *phba)
  497. {
  498. be_mcc_notify(phba);
  499. return be_mcc_wait_compl(phba);
  500. }
/*
 * be_mbox_db_ready_wait()- Check ready status
 * @ctrl: Function specific MBX data structure
 *
 * Check for the ready status of FW to send BMBX
 * commands to adapter.
 *
 * return
 * Success: 0
 * Failure: Non-Zero
 **/
static int be_mbox_db_ready_wait(struct be_ctrl_info *ctrl)
{
#define BEISCSI_MBX_RDY_BIT_TIMEOUT 4000 /* 4sec */
	void __iomem *db = ctrl->db + MPU_MAILBOX_DB_OFFSET;
	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
	unsigned long timeout;
	bool read_flag = false;
	int ret = 0, i;
	u32 ready;
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(rdybit_check_q);

	if (beiscsi_error(phba))
		return -EIO;

	/* overall cap of 110s on top of the inner 4s busy-poll rounds */
	timeout = jiffies + (HZ * 110);

	do {
		/* busy-poll the ready bit in 1ms steps for up to 4s */
		for (i = 0; i < BEISCSI_MBX_RDY_BIT_TIMEOUT; i++) {
			ready = ioread32(db) & MPU_MAILBOX_DB_RDY_MASK;
			if (ready) {
				read_flag = true;
				break;
			}
			mdelay(1);
		}

		if (!read_flag) {
			/* NOTE(review): read_flag is false here, so the
			 * condition (read_flag != true) is already satisfied
			 * and wait_event_timeout() returns without sleeping —
			 * confirm whether a real 5s backoff was intended.
			 */
			wait_event_timeout(rdybit_check_q,
					  (read_flag != true),
					   HZ * 5);
		}
	} while ((time_before(jiffies, timeout)) && !read_flag);

	if (!read_flag) {
		beiscsi_log(phba, KERN_ERR,
			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
			    "BC_%d : FW Timed Out\n");

		phba->fw_timeout = true;
		beiscsi_ue_detect(phba);

		ret = -EBUSY;
	}

	return ret;
}
/*
 * be_mbox_notify: Notify adapter of new BMBX command
 * @ctrl: Function specific MBX data structure
 *
 * Ring doorbell to inform adapter of a BMBX command
 * to process
 *
 * return
 * Success: 0
 * Failure: Non-Zero
 **/
int be_mbox_notify(struct be_ctrl_info *ctrl)
{
	int status;
	u32 val = 0;
	void __iomem *db = ctrl->db + MPU_MAILBOX_DB_OFFSET;
	struct be_dma_mem *mbox_mem = &ctrl->mbox_mem;
	struct be_mcc_mailbox *mbox = mbox_mem->va;
	struct be_mcc_compl *compl = &mbox->compl;
	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);

	status = be_mbox_db_ready_wait(ctrl);
	if (status)
		return status;

	/* step 1: doorbell write with HI mask set carries the upper
	 * half of the mailbox DMA address (msb bits 34 - 63 at bits 2 - 31)
	 */
	val &= ~MPU_MAILBOX_DB_RDY_MASK;
	val |= MPU_MAILBOX_DB_HI_MASK;
	val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
	iowrite32(val, db);

	/* wait for the adapter to re-assert ready between the two halves */
	status = be_mbox_db_ready_wait(ctrl);
	if (status)
		return status;

	/* step 2: HI mask clear carries the lower half
	 * (lsb bits 4 - 33 at bits 2 - 31)
	 */
	val = 0;
	val &= ~MPU_MAILBOX_DB_RDY_MASK;
	val &= ~MPU_MAILBOX_DB_HI_MASK;
	val |= (u32) (mbox_mem->dma >> 4) << 2;
	iowrite32(val, db);

	status = be_mbox_db_ready_wait(ctrl);
	if (status)
		return status;

	/* a completion entry should now be present in the mailbox */
	if (be_mcc_compl_is_new(compl)) {
		status = be_mcc_compl_process(ctrl, &mbox->compl);
		be_mcc_compl_use(compl);
		if (status) {
			beiscsi_log(phba, KERN_ERR,
				    BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
				    "BC_%d : After be_mcc_compl_process\n");

			return status;
		}
	} else {
		beiscsi_log(phba, KERN_ERR,
			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
			    "BC_%d : Invalid Mailbox Completion\n");

		return -EBUSY;
	}
	return 0;
}
/*
 * Insert the mailbox address into the doorbell in two steps
 * Polls on the mbox doorbell till a command completion (or a timeout) occurs
 *
 * NOTE(review): near-duplicate of be_mbox_notify() above — candidate for
 * consolidation.
 */
static int be_mbox_notify_wait(struct beiscsi_hba *phba)
{
	int status;
	u32 val = 0;
	void __iomem *db = phba->ctrl.db + MPU_MAILBOX_DB_OFFSET;
	struct be_dma_mem *mbox_mem = &phba->ctrl.mbox_mem;
	struct be_mcc_mailbox *mbox = mbox_mem->va;
	struct be_mcc_compl *compl = &mbox->compl;
	struct be_ctrl_info *ctrl = &phba->ctrl;

	status = be_mbox_db_ready_wait(ctrl);
	if (status)
		return status;

	val |= MPU_MAILBOX_DB_HI_MASK;
	/* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
	val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
	iowrite32(val, db);

	/* wait for ready to be set */
	status = be_mbox_db_ready_wait(ctrl);
	if (status != 0)
		return status;

	val = 0;
	/* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */
	val |= (u32)(mbox_mem->dma >> 4) << 2;
	iowrite32(val, db);

	status = be_mbox_db_ready_wait(ctrl);
	if (status != 0)
		return status;

	/* A cq entry has been made now */
	if (be_mcc_compl_is_new(compl)) {
		status = be_mcc_compl_process(ctrl, &mbox->compl);
		be_mcc_compl_use(compl);
		if (status)
			return status;
	} else {
		beiscsi_log(phba, KERN_ERR,
			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
			    "BC_%d : invalid mailbox completion\n");

		return -EBUSY;
	}
	return 0;
}
  650. void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len,
  651. bool embedded, u8 sge_cnt)
  652. {
  653. if (embedded)
  654. wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
  655. else
  656. wrb->embedded |= (sge_cnt & MCC_WRB_SGE_CNT_MASK) <<
  657. MCC_WRB_SGE_CNT_SHIFT;
  658. wrb->payload_length = payload_len;
  659. be_dws_cpu_to_le(wrb, 8);
  660. }
  661. void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
  662. u8 subsystem, u8 opcode, int cmd_len)
  663. {
  664. req_hdr->opcode = opcode;
  665. req_hdr->subsystem = subsystem;
  666. req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
  667. req_hdr->timeout = BEISCSI_FW_MBX_TIMEOUT;
  668. }
  669. static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
  670. struct be_dma_mem *mem)
  671. {
  672. int i, buf_pages;
  673. u64 dma = (u64) mem->dma;
  674. buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
  675. for (i = 0; i < buf_pages; i++) {
  676. pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF);
  677. pages[i].hi = cpu_to_le32(upper_32_bits(dma));
  678. dma += PAGE_SIZE_4K;
  679. }
  680. }
  681. static u32 eq_delay_to_mult(u32 usec_delay)
  682. {
  683. #define MAX_INTR_RATE 651042
  684. const u32 round = 10;
  685. u32 multiplier;
  686. if (usec_delay == 0)
  687. multiplier = 0;
  688. else {
  689. u32 interrupt_rate = 1000000 / usec_delay;
  690. if (interrupt_rate == 0)
  691. multiplier = 1023;
  692. else {
  693. multiplier = (MAX_INTR_RATE - interrupt_rate) * round;
  694. multiplier /= interrupt_rate;
  695. multiplier = (multiplier + round / 2) / round;
  696. multiplier = min(multiplier, (u32) 1023);
  697. }
  698. }
  699. return multiplier;
  700. }
  701. struct be_mcc_wrb *wrb_from_mbox(struct be_dma_mem *mbox_mem)
  702. {
  703. return &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
  704. }
  705. struct be_mcc_wrb *wrb_from_mccq(struct beiscsi_hba *phba)
  706. {
  707. struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
  708. struct be_mcc_wrb *wrb;
  709. WARN_ON(atomic_read(&mccq->used) >= mccq->len);
  710. wrb = queue_head_node(mccq);
  711. memset(wrb, 0, sizeof(*wrb));
  712. wrb->tag0 = (mccq->head & 0x000000FF) << 16;
  713. queue_head_inc(mccq);
  714. atomic_inc(&mccq->used);
  715. return wrb;
  716. }
/*
 * beiscsi_cmd_eq_create()- Create an event queue on the adapter
 * @ctrl: Function specific MBX data structure
 * @eq: event queue to create (dma_mem must hold the ring pages)
 * @eq_delay: interrupt delay in usec, encoded via eq_delay_to_mult()
 *
 * Issues OPCODE_COMMON_EQ_CREATE over BMBX under mbox_lock.
 *
 * return
 * Success: 0 (eq->id and eq->created are filled in)
 * Failure: Non-Zero
 */
int beiscsi_cmd_eq_create(struct be_ctrl_info *ctrl,
			  struct be_queue_info *eq, int eq_delay)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_eq_create *req = embedded_payload(wrb);
	/* req and resp alias the same embedded payload */
	struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);
	struct be_dma_mem *q_mem = &eq->dma_mem;
	int status;

	spin_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			OPCODE_COMMON_EQ_CREATE, sizeof(*req));

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	AMAP_SET_BITS(struct amap_eq_context, func, req->context,
						PCI_FUNC(ctrl->pdev->devfn));
	AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
	AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
	/* count is encoded as log2(len / 256) */
	AMAP_SET_BITS(struct amap_eq_context, count, req->context,
					__ilog2_u32(eq->len / 256));
	AMAP_SET_BITS(struct amap_eq_context, delaymult, req->context,
					eq_delay_to_mult(eq_delay));
	be_dws_cpu_to_le(req->context, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify(ctrl);
	if (!status) {
		eq->id = le16_to_cpu(resp->eq_id);
		eq->created = true;
	}
	spin_unlock(&ctrl->mbox_lock);
	return status;
}
  749. /**
  750. * be_cmd_fw_initialize()- Initialize FW
  751. * @ctrl: Pointer to function control structure
  752. *
  753. * Send FW initialize pattern for the function.
  754. *
  755. * return
  756. * Success: 0
  757. * Failure: Non-Zero value
  758. **/
  759. int be_cmd_fw_initialize(struct be_ctrl_info *ctrl)
  760. {
  761. struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
  762. struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
  763. int status;
  764. u8 *endian_check;
  765. spin_lock(&ctrl->mbox_lock);
  766. memset(wrb, 0, sizeof(*wrb));
  767. endian_check = (u8 *) wrb;
  768. *endian_check++ = 0xFF;
  769. *endian_check++ = 0x12;
  770. *endian_check++ = 0x34;
  771. *endian_check++ = 0xFF;
  772. *endian_check++ = 0xFF;
  773. *endian_check++ = 0x56;
  774. *endian_check++ = 0x78;
  775. *endian_check++ = 0xFF;
  776. be_dws_cpu_to_le(wrb, sizeof(*wrb));
  777. status = be_mbox_notify(ctrl);
  778. if (status)
  779. beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
  780. "BC_%d : be_cmd_fw_initialize Failed\n");
  781. spin_unlock(&ctrl->mbox_lock);
  782. return status;
  783. }
  784. /**
  785. * be_cmd_fw_uninit()- Uinitialize FW
  786. * @ctrl: Pointer to function control structure
  787. *
  788. * Send FW uninitialize pattern for the function
  789. *
  790. * return
  791. * Success: 0
  792. * Failure: Non-Zero value
  793. **/
  794. int be_cmd_fw_uninit(struct be_ctrl_info *ctrl)
  795. {
  796. struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
  797. struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
  798. int status;
  799. u8 *endian_check;
  800. spin_lock(&ctrl->mbox_lock);
  801. memset(wrb, 0, sizeof(*wrb));
  802. endian_check = (u8 *) wrb;
  803. *endian_check++ = 0xFF;
  804. *endian_check++ = 0xAA;
  805. *endian_check++ = 0xBB;
  806. *endian_check++ = 0xFF;
  807. *endian_check++ = 0xFF;
  808. *endian_check++ = 0xCC;
  809. *endian_check++ = 0xDD;
  810. *endian_check = 0xFF;
  811. be_dws_cpu_to_le(wrb, sizeof(*wrb));
  812. status = be_mbox_notify(ctrl);
  813. if (status)
  814. beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
  815. "BC_%d : be_cmd_fw_uninit Failed\n");
  816. spin_unlock(&ctrl->mbox_lock);
  817. return status;
  818. }
  819. int beiscsi_cmd_cq_create(struct be_ctrl_info *ctrl,
  820. struct be_queue_info *cq, struct be_queue_info *eq,
  821. bool sol_evts, bool no_delay, int coalesce_wm)
  822. {
  823. struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
  824. struct be_cmd_req_cq_create *req = embedded_payload(wrb);
  825. struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);
  826. struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
  827. struct be_dma_mem *q_mem = &cq->dma_mem;
  828. void *ctxt = &req->context;
  829. int status;
  830. spin_lock(&ctrl->mbox_lock);
  831. memset(wrb, 0, sizeof(*wrb));
  832. be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
  833. be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
  834. OPCODE_COMMON_CQ_CREATE, sizeof(*req));
  835. req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
  836. if (is_chip_be2_be3r(phba)) {
  837. AMAP_SET_BITS(struct amap_cq_context, coalescwm,
  838. ctxt, coalesce_wm);
  839. AMAP_SET_BITS(struct amap_cq_context, nodelay, ctxt, no_delay);
  840. AMAP_SET_BITS(struct amap_cq_context, count, ctxt,
  841. __ilog2_u32(cq->len / 256));
  842. AMAP_SET_BITS(struct amap_cq_context, valid, ctxt, 1);
  843. AMAP_SET_BITS(struct amap_cq_context, solevent, ctxt, sol_evts);
  844. AMAP_SET_BITS(struct amap_cq_context, eventable, ctxt, 1);
  845. AMAP_SET_BITS(struct amap_cq_context, eqid, ctxt, eq->id);
  846. AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 1);
  847. AMAP_SET_BITS(struct amap_cq_context, func, ctxt,
  848. PCI_FUNC(ctrl->pdev->devfn));
  849. } else {
  850. req->hdr.version = MBX_CMD_VER2;
  851. req->page_size = 1;
  852. AMAP_SET_BITS(struct amap_cq_context_v2, coalescwm,
  853. ctxt, coalesce_wm);
  854. AMAP_SET_BITS(struct amap_cq_context_v2, nodelay,
  855. ctxt, no_delay);
  856. AMAP_SET_BITS(struct amap_cq_context_v2, count, ctxt,
  857. __ilog2_u32(cq->len / 256));
  858. AMAP_SET_BITS(struct amap_cq_context_v2, valid, ctxt, 1);
  859. AMAP_SET_BITS(struct amap_cq_context_v2, eventable, ctxt, 1);
  860. AMAP_SET_BITS(struct amap_cq_context_v2, eqid, ctxt, eq->id);
  861. AMAP_SET_BITS(struct amap_cq_context_v2, armed, ctxt, 1);
  862. }
  863. be_dws_cpu_to_le(ctxt, sizeof(req->context));
  864. be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
  865. status = be_mbox_notify(ctrl);
  866. if (!status) {
  867. cq->id = le16_to_cpu(resp->cq_id);
  868. cq->created = true;
  869. } else
  870. beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
  871. "BC_%d : In be_cmd_cq_create, status=ox%08x\n",
  872. status);
  873. spin_unlock(&ctrl->mbox_lock);
  874. return status;
  875. }
  876. static u32 be_encoded_q_len(int q_len)
  877. {
  878. u32 len_encoded = fls(q_len); /* log2(len) + 1 */
  879. if (len_encoded == 16)
  880. len_encoded = 0;
  881. return len_encoded;
  882. }
/**
 * beiscsi_cmd_mccq_create()- Create the MCC queue
 * @phba: device priv structure instance
 * @mccq: queue info of the MCCQ to be created
 * @cq: completion queue the MCCQ is bound to
 *
 * Issue MCC_CREATE through the mailbox so the FW creates the MCC
 * queue and binds it to @cq. On success @mccq->id and @mccq->created
 * are updated from the FW response.
 *
 * return
 * Success: 0
 * Failure: Non-Zero value
 **/
int beiscsi_cmd_mccq_create(struct beiscsi_hba *phba,
			struct be_queue_info *mccq,
			struct be_queue_info *cq)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mcc_create *req;
	struct be_dma_mem *q_mem = &mccq->dma_mem;
	struct be_ctrl_info *ctrl;
	void *ctxt;
	int status;

	/* The mailbox is a single shared resource: serialize access */
	spin_lock(&phba->ctrl.mbox_lock);
	ctrl = &phba->ctrl;
	wrb = wrb_from_mbox(&ctrl->mbox_mem);
	memset(wrb, 0, sizeof(*wrb));
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			OPCODE_COMMON_MCC_CREATE, sizeof(*req));

	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);

	/* Build the MCC context: owning PCI function, encoded ring size,
	 * and the CQ on which MCC completions are reported.
	 */
	AMAP_SET_BITS(struct amap_mcc_context, fid, ctxt,
		      PCI_FUNC(phba->pcidev->devfn));
	AMAP_SET_BITS(struct amap_mcc_context, valid, ctxt, 1);
	AMAP_SET_BITS(struct amap_mcc_context, ring_size, ctxt,
		be_encoded_q_len(mccq->len));
	AMAP_SET_BITS(struct amap_mcc_context, cq_id, ctxt, cq->id);

	/* Context is built in CPU order; FW expects little-endian */
	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(phba);
	if (!status) {
		struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);

		mccq->id = le16_to_cpu(resp->id);
		mccq->created = true;
	}
	spin_unlock(&phba->ctrl.mbox_lock);

	return status;
}
/**
 * beiscsi_cmd_q_destroy()- Destroy a FW queue
 * @ctrl: ptr to ctrl_info
 * @q: queue to be destroyed (its id is sent to the FW);
 *     unused (may be NULL) for QTYPE_SGL
 * @queue_type: one of QTYPE_EQ/CQ/MCCQ/WRBQ/DPDUQ/SGL selecting
 *              the subsystem and destroy opcode
 *
 * Map @queue_type to the matching FW subsystem/opcode and issue the
 * destroy command through the mailbox. An unknown type is a driver
 * bug and triggers BUG().
 *
 * return
 * Success: 0
 * Failure: Non-Zero value
 **/
int beiscsi_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q,
			  int queue_type)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_q_destroy *req = embedded_payload(wrb);
	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
	u8 subsys = 0, opcode = 0;
	int status;

	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
		    "BC_%d : In beiscsi_cmd_q_destroy "
		    "queue_type : %d\n", queue_type);

	spin_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));
	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	/* Select the FW subsystem and opcode for this queue type */
	switch (queue_type) {
	case QTYPE_EQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_EQ_DESTROY;
		break;
	case QTYPE_CQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_CQ_DESTROY;
		break;
	case QTYPE_MCCQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_MCC_DESTROY;
		break;
	case QTYPE_WRBQ:
		subsys = CMD_SUBSYSTEM_ISCSI;
		opcode = OPCODE_COMMON_ISCSI_WRBQ_DESTROY;
		break;
	case QTYPE_DPDUQ:
		subsys = CMD_SUBSYSTEM_ISCSI;
		opcode = OPCODE_COMMON_ISCSI_DEFQ_DESTROY;
		break;
	case QTYPE_SGL:
		subsys = CMD_SUBSYSTEM_ISCSI;
		opcode = OPCODE_COMMON_ISCSI_CFG_REMOVE_SGL_PAGES;
		break;
	default:
		/* Unknown queue type is a programming error */
		spin_unlock(&ctrl->mbox_lock);
		BUG();
		return -ENXIO;
	}
	be_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req));
	/* SGL removal is adapter-wide and carries no queue id */
	if (queue_type != QTYPE_SGL)
		req->id = cpu_to_le16(q->id);

	status = be_mbox_notify(ctrl);

	spin_unlock(&ctrl->mbox_lock);
	return status;
}
/**
 * be_cmd_create_default_pdu_queue()- Create DEFQ for the adapter
 * @ctrl: ptr to ctrl_info
 * @cq: Completion Queue
 * @dq: Default Queue
 * @length: ring size
 * @entry_size: size of each entry in DEFQ
 * @is_header: Header or Data DEFQ
 * @ulp_num: Bind to which ULP
 *
 * Create HDR/Data DEFQ for the passed ULP. Unsol PDU are posted
 * on this queue by the FW
 *
 * return
 * Success: 0
 * Failure: Non-Zero Value
 *
 **/
int be_cmd_create_default_pdu_queue(struct be_ctrl_info *ctrl,
				    struct be_queue_info *cq,
				    struct be_queue_info *dq, int length,
				    int entry_size, uint8_t is_header,
				    uint8_t ulp_num)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_defq_create_req *req = embedded_payload(wrb);
	struct be_dma_mem *q_mem = &dq->dma_mem;
	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
	void *ctxt = &req->context;
	int status;

	spin_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
			   OPCODE_COMMON_ISCSI_DEFQ_CREATE, sizeof(*req));

	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
	/* On dual-ULP-aware FW, request binding of this DEFQ to @ulp_num */
	if (phba->fw_config.dual_ulp_aware) {
		req->ulp_num = ulp_num;
		req->dua_feature |= (1 << BEISCSI_DUAL_ULP_AWARE_BIT);
		req->dua_feature |= (1 << BEISCSI_BIND_Q_TO_ULP_BIT);
	}

	/* BE2/BE3-R chips use the legacy context layout; newer chips
	 * use the extended layout with the same field semantics.
	 */
	if (is_chip_be2_be3r(phba)) {
		AMAP_SET_BITS(struct amap_be_default_pdu_context,
			      rx_pdid, ctxt, 0);
		AMAP_SET_BITS(struct amap_be_default_pdu_context,
			      rx_pdid_valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_be_default_pdu_context,
			      pci_func_id, ctxt, PCI_FUNC(ctrl->pdev->devfn));
		AMAP_SET_BITS(struct amap_be_default_pdu_context,
			      ring_size, ctxt,
			      be_encoded_q_len(length /
					       sizeof(struct phys_addr)));
		AMAP_SET_BITS(struct amap_be_default_pdu_context,
			      default_buffer_size, ctxt, entry_size);
		AMAP_SET_BITS(struct amap_be_default_pdu_context,
			      cq_id_recv, ctxt, cq->id);
	} else {
		AMAP_SET_BITS(struct amap_default_pdu_context_ext,
			      rx_pdid, ctxt, 0);
		AMAP_SET_BITS(struct amap_default_pdu_context_ext,
			      rx_pdid_valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_default_pdu_context_ext,
			      ring_size, ctxt,
			      be_encoded_q_len(length /
					       sizeof(struct phys_addr)));
		AMAP_SET_BITS(struct amap_default_pdu_context_ext,
			      default_buffer_size, ctxt, entry_size);
		AMAP_SET_BITS(struct amap_default_pdu_context_ext,
			      cq_id_recv, ctxt, cq->id);
	}

	/* Context is built in CPU order; FW expects little-endian */
	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify(ctrl);
	if (!status) {
		struct be_ring *defq_ring;
		struct be_defq_create_resp *resp = embedded_payload(wrb);

		dq->id = le16_to_cpu(resp->id);
		dq->created = true;
		/* Record the created DEFQ in the driver's per-ULP ring table */
		if (is_header)
			defq_ring = &phba->phwi_ctrlr->default_pdu_hdr[ulp_num];
		else
			defq_ring = &phba->phwi_ctrlr->
				    default_pdu_data[ulp_num];

		defq_ring->id = dq->id;

		/* Non-dual-ULP FW does not report ULP/doorbell in the
		 * response; fall back to ULP0 defaults.
		 */
		if (!phba->fw_config.dual_ulp_aware) {
			defq_ring->ulp_num = BEISCSI_ULP0;
			defq_ring->doorbell_offset = DB_RXULP0_OFFSET;
		} else {
			defq_ring->ulp_num = resp->ulp_num;
			defq_ring->doorbell_offset = resp->doorbell_offset;
		}
	}
	spin_unlock(&ctrl->mbox_lock);

	return status;
}
/**
 * be_cmd_wrbq_create()- Create WRBQ
 * @ctrl: ptr to ctrl_info
 * @q_mem: memory details for the queue
 * @wrbq: queue info
 * @pwrb_context: ptr to wrb_context
 * @ulp_num: ULP on which the WRBQ is to be created
 *
 * Create WRBQ on the passed ULP_NUM. On success @wrbq and
 * @pwrb_context are updated with the CID, ULP number and doorbell
 * offset returned by the FW.
 *
 * return
 * Success: 0
 * Failure: Non-Zero value
 **/
int be_cmd_wrbq_create(struct be_ctrl_info *ctrl,
		       struct be_dma_mem *q_mem,
		       struct be_queue_info *wrbq,
		       struct hwi_wrb_context *pwrb_context,
		       uint8_t ulp_num)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_wrbq_create_req *req = embedded_payload(wrb);
	struct be_wrbq_create_resp *resp = embedded_payload(wrb);
	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
	int status;

	spin_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
		OPCODE_COMMON_ISCSI_WRBQ_CREATE, sizeof(*req));
	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);

	/* On dual-ULP-aware FW, request binding of this WRBQ to @ulp_num */
	if (phba->fw_config.dual_ulp_aware) {
		req->ulp_num = ulp_num;
		req->dua_feature |= (1 << BEISCSI_DUAL_ULP_AWARE_BIT);
		req->dua_feature |= (1 << BEISCSI_BIND_Q_TO_ULP_BIT);
	}

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify(ctrl);
	if (!status) {
		wrbq->id = le16_to_cpu(resp->cid);
		wrbq->created = true;

		pwrb_context->cid = wrbq->id;
		/* Non-dual-ULP FW does not report ULP/doorbell in the
		 * response; fall back to ULP0 defaults.
		 */
		if (!phba->fw_config.dual_ulp_aware) {
			pwrb_context->doorbell_offset = DB_TXULP0_OFFSET;
			pwrb_context->ulp_num = BEISCSI_ULP0;
		} else {
			pwrb_context->ulp_num = resp->ulp_num;
			pwrb_context->doorbell_offset = resp->doorbell_offset;
		}
	}
	spin_unlock(&ctrl->mbox_lock);
	return status;
}
  1116. int be_cmd_iscsi_post_template_hdr(struct be_ctrl_info *ctrl,
  1117. struct be_dma_mem *q_mem)
  1118. {
  1119. struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
  1120. struct be_post_template_pages_req *req = embedded_payload(wrb);
  1121. int status;
  1122. spin_lock(&ctrl->mbox_lock);
  1123. memset(wrb, 0, sizeof(*wrb));
  1124. be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
  1125. be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
  1126. OPCODE_COMMON_ADD_TEMPLATE_HEADER_BUFFERS,
  1127. sizeof(*req));
  1128. req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
  1129. req->type = BEISCSI_TEMPLATE_HDR_TYPE_ISCSI;
  1130. be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
  1131. status = be_mbox_notify(ctrl);
  1132. spin_unlock(&ctrl->mbox_lock);
  1133. return status;
  1134. }
  1135. int be_cmd_iscsi_remove_template_hdr(struct be_ctrl_info *ctrl)
  1136. {
  1137. struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
  1138. struct be_remove_template_pages_req *req = embedded_payload(wrb);
  1139. int status;
  1140. spin_lock(&ctrl->mbox_lock);
  1141. memset(wrb, 0, sizeof(*wrb));
  1142. be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
  1143. be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
  1144. OPCODE_COMMON_REMOVE_TEMPLATE_HEADER_BUFFERS,
  1145. sizeof(*req));
  1146. req->type = BEISCSI_TEMPLATE_HDR_TYPE_ISCSI;
  1147. status = be_mbox_notify(ctrl);
  1148. spin_unlock(&ctrl->mbox_lock);
  1149. return status;
  1150. }
/**
 * be_cmd_iscsi_post_sgl_pages()- Post SGL pages to the FW
 * @ctrl: ptr to ctrl_info
 * @q_mem: memory holding the SGL pages (q_mem->dma is advanced as
 *         pages are consumed)
 * @page_offset: starting page offset in the FW's SGL table
 * @num_pages: number of pages to post (0xff is a FW sentinel,
 *             see below)
 *
 * Post the pages in chunks, each chunk bounded by the number of
 * page entries one request can carry. On any FW failure the
 * already-posted pages are removed via QTYPE_SGL destroy.
 *
 * return
 * Success: 0
 * Failure: Non-Zero value
 **/
int be_cmd_iscsi_post_sgl_pages(struct be_ctrl_info *ctrl,
				struct be_dma_mem *q_mem,
				u32 page_offset, u32 num_pages)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_post_sgl_pages_req *req = embedded_payload(wrb);
	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
	int status;
	unsigned int curr_pages;
	u32 internal_page_offset = 0;
	u32 temp_num_pages = num_pages;

	/* 0xff is a sentinel value: loop once, but the literal 0xff is
	 * restored into req->num_pages before notifying the FW.
	 * NOTE(review): the meaning of 0xff comes from the FW interface —
	 * confirm against the adapter spec.
	 */
	if (num_pages == 0xff)
		num_pages = 1;

	spin_lock(&ctrl->mbox_lock);
	do {
		memset(wrb, 0, sizeof(*wrb));
		be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
		be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
				   OPCODE_COMMON_ISCSI_CFG_POST_SGL_PAGES,
				   sizeof(*req));
		/* Max page entries a single request can carry */
		curr_pages = BE_NUMBER_OF_FIELD(struct be_post_sgl_pages_req,
						pages);
		req->num_pages = min(num_pages, curr_pages);
		req->page_offset = page_offset;
		be_cmd_page_addrs_prepare(req->pages, req->num_pages, q_mem);
		/* Advance the DMA cursor past the pages just described */
		q_mem->dma = q_mem->dma + (req->num_pages * PAGE_SIZE);
		internal_page_offset += req->num_pages;
		page_offset += req->num_pages;
		num_pages -= req->num_pages;

		/* Put the 0xff sentinel back into the request for the FW */
		if (temp_num_pages == 0xff)
			req->num_pages = temp_num_pages;

		status = be_mbox_notify(ctrl);
		if (status) {
			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
				    "BC_%d : FW CMD to map iscsi frags failed.\n");

			goto error;
		}
	} while (num_pages > 0);
error:
	/* Reached on both success fall-through and failure; the status
	 * check below distinguishes the two.
	 */
	spin_unlock(&ctrl->mbox_lock);
	if (status != 0)
		beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL);
	return status;
}
  1195. int beiscsi_cmd_reset_function(struct beiscsi_hba *phba)
  1196. {
  1197. struct be_ctrl_info *ctrl = &phba->ctrl;
  1198. struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
  1199. struct be_post_sgl_pages_req *req = embedded_payload(wrb);
  1200. int status;
  1201. spin_lock(&ctrl->mbox_lock);
  1202. req = embedded_payload(wrb);
  1203. be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
  1204. be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
  1205. OPCODE_COMMON_FUNCTION_RESET, sizeof(*req));
  1206. status = be_mbox_notify_wait(phba);
  1207. spin_unlock(&ctrl->mbox_lock);
  1208. return status;
  1209. }
/**
 * be_cmd_set_vlan()- Configure VLAN parameters on the adapter
 * @phba: device priv structure instance
 * @vlan_tag: TAG to be set
 *
 * Set the VLAN_TAG for the adapter or Disable VLAN on adapter
 *
 * returns
 * TAG for the MBX Cmd
 * **/
int be_cmd_set_vlan(struct beiscsi_hba *phba,
		     uint16_t vlan_tag)
{
	unsigned int tag = 0;
	struct be_mcc_wrb *wrb;
	struct be_cmd_set_vlan_req *req;
	struct be_ctrl_info *ctrl = &phba->ctrl;

	spin_lock(&ctrl->mbox_lock);
	/* tag == 0 means no MCC tag available; caller treats 0 as failure */
	tag = alloc_mcc_tag(phba);
	if (!tag) {
		spin_unlock(&ctrl->mbox_lock);
		return tag;
	}

	wrb = wrb_from_mccq(phba);
	req = embedded_payload(wrb);
	wrb->tag0 |= tag;
	/* NOTE(review): other commands in this file pass sizeof(*req)
	 * here, not sizeof(*wrb) — confirm this is intentional.
	 */
	be_wrb_hdr_prepare(wrb, sizeof(*wrb), true, 0);
	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
			   OPCODE_COMMON_ISCSI_NTWK_SET_VLAN,
			   sizeof(*req));

	req->interface_hndl = phba->interface_handle;
	/* NOTE(review): the tag is written into the vlan_priority field —
	 * presumably it encodes priority+tag per the FW layout; verify.
	 */
	req->vlan_priority = vlan_tag;

	be_mcc_notify(phba);
	spin_unlock(&ctrl->mbox_lock);

	return tag;
}