cmd.c
/*
 * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <asm-generic/kmap_types.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/random.h>
#include <linux/io-mapping.h>
#include <linux/mlx5/driver.h>
#include <linux/debugfs.h>

#include "mlx5_core.h"

enum {
	CMD_IF_REV = 5,
};

enum {
	CMD_MODE_POLLING,
	CMD_MODE_EVENTS
};

enum {
	NUM_LONG_LISTS = 2,
	NUM_MED_LISTS = 64,
	LONG_LIST_SIZE = (2ULL * 1024 * 1024 * 1024 / PAGE_SIZE) * 8 + 16 +
			 MLX5_CMD_DATA_BLOCK_SIZE,
	MED_LIST_SIZE = 16 + MLX5_CMD_DATA_BLOCK_SIZE,
};

enum {
	MLX5_CMD_DELIVERY_STAT_OK			= 0x0,
	MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR		= 0x1,
	MLX5_CMD_DELIVERY_STAT_TOK_ERR			= 0x2,
	MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR		= 0x3,
	MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR	= 0x4,
	MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR		= 0x5,
	MLX5_CMD_DELIVERY_STAT_FW_ERR			= 0x6,
	MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR		= 0x7,
	MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR		= 0x8,
	MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR	= 0x9,
	MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR		= 0x10,
};

enum {
	MLX5_CMD_STAT_OK			= 0x0,
	MLX5_CMD_STAT_INT_ERR			= 0x1,
	MLX5_CMD_STAT_BAD_OP_ERR		= 0x2,
	MLX5_CMD_STAT_BAD_PARAM_ERR		= 0x3,
	MLX5_CMD_STAT_BAD_SYS_STATE_ERR		= 0x4,
	MLX5_CMD_STAT_BAD_RES_ERR		= 0x5,
	MLX5_CMD_STAT_RES_BUSY			= 0x6,
	MLX5_CMD_STAT_LIM_ERR			= 0x8,
	MLX5_CMD_STAT_BAD_RES_STATE_ERR		= 0x9,
	MLX5_CMD_STAT_IX_ERR			= 0xa,
	MLX5_CMD_STAT_NO_RES_ERR		= 0xf,
	MLX5_CMD_STAT_BAD_INP_LEN_ERR		= 0x50,
	MLX5_CMD_STAT_BAD_OUTP_LEN_ERR		= 0x51,
	MLX5_CMD_STAT_BAD_QP_STATE_ERR		= 0x10,
	MLX5_CMD_STAT_BAD_PKT_ERR		= 0x30,
	MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR	= 0x40,
};

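/* Allocate a command work entry. Callers that pass a completion callback may
 * be in a context that cannot sleep, so use GFP_ATOMIC for them and
 * GFP_KERNEL otherwise.
 */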
static struct mlx5_cmd_work_ent *alloc_cmd(struct mlx5_cmd *cmd,
					   struct mlx5_cmd_msg *in,
					   struct mlx5_cmd_msg *out,
					   void *uout, int uout_size,
					   mlx5_cmd_cbk_t cbk,
					   void *context, int page_queue)
{
	gfp_t alloc_flags = cbk ? GFP_ATOMIC : GFP_KERNEL;
	struct mlx5_cmd_work_ent *ent;

	ent = kzalloc(sizeof(*ent), alloc_flags);
	if (!ent)
		return ERR_PTR(-ENOMEM);

	ent->in		= in;
	ent->out	= out;
	ent->uout	= uout;
	ent->uout_size	= uout_size;
	ent->callback	= cbk;
	ent->context	= context;
	ent->cmd	= cmd;
	ent->page_queue	= page_queue;

	return ent;
}

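/* Tokens cycle through 1..255; 0 is skipped, presumably so a zeroed
 * descriptor can never carry a valid-looking token.
 */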
static u8 alloc_token(struct mlx5_cmd *cmd)
{
	u8 token;

	spin_lock(&cmd->token_lock);
	token = cmd->token++ % 255 + 1;
	spin_unlock(&cmd->token_lock);

	return token;
}

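/* Reserve a free regular command slot, tracked as a bitmask over the
 * max_reg_cmds entries. The last entry of the queue is dedicated to the page
 * queue and is never handed out here.
 */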
static int alloc_ent(struct mlx5_cmd *cmd)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cmd->alloc_lock, flags);
	ret = find_first_bit(&cmd->bitmask, cmd->max_reg_cmds);
	if (ret < cmd->max_reg_cmds)
		clear_bit(ret, &cmd->bitmask);
	spin_unlock_irqrestore(&cmd->alloc_lock, flags);

	return ret < cmd->max_reg_cmds ? ret : -ENOMEM;
}

static void free_ent(struct mlx5_cmd *cmd, int idx)
{
	unsigned long flags;

	spin_lock_irqsave(&cmd->alloc_lock, flags);
	set_bit(idx, &cmd->bitmask);
	spin_unlock_irqrestore(&cmd->alloc_lock, flags);
}

static struct mlx5_cmd_layout *get_inst(struct mlx5_cmd *cmd, int idx)
{
	return cmd->cmd_buf + (idx << cmd->log_stride);
}

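/* Command integrity uses an XOR-8 scheme: the signature byte is chosen so
 * that every byte of the descriptor (or mailbox block) XORs to 0xff.
 * xor8_buf() computes the running XOR; the helpers below stamp and verify it.
 */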
static u8 xor8_buf(void *buf, int len)
{
	u8 *ptr = buf;
	u8 sum = 0;
	int i;

	for (i = 0; i < len; i++)
		sum ^= ptr[i];

	return sum;
}

static int verify_block_sig(struct mlx5_cmd_prot_block *block)
{
	if (xor8_buf(block->rsvd0, sizeof(*block) - sizeof(block->data) - 1) != 0xff)
		return -EINVAL;

	if (xor8_buf(block, sizeof(*block)) != 0xff)
		return -EINVAL;

	return 0;
}

static void calc_block_sig(struct mlx5_cmd_prot_block *block, u8 token,
			   int csum)
{
	block->token = token;
	if (csum) {
		block->ctrl_sig = ~xor8_buf(block->rsvd0, sizeof(*block) -
					    sizeof(block->data) - 2);
		block->sig = ~xor8_buf(block, sizeof(*block) - 1);
	}
}

static void calc_chain_sig(struct mlx5_cmd_msg *msg, u8 token, int csum)
{
	struct mlx5_cmd_mailbox *next = msg->next;

	while (next) {
		calc_block_sig(next->buf, token, csum);
		next = next->next;
	}
}

static void set_signature(struct mlx5_cmd_work_ent *ent, int csum)
{
	ent->lay->sig = ~xor8_buf(ent->lay, sizeof(*ent->lay));
	calc_chain_sig(ent->in, ent->token, csum);
	calc_chain_sig(ent->out, ent->token, csum);
}

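/* Poll the descriptor until firmware hands ownership back to software, or
 * until the command timeout plus a one second grace period expires.
 */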
static void poll_timeout(struct mlx5_cmd_work_ent *ent)
{
	unsigned long poll_end = jiffies + msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC + 1000);
	u8 own;

	do {
		own = ent->lay->status_own;
		if (!(own & CMD_OWNER_HW)) {
			ent->ret = 0;
			return;
		}
		usleep_range(5000, 10000);
	} while (time_before(jiffies, poll_end));

	ent->ret = -ETIMEDOUT;
}

static void free_cmd(struct mlx5_cmd_work_ent *ent)
{
	kfree(ent);
}

static int verify_signature(struct mlx5_cmd_work_ent *ent)
{
	struct mlx5_cmd_mailbox *next = ent->out->next;
	int err;
	u8 sig;

	sig = xor8_buf(ent->lay, sizeof(*ent->lay));
	if (sig != 0xff)
		return -EINVAL;

	while (next) {
		err = verify_block_sig(next->buf);
		if (err)
			return err;

		next = next->next;
	}

	return 0;
}

static void dump_buf(void *buf, int size, int data_only, int offset)
{
	__be32 *p = buf;
	int i;

	for (i = 0; i < size; i += 16) {
		pr_debug("%03x: %08x %08x %08x %08x\n", offset, be32_to_cpu(p[0]),
			 be32_to_cpu(p[1]), be32_to_cpu(p[2]),
			 be32_to_cpu(p[3]));
		p += 4;
		offset += 16;
	}
	if (!data_only)
		pr_debug("\n");
}

const char *mlx5_command_str(int command)
{
	switch (command) {
	case MLX5_CMD_OP_QUERY_HCA_CAP:
		return "QUERY_HCA_CAP";
	case MLX5_CMD_OP_SET_HCA_CAP:
		return "SET_HCA_CAP";
	case MLX5_CMD_OP_QUERY_ADAPTER:
		return "QUERY_ADAPTER";
	case MLX5_CMD_OP_INIT_HCA:
		return "INIT_HCA";
	case MLX5_CMD_OP_TEARDOWN_HCA:
		return "TEARDOWN_HCA";
	case MLX5_CMD_OP_ENABLE_HCA:
		return "ENABLE_HCA";
	case MLX5_CMD_OP_DISABLE_HCA:
		return "DISABLE_HCA";
	case MLX5_CMD_OP_QUERY_PAGES:
		return "QUERY_PAGES";
	case MLX5_CMD_OP_MANAGE_PAGES:
		return "MANAGE_PAGES";
	case MLX5_CMD_OP_CREATE_MKEY:
		return "CREATE_MKEY";
	case MLX5_CMD_OP_QUERY_MKEY:
		return "QUERY_MKEY";
	case MLX5_CMD_OP_DESTROY_MKEY:
		return "DESTROY_MKEY";
	case MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS:
		return "QUERY_SPECIAL_CONTEXTS";
	case MLX5_CMD_OP_CREATE_EQ:
		return "CREATE_EQ";
	case MLX5_CMD_OP_DESTROY_EQ:
		return "DESTROY_EQ";
	case MLX5_CMD_OP_QUERY_EQ:
		return "QUERY_EQ";
	case MLX5_CMD_OP_CREATE_CQ:
		return "CREATE_CQ";
	case MLX5_CMD_OP_DESTROY_CQ:
		return "DESTROY_CQ";
	case MLX5_CMD_OP_QUERY_CQ:
		return "QUERY_CQ";
	case MLX5_CMD_OP_MODIFY_CQ:
		return "MODIFY_CQ";
	case MLX5_CMD_OP_CREATE_QP:
		return "CREATE_QP";
	case MLX5_CMD_OP_DESTROY_QP:
		return "DESTROY_QP";
	case MLX5_CMD_OP_RST2INIT_QP:
		return "RST2INIT_QP";
	case MLX5_CMD_OP_INIT2RTR_QP:
		return "INIT2RTR_QP";
	case MLX5_CMD_OP_RTR2RTS_QP:
		return "RTR2RTS_QP";
	case MLX5_CMD_OP_RTS2RTS_QP:
		return "RTS2RTS_QP";
	case MLX5_CMD_OP_SQERR2RTS_QP:
		return "SQERR2RTS_QP";
	case MLX5_CMD_OP_2ERR_QP:
		return "2ERR_QP";
	case MLX5_CMD_OP_2RST_QP:
		return "2RST_QP";
	case MLX5_CMD_OP_QUERY_QP:
		return "QUERY_QP";
	case MLX5_CMD_OP_MAD_IFC:
		return "MAD_IFC";
	case MLX5_CMD_OP_INIT2INIT_QP:
		return "INIT2INIT_QP";
	case MLX5_CMD_OP_CREATE_PSV:
		return "CREATE_PSV";
	case MLX5_CMD_OP_DESTROY_PSV:
		return "DESTROY_PSV";
	case MLX5_CMD_OP_CREATE_SRQ:
		return "CREATE_SRQ";
	case MLX5_CMD_OP_DESTROY_SRQ:
		return "DESTROY_SRQ";
	case MLX5_CMD_OP_QUERY_SRQ:
		return "QUERY_SRQ";
	case MLX5_CMD_OP_ARM_RQ:
		return "ARM_RQ";
	case MLX5_CMD_OP_RESIZE_SRQ:
		return "RESIZE_SRQ";
	case MLX5_CMD_OP_ALLOC_PD:
		return "ALLOC_PD";
	case MLX5_CMD_OP_DEALLOC_PD:
		return "DEALLOC_PD";
	case MLX5_CMD_OP_ALLOC_UAR:
		return "ALLOC_UAR";
	case MLX5_CMD_OP_DEALLOC_UAR:
		return "DEALLOC_UAR";
	case MLX5_CMD_OP_ATTACH_TO_MCG:
		return "ATTACH_TO_MCG";
	case MLX5_CMD_OP_DETACH_FROM_MCG:
		return "DETACH_FROM_MCG";
	case MLX5_CMD_OP_ALLOC_XRCD:
		return "ALLOC_XRCD";
	case MLX5_CMD_OP_DEALLOC_XRCD:
		return "DEALLOC_XRCD";
	case MLX5_CMD_OP_ACCESS_REG:
		return "ACCESS_REG";
	default:
		return "unknown command opcode";
	}
}

static void dump_command(struct mlx5_core_dev *dev,
			 struct mlx5_cmd_work_ent *ent, int input)
{
	u16 op = be16_to_cpu(((struct mlx5_inbox_hdr *)(ent->lay->in))->opcode);
	struct mlx5_cmd_msg *msg = input ? ent->in : ent->out;
	struct mlx5_cmd_mailbox *next = msg->next;
	int data_only;
	u32 offset = 0;
	int dump_len;

	data_only = !!(mlx5_core_debug_mask & (1 << MLX5_CMD_DATA));

	if (data_only)
		mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_DATA,
				   "dump command data %s(0x%x) %s\n",
				   mlx5_command_str(op), op,
				   input ? "INPUT" : "OUTPUT");
	else
		mlx5_core_dbg(dev, "dump command %s(0x%x) %s\n",
			      mlx5_command_str(op), op,
			      input ? "INPUT" : "OUTPUT");

	if (data_only) {
		if (input) {
			dump_buf(ent->lay->in, sizeof(ent->lay->in), 1, offset);
			offset += sizeof(ent->lay->in);
		} else {
			dump_buf(ent->lay->out, sizeof(ent->lay->out), 1, offset);
			offset += sizeof(ent->lay->out);
		}
	} else {
		dump_buf(ent->lay, sizeof(*ent->lay), 0, offset);
		offset += sizeof(*ent->lay);
	}

	while (next && offset < msg->len) {
		if (data_only) {
			dump_len = min_t(int, MLX5_CMD_DATA_BLOCK_SIZE, msg->len - offset);
			dump_buf(next->buf, dump_len, 1, offset);
			offset += MLX5_CMD_DATA_BLOCK_SIZE;
		} else {
			mlx5_core_dbg(dev, "command block:\n");
			dump_buf(next->buf, sizeof(struct mlx5_cmd_prot_block), 0, offset);
			offset += sizeof(struct mlx5_cmd_prot_block);
		}
		next = next->next;
	}

	if (data_only)
		pr_debug("\n");
}

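/* The heart of the command path. Runs on the command workqueue (or
 * synchronously for page-queue commands): takes a semaphore slot, builds the
 * hardware descriptor from the message chain, stamps token and signature, and
 * rings the doorbell. In polling mode the completion handler is also invoked
 * inline once ownership returns to software.
 */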
static void cmd_work_handler(struct work_struct *work)
{
	struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work);
	struct mlx5_cmd *cmd = ent->cmd;
	struct mlx5_core_dev *dev = container_of(cmd, struct mlx5_core_dev, cmd);
	struct mlx5_cmd_layout *lay;
	struct semaphore *sem;

	sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;
	down(sem);
	if (!ent->page_queue) {
		ent->idx = alloc_ent(cmd);
		if (ent->idx < 0) {
			mlx5_core_err(dev, "failed to allocate command entry\n");
			up(sem);
			return;
		}
	} else {
		ent->idx = cmd->max_reg_cmds;
	}

	ent->token = alloc_token(cmd);
	cmd->ent_arr[ent->idx] = ent;
	lay = get_inst(cmd, ent->idx);
	ent->lay = lay;
	memset(lay, 0, sizeof(*lay));
	memcpy(lay->in, ent->in->first.data, sizeof(lay->in));
	ent->op = be32_to_cpu(lay->in[0]) >> 16;
	if (ent->in->next)
		lay->in_ptr = cpu_to_be64(ent->in->next->dma);
	lay->inlen = cpu_to_be32(ent->in->len);
	if (ent->out->next)
		lay->out_ptr = cpu_to_be64(ent->out->next->dma);
	lay->outlen = cpu_to_be32(ent->out->len);
	lay->type = MLX5_PCI_CMD_XPORT;
	lay->token = ent->token;
	lay->status_own = CMD_OWNER_HW;
	set_signature(ent, !cmd->checksum_disabled);
	dump_command(dev, ent, 1);
	ent->ts1 = ktime_get_ns();

	/* ring doorbell after the descriptor is valid */
	wmb();
	iowrite32be(1 << ent->idx, &dev->iseg->cmd_dbell);
	mlx5_core_dbg(dev, "write 0x%x to command doorbell\n", 1 << ent->idx);
	mmiowb();
	if (cmd->mode == CMD_MODE_POLLING) {
		poll_timeout(ent);
		/* make sure we read the descriptor after ownership is SW */
		rmb();
		mlx5_cmd_comp_handler(dev, 1UL << ent->idx);
	}
}

static const char *deliv_status_to_str(u8 status)
{
	switch (status) {
	case MLX5_CMD_DELIVERY_STAT_OK:
		return "no errors";
	case MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR:
		return "signature error";
	case MLX5_CMD_DELIVERY_STAT_TOK_ERR:
		return "token error";
	case MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR:
		return "bad block number";
	case MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR:
		return "output pointer not aligned to block size";
	case MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR:
		return "input pointer not aligned to block size";
	case MLX5_CMD_DELIVERY_STAT_FW_ERR:
		return "firmware internal error";
	case MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR:
		return "command input length error";
	case MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR:
		return "command output length error";
	case MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR:
		return "reserved fields not cleared";
	case MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR:
		return "bad command descriptor type";
	default:
		return "unknown status code";
	}
}

static u16 msg_to_opcode(struct mlx5_cmd_msg *in)
{
	struct mlx5_inbox_hdr *hdr = (struct mlx5_inbox_hdr *)(in->first.data);

	return be16_to_cpu(hdr->opcode);
}

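/* Wait for a command to complete. In events mode the timeout is enforced
 * here; in polling mode cmd_work_handler() already bounded the wait, so just
 * block on the completion.
 */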
static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
{
	unsigned long timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC);
	struct mlx5_cmd *cmd = &dev->cmd;
	int err;

	if (cmd->mode == CMD_MODE_POLLING) {
		wait_for_completion(&ent->done);
		err = ent->ret;
	} else {
		if (!wait_for_completion_timeout(&ent->done, timeout))
			err = -ETIMEDOUT;
		else
			err = 0;
	}
	if (err == -ETIMEDOUT) {
		mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n",
			       mlx5_command_str(msg_to_opcode(ent->in)),
			       msg_to_opcode(ent->in));
	}
	mlx5_core_dbg(dev, "err %d, delivery status %s(%d)\n",
		      err, deliv_status_to_str(ent->status), ent->status);

	return err;
}

/* Notes:
 * 1. Callback functions may not sleep
 * 2. page queue commands do not support asynchronous completion
 */
static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
			   struct mlx5_cmd_msg *out, void *uout, int uout_size,
			   mlx5_cmd_cbk_t callback,
			   void *context, int page_queue, u8 *status)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	struct mlx5_cmd_work_ent *ent;
	struct mlx5_cmd_stats *stats;
	int err = 0;
	s64 ds;
	u16 op;

	if (callback && page_queue)
		return -EINVAL;

	ent = alloc_cmd(cmd, in, out, uout, uout_size, callback, context,
			page_queue);
	if (IS_ERR(ent))
		return PTR_ERR(ent);

	if (!callback)
		init_completion(&ent->done);

	INIT_WORK(&ent->work, cmd_work_handler);
	if (page_queue) {
		cmd_work_handler(&ent->work);
	} else if (!queue_work(cmd->wq, &ent->work)) {
		mlx5_core_warn(dev, "failed to queue work\n");
		err = -ENOMEM;
		goto out_free;
	}

	if (!callback) {
		err = wait_func(dev, ent);
		if (err == -ETIMEDOUT)
			goto out;

		ds = ent->ts2 - ent->ts1;
		op = be16_to_cpu(((struct mlx5_inbox_hdr *)in->first.data)->opcode);
		if (op < ARRAY_SIZE(cmd->stats)) {
			stats = &cmd->stats[op];
			spin_lock_irq(&stats->lock);
			stats->sum += ds;
			++stats->n;
			spin_unlock_irq(&stats->lock);
		}
		mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_TIME,
				   "fw exec time for %s is %lld nsec\n",
				   mlx5_command_str(op), ds);
		*status = ent->status;
		free_cmd(ent);
	}

	return err;

out_free:
	free_cmd(ent);
out:
	return err;
}

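/* debugfs "run" file: writing the string "go" executes the command staged in
 * the "in" buffer and leaves the result in the "out" buffer.
 */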
static ssize_t dbg_write(struct file *filp, const char __user *buf,
			 size_t count, loff_t *pos)
{
	struct mlx5_core_dev *dev = filp->private_data;
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
	char lbuf[3];
	int err;

	if (!dbg->in_msg || !dbg->out_msg)
		return -ENOMEM;

	if (copy_from_user(lbuf, buf, sizeof(lbuf)))
		return -EFAULT;

	lbuf[sizeof(lbuf) - 1] = 0;

	if (strcmp(lbuf, "go"))
		return -EINVAL;

	err = mlx5_cmd_exec(dev, dbg->in_msg, dbg->inlen, dbg->out_msg, dbg->outlen);

	return err ? err : count;
}

static const struct file_operations fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= dbg_write,
};

static int mlx5_copy_to_msg(struct mlx5_cmd_msg *to, void *from, int size)
{
	struct mlx5_cmd_prot_block *block;
	struct mlx5_cmd_mailbox *next;
	int copy;

	if (!to || !from)
		return -ENOMEM;

	copy = min_t(int, size, sizeof(to->first.data));
	memcpy(to->first.data, from, copy);
	size -= copy;
	from += copy;

	next = to->next;
	while (size) {
		if (!next) {
			/* this is a BUG */
			return -ENOMEM;
		}

		copy = min_t(int, size, MLX5_CMD_DATA_BLOCK_SIZE);
		block = next->buf;
		memcpy(block->data, from, copy);
		from += copy;
		size -= copy;
		next = next->next;
	}

	return 0;
}

static int mlx5_copy_from_msg(void *to, struct mlx5_cmd_msg *from, int size)
{
	struct mlx5_cmd_prot_block *block;
	struct mlx5_cmd_mailbox *next;
	int copy;

	if (!to || !from)
		return -ENOMEM;

	copy = min_t(int, size, sizeof(from->first.data));
	memcpy(to, from->first.data, copy);
	size -= copy;
	to += copy;

	next = from->next;
	while (size) {
		if (!next) {
			/* this is a BUG */
			return -ENOMEM;
		}

		copy = min_t(int, size, MLX5_CMD_DATA_BLOCK_SIZE);
		block = next->buf;
		memcpy(to, block->data, copy);
		to += copy;
		size -= copy;
		next = next->next;
	}

	return 0;
}

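/* Mailbox blocks come from a PCI DMA pool, so every block has a bus address
 * that can be linked into another block's next pointer or into the
 * descriptor.
 */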
static struct mlx5_cmd_mailbox *alloc_cmd_box(struct mlx5_core_dev *dev,
					      gfp_t flags)
{
	struct mlx5_cmd_mailbox *mailbox;

	mailbox = kmalloc(sizeof(*mailbox), flags);
	if (!mailbox)
		return ERR_PTR(-ENOMEM);

	mailbox->buf = pci_pool_alloc(dev->cmd.pool, flags,
				      &mailbox->dma);
	if (!mailbox->buf) {
		mlx5_core_dbg(dev, "failed allocation\n");
		kfree(mailbox);
		return ERR_PTR(-ENOMEM);
	}
	memset(mailbox->buf, 0, sizeof(struct mlx5_cmd_prot_block));
	mailbox->next = NULL;

	return mailbox;
}

static void free_cmd_box(struct mlx5_core_dev *dev,
			 struct mlx5_cmd_mailbox *mailbox)
{
	pci_pool_free(dev->cmd.pool, mailbox->buf, mailbox->dma);
	kfree(mailbox);
}

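/* Build a command message: the first 16 bytes travel inline in the descriptor
 * (msg->first.data); the remainder is split across chained mailbox blocks of
 * MLX5_CMD_DATA_BLOCK_SIZE bytes. The chain is built tail-first, so when the
 * loop finishes "head" is block 0 and every block's next field already holds
 * the bus address of its successor.
 */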
static struct mlx5_cmd_msg *mlx5_alloc_cmd_msg(struct mlx5_core_dev *dev,
					       gfp_t flags, int size)
{
	struct mlx5_cmd_mailbox *tmp, *head = NULL;
	struct mlx5_cmd_prot_block *block;
	struct mlx5_cmd_msg *msg;
	int blen;
	int err;
	int n;
	int i;

	msg = kzalloc(sizeof(*msg), flags);
	if (!msg)
		return ERR_PTR(-ENOMEM);

	blen = size - min_t(int, sizeof(msg->first.data), size);
	n = (blen + MLX5_CMD_DATA_BLOCK_SIZE - 1) / MLX5_CMD_DATA_BLOCK_SIZE;

	for (i = 0; i < n; i++) {
		tmp = alloc_cmd_box(dev, flags);
		if (IS_ERR(tmp)) {
			mlx5_core_warn(dev, "failed allocating block\n");
			err = PTR_ERR(tmp);
			goto err_alloc;
		}

		block = tmp->buf;
		tmp->next = head;
		block->next = cpu_to_be64(tmp->next ? tmp->next->dma : 0);
		block->block_num = cpu_to_be32(n - i - 1);
		head = tmp;
	}
	msg->next = head;
	msg->len = size;
	return msg;

err_alloc:
	while (head) {
		tmp = head->next;
		free_cmd_box(dev, head);
		head = tmp;
	}
	kfree(msg);

	return ERR_PTR(err);
}

static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev,
			      struct mlx5_cmd_msg *msg)
{
	struct mlx5_cmd_mailbox *head = msg->next;
	struct mlx5_cmd_mailbox *next;

	while (head) {
		next = head->next;
		free_cmd_box(dev, head);
		head = next;
	}
	kfree(msg);
}

static ssize_t data_write(struct file *filp, const char __user *buf,
			  size_t count, loff_t *pos)
{
	struct mlx5_core_dev *dev = filp->private_data;
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
	void *ptr;
	int err;

	if (*pos != 0)
		return -EINVAL;

	kfree(dbg->in_msg);
	dbg->in_msg = NULL;
	dbg->inlen = 0;

	ptr = kzalloc(count, GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	if (copy_from_user(ptr, buf, count)) {
		err = -EFAULT;
		goto out;
	}
	dbg->in_msg = ptr;
	dbg->inlen = count;

	*pos = count;

	return count;

out:
	kfree(ptr);
	return err;
}

static ssize_t data_read(struct file *filp, char __user *buf, size_t count,
			 loff_t *pos)
{
	struct mlx5_core_dev *dev = filp->private_data;
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
	int copy;

	if (*pos)
		return 0;

	if (!dbg->out_msg)
		return -ENOMEM;

	copy = min_t(int, count, dbg->outlen);
	if (copy_to_user(buf, dbg->out_msg, copy))
		return -EFAULT;

	*pos += copy;

	return copy;
}

static const struct file_operations dfops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= data_write,
	.read	= data_read,
};

static ssize_t outlen_read(struct file *filp, char __user *buf, size_t count,
			   loff_t *pos)
{
	struct mlx5_core_dev *dev = filp->private_data;
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
	char outlen[8];
	int err;

	if (*pos)
		return 0;

	err = snprintf(outlen, sizeof(outlen), "%d", dbg->outlen);
	if (err < 0)
		return err;

	if (copy_to_user(buf, outlen, err))
		return -EFAULT;

	*pos += err;

	return err;
}

static ssize_t outlen_write(struct file *filp, const char __user *buf,
			    size_t count, loff_t *pos)
{
	struct mlx5_core_dev *dev = filp->private_data;
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
	char outlen_str[8] = {0};
	int outlen;
	void *ptr;
	int err;

	if (*pos != 0 || count > 6)
		return -EINVAL;

	kfree(dbg->out_msg);
	dbg->out_msg = NULL;
	dbg->outlen = 0;

	if (copy_from_user(outlen_str, buf, count))
		return -EFAULT;

	err = sscanf(outlen_str, "%d", &outlen);
	if (err != 1)
		return -EINVAL;

	ptr = kzalloc(outlen, GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	dbg->out_msg = ptr;
	dbg->outlen = outlen;

	*pos = count;

	return count;
}

static const struct file_operations olfops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= outlen_write,
	.read	= outlen_read,
};

static void set_wqname(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;

	snprintf(cmd->wq_name, sizeof(cmd->wq_name), "mlx5_cmd_%s",
		 dev_name(&dev->pdev->dev));
}

static void clean_debug_files(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;

	if (!mlx5_debugfs_root)
		return;

	mlx5_cmdif_debugfs_cleanup(dev);
	debugfs_remove_recursive(dbg->dbg_root);
}

static int create_debugfs_files(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
	int err = -ENOMEM;

	if (!mlx5_debugfs_root)
		return 0;

	dbg->dbg_root = debugfs_create_dir("cmd", dev->priv.dbg_root);
	if (!dbg->dbg_root)
		return err;

	dbg->dbg_in = debugfs_create_file("in", 0400, dbg->dbg_root,
					  dev, &dfops);
	if (!dbg->dbg_in)
		goto err_dbg;

	dbg->dbg_out = debugfs_create_file("out", 0200, dbg->dbg_root,
					   dev, &dfops);
	if (!dbg->dbg_out)
		goto err_dbg;

	dbg->dbg_outlen = debugfs_create_file("out_len", 0600, dbg->dbg_root,
					      dev, &olfops);
	if (!dbg->dbg_outlen)
		goto err_dbg;

	dbg->dbg_status = debugfs_create_u8("status", 0600, dbg->dbg_root,
					    &dbg->status);
	if (!dbg->dbg_status)
		goto err_dbg;

	dbg->dbg_run = debugfs_create_file("run", 0200, dbg->dbg_root, dev, &fops);
	if (!dbg->dbg_run)
		goto err_dbg;

	mlx5_cmdif_debugfs_init(dev);
	return 0;

err_dbg:
	clean_debug_files(dev);
	return err;
}

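/* Mode switching: take every regular slot plus the page-queue slot so no
 * command can be in flight, flush the workqueue, flip the mode, then release
 * all the semaphores.
 */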
void mlx5_cmd_use_events(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	int i;

	for (i = 0; i < cmd->max_reg_cmds; i++)
		down(&cmd->sem);

	down(&cmd->pages_sem);

	flush_workqueue(cmd->wq);

	cmd->mode = CMD_MODE_EVENTS;

	up(&cmd->pages_sem);
	for (i = 0; i < cmd->max_reg_cmds; i++)
		up(&cmd->sem);
}

void mlx5_cmd_use_polling(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	int i;

	for (i = 0; i < cmd->max_reg_cmds; i++)
		down(&cmd->sem);

	down(&cmd->pages_sem);

	flush_workqueue(cmd->wq);
	cmd->mode = CMD_MODE_POLLING;

	up(&cmd->pages_sem);
	for (i = 0; i < cmd->max_reg_cmds; i++)
		up(&cmd->sem);
}

static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg)
{
	unsigned long flags;

	if (msg->cache) {
		spin_lock_irqsave(&msg->cache->lock, flags);
		list_add_tail(&msg->list, &msg->cache->head);
		spin_unlock_irqrestore(&msg->cache->lock, flags);
	} else {
		mlx5_free_cmd_msg(dev, msg);
	}
}

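/* Completion path, invoked from the EQ handler in events mode or directly by
 * cmd_work_handler() in polling mode; "vector" is a bitmask of completed
 * entries. Callback commands are fully torn down here (stats, copy-out,
 * message and entry freeing); synchronous callers are simply woken up.
 */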
void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, unsigned long vector)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	struct mlx5_cmd_work_ent *ent;
	mlx5_cmd_cbk_t callback;
	void *context;
	int err;
	int i;
	s64 ds;
	struct mlx5_cmd_stats *stats;
	unsigned long flags;

	for (i = 0; i < (1 << cmd->log_sz); i++) {
		if (test_bit(i, &vector)) {
			struct semaphore *sem;

			ent = cmd->ent_arr[i];
			if (ent->page_queue)
				sem = &cmd->pages_sem;
			else
				sem = &cmd->sem;
			ent->ts2 = ktime_get_ns();
			memcpy(ent->out->first.data, ent->lay->out, sizeof(ent->lay->out));
			dump_command(dev, ent, 0);
			if (!ent->ret) {
				if (!cmd->checksum_disabled)
					ent->ret = verify_signature(ent);
				else
					ent->ret = 0;
				ent->status = ent->lay->status_own >> 1;
				mlx5_core_dbg(dev, "command completed. ret 0x%x, delivery status %s(0x%x)\n",
					      ent->ret, deliv_status_to_str(ent->status), ent->status);
			}
			free_ent(cmd, ent->idx);
			if (ent->callback) {
				ds = ent->ts2 - ent->ts1;
				if (ent->op < ARRAY_SIZE(cmd->stats)) {
					stats = &cmd->stats[ent->op];
					spin_lock_irqsave(&stats->lock, flags);
					stats->sum += ds;
					++stats->n;
					spin_unlock_irqrestore(&stats->lock, flags);
				}
				callback = ent->callback;
				context = ent->context;
				err = ent->ret;
				if (!err)
					err = mlx5_copy_from_msg(ent->uout,
								 ent->out,
								 ent->uout_size);

				mlx5_free_cmd_msg(dev, ent->out);
				free_msg(dev, ent->in);

				free_cmd(ent);
				callback(err, context);
			} else {
				complete(&ent->done);
			}
			up(sem);
		}
	}
}
EXPORT_SYMBOL(mlx5_cmd_comp_handler);

static int status_to_err(u8 status)
{
	return status ? -1 : 0; /* TBD more meaningful codes */
}

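/* Take a pre-allocated message from the medium/large cache when one fits,
 * falling back to a fresh allocation. Cached messages keep their full block
 * chain, so only msg->len must be fixed up to the real size.
 */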
static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size,
				      gfp_t gfp)
{
	struct mlx5_cmd_msg *msg = ERR_PTR(-ENOMEM);
	struct mlx5_cmd *cmd = &dev->cmd;
	struct cache_ent *ent = NULL;

	if (in_size > MED_LIST_SIZE && in_size <= LONG_LIST_SIZE)
		ent = &cmd->cache.large;
	else if (in_size > 16 && in_size <= MED_LIST_SIZE)
		ent = &cmd->cache.med;

	if (ent) {
		spin_lock_irq(&ent->lock);
		if (!list_empty(&ent->head)) {
			msg = list_entry(ent->head.next, typeof(*msg), list);
			/* For cached lists, we must explicitly state what is
			 * the real size
			 */
			msg->len = in_size;
			list_del(&msg->list);
		}
		spin_unlock_irq(&ent->lock);
	}

	if (IS_ERR(msg))
		msg = mlx5_alloc_cmd_msg(dev, gfp, in_size);

	return msg;
}

static int is_manage_pages(struct mlx5_inbox_hdr *in)
{
	return be16_to_cpu(in->opcode) == MLX5_CMD_OP_MANAGE_PAGES;
}

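/* MANAGE_PAGES is steered to the dedicated page-queue entry, presumably so
 * firmware page requests can still be served when every regular slot is
 * occupied by commands that may themselves be waiting for pages.
 */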
static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
		    int out_size, mlx5_cmd_cbk_t callback, void *context)
{
	struct mlx5_cmd_msg *inb;
	struct mlx5_cmd_msg *outb;
	int pages_queue;
	gfp_t gfp;
	int err;
	u8 status = 0;

	pages_queue = is_manage_pages(in);
	gfp = callback ? GFP_ATOMIC : GFP_KERNEL;

	inb = alloc_msg(dev, in_size, gfp);
	if (IS_ERR(inb)) {
		err = PTR_ERR(inb);
		return err;
	}

	err = mlx5_copy_to_msg(inb, in, in_size);
	if (err) {
		mlx5_core_warn(dev, "err %d\n", err);
		goto out_in;
	}

	outb = mlx5_alloc_cmd_msg(dev, gfp, out_size);
	if (IS_ERR(outb)) {
		err = PTR_ERR(outb);
		goto out_in;
	}

	err = mlx5_cmd_invoke(dev, inb, outb, out, out_size, callback, context,
			      pages_queue, &status);
	if (err)
		goto out_out;

	mlx5_core_dbg(dev, "err %d, status %d\n", err, status);
	if (status) {
		err = status_to_err(status);
		goto out_out;
	}

	err = mlx5_copy_from_msg(out, outb, out_size);

out_out:
	if (!callback)
		mlx5_free_cmd_msg(dev, outb);

out_in:
	if (!callback)
		free_msg(dev, inb);
	return err;
}

int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
		  int out_size)
{
	return cmd_exec(dev, in, in_size, out, out_size, NULL, NULL);
}
EXPORT_SYMBOL(mlx5_cmd_exec);

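/* Usage sketch for a synchronous command (hypothetical caller, modeled on
 * callers elsewhere in this driver; the ENABLE_HCA mailbox struct names are
 * assumptions, not defined in this file):
 *
 *	struct mlx5_enable_hca_mbox_in in;
 *	struct mlx5_enable_hca_mbox_out out;
 *	int err;
 *
 *	memset(&in, 0, sizeof(in));
 *	memset(&out, 0, sizeof(out));
 *	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ENABLE_HCA);
 *
 *	err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
 *	if (err)
 *		return err;
 *	if (out.hdr.status)
 *		return mlx5_cmd_status_to_err(&out.hdr);
 */
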
int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size,
		     void *out, int out_size, mlx5_cmd_cbk_t callback,
		     void *context)
{
	return cmd_exec(dev, in, in_size, out, out_size, callback, context);
}
EXPORT_SYMBOL(mlx5_cmd_exec_cb);

static void destroy_msg_cache(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	struct mlx5_cmd_msg *msg;
	struct mlx5_cmd_msg *n;

	list_for_each_entry_safe(msg, n, &cmd->cache.large.head, list) {
		list_del(&msg->list);
		mlx5_free_cmd_msg(dev, msg);
	}

	list_for_each_entry_safe(msg, n, &cmd->cache.med.head, list) {
		list_del(&msg->list);
		mlx5_free_cmd_msg(dev, msg);
	}
}

static int create_msg_cache(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	struct mlx5_cmd_msg *msg;
	int err;
	int i;

	spin_lock_init(&cmd->cache.large.lock);
	INIT_LIST_HEAD(&cmd->cache.large.head);
	spin_lock_init(&cmd->cache.med.lock);
	INIT_LIST_HEAD(&cmd->cache.med.head);

	for (i = 0; i < NUM_LONG_LISTS; i++) {
		msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, LONG_LIST_SIZE);
		if (IS_ERR(msg)) {
			err = PTR_ERR(msg);
			goto ex_err;
		}
		msg->cache = &cmd->cache.large;
		list_add_tail(&msg->list, &cmd->cache.large.head);
	}

	for (i = 0; i < NUM_MED_LISTS; i++) {
		msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, MED_LIST_SIZE);
		if (IS_ERR(msg)) {
			err = PTR_ERR(msg);
			goto ex_err;
		}
		msg->cache = &cmd->cache.med;
		list_add_tail(&msg->list, &cmd->cache.med.head);
	}

	return 0;

ex_err:
	destroy_msg_cache(dev);
	return err;
}

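/* Bring-up order: check the command interface revision, set up the DMA pool
 * and the one-page command buffer, read the queue geometry (log_sz,
 * log_stride) from the initialization segment, publish the queue address to
 * firmware, and only then create the message cache, the workqueue and the
 * debugfs files. Polling mode is the default until events are enabled.
 */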
int mlx5_cmd_init(struct mlx5_core_dev *dev)
{
	int size = sizeof(struct mlx5_cmd_prot_block);
	int align = roundup_pow_of_two(size);
	struct mlx5_cmd *cmd = &dev->cmd;
	u32 cmd_h, cmd_l;
	u16 cmd_if_rev;
	int err;
	int i;

	cmd_if_rev = cmdif_rev(dev);
	if (cmd_if_rev != CMD_IF_REV) {
		dev_err(&dev->pdev->dev,
			"Driver cmdif rev(%d) differs from firmware's(%d)\n",
			CMD_IF_REV, cmd_if_rev);
		return -EINVAL;
	}

	cmd->pool = pci_pool_create("mlx5_cmd", dev->pdev, size, align, 0);
	if (!cmd->pool)
		return -ENOMEM;

	cmd->cmd_buf = (void *)__get_free_pages(GFP_ATOMIC, 0);
	if (!cmd->cmd_buf) {
		err = -ENOMEM;
		goto err_free_pool;
	}
	cmd->dma = dma_map_single(&dev->pdev->dev, cmd->cmd_buf, PAGE_SIZE,
				  DMA_BIDIRECTIONAL);
	if (dma_mapping_error(&dev->pdev->dev, cmd->dma)) {
		err = -ENOMEM;
		goto err_free;
	}

	cmd_l = ioread32be(&dev->iseg->cmdq_addr_l_sz) & 0xff;
	cmd->log_sz = cmd_l >> 4 & 0xf;
	cmd->log_stride = cmd_l & 0xf;
	if (1 << cmd->log_sz > MLX5_MAX_COMMANDS) {
		dev_err(&dev->pdev->dev, "firmware reports too many outstanding commands %d\n",
			1 << cmd->log_sz);
		err = -EINVAL;
		goto err_map;
	}

	if (cmd->log_sz + cmd->log_stride > PAGE_SHIFT) {
		dev_err(&dev->pdev->dev, "command queue size overflow\n");
		err = -EINVAL;
		goto err_map;
	}

	cmd->checksum_disabled = 1;
	cmd->max_reg_cmds = (1 << cmd->log_sz) - 1;
	cmd->bitmask = (1 << cmd->max_reg_cmds) - 1;

	cmd->cmdif_rev = ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16;
	if (cmd->cmdif_rev > CMD_IF_REV) {
		dev_err(&dev->pdev->dev, "driver does not support command interface version. driver %d, firmware %d\n",
			CMD_IF_REV, cmd->cmdif_rev);
		err = -ENOTSUPP;
		goto err_map;
	}

	spin_lock_init(&cmd->alloc_lock);
	spin_lock_init(&cmd->token_lock);
	for (i = 0; i < ARRAY_SIZE(cmd->stats); i++)
		spin_lock_init(&cmd->stats[i].lock);

	sema_init(&cmd->sem, cmd->max_reg_cmds);
	sema_init(&cmd->pages_sem, 1);

	cmd_h = (u32)((u64)(cmd->dma) >> 32);
	cmd_l = (u32)(cmd->dma);
	if (cmd_l & 0xfff) {
		dev_err(&dev->pdev->dev, "invalid command queue address\n");
		err = -ENOMEM;
		goto err_map;
	}

	iowrite32be(cmd_h, &dev->iseg->cmdq_addr_h);
	iowrite32be(cmd_l, &dev->iseg->cmdq_addr_l_sz);

	/* Make sure firmware sees the complete address before we proceed */
	wmb();

	mlx5_core_dbg(dev, "descriptor at dma 0x%llx\n", (unsigned long long)(cmd->dma));

	cmd->mode = CMD_MODE_POLLING;

	err = create_msg_cache(dev);
	if (err) {
		dev_err(&dev->pdev->dev, "failed to create command cache\n");
		goto err_map;
	}

	set_wqname(dev);
	cmd->wq = create_singlethread_workqueue(cmd->wq_name);
	if (!cmd->wq) {
		dev_err(&dev->pdev->dev, "failed to create command workqueue\n");
		err = -ENOMEM;
		goto err_cache;
	}

	err = create_debugfs_files(dev);
	if (err) {
		err = -ENOMEM;
		goto err_wq;
	}

	return 0;

err_wq:
	destroy_workqueue(cmd->wq);

err_cache:
	destroy_msg_cache(dev);

err_map:
	dma_unmap_single(&dev->pdev->dev, cmd->dma, PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
err_free:
	free_pages((unsigned long)cmd->cmd_buf, 0);

err_free_pool:
	pci_pool_destroy(cmd->pool);

	return err;
}
EXPORT_SYMBOL(mlx5_cmd_init);

void mlx5_cmd_cleanup(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;

	clean_debug_files(dev);
	destroy_workqueue(cmd->wq);
	destroy_msg_cache(dev);
	dma_unmap_single(&dev->pdev->dev, cmd->dma, PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
	free_pages((unsigned long)cmd->cmd_buf, 0);
	pci_pool_destroy(cmd->pool);
}
EXPORT_SYMBOL(mlx5_cmd_cleanup);

static const char *cmd_status_str(u8 status)
{
	switch (status) {
	case MLX5_CMD_STAT_OK:
		return "OK";
	case MLX5_CMD_STAT_INT_ERR:
		return "internal error";
	case MLX5_CMD_STAT_BAD_OP_ERR:
		return "bad operation";
	case MLX5_CMD_STAT_BAD_PARAM_ERR:
		return "bad parameter";
	case MLX5_CMD_STAT_BAD_SYS_STATE_ERR:
		return "bad system state";
	case MLX5_CMD_STAT_BAD_RES_ERR:
		return "bad resource";
	case MLX5_CMD_STAT_RES_BUSY:
		return "resource busy";
	case MLX5_CMD_STAT_LIM_ERR:
		return "limits exceeded";
	case MLX5_CMD_STAT_BAD_RES_STATE_ERR:
		return "bad resource state";
	case MLX5_CMD_STAT_IX_ERR:
		return "bad index";
	case MLX5_CMD_STAT_NO_RES_ERR:
		return "no resources";
	case MLX5_CMD_STAT_BAD_INP_LEN_ERR:
		return "bad input length";
	case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR:
		return "bad output length";
	case MLX5_CMD_STAT_BAD_QP_STATE_ERR:
		return "bad QP state";
	case MLX5_CMD_STAT_BAD_PKT_ERR:
		return "bad packet (discarded)";
	case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR:
		return "bad size too many outstanding CQEs";
	default:
		return "unknown status";
	}
}

static int cmd_status_to_err(u8 status)
{
	switch (status) {
	case MLX5_CMD_STAT_OK:				return 0;
	case MLX5_CMD_STAT_INT_ERR:			return -EIO;
	case MLX5_CMD_STAT_BAD_OP_ERR:			return -EINVAL;
	case MLX5_CMD_STAT_BAD_PARAM_ERR:		return -EINVAL;
	case MLX5_CMD_STAT_BAD_SYS_STATE_ERR:		return -EIO;
	case MLX5_CMD_STAT_BAD_RES_ERR:			return -EINVAL;
	case MLX5_CMD_STAT_RES_BUSY:			return -EBUSY;
	case MLX5_CMD_STAT_LIM_ERR:			return -ENOMEM;
	case MLX5_CMD_STAT_BAD_RES_STATE_ERR:		return -EINVAL;
	case MLX5_CMD_STAT_IX_ERR:			return -EINVAL;
	case MLX5_CMD_STAT_NO_RES_ERR:			return -EAGAIN;
	case MLX5_CMD_STAT_BAD_INP_LEN_ERR:		return -EIO;
	case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR:		return -EIO;
	case MLX5_CMD_STAT_BAD_QP_STATE_ERR:		return -EINVAL;
	case MLX5_CMD_STAT_BAD_PKT_ERR:			return -EINVAL;
	case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR:	return -EINVAL;
	default:					return -EIO;
	}
}

/* this will be available till all the commands use set/get macros */
int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr)
{
	if (!hdr->status)
		return 0;

	pr_warn("command failed, status %s(0x%x), syndrome 0x%x\n",
		cmd_status_str(hdr->status), hdr->status,
		be32_to_cpu(hdr->syndrome));

	return cmd_status_to_err(hdr->status);
}

int mlx5_cmd_status_to_err_v2(void *ptr)
{
	u32 syndrome;
	u8  status;

	status = be32_to_cpu(*(__be32 *)ptr) >> 24;
	if (!status)
		return 0;

	syndrome = be32_to_cpu(*(__be32 *)(ptr + 4));

	pr_warn("command failed, status %s(0x%x), syndrome 0x%x\n",
		cmd_status_str(status), status, syndrome);

	return cmd_status_to_err(status);
}