/* tcm_qla2xxx.c */
/*******************************************************************************
 * This file contains tcm implementation using v4 configfs fabric infrastructure
 * for QLogic target mode HBAs
 *
 * (c) Copyright 2010-2013 Datera, Inc.
 *
 * Author: Nicholas A. Bellinger <nab@daterainc.com>
 *
 * tcm_qla2xxx_parse_wwn() and tcm_qla2xxx_format_wwn() contains code from
 * the TCM_FC / Open-FCoE.org fabric module.
 *
 * Copyright (c) 2010 Cisco Systems, Inc
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 ****************************************************************************/
  24. #include <linux/module.h>
  25. #include <linux/moduleparam.h>
  26. #include <generated/utsrelease.h>
  27. #include <linux/utsname.h>
  28. #include <linux/init.h>
  29. #include <linux/list.h>
  30. #include <linux/slab.h>
  31. #include <linux/kthread.h>
  32. #include <linux/types.h>
  33. #include <linux/string.h>
  34. #include <linux/configfs.h>
  35. #include <linux/ctype.h>
  36. #include <asm/unaligned.h>
  37. #include <scsi/scsi.h>
  38. #include <scsi/scsi_host.h>
  39. #include <scsi/scsi_device.h>
  40. #include <scsi/scsi_cmnd.h>
  41. #include <target/target_core_base.h>
  42. #include <target/target_core_fabric.h>
  43. #include <target/target_core_fabric_configfs.h>
  44. #include <target/target_core_configfs.h>
  45. #include <target/configfs_macros.h>
  46. #include "qla_def.h"
  47. #include "qla_target.h"
  48. #include "tcm_qla2xxx.h"
/* Workqueues used to defer command/mgmt-command release out of IRQ context */
static struct workqueue_struct *tcm_qla2xxx_free_wq;
static struct workqueue_struct *tcm_qla2xxx_cmd_wq;

/* Local pointer to allocated TCM configfs fabric module */
static struct target_fabric_configfs *tcm_qla2xxx_fabric_configfs;
static struct target_fabric_configfs *tcm_qla2xxx_npiv_fabric_configfs;
/*
 * Parse WWN.
 * If strict, we require lower-case hex and colon separators to be sure
 * the name is the same as what would be generated by ft_format_wwn()
 * so the name and wwn are mapped one-to-one.
 *
 * Returns the number of characters consumed on success, or -1 on a
 * malformed name (the pr_debug err code identifies which check failed).
 */
static ssize_t tcm_qla2xxx_parse_wwn(const char *name, u64 *wwn, int strict)
{
        const char *cp;
        char c;
        u32 nibble;
        u32 byte = 0;           /* completed colon-separated bytes so far */
        u32 pos = 0;            /* hex digits seen in the current byte */
        u32 err;

        *wwn = 0;
        for (cp = name; cp < &name[TCM_QLA2XXX_NAMELEN - 1]; cp++) {
                c = *cp;
                /* Ignore a single trailing newline before the NUL */
                if (c == '\n' && cp[1] == '\0')
                        continue;
                /* In strict mode, a ':' must follow every two hex digits */
                if (strict && pos++ == 2 && byte++ < 7) {
                        pos = 0;
                        if (c == ':')
                                continue;
                        err = 1;
                        goto fail;
                }
                if (c == '\0') {
                        err = 2;
                        /* strict mode requires all 8 bytes to be present */
                        if (strict && byte != 8)
                                goto fail;
                        return cp - name;
                }
                err = 3;
                if (isdigit(c))
                        nibble = c - '0';
                else if (isxdigit(c) && (islower(c) || !strict))
                        nibble = tolower(c) - 'a' + 10;
                else
                        goto fail;
                /* Accumulate nibbles MSB-first into the 64-bit WWN */
                *wwn = (*wwn << 4) | nibble;
        }
        err = 4;        /* ran off the end without hitting a NUL */
fail:
        pr_debug("err %u len %zu pos %u byte %u\n",
                 err, cp - name, pos, byte);
        return -1;
}
  101. static ssize_t tcm_qla2xxx_format_wwn(char *buf, size_t len, u64 wwn)
  102. {
  103. u8 b[8];
  104. put_unaligned_be64(wwn, b);
  105. return snprintf(buf, len,
  106. "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x",
  107. b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7]);
  108. }
  109. static char *tcm_qla2xxx_get_fabric_name(void)
  110. {
  111. return "qla2xxx";
  112. }
  113. /*
  114. * From drivers/scsi/scsi_transport_fc.c:fc_parse_wwn
  115. */
  116. static int tcm_qla2xxx_npiv_extract_wwn(const char *ns, u64 *nm)
  117. {
  118. unsigned int i, j;
  119. u8 wwn[8];
  120. memset(wwn, 0, sizeof(wwn));
  121. /* Validate and store the new name */
  122. for (i = 0, j = 0; i < 16; i++) {
  123. int value;
  124. value = hex_to_bin(*ns++);
  125. if (value >= 0)
  126. j = (j << 4) | value;
  127. else
  128. return -EINVAL;
  129. if (i % 2) {
  130. wwn[i/2] = j & 0xff;
  131. j = 0;
  132. }
  133. }
  134. *nm = wwn_to_u64(wwn);
  135. return 0;
  136. }
  137. /*
  138. * This parsing logic follows drivers/scsi/scsi_transport_fc.c:
  139. * store_fc_host_vport_create()
  140. */
  141. static int tcm_qla2xxx_npiv_parse_wwn(
  142. const char *name,
  143. size_t count,
  144. u64 *wwpn,
  145. u64 *wwnn)
  146. {
  147. unsigned int cnt = count;
  148. int rc;
  149. *wwpn = 0;
  150. *wwnn = 0;
  151. /* count may include a LF at end of string */
  152. if (name[cnt-1] == '\n' || name[cnt-1] == 0)
  153. cnt--;
  154. /* validate we have enough characters for WWPN */
  155. if ((cnt != (16+1+16)) || (name[16] != ':'))
  156. return -EINVAL;
  157. rc = tcm_qla2xxx_npiv_extract_wwn(&name[0], wwpn);
  158. if (rc != 0)
  159. return rc;
  160. rc = tcm_qla2xxx_npiv_extract_wwn(&name[17], wwnn);
  161. if (rc != 0)
  162. return rc;
  163. return 0;
  164. }
  165. static char *tcm_qla2xxx_npiv_get_fabric_name(void)
  166. {
  167. return "qla2xxx_npiv";
  168. }
  169. static u8 tcm_qla2xxx_get_fabric_proto_ident(struct se_portal_group *se_tpg)
  170. {
  171. struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
  172. struct tcm_qla2xxx_tpg, se_tpg);
  173. struct tcm_qla2xxx_lport *lport = tpg->lport;
  174. u8 proto_id;
  175. switch (lport->lport_proto_id) {
  176. case SCSI_PROTOCOL_FCP:
  177. default:
  178. proto_id = fc_get_fabric_proto_ident(se_tpg);
  179. break;
  180. }
  181. return proto_id;
  182. }
  183. static char *tcm_qla2xxx_get_fabric_wwn(struct se_portal_group *se_tpg)
  184. {
  185. struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
  186. struct tcm_qla2xxx_tpg, se_tpg);
  187. struct tcm_qla2xxx_lport *lport = tpg->lport;
  188. return lport->lport_naa_name;
  189. }
  190. static u16 tcm_qla2xxx_get_tag(struct se_portal_group *se_tpg)
  191. {
  192. struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
  193. struct tcm_qla2xxx_tpg, se_tpg);
  194. return tpg->lport_tpgt;
  195. }
  196. static u32 tcm_qla2xxx_get_default_depth(struct se_portal_group *se_tpg)
  197. {
  198. return 1;
  199. }
  200. static u32 tcm_qla2xxx_get_pr_transport_id(
  201. struct se_portal_group *se_tpg,
  202. struct se_node_acl *se_nacl,
  203. struct t10_pr_registration *pr_reg,
  204. int *format_code,
  205. unsigned char *buf)
  206. {
  207. struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
  208. struct tcm_qla2xxx_tpg, se_tpg);
  209. struct tcm_qla2xxx_lport *lport = tpg->lport;
  210. int ret = 0;
  211. switch (lport->lport_proto_id) {
  212. case SCSI_PROTOCOL_FCP:
  213. default:
  214. ret = fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
  215. format_code, buf);
  216. break;
  217. }
  218. return ret;
  219. }
  220. static u32 tcm_qla2xxx_get_pr_transport_id_len(
  221. struct se_portal_group *se_tpg,
  222. struct se_node_acl *se_nacl,
  223. struct t10_pr_registration *pr_reg,
  224. int *format_code)
  225. {
  226. struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
  227. struct tcm_qla2xxx_tpg, se_tpg);
  228. struct tcm_qla2xxx_lport *lport = tpg->lport;
  229. int ret = 0;
  230. switch (lport->lport_proto_id) {
  231. case SCSI_PROTOCOL_FCP:
  232. default:
  233. ret = fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
  234. format_code);
  235. break;
  236. }
  237. return ret;
  238. }
  239. static char *tcm_qla2xxx_parse_pr_out_transport_id(
  240. struct se_portal_group *se_tpg,
  241. const char *buf,
  242. u32 *out_tid_len,
  243. char **port_nexus_ptr)
  244. {
  245. struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
  246. struct tcm_qla2xxx_tpg, se_tpg);
  247. struct tcm_qla2xxx_lport *lport = tpg->lport;
  248. char *tid = NULL;
  249. switch (lport->lport_proto_id) {
  250. case SCSI_PROTOCOL_FCP:
  251. default:
  252. tid = fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
  253. port_nexus_ptr);
  254. break;
  255. }
  256. return tid;
  257. }
  258. static int tcm_qla2xxx_check_demo_mode(struct se_portal_group *se_tpg)
  259. {
  260. struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
  261. struct tcm_qla2xxx_tpg, se_tpg);
  262. return tpg->tpg_attrib.generate_node_acls;
  263. }
  264. static int tcm_qla2xxx_check_demo_mode_cache(struct se_portal_group *se_tpg)
  265. {
  266. struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
  267. struct tcm_qla2xxx_tpg, se_tpg);
  268. return tpg->tpg_attrib.cache_dynamic_acls;
  269. }
  270. static int tcm_qla2xxx_check_demo_write_protect(struct se_portal_group *se_tpg)
  271. {
  272. struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
  273. struct tcm_qla2xxx_tpg, se_tpg);
  274. return tpg->tpg_attrib.demo_mode_write_protect;
  275. }
  276. static int tcm_qla2xxx_check_prod_write_protect(struct se_portal_group *se_tpg)
  277. {
  278. struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
  279. struct tcm_qla2xxx_tpg, se_tpg);
  280. return tpg->tpg_attrib.prod_mode_write_protect;
  281. }
  282. static int tcm_qla2xxx_check_demo_mode_login_only(struct se_portal_group *se_tpg)
  283. {
  284. struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
  285. struct tcm_qla2xxx_tpg, se_tpg);
  286. return tpg->tpg_attrib.demo_mode_login_only;
  287. }
  288. static struct se_node_acl *tcm_qla2xxx_alloc_fabric_acl(
  289. struct se_portal_group *se_tpg)
  290. {
  291. struct tcm_qla2xxx_nacl *nacl;
  292. nacl = kzalloc(sizeof(struct tcm_qla2xxx_nacl), GFP_KERNEL);
  293. if (!nacl) {
  294. pr_err("Unable to allocate struct tcm_qla2xxx_nacl\n");
  295. return NULL;
  296. }
  297. return &nacl->se_node_acl;
  298. }
  299. static void tcm_qla2xxx_release_fabric_acl(
  300. struct se_portal_group *se_tpg,
  301. struct se_node_acl *se_nacl)
  302. {
  303. struct tcm_qla2xxx_nacl *nacl = container_of(se_nacl,
  304. struct tcm_qla2xxx_nacl, se_node_acl);
  305. kfree(nacl);
  306. }
  307. static u32 tcm_qla2xxx_tpg_get_inst_index(struct se_portal_group *se_tpg)
  308. {
  309. struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
  310. struct tcm_qla2xxx_tpg, se_tpg);
  311. return tpg->lport_tpgt;
  312. }
/*
 * Workqueue handler: drop the final TCM reference on a management command
 * whose release was deferred by tcm_qla2xxx_free_mcmd() (process context,
 * no hardware_lock held).
 */
static void tcm_qla2xxx_complete_mcmd(struct work_struct *work)
{
        struct qla_tgt_mgmt_cmd *mcmd = container_of(work,
                        struct qla_tgt_mgmt_cmd, free_work);

        transport_generic_free_cmd(&mcmd->se_cmd, 0);
}
/*
 * Called from qla_target_template->free_mcmd(), and will call
 * tcm_qla2xxx_release_cmd() via normal struct target_core_fabric_ops
 * release callback. qla_hw_data->hardware_lock is expected to be held
 */
static void tcm_qla2xxx_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd)
{
        /* Defer the actual free to process context via the free workqueue */
        INIT_WORK(&mcmd->free_work, tcm_qla2xxx_complete_mcmd);
        queue_work(tcm_qla2xxx_free_wq, &mcmd->free_work);
}
/*
 * Workqueue handler: release a regular command deferred by
 * tcm_qla2xxx_free_cmd() (process context, no hardware_lock held).
 */
static void tcm_qla2xxx_complete_free(struct work_struct *work)
{
        struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);

        /* Command has left the workqueue */
        cmd->cmd_in_wq = 0;

        /* BIT_16 marks the free path as entered; twice means double free */
        WARN_ON(cmd->cmd_flags & BIT_16);

        cmd->cmd_flags |= BIT_16;
        transport_generic_free_cmd(&cmd->se_cmd, 0);
}
/*
 * Called from qla_target_template->free_cmd(), and will call
 * tcm_qla2xxx_release_cmd via normal struct target_core_fabric_ops
 * release callback. qla_hw_data->hardware_lock is expected to be held
 */
static void tcm_qla2xxx_free_cmd(struct qla_tgt_cmd *cmd)
{
        /* Mark in-workqueue before queueing so observers see a queued cmd */
        cmd->cmd_in_wq = 1;
        INIT_WORK(&cmd->work, tcm_qla2xxx_complete_free);
        queue_work(tcm_qla2xxx_free_wq, &cmd->work);
}
/*
 * Called from struct target_core_fabric_ops->check_stop_free() context
 */
static int tcm_qla2xxx_check_stop_free(struct se_cmd *se_cmd)
{
        struct qla_tgt_cmd *cmd;

        /* TMR descriptors have no qla_tgt_cmd wrapper; tag only I/O cmds */
        if ((se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) == 0) {
                cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd);
                cmd->cmd_flags |= BIT_14;       /* trace: check_stop_free seen */
        }
        /* Drop the session cmd kref; may invoke the release callback */
        return target_put_sess_cmd(se_cmd->se_sess, se_cmd);
}
  360. /* tcm_qla2xxx_release_cmd - Callback from TCM Core to release underlying
  361. * fabric descriptor @se_cmd command to release
  362. */
  363. static void tcm_qla2xxx_release_cmd(struct se_cmd *se_cmd)
  364. {
  365. struct qla_tgt_cmd *cmd;
  366. if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) {
  367. struct qla_tgt_mgmt_cmd *mcmd = container_of(se_cmd,
  368. struct qla_tgt_mgmt_cmd, se_cmd);
  369. qlt_free_mcmd(mcmd);
  370. return;
  371. }
  372. cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd);
  373. qlt_free_cmd(cmd);
  374. }
/*
 * TCM shutdown_session() callback: flag all outstanding commands on the
 * session as waiting under hardware_lock. Returning 1 tells TCM core to
 * continue with session teardown.
 */
static int tcm_qla2xxx_shutdown_session(struct se_session *se_sess)
{
        struct qla_tgt_sess *sess = se_sess->fabric_sess_ptr;
        struct scsi_qla_host *vha;
        unsigned long flags;

        BUG_ON(!sess);
        vha = sess->vha;

        spin_lock_irqsave(&vha->hw->hardware_lock, flags);
        target_sess_cmd_list_set_waiting(se_sess);
        spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);

        return 1;
}
/*
 * TCM close_session() callback: unregister the qla_tgt session from the
 * LLD under hardware_lock.
 */
static void tcm_qla2xxx_close_session(struct se_session *se_sess)
{
        struct qla_tgt_sess *sess = se_sess->fabric_sess_ptr;
        struct scsi_qla_host *vha;
        unsigned long flags;

        BUG_ON(!sess);
        vha = sess->vha;

        spin_lock_irqsave(&vha->hw->hardware_lock, flags);
        qlt_unreg_sess(sess);
        spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
}
  398. static u32 tcm_qla2xxx_sess_get_index(struct se_session *se_sess)
  399. {
  400. return 0;
  401. }
/*
 * TCM write_pending() callback: mirror the se_cmd state (length, SGLs,
 * T10-PI scatterlists) into the qla_tgt_cmd and ask the LLD to start
 * the FCP WRITE data transfer.
 */
static int tcm_qla2xxx_write_pending(struct se_cmd *se_cmd)
{
        struct qla_tgt_cmd *cmd = container_of(se_cmd,
                                struct qla_tgt_cmd, se_cmd);

        cmd->bufflen = se_cmd->data_length;
        cmd->dma_data_direction = target_reverse_dma_direction(se_cmd);

        cmd->sg_cnt = se_cmd->t_data_nents;
        cmd->sg = se_cmd->t_data_sg;

        /* T10-PI (DIF) protection scatterlists, if any */
        cmd->prot_sg_cnt = se_cmd->t_prot_nents;
        cmd->prot_sg = se_cmd->t_prot_sg;
        cmd->blk_sz  = se_cmd->se_dev->dev_attrib.block_size;
        se_cmd->pi_err = 0;     /* clear any stale protection-error state */

        /*
         * qla_target.c:qlt_rdy_to_xfer() will call pci_map_sg() to setup
         * the SGL mappings into PCIe memory for incoming FCP WRITE data.
         */
        return qlt_rdy_to_xfer(cmd);
}
/*
 * TCM write_pending_status() callback. If the command is still waiting
 * for WRITE data, block (bounded) until the data phase completes or is
 * aborted. Always returns 0.
 */
static int tcm_qla2xxx_write_pending_status(struct se_cmd *se_cmd)
{
        unsigned long flags;
        /*
         * Check for WRITE_PENDING status to determine if we need to wait for
         * CTIO aborts to be posted via hardware in tcm_qla2xxx_handle_data().
         */
        spin_lock_irqsave(&se_cmd->t_state_lock, flags);
        if (se_cmd->t_state == TRANSPORT_WRITE_PENDING ||
            se_cmd->t_state == TRANSPORT_COMPLETE_QF_WP) {
                spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
                /* 3000 jiffies timeout — presumably ~3s at HZ=1000; confirm */
                wait_for_completion_timeout(&se_cmd->t_transport_stop_comp,
                                            3000);
                return 0;
        }
        spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);

        return 0;
}
  438. static void tcm_qla2xxx_set_default_node_attrs(struct se_node_acl *nacl)
  439. {
  440. return;
  441. }
/*
 * TCM get_task_tag() callback: return the FC exchange tag for an I/O
 * command, or 0xffffffff for task-management commands (which carry no
 * qla_tgt_cmd wrapper).
 */
static u32 tcm_qla2xxx_get_task_tag(struct se_cmd *se_cmd)
{
        struct qla_tgt_cmd *cmd;

        /* check for task mgmt cmd */
        if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
                return 0xffffffff;

        cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd);
        return cmd->tag;
}
  451. static int tcm_qla2xxx_get_cmd_state(struct se_cmd *se_cmd)
  452. {
  453. return 0;
  454. }
/*
 * Called from process context in qla_target.c:qlt_do_work() code
 *
 * Submit a new FCP command to TCM core. Returns 0 on success or a
 * negative errno when the session lookup fails.
 */
static int tcm_qla2xxx_handle_cmd(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd,
        unsigned char *cdb, uint32_t data_length, int fcp_task_attr,
        int data_dir, int bidi)
{
        struct se_cmd *se_cmd = &cmd->se_cmd;
        struct se_session *se_sess;
        struct qla_tgt_sess *sess;
        /* ACK_KREF: the LLD keeps its own reference until release */
        int flags = TARGET_SCF_ACK_KREF;

        if (bidi)
                flags |= TARGET_SCF_BIDI_OP;

        sess = cmd->sess;
        if (!sess) {
                pr_err("Unable to locate struct qla_tgt_sess from qla_tgt_cmd\n");
                return -EINVAL;
        }

        se_sess = sess->se_sess;
        if (!se_sess) {
                pr_err("Unable to locate active struct se_session\n");
                return -EINVAL;
        }

        return target_submit_cmd(se_cmd, se_sess, cdb, &cmd->sense_buffer[0],
                                 cmd->unpacked_lun, data_length, fcp_task_attr,
                                 data_dir, flags);
}
/*
 * Workqueue handler for completed FCP WRITE data-in, queued by
 * tcm_qla2xxx_handle_data(). Executes the command if the full payload
 * arrived, otherwise fails it back through TCM core.
 */
static void tcm_qla2xxx_handle_data_work(struct work_struct *work)
{
        struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);

        /*
         * Ensure that the complete FCP WRITE payload has been received.
         * Otherwise return an exception via CHECK_CONDITION status.
         */
        cmd->cmd_in_wq = 0;
        cmd->cmd_flags |= BIT_11;       /* trace: data work executed */
        if (!cmd->write_data_transferred) {
                /*
                 * Check if se_cmd has already been aborted via LUN_RESET, and
                 * waiting upon completion in tcm_qla2xxx_write_pending_status()
                 */
                if (cmd->se_cmd.transport_state & CMD_T_ABORTED) {
                        complete(&cmd->se_cmd.t_transport_stop_comp);
                        return;
                }

                /* Prefer a specific T10-PI error over a generic abort */
                if (cmd->se_cmd.pi_err)
                        transport_generic_request_failure(&cmd->se_cmd,
                                cmd->se_cmd.pi_err);
                else
                        transport_generic_request_failure(&cmd->se_cmd,
                                TCM_CHECK_CONDITION_ABORT_CMD);

                return;
        }

        return target_execute_cmd(&cmd->se_cmd);
}
/*
 * Called from qla_target.c:qlt_do_ctio_completion()
 *
 * Runs in interrupt context with hardware_lock held; defer the actual
 * WRITE-data completion handling to the free workqueue.
 */
static void tcm_qla2xxx_handle_data(struct qla_tgt_cmd *cmd)
{
        cmd->cmd_flags |= BIT_10;       /* trace: data completion queued */
        cmd->cmd_in_wq = 1;
        INIT_WORK(&cmd->work, tcm_qla2xxx_handle_data_work);
        queue_work(tcm_qla2xxx_free_wq, &cmd->work);
}
/*
 * Workqueue handler for a T10-PI (DIF) error detected by the HBA: fail
 * the command back to TCM core with the saved protection error.
 */
static void tcm_qla2xxx_handle_dif_work(struct work_struct *work)
{
        struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);

        /* take an extra kref to prevent cmd free too early.
         * need to wait for SCSI status/check condition to
         * finish responding generate by transport_generic_request_failure.
         */
        kref_get(&cmd->se_cmd.cmd_kref);
        transport_generic_request_failure(&cmd->se_cmd, cmd->se_cmd.pi_err);
}
/*
 * Called from qla_target.c:qlt_do_ctio_completion()
 *
 * Interrupt context; defer T10-PI error processing to the workqueue.
 */
static void tcm_qla2xxx_handle_dif_err(struct qla_tgt_cmd *cmd)
{
        INIT_WORK(&cmd->work, tcm_qla2xxx_handle_dif_work);
        queue_work(tcm_qla2xxx_free_wq, &cmd->work);
}
/*
 * Called from qla_target.c:qlt_issue_task_mgmt()
 *
 * Submit a task-management request (LUN reset, abort task, ...) to TCM
 * core. GFP_ATOMIC because the caller may hold hardware_lock.
 */
static int tcm_qla2xxx_handle_tmr(struct qla_tgt_mgmt_cmd *mcmd, uint32_t lun,
        uint8_t tmr_func, uint32_t tag)
{
        struct qla_tgt_sess *sess = mcmd->sess;
        struct se_cmd *se_cmd = &mcmd->se_cmd;

        return target_submit_tmr(se_cmd, sess->se_sess, NULL, lun, mcmd,
                        tmr_func, GFP_ATOMIC, tag, TARGET_SCF_ACK_KREF);
}
/*
 * TCM queue_data_in() callback: push completed DATA_IN plus SCSI status
 * to the LLD response ring in a single exchange.
 */
static int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd)
{
        struct qla_tgt_cmd *cmd = container_of(se_cmd,
                                struct qla_tgt_cmd, se_cmd);

        cmd->cmd_flags |= BIT_4;        /* trace: queue_data_in entered */
        cmd->bufflen = se_cmd->data_length;
        cmd->dma_data_direction = target_reverse_dma_direction(se_cmd);
        /* Propagate abort state so the LLD can terminate the exchange */
        cmd->aborted = (se_cmd->transport_state & CMD_T_ABORTED);

        cmd->sg_cnt = se_cmd->t_data_nents;
        cmd->sg = se_cmd->t_data_sg;
        cmd->offset = 0;
        cmd->cmd_flags |= BIT_3;        /* trace: SGL state captured */

        /* T10-PI (DIF) protection scatterlists, if any */
        cmd->prot_sg_cnt = se_cmd->t_prot_nents;
        cmd->prot_sg = se_cmd->t_prot_sg;
        cmd->blk_sz  = se_cmd->se_dev->dev_attrib.block_size;
        se_cmd->pi_err = 0;

        /*
         * Now queue completed DATA_IN the qla2xxx LLD and response ring
         */
        return qlt_xmit_response(cmd, QLA_TGT_XMIT_DATA|QLA_TGT_XMIT_STATUS,
                                se_cmd->scsi_status);
}
/*
 * TCM queue_status() callback: send a status-only response (no data
 * phase) to the LLD response ring.
 */
static int tcm_qla2xxx_queue_status(struct se_cmd *se_cmd)
{
        struct qla_tgt_cmd *cmd = container_of(se_cmd,
                                struct qla_tgt_cmd, se_cmd);
        int xmit_type = QLA_TGT_XMIT_STATUS;

        cmd->bufflen = se_cmd->data_length;
        cmd->sg = NULL;
        cmd->sg_cnt = 0;
        cmd->offset = 0;
        cmd->dma_data_direction = target_reverse_dma_direction(se_cmd);
        cmd->aborted = (se_cmd->transport_state & CMD_T_ABORTED);

        /* BIT_5 marks queue_status already run; twice indicates a bug */
        if (cmd->cmd_flags & BIT_5) {
                pr_crit("Bit_5 already set for cmd = %p.\n", cmd);
                dump_stack();
        }
        cmd->cmd_flags |= BIT_5;

        if (se_cmd->data_direction == DMA_FROM_DEVICE) {
                /*
                 * For FCP_READ with CHECK_CONDITION status, clear cmd->bufflen
                 * for qla_tgt_xmit_response LLD code
                 */
                if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
                        se_cmd->se_cmd_flags &= ~SCF_OVERFLOW_BIT;
                        se_cmd->residual_count = 0;
                }
                /* Report the whole (untransferred) read length as residual */
                se_cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
                se_cmd->residual_count += se_cmd->data_length;

                cmd->bufflen = 0;
        }
        /*
         * Now queue status response to qla2xxx LLD code and response ring
         */
        return qlt_xmit_response(cmd, xmit_type, se_cmd->scsi_status);
}
/*
 * TCM queue_tm_rsp() callback: translate the TCM TMR response code into
 * the FC TM response code and hand it to the LLD for CTIO transmission.
 */
static void tcm_qla2xxx_queue_tm_rsp(struct se_cmd *se_cmd)
{
        struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
        struct qla_tgt_mgmt_cmd *mcmd = container_of(se_cmd,
                                struct qla_tgt_mgmt_cmd, se_cmd);

        pr_debug("queue_tm_rsp: mcmd: %p func: 0x%02x response: 0x%02x\n",
                        mcmd, se_tmr->function, se_tmr->response);
        /*
         * Do translation between TCM TM response codes and
         * QLA2xxx FC TM response codes.
         */
        switch (se_tmr->response) {
        case TMR_FUNCTION_COMPLETE:
                mcmd->fc_tm_rsp = FC_TM_SUCCESS;
                break;
        case TMR_TASK_DOES_NOT_EXIST:
                mcmd->fc_tm_rsp = FC_TM_BAD_CMD;
                break;
        case TMR_FUNCTION_REJECTED:
                mcmd->fc_tm_rsp = FC_TM_REJECT;
                break;
        case TMR_LUN_DOES_NOT_EXIST:
        default:
                /* Anything unrecognized maps to a generic failure */
                mcmd->fc_tm_rsp = FC_TM_FAILED;
                break;
        }
        /*
         * Queue the TM response to QLA2xxx LLD to build a
         * CTIO response packet.
         */
        qlt_xmit_tm_rsp(mcmd);
}
/*
 * TCM aborted_task() callback: undo the PCI SGL mapping for a command
 * whose data phase was aborted, if one is still active.
 */
static void tcm_qla2xxx_aborted_task(struct se_cmd *se_cmd)
{
        struct qla_tgt_cmd *cmd = container_of(se_cmd,
                                struct qla_tgt_cmd, se_cmd);
        struct scsi_qla_host *vha = cmd->vha;
        struct qla_hw_data *ha = vha->hw;

        if (!cmd->sg_mapped)
                return;

        pci_unmap_sg(ha->pdev, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction);
        cmd->sg_mapped = 0;
}
static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *,
                struct tcm_qla2xxx_nacl *, struct qla_tgt_sess *);

/*
 * Expected to be called with struct qla_hw_data->hardware_lock held
 *
 * Remove the session's node ACL from the lport's nport_id btree and
 * clear the S_ID/LOOP_ID lookup entries, so incoming ATIOs/TMRs can no
 * longer resolve to this (soon to be freed) se_node_acl.
 */
static void tcm_qla2xxx_clear_nacl_from_fcport_map(struct qla_tgt_sess *sess)
{
        struct se_node_acl *se_nacl = sess->se_sess->se_node_acl;
        struct se_portal_group *se_tpg = se_nacl->se_tpg;
        struct se_wwn *se_wwn = se_tpg->se_tpg_wwn;
        struct tcm_qla2xxx_lport *lport = container_of(se_wwn,
                        struct tcm_qla2xxx_lport, lport_wwn);
        struct tcm_qla2xxx_nacl *nacl = container_of(se_nacl,
                        struct tcm_qla2xxx_nacl, se_node_acl);
        void *node;

        pr_debug("fc_rport domain: port_id 0x%06x\n", nacl->nport_id);

        node = btree_remove32(&lport->lport_fcport_map, nacl->nport_id);
        if (WARN_ON(node && (node != se_nacl))) {
                /*
                 * The nacl no longer matches what we think it should be.
                 * Most likely a new dynamic acl has been added while
                 * someone dropped the hardware lock.  It clearly is a
                 * bug elsewhere, but this bit can't make things worse.
                 */
                btree_insert32(&lport->lport_fcport_map, nacl->nport_id,
                               node, GFP_ATOMIC);
        }

        pr_debug("Removed from fcport_map: %p for WWNN: 0x%016LX, port_id: 0x%06x\n",
            se_nacl, nacl->nport_wwnn, nacl->nport_id);
        /*
         * Now clear the se_nacl and session pointers from our HW lport lookup
         * table mapping for this initiator's fabric S_ID and LOOP_ID entries.
         *
         * This is done ahead of callbacks into tcm_qla2xxx_free_session() ->
         * target_wait_for_sess_cmds() before the session waits for outstanding
         * I/O to complete, to avoid a race between session shutdown execution
         * and incoming ATIOs or TMRs picking up a stale se_node_act reference.
         */
        tcm_qla2xxx_clear_sess_lookup(lport, nacl, sess);
}
  688. static void tcm_qla2xxx_release_session(struct kref *kref)
  689. {
  690. struct se_session *se_sess = container_of(kref,
  691. struct se_session, sess_kref);
  692. qlt_unreg_sess(se_sess->fabric_sess_ptr);
  693. }
/*
 * Drop a reference on the se_session kref while holding the HW lock;
 * the final put invokes tcm_qla2xxx_release_session() -> qlt_unreg_sess().
 */
static void tcm_qla2xxx_put_session(struct se_session *se_sess)
{
	struct qla_tgt_sess *sess = se_sess->fabric_sess_ptr;
	struct qla_hw_data *ha = sess->vha->hw;
	unsigned long flags;

	/* hardware_lock serializes the put against session lookup paths */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	kref_put(&se_sess->sess_kref, tcm_qla2xxx_release_session);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
/*
 * qla_tgt_func_tmpl->put_sess() callback.  The caller must already hold
 * qla_hw_data->hardware_lock (asserted below); a NULL sess is a no-op.
 */
static void tcm_qla2xxx_put_sess(struct qla_tgt_sess *sess)
{
	if (!sess)
		return;

	assert_spin_locked(&sess->vha->hw->hardware_lock);
	kref_put(&sess->se_sess->sess_kref, tcm_qla2xxx_release_session);
}
/*
 * Begin session shutdown: mark all outstanding se_cmds as waiting so a
 * later target_wait_for_sess_cmds() can drain them.  Called with
 * qla_hw_data->hardware_lock held (asserted below).
 */
static void tcm_qla2xxx_shutdown_sess(struct qla_tgt_sess *sess)
{
	assert_spin_locked(&sess->vha->hw->hardware_lock);
	target_sess_cmd_list_set_waiting(sess->se_sess);
}
/*
 * configfs callback to create an initiator NodeACL below a TPG.
 * 'name' must be a colon-separated FC WWPN string, parsed into the
 * 64-bit WWNN stored in the fabric-specific tcm_qla2xxx_nacl.
 *
 * Returns the new se_node_acl or ERR_PTR() on failure.
 */
static struct se_node_acl *tcm_qla2xxx_make_nodeacl(
	struct se_portal_group *se_tpg,
	struct config_group *group,
	const char *name)
{
	struct se_node_acl *se_nacl, *se_nacl_new;
	struct tcm_qla2xxx_nacl *nacl;
	u64 wwnn;
	u32 qla2xxx_nexus_depth;

	if (tcm_qla2xxx_parse_wwn(name, &wwnn, 1) < 0)
		return ERR_PTR(-EINVAL);

	se_nacl_new = tcm_qla2xxx_alloc_fabric_acl(se_tpg);
	if (!se_nacl_new)
		return ERR_PTR(-ENOMEM);
/* #warning FIXME: Hardcoded qla2xxx_nexus depth in tcm_qla2xxx_make_nodeacl */
	qla2xxx_nexus_depth = 1;

	/*
	 * se_nacl_new may be released by core_tpg_add_initiator_node_acl()
	 * when converting a NodeACL from demo mode -> explicit
	 */
	se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,
				name, qla2xxx_nexus_depth);
	if (IS_ERR(se_nacl)) {
		tcm_qla2xxx_release_fabric_acl(se_tpg, se_nacl_new);
		return se_nacl;
	}
	/*
	 * Locate our struct tcm_qla2xxx_nacl and set the FC Nport WWPN
	 */
	nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl);
	nacl->nport_wwnn = wwnn;
	tcm_qla2xxx_format_wwn(&nacl->nport_name[0], TCM_QLA2XXX_NAMELEN, wwnn);

	return se_nacl;
}
  749. static void tcm_qla2xxx_drop_nodeacl(struct se_node_acl *se_acl)
  750. {
  751. struct se_portal_group *se_tpg = se_acl->se_tpg;
  752. struct tcm_qla2xxx_nacl *nacl = container_of(se_acl,
  753. struct tcm_qla2xxx_nacl, se_node_acl);
  754. core_tpg_del_initiator_node_acl(se_tpg, se_acl, 1);
  755. kfree(nacl);
  756. }
/* Start items for tcm_qla2xxx_tpg_attrib_cit */

/*
 * DEF_QLA_TPG_ATTRIB(name): generate the configfs show/store handlers
 * for TPG attribute 'name'.  The store handler parses an unsigned long
 * from userspace and hands it to the tcm_qla2xxx_set_attrib_##name()
 * setter generated by DEF_QLA_TPG_ATTR_BOOL() below.
 */
#define DEF_QLA_TPG_ATTRIB(name)					\
									\
static ssize_t tcm_qla2xxx_tpg_attrib_show_##name(			\
	struct se_portal_group *se_tpg,					\
	char *page)							\
{									\
	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,		\
			struct tcm_qla2xxx_tpg, se_tpg);		\
									\
	return sprintf(page, "%u\n", tpg->tpg_attrib.name);		\
}									\
									\
static ssize_t tcm_qla2xxx_tpg_attrib_store_##name(			\
	struct se_portal_group *se_tpg,					\
	const char *page,						\
	size_t count)							\
{									\
	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,		\
			struct tcm_qla2xxx_tpg, se_tpg);		\
	unsigned long val;						\
	int ret;							\
									\
	ret = kstrtoul(page, 0, &val);					\
	if (ret < 0) {							\
		pr_err("kstrtoul() failed with"				\
				" ret: %d\n", ret);			\
		return -EINVAL;						\
	}								\
	ret = tcm_qla2xxx_set_attrib_##name(tpg, val);			\
									\
	return (!ret) ? count : -EINVAL;				\
}
/*
 * DEF_QLA_TPG_ATTR_BOOL(_name): generate the boolean setter used by the
 * DEF_QLA_TPG_ATTRIB() store handler; rejects any value but 0 or 1.
 */
#define DEF_QLA_TPG_ATTR_BOOL(_name)					\
									\
static int tcm_qla2xxx_set_attrib_##_name(				\
	struct tcm_qla2xxx_tpg *tpg,					\
	unsigned long val)						\
{									\
	struct tcm_qla2xxx_tpg_attrib *a = &tpg->tpg_attrib;		\
									\
	if ((val != 0) && (val != 1)) {					\
		pr_err("Illegal boolean value %lu\n", val);		\
		return -EINVAL;						\
	}								\
									\
	a->_name = val;							\
	return 0;							\
}
/* QLA_TPG_ATTR: bind the generated show/store pair into a configfs attribute. */
#define QLA_TPG_ATTR(_name, _mode) \
	TF_TPG_ATTRIB_ATTR(tcm_qla2xxx, _name, _mode);
  808. /*
  809. * Define tcm_qla2xxx_tpg_attrib_s_generate_node_acls
  810. */
  811. DEF_QLA_TPG_ATTR_BOOL(generate_node_acls);
  812. DEF_QLA_TPG_ATTRIB(generate_node_acls);
  813. QLA_TPG_ATTR(generate_node_acls, S_IRUGO | S_IWUSR);
  814. /*
  815. Define tcm_qla2xxx_attrib_s_cache_dynamic_acls
  816. */
  817. DEF_QLA_TPG_ATTR_BOOL(cache_dynamic_acls);
  818. DEF_QLA_TPG_ATTRIB(cache_dynamic_acls);
  819. QLA_TPG_ATTR(cache_dynamic_acls, S_IRUGO | S_IWUSR);
  820. /*
  821. * Define tcm_qla2xxx_tpg_attrib_s_demo_mode_write_protect
  822. */
  823. DEF_QLA_TPG_ATTR_BOOL(demo_mode_write_protect);
  824. DEF_QLA_TPG_ATTRIB(demo_mode_write_protect);
  825. QLA_TPG_ATTR(demo_mode_write_protect, S_IRUGO | S_IWUSR);
  826. /*
  827. * Define tcm_qla2xxx_tpg_attrib_s_prod_mode_write_protect
  828. */
  829. DEF_QLA_TPG_ATTR_BOOL(prod_mode_write_protect);
  830. DEF_QLA_TPG_ATTRIB(prod_mode_write_protect);
  831. QLA_TPG_ATTR(prod_mode_write_protect, S_IRUGO | S_IWUSR);
  832. /*
  833. * Define tcm_qla2xxx_tpg_attrib_s_demo_mode_login_only
  834. */
  835. DEF_QLA_TPG_ATTR_BOOL(demo_mode_login_only);
  836. DEF_QLA_TPG_ATTRIB(demo_mode_login_only);
  837. QLA_TPG_ATTR(demo_mode_login_only, S_IRUGO | S_IWUSR);
  838. static struct configfs_attribute *tcm_qla2xxx_tpg_attrib_attrs[] = {
  839. &tcm_qla2xxx_tpg_attrib_generate_node_acls.attr,
  840. &tcm_qla2xxx_tpg_attrib_cache_dynamic_acls.attr,
  841. &tcm_qla2xxx_tpg_attrib_demo_mode_write_protect.attr,
  842. &tcm_qla2xxx_tpg_attrib_prod_mode_write_protect.attr,
  843. &tcm_qla2xxx_tpg_attrib_demo_mode_login_only.attr,
  844. NULL,
  845. };
  846. /* End items for tcm_qla2xxx_tpg_attrib_cit */
  847. static ssize_t tcm_qla2xxx_tpg_show_enable(
  848. struct se_portal_group *se_tpg,
  849. char *page)
  850. {
  851. struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
  852. struct tcm_qla2xxx_tpg, se_tpg);
  853. return snprintf(page, PAGE_SIZE, "%d\n",
  854. atomic_read(&tpg->lport_tpg_enabled));
  855. }
/*
 * Worker to enable target mode on the base vha.  Takes a configfs
 * dependency on the TPG item so it cannot be removed while the LLD has
 * target mode enabled, then signals the store_enable() waiter via
 * tpg_base_comp.  On configfs_depend_item() failure, lport_tpg_enabled
 * stays 0 and the waiter reports -ENODEV.
 */
static void tcm_qla2xxx_depend_tpg(struct work_struct *work)
{
	struct tcm_qla2xxx_tpg *base_tpg = container_of(work,
				struct tcm_qla2xxx_tpg, tpg_base_work);
	struct se_portal_group *se_tpg = &base_tpg->se_tpg;
	struct scsi_qla_host *base_vha = base_tpg->lport->qla_vha;

	if (!configfs_depend_item(se_tpg->se_tpg_tfo->tf_subsys,
				  &se_tpg->tpg_group.cg_item)) {
		atomic_set(&base_tpg->lport_tpg_enabled, 1);
		qlt_enable_vha(base_vha);
	}
	complete(&base_tpg->tpg_base_comp);
}
/*
 * Worker to disable target mode on the base vha.  If qlt_stop_phase1()
 * succeeds, drop the configfs dependency taken by tcm_qla2xxx_depend_tpg()
 * and clear lport_tpg_enabled, then signal the store_enable() waiter.
 */
static void tcm_qla2xxx_undepend_tpg(struct work_struct *work)
{
	struct tcm_qla2xxx_tpg *base_tpg = container_of(work,
				struct tcm_qla2xxx_tpg, tpg_base_work);
	struct se_portal_group *se_tpg = &base_tpg->se_tpg;
	struct scsi_qla_host *base_vha = base_tpg->lport->qla_vha;

	if (!qlt_stop_phase1(base_vha->vha_tgt.qla_tgt)) {
		atomic_set(&base_tpg->lport_tpg_enabled, 0);
		configfs_undepend_item(se_tpg->se_tpg_tfo->tf_subsys,
				       &se_tpg->tpg_group.cg_item);
	}
	complete(&base_tpg->tpg_base_comp);
}
/*
 * configfs 'enable' store handler for the base (non-NPIV) TPG.
 *
 * The enable/disable work is deferred to a workqueue item
 * (tcm_qla2xxx_depend_tpg / tcm_qla2xxx_undepend_tpg) and serialized
 * against this writer via tpg_base_comp; the trailing atomic_read()
 * checks whether the worker actually reached the requested state.
 */
static ssize_t tcm_qla2xxx_tpg_store_enable(
	struct se_portal_group *se_tpg,
	const char *page,
	size_t count)
{
	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
			struct tcm_qla2xxx_tpg, se_tpg);
	unsigned long op;
	int rc;

	rc = kstrtoul(page, 0, &op);
	if (rc < 0) {
		pr_err("kstrtoul() returned %d\n", rc);
		return -EINVAL;
	}
	if ((op != 1) && (op != 0)) {
		pr_err("Illegal value for tpg_enable: %lu\n", op);
		return -EINVAL;
	}
	if (op) {
		/* Already enabled: nothing to do */
		if (atomic_read(&tpg->lport_tpg_enabled))
			return -EEXIST;

		INIT_WORK(&tpg->tpg_base_work, tcm_qla2xxx_depend_tpg);
	} else {
		/* Already disabled: report success without queueing work */
		if (!atomic_read(&tpg->lport_tpg_enabled))
			return count;

		INIT_WORK(&tpg->tpg_base_work, tcm_qla2xxx_undepend_tpg);
	}
	init_completion(&tpg->tpg_base_comp);
	schedule_work(&tpg->tpg_base_work);
	wait_for_completion(&tpg->tpg_base_comp);

	/* Verify the worker achieved the requested state */
	if (op) {
		if (!atomic_read(&tpg->lport_tpg_enabled))
			return -ENODEV;
	} else {
		if (atomic_read(&tpg->lport_tpg_enabled))
			return -EPERM;
	}
	return count;
}
/* Generate the configfs 'enable' attribute and the base TPG attr table. */
TF_TPG_BASE_ATTR(tcm_qla2xxx, enable, S_IRUGO | S_IWUSR);

static struct configfs_attribute *tcm_qla2xxx_tpg_attrs[] = {
	&tcm_qla2xxx_tpg_enable.attr,
	NULL,
};
  926. static struct se_portal_group *tcm_qla2xxx_make_tpg(
  927. struct se_wwn *wwn,
  928. struct config_group *group,
  929. const char *name)
  930. {
  931. struct tcm_qla2xxx_lport *lport = container_of(wwn,
  932. struct tcm_qla2xxx_lport, lport_wwn);
  933. struct tcm_qla2xxx_tpg *tpg;
  934. unsigned long tpgt;
  935. int ret;
  936. if (strstr(name, "tpgt_") != name)
  937. return ERR_PTR(-EINVAL);
  938. if (kstrtoul(name + 5, 10, &tpgt) || tpgt > USHRT_MAX)
  939. return ERR_PTR(-EINVAL);
  940. if ((tpgt != 1)) {
  941. pr_err("In non NPIV mode, a single TPG=1 is used for HW port mappings\n");
  942. return ERR_PTR(-ENOSYS);
  943. }
  944. tpg = kzalloc(sizeof(struct tcm_qla2xxx_tpg), GFP_KERNEL);
  945. if (!tpg) {
  946. pr_err("Unable to allocate struct tcm_qla2xxx_tpg\n");
  947. return ERR_PTR(-ENOMEM);
  948. }
  949. tpg->lport = lport;
  950. tpg->lport_tpgt = tpgt;
  951. /*
  952. * By default allow READ-ONLY TPG demo-mode access w/ cached dynamic
  953. * NodeACLs
  954. */
  955. tpg->tpg_attrib.generate_node_acls = 1;
  956. tpg->tpg_attrib.demo_mode_write_protect = 1;
  957. tpg->tpg_attrib.cache_dynamic_acls = 1;
  958. tpg->tpg_attrib.demo_mode_login_only = 1;
  959. ret = core_tpg_register(&tcm_qla2xxx_fabric_configfs->tf_ops, wwn,
  960. &tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL);
  961. if (ret < 0) {
  962. kfree(tpg);
  963. return NULL;
  964. }
  965. lport->tpg_1 = tpg;
  966. return &tpg->se_tpg;
  967. }
/*
 * configfs callback to remove the non-NPIV TPG: stop active target mode
 * in the LLD (phase 1), deregister from the target core and free the TPG.
 */
static void tcm_qla2xxx_drop_tpg(struct se_portal_group *se_tpg)
{
	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
			struct tcm_qla2xxx_tpg, se_tpg);
	struct tcm_qla2xxx_lport *lport = tpg->lport;
	struct scsi_qla_host *vha = lport->qla_vha;
	/*
	 * Call into qla2x_target.c LLD logic to shutdown the active
	 * FC Nexuses and disable target mode operation for this qla_hw_data
	 */
	if (vha->vha_tgt.qla_tgt && !vha->vha_tgt.qla_tgt->tgt_stop)
		qlt_stop_phase1(vha->vha_tgt.qla_tgt);

	core_tpg_deregister(se_tpg);
	/*
	 * Clear local TPG=1 pointer for non NPIV mode.
	 */
	lport->tpg_1 = NULL;
	kfree(tpg);
}
/* NPIV variant of the 'enable' show handler; shares the base implementation. */
static ssize_t tcm_qla2xxx_npiv_tpg_show_enable(
	struct se_portal_group *se_tpg,
	char *page)
{
	return tcm_qla2xxx_tpg_show_enable(se_tpg, page);
}
/*
 * configfs 'enable' store handler for an NPIV TPG.  Unlike the base-TPG
 * path, this toggles target mode inline (no workqueue or configfs
 * dependency), calling qlt_enable_vha()/qlt_stop_phase1() directly.
 */
static ssize_t tcm_qla2xxx_npiv_tpg_store_enable(
	struct se_portal_group *se_tpg,
	const char *page,
	size_t count)
{
	struct se_wwn *se_wwn = se_tpg->se_tpg_wwn;
	struct tcm_qla2xxx_lport *lport = container_of(se_wwn,
			struct tcm_qla2xxx_lport, lport_wwn);
	struct scsi_qla_host *vha = lport->qla_vha;
	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
			struct tcm_qla2xxx_tpg, se_tpg);
	unsigned long op;
	int rc;

	rc = kstrtoul(page, 0, &op);
	if (rc < 0) {
		pr_err("kstrtoul() returned %d\n", rc);
		return -EINVAL;
	}
	if ((op != 1) && (op != 0)) {
		pr_err("Illegal value for tpg_enable: %lu\n", op);
		return -EINVAL;
	}
	if (op) {
		/* Already enabled */
		if (atomic_read(&tpg->lport_tpg_enabled))
			return -EEXIST;

		atomic_set(&tpg->lport_tpg_enabled, 1);
		qlt_enable_vha(vha);
	} else {
		/* Already disabled */
		if (!atomic_read(&tpg->lport_tpg_enabled))
			return count;

		atomic_set(&tpg->lport_tpg_enabled, 0);
		qlt_stop_phase1(vha->vha_tgt.qla_tgt);
	}

	return count;
}
/* Generate the NPIV configfs 'enable' attribute and its attr table. */
TF_TPG_BASE_ATTR(tcm_qla2xxx_npiv, enable, S_IRUGO | S_IWUSR);

static struct configfs_attribute *tcm_qla2xxx_npiv_tpg_attrs[] = {
	&tcm_qla2xxx_npiv_tpg_enable.attr,
	NULL,
};
  1033. static struct se_portal_group *tcm_qla2xxx_npiv_make_tpg(
  1034. struct se_wwn *wwn,
  1035. struct config_group *group,
  1036. const char *name)
  1037. {
  1038. struct tcm_qla2xxx_lport *lport = container_of(wwn,
  1039. struct tcm_qla2xxx_lport, lport_wwn);
  1040. struct tcm_qla2xxx_tpg *tpg;
  1041. unsigned long tpgt;
  1042. int ret;
  1043. if (strstr(name, "tpgt_") != name)
  1044. return ERR_PTR(-EINVAL);
  1045. if (kstrtoul(name + 5, 10, &tpgt) || tpgt > USHRT_MAX)
  1046. return ERR_PTR(-EINVAL);
  1047. tpg = kzalloc(sizeof(struct tcm_qla2xxx_tpg), GFP_KERNEL);
  1048. if (!tpg) {
  1049. pr_err("Unable to allocate struct tcm_qla2xxx_tpg\n");
  1050. return ERR_PTR(-ENOMEM);
  1051. }
  1052. tpg->lport = lport;
  1053. tpg->lport_tpgt = tpgt;
  1054. /*
  1055. * By default allow READ-ONLY TPG demo-mode access w/ cached dynamic
  1056. * NodeACLs
  1057. */
  1058. tpg->tpg_attrib.generate_node_acls = 1;
  1059. tpg->tpg_attrib.demo_mode_write_protect = 1;
  1060. tpg->tpg_attrib.cache_dynamic_acls = 1;
  1061. tpg->tpg_attrib.demo_mode_login_only = 1;
  1062. ret = core_tpg_register(&tcm_qla2xxx_npiv_fabric_configfs->tf_ops, wwn,
  1063. &tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL);
  1064. if (ret < 0) {
  1065. kfree(tpg);
  1066. return NULL;
  1067. }
  1068. lport->tpg_1 = tpg;
  1069. return &tpg->se_tpg;
  1070. }
  1071. /*
  1072. * Expected to be called with struct qla_hw_data->hardware_lock held
  1073. */
  1074. static struct qla_tgt_sess *tcm_qla2xxx_find_sess_by_s_id(
  1075. scsi_qla_host_t *vha,
  1076. const uint8_t *s_id)
  1077. {
  1078. struct tcm_qla2xxx_lport *lport;
  1079. struct se_node_acl *se_nacl;
  1080. struct tcm_qla2xxx_nacl *nacl;
  1081. u32 key;
  1082. lport = vha->vha_tgt.target_lport_ptr;
  1083. if (!lport) {
  1084. pr_err("Unable to locate struct tcm_qla2xxx_lport\n");
  1085. dump_stack();
  1086. return NULL;
  1087. }
  1088. key = (((unsigned long)s_id[0] << 16) |
  1089. ((unsigned long)s_id[1] << 8) |
  1090. (unsigned long)s_id[2]);
  1091. pr_debug("find_sess_by_s_id: 0x%06x\n", key);
  1092. se_nacl = btree_lookup32(&lport->lport_fcport_map, key);
  1093. if (!se_nacl) {
  1094. pr_debug("Unable to locate s_id: 0x%06x\n", key);
  1095. return NULL;
  1096. }
  1097. pr_debug("find_sess_by_s_id: located se_nacl: %p, initiatorname: %s\n",
  1098. se_nacl, se_nacl->initiatorname);
  1099. nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl);
  1100. if (!nacl->qla_tgt_sess) {
  1101. pr_err("Unable to locate struct qla_tgt_sess\n");
  1102. return NULL;
  1103. }
  1104. return nacl->qla_tgt_sess;
  1105. }
  1106. /*
  1107. * Expected to be called with struct qla_hw_data->hardware_lock held
  1108. */
  1109. static void tcm_qla2xxx_set_sess_by_s_id(
  1110. struct tcm_qla2xxx_lport *lport,
  1111. struct se_node_acl *new_se_nacl,
  1112. struct tcm_qla2xxx_nacl *nacl,
  1113. struct se_session *se_sess,
  1114. struct qla_tgt_sess *qla_tgt_sess,
  1115. uint8_t *s_id)
  1116. {
  1117. u32 key;
  1118. void *slot;
  1119. int rc;
  1120. key = (((unsigned long)s_id[0] << 16) |
  1121. ((unsigned long)s_id[1] << 8) |
  1122. (unsigned long)s_id[2]);
  1123. pr_debug("set_sess_by_s_id: %06x\n", key);
  1124. slot = btree_lookup32(&lport->lport_fcport_map, key);
  1125. if (!slot) {
  1126. if (new_se_nacl) {
  1127. pr_debug("Setting up new fc_port entry to new_se_nacl\n");
  1128. nacl->nport_id = key;
  1129. rc = btree_insert32(&lport->lport_fcport_map, key,
  1130. new_se_nacl, GFP_ATOMIC);
  1131. if (rc)
  1132. printk(KERN_ERR "Unable to insert s_id into fcport_map: %06x\n",
  1133. (int)key);
  1134. } else {
  1135. pr_debug("Wiping nonexisting fc_port entry\n");
  1136. }
  1137. qla_tgt_sess->se_sess = se_sess;
  1138. nacl->qla_tgt_sess = qla_tgt_sess;
  1139. return;
  1140. }
  1141. if (nacl->qla_tgt_sess) {
  1142. if (new_se_nacl == NULL) {
  1143. pr_debug("Clearing existing nacl->qla_tgt_sess and fc_port entry\n");
  1144. btree_remove32(&lport->lport_fcport_map, key);
  1145. nacl->qla_tgt_sess = NULL;
  1146. return;
  1147. }
  1148. pr_debug("Replacing existing nacl->qla_tgt_sess and fc_port entry\n");
  1149. btree_update32(&lport->lport_fcport_map, key, new_se_nacl);
  1150. qla_tgt_sess->se_sess = se_sess;
  1151. nacl->qla_tgt_sess = qla_tgt_sess;
  1152. return;
  1153. }
  1154. if (new_se_nacl == NULL) {
  1155. pr_debug("Clearing existing fc_port entry\n");
  1156. btree_remove32(&lport->lport_fcport_map, key);
  1157. return;
  1158. }
  1159. pr_debug("Replacing existing fc_port entry w/o active nacl->qla_tgt_sess\n");
  1160. btree_update32(&lport->lport_fcport_map, key, new_se_nacl);
  1161. qla_tgt_sess->se_sess = se_sess;
  1162. nacl->qla_tgt_sess = qla_tgt_sess;
  1163. pr_debug("Setup nacl->qla_tgt_sess %p by s_id for se_nacl: %p, initiatorname: %s\n",
  1164. nacl->qla_tgt_sess, new_se_nacl, new_se_nacl->initiatorname);
  1165. }
  1166. /*
  1167. * Expected to be called with struct qla_hw_data->hardware_lock held
  1168. */
  1169. static struct qla_tgt_sess *tcm_qla2xxx_find_sess_by_loop_id(
  1170. scsi_qla_host_t *vha,
  1171. const uint16_t loop_id)
  1172. {
  1173. struct tcm_qla2xxx_lport *lport;
  1174. struct se_node_acl *se_nacl;
  1175. struct tcm_qla2xxx_nacl *nacl;
  1176. struct tcm_qla2xxx_fc_loopid *fc_loopid;
  1177. lport = vha->vha_tgt.target_lport_ptr;
  1178. if (!lport) {
  1179. pr_err("Unable to locate struct tcm_qla2xxx_lport\n");
  1180. dump_stack();
  1181. return NULL;
  1182. }
  1183. pr_debug("find_sess_by_loop_id: Using loop_id: 0x%04x\n", loop_id);
  1184. fc_loopid = lport->lport_loopid_map + loop_id;
  1185. se_nacl = fc_loopid->se_nacl;
  1186. if (!se_nacl) {
  1187. pr_debug("Unable to locate se_nacl by loop_id: 0x%04x\n",
  1188. loop_id);
  1189. return NULL;
  1190. }
  1191. nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl);
  1192. if (!nacl->qla_tgt_sess) {
  1193. pr_err("Unable to locate struct qla_tgt_sess\n");
  1194. return NULL;
  1195. }
  1196. return nacl->qla_tgt_sess;
  1197. }
  1198. /*
  1199. * Expected to be called with struct qla_hw_data->hardware_lock held
  1200. */
  1201. static void tcm_qla2xxx_set_sess_by_loop_id(
  1202. struct tcm_qla2xxx_lport *lport,
  1203. struct se_node_acl *new_se_nacl,
  1204. struct tcm_qla2xxx_nacl *nacl,
  1205. struct se_session *se_sess,
  1206. struct qla_tgt_sess *qla_tgt_sess,
  1207. uint16_t loop_id)
  1208. {
  1209. struct se_node_acl *saved_nacl;
  1210. struct tcm_qla2xxx_fc_loopid *fc_loopid;
  1211. pr_debug("set_sess_by_loop_id: Using loop_id: 0x%04x\n", loop_id);
  1212. fc_loopid = &((struct tcm_qla2xxx_fc_loopid *)
  1213. lport->lport_loopid_map)[loop_id];
  1214. saved_nacl = fc_loopid->se_nacl;
  1215. if (!saved_nacl) {
  1216. pr_debug("Setting up new fc_loopid->se_nacl to new_se_nacl\n");
  1217. fc_loopid->se_nacl = new_se_nacl;
  1218. if (qla_tgt_sess->se_sess != se_sess)
  1219. qla_tgt_sess->se_sess = se_sess;
  1220. if (nacl->qla_tgt_sess != qla_tgt_sess)
  1221. nacl->qla_tgt_sess = qla_tgt_sess;
  1222. return;
  1223. }
  1224. if (nacl->qla_tgt_sess) {
  1225. if (new_se_nacl == NULL) {
  1226. pr_debug("Clearing nacl->qla_tgt_sess and fc_loopid->se_nacl\n");
  1227. fc_loopid->se_nacl = NULL;
  1228. nacl->qla_tgt_sess = NULL;
  1229. return;
  1230. }
  1231. pr_debug("Replacing existing nacl->qla_tgt_sess and fc_loopid->se_nacl\n");
  1232. fc_loopid->se_nacl = new_se_nacl;
  1233. if (qla_tgt_sess->se_sess != se_sess)
  1234. qla_tgt_sess->se_sess = se_sess;
  1235. if (nacl->qla_tgt_sess != qla_tgt_sess)
  1236. nacl->qla_tgt_sess = qla_tgt_sess;
  1237. return;
  1238. }
  1239. if (new_se_nacl == NULL) {
  1240. pr_debug("Clearing fc_loopid->se_nacl\n");
  1241. fc_loopid->se_nacl = NULL;
  1242. return;
  1243. }
  1244. pr_debug("Replacing existing fc_loopid->se_nacl w/o active nacl->qla_tgt_sess\n");
  1245. fc_loopid->se_nacl = new_se_nacl;
  1246. if (qla_tgt_sess->se_sess != se_sess)
  1247. qla_tgt_sess->se_sess = se_sess;
  1248. if (nacl->qla_tgt_sess != qla_tgt_sess)
  1249. nacl->qla_tgt_sess = qla_tgt_sess;
  1250. pr_debug("Setup nacl->qla_tgt_sess %p by loop_id for se_nacl: %p, initiatorname: %s\n",
  1251. nacl->qla_tgt_sess, new_se_nacl, new_se_nacl->initiatorname);
  1252. }
  1253. /*
  1254. * Should always be called with qla_hw_data->hardware_lock held.
  1255. */
  1256. static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *lport,
  1257. struct tcm_qla2xxx_nacl *nacl, struct qla_tgt_sess *sess)
  1258. {
  1259. struct se_session *se_sess = sess->se_sess;
  1260. unsigned char be_sid[3];
  1261. be_sid[0] = sess->s_id.b.domain;
  1262. be_sid[1] = sess->s_id.b.area;
  1263. be_sid[2] = sess->s_id.b.al_pa;
  1264. tcm_qla2xxx_set_sess_by_s_id(lport, NULL, nacl, se_sess,
  1265. sess, be_sid);
  1266. tcm_qla2xxx_set_sess_by_loop_id(lport, NULL, nacl, se_sess,
  1267. sess, sess->loop_id);
  1268. }
  1269. static void tcm_qla2xxx_free_session(struct qla_tgt_sess *sess)
  1270. {
  1271. struct qla_tgt *tgt = sess->tgt;
  1272. struct qla_hw_data *ha = tgt->ha;
  1273. scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
  1274. struct se_session *se_sess;
  1275. struct se_node_acl *se_nacl;
  1276. struct tcm_qla2xxx_lport *lport;
  1277. struct tcm_qla2xxx_nacl *nacl;
  1278. BUG_ON(in_interrupt());
  1279. se_sess = sess->se_sess;
  1280. if (!se_sess) {
  1281. pr_err("struct qla_tgt_sess->se_sess is NULL\n");
  1282. dump_stack();
  1283. return;
  1284. }
  1285. se_nacl = se_sess->se_node_acl;
  1286. nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl);
  1287. lport = vha->vha_tgt.target_lport_ptr;
  1288. if (!lport) {
  1289. pr_err("Unable to locate struct tcm_qla2xxx_lport\n");
  1290. dump_stack();
  1291. return;
  1292. }
  1293. target_wait_for_sess_cmds(se_sess);
  1294. transport_deregister_session_configfs(sess->se_sess);
  1295. transport_deregister_session(sess->se_sess);
  1296. }
  1297. /*
  1298. * Called via qlt_create_sess():ha->qla2x_tmpl->check_initiator_node_acl()
  1299. * to locate struct se_node_acl
  1300. */
  1301. static int tcm_qla2xxx_check_initiator_node_acl(
  1302. scsi_qla_host_t *vha,
  1303. unsigned char *fc_wwpn,
  1304. void *qla_tgt_sess,
  1305. uint8_t *s_id,
  1306. uint16_t loop_id)
  1307. {
  1308. struct qla_hw_data *ha = vha->hw;
  1309. struct tcm_qla2xxx_lport *lport;
  1310. struct tcm_qla2xxx_tpg *tpg;
  1311. struct tcm_qla2xxx_nacl *nacl;
  1312. struct se_portal_group *se_tpg;
  1313. struct se_node_acl *se_nacl;
  1314. struct se_session *se_sess;
  1315. struct qla_tgt_sess *sess = qla_tgt_sess;
  1316. unsigned char port_name[36];
  1317. unsigned long flags;
  1318. int num_tags = (ha->fw_xcb_count) ? ha->fw_xcb_count :
  1319. TCM_QLA2XXX_DEFAULT_TAGS;
  1320. lport = vha->vha_tgt.target_lport_ptr;
  1321. if (!lport) {
  1322. pr_err("Unable to locate struct tcm_qla2xxx_lport\n");
  1323. dump_stack();
  1324. return -EINVAL;
  1325. }
  1326. /*
  1327. * Locate the TPG=1 reference..
  1328. */
  1329. tpg = lport->tpg_1;
  1330. if (!tpg) {
  1331. pr_err("Unable to lcoate struct tcm_qla2xxx_lport->tpg_1\n");
  1332. return -EINVAL;
  1333. }
  1334. se_tpg = &tpg->se_tpg;
  1335. se_sess = transport_init_session_tags(num_tags,
  1336. sizeof(struct qla_tgt_cmd),
  1337. TARGET_PROT_NORMAL);
  1338. if (IS_ERR(se_sess)) {
  1339. pr_err("Unable to initialize struct se_session\n");
  1340. return PTR_ERR(se_sess);
  1341. }
  1342. /*
  1343. * Format the FCP Initiator port_name into colon seperated values to
  1344. * match the format by tcm_qla2xxx explict ConfigFS NodeACLs.
  1345. */
  1346. memset(&port_name, 0, 36);
  1347. snprintf(port_name, 36, "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
  1348. fc_wwpn[0], fc_wwpn[1], fc_wwpn[2], fc_wwpn[3], fc_wwpn[4],
  1349. fc_wwpn[5], fc_wwpn[6], fc_wwpn[7]);
  1350. /*
  1351. * Locate our struct se_node_acl either from an explict NodeACL created
  1352. * via ConfigFS, or via running in TPG demo mode.
  1353. */
  1354. se_sess->se_node_acl = core_tpg_check_initiator_node_acl(se_tpg,
  1355. port_name);
  1356. if (!se_sess->se_node_acl) {
  1357. transport_free_session(se_sess);
  1358. return -EINVAL;
  1359. }
  1360. se_nacl = se_sess->se_node_acl;
  1361. nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl);
  1362. /*
  1363. * And now setup the new se_nacl and session pointers into our HW lport
  1364. * mappings for fabric S_ID and LOOP_ID.
  1365. */
  1366. spin_lock_irqsave(&ha->hardware_lock, flags);
  1367. tcm_qla2xxx_set_sess_by_s_id(lport, se_nacl, nacl, se_sess,
  1368. qla_tgt_sess, s_id);
  1369. tcm_qla2xxx_set_sess_by_loop_id(lport, se_nacl, nacl, se_sess,
  1370. qla_tgt_sess, loop_id);
  1371. spin_unlock_irqrestore(&ha->hardware_lock, flags);
  1372. /*
  1373. * Finally register the new FC Nexus with TCM
  1374. */
  1375. transport_register_session(se_nacl->se_tpg, se_nacl, se_sess, sess);
  1376. return 0;
  1377. }
/*
 * qla_tgt_func_tmpl->update_sess() callback: a relogin may assign a new
 * loop_id and/or S_ID to an existing session, so migrate both lport
 * lookup-table entries to the new identifiers and record the initiator's
 * conf_compl_supported capability.
 */
static void tcm_qla2xxx_update_sess(struct qla_tgt_sess *sess, port_id_t s_id,
	uint16_t loop_id, bool conf_compl_supported)
{
	struct qla_tgt *tgt = sess->tgt;
	struct qla_hw_data *ha = tgt->ha;
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
	struct tcm_qla2xxx_lport *lport = vha->vha_tgt.target_lport_ptr;
	struct se_node_acl *se_nacl = sess->se_sess->se_node_acl;
	struct tcm_qla2xxx_nacl *nacl = container_of(se_nacl,
			struct tcm_qla2xxx_nacl, se_node_acl);
	u32 key;

	if (sess->loop_id != loop_id || sess->s_id.b24 != s_id.b24)
		pr_info("Updating session %p from port %8phC loop_id %d -> %d s_id %x:%x:%x -> %x:%x:%x\n",
		    sess, sess->port_name,
		    sess->loop_id, loop_id, sess->s_id.b.domain,
		    sess->s_id.b.area, sess->s_id.b.al_pa, s_id.b.domain,
		    s_id.b.area, s_id.b.al_pa);

	if (sess->loop_id != loop_id) {
		/*
		 * Because we can shuffle loop IDs around and we
		 * update different sessions non-atomically, we might
		 * have overwritten this session's old loop ID
		 * already, and we might end up overwriting some other
		 * session that will be updated later.  So we have to
		 * be extra careful and we can't warn about those things...
		 */
		if (lport->lport_loopid_map[sess->loop_id].se_nacl == se_nacl)
			lport->lport_loopid_map[sess->loop_id].se_nacl = NULL;

		lport->lport_loopid_map[loop_id].se_nacl = se_nacl;

		sess->loop_id = loop_id;
	}

	if (sess->s_id.b24 != s_id.b24) {
		/* Remove the old S_ID btree entry; it should map to us */
		key = (((u32) sess->s_id.b.domain << 16) |
		       ((u32) sess->s_id.b.area << 8) |
		       ((u32) sess->s_id.b.al_pa));

		if (btree_lookup32(&lport->lport_fcport_map, key))
			WARN(btree_remove32(&lport->lport_fcport_map, key) != se_nacl,
			    "Found wrong se_nacl when updating s_id %x:%x:%x\n",
			    sess->s_id.b.domain, sess->s_id.b.area, sess->s_id.b.al_pa);
		else
			WARN(1, "No lport_fcport_map entry for s_id %x:%x:%x\n",
			    sess->s_id.b.domain, sess->s_id.b.area, sess->s_id.b.al_pa);

		/* Insert (or, with a warning, overwrite) the new S_ID entry */
		key = (((u32) s_id.b.domain << 16) |
		       ((u32) s_id.b.area << 8) |
		       ((u32) s_id.b.al_pa));

		if (btree_lookup32(&lport->lport_fcport_map, key)) {
			WARN(1, "Already have lport_fcport_map entry for s_id %x:%x:%x\n",
			    s_id.b.domain, s_id.b.area, s_id.b.al_pa);
			btree_update32(&lport->lport_fcport_map, key, se_nacl);
		} else {
			btree_insert32(&lport->lport_fcport_map, key, se_nacl, GFP_ATOMIC);
		}

		sess->s_id = s_id;
		nacl->nport_id = key;
	}

	sess->conf_compl_supported = conf_compl_supported;
}
  1435. /*
  1436. * Calls into tcm_qla2xxx used by qla2xxx LLD I/O path.
  1437. */
  1438. static struct qla_tgt_func_tmpl tcm_qla2xxx_template = {
  1439. .handle_cmd = tcm_qla2xxx_handle_cmd,
  1440. .handle_data = tcm_qla2xxx_handle_data,
  1441. .handle_dif_err = tcm_qla2xxx_handle_dif_err,
  1442. .handle_tmr = tcm_qla2xxx_handle_tmr,
  1443. .free_cmd = tcm_qla2xxx_free_cmd,
  1444. .free_mcmd = tcm_qla2xxx_free_mcmd,
  1445. .free_session = tcm_qla2xxx_free_session,
  1446. .update_sess = tcm_qla2xxx_update_sess,
  1447. .check_initiator_node_acl = tcm_qla2xxx_check_initiator_node_acl,
  1448. .find_sess_by_s_id = tcm_qla2xxx_find_sess_by_s_id,
  1449. .find_sess_by_loop_id = tcm_qla2xxx_find_sess_by_loop_id,
  1450. .clear_nacl_from_fcport_map = tcm_qla2xxx_clear_nacl_from_fcport_map,
  1451. .put_sess = tcm_qla2xxx_put_sess,
  1452. .shutdown_sess = tcm_qla2xxx_shutdown_sess,
  1453. };
  1454. static int tcm_qla2xxx_init_lport(struct tcm_qla2xxx_lport *lport)
  1455. {
  1456. int rc;
  1457. rc = btree_init32(&lport->lport_fcport_map);
  1458. if (rc) {
  1459. pr_err("Unable to initialize lport->lport_fcport_map btree\n");
  1460. return rc;
  1461. }
  1462. lport->lport_loopid_map = vmalloc(sizeof(struct tcm_qla2xxx_fc_loopid) *
  1463. 65536);
  1464. if (!lport->lport_loopid_map) {
  1465. pr_err("Unable to allocate lport->lport_loopid_map of %zu bytes\n",
  1466. sizeof(struct tcm_qla2xxx_fc_loopid) * 65536);
  1467. btree_destroy32(&lport->lport_fcport_map);
  1468. return -ENOMEM;
  1469. }
  1470. memset(lport->lport_loopid_map, 0, sizeof(struct tcm_qla2xxx_fc_loopid)
  1471. * 65536);
  1472. pr_debug("qla2xxx: Allocated lport_loopid_map of %zu bytes\n",
  1473. sizeof(struct tcm_qla2xxx_fc_loopid) * 65536);
  1474. return 0;
  1475. }
/*
 * qlt_lport_register() completion callback for a physical port: wire up
 * the LLD tgt_ops template, the target_lport_ptr back-pointer and the
 * lport's vha.  npiv_wwpn/npiv_wwnn are unused in the non-NPIV case.
 */
static int tcm_qla2xxx_lport_register_cb(struct scsi_qla_host *vha,
					void *target_lport_ptr,
					u64 npiv_wwpn, u64 npiv_wwnn)
{
	struct qla_hw_data *ha = vha->hw;
	struct tcm_qla2xxx_lport *lport =
			(struct tcm_qla2xxx_lport *)target_lport_ptr;
	/*
	 * Setup tgt_ops, local pointer to vha and target_lport_ptr
	 */
	ha->tgt.tgt_ops = &tcm_qla2xxx_template;
	vha->vha_tgt.target_lport_ptr = target_lport_ptr;
	lport->qla_vha = vha;

	return 0;
}
/*
 * configfs callback to create a physical FC lport from a WWPN string.
 * Allocates the lport, initializes its S_ID/loop_id lookup maps and
 * registers with the qla2xxx LLD, unwinding via gotos on failure.
 *
 * Returns the lport's se_wwn or ERR_PTR() on failure.
 */
static struct se_wwn *tcm_qla2xxx_make_lport(
	struct target_fabric_configfs *tf,
	struct config_group *group,
	const char *name)
{
	struct tcm_qla2xxx_lport *lport;
	u64 wwpn;
	int ret = -ENODEV;

	if (tcm_qla2xxx_parse_wwn(name, &wwpn, 1) < 0)
		return ERR_PTR(-EINVAL);

	lport = kzalloc(sizeof(struct tcm_qla2xxx_lport), GFP_KERNEL);
	if (!lport) {
		pr_err("Unable to allocate struct tcm_qla2xxx_lport\n");
		return ERR_PTR(-ENOMEM);
	}
	lport->lport_wwpn = wwpn;
	tcm_qla2xxx_format_wwn(&lport->lport_name[0], TCM_QLA2XXX_NAMELEN,
				wwpn);
	sprintf(lport->lport_naa_name, "naa.%016llx", (unsigned long long) wwpn);

	ret = tcm_qla2xxx_init_lport(lport);
	if (ret != 0)
		goto out;

	ret = qlt_lport_register(lport, wwpn, 0, 0,
				 tcm_qla2xxx_lport_register_cb);
	if (ret != 0)
		goto out_lport;

	return &lport->lport_wwn;
out_lport:
	vfree(lport->lport_loopid_map);
	btree_destroy32(&lport->lport_fcport_map);
out:
	kfree(lport);
	return ERR_PTR(ret);
}
/*
 * configfs fabric_drop_wwn callback for the physical port: complete
 * target-mode shutdown in the LLD, detach from the vha, then release
 * everything tcm_qla2xxx_init_lport() allocated.
 *
 * NOTE: the ordering here is deliberate — stop target mode and
 * deregister from qla2xxx before freeing the lookup structures the
 * LLD may still reference.
 */
static void tcm_qla2xxx_drop_lport(struct se_wwn *wwn)
{
	struct tcm_qla2xxx_lport *lport = container_of(wwn,
			struct tcm_qla2xxx_lport, lport_wwn);
	struct scsi_qla_host *vha = lport->qla_vha;
	struct se_node_acl *node;
	u32 key = 0;

	/*
	 * Call into qla2x_target.c LLD logic to complete the
	 * shutdown of struct qla_tgt after the call to
	 * qlt_stop_phase1() from tcm_qla2xxx_drop_tpg() above..
	 */
	if (vha->vha_tgt.qla_tgt && !vha->vha_tgt.qla_tgt->tgt_stopped)
		qlt_stop_phase2(vha->vha_tgt.qla_tgt);

	qlt_lport_deregister(vha);

	vfree(lport->lport_loopid_map);
	/* Drain the btree before destroying it; the se_node_acl entries
	 * themselves are owned and freed by target core, not here. */
	btree_for_each_safe32(&lport->lport_fcport_map, key, node)
		btree_remove32(&lport->lport_fcport_map, key);
	btree_destroy32(&lport->lport_fcport_map);
	kfree(lport);
}
  1546. static int tcm_qla2xxx_lport_register_npiv_cb(struct scsi_qla_host *base_vha,
  1547. void *target_lport_ptr,
  1548. u64 npiv_wwpn, u64 npiv_wwnn)
  1549. {
  1550. struct fc_vport *vport;
  1551. struct Scsi_Host *sh = base_vha->host;
  1552. struct scsi_qla_host *npiv_vha;
  1553. struct tcm_qla2xxx_lport *lport =
  1554. (struct tcm_qla2xxx_lport *)target_lport_ptr;
  1555. struct tcm_qla2xxx_lport *base_lport =
  1556. (struct tcm_qla2xxx_lport *)base_vha->vha_tgt.target_lport_ptr;
  1557. struct tcm_qla2xxx_tpg *base_tpg;
  1558. struct fc_vport_identifiers vport_id;
  1559. if (!qla_tgt_mode_enabled(base_vha)) {
  1560. pr_err("qla2xxx base_vha not enabled for target mode\n");
  1561. return -EPERM;
  1562. }
  1563. if (!base_lport || !base_lport->tpg_1 ||
  1564. !atomic_read(&base_lport->tpg_1->lport_tpg_enabled)) {
  1565. pr_err("qla2xxx base_lport or tpg_1 not available\n");
  1566. return -EPERM;
  1567. }
  1568. base_tpg = base_lport->tpg_1;
  1569. memset(&vport_id, 0, sizeof(vport_id));
  1570. vport_id.port_name = npiv_wwpn;
  1571. vport_id.node_name = npiv_wwnn;
  1572. vport_id.roles = FC_PORT_ROLE_FCP_INITIATOR;
  1573. vport_id.vport_type = FC_PORTTYPE_NPIV;
  1574. vport_id.disable = false;
  1575. vport = fc_vport_create(sh, 0, &vport_id);
  1576. if (!vport) {
  1577. pr_err("fc_vport_create failed for qla2xxx_npiv\n");
  1578. return -ENODEV;
  1579. }
  1580. /*
  1581. * Setup local pointer to NPIV vhba + target_lport_ptr
  1582. */
  1583. npiv_vha = (struct scsi_qla_host *)vport->dd_data;
  1584. npiv_vha->vha_tgt.target_lport_ptr = target_lport_ptr;
  1585. lport->qla_vha = npiv_vha;
  1586. scsi_host_get(npiv_vha->host);
  1587. return 0;
  1588. }
  1589. static struct se_wwn *tcm_qla2xxx_npiv_make_lport(
  1590. struct target_fabric_configfs *tf,
  1591. struct config_group *group,
  1592. const char *name)
  1593. {
  1594. struct tcm_qla2xxx_lport *lport;
  1595. u64 phys_wwpn, npiv_wwpn, npiv_wwnn;
  1596. char *p, tmp[128];
  1597. int ret;
  1598. snprintf(tmp, 128, "%s", name);
  1599. p = strchr(tmp, '@');
  1600. if (!p) {
  1601. pr_err("Unable to locate NPIV '@' seperator\n");
  1602. return ERR_PTR(-EINVAL);
  1603. }
  1604. *p++ = '\0';
  1605. if (tcm_qla2xxx_parse_wwn(tmp, &phys_wwpn, 1) < 0)
  1606. return ERR_PTR(-EINVAL);
  1607. if (tcm_qla2xxx_npiv_parse_wwn(p, strlen(p)+1,
  1608. &npiv_wwpn, &npiv_wwnn) < 0)
  1609. return ERR_PTR(-EINVAL);
  1610. lport = kzalloc(sizeof(struct tcm_qla2xxx_lport), GFP_KERNEL);
  1611. if (!lport) {
  1612. pr_err("Unable to allocate struct tcm_qla2xxx_lport for NPIV\n");
  1613. return ERR_PTR(-ENOMEM);
  1614. }
  1615. lport->lport_npiv_wwpn = npiv_wwpn;
  1616. lport->lport_npiv_wwnn = npiv_wwnn;
  1617. sprintf(lport->lport_naa_name, "naa.%016llx", (unsigned long long) npiv_wwpn);
  1618. ret = tcm_qla2xxx_init_lport(lport);
  1619. if (ret != 0)
  1620. goto out;
  1621. ret = qlt_lport_register(lport, phys_wwpn, npiv_wwpn, npiv_wwnn,
  1622. tcm_qla2xxx_lport_register_npiv_cb);
  1623. if (ret != 0)
  1624. goto out_lport;
  1625. return &lport->lport_wwn;
  1626. out_lport:
  1627. vfree(lport->lport_loopid_map);
  1628. btree_destroy32(&lport->lport_fcport_map);
  1629. out:
  1630. kfree(lport);
  1631. return ERR_PTR(ret);
  1632. }
/*
 * configfs fabric_drop_wwn callback for NPIV ports: release the host
 * references taken at registration time, tear down the FC vport, and
 * free the lport.
 *
 * NOTE(review): scsi_host_put(npiv_vha->host) is issued *before*
 * fc_vport_terminate() on that same vha — presumably safe because the
 * vport layer holds its own reference until terminate completes, but
 * worth confirming against the scsi_transport_fc lifetime rules.
 */
static void tcm_qla2xxx_npiv_drop_lport(struct se_wwn *wwn)
{
	struct tcm_qla2xxx_lport *lport = container_of(wwn,
			struct tcm_qla2xxx_lport, lport_wwn);
	struct scsi_qla_host *npiv_vha = lport->qla_vha;
	struct qla_hw_data *ha = npiv_vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	/* Drop the reference taken in tcm_qla2xxx_lport_register_npiv_cb() */
	scsi_host_put(npiv_vha->host);
	/*
	 * Notify libfc that we want to release the vha->fc_vport
	 */
	fc_vport_terminate(npiv_vha->fc_vport);
	/* Release the base physical host reference as well */
	scsi_host_put(base_vha->host);
	kfree(lport);
}
/*
 * configfs 'version' attribute show handler: report the fabric module
 * version plus the running kernel's sysname/machine and UTS release.
 * The function name must match the TF_WWN_ATTR_RO() naming scheme below.
 */
static ssize_t tcm_qla2xxx_wwn_show_attr_version(
	struct target_fabric_configfs *tf,
	char *page)
{
	return sprintf(page,
	    "TCM QLOGIC QLA2XXX NPIV capable fabric module %s on %s/%s on "
	    UTS_RELEASE"\n", TCM_QLA2XXX_VERSION, utsname()->sysname,
	    utsname()->machine);
}

/* Expands to the read-only 'tcm_qla2xxx_wwn_version' configfs attribute */
TF_WWN_ATTR_RO(tcm_qla2xxx, version);

/* NULL-terminated attribute list wired into tfc_wwn_cit below */
static struct configfs_attribute *tcm_qla2xxx_wwn_attrs[] = {
	&tcm_qla2xxx_wwn_version.attr,
	NULL,
};
/*
 * target_core_fabric_ops for the physical (non-NPIV) qla2xxx fabric.
 * Entries left NULL are optional callbacks this fabric does not need.
 */
static struct target_core_fabric_ops tcm_qla2xxx_ops = {
	.get_fabric_name		= tcm_qla2xxx_get_fabric_name,
	.get_fabric_proto_ident		= tcm_qla2xxx_get_fabric_proto_ident,
	.tpg_get_wwn			= tcm_qla2xxx_get_fabric_wwn,
	.tpg_get_tag			= tcm_qla2xxx_get_tag,
	.tpg_get_default_depth		= tcm_qla2xxx_get_default_depth,
	.tpg_get_pr_transport_id	= tcm_qla2xxx_get_pr_transport_id,
	.tpg_get_pr_transport_id_len	= tcm_qla2xxx_get_pr_transport_id_len,
	.tpg_parse_pr_out_transport_id	= tcm_qla2xxx_parse_pr_out_transport_id,
	.tpg_check_demo_mode		= tcm_qla2xxx_check_demo_mode,
	.tpg_check_demo_mode_cache	= tcm_qla2xxx_check_demo_mode_cache,
	.tpg_check_demo_mode_write_protect =
					tcm_qla2xxx_check_demo_write_protect,
	.tpg_check_prod_mode_write_protect =
					tcm_qla2xxx_check_prod_write_protect,
	.tpg_check_demo_mode_login_only = tcm_qla2xxx_check_demo_mode_login_only,
	.tpg_alloc_fabric_acl		= tcm_qla2xxx_alloc_fabric_acl,
	.tpg_release_fabric_acl		= tcm_qla2xxx_release_fabric_acl,
	.tpg_get_inst_index		= tcm_qla2xxx_tpg_get_inst_index,
	.check_stop_free		= tcm_qla2xxx_check_stop_free,
	.release_cmd			= tcm_qla2xxx_release_cmd,
	.put_session			= tcm_qla2xxx_put_session,
	.shutdown_session		= tcm_qla2xxx_shutdown_session,
	.close_session			= tcm_qla2xxx_close_session,
	.sess_get_index			= tcm_qla2xxx_sess_get_index,
	.sess_get_initiator_sid		= NULL,
	.write_pending			= tcm_qla2xxx_write_pending,
	.write_pending_status		= tcm_qla2xxx_write_pending_status,
	.set_default_node_attributes	= tcm_qla2xxx_set_default_node_attrs,
	.get_task_tag			= tcm_qla2xxx_get_task_tag,
	.get_cmd_state			= tcm_qla2xxx_get_cmd_state,
	.queue_data_in			= tcm_qla2xxx_queue_data_in,
	.queue_status			= tcm_qla2xxx_queue_status,
	.queue_tm_rsp			= tcm_qla2xxx_queue_tm_rsp,
	.aborted_task			= tcm_qla2xxx_aborted_task,
	/*
	 * Setup function pointers for generic logic in
	 * target_core_fabric_configfs.c
	 */
	.fabric_make_wwn		= tcm_qla2xxx_make_lport,
	.fabric_drop_wwn		= tcm_qla2xxx_drop_lport,
	.fabric_make_tpg		= tcm_qla2xxx_make_tpg,
	.fabric_drop_tpg		= tcm_qla2xxx_drop_tpg,
	.fabric_post_link		= NULL,
	.fabric_pre_unlink		= NULL,
	.fabric_make_np			= NULL,
	.fabric_drop_np			= NULL,
	.fabric_make_nodeacl		= tcm_qla2xxx_make_nodeacl,
	.fabric_drop_nodeacl		= tcm_qla2xxx_drop_nodeacl,
};
/*
 * target_core_fabric_ops for the NPIV qla2xxx fabric.  Differs from
 * tcm_qla2xxx_ops in: fabric name, demo-mode write-protect check (uses
 * tcm_qla2xxx_check_demo_mode here), and the make_wwn/drop_wwn/make_tpg
 * callbacks, which use the NPIV variants.
 */
static struct target_core_fabric_ops tcm_qla2xxx_npiv_ops = {
	.get_fabric_name		= tcm_qla2xxx_npiv_get_fabric_name,
	.get_fabric_proto_ident		= tcm_qla2xxx_get_fabric_proto_ident,
	.tpg_get_wwn			= tcm_qla2xxx_get_fabric_wwn,
	.tpg_get_tag			= tcm_qla2xxx_get_tag,
	.tpg_get_default_depth		= tcm_qla2xxx_get_default_depth,
	.tpg_get_pr_transport_id	= tcm_qla2xxx_get_pr_transport_id,
	.tpg_get_pr_transport_id_len	= tcm_qla2xxx_get_pr_transport_id_len,
	.tpg_parse_pr_out_transport_id	= tcm_qla2xxx_parse_pr_out_transport_id,
	.tpg_check_demo_mode		= tcm_qla2xxx_check_demo_mode,
	.tpg_check_demo_mode_cache	= tcm_qla2xxx_check_demo_mode_cache,
	.tpg_check_demo_mode_write_protect = tcm_qla2xxx_check_demo_mode,
	.tpg_check_prod_mode_write_protect =
					tcm_qla2xxx_check_prod_write_protect,
	.tpg_check_demo_mode_login_only	= tcm_qla2xxx_check_demo_mode_login_only,
	.tpg_alloc_fabric_acl		= tcm_qla2xxx_alloc_fabric_acl,
	.tpg_release_fabric_acl		= tcm_qla2xxx_release_fabric_acl,
	.tpg_get_inst_index		= tcm_qla2xxx_tpg_get_inst_index,
	.check_stop_free		= tcm_qla2xxx_check_stop_free,
	.release_cmd			= tcm_qla2xxx_release_cmd,
	.put_session			= tcm_qla2xxx_put_session,
	.shutdown_session		= tcm_qla2xxx_shutdown_session,
	.close_session			= tcm_qla2xxx_close_session,
	.sess_get_index			= tcm_qla2xxx_sess_get_index,
	.sess_get_initiator_sid		= NULL,
	.write_pending			= tcm_qla2xxx_write_pending,
	.write_pending_status		= tcm_qla2xxx_write_pending_status,
	.set_default_node_attributes	= tcm_qla2xxx_set_default_node_attrs,
	.get_task_tag			= tcm_qla2xxx_get_task_tag,
	.get_cmd_state			= tcm_qla2xxx_get_cmd_state,
	.queue_data_in			= tcm_qla2xxx_queue_data_in,
	.queue_status			= tcm_qla2xxx_queue_status,
	.queue_tm_rsp			= tcm_qla2xxx_queue_tm_rsp,
	.aborted_task			= tcm_qla2xxx_aborted_task,
	/*
	 * Setup function pointers for generic logic in
	 * target_core_fabric_configfs.c
	 */
	.fabric_make_wwn		= tcm_qla2xxx_npiv_make_lport,
	.fabric_drop_wwn		= tcm_qla2xxx_npiv_drop_lport,
	.fabric_make_tpg		= tcm_qla2xxx_npiv_make_tpg,
	.fabric_drop_tpg		= tcm_qla2xxx_drop_tpg,
	.fabric_post_link		= NULL,
	.fabric_pre_unlink		= NULL,
	.fabric_make_np			= NULL,
	.fabric_drop_np			= NULL,
	.fabric_make_nodeacl		= tcm_qla2xxx_make_nodeacl,
	.fabric_drop_nodeacl		= tcm_qla2xxx_drop_nodeacl,
};
/*
 * Module-init helper: register both fabrics ("qla2xxx" and
 * "qla2xxx_npiv") with target core and create the two workqueues used
 * for command free and submission contexts.
 *
 * Returns 0 on success or a negative errno.  Resources are unwound in
 * reverse order via the labels at the bottom.
 */
static int tcm_qla2xxx_register_configfs(void)
{
	struct target_fabric_configfs *fabric, *npiv_fabric;
	int ret;

	pr_debug("TCM QLOGIC QLA2XXX fabric module %s on %s/%s on "
	    UTS_RELEASE"\n", TCM_QLA2XXX_VERSION, utsname()->sysname,
	    utsname()->machine);
	/*
	 * Register the top level struct config_item_type with TCM core
	 */
	fabric = target_fabric_configfs_init(THIS_MODULE, "qla2xxx");
	if (IS_ERR(fabric)) {
		pr_err("target_fabric_configfs_init() failed\n");
		return PTR_ERR(fabric);
	}
	/*
	 * Setup fabric->tf_ops from our local tcm_qla2xxx_ops
	 */
	fabric->tf_ops = tcm_qla2xxx_ops;
	/*
	 * Setup default attribute lists for various fabric->tf_cit_tmpl
	 */
	fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = tcm_qla2xxx_wwn_attrs;
	fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = tcm_qla2xxx_tpg_attrs;
	fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs =
						tcm_qla2xxx_tpg_attrib_attrs;
	fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL;
	fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;
	fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = NULL;
	fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
	fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
	fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL;
	/*
	 * Register the fabric for use within TCM
	 */
	ret = target_fabric_configfs_register(fabric);
	if (ret < 0) {
		pr_err("target_fabric_configfs_register() failed for TCM_QLA2XXX\n");
		return ret;
	}
	/*
	 * Setup our local pointer to *fabric
	 */
	tcm_qla2xxx_fabric_configfs = fabric;
	pr_debug("TCM_QLA2XXX[0] - Set fabric -> tcm_qla2xxx_fabric_configfs\n");

	/*
	 * Register the top level struct config_item_type for NPIV with TCM core
	 */
	npiv_fabric = target_fabric_configfs_init(THIS_MODULE, "qla2xxx_npiv");
	if (IS_ERR(npiv_fabric)) {
		pr_err("target_fabric_configfs_init() failed\n");
		ret = PTR_ERR(npiv_fabric);
		goto out_fabric;
	}
	/*
	 * Setup fabric->tf_ops from our local tcm_qla2xxx_npiv_ops
	 */
	npiv_fabric->tf_ops = tcm_qla2xxx_npiv_ops;
	/*
	 * Setup default attribute lists for various npiv_fabric->tf_cit_tmpl
	 */
	npiv_fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = tcm_qla2xxx_wwn_attrs;
	npiv_fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs =
						tcm_qla2xxx_npiv_tpg_attrs;
	npiv_fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL;
	npiv_fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL;
	npiv_fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;
	npiv_fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = NULL;
	npiv_fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
	npiv_fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
	npiv_fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL;
	/*
	 * Register the npiv_fabric for use within TCM
	 *
	 * NOTE(review): if this register fails we jump to out_fabric,
	 * which deregisters only the base fabric — npiv_fabric itself
	 * appears to be leaked on this path unless
	 * target_fabric_configfs_register() frees it on failure; verify
	 * against the target core API.
	 */
	ret = target_fabric_configfs_register(npiv_fabric);
	if (ret < 0) {
		pr_err("target_fabric_configfs_register() failed for TCM_QLA2XXX\n");
		goto out_fabric;
	}
	/*
	 * Setup our local pointer to *npiv_fabric
	 */
	tcm_qla2xxx_npiv_fabric_configfs = npiv_fabric;
	pr_debug("TCM_QLA2XXX[0] - Set fabric -> tcm_qla2xxx_npiv_fabric_configfs\n");

	/* WQ_MEM_RECLAIM: the free path may run under memory pressure */
	tcm_qla2xxx_free_wq = alloc_workqueue("tcm_qla2xxx_free",
						WQ_MEM_RECLAIM, 0);
	if (!tcm_qla2xxx_free_wq) {
		ret = -ENOMEM;
		goto out_fabric_npiv;
	}

	tcm_qla2xxx_cmd_wq = alloc_workqueue("tcm_qla2xxx_cmd", 0, 0);
	if (!tcm_qla2xxx_cmd_wq) {
		ret = -ENOMEM;
		goto out_free_wq;
	}

	return 0;

out_free_wq:
	destroy_workqueue(tcm_qla2xxx_free_wq);
out_fabric_npiv:
	target_fabric_configfs_deregister(tcm_qla2xxx_npiv_fabric_configfs);
out_fabric:
	target_fabric_configfs_deregister(tcm_qla2xxx_fabric_configfs);
	return ret;
}
/*
 * Module-exit helper: reverse of tcm_qla2xxx_register_configfs().
 * Workqueues are destroyed first so no deferred work can touch the
 * fabrics while they are being deregistered.
 */
static void tcm_qla2xxx_deregister_configfs(void)
{
	destroy_workqueue(tcm_qla2xxx_cmd_wq);
	destroy_workqueue(tcm_qla2xxx_free_wq);

	target_fabric_configfs_deregister(tcm_qla2xxx_fabric_configfs);
	tcm_qla2xxx_fabric_configfs = NULL;
	pr_debug("TCM_QLA2XXX[0] - Cleared tcm_qla2xxx_fabric_configfs\n");

	target_fabric_configfs_deregister(tcm_qla2xxx_npiv_fabric_configfs);
	tcm_qla2xxx_npiv_fabric_configfs = NULL;
	pr_debug("TCM_QLA2XXX[0] - Cleared tcm_qla2xxx_npiv_fabric_configfs\n");
}
  1876. static int __init tcm_qla2xxx_init(void)
  1877. {
  1878. int ret;
  1879. ret = tcm_qla2xxx_register_configfs();
  1880. if (ret < 0)
  1881. return ret;
  1882. return 0;
  1883. }
/* Module exit point: tear down both fabrics and the workqueues. */
static void __exit tcm_qla2xxx_exit(void)
{
	tcm_qla2xxx_deregister_configfs();
}
  1888. MODULE_DESCRIPTION("TCM QLA2XXX series NPIV enabled fabric driver");
  1889. MODULE_LICENSE("GPL");
  1890. module_init(tcm_qla2xxx_init);
  1891. module_exit(tcm_qla2xxx_exit);