fnic_fcs.c

/*
 * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/workqueue.h>
#include <scsi/fc/fc_fip.h>
#include <scsi/fc/fc_els.h>
#include <scsi/fc/fc_fcoe.h>
#include <scsi/fc_frame.h>
#include <scsi/libfc.h>
#include "fnic_io.h"
#include "fnic.h"
#include "fnic_fip.h"
#include "cq_enet_desc.h"
#include "cq_exch_desc.h"

static u8 fcoe_all_fcfs[ETH_ALEN] = FIP_ALL_FCF_MACS;
struct workqueue_struct *fnic_fip_queue;
struct workqueue_struct *fnic_event_queue;

static void fnic_set_eth_mode(struct fnic *);
static void fnic_fcoe_send_vlan_req(struct fnic *fnic);
static void fnic_fcoe_start_fcf_disc(struct fnic *fnic);
static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *);
static int fnic_fcoe_vlan_check(struct fnic *fnic, u16 flag);
static int fnic_fcoe_handle_fip_frame(struct fnic *fnic, struct sk_buff *skb);
void fnic_handle_link(struct work_struct *work)
{
	struct fnic *fnic = container_of(work, struct fnic, link_work);
	unsigned long flags;
	int old_link_status;
	u32 old_link_down_cnt;

	spin_lock_irqsave(&fnic->fnic_lock, flags);

	if (fnic->stop_rx_link_events) {
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		return;
	}

	old_link_down_cnt = fnic->link_down_cnt;
	old_link_status = fnic->link_status;
	fnic->link_status = vnic_dev_link_status(fnic->vdev);
	fnic->link_down_cnt = vnic_dev_link_down_cnt(fnic->vdev);

	if (old_link_status == fnic->link_status) {
		if (!fnic->link_status) {
			/* DOWN -> DOWN */
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			fnic_fc_trace_set_data(fnic->lport->host->host_no,
				FNIC_FC_LE, "Link Status: DOWN->DOWN",
				strlen("Link Status: DOWN->DOWN"));
		} else {
			if (old_link_down_cnt != fnic->link_down_cnt) {
				/* UP -> DOWN -> UP */
				fnic->lport->host_stats.link_failure_count++;
				spin_unlock_irqrestore(&fnic->fnic_lock, flags);
				fnic_fc_trace_set_data(
					fnic->lport->host->host_no,
					FNIC_FC_LE,
					"Link Status: UP_DOWN_UP",
					strlen("Link Status: UP_DOWN_UP"));
				FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
					     "link down\n");
				fcoe_ctlr_link_down(&fnic->ctlr);
				if (fnic->config.flags & VFCF_FIP_CAPABLE) {
					/* start FCoE VLAN discovery */
					fnic_fc_trace_set_data(
						fnic->lport->host->host_no,
						FNIC_FC_LE,
						"Link Status: UP_DOWN_UP_VLAN",
						strlen("Link Status: UP_DOWN_UP_VLAN"));
					fnic_fcoe_send_vlan_req(fnic);
					return;
				}
				FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
					     "link up\n");
				fcoe_ctlr_link_up(&fnic->ctlr);
			} else {
				/* UP -> UP */
				spin_unlock_irqrestore(&fnic->fnic_lock, flags);
				fnic_fc_trace_set_data(
					fnic->lport->host->host_no, FNIC_FC_LE,
					"Link Status: UP_UP",
					strlen("Link Status: UP_UP"));
			}
		}
	} else if (fnic->link_status) {
		/* DOWN -> UP */
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		if (fnic->config.flags & VFCF_FIP_CAPABLE) {
			/* start FCoE VLAN discovery */
			fnic_fc_trace_set_data(
				fnic->lport->host->host_no,
				FNIC_FC_LE, "Link Status: DOWN_UP_VLAN",
				strlen("Link Status: DOWN_UP_VLAN"));
			fnic_fcoe_send_vlan_req(fnic);
			return;
		}
		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link up\n");
		fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_LE,
				       "Link Status: DOWN_UP",
				       strlen("Link Status: DOWN_UP"));
		fcoe_ctlr_link_up(&fnic->ctlr);
	} else {
		/* UP -> DOWN */
		fnic->lport->host_stats.link_failure_count++;
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link down\n");
		fnic_fc_trace_set_data(
			fnic->lport->host->host_no, FNIC_FC_LE,
			"Link Status: UP_DOWN",
			strlen("Link Status: UP_DOWN"));
		fcoe_ctlr_link_down(&fnic->ctlr);
	}
}
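
/*
 * Summary of the transitions handled by fnic_handle_link() above
 * (derived from the code; old link state -> new link state):
 *
 *	DOWN -> DOWN		trace event only
 *	UP   -> UP		trace event only (link_down_cnt unchanged)
 *	UP -> DOWN -> UP	link bounce: bump link_failure_count, tell
 *				the FCoE controller the link dropped, then
 *				either restart FIP VLAN discovery (when
 *				VFCF_FIP_CAPABLE) or report link up
 *	DOWN -> UP		FIP VLAN discovery, or plain link up
 *	UP   -> DOWN		bump link_failure_count, report link down
 */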

/*
 * This function passes incoming fabric frames to libFC.
 */
void fnic_handle_frame(struct work_struct *work)
{
	struct fnic *fnic = container_of(work, struct fnic, frame_work);
	struct fc_lport *lp = fnic->lport;
	unsigned long flags;
	struct sk_buff *skb;
	struct fc_frame *fp;

	while ((skb = skb_dequeue(&fnic->frame_queue))) {

		spin_lock_irqsave(&fnic->fnic_lock, flags);
		if (fnic->stop_rx_link_events) {
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			dev_kfree_skb(skb);
			return;
		}
		fp = (struct fc_frame *)skb;

		/*
		 * If we're in a transitional state, just re-queue and return.
		 * The queue will be serviced when we get to a stable state.
		 */
		if (fnic->state != FNIC_IN_FC_MODE &&
		    fnic->state != FNIC_IN_ETH_MODE) {
			skb_queue_head(&fnic->frame_queue, skb);
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			return;
		}
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);

		fc_exch_recv(lp, fp);
	}
}

void fnic_fcoe_evlist_free(struct fnic *fnic)
{
	struct fnic_event *fevt = NULL;
	struct fnic_event *next = NULL;
	unsigned long flags;

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	if (list_empty(&fnic->evlist)) {
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		return;
	}

	list_for_each_entry_safe(fevt, next, &fnic->evlist, list) {
		list_del(&fevt->list);
		kfree(fevt);
	}
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
}

void fnic_handle_event(struct work_struct *work)
{
	struct fnic *fnic = container_of(work, struct fnic, event_work);
	struct fnic_event *fevt = NULL;
	struct fnic_event *next = NULL;
	unsigned long flags;

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	if (list_empty(&fnic->evlist)) {
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		return;
	}

	list_for_each_entry_safe(fevt, next, &fnic->evlist, list) {
		if (fnic->stop_rx_link_events) {
			list_del(&fevt->list);
			kfree(fevt);
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			return;
		}
		/*
		 * If we're in a transitional state, just re-queue and return.
		 * The queue will be serviced when we get to a stable state.
		 */
		if (fnic->state != FNIC_IN_FC_MODE &&
		    fnic->state != FNIC_IN_ETH_MODE) {
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			return;
		}

		list_del(&fevt->list);
		switch (fevt->event) {
		case FNIC_EVT_START_VLAN_DISC:
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			fnic_fcoe_send_vlan_req(fnic);
			spin_lock_irqsave(&fnic->fnic_lock, flags);
			break;
		case FNIC_EVT_START_FCF_DISC:
			FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
				     "Start FCF Discovery\n");
			fnic_fcoe_start_fcf_disc(fnic);
			break;
		default:
			FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
				     "Unknown event 0x%x\n", fevt->event);
			break;
		}
		kfree(fevt);
	}
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
}

/**
 * is_fnic_fip_flogi_reject() - check if a received FIP FLOGI frame is rejected.
 * @fip: The FCoE controller that received the frame
 * @skb: The received FIP frame
 *
 * Returns non-zero if the frame is an ELS reject (LS_RJT), e.g. rejected
 * with an unsupported-command reason and insufficient-resources explanation.
 */
static inline int is_fnic_fip_flogi_reject(struct fcoe_ctlr *fip,
					   struct sk_buff *skb)
{
	struct fc_lport *lport = fip->lp;
	struct fip_header *fiph;
	struct fc_frame_header *fh = NULL;
	struct fip_desc *desc;
	struct fip_encaps *els;
	enum fip_desc_type els_dtype = 0;
	u16 op;
	u8 els_op;
	u8 sub;

	size_t els_len = 0;
	size_t rlen;
	size_t dlen = 0;

	if (skb_linearize(skb))
		return 0;

	if (skb->len < sizeof(*fiph))
		return 0;

	fiph = (struct fip_header *)skb->data;
	op = ntohs(fiph->fip_op);
	sub = fiph->fip_subcode;

	if (op != FIP_OP_LS)
		return 0;

	if (sub != FIP_SC_REP)
		return 0;

	rlen = ntohs(fiph->fip_dl_len) * 4;
	if (rlen + sizeof(*fiph) > skb->len)
		return 0;

	desc = (struct fip_desc *)(fiph + 1);
	dlen = desc->fip_dlen * FIP_BPW;

	if (desc->fip_dtype == FIP_DT_FLOGI) {

		if (dlen < sizeof(*els) + sizeof(*fh) + 1)
			return 0;

		els_len = dlen - sizeof(*els);
		els = (struct fip_encaps *)desc;
		fh = (struct fc_frame_header *)(els + 1);
		els_dtype = desc->fip_dtype;

		if (!fh)
			return 0;

		/*
		 * ELS command code, reason and explanation should be:
		 * Reject, unsupported command and insufficient resource.
		 */
		els_op = *(u8 *)(fh + 1);
		if (els_op == ELS_LS_RJT) {
			shost_printk(KERN_INFO, lport->host,
				     "Flogi Request Rejected by Switch\n");
			return 1;
		}
		shost_printk(KERN_INFO, lport->host,
			     "Flogi Request Accepted by Switch\n");
	}
	return 0;
}
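
/*
 * Illustrative layout of the frame walked above (a sketch derived from the
 * casts in is_fnic_fip_flogi_reject(), assuming a single FLOGI descriptor):
 *
 *	struct fip_header	at skb->data
 *	struct fip_encaps	immediately after the FIP header (desc)
 *	struct fc_frame_header	immediately after the encaps descriptor
 *	u8 els_op		first byte of the encapsulated ELS payload
 *
 * so els_op = *(u8 *)(fh + 1) reads the ELS command code (e.g. ELS_LS_RJT).
 */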

static void fnic_fcoe_send_vlan_req(struct fnic *fnic)
{
	struct fcoe_ctlr *fip = &fnic->ctlr;
	struct fnic_stats *fnic_stats = &fnic->fnic_stats;
	struct sk_buff *skb;
	char *eth_fr;
	int fr_len;
	struct fip_vlan *vlan;
	u64 vlan_tov;

	fnic_fcoe_reset_vlans(fnic);
	fnic->set_vlan(fnic, 0);
	FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
		     "Sending VLAN request...\n");
	skb = dev_alloc_skb(sizeof(struct fip_vlan));
	if (!skb)
		return;

	fr_len = sizeof(*vlan);
	eth_fr = (char *)skb->data;
	vlan = (struct fip_vlan *)eth_fr;

	memset(vlan, 0, sizeof(*vlan));
	memcpy(vlan->eth.h_source, fip->ctl_src_addr, ETH_ALEN);
	memcpy(vlan->eth.h_dest, fcoe_all_fcfs, ETH_ALEN);
	vlan->eth.h_proto = htons(ETH_P_FIP);

	vlan->fip.fip_ver = FIP_VER_ENCAPS(FIP_VER);
	vlan->fip.fip_op = htons(FIP_OP_VLAN);
	vlan->fip.fip_subcode = FIP_SC_VL_REQ;
	vlan->fip.fip_dl_len = htons(sizeof(vlan->desc) / FIP_BPW);

	vlan->desc.mac.fd_desc.fip_dtype = FIP_DT_MAC;
	vlan->desc.mac.fd_desc.fip_dlen = sizeof(vlan->desc.mac) / FIP_BPW;
	memcpy(&vlan->desc.mac.fd_mac, fip->ctl_src_addr, ETH_ALEN);

	vlan->desc.wwnn.fd_desc.fip_dtype = FIP_DT_NAME;
	vlan->desc.wwnn.fd_desc.fip_dlen = sizeof(vlan->desc.wwnn) / FIP_BPW;
	put_unaligned_be64(fip->lp->wwnn, &vlan->desc.wwnn.fd_wwn);

	atomic64_inc(&fnic_stats->vlan_stats.vlan_disc_reqs);

	skb_put(skb, sizeof(*vlan));
	skb->protocol = htons(ETH_P_FIP);
	skb_reset_mac_header(skb);
	skb_reset_network_header(skb);
	fip->send(fip, skb);

	/* set a timer so that we can retry if there is no response */
	vlan_tov = jiffies + msecs_to_jiffies(FCOE_CTLR_FIPVLAN_TOV);
	mod_timer(&fnic->fip_timer, round_jiffies(vlan_tov));
}
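
/*
 * On the wire, the request built above looks like this (a sketch based on
 * the struct fip_vlan assignments; multi-byte fields are big-endian):
 *
 *	Ethernet:    dest = All-FCF-MACs multicast, src = ctl_src_addr,
 *		     ethertype = ETH_P_FIP
 *	FIP header:  op = FIP_OP_VLAN, subcode = FIP_SC_VL_REQ
 *	Descriptors: FIP_DT_MAC  (controller source MAC)
 *		     FIP_DT_NAME (local port WWNN)
 *
 * FCFs answer with subcode FIP_SC_VL_REP, which is handled by
 * fnic_fcoe_process_vlan_resp() below.
 */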

static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *skb)
{
	struct fcoe_ctlr *fip = &fnic->ctlr;
	struct fip_header *fiph;
	struct fip_desc *desc;
	struct fnic_stats *fnic_stats = &fnic->fnic_stats;
	u16 vid;
	size_t rlen;
	size_t dlen;
	struct fcoe_vlan *vlan;
	u64 sol_time;
	unsigned long flags;

	FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
		     "Received VLAN response...\n");

	fiph = (struct fip_header *)skb->data;

	FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
		     "Received VLAN response... OP 0x%x SUB_OP 0x%x\n",
		     ntohs(fiph->fip_op), fiph->fip_subcode);

	rlen = ntohs(fiph->fip_dl_len) * 4;
	fnic_fcoe_reset_vlans(fnic);
	spin_lock_irqsave(&fnic->vlans_lock, flags);
	desc = (struct fip_desc *)(fiph + 1);
	while (rlen > 0) {
		dlen = desc->fip_dlen * FIP_BPW;
		switch (desc->fip_dtype) {
		case FIP_DT_VLAN:
			vid = ntohs(((struct fip_vlan_desc *)desc)->fd_vlan);
			shost_printk(KERN_INFO, fnic->lport->host,
				     "process_vlan_resp: FIP VLAN %d\n", vid);
			vlan = kmalloc(sizeof(*vlan), GFP_ATOMIC);
			if (!vlan) {
				/* retry from timer */
				spin_unlock_irqrestore(&fnic->vlans_lock,
						       flags);
				goto out;
			}
			memset(vlan, 0, sizeof(struct fcoe_vlan));
			vlan->vid = vid & 0x0fff;
			vlan->state = FIP_VLAN_AVAIL;
			list_add_tail(&vlan->list, &fnic->vlans);
			break;
		}
		desc = (struct fip_desc *)((char *)desc + dlen);
		rlen -= dlen;
	}

	/* any VLAN descriptors present? */
	if (list_empty(&fnic->vlans)) {
		/* retry from timer */
		atomic64_inc(&fnic_stats->vlan_stats.resp_withno_vlanID);
		FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
			     "No VLAN descriptors in FIP VLAN response\n");
		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
		goto out;
	}

	vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
	fnic->set_vlan(fnic, vlan->vid);
	vlan->state = FIP_VLAN_SENT; /* sent now */
	vlan->sol_count++;
	spin_unlock_irqrestore(&fnic->vlans_lock, flags);

	/* start the solicitation */
	fcoe_ctlr_link_up(fip);

	sol_time = jiffies + msecs_to_jiffies(FCOE_CTLR_START_DELAY);
	mod_timer(&fnic->fip_timer, round_jiffies(sol_time));
out:
	return;
}

static void fnic_fcoe_start_fcf_disc(struct fnic *fnic)
{
	unsigned long flags;
	struct fcoe_vlan *vlan;
	u64 sol_time;

	spin_lock_irqsave(&fnic->vlans_lock, flags);
	vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
	fnic->set_vlan(fnic, vlan->vid);
	vlan->state = FIP_VLAN_SENT; /* sent now */
	vlan->sol_count = 1;
	spin_unlock_irqrestore(&fnic->vlans_lock, flags);

	/* start the solicitation */
	fcoe_ctlr_link_up(&fnic->ctlr);

	sol_time = jiffies + msecs_to_jiffies(FCOE_CTLR_START_DELAY);
	mod_timer(&fnic->fip_timer, round_jiffies(sol_time));
}

static int fnic_fcoe_vlan_check(struct fnic *fnic, u16 flag)
{
	unsigned long flags;
	struct fcoe_vlan *fvlan;

	spin_lock_irqsave(&fnic->vlans_lock, flags);
	if (list_empty(&fnic->vlans)) {
		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
		return -EINVAL;
	}

	fvlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
	if (fvlan->state == FIP_VLAN_USED) {
		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
		return 0;
	}

	if (fvlan->state == FIP_VLAN_SENT) {
		fvlan->state = FIP_VLAN_USED;
		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
		return 0;
	}
	spin_unlock_irqrestore(&fnic->vlans_lock, flags);
	return -EINVAL;
}

static void fnic_event_enq(struct fnic *fnic, enum fnic_evt ev)
{
	struct fnic_event *fevt;
	unsigned long flags;

	fevt = kmalloc(sizeof(*fevt), GFP_ATOMIC);
	if (!fevt)
		return;

	fevt->fnic = fnic;
	fevt->event = ev;

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	list_add_tail(&fevt->list, &fnic->evlist);
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	schedule_work(&fnic->event_work);
}
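
/*
 * Usage sketch (illustrative only, not an additional call site): producers
 * that must not block, such as the FIP timer or the CVL handler below,
 * defer VLAN discovery through the event list rather than calling
 * fnic_fcoe_send_vlan_req() directly:
 *
 *	fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
 *
 * fnic_handle_event() then dequeues the entry under fnic_lock, acts on it
 * in work-queue context, and frees it.
 */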

static int fnic_fcoe_handle_fip_frame(struct fnic *fnic, struct sk_buff *skb)
{
	struct fip_header *fiph;
	int ret = 1;
	u16 op;
	u8 sub;

	if (!skb || !(skb->data))
		return -1;

	if (skb_linearize(skb))
		goto drop;

	fiph = (struct fip_header *)skb->data;
	op = ntohs(fiph->fip_op);
	sub = fiph->fip_subcode;

	if (FIP_VER_DECAPS(fiph->fip_ver) != FIP_VER)
		goto drop;

	if (ntohs(fiph->fip_dl_len) * FIP_BPW + sizeof(*fiph) > skb->len)
		goto drop;

	if (op == FIP_OP_DISC && sub == FIP_SC_ADV) {
		if (fnic_fcoe_vlan_check(fnic, ntohs(fiph->fip_flags)))
			goto drop;
		/* pass it on to fcoe */
		ret = 1;
	} else if (op == FIP_OP_VLAN && sub == FIP_SC_VL_REP) {
		/* set the vlan as used */
		fnic_fcoe_process_vlan_resp(fnic, skb);
		ret = 0;
	} else if (op == FIP_OP_CTRL && sub == FIP_SC_CLR_VLINK) {
		/* received CVL request, restart vlan disc */
		fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
		/* pass it on to fcoe */
		ret = 1;
	}
drop:
	return ret;
}

void fnic_handle_fip_frame(struct work_struct *work)
{
	struct fnic *fnic = container_of(work, struct fnic, fip_frame_work);
	struct fnic_stats *fnic_stats = &fnic->fnic_stats;
	unsigned long flags;
	struct sk_buff *skb;
	struct ethhdr *eh;

	while ((skb = skb_dequeue(&fnic->fip_frame_queue))) {
		spin_lock_irqsave(&fnic->fnic_lock, flags);
		if (fnic->stop_rx_link_events) {
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			dev_kfree_skb(skb);
			return;
		}
		/*
		 * If we're in a transitional state, just re-queue and return.
		 * The queue will be serviced when we get to a stable state.
		 */
		if (fnic->state != FNIC_IN_FC_MODE &&
		    fnic->state != FNIC_IN_ETH_MODE) {
			skb_queue_head(&fnic->fip_frame_queue, skb);
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			return;
		}
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		eh = (struct ethhdr *)skb->data;
		if (eh->h_proto == htons(ETH_P_FIP)) {
			skb_pull(skb, sizeof(*eh));
			if (fnic_fcoe_handle_fip_frame(fnic, skb) <= 0) {
				dev_kfree_skb(skb);
				continue;
			}
			/*
			 * If there are FLOGI rejects, clear all FCFs and
			 * restart from scratch.
			 */
			if (is_fnic_fip_flogi_reject(&fnic->ctlr, skb)) {
				atomic64_inc(
					&fnic_stats->vlan_stats.flogi_rejects);
				shost_printk(KERN_INFO, fnic->lport->host,
					     "Trigger a Link down - VLAN Disc\n");
				fcoe_ctlr_link_down(&fnic->ctlr);
				/* start FCoE VLAN discovery */
				fnic_fcoe_send_vlan_req(fnic);
				dev_kfree_skb(skb);
				continue;
			}
			fcoe_ctlr_recv(&fnic->ctlr, skb);
			continue;
		}
	}
}

/**
 * fnic_import_rq_eth_pkt() - handle received FCoE or FIP frame.
 * @fnic: fnic instance.
 * @skb: Ethernet Frame.
 *
 * Returns 1 if the frame was a FIP frame and has been consumed (queued to
 * the FIP work queue), 0 if it is an FCoE frame ready for libFC, and -1 if
 * the frame was dropped.
 */
static inline int fnic_import_rq_eth_pkt(struct fnic *fnic, struct sk_buff *skb)
{
	struct fc_frame *fp;
	struct ethhdr *eh;
	struct fcoe_hdr *fcoe_hdr;
	struct fcoe_crc_eof *ft;

	/*
	 * Undo VLAN encapsulation if present.
	 */
	eh = (struct ethhdr *)skb->data;
	if (eh->h_proto == htons(ETH_P_8021Q)) {
		memmove((u8 *)eh + VLAN_HLEN, eh, ETH_ALEN * 2);
		eh = (struct ethhdr *)skb_pull(skb, VLAN_HLEN);
		skb_reset_mac_header(skb);
	}
	if (eh->h_proto == htons(ETH_P_FIP)) {
		if (!(fnic->config.flags & VFCF_FIP_CAPABLE)) {
			printk(KERN_ERR "Dropped FIP frame, as firmware "
					"uses non-FIP mode; enable FIP "
					"using UCSM\n");
			goto drop;
		}
		if ((fnic_fc_trace_set_data(fnic->lport->host->host_no,
			    FNIC_FC_RECV|0x80, (char *)skb->data, skb->len)) != 0) {
			printk(KERN_ERR "fnic ctlr frame trace error!!!");
		}
		skb_queue_tail(&fnic->fip_frame_queue, skb);
		queue_work(fnic_fip_queue, &fnic->fip_frame_work);
		return 1;		/* let caller know packet was used */
	}
	if (eh->h_proto != htons(ETH_P_FCOE))
		goto drop;

	skb_set_network_header(skb, sizeof(*eh));
	skb_pull(skb, sizeof(*eh));

	fcoe_hdr = (struct fcoe_hdr *)skb->data;
	if (FC_FCOE_DECAPS_VER(fcoe_hdr) != FC_FCOE_VER)
		goto drop;

	fp = (struct fc_frame *)skb;
	fc_frame_init(fp);
	fr_sof(fp) = fcoe_hdr->fcoe_sof;
	skb_pull(skb, sizeof(struct fcoe_hdr));
	skb_reset_transport_header(skb);

	ft = (struct fcoe_crc_eof *)(skb->data + skb->len - sizeof(*ft));
	fr_eof(fp) = ft->fcoe_eof;
	skb_trim(skb, skb->len - sizeof(*ft));
	return 0;
drop:
	dev_kfree_skb_irq(skb);
	return -1;
}

/**
 * fnic_update_mac_locked() - set data MAC address and filters.
 * @fnic: fnic instance.
 * @new: newly-assigned FCoE MAC address.
 *
 * Called with the fnic lock held.
 */
void fnic_update_mac_locked(struct fnic *fnic, u8 *new)
{
	u8 *ctl = fnic->ctlr.ctl_src_addr;
	u8 *data = fnic->data_src_addr;

	if (is_zero_ether_addr(new))
		new = ctl;
	if (ether_addr_equal(data, new))
		return;
	FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "update_mac %pM\n", new);
	if (!is_zero_ether_addr(data) && !ether_addr_equal(data, ctl))
		vnic_dev_del_addr(fnic->vdev, data);
	memcpy(data, new, ETH_ALEN);
	if (!ether_addr_equal(new, ctl))
		vnic_dev_add_addr(fnic->vdev, new);
}

/**
 * fnic_update_mac() - set data MAC address and filters.
 * @lport: local port.
 * @new: newly-assigned FCoE MAC address.
 */
void fnic_update_mac(struct fc_lport *lport, u8 *new)
{
	struct fnic *fnic = lport_priv(lport);

	spin_lock_irq(&fnic->fnic_lock);
	fnic_update_mac_locked(fnic, new);
	spin_unlock_irq(&fnic->fnic_lock);
}

/**
 * fnic_set_port_id() - set the port_ID after successful FLOGI.
 * @lport: local port.
 * @port_id: assigned FC_ID.
 * @fp: received frame containing the FLOGI accept or NULL.
 *
 * This is called from libfc when a new FC_ID has been assigned.
 * This causes us to reset the firmware to FC_MODE and setup the new MAC
 * address and FC_ID.
 *
 * It is also called with FC_ID 0 when we're logged off.
 *
 * If the FC_ID is due to point-to-point, fp may be NULL.
 */
void fnic_set_port_id(struct fc_lport *lport, u32 port_id, struct fc_frame *fp)
{
	struct fnic *fnic = lport_priv(lport);
	u8 *mac;
	int ret;

	FNIC_FCS_DBG(KERN_DEBUG, lport->host, "set port_id %x fp %p\n",
		     port_id, fp);

	/*
	 * If we're clearing the FC_ID, change to use the ctl_src_addr.
	 * Set ethernet mode to send FLOGI.
	 */
	if (!port_id) {
		fnic_update_mac(lport, fnic->ctlr.ctl_src_addr);
		fnic_set_eth_mode(fnic);
		return;
	}

	if (fp) {
		mac = fr_cb(fp)->granted_mac;
		if (is_zero_ether_addr(mac)) {
			/* non-FIP - FLOGI already accepted - ignore return */
			fcoe_ctlr_recv_flogi(&fnic->ctlr, lport, fp);
		}
		fnic_update_mac(lport, mac);
	}

	/* Change state to reflect transition to FC mode */
	spin_lock_irq(&fnic->fnic_lock);
	if (fnic->state == FNIC_IN_ETH_MODE || fnic->state == FNIC_IN_FC_MODE)
		fnic->state = FNIC_IN_ETH_TRANS_FC_MODE;
	else {
		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
			     "Unexpected fnic state %s while"
			     " processing flogi resp\n",
			     fnic_state_to_str(fnic->state));
		spin_unlock_irq(&fnic->fnic_lock);
		return;
	}
	spin_unlock_irq(&fnic->fnic_lock);

	/*
	 * Send FLOGI registration to firmware to set up FC mode.
	 * The new address will be set up when registration completes.
	 */
	ret = fnic_flogi_reg_handler(fnic, port_id);

	if (ret < 0) {
		spin_lock_irq(&fnic->fnic_lock);
		if (fnic->state == FNIC_IN_ETH_TRANS_FC_MODE)
			fnic->state = FNIC_IN_ETH_MODE;
		spin_unlock_irq(&fnic->fnic_lock);
	}
}
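
/*
 * Sketch of the state flow driven by fnic_set_port_id() (derived from the
 * code above; the final transition is completed elsewhere in the driver):
 *
 *	FLOGI accept:	FNIC_IN_ETH_MODE -> FNIC_IN_ETH_TRANS_FC_MODE,
 *			then fnic_flogi_reg_handler() registers with the
 *			firmware; the registration-completion path moves
 *			the state on to FNIC_IN_FC_MODE
 *	logoff (FC_ID 0): revert to ctl_src_addr and reset the firmware
 *			back toward FNIC_IN_ETH_MODE via fnic_set_eth_mode()
 *	registration failure: fall back to FNIC_IN_ETH_MODE
 */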

static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc
				    *cq_desc, struct vnic_rq_buf *buf,
				    int skipped __attribute__((unused)),
				    void *opaque)
{
	struct fnic *fnic = vnic_dev_priv(rq->vdev);
	struct sk_buff *skb;
	struct fc_frame *fp;
	struct fnic_stats *fnic_stats = &fnic->fnic_stats;
	unsigned int eth_hdrs_stripped;
	u8 type, color, eop, sop, ingress_port, vlan_stripped;
	u8 fcoe = 0, fcoe_sof, fcoe_eof;
	u8 fcoe_fc_crc_ok = 1, fcoe_enc_error = 0;
	u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
	u8 ipv6, ipv4, ipv4_fragment, rss_type, csum_not_calc;
	u8 fcs_ok = 1, packet_error = 0;
	u16 q_number, completed_index, bytes_written = 0, vlan, checksum;
	u32 rss_hash;
	u16 exchange_id, tmpl;
	u8 sof = 0;
	u8 eof = 0;
	u32 fcp_bytes_written = 0;
	unsigned long flags;

	pci_unmap_single(fnic->pdev, buf->dma_addr, buf->len,
			 PCI_DMA_FROMDEVICE);
	skb = buf->os_buf;
	fp = (struct fc_frame *)skb;
	buf->os_buf = NULL;

	cq_desc_dec(cq_desc, &type, &color, &q_number, &completed_index);
	if (type == CQ_DESC_TYPE_RQ_FCP) {
		cq_fcp_rq_desc_dec((struct cq_fcp_rq_desc *)cq_desc,
				   &type, &color, &q_number, &completed_index,
				   &eop, &sop, &fcoe_fc_crc_ok, &exchange_id,
				   &tmpl, &fcp_bytes_written, &sof, &eof,
				   &ingress_port, &packet_error,
				   &fcoe_enc_error, &fcs_ok, &vlan_stripped,
				   &vlan);
		eth_hdrs_stripped = 1;
		skb_trim(skb, fcp_bytes_written);
		fr_sof(fp) = sof;
		fr_eof(fp) = eof;
	} else if (type == CQ_DESC_TYPE_RQ_ENET) {
		cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc,
				    &type, &color, &q_number, &completed_index,
				    &ingress_port, &fcoe, &eop, &sop,
				    &rss_type, &csum_not_calc, &rss_hash,
				    &bytes_written, &packet_error,
				    &vlan_stripped, &vlan, &checksum,
				    &fcoe_sof, &fcoe_fc_crc_ok,
				    &fcoe_enc_error, &fcoe_eof,
				    &tcp_udp_csum_ok, &udp, &tcp,
				    &ipv4_csum_ok, &ipv6, &ipv4,
				    &ipv4_fragment, &fcs_ok);
		eth_hdrs_stripped = 0;
		skb_trim(skb, bytes_written);
		if (!fcs_ok) {
			atomic64_inc(&fnic_stats->misc_stats.frame_errors);
			FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
				     "fcs error. dropping packet.\n");
			goto drop;
		}
		if (fnic_import_rq_eth_pkt(fnic, skb))
			return;
	} else {
		/* wrong CQ type */
		shost_printk(KERN_ERR, fnic->lport->host,
			     "fnic rq_cmpl wrong cq type x%x\n", type);
		goto drop;
	}

	if (!fcs_ok || packet_error || !fcoe_fc_crc_ok || fcoe_enc_error) {
		atomic64_inc(&fnic_stats->misc_stats.frame_errors);
		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
			     "fnic rq_cmpl fcoe x%x fcsok x%x"
			     " pkterr x%x fcoe_fc_crc_ok x%x, fcoe_enc_err"
			     " x%x\n",
			     fcoe, fcs_ok, packet_error,
			     fcoe_fc_crc_ok, fcoe_enc_error);
		goto drop;
	}

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	if (fnic->stop_rx_link_events) {
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		goto drop;
	}
	fr_dev(fp) = fnic->lport;
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
	if ((fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_RECV,
				    (char *)skb->data, skb->len)) != 0) {
		printk(KERN_ERR "fnic ctlr frame trace error!!!");
	}

	skb_queue_tail(&fnic->frame_queue, skb);
	queue_work(fnic_event_queue, &fnic->frame_work);

	return;
drop:
	dev_kfree_skb_irq(skb);
}

static int fnic_rq_cmpl_handler_cont(struct vnic_dev *vdev,
				     struct cq_desc *cq_desc, u8 type,
				     u16 q_number, u16 completed_index,
				     void *opaque)
{
	struct fnic *fnic = vnic_dev_priv(vdev);

	vnic_rq_service(&fnic->rq[q_number], cq_desc, completed_index,
			VNIC_RQ_RETURN_DESC, fnic_rq_cmpl_frame_recv,
			NULL);
	return 0;
}

int fnic_rq_cmpl_handler(struct fnic *fnic, int rq_work_to_do)
{
	unsigned int tot_rq_work_done = 0, cur_work_done;
	unsigned int i;
	int err;

	for (i = 0; i < fnic->rq_count; i++) {
		cur_work_done = vnic_cq_service(&fnic->cq[i], rq_work_to_do,
						fnic_rq_cmpl_handler_cont,
						NULL);
		if (cur_work_done) {
			err = vnic_rq_fill(&fnic->rq[i], fnic_alloc_rq_frame);
			if (err)
				shost_printk(KERN_ERR, fnic->lport->host,
					     "fnic_alloc_rq_frame can't alloc"
					     " frame\n");
		}
		tot_rq_work_done += cur_work_done;
	}

	return tot_rq_work_done;
}
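
/*
 * Illustrative call pattern (a sketch, not this driver's actual ISR code):
 * an interrupt or poll handler bounds per-invocation work with the
 * rq_work_to_do budget, which caps the completions serviced on each CQ:
 *
 *	unsigned int done = fnic_rq_cmpl_handler(fnic, budget);
 *
 * The return value is the total work done across all receive queues and
 * can be used for accounting before re-arming the interrupt.
 */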

/*
 * This function is called once at init time to allocate and fill RQ
 * buffers. Subsequently, it is called in the interrupt context after RQ
 * buffer processing to replenish the buffers in the RQ.
 */
int fnic_alloc_rq_frame(struct vnic_rq *rq)
{
	struct fnic *fnic = vnic_dev_priv(rq->vdev);
	struct sk_buff *skb;
	u16 len;
	dma_addr_t pa;

	len = FC_FRAME_HEADROOM + FC_MAX_FRAME + FC_FRAME_TAILROOM;
	skb = dev_alloc_skb(len);
	if (!skb) {
		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
			     "Unable to allocate RQ sk_buff\n");
		return -ENOMEM;
	}
	skb_reset_mac_header(skb);
	skb_reset_transport_header(skb);
	skb_reset_network_header(skb);
	skb_put(skb, len);
	pa = pci_map_single(fnic->pdev, skb->data, len, PCI_DMA_FROMDEVICE);
	fnic_queue_rq_desc(rq, skb, pa, len);
	return 0;
}

void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
{
	struct fc_frame *fp = buf->os_buf;
	struct fnic *fnic = vnic_dev_priv(rq->vdev);

	pci_unmap_single(fnic->pdev, buf->dma_addr, buf->len,
			 PCI_DMA_FROMDEVICE);

	dev_kfree_skb(fp_skb(fp));
	buf->os_buf = NULL;
}

/**
 * fnic_eth_send() - Send Ethernet frame.
 * @fip: fcoe_ctlr instance.
 * @skb: Ethernet Frame, FIP, without VLAN encapsulation.
 */
void fnic_eth_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
{
	struct fnic *fnic = fnic_from_ctlr(fip);
	struct vnic_wq *wq = &fnic->wq[0];
	dma_addr_t pa;
	struct ethhdr *eth_hdr;
	struct vlan_ethhdr *vlan_hdr;
	unsigned long flags;

	if (!fnic->vlan_hw_insert) {
		eth_hdr = (struct ethhdr *)skb_mac_header(skb);
		vlan_hdr = (struct vlan_ethhdr *)skb_push(skb,
				sizeof(*vlan_hdr) - sizeof(*eth_hdr));
		memcpy(vlan_hdr, eth_hdr, 2 * ETH_ALEN);
		vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q);
		vlan_hdr->h_vlan_encapsulated_proto = eth_hdr->h_proto;
		vlan_hdr->h_vlan_TCI = htons(fnic->vlan_id);
		if ((fnic_fc_trace_set_data(fnic->lport->host->host_no,
			    FNIC_FC_SEND|0x80, (char *)eth_hdr, skb->len)) != 0) {
			printk(KERN_ERR "fnic ctlr frame trace error!!!");
		}
	} else {
		if ((fnic_fc_trace_set_data(fnic->lport->host->host_no,
			    FNIC_FC_SEND|0x80, (char *)skb->data, skb->len)) != 0) {
			printk(KERN_ERR "fnic ctlr frame trace error!!!");
		}
	}

	pa = pci_map_single(fnic->pdev, skb->data, skb->len, PCI_DMA_TODEVICE);

	spin_lock_irqsave(&fnic->wq_lock[0], flags);
	if (!vnic_wq_desc_avail(wq)) {
		pci_unmap_single(fnic->pdev, pa, skb->len, PCI_DMA_TODEVICE);
		spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
		kfree_skb(skb);
		return;
	}

	fnic_queue_wq_eth_desc(wq, skb, pa, skb->len,
			       0 /* hw inserts cos value */,
			       fnic->vlan_id, 1);
	spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
}

/*
 * Send FC frame.
 */
static int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp)
{
	struct vnic_wq *wq = &fnic->wq[0];
	struct sk_buff *skb;
	dma_addr_t pa;
	struct ethhdr *eth_hdr;
	struct vlan_ethhdr *vlan_hdr;
	struct fcoe_hdr *fcoe_hdr;
	struct fc_frame_header *fh;
	u32 tot_len, eth_hdr_len;
	int ret = 0;
	unsigned long flags;

	fh = fc_frame_header_get(fp);
	skb = fp_skb(fp);

	if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ) &&
	    fcoe_ctlr_els_send(&fnic->ctlr, fnic->lport, skb))
		return 0;

	if (!fnic->vlan_hw_insert) {
		eth_hdr_len = sizeof(*vlan_hdr) + sizeof(*fcoe_hdr);
		vlan_hdr = (struct vlan_ethhdr *)skb_push(skb, eth_hdr_len);
		eth_hdr = (struct ethhdr *)vlan_hdr;
		vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q);
		vlan_hdr->h_vlan_encapsulated_proto = htons(ETH_P_FCOE);
		vlan_hdr->h_vlan_TCI = htons(fnic->vlan_id);
		fcoe_hdr = (struct fcoe_hdr *)(vlan_hdr + 1);
	} else {
		eth_hdr_len = sizeof(*eth_hdr) + sizeof(*fcoe_hdr);
		eth_hdr = (struct ethhdr *)skb_push(skb, eth_hdr_len);
		eth_hdr->h_proto = htons(ETH_P_FCOE);
		fcoe_hdr = (struct fcoe_hdr *)(eth_hdr + 1);
	}

	if (fnic->ctlr.map_dest)
		fc_fcoe_set_mac(eth_hdr->h_dest, fh->fh_d_id);
	else
		memcpy(eth_hdr->h_dest, fnic->ctlr.dest_addr, ETH_ALEN);
	memcpy(eth_hdr->h_source, fnic->data_src_addr, ETH_ALEN);

	tot_len = skb->len;
	BUG_ON(tot_len % 4);

	memset(fcoe_hdr, 0, sizeof(*fcoe_hdr));
	fcoe_hdr->fcoe_sof = fr_sof(fp);
	if (FC_FCOE_VER)
		FC_FCOE_ENCAPS_VER(fcoe_hdr, FC_FCOE_VER);

	pa = pci_map_single(fnic->pdev, eth_hdr, tot_len, PCI_DMA_TODEVICE);

	if ((fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_SEND,
				    (char *)eth_hdr, tot_len)) != 0) {
		printk(KERN_ERR "fnic ctlr frame trace error!!!");
	}

	spin_lock_irqsave(&fnic->wq_lock[0], flags);

	if (!vnic_wq_desc_avail(wq)) {
		pci_unmap_single(fnic->pdev, pa,
				 tot_len, PCI_DMA_TODEVICE);
		ret = -1;
		goto fnic_send_frame_end;
	}

	fnic_queue_wq_desc(wq, skb, pa, tot_len, fr_eof(fp),
			   0 /* hw inserts cos value */,
			   fnic->vlan_id, 1, 1, 1);
fnic_send_frame_end:
	spin_unlock_irqrestore(&fnic->wq_lock[0], flags);

	if (ret)
		dev_kfree_skb_any(fp_skb(fp));

	return ret;
}

/*
 * fnic_send
 * Routine to send a raw frame
 */
int fnic_send(struct fc_lport *lp, struct fc_frame *fp)
{
	struct fnic *fnic = lport_priv(lp);
	unsigned long flags;

	if (fnic->in_remove) {
		dev_kfree_skb(fp_skb(fp));
		return -1;
	}

	/*
	 * Queue frame if in a transitional state.
	 * This occurs while registering the Port_ID / MAC address after FLOGI.
	 */
	spin_lock_irqsave(&fnic->fnic_lock, flags);
	if (fnic->state != FNIC_IN_FC_MODE && fnic->state != FNIC_IN_ETH_MODE) {
		skb_queue_tail(&fnic->tx_queue, fp_skb(fp));
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		return 0;
	}
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	return fnic_send_frame(fnic, fp);
}

/**
 * fnic_flush_tx() - send queued frames.
 * @fnic: fnic device
 *
 * Send frames that were waiting to go out in FC or Ethernet mode.
 * Whenever changing modes we purge queued frames, so these frames should
 * be queued for the stable mode that we're in, either FC or Ethernet.
 *
 * Called without fnic_lock held.
 */
void fnic_flush_tx(struct fnic *fnic)
{
	struct sk_buff *skb;
	struct fc_frame *fp;

	while ((skb = skb_dequeue(&fnic->tx_queue))) {
		fp = (struct fc_frame *)skb;
		fnic_send_frame(fnic, fp);
	}
}

/**
 * fnic_set_eth_mode() - put fnic into ethernet mode.
 * @fnic: fnic device
 *
 * Called without fnic lock held.
 */
static void fnic_set_eth_mode(struct fnic *fnic)
{
	unsigned long flags;
	enum fnic_state old_state;
	int ret;

	spin_lock_irqsave(&fnic->fnic_lock, flags);
again:
	old_state = fnic->state;
	switch (old_state) {
	case FNIC_IN_FC_MODE:
	case FNIC_IN_ETH_TRANS_FC_MODE:
	default:
		fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);

		ret = fnic_fw_reset_handler(fnic);

		spin_lock_irqsave(&fnic->fnic_lock, flags);
		if (fnic->state != FNIC_IN_FC_TRANS_ETH_MODE)
			goto again;
		if (ret)
			fnic->state = old_state;
		break;

	case FNIC_IN_FC_TRANS_ETH_MODE:
	case FNIC_IN_ETH_MODE:
		break;
	}
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
}
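
/*
 * State sketch for fnic_set_eth_mode() (derived from the switch above):
 *
 *	FNIC_IN_FC_MODE / FNIC_IN_ETH_TRANS_FC_MODE
 *		-> FNIC_IN_FC_TRANS_ETH_MODE, then request a firmware
 *		   reset; on failure the old state is restored, and if
 *		   the state changed while the lock was dropped the
 *		   switch is retried (the "again" label)
 *	FNIC_IN_FC_TRANS_ETH_MODE / FNIC_IN_ETH_MODE
 *		-> nothing to do; reset already in progress or complete
 */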

static void fnic_wq_complete_frame_send(struct vnic_wq *wq,
					struct cq_desc *cq_desc,
					struct vnic_wq_buf *buf, void *opaque)
{
	struct sk_buff *skb = buf->os_buf;
	struct fc_frame *fp = (struct fc_frame *)skb;
	struct fnic *fnic = vnic_dev_priv(wq->vdev);

	pci_unmap_single(fnic->pdev, buf->dma_addr,
			 buf->len, PCI_DMA_TODEVICE);
	dev_kfree_skb_irq(fp_skb(fp));
	buf->os_buf = NULL;
}

static int fnic_wq_cmpl_handler_cont(struct vnic_dev *vdev,
				     struct cq_desc *cq_desc, u8 type,
				     u16 q_number, u16 completed_index,
				     void *opaque)
{
	struct fnic *fnic = vnic_dev_priv(vdev);
	unsigned long flags;

	spin_lock_irqsave(&fnic->wq_lock[q_number], flags);
	vnic_wq_service(&fnic->wq[q_number], cq_desc, completed_index,
			fnic_wq_complete_frame_send, NULL);
	spin_unlock_irqrestore(&fnic->wq_lock[q_number], flags);

	return 0;
}

int fnic_wq_cmpl_handler(struct fnic *fnic, int work_to_do)
{
	unsigned int wq_work_done = 0;
	unsigned int i;

	for (i = 0; i < fnic->raw_wq_count; i++) {
		wq_work_done += vnic_cq_service(&fnic->cq[fnic->rq_count+i],
						work_to_do,
						fnic_wq_cmpl_handler_cont,
						NULL);
	}

	return wq_work_done;
}

void fnic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
{
	struct fc_frame *fp = buf->os_buf;
	struct fnic *fnic = vnic_dev_priv(wq->vdev);

	pci_unmap_single(fnic->pdev, buf->dma_addr,
			 buf->len, PCI_DMA_TODEVICE);

	dev_kfree_skb(fp_skb(fp));
	buf->os_buf = NULL;
}

void fnic_fcoe_reset_vlans(struct fnic *fnic)
{
	unsigned long flags;
	struct fcoe_vlan *vlan;
	struct fcoe_vlan *next;

	/*
	 * Indicate a link down to fcoe so that all FCFs are freed.
	 * This might not be required, since we did it before sending
	 * the VLAN discovery request.
	 */
	spin_lock_irqsave(&fnic->vlans_lock, flags);
	if (!list_empty(&fnic->vlans)) {
		list_for_each_entry_safe(vlan, next, &fnic->vlans, list) {
			list_del(&vlan->list);
			kfree(vlan);
		}
	}
	spin_unlock_irqrestore(&fnic->vlans_lock, flags);
}

void fnic_handle_fip_timer(struct fnic *fnic)
{
	unsigned long flags;
	struct fcoe_vlan *vlan;
	struct fnic_stats *fnic_stats = &fnic->fnic_stats;
	u64 sol_time;

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	if (fnic->stop_rx_link_events) {
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	if (fnic->ctlr.mode == FIP_ST_NON_FIP)
		return;

	spin_lock_irqsave(&fnic->vlans_lock, flags);
	if (list_empty(&fnic->vlans)) {
		/* no vlans available, try again */
		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
			     "Start VLAN Discovery\n");
		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
		fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
		return;
	}

	vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
	shost_printk(KERN_DEBUG, fnic->lport->host,
		     "fip_timer: vlan %d state %d sol_count %d\n",
		     vlan->vid, vlan->state, vlan->sol_count);
	switch (vlan->state) {
	case FIP_VLAN_USED:
		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
			     "FIP VLAN is selected for FC transaction\n");
		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
		break;
	case FIP_VLAN_FAILED:
		/* if all vlans are in failed state, restart vlan disc */
		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
			     "Start VLAN Discovery\n");
		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
		fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
		break;
	case FIP_VLAN_SENT:
		if (vlan->sol_count >= FCOE_CTLR_MAX_SOL) {
			/*
			 * no response on this vlan, remove from the list.
			 * Try the next vlan
			 */
			shost_printk(KERN_INFO, fnic->lport->host,
				     "Dequeue this VLAN ID %d from list\n",
				     vlan->vid);
			list_del(&vlan->list);
			kfree(vlan);
			vlan = NULL;
			if (list_empty(&fnic->vlans)) {
				/* we exhausted all vlans, restart vlan disc */
				spin_unlock_irqrestore(&fnic->vlans_lock,
						       flags);
				shost_printk(KERN_INFO, fnic->lport->host,
					     "fip_timer: vlan list empty, "
					     "trigger vlan disc\n");
				fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
				return;
			}
			/* check the next vlan */
			vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan,
						list);
			fnic->set_vlan(fnic, vlan->vid);
			vlan->state = FIP_VLAN_SENT; /* sent now */
		}
		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
		atomic64_inc(&fnic_stats->vlan_stats.sol_expiry_count);
		vlan->sol_count++;
		sol_time = jiffies +
			msecs_to_jiffies(FCOE_CTLR_START_DELAY);
		mod_timer(&fnic->fip_timer, round_jiffies(sol_time));
		break;
	}
}