/* drivers/scsi/fnic/fnic_fcs.c */
/*
 * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/workqueue.h>
#include <scsi/fc/fc_els.h>
#include <scsi/fc/fc_fcoe.h>
#include <scsi/fc_frame.h>
#include <scsi/libfc.h>
#include "fnic_io.h"
#include "fnic.h"
#include "cq_enet_desc.h"
#include "cq_exch_desc.h"

struct workqueue_struct *fnic_event_queue;

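/*
 * Worker for the fnic link_work item: re-read the link state and the
 * link-down counter from the vNIC device and report any up/down
 * transition to libFC via fc_linkup()/fc_linkdown().
 */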
void fnic_handle_link(struct work_struct *work)
{
        struct fnic *fnic = container_of(work, struct fnic, link_work);
        unsigned long flags;
        int old_link_status;
        u32 old_link_down_cnt;

        spin_lock_irqsave(&fnic->fnic_lock, flags);

        if (fnic->stop_rx_link_events) {
                spin_unlock_irqrestore(&fnic->fnic_lock, flags);
                return;
        }

        old_link_down_cnt = fnic->link_down_cnt;
        old_link_status = fnic->link_status;
        fnic->link_status = vnic_dev_link_status(fnic->vdev);
        fnic->link_down_cnt = vnic_dev_link_down_cnt(fnic->vdev);

        if (old_link_status == fnic->link_status) {
                if (!fnic->link_status)
                        /* DOWN -> DOWN */
                        spin_unlock_irqrestore(&fnic->fnic_lock, flags);
                else {
                        if (old_link_down_cnt != fnic->link_down_cnt) {
                                /* UP -> DOWN -> UP */
                                fnic->lport->host_stats.link_failure_count++;
                                spin_unlock_irqrestore(&fnic->fnic_lock, flags);
                                FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
                                             "link down\n");
                                fc_linkdown(fnic->lport);
                                FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
                                             "link up\n");
                                fc_linkup(fnic->lport);
                        } else
                                /* UP -> UP */
                                spin_unlock_irqrestore(&fnic->fnic_lock, flags);
                }
        } else if (fnic->link_status) {
                /* DOWN -> UP */
                spin_unlock_irqrestore(&fnic->fnic_lock, flags);
                FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link up\n");
                fc_linkup(fnic->lport);
        } else {
                /* UP -> DOWN */
                fnic->lport->host_stats.link_failure_count++;
                spin_unlock_irqrestore(&fnic->fnic_lock, flags);
                FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link down\n");
                fc_linkdown(fnic->lport);
        }

}

/*
 * This function passes incoming fabric frames to libFC
 */
void fnic_handle_frame(struct work_struct *work)
{
        struct fnic *fnic = container_of(work, struct fnic, frame_work);
        struct fc_lport *lp = fnic->lport;
        unsigned long flags;
        struct sk_buff *skb;
        struct fc_frame *fp;

        while ((skb = skb_dequeue(&fnic->frame_queue))) {

                spin_lock_irqsave(&fnic->fnic_lock, flags);
                if (fnic->stop_rx_link_events) {
                        spin_unlock_irqrestore(&fnic->fnic_lock, flags);
                        dev_kfree_skb(skb);
                        return;
                }
                fp = (struct fc_frame *)skb;
                /* if Flogi resp frame, register the address */
                if (fr_flags(fp)) {
                        vnic_dev_add_addr(fnic->vdev,
                                          fnic->data_src_addr);
                        fr_flags(fp) = 0;
                }
                spin_unlock_irqrestore(&fnic->fnic_lock, flags);

                fc_exch_recv(lp, lp->emp, fp);
        }

}

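/*
 * Set up an RQ sk_buff whose Ethernet/FCoE headers have already been
 * stripped (the CQ_DESC_TYPE_RQ_FCP receive path) as an fc_frame: trim it
 * to the reported length and record the SOF/EOF delimiters.
 */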
static inline void fnic_import_rq_fc_frame(struct sk_buff *skb,
                                           u32 len, u8 sof, u8 eof)
{
        struct fc_frame *fp = (struct fc_frame *)skb;

        skb_trim(skb, len);
        fr_eof(fp) = eof;
        fr_sof(fp) = sof;
}

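/*
 * Turn a raw Ethernet/FCoE sk_buff (the CQ_DESC_TYPE_RQ_ENET receive path)
 * into an fc_frame: strip the Ethernet header (VLAN tagged or not) and the
 * FCoE header, record SOF/EOF, and trim off the trailing CRC/EOF word.
 * Returns 0 on success, -1 if the packet is not FCoE or carries an
 * unexpected FCoE version.
 */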
static inline int fnic_import_rq_eth_pkt(struct sk_buff *skb, u32 len)
{
        struct fc_frame *fp;
        struct ethhdr *eh;
        struct vlan_ethhdr *vh;
        struct fcoe_hdr *fcoe_hdr;
        struct fcoe_crc_eof *ft;
        u32    transport_len = 0;

        eh = (struct ethhdr *)skb->data;
        vh = (struct vlan_ethhdr *)skb->data;
        if (vh->h_vlan_proto == htons(ETH_P_8021Q) &&
            vh->h_vlan_encapsulated_proto == htons(ETH_P_FCOE)) {
                skb_pull(skb, sizeof(struct vlan_ethhdr));
                transport_len += sizeof(struct vlan_ethhdr);
        } else if (eh->h_proto == htons(ETH_P_FCOE)) {
                transport_len += sizeof(struct ethhdr);
                skb_pull(skb, sizeof(struct ethhdr));
        } else
                return -1;

        fcoe_hdr = (struct fcoe_hdr *)skb->data;
        if (FC_FCOE_DECAPS_VER(fcoe_hdr) != FC_FCOE_VER)
                return -1;

        fp = (struct fc_frame *)skb;
        fc_frame_init(fp);
        fr_sof(fp) = fcoe_hdr->fcoe_sof;
        skb_pull(skb, sizeof(struct fcoe_hdr));
        transport_len += sizeof(struct fcoe_hdr);

        ft = (struct fcoe_crc_eof *)(skb->data + len -
                                     transport_len - sizeof(*ft));
        fr_eof(fp) = ft->fcoe_eof;
        skb_trim(skb, len - transport_len - sizeof(*ft));
        return 0;
}

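/*
 * Handle a FLOGI response received while in Ethernet mode: cache the
 * frame, select FC-OUI vs. gateway addressing from the frame's source MAC,
 * derive our S_ID and data source MAC from the D_ID, and queue a flogi_reg
 * request to the firmware to move the fnic towards FC mode. Returns 0 on
 * success, nonzero on error.
 */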
static inline int fnic_handle_flogi_resp(struct fnic *fnic,
                                         struct fc_frame *fp)
{
        u8 mac[ETH_ALEN] = FC_FCOE_FLOGI_MAC;
        struct ethhdr *eth_hdr;
        struct fc_frame_header *fh;
        int ret = 0;
        unsigned long flags;
        struct fc_frame *old_flogi_resp = NULL;

        fh = (struct fc_frame_header *)fr_hdr(fp);

        spin_lock_irqsave(&fnic->fnic_lock, flags);

        if (fnic->state == FNIC_IN_ETH_MODE) {

                /*
                 * Check that the OX_ID still matches now that we hold the
                 * lock; a new FLOGI issued by libFC might have changed the
                 * cached OX_ID.
                 */
                if (fnic->flogi_oxid != ntohs(fh->fh_ox_id)) {
                        FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
                                     "Flogi response oxid not"
                                     " matching cached oxid, dropping frame"
                                     "\n");
                        ret = -1;
                        spin_unlock_irqrestore(&fnic->fnic_lock, flags);
                        dev_kfree_skb_irq(fp_skb(fp));
                        goto handle_flogi_resp_end;
                }

                /* Drop older cached flogi response frame, cache this frame */
                old_flogi_resp = fnic->flogi_resp;
                fnic->flogi_resp = fp;
                fnic->flogi_oxid = FC_XID_UNKNOWN;

                /*
                 * This frame is part of FLOGI: get the source MAC address
                 * from it. If the source MAC is FC-OUI based, set the
                 * addressing-mode flag so destination MACs are built from
                 * the FC-OUI; otherwise store the FCoE gateway address.
                 */
                eth_hdr = (struct ethhdr *)skb_mac_header(fp_skb(fp));
                memcpy(mac, eth_hdr->h_source, ETH_ALEN);

                if (ntoh24(mac) == FC_FCOE_OUI)
                        fnic->fcoui_mode = 1;
                else {
                        fnic->fcoui_mode = 0;
                        memcpy(fnic->dest_addr, mac, ETH_ALEN);
                }

                /*
                 * Except for the FLOGI frame, all outbound frames from us
                 * use FC_FCOE_OUI | our_sid as the Eth source address. The
                 * FLOGI frame itself uses the vNIC MAC address instead.
                 */
                fc_fcoe_set_mac(fnic->data_src_addr, fh->fh_d_id);

                /* We get our s_id from the d_id of the flogi resp frame */
                fnic->s_id = ntoh24(fh->fh_d_id);

                /* Change state to reflect transition from Eth to FC mode */
                fnic->state = FNIC_IN_ETH_TRANS_FC_MODE;

        } else {
                FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
                             "Unexpected fnic state %s while"
                             " processing flogi resp\n",
                             fnic_state_to_str(fnic->state));
                ret = -1;
                spin_unlock_irqrestore(&fnic->fnic_lock, flags);
                dev_kfree_skb_irq(fp_skb(fp));
                goto handle_flogi_resp_end;
        }

        spin_unlock_irqrestore(&fnic->fnic_lock, flags);

        /* Drop older cached frame */
        if (old_flogi_resp)
                dev_kfree_skb_irq(fp_skb(old_flogi_resp));

        /*
         * Send a flogi reg request to the firmware; this will put the
         * fnic in FC mode.
         */
        ret = fnic_flogi_reg_handler(fnic);

        if (ret < 0) {
                int free_fp = 1;
                spin_lock_irqsave(&fnic->fnic_lock, flags);
                /*
                 * Free the frame if no other thread is
                 * pointing to it.
                 */
                if (fnic->flogi_resp != fp)
                        free_fp = 0;
                else
                        fnic->flogi_resp = NULL;

                if (fnic->state == FNIC_IN_ETH_TRANS_FC_MODE)
                        fnic->state = FNIC_IN_ETH_MODE;
                spin_unlock_irqrestore(&fnic->fnic_lock, flags);
                if (free_fp)
                        dev_kfree_skb_irq(fp_skb(fp));
        }

 handle_flogi_resp_end:
        return ret;
}

/* Returns 1 for a response that matches cached flogi oxid */
static inline int is_matching_flogi_resp_frame(struct fnic *fnic,
                                               struct fc_frame *fp)
{
        struct fc_frame_header *fh;
        int ret = 0;
        u32 f_ctl;

        fh = fc_frame_header_get(fp);
        f_ctl = ntoh24(fh->fh_f_ctl);

        if (fnic->flogi_oxid == ntohs(fh->fh_ox_id) &&
            fh->fh_r_ctl == FC_RCTL_ELS_REP &&
            (f_ctl & (FC_FC_EX_CTX | FC_FC_SEQ_CTX)) == FC_FC_EX_CTX &&
            fh->fh_type == FC_TYPE_ELS)
                ret = 1;

        return ret;
}

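/*
 * Receive completion handler, invoked by vnic_rq_service() for each
 * completed RQ buffer. Decode the completion descriptor (FCP or ENET
 * type), turn the buffer into an fc_frame, handle a FLOGI response that
 * matches the cached OX_ID directly, and otherwise queue the frame for
 * fnic_handle_frame() to deliver to libFC.
 */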
static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc
                                    *cq_desc, struct vnic_rq_buf *buf,
                                    int skipped __attribute__((unused)),
                                    void *opaque)
{
        struct fnic *fnic = vnic_dev_priv(rq->vdev);
        struct sk_buff *skb;
        struct fc_frame *fp;
        unsigned int eth_hdrs_stripped;
        u8 type, color, eop, sop, ingress_port, vlan_stripped;
        u8 fcoe = 0, fcoe_sof, fcoe_eof;
        u8 fcoe_fc_crc_ok = 1, fcoe_enc_error = 0;
        u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
        u8 ipv6, ipv4, ipv4_fragment, rss_type, csum_not_calc;
        u8 fcs_ok = 1, packet_error = 0;
        u16 q_number, completed_index, bytes_written = 0, vlan, checksum;
        u32 rss_hash;
        u16 exchange_id, tmpl;
        u8 sof = 0;
        u8 eof = 0;
        u32 fcp_bytes_written = 0;
        unsigned long flags;

        pci_unmap_single(fnic->pdev, buf->dma_addr, buf->len,
                         PCI_DMA_FROMDEVICE);
        skb = buf->os_buf;
        buf->os_buf = NULL;

        cq_desc_dec(cq_desc, &type, &color, &q_number, &completed_index);
        if (type == CQ_DESC_TYPE_RQ_FCP) {
                cq_fcp_rq_desc_dec((struct cq_fcp_rq_desc *)cq_desc,
                                   &type, &color, &q_number, &completed_index,
                                   &eop, &sop, &fcoe_fc_crc_ok, &exchange_id,
                                   &tmpl, &fcp_bytes_written, &sof, &eof,
                                   &ingress_port, &packet_error,
                                   &fcoe_enc_error, &fcs_ok, &vlan_stripped,
                                   &vlan);
                eth_hdrs_stripped = 1;

        } else if (type == CQ_DESC_TYPE_RQ_ENET) {
                cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc,
                                    &type, &color, &q_number, &completed_index,
                                    &ingress_port, &fcoe, &eop, &sop,
                                    &rss_type, &csum_not_calc, &rss_hash,
                                    &bytes_written, &packet_error,
                                    &vlan_stripped, &vlan, &checksum,
                                    &fcoe_sof, &fcoe_fc_crc_ok,
                                    &fcoe_enc_error, &fcoe_eof,
                                    &tcp_udp_csum_ok, &udp, &tcp,
                                    &ipv4_csum_ok, &ipv6, &ipv4,
                                    &ipv4_fragment, &fcs_ok);
                eth_hdrs_stripped = 0;

        } else {
                /* wrong CQ type */
                shost_printk(KERN_ERR, fnic->lport->host,
                             "fnic rq_cmpl wrong cq type x%x\n", type);
                goto drop;
        }

        if (!fcs_ok || packet_error || !fcoe_fc_crc_ok || fcoe_enc_error) {
                FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
                             "fnic rq_cmpl fcoe x%x fcsok x%x"
                             " pkterr x%x fcoe_fc_crc_ok x%x, fcoe_enc_err"
                             " x%x\n",
                             fcoe, fcs_ok, packet_error,
                             fcoe_fc_crc_ok, fcoe_enc_error);
                goto drop;
        }

        if (eth_hdrs_stripped)
                fnic_import_rq_fc_frame(skb, fcp_bytes_written, sof, eof);
        else if (fnic_import_rq_eth_pkt(skb, bytes_written))
                goto drop;

        fp = (struct fc_frame *)skb;

        /*
         * If the frame is an ELS response that matches the cached FLOGI
         * OX_ID and is an accept, issue a flogi_reg copy WQ request to the
         * firmware to register the S_ID and to determine whether we are in
         * FC_OUI mode or gateway mode.
         */
        if (is_matching_flogi_resp_frame(fnic, fp)) {
                if (!eth_hdrs_stripped) {
                        if (fc_frame_payload_op(fp) == ELS_LS_ACC) {
                                fnic_handle_flogi_resp(fnic, fp);
                                return;
                        }
                        /*
                         * Received a FLOGI reject. No point registering
                         * with the firmware, but forward it to libFC.
                         */
                        goto forward;
                }
                goto drop;
        }
        if (!eth_hdrs_stripped)
                goto drop;

forward:
        spin_lock_irqsave(&fnic->fnic_lock, flags);
        if (fnic->stop_rx_link_events) {
                spin_unlock_irqrestore(&fnic->fnic_lock, flags);
                goto drop;
        }
        /* Use fr_flags to indicate whether this is a successful FLOGI resp */
        fr_flags(fp) = 0;
        fr_dev(fp) = fnic->lport;
        spin_unlock_irqrestore(&fnic->fnic_lock, flags);

        skb_queue_tail(&fnic->frame_queue, skb);
        queue_work(fnic_event_queue, &fnic->frame_work);

        return;
drop:
        dev_kfree_skb_irq(skb);
}

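/*
 * Completion queue callback for the receive queues: hand one RQ completion
 * descriptor to vnic_rq_service(), which in turn calls
 * fnic_rq_cmpl_frame_recv() for the completed buffer.
 */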
static int fnic_rq_cmpl_handler_cont(struct vnic_dev *vdev,
                                     struct cq_desc *cq_desc, u8 type,
                                     u16 q_number, u16 completed_index,
                                     void *opaque)
{
        struct fnic *fnic = vnic_dev_priv(vdev);

        vnic_rq_service(&fnic->rq[q_number], cq_desc, completed_index,
                        VNIC_RQ_RETURN_DESC, fnic_rq_cmpl_frame_recv,
                        NULL);
        return 0;
}

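/*
 * Service all receive completion queues, processing up to rq_work_to_do
 * entries each, and replenish the RQs with fresh frame buffers. Returns
 * the total number of completions processed.
 */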
int fnic_rq_cmpl_handler(struct fnic *fnic, int rq_work_to_do)
{
        unsigned int tot_rq_work_done = 0, cur_work_done;
        unsigned int i;
        int err;

        for (i = 0; i < fnic->rq_count; i++) {
                cur_work_done = vnic_cq_service(&fnic->cq[i], rq_work_to_do,
                                                fnic_rq_cmpl_handler_cont,
                                                NULL);
                if (cur_work_done) {
                        err = vnic_rq_fill(&fnic->rq[i], fnic_alloc_rq_frame);
                        if (err)
                                shost_printk(KERN_ERR, fnic->lport->host,
                                             "fnic_alloc_rq_frame cant alloc"
                                             " frame\n");
                }
                tot_rq_work_done += cur_work_done;
        }

        return tot_rq_work_done;
}

/*
 * This function is called once at init time to allocate and fill RQ
 * buffers. Subsequently, it is called in the interrupt context after RQ
 * buffer processing to replenish the buffers in the RQ
 */
int fnic_alloc_rq_frame(struct vnic_rq *rq)
{
        struct fnic *fnic = vnic_dev_priv(rq->vdev);
        struct sk_buff *skb;
        u16 len;
        dma_addr_t pa;

        len = FC_FRAME_HEADROOM + FC_MAX_FRAME + FC_FRAME_TAILROOM;
        skb = dev_alloc_skb(len);
        if (!skb) {
                FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
                             "Unable to allocate RQ sk_buff\n");
                return -ENOMEM;
        }
        skb_reset_mac_header(skb);
        skb_reset_transport_header(skb);
        skb_reset_network_header(skb);
        skb_put(skb, len);
        pa = pci_map_single(fnic->pdev, skb->data, len, PCI_DMA_FROMDEVICE);
        fnic_queue_rq_desc(rq, skb, pa, len);
        return 0;
}

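/*
 * Release an RQ buffer: unmap its DMA mapping and free the underlying
 * sk_buff.
 */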
void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
{
        struct fc_frame *fp = buf->os_buf;
        struct fnic *fnic = vnic_dev_priv(rq->vdev);

        pci_unmap_single(fnic->pdev, buf->dma_addr, buf->len,
                         PCI_DMA_FROMDEVICE);

        dev_kfree_skb(fp_skb(fp));
        buf->os_buf = NULL;
}

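/*
 * Returns true if the frame is an ELS FLOGI request: R_CTL is an ELS
 * request and the first payload byte after the FC header is ELS_FLOGI.
 */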
static inline int is_flogi_frame(struct fc_frame_header *fh)
{
        return fh->fh_r_ctl == FC_RCTL_ELS_REQ && *(u8 *)(fh + 1) == ELS_FLOGI;
}

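/*
 * Encapsulate an FC frame in an Ethernet/FCoE header (building the 802.1Q
 * tag in software when the hardware cannot insert it), pick the source and
 * destination MAC addresses based on the frame type and addressing mode,
 * and post the frame to work queue 0. Returns 0 on success, -1 if no WQ
 * descriptor is available; the frame is freed on failure.
 */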
int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp)
{
        struct vnic_wq *wq = &fnic->wq[0];
        struct sk_buff *skb;
        dma_addr_t pa;
        struct ethhdr *eth_hdr;
        struct vlan_ethhdr *vlan_hdr;
        struct fcoe_hdr *fcoe_hdr;
        struct fc_frame_header *fh;
        u32 tot_len, eth_hdr_len;
        int ret = 0;
        unsigned long flags;

        fh = fc_frame_header_get(fp);
        skb = fp_skb(fp);

        if (!fnic->vlan_hw_insert) {
                eth_hdr_len = sizeof(*vlan_hdr) + sizeof(*fcoe_hdr);
                vlan_hdr = (struct vlan_ethhdr *)skb_push(skb, eth_hdr_len);
                eth_hdr = (struct ethhdr *)vlan_hdr;
                vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q);
                vlan_hdr->h_vlan_encapsulated_proto = htons(ETH_P_FCOE);
                vlan_hdr->h_vlan_TCI = htons(fnic->vlan_id);
                fcoe_hdr = (struct fcoe_hdr *)(vlan_hdr + 1);
        } else {
                eth_hdr_len = sizeof(*eth_hdr) + sizeof(*fcoe_hdr);
                eth_hdr = (struct ethhdr *)skb_push(skb, eth_hdr_len);
                eth_hdr->h_proto = htons(ETH_P_FCOE);
                fcoe_hdr = (struct fcoe_hdr *)(eth_hdr + 1);
        }

        if (is_flogi_frame(fh)) {
                fc_fcoe_set_mac(eth_hdr->h_dest, fh->fh_d_id);
                memcpy(eth_hdr->h_source, fnic->mac_addr, ETH_ALEN);
        } else {
                if (fnic->fcoui_mode)
                        fc_fcoe_set_mac(eth_hdr->h_dest, fh->fh_d_id);
                else
                        memcpy(eth_hdr->h_dest, fnic->dest_addr, ETH_ALEN);
                memcpy(eth_hdr->h_source, fnic->data_src_addr, ETH_ALEN);
        }

        tot_len = skb->len;
        BUG_ON(tot_len % 4);

        memset(fcoe_hdr, 0, sizeof(*fcoe_hdr));
        fcoe_hdr->fcoe_sof = fr_sof(fp);
        if (FC_FCOE_VER)
                FC_FCOE_ENCAPS_VER(fcoe_hdr, FC_FCOE_VER);

        pa = pci_map_single(fnic->pdev, eth_hdr, tot_len, PCI_DMA_TODEVICE);

        spin_lock_irqsave(&fnic->wq_lock[0], flags);

        if (!vnic_wq_desc_avail(wq)) {
                pci_unmap_single(fnic->pdev, pa,
                                 tot_len, PCI_DMA_TODEVICE);
                ret = -1;
                goto fnic_send_frame_end;
        }

        fnic_queue_wq_desc(wq, skb, pa, tot_len, fr_eof(fp),
                           fnic->vlan_hw_insert, fnic->vlan_id, 1, 1, 1);
fnic_send_frame_end:
        spin_unlock_irqrestore(&fnic->wq_lock[0], flags);

        if (ret)
                dev_kfree_skb_any(fp_skb(fp));

        return ret;
}

/*
 * fnic_send
 * Routine to send a raw frame
 */
int fnic_send(struct fc_lport *lp, struct fc_frame *fp)
{
        struct fnic *fnic = lport_priv(lp);
        struct fc_frame_header *fh;
        int ret = 0;
        enum fnic_state old_state;
        unsigned long flags;
        struct fc_frame *old_flogi = NULL;
        struct fc_frame *old_flogi_resp = NULL;

        if (fnic->in_remove) {
                dev_kfree_skb(fp_skb(fp));
                ret = -1;
                goto fnic_send_end;
        }

        fh = fc_frame_header_get(fp);
        /* If not a FLOGI frame, send it out; this is the common case */
        if (!is_flogi_frame(fh))
                return fnic_send_frame(fnic, fp);

        /* Flogi frame, now enter the state machine */

        spin_lock_irqsave(&fnic->fnic_lock, flags);
again:
        /* Get any old cached frames, free them after dropping lock */
        old_flogi = fnic->flogi;
        fnic->flogi = NULL;
        old_flogi_resp = fnic->flogi_resp;
        fnic->flogi_resp = NULL;

        fnic->flogi_oxid = FC_XID_UNKNOWN;

        old_state = fnic->state;
        switch (old_state) {
        case FNIC_IN_FC_MODE:
        case FNIC_IN_ETH_TRANS_FC_MODE:
        default:
                fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
                vnic_dev_del_addr(fnic->vdev, fnic->data_src_addr);
                spin_unlock_irqrestore(&fnic->fnic_lock, flags);

                if (old_flogi) {
                        dev_kfree_skb(fp_skb(old_flogi));
                        old_flogi = NULL;
                }
                if (old_flogi_resp) {
                        dev_kfree_skb(fp_skb(old_flogi_resp));
                        old_flogi_resp = NULL;
                }

                ret = fnic_fw_reset_handler(fnic);

                spin_lock_irqsave(&fnic->fnic_lock, flags);
                if (fnic->state != FNIC_IN_FC_TRANS_ETH_MODE)
                        goto again;
                if (ret) {
                        fnic->state = old_state;
                        spin_unlock_irqrestore(&fnic->fnic_lock, flags);
                        dev_kfree_skb(fp_skb(fp));
                        goto fnic_send_end;
                }
                old_flogi = fnic->flogi;
                fnic->flogi = fp;
                fnic->flogi_oxid = ntohs(fh->fh_ox_id);
                old_flogi_resp = fnic->flogi_resp;
                fnic->flogi_resp = NULL;
                spin_unlock_irqrestore(&fnic->fnic_lock, flags);
                break;

        case FNIC_IN_FC_TRANS_ETH_MODE:
                /*
                 * A reset is pending with the firmware. Store the flogi
                 * and its oxid. The transition out of this state happens
                 * only when the firmware completes the reset, either
                 * successfully or not. On success we transition to
                 * FNIC_IN_ETH_MODE; on failure we transition to
                 * FNIC_IN_FC_MODE.
                 */
                fnic->flogi = fp;
                fnic->flogi_oxid = ntohs(fh->fh_ox_id);
                spin_unlock_irqrestore(&fnic->fnic_lock, flags);
                break;

        case FNIC_IN_ETH_MODE:
                /*
                 * The fw/hw is already in eth mode. Store the oxid and
                 * send the flogi frame out. The transition out of this
                 * state happens only when we receive a flogi response from
                 * the network and its oxid matches the oxid cached when
                 * the flogi frame was sent out. If they match, we issue a
                 * flogi_reg request and transition to state
                 * FNIC_IN_ETH_TRANS_FC_MODE.
                 */
                fnic->flogi_oxid = ntohs(fh->fh_ox_id);
                spin_unlock_irqrestore(&fnic->fnic_lock, flags);
                ret = fnic_send_frame(fnic, fp);
                break;
        }

fnic_send_end:
        if (old_flogi)
                dev_kfree_skb(fp_skb(old_flogi));
        if (old_flogi_resp)
                dev_kfree_skb(fp_skb(old_flogi_resp));
        return ret;
}

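/*
 * Transmit completion: unmap the DMA mapping and free the sk_buff of a
 * frame the hardware has finished sending.
 */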
static void fnic_wq_complete_frame_send(struct vnic_wq *wq,
                                        struct cq_desc *cq_desc,
                                        struct vnic_wq_buf *buf, void *opaque)
{
        struct sk_buff *skb = buf->os_buf;
        struct fc_frame *fp = (struct fc_frame *)skb;
        struct fnic *fnic = vnic_dev_priv(wq->vdev);

        pci_unmap_single(fnic->pdev, buf->dma_addr,
                         buf->len, PCI_DMA_TODEVICE);
        dev_kfree_skb_irq(fp_skb(fp));
        buf->os_buf = NULL;
}

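/*
 * Completion queue callback for the raw work queues: service one WQ
 * completion under the corresponding wq_lock.
 */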
static int fnic_wq_cmpl_handler_cont(struct vnic_dev *vdev,
                                     struct cq_desc *cq_desc, u8 type,
                                     u16 q_number, u16 completed_index,
                                     void *opaque)
{
        struct fnic *fnic = vnic_dev_priv(vdev);
        unsigned long flags;

        spin_lock_irqsave(&fnic->wq_lock[q_number], flags);
        vnic_wq_service(&fnic->wq[q_number], cq_desc, completed_index,
                        fnic_wq_complete_frame_send, NULL);
        spin_unlock_irqrestore(&fnic->wq_lock[q_number], flags);

        return 0;
}

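/*
 * Service the completion queues of all raw work queues, processing up to
 * work_to_do entries each. Returns the number of completions processed.
 */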
int fnic_wq_cmpl_handler(struct fnic *fnic, int work_to_do)
{
        unsigned int wq_work_done = 0;
        unsigned int i;

        for (i = 0; i < fnic->raw_wq_count; i++) {
                wq_work_done += vnic_cq_service(&fnic->cq[fnic->rq_count+i],
                                                work_to_do,
                                                fnic_wq_cmpl_handler_cont,
                                                NULL);
        }

        return wq_work_done;
}

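/*
 * Release a WQ buffer: unmap its DMA mapping and free the underlying
 * sk_buff.
 */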
void fnic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
{
        struct fc_frame *fp = buf->os_buf;
        struct fnic *fnic = vnic_dev_priv(wq->vdev);

        pci_unmap_single(fnic->pdev, buf->dma_addr,
                         buf->len, PCI_DMA_TODEVICE);

        dev_kfree_skb(fp_skb(fp));
        buf->os_buf = NULL;
}