Merge branch 'for-linus' of git://git390.marist.edu/pub/scm/linux-2.6
[linux-2.6] / drivers / net / benet / be_cmds.c
1 /*
2  * Copyright (C) 2005 - 2009 ServerEngines
3  * All rights reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License version 2
7  * as published by the Free Software Foundation.  The full GNU General
8  * Public License is included in this distribution in the file called COPYING.
9  *
10  * Contact Information:
11  * linux-drivers@serverengines.com
12  *
13  * ServerEngines
14  * 209 N. Fair Oaks Ave
15  * Sunnyvale, CA 94085
16  */
17
18 #include "be.h"
19
20 static int be_mbox_db_ready_wait(void __iomem *db)
21 {
22         int cnt = 0, wait = 5;
23         u32 ready;
24
25         do {
26                 ready = ioread32(db) & MPU_MAILBOX_DB_RDY_MASK;
27                 if (ready)
28                         break;
29
30                 if (cnt > 200000) {
31                         printk(KERN_WARNING DRV_NAME
32                                 ": mbox_db poll timed out\n");
33                         return -1;
34                 }
35
36                 if (cnt > 50)
37                         wait = 200;
38                 cnt += wait;
39                 udelay(wait);
40         } while (true);
41
42         return 0;
43 }
44
/*
 * Insert the mailbox address into the doorbell in two steps
 *
 * The mailbox DMA address is handed to the hardware in two doorbell
 * writes (upper address bits first, then lower), waiting for the
 * doorbell ready bit after each write.  The hardware then places a
 * completion entry in the mailbox CQE, which is validated and decoded.
 * Every caller in this file holds ctrl->cmd_lock around this call.
 * Returns 0 on success, -1 on an invalid completion, or the non-zero
 * hardware completion status on command failure.
 */
static int be_mbox_db_ring(struct be_ctrl_info *ctrl)
{
	int status;
	u16 compl_status, extd_status;
	u32 val = 0;
	void __iomem *db = ctrl->db + MPU_MAILBOX_DB_OFFSET;
	struct be_dma_mem *mbox_mem = &ctrl->mbox_mem;
	struct be_mcc_mailbox *mbox = mbox_mem->va;
	struct be_mcc_cq_entry *cqe = &mbox->cqe;

	/* clear any stale completion before ringing the doorbell */
	memset(cqe, 0, sizeof(*cqe));

	/* first write: ready bit clear, "hi" bit set for the upper half */
	val &= ~MPU_MAILBOX_DB_RDY_MASK;
	val |= MPU_MAILBOX_DB_HI_MASK;
	/* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
	val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
	iowrite32(val, db);

	/* wait for ready to be set */
	status = be_mbox_db_ready_wait(db);
	if (status != 0)
		return status;

	/* second write: "hi" bit clear for the lower half of the address */
	val = 0;
	val &= ~MPU_MAILBOX_DB_RDY_MASK;
	val &= ~MPU_MAILBOX_DB_HI_MASK;
	/* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */
	val |= (u32)(mbox_mem->dma >> 4) << 2;
	iowrite32(val, db);

	status = be_mbox_db_ready_wait(db);
	if (status != 0)
		return status;

	/* compl entry has been made now */
	be_dws_le_to_cpu(cqe, sizeof(*cqe));
	if (!(cqe->flags & CQE_FLAGS_VALID_MASK)) {
		printk(KERN_WARNING DRV_NAME ": ERROR invalid mbox compl\n");
		return -1;
	}

	compl_status = (cqe->status >> CQE_STATUS_COMPL_SHIFT) &
				CQE_STATUS_COMPL_MASK;
	if (compl_status != MCC_STATUS_SUCCESS) {
		extd_status = (cqe->status >> CQE_STATUS_EXTD_SHIFT) &
				CQE_STATUS_EXTD_MASK;
		printk(KERN_WARNING DRV_NAME
			": ERROR in cmd compl. status(compl/extd)=%d/%d\n",
			compl_status, extd_status);
	}

	return compl_status;
}
101
102 static int be_POST_stage_get(struct be_ctrl_info *ctrl, u16 *stage)
103 {
104         u32 sem = ioread32(ctrl->csr + MPU_EP_SEMAPHORE_OFFSET);
105
106         *stage = sem & EP_SEMAPHORE_POST_STAGE_MASK;
107         if ((sem >> EP_SEMAPHORE_POST_ERR_SHIFT) & EP_SEMAPHORE_POST_ERR_MASK)
108                 return -1;
109         else
110                 return 0;
111 }
112
113 static int be_POST_stage_poll(struct be_ctrl_info *ctrl, u16 poll_stage)
114 {
115         u16 stage, cnt, error;
116         for (cnt = 0; cnt < 5000; cnt++) {
117                 error = be_POST_stage_get(ctrl, &stage);
118                 if (error)
119                         return -1;
120
121                 if (stage == poll_stage)
122                         break;
123                 udelay(1000);
124         }
125         if (stage != poll_stage)
126                 return -1;
127         return 0;
128 }
129
130
131 int be_cmd_POST(struct be_ctrl_info *ctrl)
132 {
133         u16 stage, error;
134
135         error = be_POST_stage_get(ctrl, &stage);
136         if (error)
137                 goto err;
138
139         if (stage == POST_STAGE_ARMFW_RDY)
140                 return 0;
141
142         if (stage != POST_STAGE_AWAITING_HOST_RDY)
143                 goto err;
144
145         /* On awaiting host rdy, reset and again poll on awaiting host rdy */
146         iowrite32(POST_STAGE_BE_RESET, ctrl->csr + MPU_EP_SEMAPHORE_OFFSET);
147         error = be_POST_stage_poll(ctrl, POST_STAGE_AWAITING_HOST_RDY);
148         if (error)
149                 goto err;
150
151         /* Now kickoff POST and poll on armfw ready */
152         iowrite32(POST_STAGE_HOST_RDY, ctrl->csr + MPU_EP_SEMAPHORE_OFFSET);
153         error = be_POST_stage_poll(ctrl, POST_STAGE_ARMFW_RDY);
154         if (error)
155                 goto err;
156
157         return 0;
158 err:
159         printk(KERN_WARNING DRV_NAME ": ERROR, stage=%d\n", stage);
160         return -1;
161 }
162
/* Payload area of a WRB when the command is embedded in the WRB itself */
static inline void *embedded_payload(struct be_mcc_wrb *wrb)
{
	return wrb->payload.embedded_payload;
}
167
/* Scatter-gather list of a WRB whose command lives in external DMA memory */
static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
{
	return &wrb->payload.sgl[0];
}
172
/* Don't touch the hdr after it's prepared */
/* Initialise the WRB header: either mark the command as embedded or
 * record the SGE count, set the payload length, then byte-swap the
 * first 20 bytes of the WRB to little-endian for the hardware.
 */
static void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len,
				bool embedded, u8 sge_cnt)
{
	if (embedded)
		wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
	else
		wrb->embedded |= (sge_cnt & MCC_WRB_SGE_CNT_MASK) <<
				MCC_WRB_SGE_CNT_SHIFT;
	wrb->payload_length = payload_len;
	/* 20 == byte size of the WRB header region being swapped;
	 * NOTE(review): presumably matches the header layout in be_hw.h
	 * up to but not including the payload -- verify */
	be_dws_cpu_to_le(wrb, 20);
}
185
/* Don't touch the hdr after it's prepared */
/* Fill the common command header; request_length counts only the bytes
 * that follow the header itself.
 */
static void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
				u8 subsystem, u8 opcode, int cmd_len)
{
	req_hdr->opcode = opcode;
	req_hdr->subsystem = subsystem;
	req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
}
194
195 static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
196                         struct be_dma_mem *mem)
197 {
198         int i, buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
199         u64 dma = (u64)mem->dma;
200
201         for (i = 0; i < buf_pages; i++) {
202                 pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF);
203                 pages[i].hi = cpu_to_le32(upper_32_bits(dma));
204                 dma += PAGE_SIZE_4K;
205         }
206 }
207
208 /* Converts interrupt delay in microseconds to multiplier value */
209 static u32 eq_delay_to_mult(u32 usec_delay)
210 {
211 #define MAX_INTR_RATE                   651042
212         const u32 round = 10;
213         u32 multiplier;
214
215         if (usec_delay == 0)
216                 multiplier = 0;
217         else {
218                 u32 interrupt_rate = 1000000 / usec_delay;
219                 /* Max delay, corresponding to the lowest interrupt rate */
220                 if (interrupt_rate == 0)
221                         multiplier = 1023;
222                 else {
223                         multiplier = (MAX_INTR_RATE - interrupt_rate) * round;
224                         multiplier /= interrupt_rate;
225                         /* Round the multiplier to the closest value.*/
226                         multiplier = (multiplier + round/2) / round;
227                         multiplier = min(multiplier, (u32)1023);
228                 }
229         }
230         return multiplier;
231 }
232
/* The WRB embedded at the start of the mailbox DMA region */
static inline struct be_mcc_wrb *wrb_from_mbox(struct be_dma_mem *mbox_mem)
{
	return &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
}
237
/* Create an event queue via the mailbox.  Programs the EQ context
 * (owning PCI function, 4-byte EQE size, ring length, interrupt delay
 * multiplier) and the queue's page addresses.  On success fills in
 * eq->id and marks the queue created.
 * Returns 0 or a non-zero mailbox/completion status.
 */
int be_cmd_eq_create(struct be_ctrl_info *ctrl,
		struct be_queue_info *eq, int eq_delay)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_eq_create *req = embedded_payload(wrb);
	/* req and resp alias the same embedded payload; resp is only
	 * meaningful after be_mbox_db_ring() completes */
	struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);
	struct be_dma_mem *q_mem = &eq->dma_mem;
	int status;

	spin_lock(&ctrl->cmd_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_EQ_CREATE, sizeof(*req));

	req->num_pages =  cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	AMAP_SET_BITS(struct amap_eq_context, func, req->context,
			ctrl->pci_func);
	AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
	/* 4byte eqe*/
	AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
	/* ring length encoded as log2(entries/256) */
	AMAP_SET_BITS(struct amap_eq_context, count, req->context,
			__ilog2_u32(eq->len/256));
	AMAP_SET_BITS(struct amap_eq_context, delaymult, req->context,
			eq_delay_to_mult(eq_delay));
	be_dws_cpu_to_le(req->context, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_db_ring(ctrl);
	if (!status) {
		eq->id = le16_to_cpu(resp->eq_id);
		eq->created = true;
	}
	spin_unlock(&ctrl->cmd_lock);
	return status;
}
278
279 int be_cmd_mac_addr_query(struct be_ctrl_info *ctrl, u8 *mac_addr,
280                         u8 type, bool permanent, u32 if_handle)
281 {
282         struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
283         struct be_cmd_req_mac_query *req = embedded_payload(wrb);
284         struct be_cmd_resp_mac_query *resp = embedded_payload(wrb);
285         int status;
286
287         spin_lock(&ctrl->cmd_lock);
288         memset(wrb, 0, sizeof(*wrb));
289
290         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
291
292         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
293                 OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req));
294
295         req->type = type;
296         if (permanent) {
297                 req->permanent = 1;
298         } else {
299                 req->if_id = cpu_to_le16((u16)if_handle);
300                 req->permanent = 0;
301         }
302
303         status = be_mbox_db_ring(ctrl);
304         if (!status)
305                 memcpy(mac_addr, resp->mac.addr, ETH_ALEN);
306
307         spin_unlock(&ctrl->cmd_lock);
308         return status;
309 }
310
311 int be_cmd_pmac_add(struct be_ctrl_info *ctrl, u8 *mac_addr,
312                 u32 if_id, u32 *pmac_id)
313 {
314         struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
315         struct be_cmd_req_pmac_add *req = embedded_payload(wrb);
316         int status;
317
318         spin_lock(&ctrl->cmd_lock);
319         memset(wrb, 0, sizeof(*wrb));
320
321         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
322
323         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
324                 OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req));
325
326         req->if_id = cpu_to_le32(if_id);
327         memcpy(req->mac_address, mac_addr, ETH_ALEN);
328
329         status = be_mbox_db_ring(ctrl);
330         if (!status) {
331                 struct be_cmd_resp_pmac_add *resp = embedded_payload(wrb);
332                 *pmac_id = le32_to_cpu(resp->pmac_id);
333         }
334
335         spin_unlock(&ctrl->cmd_lock);
336         return status;
337 }
338
339 int be_cmd_pmac_del(struct be_ctrl_info *ctrl, u32 if_id, u32 pmac_id)
340 {
341         struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
342         struct be_cmd_req_pmac_del *req = embedded_payload(wrb);
343         int status;
344
345         spin_lock(&ctrl->cmd_lock);
346         memset(wrb, 0, sizeof(*wrb));
347
348         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
349
350         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
351                 OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req));
352
353         req->if_id = cpu_to_le32(if_id);
354         req->pmac_id = cpu_to_le32(pmac_id);
355
356         status = be_mbox_db_ring(ctrl);
357         spin_unlock(&ctrl->cmd_lock);
358
359         return status;
360 }
361
/* Create a completion queue bound to event queue eq.  Programs the CQ
 * context (coalescing watermark, delay, ring length, owning function)
 * and page addresses.  On success fills in cq->id and marks the queue
 * created.
 */
int be_cmd_cq_create(struct be_ctrl_info *ctrl,
		struct be_queue_info *cq, struct be_queue_info *eq,
		bool sol_evts, bool no_delay, int coalesce_wm)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_cq_create *req = embedded_payload(wrb);
	/* req and resp alias the same embedded payload; resp is only
	 * meaningful after be_mbox_db_ring() completes */
	struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);
	struct be_dma_mem *q_mem = &cq->dma_mem;
	void *ctxt = &req->context;
	int status;

	spin_lock(&ctrl->cmd_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_CQ_CREATE, sizeof(*req));

	req->num_pages =  cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	AMAP_SET_BITS(struct amap_cq_context, coalescwm, ctxt, coalesce_wm);
	AMAP_SET_BITS(struct amap_cq_context, nodelay, ctxt, no_delay);
	/* ring length encoded as log2(entries/256) */
	AMAP_SET_BITS(struct amap_cq_context, count, ctxt,
			__ilog2_u32(cq->len/256));
	AMAP_SET_BITS(struct amap_cq_context, valid, ctxt, 1);
	AMAP_SET_BITS(struct amap_cq_context, solevent, ctxt, sol_evts);
	AMAP_SET_BITS(struct amap_cq_context, eventable, ctxt, 1);
	AMAP_SET_BITS(struct amap_cq_context, eqid, ctxt, eq->id);
	/* created unarmed; armed later via the CQ doorbell */
	AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 0);
	AMAP_SET_BITS(struct amap_cq_context, func, ctxt, ctrl->pci_func);
	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_db_ring(ctrl);
	if (!status) {
		cq->id = le16_to_cpu(resp->cq_id);
		cq->created = true;
	}
	spin_unlock(&ctrl->cmd_lock);

	return status;
}
406
/* Create an ethernet transmit queue whose completions post to cq.
 * On success fills in txq->id and marks the queue created.
 */
int be_cmd_txq_create(struct be_ctrl_info *ctrl,
			struct be_queue_info *txq,
			struct be_queue_info *cq)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_eth_tx_create *req = embedded_payload(wrb);
	struct be_dma_mem *q_mem = &txq->dma_mem;
	void *ctxt = &req->context;
	int status;
	u32 len_encoded;

	spin_lock(&ctrl->cmd_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_TX_CREATE,
		sizeof(*req));

	/* NOTE(review): unlike eq/cq create, num_pages is not converted
	 * with cpu_to_le16 here -- presumably the field is byte-wide in
	 * this request; verify against the request struct definition */
	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
	req->ulp_num = BE_ULP1_NUM;
	req->type = BE_ETH_TX_RING_TYPE_STANDARD;

	len_encoded = fls(txq->len); /* log2(len) + 1 */
	/* an encoding of 16 wraps to 0 in the context field */
	if (len_encoded == 16)
		len_encoded = 0;
	AMAP_SET_BITS(struct amap_tx_context, tx_ring_size, ctxt, len_encoded);
	AMAP_SET_BITS(struct amap_tx_context, pci_func_id, ctxt,
			ctrl->pci_func);
	AMAP_SET_BITS(struct amap_tx_context, ctx_valid, ctxt, 1);
	AMAP_SET_BITS(struct amap_tx_context, cq_id_send, ctxt, cq->id);

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_db_ring(ctrl);
	if (!status) {
		struct be_cmd_resp_eth_tx_create *resp = embedded_payload(wrb);
		txq->id = le16_to_cpu(resp->cid);
		txq->created = true;
	}
	spin_unlock(&ctrl->cmd_lock);

	return status;
}
453
/* Create an ethernet receive queue whose completions post to cq_id.
 * On success fills in rxq->id and marks the queue created.
 */
int be_cmd_rxq_create(struct be_ctrl_info *ctrl,
		struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
		u16 max_frame_size, u32 if_id, u32 rss)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_eth_rx_create *req = embedded_payload(wrb);
	struct be_dma_mem *q_mem = &rxq->dma_mem;
	int status;

	spin_lock(&ctrl->cmd_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_RX_CREATE,
		sizeof(*req));

	req->cq_id = cpu_to_le16(cq_id);
	/* frag size is passed as its log2; exact only when frag_size is
	 * a power of two -- assumed here, TODO confirm with callers */
	req->frag_size = fls(frag_size) - 1;
	/* rx ring is always described with exactly 2 pages */
	req->num_pages = 2;
	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
	req->interface_id = cpu_to_le32(if_id);
	req->max_frame_size = cpu_to_le16(max_frame_size);
	req->rss_queue = cpu_to_le32(rss);

	status = be_mbox_db_ring(ctrl);
	if (!status) {
		struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb);
		rxq->id = le16_to_cpu(resp->id);
		rxq->created = true;
	}
	spin_unlock(&ctrl->cmd_lock);

	return status;
}
489
490 /* Generic destroyer function for all types of queues */
491 int be_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q,
492                 int queue_type)
493 {
494         struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
495         struct be_cmd_req_q_destroy *req = embedded_payload(wrb);
496         u8 subsys = 0, opcode = 0;
497         int status;
498
499         spin_lock(&ctrl->cmd_lock);
500
501         memset(wrb, 0, sizeof(*wrb));
502         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
503
504         switch (queue_type) {
505         case QTYPE_EQ:
506                 subsys = CMD_SUBSYSTEM_COMMON;
507                 opcode = OPCODE_COMMON_EQ_DESTROY;
508                 break;
509         case QTYPE_CQ:
510                 subsys = CMD_SUBSYSTEM_COMMON;
511                 opcode = OPCODE_COMMON_CQ_DESTROY;
512                 break;
513         case QTYPE_TXQ:
514                 subsys = CMD_SUBSYSTEM_ETH;
515                 opcode = OPCODE_ETH_TX_DESTROY;
516                 break;
517         case QTYPE_RXQ:
518                 subsys = CMD_SUBSYSTEM_ETH;
519                 opcode = OPCODE_ETH_RX_DESTROY;
520                 break;
521         default:
522                 printk(KERN_WARNING DRV_NAME ":bad Q type in Q destroy cmd\n");
523                 status = -1;
524                 goto err;
525         }
526         be_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req));
527         req->id = cpu_to_le16(q->id);
528
529         status = be_mbox_db_ring(ctrl);
530 err:
531         spin_unlock(&ctrl->cmd_lock);
532
533         return status;
534 }
535
536 /* Create an rx filtering policy configuration on an i/f */
537 int be_cmd_if_create(struct be_ctrl_info *ctrl, u32 flags, u8 *mac,
538                 bool pmac_invalid, u32 *if_handle, u32 *pmac_id)
539 {
540         struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
541         struct be_cmd_req_if_create *req = embedded_payload(wrb);
542         int status;
543
544         spin_lock(&ctrl->cmd_lock);
545         memset(wrb, 0, sizeof(*wrb));
546
547         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
548
549         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
550                 OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req));
551
552         req->capability_flags = cpu_to_le32(flags);
553         req->enable_flags = cpu_to_le32(flags);
554         if (!pmac_invalid)
555                 memcpy(req->mac_addr, mac, ETH_ALEN);
556
557         status = be_mbox_db_ring(ctrl);
558         if (!status) {
559                 struct be_cmd_resp_if_create *resp = embedded_payload(wrb);
560                 *if_handle = le32_to_cpu(resp->interface_id);
561                 if (!pmac_invalid)
562                         *pmac_id = le32_to_cpu(resp->pmac_id);
563         }
564
565         spin_unlock(&ctrl->cmd_lock);
566         return status;
567 }
568
569 int be_cmd_if_destroy(struct be_ctrl_info *ctrl, u32 interface_id)
570 {
571         struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
572         struct be_cmd_req_if_destroy *req = embedded_payload(wrb);
573         int status;
574
575         spin_lock(&ctrl->cmd_lock);
576         memset(wrb, 0, sizeof(*wrb));
577
578         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
579
580         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
581                 OPCODE_COMMON_NTWK_INTERFACE_DESTROY, sizeof(*req));
582
583         req->interface_id = cpu_to_le32(interface_id);
584         status = be_mbox_db_ring(ctrl);
585
586         spin_unlock(&ctrl->cmd_lock);
587
588         return status;
589 }
590
/* Get stats is a non embedded command: the request is not embedded inside
 * WRB but is a separate dma memory block
 */
int be_cmd_get_stats(struct be_ctrl_info *ctrl, struct be_dma_mem *nonemb_cmd)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_get_stats *req = nonemb_cmd->va;
	struct be_sge *sge = nonembedded_sgl(wrb);
	int status;

	spin_lock(&ctrl->cmd_lock);
	memset(wrb, 0, sizeof(*wrb));

	/* req lives in caller-provided DMA memory, not in the wrb,
	 * so it must be cleared separately */
	memset(req, 0, sizeof(*req));

	/* non-embedded: one SGE pointing at the external command buffer */
	be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
		OPCODE_ETH_GET_STATISTICS, sizeof(*req));
	sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
	sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
	sge->len = cpu_to_le32(nonemb_cmd->size);

	status = be_mbox_db_ring(ctrl);
	if (!status) {
		/* response is written back into the same DMA buffer */
		struct be_cmd_resp_get_stats *resp = nonemb_cmd->va;
		be_dws_le_to_cpu(&resp->hw_stats, sizeof(resp->hw_stats));
	}

	spin_unlock(&ctrl->cmd_lock);
	return status;
}
623
624 int be_cmd_link_status_query(struct be_ctrl_info *ctrl,
625                         struct be_link_info *link)
626 {
627         struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
628         struct be_cmd_req_link_status *req = embedded_payload(wrb);
629         int status;
630
631         spin_lock(&ctrl->cmd_lock);
632         memset(wrb, 0, sizeof(*wrb));
633
634         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
635
636         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
637                 OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, sizeof(*req));
638
639         status = be_mbox_db_ring(ctrl);
640         if (!status) {
641                 struct be_cmd_resp_link_status *resp = embedded_payload(wrb);
642                 link->speed = resp->mac_speed;
643                 link->duplex = resp->mac_duplex;
644                 link->fault = resp->mac_fault;
645         } else {
646                 link->speed = PHY_LINK_SPEED_ZERO;
647         }
648
649         spin_unlock(&ctrl->cmd_lock);
650         return status;
651 }
652
653 int be_cmd_get_fw_ver(struct be_ctrl_info *ctrl, char *fw_ver)
654 {
655         struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
656         struct be_cmd_req_get_fw_version *req = embedded_payload(wrb);
657         int status;
658
659         spin_lock(&ctrl->cmd_lock);
660         memset(wrb, 0, sizeof(*wrb));
661
662         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
663
664         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
665                 OPCODE_COMMON_GET_FW_VERSION, sizeof(*req));
666
667         status = be_mbox_db_ring(ctrl);
668         if (!status) {
669                 struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb);
670                 strncpy(fw_ver, resp->firmware_version_string, FW_VER_LEN);
671         }
672
673         spin_unlock(&ctrl->cmd_lock);
674         return status;
675 }
676
677 /* set the EQ delay interval of an EQ to specified value */
678 int be_cmd_modify_eqd(struct be_ctrl_info *ctrl, u32 eq_id, u32 eqd)
679 {
680         struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
681         struct be_cmd_req_modify_eq_delay *req = embedded_payload(wrb);
682         int status;
683
684         spin_lock(&ctrl->cmd_lock);
685         memset(wrb, 0, sizeof(*wrb));
686
687         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
688
689         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
690                 OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req));
691
692         req->num_eq = cpu_to_le32(1);
693         req->delay[0].eq_id = cpu_to_le32(eq_id);
694         req->delay[0].phase = 0;
695         req->delay[0].delay_multiplier = cpu_to_le32(eqd);
696
697         status = be_mbox_db_ring(ctrl);
698
699         spin_unlock(&ctrl->cmd_lock);
700         return status;
701 }
702
/* Program the VLAN filter table of interface if_id with num tags from
 * vtag_array, plus the untagged/vlan-promiscuous policy bits.
 */
int be_cmd_vlan_config(struct be_ctrl_info *ctrl, u32 if_id, u16 *vtag_array,
			u32 num, bool untagged, bool promiscuous)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_vlan_config *req = embedded_payload(wrb);
	int status;

	spin_lock(&ctrl->cmd_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req));

	/* NOTE(review): these fields are assigned without endian
	 * conversion, unlike the le16/le32 fields elsewhere in this
	 * file -- presumably byte-wide on the wire; verify against the
	 * request layout */
	req->interface_id = if_id;
	req->promiscuous = promiscuous;
	req->untagged = untagged;
	req->num_vlan = num;
	if (!promiscuous) {
		/* vtag_array copied verbatim; assumes the tags are
		 * already in wire byte order -- TODO confirm */
		memcpy(req->normal_vlan, vtag_array,
			req->num_vlan * sizeof(vtag_array[0]));
	}

	status = be_mbox_db_ring(ctrl);

	spin_unlock(&ctrl->cmd_lock);
	return status;
}
732
733 int be_cmd_promiscuous_config(struct be_ctrl_info *ctrl, u8 port_num, bool en)
734 {
735         struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
736         struct be_cmd_req_promiscuous_config *req = embedded_payload(wrb);
737         int status;
738
739         spin_lock(&ctrl->cmd_lock);
740         memset(wrb, 0, sizeof(*wrb));
741
742         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
743
744         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
745                 OPCODE_ETH_PROMISCUOUS, sizeof(*req));
746
747         if (port_num)
748                 req->port1_promiscuous = en;
749         else
750                 req->port0_promiscuous = en;
751
752         status = be_mbox_db_ring(ctrl);
753
754         spin_unlock(&ctrl->cmd_lock);
755         return status;
756 }
757
758 int be_cmd_mcast_mac_set(struct be_ctrl_info *ctrl, u32 if_id, u8 *mac_table,
759                         u32 num, bool promiscuous)
760 {
761         struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
762         struct be_cmd_req_mcast_mac_config *req = embedded_payload(wrb);
763         int status;
764
765         spin_lock(&ctrl->cmd_lock);
766         memset(wrb, 0, sizeof(*wrb));
767
768         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
769
770         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
771                 OPCODE_COMMON_NTWK_MULTICAST_SET, sizeof(*req));
772
773         req->interface_id = if_id;
774         req->promiscuous = promiscuous;
775         if (!promiscuous) {
776                 req->num_mac = cpu_to_le16(num);
777                 if (num)
778                         memcpy(req->mac, mac_table, ETH_ALEN * num);
779         }
780
781         status = be_mbox_db_ring(ctrl);
782
783         spin_unlock(&ctrl->cmd_lock);
784         return status;
785 }
786
787 int be_cmd_set_flow_control(struct be_ctrl_info *ctrl, u32 tx_fc, u32 rx_fc)
788 {
789         struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
790         struct be_cmd_req_set_flow_control *req = embedded_payload(wrb);
791         int status;
792
793         spin_lock(&ctrl->cmd_lock);
794
795         memset(wrb, 0, sizeof(*wrb));
796
797         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
798
799         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
800                 OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req));
801
802         req->tx_flow_control = cpu_to_le16((u16)tx_fc);
803         req->rx_flow_control = cpu_to_le16((u16)rx_fc);
804
805         status = be_mbox_db_ring(ctrl);
806
807         spin_unlock(&ctrl->cmd_lock);
808         return status;
809 }
810
811 int be_cmd_get_flow_control(struct be_ctrl_info *ctrl, u32 *tx_fc, u32 *rx_fc)
812 {
813         struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
814         struct be_cmd_req_get_flow_control *req = embedded_payload(wrb);
815         int status;
816
817         spin_lock(&ctrl->cmd_lock);
818
819         memset(wrb, 0, sizeof(*wrb));
820
821         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
822
823         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
824                 OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req));
825
826         status = be_mbox_db_ring(ctrl);
827         if (!status) {
828                 struct be_cmd_resp_get_flow_control *resp =
829                                                 embedded_payload(wrb);
830                 *tx_fc = le16_to_cpu(resp->tx_flow_control);
831                 *rx_fc = le16_to_cpu(resp->rx_flow_control);
832         }
833
834         spin_unlock(&ctrl->cmd_lock);
835         return status;
836 }
837
838 int be_cmd_query_fw_cfg(struct be_ctrl_info *ctrl, u32 *port_num)
839 {
840         struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
841         struct be_cmd_req_query_fw_cfg *req = embedded_payload(wrb);
842         int status;
843
844         spin_lock(&ctrl->cmd_lock);
845
846         memset(wrb, 0, sizeof(*wrb));
847
848         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
849
850         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
851                 OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, sizeof(*req));
852
853         status = be_mbox_db_ring(ctrl);
854         if (!status) {
855                 struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb);
856                 *port_num = le32_to_cpu(resp->phys_port);
857         }
858
859         spin_unlock(&ctrl->cmd_lock);
860         return status;
861 }