/*
 * cxgb3i_ddp.c: Chelsio S3xx iSCSI DDP Manager.
 *
 * Copyright (c) 2008 Chelsio Communications, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Karen Xie (kxie@chelsio.com)
 */

#include <linux/skbuff.h>
#include <linux/scatterlist.h>

/* from cxgb3 LLD */
#include "common.h"
#include "t3_cpl.h"
#include "t3cdev.h"
#include "cxgb3_ctl_defs.h"
#include "cxgb3_offload.h"
#include "firmware_exports.h"

#include "cxgb3i_ddp.h"

#define ddp_log_error(fmt...) printk(KERN_ERR "cxgb3i_ddp: ERR! " fmt)
#define ddp_log_warn(fmt...)  printk(KERN_WARNING "cxgb3i_ddp: WARN! " fmt)
#define ddp_log_info(fmt...)  printk(KERN_INFO "cxgb3i_ddp: " fmt)

#ifdef __DEBUG_CXGB3I_DDP__
#define ddp_log_debug(fmt, args...) \
        printk(KERN_INFO "cxgb3i_ddp: %s - " fmt, __func__ , ## args)
#else
#define ddp_log_debug(fmt...)
#endif

/*
 * iSCSI Direct Data Placement
 *
 * T3 h/w can directly place the iSCSI Data-In or Data-Out PDU's payload into
 * pre-posted final destination host-memory buffers based on the Initiator
 * Task Tag (ITT) in Data-In or Target Task Tag (TTT) in Data-Out PDUs.
 *
 * The host memory address is programmed into h/w in the format of pagepod
 * entries.
 * The location of the pagepod entry is encoded into the ddp tag, which is
 * used as (or as the base for) the ITT/TTT.
 */
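
/*
 * A ddp tag is built in cxgb3i_ddp_tag_reserve() below as
 *
 *      tag = cxgb3i_ddp_tag_base(tformat, sw_tag) | (idx << PPOD_IDX_SHIFT)
 *
 * i.e. the pagepod index is carried in the bits reserved for h/w
 * (rsvd_tag_mask) while the remaining bits come from the caller's s/w tag.
 */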

#define DDP_PGIDX_MAX           4
#define DDP_THRESHOLD   2048
static unsigned char ddp_page_order[DDP_PGIDX_MAX] = {0, 1, 2, 4};
static unsigned char ddp_page_shift[DDP_PGIDX_MAX] = {12, 13, 14, 16};
static unsigned char page_idx = DDP_PGIDX_MAX;

/*
 * functions to program the pagepod in h/w
 */
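/*
 * ulp_mem_io_set_hdr - fill in the work request header for a ULP_MEM_WRITE
 * of one pagepod (PPOD_SIZE bytes) at adapter memory address @addr.
 */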
static inline void ulp_mem_io_set_hdr(struct sk_buff *skb, unsigned int addr)
{
        struct ulp_mem_io *req = (struct ulp_mem_io *)skb->head;

        req->wr.wr_lo = 0;
        req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_BYPASS));
        req->cmd_lock_addr = htonl(V_ULP_MEMIO_ADDR(addr >> 5) |
                                   V_ULPTX_CMD(ULP_MEM_WRITE));
        req->len = htonl(V_ULP_MEMIO_DATA_LEN(PPOD_SIZE >> 5) |
                         V_ULPTX_NFLITS((PPOD_SIZE >> 3) + 1));
}

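/*
 * set_ddp_map - program @npods pagepods into adapter memory, starting at
 * pagepod index @idx, using the skbs pre-allocated for this index range.
 * Each pagepod is written with the header @hdr and the dma addresses of
 * the pages it covers in @gl.
 */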
static int set_ddp_map(struct cxgb3i_ddp_info *ddp, struct pagepod_hdr *hdr,
                       unsigned int idx, unsigned int npods,
                       struct cxgb3i_gather_list *gl)
{
        unsigned int pm_addr = (idx << PPOD_SIZE_SHIFT) + ddp->llimit;
        int i;

        for (i = 0; i < npods; i++, idx++, pm_addr += PPOD_SIZE) {
                struct sk_buff *skb = ddp->gl_skb[idx];
                struct pagepod *ppod;
                int j, pidx;

                /* hold on to the skb until we clear the ddp mapping */
                skb_get(skb);

                ulp_mem_io_set_hdr(skb, pm_addr);
                ppod = (struct pagepod *)
                       (skb->head + sizeof(struct ulp_mem_io));
                memcpy(&(ppod->hdr), hdr, sizeof(struct pagepod_hdr));
                for (pidx = 4 * i, j = 0; j < 5; ++j, ++pidx)
                        ppod->addr[j] = pidx < gl->nelem ?
                                     cpu_to_be64(gl->phys_addr[pidx]) : 0UL;

                skb->priority = CPL_PRIORITY_CONTROL;
                cxgb3_ofld_send(ddp->tdev, skb);
        }
        return 0;
}

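/*
 * clear_ddp_map - zero out the @npods pagepods starting at index @idx to
 * tear down the ddp mapping identified by @tag, reusing (and releasing)
 * the skbs held since set_ddp_map().
 */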
static void clear_ddp_map(struct cxgb3i_ddp_info *ddp, unsigned int tag,
                         unsigned int idx, unsigned int npods)
{
        unsigned int pm_addr = (idx << PPOD_SIZE_SHIFT) + ddp->llimit;
        int i;

        for (i = 0; i < npods; i++, idx++, pm_addr += PPOD_SIZE) {
                struct sk_buff *skb = ddp->gl_skb[idx];

                if (!skb) {
                        ddp_log_error("ddp tag 0x%x, 0x%x, %d/%u, skb NULL.\n",
                                        tag, idx, i, npods);
                        continue;
                }
                ddp->gl_skb[idx] = NULL;
                memset((skb->head + sizeof(struct ulp_mem_io)), 0, PPOD_SIZE);
                ulp_mem_io_set_hdr(skb, pm_addr);
                skb->priority = CPL_PRIORITY_CONTROL;
                cxgb3_ofld_send(ddp->tdev, skb);
        }
}

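/*
 * ddp_find_unused_entries - scan the pagepod map for @count consecutive
 * free slots in [@start, @max], mark them with @gl and return the first
 * index, or -EBUSY if no such range is available.
 */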
static inline int ddp_find_unused_entries(struct cxgb3i_ddp_info *ddp,
                                          int start, int max, int count,
                                          struct cxgb3i_gather_list *gl)
{
        unsigned int i, j;

        spin_lock(&ddp->map_lock);
        for (i = start; i <= max;) {
                for (j = 0; j < count; j++) {
                        if (ddp->gl_map[i + j])
                                break;
                }
                if (j == count) {
                        for (j = 0; j < count; j++)
                                ddp->gl_map[i + j] = gl;
                        spin_unlock(&ddp->map_lock);
                        return i;
                }
                i += j + 1;
        }
        spin_unlock(&ddp->map_lock);
        return -EBUSY;
}

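/*
 * ddp_unmark_entries - release @count pagepod map slots starting at @start,
 * undoing a reservation made by ddp_find_unused_entries().
 */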
static inline void ddp_unmark_entries(struct cxgb3i_ddp_info *ddp,
                                      int start, int count)
{
        spin_lock(&ddp->map_lock);
        memset(&ddp->gl_map[start], 0,
               count * sizeof(struct cxgb3i_gather_list *));
        spin_unlock(&ddp->map_lock);
}

static inline void ddp_free_gl_skb(struct cxgb3i_ddp_info *ddp,
                                   int idx, int count)
{
        int i;

        for (i = 0; i < count; i++, idx++)
                if (ddp->gl_skb[idx]) {
                        kfree_skb(ddp->gl_skb[idx]);
                        ddp->gl_skb[idx] = NULL;
                }
}

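/*
 * ddp_alloc_gl_skb - pre-allocate one pagepod-write skb per pagepod for the
 * index range [@idx, @idx + @count); on failure free whatever was allocated
 * and return -ENOMEM.
 */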
static inline int ddp_alloc_gl_skb(struct cxgb3i_ddp_info *ddp, int idx,
                                   int count, gfp_t gfp)
{
        int i;

        for (i = 0; i < count; i++) {
                struct sk_buff *skb = alloc_skb(sizeof(struct ulp_mem_io) +
                                                PPOD_SIZE, gfp);
                if (skb) {
                        ddp->gl_skb[idx + i] = skb;
                        skb_put(skb, sizeof(struct ulp_mem_io) + PPOD_SIZE);
                } else {
                        ddp_free_gl_skb(ddp, idx, i);
                        return -ENOMEM;
                }
        }
        return 0;
}

/**
 * cxgb3i_ddp_find_page_index - return ddp page index for a given page size
 * @pgsz: page size
 * Returns the ddp page index; if no match is found, returns DDP_PGIDX_MAX.
 */
int cxgb3i_ddp_find_page_index(unsigned long pgsz)
{
        int i;

        for (i = 0; i < DDP_PGIDX_MAX; i++) {
                if (pgsz == (1UL << ddp_page_shift[i]))
                        return i;
        }
        ddp_log_debug("ddp page size 0x%lx not supported.\n", pgsz);
        return DDP_PGIDX_MAX;
}

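/*
 * ddp_gl_unmap - undo the dma mapping of the pages in a gather list.
 */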
static inline void ddp_gl_unmap(struct pci_dev *pdev,
                                struct cxgb3i_gather_list *gl)
{
        int i;

        for (i = 0; i < gl->nelem; i++)
                pci_unmap_page(pdev, gl->phys_addr[i], PAGE_SIZE,
                               PCI_DMA_FROMDEVICE);
}

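/*
 * ddp_gl_map - dma-map every page in the gather list for the device; on a
 * mapping failure unmap whatever was already mapped and return -ENOMEM,
 * otherwise return the number of pages mapped.
 */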
static inline int ddp_gl_map(struct pci_dev *pdev,
                             struct cxgb3i_gather_list *gl)
{
        int i;

        for (i = 0; i < gl->nelem; i++) {
                gl->phys_addr[i] = pci_map_page(pdev, gl->pages[i], 0,
                                                PAGE_SIZE,
                                                PCI_DMA_FROMDEVICE);
                if (unlikely(pci_dma_mapping_error(pdev, gl->phys_addr[i])))
                        goto unmap;
        }

        return i;

unmap:
        if (i) {
                unsigned int nelem = gl->nelem;

                gl->nelem = i;
                ddp_gl_unmap(pdev, gl);
                gl->nelem = nelem;
        }
        return -ENOMEM;
}

/**
 * cxgb3i_ddp_make_gl - build ddp page buffer list
 * @xferlen: total buffer length
 * @sgl: page buffer scatter-gather list
 * @sgcnt: # of page buffers
 * @pdev: pci_dev, used for pci map
 * @gfp: allocation mode
 *
 * construct a ddp page buffer list from the scsi scatter-gather list.
 * coalesce buffers as much as possible, and obtain dma addresses for
 * each page.
 *
 * Return the cxgb3i_gather_list constructed from the page buffers if the
 * memory can be used for ddp. Return NULL otherwise.
 */
struct cxgb3i_gather_list *cxgb3i_ddp_make_gl(unsigned int xferlen,
                                              struct scatterlist *sgl,
                                              unsigned int sgcnt,
                                              struct pci_dev *pdev,
                                              gfp_t gfp)
{
        struct cxgb3i_gather_list *gl;
        struct scatterlist *sg = sgl;
        struct page *sgpage = sg_page(sg);
        unsigned int sglen = sg->length;
        unsigned int sgoffset = sg->offset;
        unsigned int npages = (xferlen + sgoffset + PAGE_SIZE - 1) >>
                              PAGE_SHIFT;
        int i = 1, j = 0;

        if (xferlen < DDP_THRESHOLD) {
                ddp_log_debug("xfer %u < threshold %u, no ddp.\n",
                              xferlen, DDP_THRESHOLD);
                return NULL;
        }

        gl = kzalloc(sizeof(struct cxgb3i_gather_list) +
                     npages * (sizeof(dma_addr_t) + sizeof(struct page *)),
                     gfp);
        if (!gl)
                return NULL;

        gl->pages = (struct page **)&gl->phys_addr[npages];
        gl->length = xferlen;
        gl->offset = sgoffset;
        gl->pages[0] = sgpage;

        sg = sg_next(sg);
        while (sg) {
                struct page *page = sg_page(sg);

                if (sgpage == page && sg->offset == sgoffset + sglen)
                        sglen += sg->length;
                else {
                        /* make sure the sgl fits ddp: only the first
                         * buffer may start at a non-zero page offset,
                         * and all of the middle pages must be used
                         * completely
                         */
                        if ((j && sgoffset) ||
                            ((i != sgcnt - 1) &&
                             ((sglen + sgoffset) & ~PAGE_MASK)))
                                goto error_out;

                        j++;
                        if (j == gl->nelem || sg->offset)
                                goto error_out;
                        gl->pages[j] = page;
                        sglen = sg->length;
                        sgoffset = sg->offset;
                        sgpage = page;
                }
                i++;
                sg = sg_next(sg);
        }
        gl->nelem = ++j;

        if (ddp_gl_map(pdev, gl) < 0)
                goto error_out;

        return gl;

error_out:
        kfree(gl);
        return NULL;
}

/**
 * cxgb3i_ddp_release_gl - release a page buffer list
 * @gl: a ddp page buffer list
 * @pdev: pci_dev used for pci_unmap
 * free a ddp page buffer list built by cxgb3i_ddp_make_gl().
 */
void cxgb3i_ddp_release_gl(struct cxgb3i_gather_list *gl,
                           struct pci_dev *pdev)
{
        ddp_gl_unmap(pdev, gl);
        kfree(gl);
}

/**
 * cxgb3i_ddp_tag_reserve - set up ddp for a data transfer
 * @tdev: t3cdev adapter
 * @tid: connection id
 * @tformat: tag format
 * @tagp: contains s/w tag initially, will be updated with ddp/hw tag
 * @gl: the page memory list
 * @gfp: allocation mode
 *
 * ddp setup for a given page buffer list and construct the ddp tag.
 * Returns 0 on success, < 0 otherwise.
 */
int cxgb3i_ddp_tag_reserve(struct t3cdev *tdev, unsigned int tid,
                           struct cxgb3i_tag_format *tformat, u32 *tagp,
                           struct cxgb3i_gather_list *gl, gfp_t gfp)
{
        struct cxgb3i_ddp_info *ddp = tdev->ulp_iscsi;
        struct pagepod_hdr hdr;
        unsigned int npods;
        int idx = -1, idx_max;
        int err = -ENOMEM;
        u32 sw_tag = *tagp;
        u32 tag;

        if (page_idx >= DDP_PGIDX_MAX || !ddp || !gl || !gl->nelem ||
                gl->length < DDP_THRESHOLD) {
                ddp_log_debug("pgidx %u, xfer %u/%u, NO ddp.\n",
                              page_idx, gl->length, DDP_THRESHOLD);
                return -EINVAL;
        }

        npods = (gl->nelem + PPOD_PAGES_MAX - 1) >> PPOD_PAGES_SHIFT;
        idx_max = ddp->nppods - npods + 1;

        if (ddp->idx_last == ddp->nppods)
                idx = ddp_find_unused_entries(ddp, 0, idx_max, npods, gl);
        else {
                idx = ddp_find_unused_entries(ddp, ddp->idx_last + 1,
                                              idx_max, npods, gl);
                if (idx < 0 && ddp->idx_last >= npods)
                        idx = ddp_find_unused_entries(ddp, 0,
                                                      ddp->idx_last - npods + 1,
                                                      npods, gl);
        }
        if (idx < 0) {
                ddp_log_debug("xferlen %u, gl %u, npods %u NO DDP.\n",
                              gl->length, gl->nelem, npods);
                return idx;
        }

        err = ddp_alloc_gl_skb(ddp, idx, npods, gfp);
        if (err < 0)
                goto unmark_entries;

        tag = cxgb3i_ddp_tag_base(tformat, sw_tag);
        tag |= idx << PPOD_IDX_SHIFT;

        hdr.rsvd = 0;
        hdr.vld_tid = htonl(F_PPOD_VALID | V_PPOD_TID(tid));
        hdr.pgsz_tag_clr = htonl(tag & ddp->rsvd_tag_mask);
        hdr.maxoffset = htonl(gl->length);
        hdr.pgoffset = htonl(gl->offset);

        err = set_ddp_map(ddp, &hdr, idx, npods, gl);
        if (err < 0)
                goto free_gl_skb;

        ddp->idx_last = idx;
        ddp_log_debug("xfer %u, gl %u,%u, tid 0x%x, 0x%x -> 0x%x(%u,%u).\n",
                      gl->length, gl->nelem, gl->offset, tid, sw_tag, tag,
                      idx, npods);
        *tagp = tag;
        return 0;

free_gl_skb:
        ddp_free_gl_skb(ddp, idx, npods);
unmark_entries:
        ddp_unmark_entries(ddp, idx, npods);
        return err;
}

/**
 * cxgb3i_ddp_tag_release - release a ddp tag
 * @tdev: t3cdev adapter
 * @tag: ddp tag
 * ddp cleanup for a given ddp tag and release all the resources held
 */
void cxgb3i_ddp_tag_release(struct t3cdev *tdev, u32 tag)
{
        struct cxgb3i_ddp_info *ddp = tdev->ulp_iscsi;
        u32 idx;

        if (!ddp) {
                ddp_log_error("release ddp tag 0x%x, ddp NULL.\n", tag);
                return;
        }

        idx = (tag >> PPOD_IDX_SHIFT) & ddp->idx_mask;
        if (idx < ddp->nppods) {
                struct cxgb3i_gather_list *gl = ddp->gl_map[idx];
                unsigned int npods;

                if (!gl || !gl->nelem) {
                        ddp_log_error("release 0x%x, idx 0x%x, gl 0x%p, %u.\n",
                                      tag, idx, gl, gl ? gl->nelem : 0);
                        return;
                }
                npods = (gl->nelem + PPOD_PAGES_MAX - 1) >> PPOD_PAGES_SHIFT;
                ddp_log_debug("ddp tag 0x%x, release idx 0x%x, npods %u.\n",
                              tag, idx, npods);
                clear_ddp_map(ddp, tag, idx, npods);
                ddp_unmark_entries(ddp, idx, npods);
                cxgb3i_ddp_release_gl(gl, ddp->pdev);
        } else
                ddp_log_error("ddp tag 0x%x, idx 0x%x > max 0x%x.\n",
                              tag, idx, ddp->nppods);
}

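/*
 * setup_conn_pgidx - send a CPL_SET_TCB_FIELD request that writes the ddp
 * page-size index for connection @tid into the upper nibble of TCB word 31.
 */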
static int setup_conn_pgidx(struct t3cdev *tdev, unsigned int tid, int pg_idx,
                            int reply)
{
        struct sk_buff *skb = alloc_skb(sizeof(struct cpl_set_tcb_field),
                                        GFP_KERNEL);
        struct cpl_set_tcb_field *req;
        u64 val = pg_idx < DDP_PGIDX_MAX ? pg_idx : 0;

        if (!skb)
                return -ENOMEM;

        /* set up ulp page size */
        req = (struct cpl_set_tcb_field *)skb_put(skb, sizeof(*req));
        req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
        req->reply = V_NO_REPLY(reply ? 0 : 1);
        req->cpu_idx = 0;
        req->word = htons(31);
        req->mask = cpu_to_be64(0xF0000000);
        req->val = cpu_to_be64(val << 28);
        skb->priority = CPL_PRIORITY_CONTROL;

        cxgb3_ofld_send(tdev, skb);
        return 0;
}

/**
 * cxgb3i_setup_conn_host_pagesize - setup the conn.'s ddp page size
 * @tdev: t3cdev adapter
 * @tid: connection id
 * @reply: request reply from h/w
 * set up the ddp page size based on the host PAGE_SIZE for a connection
 * identified by tid
 */
int cxgb3i_setup_conn_host_pagesize(struct t3cdev *tdev, unsigned int tid,
                                    int reply)
{
        return setup_conn_pgidx(tdev, tid, page_idx, reply);
}

/**
 * cxgb3i_setup_conn_pagesize - setup the conn.'s ddp page size
 * @tdev: t3cdev adapter
 * @tid: connection id
 * @reply: request reply from h/w
 * @pgsz: ddp page size
 * set up the ddp page size for a connection identified by tid
 */
int cxgb3i_setup_conn_pagesize(struct t3cdev *tdev, unsigned int tid,
                                int reply, unsigned long pgsz)
{
        int pgidx = cxgb3i_ddp_find_page_index(pgsz);

        return setup_conn_pgidx(tdev, tid, pgidx, reply);
}

/**
 * cxgb3i_setup_conn_digest - setup conn. digest setting
 * @tdev: t3cdev adapter
 * @tid: connection id
 * @hcrc: header digest enabled
 * @dcrc: data digest enabled
 * @reply: request reply from h/w
 * set up the iscsi digest settings for a connection identified by tid
 */
int cxgb3i_setup_conn_digest(struct t3cdev *tdev, unsigned int tid,
                             int hcrc, int dcrc, int reply)
{
        struct sk_buff *skb = alloc_skb(sizeof(struct cpl_set_tcb_field),
                                        GFP_KERNEL);
        struct cpl_set_tcb_field *req;
        u64 val = (hcrc ? 1 : 0) | (dcrc ? 2 : 0);

        if (!skb)
                return -ENOMEM;

        /* set up ulp submode */
        req = (struct cpl_set_tcb_field *)skb_put(skb, sizeof(*req));
        req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
        req->reply = V_NO_REPLY(reply ? 0 : 1);
        req->cpu_idx = 0;
        req->word = htons(31);
        req->mask = cpu_to_be64(0x0F000000);
        req->val = cpu_to_be64(val << 24);
        skb->priority = CPL_PRIORITY_CONTROL;

        cxgb3_ofld_send(tdev, skb);
        return 0;
}


/**
 * cxgb3i_adapter_ddp_info - read the adapter's ddp information
 * @tdev: t3cdev adapter
 * @tformat: tag format
 * @txsz: max tx pdu payload size, filled in by this func.
 * @rxsz: max rx pdu payload size, filled in by this func.
 * setup the tag format for a given iscsi entity
 */
int cxgb3i_adapter_ddp_info(struct t3cdev *tdev,
                            struct cxgb3i_tag_format *tformat,
                            unsigned int *txsz, unsigned int *rxsz)
{
        struct cxgb3i_ddp_info *ddp;
        unsigned char idx_bits;

        if (!tformat)
                return -EINVAL;

        if (!tdev->ulp_iscsi)
                return -EINVAL;

        ddp = (struct cxgb3i_ddp_info *)tdev->ulp_iscsi;

        idx_bits = 32 - tformat->sw_bits;
        tformat->rsvd_bits = ddp->idx_bits;
        tformat->rsvd_shift = PPOD_IDX_SHIFT;
        tformat->rsvd_mask = (1 << tformat->rsvd_bits) - 1;

        ddp_log_info("tag format: sw %u, rsvd %u,%u, mask 0x%x.\n",
                      tformat->sw_bits, tformat->rsvd_bits,
                      tformat->rsvd_shift, tformat->rsvd_mask);

        *txsz = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD,
                        ddp->max_txsz - ISCSI_PDU_NONPAYLOAD_LEN);
        *rxsz = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD,
                        ddp->max_rxsz - ISCSI_PDU_NONPAYLOAD_LEN);
        ddp_log_info("max payload size: %u/%u, %u/%u.\n",
                     *txsz, ddp->max_txsz, *rxsz, ddp->max_rxsz);
        return 0;
}

/**
 * cxgb3i_ddp_cleanup - release the cxgb3 adapter's ddp resource
 * @tdev: t3cdev adapter
 * release all the resources held by the ddp pagepod manager for a given
 * adapter if needed
 */
void cxgb3i_ddp_cleanup(struct t3cdev *tdev)
{
        int i = 0;
        struct cxgb3i_ddp_info *ddp = (struct cxgb3i_ddp_info *)tdev->ulp_iscsi;

        ddp_log_info("t3dev 0x%p, release ddp 0x%p.\n", tdev, ddp);

        if (ddp) {
                tdev->ulp_iscsi = NULL;
                while (i < ddp->nppods) {
                        struct cxgb3i_gather_list *gl = ddp->gl_map[i];
                        if (gl) {
                                int npods = (gl->nelem + PPOD_PAGES_MAX - 1)
                                                >> PPOD_PAGES_SHIFT;
                                ddp_log_info("t3dev 0x%p, ddp %d + %d.\n",
                                                tdev, i, npods);
                                kfree(gl);
                                ddp_free_gl_skb(ddp, i, npods);
                                i += npods;
                        } else
                                i++;
                }
                cxgb3i_free_big_mem(ddp);
        }
}

/**
 * ddp_init - initialize the cxgb3 adapter's ddp resource
 * @tdev: t3cdev adapter
 * initialize the ddp pagepod manager for a given adapter
 */
static void ddp_init(struct t3cdev *tdev)
{
        struct cxgb3i_ddp_info *ddp;
        struct ulp_iscsi_info uinfo;
        unsigned int ppmax, bits;
        int i, err;

        if (tdev->ulp_iscsi) {
                ddp_log_warn("t3dev 0x%p, ddp 0x%p already set up.\n",
                                tdev, tdev->ulp_iscsi);
                return;
        }

        err = tdev->ctl(tdev, ULP_ISCSI_GET_PARAMS, &uinfo);
        if (err < 0) {
                ddp_log_error("%s, failed to get iscsi param err=%d.\n",
                                 tdev->name, err);
                return;
        }

        ppmax = (uinfo.ulimit - uinfo.llimit + 1) >> PPOD_SIZE_SHIFT;
        bits = __ilog2_u32(ppmax) + 1;
        if (bits > PPOD_IDX_MAX_SIZE)
                bits = PPOD_IDX_MAX_SIZE;
        ppmax = (1 << (bits - 1)) - 1;

        ddp = cxgb3i_alloc_big_mem(sizeof(struct cxgb3i_ddp_info) +
                                   ppmax *
                                        (sizeof(struct cxgb3i_gather_list *) +
                                        sizeof(struct sk_buff *)),
                                   GFP_KERNEL);
        if (!ddp) {
                ddp_log_warn("%s unable to alloc ddp 0x%x, ddp disabled.\n",
                             tdev->name, ppmax);
                return;
        }
        ddp->gl_map = (struct cxgb3i_gather_list **)(ddp + 1);
        ddp->gl_skb = (struct sk_buff **)(((char *)ddp->gl_map) +
                                          ppmax *
                                          sizeof(struct cxgb3i_gather_list *));
        spin_lock_init(&ddp->map_lock);

        ddp->tdev = tdev;
        ddp->pdev = uinfo.pdev;
        ddp->max_txsz = min_t(unsigned int, uinfo.max_txsz, ULP2_MAX_PKT_SIZE);
        ddp->max_rxsz = min_t(unsigned int, uinfo.max_rxsz, ULP2_MAX_PKT_SIZE);
        ddp->llimit = uinfo.llimit;
        ddp->ulimit = uinfo.ulimit;
        ddp->nppods = ppmax;
        ddp->idx_last = ppmax;
        ddp->idx_bits = bits;
        ddp->idx_mask = (1 << bits) - 1;
        ddp->rsvd_tag_mask = (1 << (bits + PPOD_IDX_SHIFT)) - 1;

        uinfo.tagmask = ddp->idx_mask << PPOD_IDX_SHIFT;
        for (i = 0; i < DDP_PGIDX_MAX; i++)
                uinfo.pgsz_factor[i] = ddp_page_order[i];
        uinfo.ulimit = uinfo.llimit + (ppmax << PPOD_SIZE_SHIFT);

        err = tdev->ctl(tdev, ULP_ISCSI_SET_PARAMS, &uinfo);
        if (err < 0) {
                ddp_log_warn("%s unable to set iscsi param err=%d, "
                              "ddp disabled.\n", tdev->name, err);
                goto free_ddp_map;
        }

        tdev->ulp_iscsi = ddp;

        ddp_log_info("tdev 0x%p, nppods %u, bits %u, mask 0x%x,0x%x pkt %u/%u,"
                        " %u/%u.\n",
                        tdev, ppmax, ddp->idx_bits, ddp->idx_mask,
                        ddp->rsvd_tag_mask, ddp->max_txsz, uinfo.max_txsz,
                        ddp->max_rxsz, uinfo.max_rxsz);
        return;

free_ddp_map:
        cxgb3i_free_big_mem(ddp);
}

/**
 * cxgb3i_ddp_init - initialize ddp functions
 * @tdev: t3cdev adapter
 */
void cxgb3i_ddp_init(struct t3cdev *tdev)
{
        if (page_idx == DDP_PGIDX_MAX) {
                page_idx = cxgb3i_ddp_find_page_index(PAGE_SIZE);
                ddp_log_info("system PAGE_SIZE %lu, ddp idx %u.\n",
                                PAGE_SIZE, page_idx);
        }
        ddp_init(tdev);
}