/*
 *  linux/drivers/net/ehea/ehea_qmr.c
 *
 *  eHEA ethernet device driver for IBM eServer System p
 *
 *  (C) Copyright IBM Corp. 2006
 *
 *  Authors:
 *       Christoph Raisch <raisch@de.ibm.com>
 *       Jan-Bernd Themann <themann@de.ibm.com>
 *       Thomas Klein <tklein@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/mm.h>
#include "ehea.h"
#include "ehea_phyp.h"
#include "ehea_qmr.h"

struct ehea_busmap ehea_bmap = { 0, 0, NULL };
extern u64 ehea_driver_flags;
extern struct workqueue_struct *ehea_driver_wq;
extern struct work_struct ehea_rereg_mr_task;

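/*
 * Return the current queue page and advance the cursor by one hardware
 * page. Returns NULL once the end of the queue is reached, or when the
 * returned page is not aligned to EHEA_PAGESIZE.
 */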
static void *hw_qpageit_get_inc(struct hw_queue *queue)
{
	void *retvalue = hw_qeit_get(queue);

	queue->current_q_offset += queue->pagesize;
	if (queue->current_q_offset > queue->queue_length) {
		queue->current_q_offset -= queue->pagesize;
		retvalue = NULL;
	} else if (((u64) retvalue) & (EHEA_PAGESIZE - 1)) {
		ehea_error("not on pageboundary");
		retvalue = NULL;
	}
	return retvalue;
}

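/*
 * Build a hardware queue out of nr_of_pages queue pages of pagesize
 * bytes each: whole kernel pages are allocated and carved into smaller
 * hea queue pages, so pagesize must not exceed PAGE_SIZE.
 */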
static int hw_queue_ctor(struct hw_queue *queue, const u32 nr_of_pages,
			 const u32 pagesize, const u32 qe_size)
{
	int pages_per_kpage = PAGE_SIZE / pagesize;
	int i, k;

	if ((pagesize > PAGE_SIZE) || (!pages_per_kpage)) {
		ehea_error("pagesize conflict! kernel pagesize=%d, "
			   "ehea pagesize=%d", (int)PAGE_SIZE, (int)pagesize);
		return -EINVAL;
	}

	queue->queue_length = nr_of_pages * pagesize;
	/* zeroed so the cleanup path below can tell how far we got */
	queue->queue_pages = kzalloc(nr_of_pages * sizeof(void *), GFP_KERNEL);
	if (!queue->queue_pages) {
		ehea_error("no mem for queue_pages");
		return -ENOMEM;
	}

	/*
	 * allocate pages for queue:
	 * outer loop allocates whole kernel pages (page aligned) and
	 * inner loop divides a kernel page into smaller hea queue pages
	 */
	i = 0;
	while (i < nr_of_pages) {
		u8 *kpage = (u8 *)get_zeroed_page(GFP_KERNEL);
		if (!kpage)
			goto out_nomem;
		for (k = 0; k < pages_per_kpage && i < nr_of_pages; k++) {
			(queue->queue_pages)[i] = (struct ehea_page *)kpage;
			kpage += pagesize;
			i++;
		}
	}

	queue->current_q_offset = 0;
	queue->qe_size = qe_size;
	queue->pagesize = pagesize;
	queue->toggle_state = 1;
	return 0;

out_nomem:
	/* only every pages_per_kpage-th entry starts a kernel page */
	for (i = 0; i < nr_of_pages; i += pages_per_kpage) {
		if (!(queue->queue_pages)[i])
			break;
		free_page((unsigned long)(queue->queue_pages)[i]);
	}
	return -ENOMEM;
}

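/* Free the kernel pages backing a hardware queue and the page vector. */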
static void hw_queue_dtor(struct hw_queue *queue)
{
	int pages_per_kpage, i, nr_pages;

	if (!queue || !queue->queue_pages)
		return;

	/* dereference queue only after the NULL check above */
	pages_per_kpage = PAGE_SIZE / queue->pagesize;
	nr_pages = queue->queue_length / queue->pagesize;

	for (i = 0; i < nr_pages; i += pages_per_kpage)
		free_page((unsigned long)(queue->queue_pages)[i]);
	kfree(queue->queue_pages);
}

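/*
 * Allocate a completion queue: the firmware resource is created first,
 * then the backing memory is allocated and every queue page is handed
 * to the hypervisor via ehea_h_register_rpage().
 */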
struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter,
			       int nr_of_cqe, u64 eq_handle, u32 cq_token)
{
	struct ehea_cq *cq;
	struct h_epa epa;
	u64 *cq_handle_ref, hret, rpage;
	u32 act_nr_of_entries, act_pages, counter;
	int ret = 0;
	void *vpage;

	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
	if (!cq) {
		ehea_error("no mem for cq");
		goto out_nomem;
	}

	cq->attr.max_nr_of_cqes = nr_of_cqe;
	cq->attr.cq_token = cq_token;
	cq->attr.eq_handle = eq_handle;
	cq->adapter = adapter;

	cq_handle_ref = &cq->fw_handle;
	act_nr_of_entries = 0;
	act_pages = 0;

	hret = ehea_h_alloc_resource_cq(adapter->handle, &cq->attr,
					&cq->fw_handle, &cq->epas);
	if (hret != H_SUCCESS) {
		ehea_error("alloc_resource_cq failed");
		goto out_freemem;
	}

	ret = hw_queue_ctor(&cq->hw_queue, cq->attr.nr_pages,
			    EHEA_PAGESIZE, sizeof(struct ehea_cqe));
	if (ret)
		goto out_freeres;

	for (counter = 0; counter < cq->attr.nr_pages; counter++) {
		vpage = hw_qpageit_get_inc(&cq->hw_queue);
		if (!vpage) {
			ehea_error("hw_qpageit_get_inc failed");
			goto out_kill_hwq;
		}

		rpage = virt_to_abs(vpage);
		hret = ehea_h_register_rpage(adapter->handle,
					     0, EHEA_CQ_REGISTER_ORIG,
					     cq->fw_handle, rpage, 1);
		if (hret < H_SUCCESS) {
			ehea_error("register_rpage_cq failed ehea_cq=%p "
				   "hret=%lx counter=%i act_pages=%i",
				   cq, hret, counter, cq->attr.nr_pages);
			goto out_kill_hwq;
		}

		if (counter == (cq->attr.nr_pages - 1)) {
			/* no page may follow the last one */
			vpage = hw_qpageit_get_inc(&cq->hw_queue);
			if ((hret != H_SUCCESS) || (vpage)) {
				ehea_error("registration of pages not "
					   "complete hret=%lx\n", hret);
				goto out_kill_hwq;
			}
		} else {
			if ((hret != H_PAGE_REGISTERED) || (!vpage)) {
				ehea_error("CQ: registration of page failed "
					   "hret=%lx\n", hret);
				goto out_kill_hwq;
			}
		}
	}

	hw_qeit_reset(&cq->hw_queue);
	epa = cq->epas.kernel;
	ehea_reset_cq_ep(cq);
	ehea_reset_cq_n1(cq);
	return cq;

out_kill_hwq:
	hw_queue_dtor(&cq->hw_queue);
out_freeres:
	ehea_h_free_resource(adapter->handle, cq->fw_handle, FORCE_FREE);
out_freemem:
	kfree(cq);
out_nomem:
	return NULL;
}

u64 ehea_destroy_cq_res(struct ehea_cq *cq, u64 force)
{
	u64 hret;
	u64 adapter_handle = cq->adapter->handle;

	/* deregister all previously registered pages */
	hret = ehea_h_free_resource(adapter_handle, cq->fw_handle, force);
	if (hret != H_SUCCESS)
		return hret;

	hw_queue_dtor(&cq->hw_queue);
	kfree(cq);
	return hret;
}

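/*
 * Destroy a CQ: try a normal free first; if the resource is still busy
 * (H_R_STATE), dump the firmware error data and force the free.
 */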
int ehea_destroy_cq(struct ehea_cq *cq)
{
	u64 hret;

	if (!cq)
		return 0;

	hcp_epas_dtor(&cq->epas);
	if ((hret = ehea_destroy_cq_res(cq, NORMAL_FREE)) == H_R_STATE) {
		ehea_error_data(cq->adapter, cq->fw_handle);
		hret = ehea_destroy_cq_res(cq, FORCE_FREE);
	}

	if (hret != H_SUCCESS) {
		ehea_error("destroy CQ failed");
		return -EIO;
	}
	return 0;
}

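/*
 * Allocate an event queue of the given type and register its pages
 * with the hypervisor, mirroring the CQ setup above.
 */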
struct ehea_eq *ehea_create_eq(struct ehea_adapter *adapter,
			       const enum ehea_eq_type type,
			       const u32 max_nr_of_eqes, const u8 eqe_gen)
{
	int ret, i;
	u64 hret, rpage;
	void *vpage;
	struct ehea_eq *eq;

	eq = kzalloc(sizeof(*eq), GFP_KERNEL);
	if (!eq) {
		ehea_error("no mem for eq");
		return NULL;
	}

	eq->adapter = adapter;
	eq->attr.type = type;
	eq->attr.max_nr_of_eqes = max_nr_of_eqes;
	eq->attr.eqe_gen = eqe_gen;
	spin_lock_init(&eq->spinlock);

	hret = ehea_h_alloc_resource_eq(adapter->handle,
					&eq->attr, &eq->fw_handle);
	if (hret != H_SUCCESS) {
		ehea_error("alloc_resource_eq failed");
		goto out_freemem;
	}

	ret = hw_queue_ctor(&eq->hw_queue, eq->attr.nr_pages,
			    EHEA_PAGESIZE, sizeof(struct ehea_eqe));
	if (ret) {
		ehea_error("can't allocate eq pages");
		goto out_freeres;
	}

	for (i = 0; i < eq->attr.nr_pages; i++) {
		vpage = hw_qpageit_get_inc(&eq->hw_queue);
		if (!vpage) {
			ehea_error("hw_qpageit_get_inc failed");
			goto out_kill_hwq;
		}

		rpage = virt_to_abs(vpage);
		hret = ehea_h_register_rpage(adapter->handle, 0,
					     EHEA_EQ_REGISTER_ORIG,
					     eq->fw_handle, rpage, 1);

		if (i == (eq->attr.nr_pages - 1)) {
			/* no page may follow the last one */
			vpage = hw_qpageit_get_inc(&eq->hw_queue);
			if ((hret != H_SUCCESS) || (vpage))
				goto out_kill_hwq;
		} else {
			if ((hret != H_PAGE_REGISTERED) || (!vpage))
				goto out_kill_hwq;
		}
	}

	hw_qeit_reset(&eq->hw_queue);
	return eq;

out_kill_hwq:
	hw_queue_dtor(&eq->hw_queue);
out_freeres:
	ehea_h_free_resource(adapter->handle, eq->fw_handle, FORCE_FREE);
out_freemem:
	kfree(eq);
	return NULL;
}

struct ehea_eqe *ehea_poll_eq(struct ehea_eq *eq)
{
	struct ehea_eqe *eqe;
	unsigned long flags;

	spin_lock_irqsave(&eq->spinlock, flags);
	eqe = (struct ehea_eqe *)hw_eqit_eq_get_inc_valid(&eq->hw_queue);
	spin_unlock_irqrestore(&eq->spinlock, flags);
	return eqe;
}

u64 ehea_destroy_eq_res(struct ehea_eq *eq, u64 force)
{
	u64 hret;
	unsigned long flags;

	spin_lock_irqsave(&eq->spinlock, flags);
	hret = ehea_h_free_resource(eq->adapter->handle, eq->fw_handle, force);
	spin_unlock_irqrestore(&eq->spinlock, flags);

	if (hret != H_SUCCESS)
		return hret;

	hw_queue_dtor(&eq->hw_queue);
	kfree(eq);
	return hret;
}

int ehea_destroy_eq(struct ehea_eq *eq)
{
	u64 hret;

	if (!eq)
		return 0;

	hcp_epas_dtor(&eq->epas);
	if ((hret = ehea_destroy_eq_res(eq, NORMAL_FREE)) == H_R_STATE) {
		ehea_error_data(eq->adapter, eq->fw_handle);
		hret = ehea_destroy_eq_res(eq, FORCE_FREE);
	}

	if (hret != H_SUCCESS) {
		ehea_error("destroy EQ failed");
		return -EIO;
	}
	return 0;
}

/*
 * allocates memory for a queue and registers pages in phyp
 */
int ehea_qp_alloc_register(struct ehea_qp *qp, struct hw_queue *hw_queue,
			   int nr_pages, int wqe_size, int act_nr_sges,
			   struct ehea_adapter *adapter, int h_call_q_selector)
{
	u64 hret, rpage;
	int ret, cnt;
	void *vpage;

	ret = hw_queue_ctor(hw_queue, nr_pages, EHEA_PAGESIZE, wqe_size);
	if (ret)
		return ret;

	for (cnt = 0; cnt < nr_pages; cnt++) {
		vpage = hw_qpageit_get_inc(hw_queue);
		if (!vpage) {
			ehea_error("hw_qpageit_get_inc failed");
			goto out_kill_hwq;
		}
		rpage = virt_to_abs(vpage);
		hret = ehea_h_register_rpage(adapter->handle,
					     0, h_call_q_selector,
					     qp->fw_handle, rpage, 1);
		if (hret < H_SUCCESS) {
			ehea_error("register_rpage_qp failed");
			goto out_kill_hwq;
		}
	}
	hw_qeit_reset(hw_queue);
	return 0;

out_kill_hwq:
	hw_queue_dtor(hw_queue);
	return -EIO;
}

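/*
 * WQE sizes are firmware-encoded as a shift of a 128-byte base:
 * enc 0 -> 128 bytes, enc 1 -> 256 bytes, enc 2 -> 512 bytes, ...
 */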
static inline u32 map_wqe_size(u8 wqe_enc_size)
{
	return 128 << wqe_enc_size;
}

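/*
 * Create a queue pair: one send queue plus up to three receive queues,
 * depending on init_attr->rq_count. Each queue is allocated and
 * registered via ehea_qp_alloc_register(); on failure, all previously
 * set up queues are torn down in reverse order.
 */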
struct ehea_qp *ehea_create_qp(struct ehea_adapter *adapter,
			       u32 pd, struct ehea_qp_init_attr *init_attr)
{
	int ret;
	u64 hret;
	struct ehea_qp *qp;
	u32 wqe_size_in_bytes_sq, wqe_size_in_bytes_rq1;
	u32 wqe_size_in_bytes_rq2, wqe_size_in_bytes_rq3;

	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
	if (!qp) {
		ehea_error("no mem for qp");
		return NULL;
	}

	qp->adapter = adapter;

	hret = ehea_h_alloc_resource_qp(adapter->handle, init_attr, pd,
					&qp->fw_handle, &qp->epas);
	if (hret != H_SUCCESS) {
		ehea_error("ehea_h_alloc_resource_qp failed");
		goto out_freemem;
	}

	wqe_size_in_bytes_sq = map_wqe_size(init_attr->act_wqe_size_enc_sq);
	wqe_size_in_bytes_rq1 = map_wqe_size(init_attr->act_wqe_size_enc_rq1);
	wqe_size_in_bytes_rq2 = map_wqe_size(init_attr->act_wqe_size_enc_rq2);
	wqe_size_in_bytes_rq3 = map_wqe_size(init_attr->act_wqe_size_enc_rq3);

	ret = ehea_qp_alloc_register(qp, &qp->hw_squeue, init_attr->nr_sq_pages,
				     wqe_size_in_bytes_sq,
				     init_attr->act_wqe_size_enc_sq, adapter,
				     0);
	if (ret) {
		ehea_error("can't register for sq ret=%x", ret);
		goto out_freeres;
	}

	ret = ehea_qp_alloc_register(qp, &qp->hw_rqueue1,
				     init_attr->nr_rq1_pages,
				     wqe_size_in_bytes_rq1,
				     init_attr->act_wqe_size_enc_rq1,
				     adapter, 1);
	if (ret) {
		ehea_error("can't register for rq1 ret=%x", ret);
		goto out_kill_hwsq;
	}

	if (init_attr->rq_count > 1) {
		ret = ehea_qp_alloc_register(qp, &qp->hw_rqueue2,
					     init_attr->nr_rq2_pages,
					     wqe_size_in_bytes_rq2,
					     init_attr->act_wqe_size_enc_rq2,
					     adapter, 2);
		if (ret) {
			ehea_error("can't register for rq2 ret=%x", ret);
			goto out_kill_hwr1q;
		}
	}

	if (init_attr->rq_count > 2) {
		ret = ehea_qp_alloc_register(qp, &qp->hw_rqueue3,
					     init_attr->nr_rq3_pages,
					     wqe_size_in_bytes_rq3,
					     init_attr->act_wqe_size_enc_rq3,
					     adapter, 3);
		if (ret) {
			ehea_error("can't register for rq3 ret=%x", ret);
			goto out_kill_hwr2q;
		}
	}

	qp->init_attr = *init_attr;
	return qp;

out_kill_hwr2q:
	hw_queue_dtor(&qp->hw_rqueue2);
out_kill_hwr1q:
	hw_queue_dtor(&qp->hw_rqueue1);
out_kill_hwsq:
	hw_queue_dtor(&qp->hw_squeue);
out_freeres:
	ehea_h_disable_and_get_hea(adapter->handle, qp->fw_handle);
	ehea_h_free_resource(adapter->handle, qp->fw_handle, FORCE_FREE);
out_freemem:
	kfree(qp);
	return NULL;
}

u64 ehea_destroy_qp_res(struct ehea_qp *qp, u64 force)
{
	u64 hret;
	struct ehea_qp_init_attr *qp_attr = &qp->init_attr;

	ehea_h_disable_and_get_hea(qp->adapter->handle, qp->fw_handle);
	hret = ehea_h_free_resource(qp->adapter->handle, qp->fw_handle, force);
	if (hret != H_SUCCESS)
		return hret;

	hw_queue_dtor(&qp->hw_squeue);
	hw_queue_dtor(&qp->hw_rqueue1);

	if (qp_attr->rq_count > 1)
		hw_queue_dtor(&qp->hw_rqueue2);
	if (qp_attr->rq_count > 2)
		hw_queue_dtor(&qp->hw_rqueue3);
	kfree(qp);
	return hret;
}

int ehea_destroy_qp(struct ehea_qp *qp)
{
	u64 hret;

	if (!qp)
		return 0;

	hcp_epas_dtor(&qp->epas);
	if ((hret = ehea_destroy_qp_res(qp, NORMAL_FREE)) == H_R_STATE) {
		ehea_error_data(qp->adapter, qp->fw_handle);
		hret = ehea_destroy_qp_res(qp, FORCE_FREE);
	}

	if (hret != H_SUCCESS) {
		ehea_error("destroy QP failed");
		return -EIO;
	}
	return 0;
}

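/*
 * Build the busmap: a table with one entry per memory section that
 * translates kernel addresses into the contiguous bus address space
 * starting at EHEA_BUSMAP_START, as seen by the HEA adapter.
 */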
int ehea_create_busmap( void )
{
	u64 vaddr = EHEA_BUSMAP_START;
	unsigned long abs_max_pfn = 0;
	unsigned long sec_max_pfn;
	int i;

	/*
	 * Sections are not in ascending order -> loop over all sections and
	 * find the highest PFN to compute the required map size.
	 */
	ehea_bmap.valid_sections = 0;

	for (i = 0; i < NR_MEM_SECTIONS; i++)
		if (valid_section_nr(i)) {
			sec_max_pfn = section_nr_to_pfn(i);
			if (sec_max_pfn > abs_max_pfn)
				abs_max_pfn = sec_max_pfn;
			ehea_bmap.valid_sections++;
		}

	ehea_bmap.entries = abs_max_pfn / EHEA_PAGES_PER_SECTION + 1;
	ehea_bmap.vaddr = vmalloc(ehea_bmap.entries * sizeof(*ehea_bmap.vaddr));
	if (!ehea_bmap.vaddr)
		return -ENOMEM;

	for (i = 0; i < ehea_bmap.entries; i++) {
		unsigned long pfn = section_nr_to_pfn(i);

		if (pfn_valid(pfn)) {
			ehea_bmap.vaddr[i] = vaddr;
			vaddr += EHEA_SECTSIZE;
		} else
			ehea_bmap.vaddr[i] = 0;
	}

	return 0;
}

void ehea_destroy_busmap( void )
{
	vfree(ehea_bmap.vaddr);
}

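/*
 * Translate a kernel address into the bus address space. Returns -1
 * when the address cannot be mapped; in that case all transfers are
 * stopped and the memory regions are re-registered from a workqueue.
 */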
u64 ehea_map_vaddr(void *caddr)
{
	u64 mapped_addr;
	unsigned long index = __pa(caddr) >> SECTION_SIZE_BITS;

	if (likely(index < ehea_bmap.entries)) {
		mapped_addr = ehea_bmap.vaddr[index];
		if (likely(mapped_addr))
			mapped_addr |= (((unsigned long)caddr)
					& (EHEA_SECTSIZE - 1));
		else
			mapped_addr = -1;
	} else
		mapped_addr = -1;

	if (unlikely(mapped_addr == -1))
		if (!test_and_set_bit(__EHEA_STOP_XFER, &ehea_driver_flags))
			queue_work(ehea_driver_wq, &ehea_rereg_mr_task);

	return mapped_addr;
}

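/*
 * Register all valid memory sections as one memory region with the
 * adapter. Pages are handed over in batches of EHEA_MAX_RPAGE entries
 * through the page table in pt[].
 */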
int ehea_reg_kernel_mr(struct ehea_adapter *adapter, struct ehea_mr *mr)
{
	int ret;
	u64 *pt;
	void *pg;
	u64 hret, pt_abs, i, j, m, mr_len;
	u32 acc_ctrl = EHEA_MR_ACC_CTRL;

	mr_len = ehea_bmap.valid_sections * EHEA_SECTSIZE;

	pt = kzalloc(EHEA_MAX_RPAGE * sizeof(u64), GFP_KERNEL);
	if (!pt) {
		ehea_error("no mem");
		ret = -ENOMEM;
		goto out;
	}
	pt_abs = virt_to_abs(pt);

	hret = ehea_h_alloc_resource_mr(adapter->handle,
					EHEA_BUSMAP_START, mr_len,
					acc_ctrl, adapter->pd,
					&mr->handle, &mr->lkey);
	if (hret != H_SUCCESS) {
		ehea_error("alloc_resource_mr failed");
		ret = -EIO;
		goto out;
	}

	for (i = 0; i < ehea_bmap.entries; i++)
		if (ehea_bmap.vaddr[i]) {
			void *sectbase = __va(i << SECTION_SIZE_BITS);
			unsigned long k = 0;

			for (j = 0; j < (PAGES_PER_SECTION / EHEA_MAX_RPAGE);
			     j++) {
				for (m = 0; m < EHEA_MAX_RPAGE; m++) {
					pg = sectbase + ((k++) * EHEA_PAGESIZE);
					pt[m] = virt_to_abs(pg);
				}

				hret = ehea_h_register_rpage_mr(adapter->handle,
								mr->handle, 0, 0,
								pt_abs,
								EHEA_MAX_RPAGE);
				if ((hret != H_SUCCESS)
				    && (hret != H_PAGE_REGISTERED)) {
					ehea_h_free_resource(adapter->handle,
							     mr->handle,
							     FORCE_FREE);
					ehea_error("register_rpage_mr failed");
					ret = -EIO;
					goto out;
				}
			}
		}

	if (hret != H_SUCCESS) {
		ehea_h_free_resource(adapter->handle, mr->handle, FORCE_FREE);
		ehea_error("registering mr failed");
		ret = -EIO;
		goto out;
	}

	mr->vaddr = EHEA_BUSMAP_START;
	mr->adapter = adapter;
	ret = 0;
out:
	kfree(pt);
	return ret;
}

int ehea_rem_mr(struct ehea_mr *mr)
{
	u64 hret;

	if (!mr || !mr->adapter)
		return -EINVAL;

	hret = ehea_h_free_resource(mr->adapter->handle, mr->handle,
				    FORCE_FREE);
	if (hret != H_SUCCESS) {
		ehea_error("destroy MR failed");
		return -EIO;
	}
	return 0;
}

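/*
 * Create a shared memory region based on an existing one, presumably so
 * that another consumer in the same protection domain can reference the
 * already registered memory through its own handle.
 */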
int ehea_gen_smr(struct ehea_adapter *adapter, struct ehea_mr *old_mr,
		 struct ehea_mr *shared_mr)
{
	u64 hret;

	hret = ehea_h_register_smr(adapter->handle, old_mr->handle,
				   old_mr->vaddr, EHEA_MR_ACC_CTRL,
				   adapter->pd, shared_mr);
	if (hret != H_SUCCESS)
		return -EIO;

	shared_mr->adapter = adapter;
	return 0;
}

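/*
 * Decode and log a firmware error data block. The resource type tells
 * whether the block describes a QP (0x8), CQ (0x4) or EQ (0x3).
 */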
void print_error_data(u64 *data)
{
	int length;
	u64 type = EHEA_BMASK_GET(ERROR_DATA_TYPE, data[2]);
	u64 resource = data[1];

	length = EHEA_BMASK_GET(ERROR_DATA_LENGTH, data[0]);
	if (length > EHEA_PAGESIZE)
		length = EHEA_PAGESIZE;

	if (type == 0x8) /* Queue Pair */
		ehea_error("QP (resource=%lX) state: AER=0x%lX, AERR=0x%lX, "
			   "port=%lX", resource, data[6], data[12], data[22]);

	if (type == 0x4) /* Completion Queue */
		ehea_error("CQ (resource=%lX) state: AER=0x%lX", resource,
			   data[6]);

	if (type == 0x3) /* Event Queue */
		ehea_error("EQ (resource=%lX) state: AER=0x%lX", resource,
			   data[6]);

	ehea_dump(data, length, "error data");
}

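/*
 * Fetch the error data block for a resource handle from the hypervisor
 * into a locally allocated page and print it.
 */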
void ehea_error_data(struct ehea_adapter *adapter, u64 res_handle)
{
	unsigned long ret;
	u64 *rblock;

	rblock = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!rblock) {
		ehea_error("Cannot allocate rblock memory.");
		return;
	}

	ret = ehea_h_error_data(adapter->handle, res_handle, rblock);

	if (ret == H_R_STATE)
		ehea_error("No error data is available: %lX.", res_handle);
	else if (ret == H_SUCCESS)
		print_error_data(rblock);
	else
		ehea_error("Error data could not be fetched: %lX", res_handle);

	kfree(rblock);
}