/*
 *  IBM eServer eHCA Infiniband device driver for Linux on POWER
 *
 *  Functions for EQs, NEQs and interrupts
 *
 *  Authors: Heiko J Schick <schickhj@de.ibm.com>
 *           Khadija Souissi <souissi@de.ibm.com>
 *           Hoang-Nam Nguyen <hnguyen@de.ibm.com>
 *           Joachim Fenkes <fenkes@de.ibm.com>
 *
 *  Copyright (c) 2005 IBM Corporation
 *
 *  All rights reserved.
 *
 *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
 *  BSD.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include "ehca_classes.h"
#include "ehca_irq.h"
#include "ehca_iverbs.h"
#include "ehca_tools.h"
#include "hcp_if.h"
#include "hipz_fns.h"
#include "ipz_pt_fn.h"
#define EQE_COMPLETION_EVENT   EHCA_BMASK_IBM( 1,  1)
#define EQE_CQ_QP_NUMBER       EHCA_BMASK_IBM( 8, 31)
#define EQE_EE_IDENTIFIER      EHCA_BMASK_IBM( 2,  7)
#define EQE_CQ_NUMBER          EHCA_BMASK_IBM( 8, 31)
#define EQE_QP_NUMBER          EHCA_BMASK_IBM( 8, 31)
#define EQE_QP_TOKEN           EHCA_BMASK_IBM(32, 63)
#define EQE_CQ_TOKEN           EHCA_BMASK_IBM(32, 63)

#define NEQE_COMPLETION_EVENT  EHCA_BMASK_IBM( 1,  1)
#define NEQE_EVENT_CODE        EHCA_BMASK_IBM( 2,  7)
#define NEQE_PORT_NUMBER       EHCA_BMASK_IBM( 8, 15)
#define NEQE_PORT_AVAILABILITY EHCA_BMASK_IBM(16, 16)
#define NEQE_DISRUPTIVE        EHCA_BMASK_IBM(16, 16)
#define NEQE_SPECIFIC_EVENT    EHCA_BMASK_IBM(16, 23)

#define ERROR_DATA_LENGTH      EHCA_BMASK_IBM(52, 63)
#define ERROR_DATA_TYPE        EHCA_BMASK_IBM( 0,  7)
static void queue_comp_task(struct ehca_cq *__cq);
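/*
 * Pool of per-CPU completion tasks, used only when ehca_scaling_code is
 * set: completion events are then handed off to kernel threads instead
 * of being handled directly in the tasklet, so the callback work can be
 * spread across CPUs.
 */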
static struct ehca_comp_pool *pool;
static inline void comp_event_callback(struct ehca_cq *cq)
{
	if (!cq->ib_cq.comp_handler)
		return;

	spin_lock(&cq->cb_lock);
	cq->ib_cq.comp_handler(&cq->ib_cq, cq->ib_cq.cq_context);
	spin_unlock(&cq->cb_lock);
}
static void print_error_data(struct ehca_shca *shca, void *data,
			     u64 *rblock, int length)
{
	u64 type = EHCA_BMASK_GET(ERROR_DATA_TYPE, rblock[2]);
	u64 resource = rblock[1];

	switch (type) {
	case 0x1: /* Queue Pair */
	{
		struct ehca_qp *qp = (struct ehca_qp *)data;

		/* only print error data if AER is set */
		if (rblock[6] == 0)
			return;

		ehca_err(&shca->ib_device,
			 "QP 0x%x (resource=%lx) has errors.",
			 qp->ib_qp.qp_num, resource);
		break;
	}
	case 0x4: /* Completion Queue */
	{
		struct ehca_cq *cq = (struct ehca_cq *)data;

		ehca_err(&shca->ib_device,
			 "CQ 0x%x (resource=%lx) has errors.",
			 cq->cq_number, resource);
		break;
	}
	default:
		ehca_err(&shca->ib_device,
			 "Unknown error type: %lx on %s.",
			 type, shca->ib_device.name);
		break;
	}

	ehca_err(&shca->ib_device, "Error data is available: %lx.", resource);
	ehca_err(&shca->ib_device, "EHCA ----- error data begin "
		 "---------------------------------------------------");
	ehca_dmp(rblock, length, "resource=%lx", resource);
	ehca_err(&shca->ib_device, "EHCA ----- error data end "
		 "----------------------------------------------------");
}
int ehca_error_data(struct ehca_shca *shca, void *data,
		    u64 resource)
{
	unsigned long ret, block_count;
	u64 *rblock;

	rblock = ehca_alloc_fw_ctrlblock(GFP_ATOMIC);
	if (!rblock) {
		ehca_err(&shca->ib_device, "Cannot allocate rblock memory.");
		ret = -ENOMEM;
		goto error_data1;
	}

	/* rblock must be 4K aligned and should be 4K large */
	ret = hipz_h_error_data(shca->ipz_hca_handle, resource,
				rblock, &block_count);

	if (ret == H_R_STATE)
		ehca_err(&shca->ib_device,
			 "No error data is available: %lx.", resource);
	else if (ret == H_SUCCESS) {
		int length = EHCA_BMASK_GET(ERROR_DATA_LENGTH, rblock[0]);

		if (length > EHCA_PAGESIZE)
			length = EHCA_PAGESIZE;
		print_error_data(shca, data, rblock, length);
	} else
		ehca_err(&shca->ib_device,
			 "Error data could not be fetched: %lx", resource);

	ehca_free_fw_ctrlblock(rblock);

error_data1:
	return ret;
}
static void dispatch_qp_event(struct ehca_shca *shca, struct ehca_qp *qp,
			      enum ib_event_type event_type)
{
	struct ib_event event;

	event.device = &shca->ib_device;
	event.event = event_type;

	if (qp->ext_type == EQPT_SRQ) {
		if (!qp->ib_srq.event_handler)
			return;

		event.element.srq = &qp->ib_srq;
		qp->ib_srq.event_handler(&event, qp->ib_srq.srq_context);
	} else {
		if (!qp->ib_qp.event_handler)
			return;

		event.element.qp = &qp->ib_qp;
		qp->ib_qp.event_handler(&event, qp->ib_qp.qp_context);
	}
}
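/*
 * Note on nr_events in the callbacks below: the QP/CQ is looked up and
 * its event counter incremented while the idr read lock is held, so
 * that destroy_qp()/destroy_cq() can wait on wait_completion until all
 * in-flight events for the object have been delivered.
 */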
static void qp_event_callback(struct ehca_shca *shca, u64 eqe,
			      enum ib_event_type event_type, int fatal)
{
	struct ehca_qp *qp;
	u32 token = EHCA_BMASK_GET(EQE_QP_TOKEN, eqe);

	read_lock(&ehca_qp_idr_lock);
	qp = idr_find(&ehca_qp_idr, token);
	if (qp)
		atomic_inc(&qp->nr_events);
	read_unlock(&ehca_qp_idr_lock);

	if (!qp)
		return;

	if (fatal)
		ehca_error_data(shca, qp, qp->ipz_qp_handle.handle);

	dispatch_qp_event(shca, qp, fatal && qp->ext_type == EQPT_SRQ ?
			  IB_EVENT_SRQ_ERR : event_type);

	/*
	 * eHCA only processes one WQE at a time for SRQ base QPs,
	 * so the last WQE has been processed as soon as the QP enters
	 * error state.
	 */
	if (fatal && qp->ext_type == EQPT_SRQBASE)
		dispatch_qp_event(shca, qp, IB_EVENT_QP_LAST_WQE_REACHED);

	if (atomic_dec_and_test(&qp->nr_events))
		wake_up(&qp->wait_completion);
}
static void cq_event_callback(struct ehca_shca *shca, u64 eqe)
{
	struct ehca_cq *cq;
	u32 token = EHCA_BMASK_GET(EQE_CQ_TOKEN, eqe);

	read_lock(&ehca_cq_idr_lock);
	cq = idr_find(&ehca_cq_idr, token);
	if (cq)
		atomic_inc(&cq->nr_events);
	read_unlock(&ehca_cq_idr_lock);
	if (!cq)
		return;

	ehca_error_data(shca, cq, cq->ipz_cq_handle.handle);
	if (atomic_dec_and_test(&cq->nr_events))
		wake_up(&cq->wait_completion);
}
static void parse_identifier(struct ehca_shca *shca, u64 eqe)
{
	u8 identifier = EHCA_BMASK_GET(EQE_EE_IDENTIFIER, eqe);

	switch (identifier) {
	case 0x02: /* path migrated */
		qp_event_callback(shca, eqe, IB_EVENT_PATH_MIG, 0);
		break;
	case 0x03: /* communication established */
		qp_event_callback(shca, eqe, IB_EVENT_COMM_EST, 0);
		break;
	case 0x04: /* send queue drained */
		qp_event_callback(shca, eqe, IB_EVENT_SQ_DRAINED, 0);
		break;
	case 0x05: /* QP error */
	case 0x06: /* QP error */
		qp_event_callback(shca, eqe, IB_EVENT_QP_FATAL, 1);
		break;
	case 0x07: /* CQ error */
	case 0x08: /* CQ error */
		cq_event_callback(shca, eqe);
		break;
	case 0x09: /* MRMWPTE error */
		ehca_err(&shca->ib_device, "MRMWPTE error.");
		break;
	case 0x0A: /* port event */
		ehca_err(&shca->ib_device, "Port event.");
		break;
	case 0x0B: /* MR access error */
		ehca_err(&shca->ib_device, "MR access error.");
		break;
	case 0x0C: /* EQ error */
		ehca_err(&shca->ib_device, "EQ error.");
		break;
	case 0x0D: /* P/Q_Key mismatch */
		ehca_err(&shca->ib_device, "P/Q_Key mismatch.");
		break;
	case 0x10: /* sampling complete */
		ehca_err(&shca->ib_device, "Sampling complete.");
		break;
	case 0x11: /* unaffiliated access error */
		ehca_err(&shca->ib_device, "Unaffiliated access error.");
		break;
	case 0x12: /* path migrating */
		ehca_err(&shca->ib_device, "Path migrating.");
		break;
	case 0x13: /* interface trace stopped */
		ehca_err(&shca->ib_device, "Interface trace stopped.");
		break;
	case 0x14: /* first error capture info available */
		ehca_info(&shca->ib_device, "First error capture available");
		break;
	case 0x15: /* SRQ limit reached */
		qp_event_callback(shca, eqe, IB_EVENT_SRQ_LIMIT_REACHED, 0);
		break;
	default:
		ehca_err(&shca->ib_device, "Unknown identifier: %x on %s.",
			 identifier, shca->ib_device.name);
		break;
	}
}
static void dispatch_port_event(struct ehca_shca *shca, int port_num,
				enum ib_event_type type, const char *msg)
{
	struct ib_event event;

	ehca_info(&shca->ib_device, "port %d %s.", port_num, msg);
	event.device = &shca->ib_device;
	event.event = type;
	event.element.port_num = port_num;
	ib_dispatch_event(&event);
}
static void notify_port_conf_change(struct ehca_shca *shca, int port_num)
{
	struct ehca_sma_attr new_attr;
	struct ehca_sma_attr *old_attr = &shca->sport[port_num - 1].saved_attr;

	ehca_query_sma_attr(shca, port_num, &new_attr);

	if (new_attr.sm_sl != old_attr->sm_sl ||
	    new_attr.sm_lid != old_attr->sm_lid)
		dispatch_port_event(shca, port_num, IB_EVENT_SM_CHANGE,
				    "SM changed");

	if (new_attr.lid != old_attr->lid ||
	    new_attr.lmc != old_attr->lmc)
		dispatch_port_event(shca, port_num, IB_EVENT_LID_CHANGE,
				    "LID changed");

	if (new_attr.pkey_tbl_len != old_attr->pkey_tbl_len ||
	    memcmp(new_attr.pkeys, old_attr->pkeys,
		   sizeof(u16) * new_attr.pkey_tbl_len))
		dispatch_port_event(shca, port_num, IB_EVENT_PKEY_CHANGE,
				    "P_Key changed");

	*old_attr = new_attr;
}
static void parse_ec(struct ehca_shca *shca, u64 eqe)
{
	u8 ec   = EHCA_BMASK_GET(NEQE_EVENT_CODE, eqe);
	u8 port = EHCA_BMASK_GET(NEQE_PORT_NUMBER, eqe);
	u8 spec_event;
	struct ehca_sport *sport = &shca->sport[port - 1];
	unsigned long flags;

	switch (ec) {
	case 0x30: /* port availability change */
		if (EHCA_BMASK_GET(NEQE_PORT_AVAILABILITY, eqe)) {
			int suppress_event;

			/* replay modify_qp for sqps */
			spin_lock_irqsave(&sport->mod_sqp_lock, flags);
			suppress_event = !sport->ibqp_sqp[IB_QPT_GSI];
			if (sport->ibqp_sqp[IB_QPT_SMI])
				ehca_recover_sqp(sport->ibqp_sqp[IB_QPT_SMI]);
			if (!suppress_event)
				ehca_recover_sqp(sport->ibqp_sqp[IB_QPT_GSI]);
			spin_unlock_irqrestore(&sport->mod_sqp_lock, flags);

			/* AQP1 was destroyed, ignore this event */
			if (suppress_event)
				break;

			sport->port_state = IB_PORT_ACTIVE;
			dispatch_port_event(shca, port, IB_EVENT_PORT_ACTIVE,
					    "is active");
			ehca_query_sma_attr(shca, port,
					    &sport->saved_attr);
		} else {
			sport->port_state = IB_PORT_DOWN;
			dispatch_port_event(shca, port, IB_EVENT_PORT_ERR,
					    "is inactive");
		}
		break;
	case 0x31:
		/* port configuration change
		 * disruptive change is caused by
		 * LID, PKEY or SM change
		 */
		if (EHCA_BMASK_GET(NEQE_DISRUPTIVE, eqe)) {
			ehca_warn(&shca->ib_device, "disruptive port "
				  "%d configuration change", port);

			sport->port_state = IB_PORT_DOWN;
			dispatch_port_event(shca, port, IB_EVENT_PORT_ERR,
					    "is inactive");

			sport->port_state = IB_PORT_ACTIVE;
			dispatch_port_event(shca, port, IB_EVENT_PORT_ACTIVE,
					    "is active");
			ehca_query_sma_attr(shca, port,
					    &sport->saved_attr);
		} else
			notify_port_conf_change(shca, port);
		break;
	case 0x32: /* adapter malfunction */
		ehca_err(&shca->ib_device, "Adapter malfunction.");
		break;
	case 0x33: /* trace stopped */
		ehca_err(&shca->ib_device, "Trace stopped.");
		break;
	case 0x34: /* util async event */
		spec_event = EHCA_BMASK_GET(NEQE_SPECIFIC_EVENT, eqe);
		if (spec_event == 0x80) /* client reregister required */
			dispatch_port_event(shca, port,
					    IB_EVENT_CLIENT_REREGISTER,
					    "client reregister req.");
		else
			ehca_warn(&shca->ib_device, "Unknown util async "
				  "event %x on port %x", spec_event, port);
		break;
	default:
		ehca_err(&shca->ib_device, "Unknown event code: %x on %s.",
			 ec, shca->ib_device.name);
		break;
	}
}
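/*
 * Clear the EQ-pending bit in the CQ's memory-mapped control area. The
 * load that follows the store presumably forces the MMIO write to
 * complete before the EQ can raise new events for this CQ.
 */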
static inline void reset_eq_pending(struct ehca_cq *cq)
{
	u64 CQx_EP;
	struct h_galpa gal = cq->galpas.kernel;

	hipz_galpa_store_cq(gal, cqx_ep, 0x0);
	CQx_EP = hipz_galpa_load(gal, CQTEMM_OFFSET(cqx_ep));
}
irqreturn_t ehca_interrupt_neq(int irq, void *dev_id)
{
	struct ehca_shca *shca = (struct ehca_shca *)dev_id;

	tasklet_hi_schedule(&shca->neq.interrupt_task);

	return IRQ_HANDLED;
}
void ehca_tasklet_neq(unsigned long data)
{
	struct ehca_shca *shca = (struct ehca_shca *)data;
	struct ehca_eqe *eqe;
	u64 ret;

	eqe = (struct ehca_eqe *)ehca_poll_eq(shca, &shca->neq);

	while (eqe) {
		if (!EHCA_BMASK_GET(NEQE_COMPLETION_EVENT, eqe->entry))
			parse_ec(shca, eqe->entry);

		eqe = (struct ehca_eqe *)ehca_poll_eq(shca, &shca->neq);
	}

	ret = hipz_h_reset_event(shca->ipz_hca_handle,
				 shca->neq.ipz_eq_handle, 0xFFFFFFFFFFFFFFFFL);
	if (ret != H_SUCCESS)
		ehca_err(&shca->ib_device, "Can't clear notification events.");
}
irqreturn_t ehca_interrupt_eq(int irq, void *dev_id)
{
	struct ehca_shca *shca = (struct ehca_shca *)dev_id;

	tasklet_hi_schedule(&shca->eq.interrupt_task);

	return IRQ_HANDLED;
}
static inline void process_eqe(struct ehca_shca *shca, struct ehca_eqe *eqe)
{
	u64 eqe_value;
	u32 token;
	struct ehca_cq *cq;

	eqe_value = eqe->entry;
	ehca_dbg(&shca->ib_device, "eqe_value=%lx", eqe_value);
	if (EHCA_BMASK_GET(EQE_COMPLETION_EVENT, eqe_value)) {
		ehca_dbg(&shca->ib_device, "Got completion event");
		token = EHCA_BMASK_GET(EQE_CQ_TOKEN, eqe_value);
		read_lock(&ehca_cq_idr_lock);
		cq = idr_find(&ehca_cq_idr, token);
		if (cq)
			atomic_inc(&cq->nr_events);
		read_unlock(&ehca_cq_idr_lock);
		if (cq == NULL) {
			ehca_err(&shca->ib_device,
				 "Invalid eqe for non-existing cq token=%x",
				 token);
			return;
		}
		reset_eq_pending(cq);
		if (ehca_scaling_code)
			queue_comp_task(cq);
		else {
			comp_event_callback(cq);
			if (atomic_dec_and_test(&cq->nr_events))
				wake_up(&cq->wait_completion);
		}
	} else {
		ehca_dbg(&shca->ib_device, "Got non completion event");
		parse_identifier(shca, eqe_value);
	}
}
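/*
 * ehca_process_eq() works in two phases: it first drains EQEs into
 * eq->eqe_cache, taking a reference (nr_events) on each affected CQ,
 * then re-arms the pending bits and dispatches the cached events.
 * is_irq tells whether we were called from the interrupt tasklet or
 * from periodic polling (the "deadman" path in the debug output).
 */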
void ehca_process_eq(struct ehca_shca *shca, int is_irq)
{
	struct ehca_eq *eq = &shca->eq;
	struct ehca_eqe_cache_entry *eqe_cache = eq->eqe_cache;
	u64 eqe_value;
	unsigned long flags;
	int eqe_cnt, i;
	int eq_empty = 0;

	spin_lock_irqsave(&eq->irq_spinlock, flags);
	if (is_irq) {
		const int max_query_cnt = 100;
		int query_cnt = 0;
		int int_state = 1;

		do {
			int_state = hipz_h_query_int_state(
				shca->ipz_hca_handle, eq->ist);
			query_cnt++;
		} while (int_state && query_cnt < max_query_cnt);
		if (unlikely(query_cnt == max_query_cnt))
			ehca_dbg(&shca->ib_device, "int_state=%x query_cnt=%x",
				 int_state, query_cnt);
	}

	/* read out all eqes */
	eqe_cnt = 0;
	do {
		u32 token;

		eqe_cache[eqe_cnt].eqe =
			(struct ehca_eqe *)ehca_poll_eq(shca, eq);
		if (!eqe_cache[eqe_cnt].eqe)
			break;
		eqe_value = eqe_cache[eqe_cnt].eqe->entry;
		if (EHCA_BMASK_GET(EQE_COMPLETION_EVENT, eqe_value)) {
			token = EHCA_BMASK_GET(EQE_CQ_TOKEN, eqe_value);
			read_lock(&ehca_cq_idr_lock);
			eqe_cache[eqe_cnt].cq = idr_find(&ehca_cq_idr, token);
			if (eqe_cache[eqe_cnt].cq)
				atomic_inc(&eqe_cache[eqe_cnt].cq->nr_events);
			read_unlock(&ehca_cq_idr_lock);
			if (!eqe_cache[eqe_cnt].cq) {
				ehca_err(&shca->ib_device,
					 "Invalid eqe for non-existing cq "
					 "token=%x", token);
				continue;
			}
		} else
			eqe_cache[eqe_cnt].cq = NULL;
		eqe_cnt++;
	} while (eqe_cnt < EHCA_EQE_CACHE_SIZE);
	if (!eqe_cnt) {
		if (is_irq)
			ehca_dbg(&shca->ib_device,
				 "No eqe found for irq event");
		goto unlock_irq_spinlock;
	} else if (!is_irq)
		ehca_dbg(&shca->ib_device, "deadman found %x eqe", eqe_cnt);
	if (unlikely(eqe_cnt == EHCA_EQE_CACHE_SIZE))
		ehca_dbg(&shca->ib_device, "too many eqes for one irq event");

	/* enable irq for new packets */
	for (i = 0; i < eqe_cnt; i++) {
		if (eq->eqe_cache[i].cq)
			reset_eq_pending(eq->eqe_cache[i].cq);
	}

	/* check eq */
	spin_lock(&eq->spinlock);
	eq_empty = (!ipz_eqit_eq_peek_valid(&shca->eq.ipz_queue));
	spin_unlock(&eq->spinlock);

	/* call completion handler for cached eqes */
	for (i = 0; i < eqe_cnt; i++)
		if (eq->eqe_cache[i].cq) {
			if (ehca_scaling_code)
				queue_comp_task(eq->eqe_cache[i].cq);
			else {
				struct ehca_cq *cq = eq->eqe_cache[i].cq;

				comp_event_callback(cq);
				if (atomic_dec_and_test(&cq->nr_events))
					wake_up(&cq->wait_completion);
			}
		} else {
			ehca_dbg(&shca->ib_device, "Got non completion event");
			parse_identifier(shca, eq->eqe_cache[i].eqe->entry);
		}

	/* poll eq if not empty */
	if (eq_empty)
		goto unlock_irq_spinlock;
	do {
		struct ehca_eqe *eqe;

		eqe = (struct ehca_eqe *)ehca_poll_eq(shca, &shca->eq);
		if (!eqe)
			break;
		process_eqe(shca, eqe);
	} while (1);

unlock_irq_spinlock:
	spin_unlock_irqrestore(&eq->irq_spinlock, flags);
}
void ehca_tasklet_eq(unsigned long data)
{
	ehca_process_eq((struct ehca_shca *)data, 1);
}
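/*
 * Pick the next online CPU in a simple round-robin fashion, starting
 * after the CPU chosen last time. Called from interrupt context, hence
 * the WARN_ON_ONCE check below.
 */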
static inline int find_next_online_cpu(struct ehca_comp_pool *pool)
{
	int cpu;
	unsigned long flags;

	WARN_ON_ONCE(!in_interrupt());
	if (ehca_debug_level >= 3)
		ehca_dmp(&cpu_online_map, sizeof(cpumask_t), "");

	spin_lock_irqsave(&pool->last_cpu_lock, flags);
	cpu = next_cpu(pool->last_cpu, cpu_online_map);
	if (cpu == NR_CPUS)
		cpu = first_cpu(cpu_online_map);
	pool->last_cpu = cpu;
	spin_unlock_irqrestore(&pool->last_cpu_lock, flags);

	return cpu;
}
static void __queue_comp_task(struct ehca_cq *__cq,
			      struct ehca_cpu_comp_task *cct)
{
	unsigned long flags;

	spin_lock_irqsave(&cct->task_lock, flags);
	spin_lock(&__cq->task_lock);

	if (__cq->nr_callbacks == 0) {
		__cq->nr_callbacks++;
		list_add_tail(&__cq->entry, &cct->cq_list);
		cct->cq_jobs++;
		wake_up(&cct->wait_queue);
	} else
		__cq->nr_callbacks++;

	spin_unlock(&__cq->task_lock);
	spin_unlock_irqrestore(&cct->task_lock, flags);
}
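/*
 * If the CPU chosen below already has completion jobs pending, try the
 * next online CPU once before queueing, to spread the load a little.
 */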
static void queue_comp_task(struct ehca_cq *__cq)
{
	int cpu_id, cq_jobs;
	struct ehca_cpu_comp_task *cct;
	unsigned long flags;

	cpu_id = find_next_online_cpu(pool);
	BUG_ON(!cpu_online(cpu_id));
	cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu_id);
	BUG_ON(!cct);

	spin_lock_irqsave(&cct->task_lock, flags);
	cq_jobs = cct->cq_jobs;
	spin_unlock_irqrestore(&cct->task_lock, flags);
	if (cq_jobs > 0) {
		cpu_id = find_next_online_cpu(pool);
		cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu_id);
		BUG_ON(!cct);
	}

	__queue_comp_task(__cq, cct);
}
static void run_comp_task(struct ehca_cpu_comp_task *cct)
{
	struct ehca_cq *cq;
	unsigned long flags;

	spin_lock_irqsave(&cct->task_lock, flags);

	while (!list_empty(&cct->cq_list)) {
		cq = list_entry(cct->cq_list.next, struct ehca_cq, entry);
		spin_unlock_irqrestore(&cct->task_lock, flags);

		comp_event_callback(cq);
		if (atomic_dec_and_test(&cq->nr_events))
			wake_up(&cq->wait_completion);

		spin_lock_irqsave(&cct->task_lock, flags);
		spin_lock(&cq->task_lock);
		cq->nr_callbacks--;
		if (!cq->nr_callbacks) {
			list_del_init(cct->cq_list.next);
			cct->cq_jobs--;
		}
		spin_unlock(&cq->task_lock);
	}

	spin_unlock_irqrestore(&cct->task_lock, flags);
}
static int comp_task(void *__cct)
{
	struct ehca_cpu_comp_task *cct = __cct;
	int cql_empty;
	DECLARE_WAITQUEUE(wait, current);

	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		add_wait_queue(&cct->wait_queue, &wait);

		spin_lock_irq(&cct->task_lock);
		cql_empty = list_empty(&cct->cq_list);
		spin_unlock_irq(&cct->task_lock);
		if (cql_empty)
			schedule();
		else
			__set_current_state(TASK_RUNNING);

		remove_wait_queue(&cct->wait_queue, &wait);

		spin_lock_irq(&cct->task_lock);
		cql_empty = list_empty(&cct->cq_list);
		spin_unlock_irq(&cct->task_lock);
		if (!cql_empty)
			run_comp_task(__cct);

		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);

	return 0;
}
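/*
 * The kthread is created here but neither bound to a CPU nor woken up;
 * the callers (pool setup and the hotplug notifier) do that themselves.
 */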
static struct task_struct *create_comp_task(struct ehca_comp_pool *pool,
					    int cpu)
{
	struct ehca_cpu_comp_task *cct;

	cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
	spin_lock_init(&cct->task_lock);
	INIT_LIST_HEAD(&cct->cq_list);
	init_waitqueue_head(&cct->wait_queue);
	cct->task = kthread_create(comp_task, cct, "ehca_comp/%d", cpu);

	return cct->task;
}
static void destroy_comp_task(struct ehca_comp_pool *pool, int cpu)
{
	struct ehca_cpu_comp_task *cct;
	struct task_struct *task;
	unsigned long flags_cct;

	cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
	spin_lock_irqsave(&cct->task_lock, flags_cct);
	task = cct->task;
	cct->task = NULL;
	cct->cq_jobs = 0;
	spin_unlock_irqrestore(&cct->task_lock, flags_cct);

	if (task)
		kthread_stop(task);
}
static void __cpuinit take_over_work(struct ehca_comp_pool *pool, int cpu)
{
	struct ehca_cpu_comp_task *cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
	LIST_HEAD(list);
	struct ehca_cq *cq;
	unsigned long flags_cct;

	spin_lock_irqsave(&cct->task_lock, flags_cct);

	list_splice_init(&cct->cq_list, &list);

	while (!list_empty(&list)) {
		cq = list_entry(list.next, struct ehca_cq, entry);
		list_del(&cq->entry);
		__queue_comp_task(cq, per_cpu_ptr(pool->cpu_comp_tasks,
						  smp_processor_id()));
	}

	spin_unlock_irqrestore(&cct->task_lock, flags_cct);
}
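/*
 * CPU hotplug notifier: create, bind, wake, or tear down the per-CPU
 * completion task as CPUs come and go, and migrate any queued CQs away
 * from a CPU that has died.
 */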
static int __cpuinit comp_pool_callback(struct notifier_block *nfb,
					unsigned long action,
					void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct ehca_cpu_comp_task *cct;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		ehca_gen_dbg("CPU: %x (CPU_PREPARE)", cpu);
		if (!create_comp_task(pool, cpu)) {
			ehca_gen_err("Can't create comp_task for cpu: %x", cpu);
			return NOTIFY_BAD;
		}
		break;
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
		ehca_gen_dbg("CPU: %x (CPU_CANCELED)", cpu);
		cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
		kthread_bind(cct->task, any_online_cpu(cpu_online_map));
		destroy_comp_task(pool, cpu);
		break;
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		ehca_gen_dbg("CPU: %x (CPU_ONLINE)", cpu);
		cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
		kthread_bind(cct->task, cpu);
		wake_up_process(cct->task);
		break;
	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		ehca_gen_dbg("CPU: %x (CPU_DOWN_PREPARE)", cpu);
		break;
	case CPU_DOWN_FAILED:
	case CPU_DOWN_FAILED_FROZEN:
		ehca_gen_dbg("CPU: %x (CPU_DOWN_FAILED)", cpu);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		ehca_gen_dbg("CPU: %x (CPU_DEAD)", cpu);
		destroy_comp_task(pool, cpu);
		take_over_work(pool, cpu);
		break;
	}

	return NOTIFY_OK;
}
static struct notifier_block comp_pool_callback_nb __cpuinitdata = {
	.notifier_call = comp_pool_callback,
};
int ehca_create_comp_pool(void)
{
	int cpu;
	struct task_struct *task;

	if (!ehca_scaling_code)
		return 0;

	pool = kzalloc(sizeof(struct ehca_comp_pool), GFP_KERNEL);
	if (pool == NULL)
		return -ENOMEM;

	spin_lock_init(&pool->last_cpu_lock);
	pool->last_cpu = any_online_cpu(cpu_online_map);
	pool->cpu_comp_tasks = alloc_percpu(struct ehca_cpu_comp_task);
	if (pool->cpu_comp_tasks == NULL) {
		kfree(pool);
		return -EINVAL;
	}

	for_each_online_cpu(cpu) {
		task = create_comp_task(pool, cpu);
		if (task) {
			kthread_bind(task, cpu);
			wake_up_process(task);
		}
	}

	register_hotcpu_notifier(&comp_pool_callback_nb);
	printk(KERN_INFO "eHCA scaling code enabled\n");

	return 0;
}
void ehca_destroy_comp_pool(void)
{
	int i;

	if (!ehca_scaling_code)
		return;

	unregister_hotcpu_notifier(&comp_pool_callback_nb);

	for (i = 0; i < NR_CPUS; i++) {
		if (cpu_online(i))
			destroy_comp_task(pool, i);
	}
	free_percpu(pool->cpu_comp_tasks);
	kfree(pool);
}