/*
 *  IBM eServer eHCA Infiniband device driver for Linux on POWER
 *
 *  Functions for EQs, NEQs and interrupts
 *
 *  Authors: Heiko J Schick <schickhj@de.ibm.com>
 *           Khadija Souissi <souissi@de.ibm.com>
 *           Hoang-Nam Nguyen <hnguyen@de.ibm.com>
 *           Joachim Fenkes <fenkes@de.ibm.com>
 *
 *  Copyright (c) 2005 IBM Corporation
 *
 *  All rights reserved.
 *
 *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
 *  BSD.
 *
 * OpenIB BSD License
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "ehca_classes.h"
#include "ehca_irq.h"
#include "ehca_iverbs.h"
#include "ehca_tools.h"
#include "hcp_if.h"
#include "hipz_fns.h"
#include "ipz_pt_fn.h"

#define EQE_COMPLETION_EVENT   EHCA_BMASK_IBM( 1,  1)
#define EQE_CQ_QP_NUMBER       EHCA_BMASK_IBM( 8, 31)
#define EQE_EE_IDENTIFIER      EHCA_BMASK_IBM( 2,  7)
#define EQE_CQ_NUMBER          EHCA_BMASK_IBM( 8, 31)
#define EQE_QP_NUMBER          EHCA_BMASK_IBM( 8, 31)
#define EQE_QP_TOKEN           EHCA_BMASK_IBM(32, 63)
#define EQE_CQ_TOKEN           EHCA_BMASK_IBM(32, 63)

#define NEQE_COMPLETION_EVENT  EHCA_BMASK_IBM( 1,  1)
#define NEQE_EVENT_CODE        EHCA_BMASK_IBM( 2,  7)
#define NEQE_PORT_NUMBER       EHCA_BMASK_IBM( 8, 15)
#define NEQE_PORT_AVAILABILITY EHCA_BMASK_IBM(16, 16)
#define NEQE_DISRUPTIVE        EHCA_BMASK_IBM(16, 16)

#define ERROR_DATA_LENGTH      EHCA_BMASK_IBM(52, 63)
#define ERROR_DATA_TYPE        EHCA_BMASK_IBM( 0,  7)

static void queue_comp_task(struct ehca_cq *__cq);

static struct ehca_comp_pool *pool;
#ifdef CONFIG_HOTPLUG_CPU
static struct notifier_block comp_pool_callback_nb;
#endif

static inline void comp_event_callback(struct ehca_cq *cq)
{
	if (!cq->ib_cq.comp_handler)
		return;

	spin_lock(&cq->cb_lock);
	cq->ib_cq.comp_handler(&cq->ib_cq, cq->ib_cq.cq_context);
	spin_unlock(&cq->cb_lock);
}

static void print_error_data(struct ehca_shca *shca, void *data,
			     u64 *rblock, int length)
{
	u64 type = EHCA_BMASK_GET(ERROR_DATA_TYPE, rblock[2]);
	u64 resource = rblock[1];

	switch (type) {
	case 0x1: /* Queue Pair */
	{
		struct ehca_qp *qp = (struct ehca_qp *)data;

		/* only print error data if AER is set */
		if (rblock[6] == 0)
			return;

		ehca_err(&shca->ib_device,
			 "QP 0x%x (resource=%lx) has errors.",
			 qp->ib_qp.qp_num, resource);
		break;
	}
	case 0x4: /* Completion Queue */
	{
		struct ehca_cq *cq = (struct ehca_cq *)data;

		ehca_err(&shca->ib_device,
			 "CQ 0x%x (resource=%lx) has errors.",
			 cq->cq_number, resource);
		break;
	}
	default:
		ehca_err(&shca->ib_device,
			 "Unknown error type: %lx on %s.",
			 type, shca->ib_device.name);
		break;
	}

	ehca_err(&shca->ib_device, "Error data is available: %lx.", resource);
	ehca_err(&shca->ib_device, "EHCA ----- error data begin "
		 "---------------------------------------------------");
	ehca_dmp(rblock, length, "resource=%lx", resource);
	ehca_err(&shca->ib_device, "EHCA ----- error data end "
		 "----------------------------------------------------");
}

int ehca_error_data(struct ehca_shca *shca, void *data,
		    u64 resource)
{
	unsigned long ret;
	u64 *rblock;
	unsigned long block_count;

	rblock = ehca_alloc_fw_ctrlblock(GFP_ATOMIC);
	if (!rblock) {
		ehca_err(&shca->ib_device, "Cannot allocate rblock memory.");
		ret = -ENOMEM;
		goto error_data1;
	}

	/* rblock must be 4K aligned and should be 4K large */
	ret = hipz_h_error_data(shca->ipz_hca_handle,
				resource, rblock, &block_count);

	if (ret == H_R_STATE)
		ehca_err(&shca->ib_device,
			 "No error data is available: %lx.", resource);
	else if (ret == H_SUCCESS) {
		int length = EHCA_BMASK_GET(ERROR_DATA_LENGTH, rblock[0]);

		if (length > EHCA_PAGESIZE)
			length = EHCA_PAGESIZE;

		print_error_data(shca, data, rblock, length);
	} else
		ehca_err(&shca->ib_device,
			 "Error data could not be fetched: %lx", resource);

	ehca_free_fw_ctrlblock(rblock);

error_data1:
	return ret;
}

static void qp_event_callback(struct ehca_shca *shca, u64 eqe,
			      enum ib_event_type event_type, int fatal)
{
	struct ib_event event;
	struct ehca_qp *qp;
	u32 token = EHCA_BMASK_GET(EQE_QP_TOKEN, eqe);

	read_lock(&ehca_qp_idr_lock);
	qp = idr_find(&ehca_qp_idr, token);
	read_unlock(&ehca_qp_idr_lock);

	if (!qp)
		return;

	if (fatal)
		ehca_error_data(shca, qp, qp->ipz_qp_handle.handle);

	event.device = &shca->ib_device;

	if (qp->ext_type == EQPT_SRQ) {
		if (!qp->ib_srq.event_handler)
			return;

		event.event = fatal ? IB_EVENT_SRQ_ERR : event_type;
		event.element.srq = &qp->ib_srq;
		qp->ib_srq.event_handler(&event, qp->ib_srq.srq_context);
	} else {
		if (!qp->ib_qp.event_handler)
			return;

		event.event = event_type;
		event.element.qp = &qp->ib_qp;
		qp->ib_qp.event_handler(&event, qp->ib_qp.qp_context);
	}
}

static void cq_event_callback(struct ehca_shca *shca,
			      u64 eqe)
{
	struct ehca_cq *cq;
	u32 token = EHCA_BMASK_GET(EQE_CQ_TOKEN, eqe);

	read_lock(&ehca_cq_idr_lock);
	cq = idr_find(&ehca_cq_idr, token);
	if (cq)
		atomic_inc(&cq->nr_events);
	read_unlock(&ehca_cq_idr_lock);

	if (!cq)
		return;

	ehca_error_data(shca, cq, cq->ipz_cq_handle.handle);

	if (atomic_dec_and_test(&cq->nr_events))
		wake_up(&cq->wait_completion);
}

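/*
 * Dispatch an affiliated EQ event by its identifier: QP and CQ events go to
 * the respective callbacks, all other identifiers are only logged.
 */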
static void parse_identifier(struct ehca_shca *shca, u64 eqe)
{
	u8 identifier = EHCA_BMASK_GET(EQE_EE_IDENTIFIER, eqe);

	switch (identifier) {
	case 0x02: /* path migrated */
		qp_event_callback(shca, eqe, IB_EVENT_PATH_MIG, 0);
		break;
	case 0x03: /* communication established */
		qp_event_callback(shca, eqe, IB_EVENT_COMM_EST, 0);
		break;
	case 0x04: /* send queue drained */
		qp_event_callback(shca, eqe, IB_EVENT_SQ_DRAINED, 0);
		break;
	case 0x05: /* QP error */
	case 0x06: /* QP error */
		qp_event_callback(shca, eqe, IB_EVENT_QP_FATAL, 1);
		break;
	case 0x07: /* CQ error */
	case 0x08: /* CQ error */
		cq_event_callback(shca, eqe);
		break;
	case 0x09: /* MRMWPTE error */
		ehca_err(&shca->ib_device, "MRMWPTE error.");
		break;
	case 0x0A: /* port event */
		ehca_err(&shca->ib_device, "Port event.");
		break;
	case 0x0B: /* MR access error */
		ehca_err(&shca->ib_device, "MR access error.");
		break;
	case 0x0C: /* EQ error */
		ehca_err(&shca->ib_device, "EQ error.");
		break;
	case 0x0D: /* P/Q_Key mismatch */
		ehca_err(&shca->ib_device, "P/Q_Key mismatch.");
		break;
	case 0x10: /* sampling complete */
		ehca_err(&shca->ib_device, "Sampling complete.");
		break;
	case 0x11: /* unaffiliated access error */
		ehca_err(&shca->ib_device, "Unaffiliated access error.");
		break;
	case 0x12: /* path migrating error */
		ehca_err(&shca->ib_device, "Path migration error.");
		break;
	case 0x13: /* interface trace stopped */
		ehca_err(&shca->ib_device, "Interface trace stopped.");
		break;
	case 0x14: /* first error capture info available */
		ehca_info(&shca->ib_device, "First error capture available");
		break;
	case 0x15: /* SRQ limit reached */
		qp_event_callback(shca, eqe, IB_EVENT_SRQ_LIMIT_REACHED, 0);
		break;
	default:
		ehca_err(&shca->ib_device, "Unknown identifier: %x on %s.",
			 identifier, shca->ib_device.name);
		break;
	}
}

static void dispatch_port_event(struct ehca_shca *shca, int port_num,
				enum ib_event_type type, const char *msg)
{
	struct ib_event event;

	ehca_info(&shca->ib_device, "port %d %s.", port_num, msg);
	event.device = &shca->ib_device;
	event.event = type;
	event.element.port_num = port_num;
	ib_dispatch_event(&event);
}

static void notify_port_conf_change(struct ehca_shca *shca, int port_num)
{
	struct ehca_sma_attr new_attr;
	struct ehca_sma_attr *old_attr = &shca->sport[port_num - 1].saved_attr;

	ehca_query_sma_attr(shca, port_num, &new_attr);

	if (new_attr.sm_sl != old_attr->sm_sl ||
	    new_attr.sm_lid != old_attr->sm_lid)
		dispatch_port_event(shca, port_num, IB_EVENT_SM_CHANGE,
				    "SM changed");

	if (new_attr.lid != old_attr->lid ||
	    new_attr.lmc != old_attr->lmc)
		dispatch_port_event(shca, port_num, IB_EVENT_LID_CHANGE,
				    "LID changed");

	if (new_attr.pkey_tbl_len != old_attr->pkey_tbl_len ||
	    memcmp(new_attr.pkeys, old_attr->pkeys,
		   sizeof(u16) * new_attr.pkey_tbl_len))
		dispatch_port_event(shca, port_num, IB_EVENT_PKEY_CHANGE,
				    "P_Key changed");

	*old_attr = new_attr;
}

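/*
 * Handle a notification event queue (NEQ) event code: port availability and
 * configuration changes become IB port events, other codes are only logged.
 */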
static void parse_ec(struct ehca_shca *shca, u64 eqe)
{
	u8 ec   = EHCA_BMASK_GET(NEQE_EVENT_CODE, eqe);
	u8 port = EHCA_BMASK_GET(NEQE_PORT_NUMBER, eqe);

	switch (ec) {
	case 0x30: /* port availability change */
		if (EHCA_BMASK_GET(NEQE_PORT_AVAILABILITY, eqe)) {
			shca->sport[port - 1].port_state = IB_PORT_ACTIVE;
			dispatch_port_event(shca, port, IB_EVENT_PORT_ACTIVE,
					    "is active");
			ehca_query_sma_attr(shca, port,
					    &shca->sport[port - 1].saved_attr);
		} else {
			shca->sport[port - 1].port_state = IB_PORT_DOWN;
			dispatch_port_event(shca, port, IB_EVENT_PORT_ERR,
					    "is inactive");
		}
		break;
	case 0x31:
		/* port configuration change
		 * disruptive change is caused by
		 * LID, PKEY or SM change
		 */
		if (EHCA_BMASK_GET(NEQE_DISRUPTIVE, eqe)) {
			ehca_warn(&shca->ib_device, "disruptive port "
				  "%d configuration change", port);

			shca->sport[port - 1].port_state = IB_PORT_DOWN;
			dispatch_port_event(shca, port, IB_EVENT_PORT_ERR,
					    "is inactive");

			shca->sport[port - 1].port_state = IB_PORT_ACTIVE;
			dispatch_port_event(shca, port, IB_EVENT_PORT_ACTIVE,
					    "is active");
		} else
			notify_port_conf_change(shca, port);
		break;
	case 0x32: /* adapter malfunction */
		ehca_err(&shca->ib_device, "Adapter malfunction.");
		break;
	case 0x33: /* trace stopped */
		ehca_err(&shca->ib_device, "Trace stopped.");
		break;
	default:
		ehca_err(&shca->ib_device, "Unknown event code: %x on %s.",
			 ec, shca->ib_device.name);
		break;
	}
}

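/*
 * Clear the CQ's EQ-pending bit (CQx_EP) through the kernel galpa and read
 * the register back.
 */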
static inline void reset_eq_pending(struct ehca_cq *cq)
{
	u64 CQx_EP;
	struct h_galpa gal = cq->galpas.kernel;

	hipz_galpa_store_cq(gal, cqx_ep, 0x0);
	CQx_EP = hipz_galpa_load(gal, CQTEMM_OFFSET(cqx_ep));
}

irqreturn_t ehca_interrupt_neq(int irq, void *dev_id)
{
	struct ehca_shca *shca = (struct ehca_shca *)dev_id;

	tasklet_hi_schedule(&shca->neq.interrupt_task);
	return IRQ_HANDLED;
}

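/*
 * NEQ tasklet: drain all pending NEQ entries, handing non-completion entries
 * to parse_ec(), then clear the notification events in firmware.
 */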
void ehca_tasklet_neq(unsigned long data)
{
	struct ehca_shca *shca = (struct ehca_shca *)data;
	struct ehca_eqe *eqe;
	u64 ret;

	eqe = (struct ehca_eqe *)ehca_poll_eq(shca, &shca->neq);

	while (eqe) {
		if (!EHCA_BMASK_GET(NEQE_COMPLETION_EVENT, eqe->entry))
			parse_ec(shca, eqe->entry);

		eqe = (struct ehca_eqe *)ehca_poll_eq(shca, &shca->neq);
	}

	ret = hipz_h_reset_event(shca->ipz_hca_handle,
				 shca->neq.ipz_eq_handle, 0xFFFFFFFFFFFFFFFFL);

	if (ret != H_SUCCESS)
		ehca_err(&shca->ib_device, "Can't clear notification events.");
}

irqreturn_t ehca_interrupt_eq(int irq, void *dev_id)
{
	struct ehca_shca *shca = (struct ehca_shca *)dev_id;

	tasklet_hi_schedule(&shca->eq.interrupt_task);
	return IRQ_HANDLED;
}

static inline void process_eqe(struct ehca_shca *shca, struct ehca_eqe *eqe)
{
	u64 eqe_value;
	u32 token;
	struct ehca_cq *cq;

	eqe_value = eqe->entry;
	ehca_dbg(&shca->ib_device, "eqe_value=%lx", eqe_value);
	if (EHCA_BMASK_GET(EQE_COMPLETION_EVENT, eqe_value)) {
		ehca_dbg(&shca->ib_device, "Got completion event");
		token = EHCA_BMASK_GET(EQE_CQ_TOKEN, eqe_value);
		read_lock(&ehca_cq_idr_lock);
		cq = idr_find(&ehca_cq_idr, token);
		if (cq)
			atomic_inc(&cq->nr_events);
		read_unlock(&ehca_cq_idr_lock);
		if (cq == NULL) {
			ehca_err(&shca->ib_device,
				 "Invalid eqe for non-existing cq token=%x",
				 token);
			return;
		}
		reset_eq_pending(cq);
		if (ehca_scaling_code)
			queue_comp_task(cq);
		else {
			comp_event_callback(cq);
			if (atomic_dec_and_test(&cq->nr_events))
				wake_up(&cq->wait_completion);
		}
	} else {
		ehca_dbg(&shca->ib_device, "Got non completion event");
		parse_identifier(shca, eqe_value);
	}
}

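/*
 * Main EQ processing: called from the EQ tasklet (is_irq != 0) and from
 * periodic EQ polling (is_irq == 0). EQEs are first collected in the EQE
 * cache under irq_spinlock, with their CQs referenced, and the completion
 * handlers are invoked afterwards.
 */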
void ehca_process_eq(struct ehca_shca *shca, int is_irq)
{
	struct ehca_eq *eq = &shca->eq;
	struct ehca_eqe_cache_entry *eqe_cache = eq->eqe_cache;
	u64 eqe_value;
	unsigned long flags;
	int eqe_cnt, i;
	int eq_empty = 0;

	spin_lock_irqsave(&eq->irq_spinlock, flags);
	if (is_irq) {
		const int max_query_cnt = 100;
		int query_cnt = 0;
		int int_state = 1;
		do {
			int_state = hipz_h_query_int_state(
				shca->ipz_hca_handle, eq->ist);
			query_cnt++;
			iosync();
		} while (int_state && query_cnt < max_query_cnt);
		if (unlikely((query_cnt == max_query_cnt)))
			ehca_dbg(&shca->ib_device, "int_state=%x query_cnt=%x",
				 int_state, query_cnt);
	}

	/* read out all eqes */
	eqe_cnt = 0;
	do {
		u32 token;
		eqe_cache[eqe_cnt].eqe =
			(struct ehca_eqe *)ehca_poll_eq(shca, eq);
		if (!eqe_cache[eqe_cnt].eqe)
			break;
		eqe_value = eqe_cache[eqe_cnt].eqe->entry;
		if (EHCA_BMASK_GET(EQE_COMPLETION_EVENT, eqe_value)) {
			token = EHCA_BMASK_GET(EQE_CQ_TOKEN, eqe_value);
			read_lock(&ehca_cq_idr_lock);
			eqe_cache[eqe_cnt].cq = idr_find(&ehca_cq_idr, token);
			if (eqe_cache[eqe_cnt].cq)
				atomic_inc(&eqe_cache[eqe_cnt].cq->nr_events);
			read_unlock(&ehca_cq_idr_lock);
			if (!eqe_cache[eqe_cnt].cq) {
				ehca_err(&shca->ib_device,
					 "Invalid eqe for non-existing cq "
					 "token=%x", token);
				continue;
			}
		} else
			eqe_cache[eqe_cnt].cq = NULL;
		eqe_cnt++;
	} while (eqe_cnt < EHCA_EQE_CACHE_SIZE);
	if (!eqe_cnt) {
		if (is_irq)
			ehca_dbg(&shca->ib_device,
				 "No eqe found for irq event");
		goto unlock_irq_spinlock;
	} else if (!is_irq)
		ehca_dbg(&shca->ib_device, "deadman found %x eqe", eqe_cnt);
	if (unlikely(eqe_cnt == EHCA_EQE_CACHE_SIZE))
		ehca_dbg(&shca->ib_device, "too many eqes for one irq event");
	/* enable irq for new packets */
	for (i = 0; i < eqe_cnt; i++) {
		if (eq->eqe_cache[i].cq)
			reset_eq_pending(eq->eqe_cache[i].cq);
	}
	/* check eq */
	spin_lock(&eq->spinlock);
	eq_empty = (!ipz_eqit_eq_peek_valid(&shca->eq.ipz_queue));
	spin_unlock(&eq->spinlock);
	/* call completion handler for cached eqes */
	for (i = 0; i < eqe_cnt; i++)
		if (eq->eqe_cache[i].cq) {
			if (ehca_scaling_code)
				queue_comp_task(eq->eqe_cache[i].cq);
			else {
				struct ehca_cq *cq = eq->eqe_cache[i].cq;
				comp_event_callback(cq);
				if (atomic_dec_and_test(&cq->nr_events))
					wake_up(&cq->wait_completion);
			}
		} else {
			ehca_dbg(&shca->ib_device, "Got non completion event");
			parse_identifier(shca, eq->eqe_cache[i].eqe->entry);
		}
	/* poll eq if not empty */
	if (eq_empty)
		goto unlock_irq_spinlock;
	do {
		struct ehca_eqe *eqe;
		eqe = (struct ehca_eqe *)ehca_poll_eq(shca, &shca->eq);
		if (!eqe)
			break;
		process_eqe(shca, eqe);
	} while (1);

unlock_irq_spinlock:
	spin_unlock_irqrestore(&eq->irq_spinlock, flags);
}

void ehca_tasklet_eq(unsigned long data)
{
	ehca_process_eq((struct ehca_shca *)data, 1);
}

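/*
 * Completion task pool ("scaling code"): completion events are queued to
 * per-CPU kernel threads so CQ callbacks can be spread across online CPUs.
 */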
static inline int find_next_online_cpu(struct ehca_comp_pool *pool)
{
	int cpu;
	unsigned long flags;

	WARN_ON_ONCE(!in_interrupt());
	if (ehca_debug_level)
		ehca_dmp(&cpu_online_map, sizeof(cpumask_t), "");

	spin_lock_irqsave(&pool->last_cpu_lock, flags);
	cpu = next_cpu(pool->last_cpu, cpu_online_map);
	if (cpu == NR_CPUS)
		cpu = first_cpu(cpu_online_map);
	pool->last_cpu = cpu;
	spin_unlock_irqrestore(&pool->last_cpu_lock, flags);

	return cpu;
}

static void __queue_comp_task(struct ehca_cq *__cq,
			      struct ehca_cpu_comp_task *cct)
{
	unsigned long flags;

	spin_lock_irqsave(&cct->task_lock, flags);
	spin_lock(&__cq->task_lock);

	if (__cq->nr_callbacks == 0) {
		__cq->nr_callbacks++;
		list_add_tail(&__cq->entry, &cct->cq_list);
		cct->cq_jobs++;
		wake_up(&cct->wait_queue);
	} else
		__cq->nr_callbacks++;

	spin_unlock(&__cq->task_lock);
	spin_unlock_irqrestore(&cct->task_lock, flags);
}

static void queue_comp_task(struct ehca_cq *__cq)
{
	int cpu_id;
	struct ehca_cpu_comp_task *cct;
	int cq_jobs;
	unsigned long flags;

	cpu_id = find_next_online_cpu(pool);
	BUG_ON(!cpu_online(cpu_id));

	cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu_id);
	BUG_ON(!cct);

	spin_lock_irqsave(&cct->task_lock, flags);
	cq_jobs = cct->cq_jobs;
	spin_unlock_irqrestore(&cct->task_lock, flags);
	if (cq_jobs > 0) {
		cpu_id = find_next_online_cpu(pool);
		cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu_id);
		BUG_ON(!cct);
	}

	__queue_comp_task(__cq, cct);
}

static void run_comp_task(struct ehca_cpu_comp_task *cct)
{
	struct ehca_cq *cq;
	unsigned long flags;

	spin_lock_irqsave(&cct->task_lock, flags);

	while (!list_empty(&cct->cq_list)) {
		cq = list_entry(cct->cq_list.next, struct ehca_cq, entry);
		spin_unlock_irqrestore(&cct->task_lock, flags);

		comp_event_callback(cq);
		if (atomic_dec_and_test(&cq->nr_events))
			wake_up(&cq->wait_completion);

		spin_lock_irqsave(&cct->task_lock, flags);
		spin_lock(&cq->task_lock);
		cq->nr_callbacks--;
		if (!cq->nr_callbacks) {
			list_del_init(cct->cq_list.next);
			cct->cq_jobs--;
		}
		spin_unlock(&cq->task_lock);
	}

	spin_unlock_irqrestore(&cct->task_lock, flags);
}

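/*
 * Per-CPU completion thread: sleep until CQs are queued on cq_list, then run
 * their completion callbacks via run_comp_task().
 */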
static int comp_task(void *__cct)
{
	struct ehca_cpu_comp_task *cct = __cct;
	int cql_empty;
	DECLARE_WAITQUEUE(wait, current);

	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		add_wait_queue(&cct->wait_queue, &wait);

		spin_lock_irq(&cct->task_lock);
		cql_empty = list_empty(&cct->cq_list);
		spin_unlock_irq(&cct->task_lock);
		if (cql_empty)
			schedule();
		else
			__set_current_state(TASK_RUNNING);

		remove_wait_queue(&cct->wait_queue, &wait);

		spin_lock_irq(&cct->task_lock);
		cql_empty = list_empty(&cct->cq_list);
		spin_unlock_irq(&cct->task_lock);
		if (!cql_empty)
			run_comp_task(__cct);

		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);

	return 0;
}

static struct task_struct *create_comp_task(struct ehca_comp_pool *pool,
					    int cpu)
{
	struct ehca_cpu_comp_task *cct;

	cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
	spin_lock_init(&cct->task_lock);
	INIT_LIST_HEAD(&cct->cq_list);
	init_waitqueue_head(&cct->wait_queue);
	cct->task = kthread_create(comp_task, cct, "ehca_comp/%d", cpu);

	return cct->task;
}

static void destroy_comp_task(struct ehca_comp_pool *pool,
			      int cpu)
{
	struct ehca_cpu_comp_task *cct;
	struct task_struct *task;
	unsigned long flags_cct;

	cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);

	spin_lock_irqsave(&cct->task_lock, flags_cct);
	task = cct->task;
	cct->task = NULL;
	cct->cq_jobs = 0;
	spin_unlock_irqrestore(&cct->task_lock, flags_cct);

	if (task)
		kthread_stop(task);
}

#ifdef CONFIG_HOTPLUG_CPU
static void take_over_work(struct ehca_comp_pool *pool,
			   int cpu)
{
	struct ehca_cpu_comp_task *cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
	LIST_HEAD(list);
	struct ehca_cq *cq;
	unsigned long flags_cct;

	spin_lock_irqsave(&cct->task_lock, flags_cct);

	list_splice_init(&cct->cq_list, &list);

	while (!list_empty(&list)) {
		/* take entries from the spliced-off local list, not from
		 * cct->cq_list, which list_splice_init() has emptied */
		cq = list_entry(list.next, struct ehca_cq, entry);

		list_del(&cq->entry);
		__queue_comp_task(cq, per_cpu_ptr(pool->cpu_comp_tasks,
						  smp_processor_id()));
	}

	spin_unlock_irqrestore(&cct->task_lock, flags_cct);
}

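/*
 * CPU hotplug notifier: create/destroy the per-CPU completion task and
 * migrate queued work when CPUs come and go.
 */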
static int comp_pool_callback(struct notifier_block *nfb,
			      unsigned long action,
			      void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct ehca_cpu_comp_task *cct;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		ehca_gen_dbg("CPU: %x (CPU_PREPARE)", cpu);
		if (!create_comp_task(pool, cpu)) {
			ehca_gen_err("Can't create comp_task for cpu: %x", cpu);
			return NOTIFY_BAD;
		}
		break;
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
		ehca_gen_dbg("CPU: %x (CPU_CANCELED)", cpu);
		cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
		kthread_bind(cct->task, any_online_cpu(cpu_online_map));
		destroy_comp_task(pool, cpu);
		break;
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		ehca_gen_dbg("CPU: %x (CPU_ONLINE)", cpu);
		cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
		kthread_bind(cct->task, cpu);
		wake_up_process(cct->task);
		break;
	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		ehca_gen_dbg("CPU: %x (CPU_DOWN_PREPARE)", cpu);
		break;
	case CPU_DOWN_FAILED:
	case CPU_DOWN_FAILED_FROZEN:
		ehca_gen_dbg("CPU: %x (CPU_DOWN_FAILED)", cpu);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		ehca_gen_dbg("CPU: %x (CPU_DEAD)", cpu);
		destroy_comp_task(pool, cpu);
		take_over_work(pool, cpu);
		break;
	}

	return NOTIFY_OK;
}
#endif

int ehca_create_comp_pool(void)
{
	int cpu;
	struct task_struct *task;

	if (!ehca_scaling_code)
		return 0;

	pool = kzalloc(sizeof(struct ehca_comp_pool), GFP_KERNEL);
	if (pool == NULL)
		return -ENOMEM;

	spin_lock_init(&pool->last_cpu_lock);
	pool->last_cpu = any_online_cpu(cpu_online_map);

	pool->cpu_comp_tasks = alloc_percpu(struct ehca_cpu_comp_task);
	if (pool->cpu_comp_tasks == NULL) {
		kfree(pool);
		return -EINVAL;
	}

	for_each_online_cpu(cpu) {
		task = create_comp_task(pool, cpu);
		if (task) {
			kthread_bind(task, cpu);
			wake_up_process(task);
		}
	}

#ifdef CONFIG_HOTPLUG_CPU
	comp_pool_callback_nb.notifier_call = comp_pool_callback;
	comp_pool_callback_nb.priority = 0;
	register_cpu_notifier(&comp_pool_callback_nb);
#endif

	printk(KERN_INFO "eHCA scaling code enabled\n");

	return 0;
}

void ehca_destroy_comp_pool(void)
{
	int i;

	if (!ehca_scaling_code)
		return;

#ifdef CONFIG_HOTPLUG_CPU
	unregister_cpu_notifier(&comp_pool_callback_nb);
#endif

	for (i = 0; i < NR_CPUS; i++) {
		if (cpu_online(i))
			destroy_comp_task(pool, i);
	}
	free_percpu(pool->cpu_comp_tasks);
	kfree(pool);
}