/*
 *	SGI UltraViolet TLB flush routines.
 *
 *	(c) 2008 Cliff Wickman <cpw@sgi.com>, SGI.
 *
 *	This code is released under the GNU General Public License version 2 or
 *	later.
 */
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/kernel.h>

#include <asm/mmu_context.h>
#include <asm/uv/uv.h>
#include <asm/uv/uv_mmrs.h>
#include <asm/uv/uv_hub.h>
#include <asm/uv/uv_bau.h>
#include <asm/apic.h>
#include <asm/idle.h>
#include <asm/tsc.h>
#include <asm/irq_vectors.h>

static struct bau_control	**uv_bau_table_bases __read_mostly;
static int			uv_bau_retry_limit __read_mostly;

/* position of pnode (which is nasid>>1): */
static int			uv_nshift __read_mostly;

static unsigned long		uv_mmask __read_mostly;

static DEFINE_PER_CPU(struct ptc_stats, ptcstats);
static DEFINE_PER_CPU(struct bau_control, bau_control);

/*
 * Free a software acknowledge hardware resource by clearing its Pending
 * bit. This will return a reply to the sender.
 * If the message has timed out, a reply has already been sent by the
 * hardware but the resource has not been released. In that case our
 * clear of the Timeout bit (as well) will free the resource. No reply will
 * be sent (the hardware will only do one reply per message).
 */
static void uv_reply_to_message(int resource,
				struct bau_payload_queue_entry *msg,
				struct bau_msg_status *msp)
{
	unsigned long dw;

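	/*
	 * Set both the resource's Pending bit and the matching Timeout bit
	 * (UV_SW_ACK_NPENDING positions above it) in one word; writing that
	 * word to the "alias" MMR below clears the bits, so the resource is
	 * freed whether or not the hardware had already timed it out.
	 */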
	dw = (1 << (resource + UV_SW_ACK_NPENDING)) | (1 << resource);
	msg->replied_to = 1;
	msg->sw_ack_vector = 0;
	if (msp)
		msp->seen_by.bits = 0;
	uv_write_local_mmr(UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS, dw);
}

/*
 * Do all the things a cpu should do for a TLB shootdown message.
 * Other cpu's may come here at the same time for this message.
 */
static void uv_bau_process_message(struct bau_payload_queue_entry *msg,
				   int msg_slot, int sw_ack_slot)
{
	unsigned long this_cpu_mask;
	struct bau_msg_status *msp;
	int cpu;

	msp = __get_cpu_var(bau_control).msg_statuses + msg_slot;
	cpu = uv_blade_processor_id();
	msg->number_of_cpus =
	    uv_blade_nr_online_cpus(uv_node_to_blade_id(numa_node_id()));
	this_cpu_mask = 1UL << cpu;
	if (msp->seen_by.bits & this_cpu_mask)
		return;
	atomic_or_long(&msp->seen_by.bits, this_cpu_mask);

	if (msg->replied_to == 1)
		return;

	if (msg->address == TLB_FLUSH_ALL) {
		local_flush_tlb();
		__get_cpu_var(ptcstats).alltlb++;
	} else {
		__flush_tlb_one(msg->address);
		__get_cpu_var(ptcstats).onetlb++;
	}

	__get_cpu_var(ptcstats).requestee++;

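	/*
	 * The last cpu to increment the acknowledge count replies to the
	 * sender and thereby frees the software acknowledge resource.
	 */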
	atomic_inc_short(&msg->acknowledge_count);
	if (msg->number_of_cpus == msg->acknowledge_count)
		uv_reply_to_message(sw_ack_slot, msg, msp);
}

/*
 * Examine the payload queue on one distribution node to see
 * which messages have not been seen, and which cpu(s) have not seen them.
 *
 * Returns the number of cpu's that have not responded.
 */
static int uv_examine_destination(struct bau_control *bau_tablesp, int sender)
{
	struct bau_payload_queue_entry *msg;
	struct bau_msg_status *msp;
	int count = 0;
	int i;
	int j;

	for (msg = bau_tablesp->va_queue_first, i = 0; i < DEST_Q_SIZE;
	     msg++, i++) {
		if ((msg->sending_cpu == sender) && (!msg->replied_to)) {
			msp = bau_tablesp->msg_statuses + i;
			printk(KERN_DEBUG
			       "blade %d: address:%#lx %d of %d, not cpu(s): ",
			       i, msg->address, msg->acknowledge_count,
			       msg->number_of_cpus);
			for (j = 0; j < msg->number_of_cpus; j++) {
				if (!((1L << j) & msp->seen_by.bits)) {
					count++;
					printk("%d ", j);
				}
			}
			printk("\n");
		}
	}
	return count;
}

/*
 * Examine the payload queue on all the distribution nodes to see
 * which messages have not been seen, and which cpu(s) have not seen them.
 *
 * Returns the number of cpu's that have not responded.
 */
static int uv_examine_destinations(struct bau_target_nodemask *distribution)
{
	int sender;
	int i;
	int count = 0;

	sender = smp_processor_id();
	for (i = 0; i < sizeof(struct bau_target_nodemask) * BITSPERBYTE; i++) {
		if (!bau_node_isset(i, distribution))
			continue;
		count += uv_examine_destination(uv_bau_table_bases[i], sender);
	}
	return count;
}

/*
 * wait for completion of a broadcast message
 *
 * return COMPLETE, RETRY or GIVEUP
 */
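/*
 * The per-descriptor status is a 2-bit field (UV_ACT_STATUS_MASK wide)
 * within an activation status MMR; mmr_offset and right_shift, set up by
 * the caller, select the field belonging to this cpu's descriptor.
 */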
static int uv_wait_completion(struct bau_desc *bau_desc,
			      unsigned long mmr_offset, int right_shift)
{
	int exams = 0;
	long destination_timeouts = 0;
	long source_timeouts = 0;
	unsigned long descriptor_status;

	while ((descriptor_status = (((unsigned long)
		uv_read_local_mmr(mmr_offset) >>
			right_shift) & UV_ACT_STATUS_MASK)) !=
			DESC_STATUS_IDLE) {
		if (descriptor_status == DESC_STATUS_SOURCE_TIMEOUT) {
			source_timeouts++;
			if (source_timeouts > SOURCE_TIMEOUT_LIMIT)
				source_timeouts = 0;
			__get_cpu_var(ptcstats).s_retry++;
			return FLUSH_RETRY;
		}
		/*
		 * spin here looking for progress at the destinations
		 */
		if (descriptor_status == DESC_STATUS_DESTINATION_TIMEOUT) {
			destination_timeouts++;
			if (destination_timeouts > DESTINATION_TIMEOUT_LIMIT) {
				/*
				 * returns number of cpus not responding
				 */
				if (uv_examine_destinations
				    (&bau_desc->distribution) == 0) {
					__get_cpu_var(ptcstats).d_retry++;
					return FLUSH_RETRY;
				}
				exams++;
				if (exams >= uv_bau_retry_limit) {
					printk(KERN_DEBUG
					       "uv_flush_tlb_others");
					printk("giving up on cpu %d\n",
					       smp_processor_id());
					return FLUSH_GIVEUP;
				}
				/*
				 * delays can hang the simulator
				 *    udelay(1000);
				 */
				destination_timeouts = 0;
			}
		}
		cpu_relax();
	}
	return FLUSH_COMPLETE;
}

/*
 * uv_flush_send_and_wait
 *
 * Send a broadcast and wait for a broadcast message to complete.
 *
 * The flush_mask contains the cpus the broadcast was sent to.
 *
 * Returns NULL if all remote flushing was done. The mask is zeroed.
 * Returns @flush_mask if some remote flushing remains to be done. The
 * mask will have some bits still set.
 */
const struct cpumask *uv_flush_send_and_wait(int cpu, int this_blade,
					     struct bau_desc *bau_desc,
					     struct cpumask *flush_mask)
{
	int completion_status = 0;
	int right_shift;
	int tries = 0;
	int blade;
	int bit;
	unsigned long mmr_offset;
	unsigned long index;
	cycles_t time1;
	cycles_t time2;

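	/*
	 * The status fields for the first UV_CPUS_PER_ACT_STATUS cpus live
	 * in ACTIVATION_STATUS_0; the rest are in ACTIVATION_STATUS_1.
	 * Compute which register, and which bit offset within it, holds
	 * this cpu's descriptor status.
	 */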
	if (cpu < UV_CPUS_PER_ACT_STATUS) {
		mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_0;
		right_shift = cpu * UV_ACT_STATUS_SIZE;
	} else {
		mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_1;
		right_shift =
		    ((cpu - UV_CPUS_PER_ACT_STATUS) * UV_ACT_STATUS_SIZE);
	}
	time1 = get_cycles();
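	/*
	 * Writing this cpu's descriptor index, with the "push" bit set, to
	 * the activation control MMR hands the descriptor to the BAU
	 * hardware; resend for as long as the wait ends in FLUSH_RETRY.
	 */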
	do {
		tries++;
		index = (1UL << UVH_LB_BAU_SB_ACTIVATION_CONTROL_PUSH_SHFT) |
			cpu;
		uv_write_local_mmr(UVH_LB_BAU_SB_ACTIVATION_CONTROL, index);
		completion_status = uv_wait_completion(bau_desc, mmr_offset,
						       right_shift);
	} while (completion_status == FLUSH_RETRY);
	time2 = get_cycles();
	__get_cpu_var(ptcstats).sflush += (time2 - time1);
	if (tries > 1)
		__get_cpu_var(ptcstats).retriesok++;

	if (completion_status == FLUSH_GIVEUP) {
		/*
		 * Cause the caller to do an IPI-style TLB shootdown on
		 * the cpu's, all of which are still in the mask.
		 */
		__get_cpu_var(ptcstats).ptc_i++;
		return flush_mask;
	}

	/*
	 * Success, so clear the remote cpu's from the mask so we don't
	 * use the IPI method of shootdown on them.
	 */
	for_each_cpu(bit, flush_mask) {
		blade = uv_cpu_to_blade_id(bit);
		if (blade == this_blade)
			continue;
		cpumask_clear_cpu(bit, flush_mask);
	}
	if (!cpumask_empty(flush_mask))
		return flush_mask;
	return NULL;
}

/*
 * uv_flush_tlb_others - globally purge translation cache of a virtual
 * address or all TLB's
 * @cpumask: mask of all cpu's in which the address is to be removed
 * @mm: mm_struct containing virtual address range
 * @va: virtual address to be removed (or TLB_FLUSH_ALL for all TLB's on cpu)
 * @cpu: the current cpu
 *
 * This is the entry point for initiating any UV global TLB shootdown.
 *
 * Purges the translation caches of all specified processors of the given
 * virtual address, or purges all TLB's on specified processors.
 *
 * The caller has derived the cpumask from the mm_struct. This function
 * is called only if there are bits set in the mask. (e.g. flush_tlb_page())
 *
 * The cpumask is converted into a nodemask of the nodes containing
 * the cpus.
 *
 * Note that this function should be called with preemption disabled.
 *
 * Returns NULL if all remote flushing was done.
 * Returns pointer to cpumask if some remote flushing remains to be
 * done. The returned pointer is valid until preemption is re-enabled.
 */
const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
					  struct mm_struct *mm,
					  unsigned long va, unsigned int cpu)
{
	static DEFINE_PER_CPU(cpumask_t, flush_tlb_mask);
	struct cpumask *flush_mask = &__get_cpu_var(flush_tlb_mask);
	int i;
	int bit;
	int blade;
	int uv_cpu;
	int this_blade;
	int locals = 0;
	struct bau_desc *bau_desc;

	cpumask_andnot(flush_mask, cpumask, cpumask_of(cpu));

	uv_cpu = uv_blade_processor_id();
	this_blade = uv_numa_blade_id();
	bau_desc = __get_cpu_var(bau_control).descriptor_base;
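	/*
	 * Each cpu gets its own group of UV_ITEMS_PER_DESCRIPTOR entries
	 * in the blade's descriptor table; index to this cpu's group.
	 */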
	bau_desc += UV_ITEMS_PER_DESCRIPTOR * uv_cpu;

	bau_nodes_clear(&bau_desc->distribution, UV_DISTRIBUTION_SIZE);

	i = 0;
	for_each_cpu(bit, flush_mask) {
		blade = uv_cpu_to_blade_id(bit);
		BUG_ON(blade > (UV_DISTRIBUTION_SIZE - 1));
		if (blade == this_blade) {
			locals++;
			continue;
		}
		bau_node_set(blade, &bau_desc->distribution);
		i++;
	}
	if (i == 0) {
		/*
		 * no off_node flushing; return status for local node
		 */
		if (locals)
			return flush_mask;
		else
			return NULL;
	}
	__get_cpu_var(ptcstats).requestor++;
	__get_cpu_var(ptcstats).ntargeted += i;

	bau_desc->payload.address = va;
	bau_desc->payload.sending_cpu = cpu;

	return uv_flush_send_and_wait(uv_cpu, this_blade, bau_desc, flush_mask);
}

/*
 * The BAU message interrupt comes here. (registered by set_intr_gate)
 * See entry_64.S
 *
 * We received a broadcast assist message.
 *
 * Interrupts may have been disabled; this interrupt could represent
 * the receipt of several messages.
 *
 * All cores/threads on this node get this interrupt.
 * The last one to see it does the s/w ack.
 * (the resource will not be freed until noninterruptible cpus see this
 *  interrupt; hardware will timeout the s/w ack and reply ERROR)
 */
void uv_bau_message_interrupt(struct pt_regs *regs)
{
	struct bau_payload_queue_entry *va_queue_first;
	struct bau_payload_queue_entry *va_queue_last;
	struct bau_payload_queue_entry *msg;
	struct pt_regs *old_regs = set_irq_regs(regs);
	cycles_t time1;
	cycles_t time2;
	int msg_slot;
	int sw_ack_slot;
	int fw;
	int count = 0;
	unsigned long local_pnode;

	ack_APIC_irq();
	exit_idle();
	irq_enter();

	time1 = get_cycles();

	local_pnode = uv_blade_to_pnode(uv_numa_blade_id());

	va_queue_first = __get_cpu_var(bau_control).va_queue_first;
	va_queue_last = __get_cpu_var(bau_control).va_queue_last;

	msg = __get_cpu_var(bau_control).bau_msg_head;
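	/*
	 * The payload queue is circular; resume at the saved position and
	 * handle every message whose s/w ack vector shows work pending.
	 */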
	while (msg->sw_ack_vector) {
		count++;
		fw = msg->sw_ack_vector;
		msg_slot = msg - va_queue_first;
		sw_ack_slot = ffs(fw) - 1;

		uv_bau_process_message(msg, msg_slot, sw_ack_slot);

		msg++;
		if (msg > va_queue_last)
			msg = va_queue_first;
		__get_cpu_var(bau_control).bau_msg_head = msg;
	}
	if (!count)
		__get_cpu_var(ptcstats).nomsg++;
	else if (count > 1)
		__get_cpu_var(ptcstats).multmsg++;

	time2 = get_cycles();
	__get_cpu_var(ptcstats).dflush += (time2 - time1);

	irq_exit();
	set_irq_regs(old_regs);
}

static void uv_enable_timeouts(void)
{
	int i;
	int blade;
	int last_blade;
	int pnode;
	int cur_cpu = 0;
	unsigned long apicid;

	last_blade = -1;
	for_each_online_node(i) {
		blade = uv_node_to_blade_id(i);
		if (blade == last_blade)
			continue;
		last_blade = blade;
		apicid = per_cpu(x86_cpu_to_apicid, cur_cpu);
		pnode = uv_blade_to_pnode(blade);
		cur_cpu += uv_blade_nr_possible_cpus(i);
	}
}

static void *uv_ptc_seq_start(struct seq_file *file, loff_t *offset)
{
	if (*offset < num_possible_cpus())
		return offset;
	return NULL;
}

static void *uv_ptc_seq_next(struct seq_file *file, void *data, loff_t *offset)
{
	(*offset)++;
	if (*offset < num_possible_cpus())
		return offset;
	return NULL;
}

static void uv_ptc_seq_stop(struct seq_file *file, void *data)
{
}

/*
 * Display the statistics through /proc
 * 'data' points to the cpu number
 */
static int uv_ptc_seq_show(struct seq_file *file, void *data)
{
	struct ptc_stats *stat;
	int cpu;

	cpu = *(loff_t *)data;

	if (!cpu) {
		seq_printf(file,
		"# cpu requestor requestee one all sretry dretry ptc_i ");
		seq_printf(file,
		"sw_ack sflush dflush sok dnomsg dmult starget\n");
	}
	if (cpu < num_possible_cpus() && cpu_online(cpu)) {
		stat = &per_cpu(ptcstats, cpu);
		seq_printf(file, "cpu %d %ld %ld %ld %ld %ld %ld %ld ",
			   cpu, stat->requestor,
			   stat->requestee, stat->onetlb, stat->alltlb,
			   stat->s_retry, stat->d_retry, stat->ptc_i);
		seq_printf(file, "%lx %ld %ld %ld %ld %ld %ld\n",
			   uv_read_global_mmr64(uv_blade_to_pnode
					(uv_cpu_to_blade_id(cpu)),
					UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE),
			   stat->sflush, stat->dflush,
			   stat->retriesok, stat->nomsg,
			   stat->multmsg, stat->ntargeted);
	}

	return 0;
}

/*
 *  0: display meaning of the statistics
 * >0: retry limit
 */
static ssize_t uv_ptc_proc_write(struct file *file, const char __user *user,
				 size_t count, loff_t *data)
{
	long newmode;
	char optstr[64];

	if (count == 0 || count > sizeof(optstr))
		return -EINVAL;
	if (copy_from_user(optstr, user, count))
		return -EFAULT;
	optstr[count - 1] = '\0';
	if (strict_strtoul(optstr, 10, &newmode) < 0) {
		printk(KERN_DEBUG "%s is invalid\n", optstr);
		return -EINVAL;
	}

	if (newmode == 0) {
		printk(KERN_DEBUG "# cpu:      cpu number\n");
		printk(KERN_DEBUG
		"requestor:  times this cpu was the flush requestor\n");
		printk(KERN_DEBUG
		"requestee:  times this cpu was requested to flush its TLBs\n");
		printk(KERN_DEBUG
		"one:        times requested to flush a single address\n");
		printk(KERN_DEBUG
		"all:        times requested to flush all TLB's\n");
		printk(KERN_DEBUG
		"sretry:     number of retries of source-side timeouts\n");
		printk(KERN_DEBUG
		"dretry:     number of retries of destination-side timeouts\n");
		printk(KERN_DEBUG
		"ptc_i:      times UV fell through to IPI-style flushes\n");
		printk(KERN_DEBUG
		"sw_ack:     image of UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE\n");
		printk(KERN_DEBUG
		"sflush_us:  cycles spent in uv_flush_tlb_others()\n");
		printk(KERN_DEBUG
		"dflush_us:  cycles spent in handling flush requests\n");
		printk(KERN_DEBUG "sok:        successes on retry\n");
		printk(KERN_DEBUG "dnomsg:     interrupts with no message\n");
		printk(KERN_DEBUG
		"dmult:      interrupts with multiple messages\n");
		printk(KERN_DEBUG "starget:    nodes targeted\n");
	} else {
		uv_bau_retry_limit = newmode;
		printk(KERN_DEBUG "timeout retry limit:%d\n",
		       uv_bau_retry_limit);
	}

	return count;
}

static const struct seq_operations uv_ptc_seq_ops = {
	.start		= uv_ptc_seq_start,
	.next		= uv_ptc_seq_next,
	.stop		= uv_ptc_seq_stop,
	.show		= uv_ptc_seq_show
};

static int uv_ptc_proc_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &uv_ptc_seq_ops);
}

static const struct file_operations proc_uv_ptc_operations = {
	.open		= uv_ptc_proc_open,
	.read		= seq_read,
	.write		= uv_ptc_proc_write,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static int __init uv_ptc_init(void)
{
	struct proc_dir_entry *proc_uv_ptc;

	if (!is_uv_system())
		return 0;

	proc_uv_ptc = create_proc_entry(UV_PTC_BASENAME, 0444, NULL);
	if (!proc_uv_ptc) {
		printk(KERN_ERR "unable to create %s proc entry\n",
		       UV_PTC_BASENAME);
		return -EINVAL;
	}
	proc_uv_ptc->proc_fops = &proc_uv_ptc_operations;
	return 0;
}

/*
 * begin the initialization of the per-blade control structures
 */
static struct bau_control * __init uv_table_bases_init(int blade, int node)
{
	int i;
	struct bau_msg_status *msp;
	struct bau_control *bau_tabp;

	bau_tabp =
	    kmalloc_node(sizeof(struct bau_control), GFP_KERNEL, node);
	BUG_ON(!bau_tabp);

	bau_tabp->msg_statuses =
	    kmalloc_node(sizeof(struct bau_msg_status) *
			 DEST_Q_SIZE, GFP_KERNEL, node);
	BUG_ON(!bau_tabp->msg_statuses);

	for (i = 0, msp = bau_tabp->msg_statuses; i < DEST_Q_SIZE; i++, msp++)
		bau_cpubits_clear(&msp->seen_by, (int)
				  uv_blade_nr_possible_cpus(blade));

	uv_bau_table_bases[blade] = bau_tabp;

	return bau_tabp;
}

/*
 * finish the initialization of the per-blade control structures
 */
static void __init
uv_table_bases_finish(int blade, int node, int cur_cpu,
		      struct bau_control *bau_tablesp,
		      struct bau_desc *adp)
{
	struct bau_control *bcp;
	int i;

	for (i = cur_cpu; i < cur_cpu + uv_blade_nr_possible_cpus(blade); i++) {
		bcp = (struct bau_control *)&per_cpu(bau_control, i);

		bcp->bau_msg_head	= bau_tablesp->va_queue_first;
		bcp->va_queue_first	= bau_tablesp->va_queue_first;
		bcp->va_queue_last	= bau_tablesp->va_queue_last;
		bcp->msg_statuses	= bau_tablesp->msg_statuses;
		bcp->descriptor_base	= adp;
	}
}

/*
 * initialize the sending side's sending buffers
 */
static struct bau_desc * __init
uv_activation_descriptor_init(int node, int pnode)
{
	int i;
	unsigned long pa;
	unsigned long m;
	unsigned long n;
	unsigned long mmr_image;
	struct bau_desc *adp;
	struct bau_desc *ad2;

	adp = (struct bau_desc *)
	    kmalloc_node(16384, GFP_KERNEL, node);
	BUG_ON(!adp);

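	/*
	 * Split the table's physical address into the node number (upper
	 * bits) and the offset within the node, the form in which the
	 * descriptor base MMR is programmed below.
	 */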
	pa = __pa((unsigned long)adp);
	n = pa >> uv_nshift;
	m = pa & uv_mmask;

	mmr_image = uv_read_global_mmr64(pnode, UVH_LB_BAU_SB_DESCRIPTOR_BASE);
	if (mmr_image) {
		uv_write_global_mmr64(pnode, (unsigned long)
				      UVH_LB_BAU_SB_DESCRIPTOR_BASE,
				      (n << UV_DESC_BASE_PNODE_SHIFT | m));
	}

	for (i = 0, ad2 = adp; i < UV_ACTIVATION_DESCRIPTOR_SIZE; i++, ad2++) {
		memset(ad2, 0, sizeof(struct bau_desc));
		ad2->header.sw_ack_flag = 1;
		ad2->header.base_dest_nodeid =
		    uv_blade_to_pnode(uv_cpu_to_blade_id(0));
		ad2->header.command = UV_NET_ENDPOINT_INTD;
		ad2->header.int_both = 1;
		/*
		 * all others need to be set to zero:
		 *   fairness chaining multilevel count replied_to
		 */
	}
	return adp;
}

/*
 * initialize the destination side's receiving buffers
 */
static struct bau_payload_queue_entry * __init
uv_payload_queue_init(int node, int pnode, struct bau_control *bau_tablesp)
{
	struct bau_payload_queue_entry *pqp;
	char *cp;

	pqp = (struct bau_payload_queue_entry *) kmalloc_node(
		(DEST_Q_SIZE + 1) * sizeof(struct bau_payload_queue_entry),
		GFP_KERNEL, node);
	BUG_ON(!pqp);

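	/*
	 * Round the queue start up to a 32-byte boundary (an entry is 32
	 * bytes); the extra entry allocated above absorbs the rounding.
	 */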
	cp = (char *)pqp + 31;
	pqp = (struct bau_payload_queue_entry *)(((unsigned long)cp >> 5) << 5);
	bau_tablesp->va_queue_first = pqp;
	uv_write_global_mmr64(pnode,
			      UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST,
			      ((unsigned long)pnode <<
			       UV_PAYLOADQ_PNODE_SHIFT) |
			      uv_physnodeaddr(pqp));
	uv_write_global_mmr64(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL,
			      uv_physnodeaddr(pqp));
	bau_tablesp->va_queue_last = pqp + (DEST_Q_SIZE - 1);
	uv_write_global_mmr64(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST,
			      (unsigned long)
			      uv_physnodeaddr(bau_tablesp->va_queue_last));
	memset(pqp, 0, sizeof(struct bau_payload_queue_entry) * DEST_Q_SIZE);

	return pqp;
}

/*
 * Initialization of each UV blade's structures
 */
static int __init uv_init_blade(int blade, int node, int cur_cpu)
{
	int pnode;
	unsigned long pa;
	unsigned long apicid;
	struct bau_desc *adp;
	struct bau_payload_queue_entry *pqp;
	struct bau_control *bau_tablesp;

	bau_tablesp = uv_table_bases_init(blade, node);
	pnode = uv_blade_to_pnode(blade);
	adp = uv_activation_descriptor_init(node, pnode);
	pqp = uv_payload_queue_init(node, pnode, bau_tablesp);
	uv_table_bases_finish(blade, node, cur_cpu, bau_tablesp, adp);
	/*
	 * The below initialization can't be done in firmware because the
	 * messaging IRQ will be determined by the OS.
	 */
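	/*
	 * Point BAU message interrupts at this blade's first cpu: per the
	 * write below, the low byte of UVH_BAU_DATA_CONFIG carries the
	 * vector (UV_BAU_MESSAGE) and bits above 32 the target apicid.
	 */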
	apicid = per_cpu(x86_cpu_to_apicid, cur_cpu);
	pa = uv_read_global_mmr64(pnode, UVH_BAU_DATA_CONFIG);
	if ((pa & 0xff) != UV_BAU_MESSAGE) {
		uv_write_global_mmr64(pnode, UVH_BAU_DATA_CONFIG,
				      ((apicid << 32) | UV_BAU_MESSAGE));
	}
	return 0;
}

/*
 * Initialization of BAU-related structures
 */
static int __init uv_bau_init(void)
{
	int blade;
	int node;
	int nblades;
	int last_blade;
	int cur_cpu = 0;

	if (!is_uv_system())
		return 0;

	uv_bau_retry_limit = 1;
	uv_nshift = uv_hub_info->n_val;
	uv_mmask = (1UL << uv_hub_info->n_val) - 1;
	nblades = 0;
	last_blade = -1;
	for_each_online_node(node) {
		blade = uv_node_to_blade_id(node);
		if (blade == last_blade)
			continue;
		last_blade = blade;
		nblades++;
	}
	uv_bau_table_bases = (struct bau_control **)
	    kmalloc(nblades * sizeof(struct bau_control *), GFP_KERNEL);
	BUG_ON(!uv_bau_table_bases);

	last_blade = -1;
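	/*
	 * Second pass over the online nodes: initialize each distinct
	 * blade, advancing cur_cpu past the cpus that blade owns.
	 */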
	for_each_online_node(node) {
		blade = uv_node_to_blade_id(node);
		if (blade == last_blade)
			continue;
		last_blade = blade;
		uv_init_blade(blade, node, cur_cpu);
		cur_cpu += uv_blade_nr_possible_cpus(blade);
	}
	alloc_intr_gate(UV_BAU_MESSAGE, uv_bau_message_intr1);
	uv_enable_timeouts();

	return 0;
}
__initcall(uv_bau_init);
__initcall(uv_ptc_init);