4 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
6 #include <linux/ring_buffer.h>
7 #include <linux/trace_clock.h>
8 #include <linux/ftrace_irq.h>
9 #include <linux/spinlock.h>
10 #include <linux/debugfs.h>
11 #include <linux/uaccess.h>
12 #include <linux/hardirq.h>
13 #include <linux/module.h>
14 #include <linux/percpu.h>
15 #include <linux/mutex.h>
16 #include <linux/init.h>
17 #include <linux/hash.h>
18 #include <linux/list.h>
19 #include <linux/cpu.h>
25 * The ring buffer header is special. We must manually keep it up to date.
27 int ring_buffer_print_entry_header(struct trace_seq *s)
31 ret = trace_seq_printf(s, "# compressed entry header\n");
32 ret = trace_seq_printf(s, "\ttype_len : 5 bits\n");
33 ret = trace_seq_printf(s, "\ttime_delta : 27 bits\n");
34 ret = trace_seq_printf(s, "\tarray : 32 bits\n");
35 ret = trace_seq_printf(s, "\n");
36 ret = trace_seq_printf(s, "\tpadding : type == %d\n",
37 RINGBUF_TYPE_PADDING);
38 ret = trace_seq_printf(s, "\ttime_extend : type == %d\n",
39 RINGBUF_TYPE_TIME_EXTEND);
40 ret = trace_seq_printf(s, "\tdata max type_len == %d\n",
41 RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
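/*
 * For reference, the header printed above matches the layout of
 * struct ring_buffer_event; a sketch of the declaration is shown here
 * for illustration (the authoritative version lives in
 * include/linux/ring_buffer.h):
 *
 *	struct ring_buffer_event {
 *		u32	type_len:5, time_delta:27;
 *		u32	array[];
 *	};
 */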
47 * The ring buffer is made up of a list of pages. A separate list of pages is
48 * allocated for each CPU. A writer may only write to a buffer that is
49 * associated with the CPU it is currently executing on. A reader may read
50 * from any per cpu buffer.
52 * The reader is special. For each per cpu buffer, the reader has its own
53 * reader page. When a reader has read the entire reader page, this reader
54 * page is swapped with another page in the ring buffer.
56 * Now, as long as the writer is off the reader page, the reader can do
57 * whatever it wants with that page. The writer will never write to that page
58 * again (as long as it is out of the ring buffer).
60 * Here's some silly ASCII art.
63 * |reader| RING BUFFER
65 * +------+ +---+ +---+ +---+
74 * |reader| RING BUFFER
75 * |page |------------------v
76 * +------+ +---+ +---+ +---+
85 * |reader| RING BUFFER
86 * |page |------------------v
87 * +------+ +---+ +---+ +---+
92 * +------------------------------+
96 * |buffer| RING BUFFER
97 * |page |------------------v
98 * +------+ +---+ +---+ +---+
100 * | New +---+ +---+ +---+
103 * +------------------------------+
106 * After we make this swap, the reader can hand this page off to the splice
107 * code and be done with it. It can even allocate a new page if it needs to
108 * and swap that into the ring buffer.
110 * We will be using cmpxchg soon to make all this lockless.
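 *
 * A rough sketch of the asymmetry described above ('buffer', 'cpu' and
 * struct my_event are illustrative only): a writer always writes to the
 * buffer of the CPU it is running on, while a reader names the CPU it
 * wants to drain:
 *
 *	u64 ts;
 *	struct my_event data = { 0 };
 *	struct ring_buffer_event *event;
 *
 *	ring_buffer_write(buffer, sizeof(data), &data);	// writer side
 *	event = ring_buffer_consume(buffer, cpu, &ts);	// reader side
 *
 * Once the reader page has been fully consumed, the next read swaps it
 * with a page from the ring buffer as shown in the ASCII art above.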
115 * A fast way to enable or disable all ring buffers is to
116 * call tracing_on or tracing_off. Turning off the ring buffers
117 * prevents all ring buffers from being recorded to.
118 * Turning this switch on, makes it OK to write to the
119 * ring buffer, if the ring buffer is enabled itself.
121 * There are three layers that must be on in order to write
122 * to the ring buffer.
124 * 1) This global flag must be set.
125 * 2) The ring buffer must be enabled for recording.
126 * 3) The per cpu buffer must be enabled for recording.
128 * In case of an anomaly, this global flag has a bit set that
129 * will permanently disable all ring buffers.
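 *
 * A sketch of the three layers in API terms ('buffer' and 'cpu' are the
 * caller's handles; the enable calls undo matching earlier disables):
 *
 *	tracing_on();					// 1) global flag
 *	ring_buffer_record_enable(buffer);		// 2) the ring buffer
 *	ring_buffer_record_enable_cpu(buffer, cpu);	// 3) this cpu's buffer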
133 * Global flag to disable all recording to ring buffers
134 * This has two bits: ON, DISABLED
138 * 0 0 : ring buffers are off
139 * 1 0 : ring buffers are on
140 * X 1 : ring buffers are permanently disabled
144 RB_BUFFERS_ON_BIT = 0,
145 RB_BUFFERS_DISABLED_BIT = 1,
149 RB_BUFFERS_ON = 1 << RB_BUFFERS_ON_BIT,
150 RB_BUFFERS_DISABLED = 1 << RB_BUFFERS_DISABLED_BIT,
153 static unsigned long ring_buffer_flags __read_mostly = RB_BUFFERS_ON;
155 #define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data)
158 * tracing_on - enable all tracing buffers
160 * This function enables all tracing buffers that may have been
161 * disabled with tracing_off.
163 void tracing_on(void)
165 set_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
167 EXPORT_SYMBOL_GPL(tracing_on);
170 * tracing_off - turn off all tracing buffers
172 * This function stops all tracing buffers from recording data.
173 * It does not disable any overhead the tracers themselves may
174 * be causing. This function simply causes all recording to
175 * the ring buffers to fail.
177 void tracing_off(void)
179 clear_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
181 EXPORT_SYMBOL_GPL(tracing_off);
184 * tracing_off_permanent - permanently disable ring buffers
186 * This function, once called, will disable all ring buffers
189 void tracing_off_permanent(void)
191 set_bit(RB_BUFFERS_DISABLED_BIT, &ring_buffer_flags);
195 * tracing_is_on - show state of ring buffers enabled
197 int tracing_is_on(void)
199 return ring_buffer_flags == RB_BUFFERS_ON;
201 EXPORT_SYMBOL_GPL(tracing_is_on);
205 #define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array))
206 #define RB_ALIGNMENT 4U
207 #define RB_MAX_SMALL_DATA (RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
209 /* define RINGBUF_TYPE_DATA for 'case RINGBUF_TYPE_DATA:' */
210 #define RINGBUF_TYPE_DATA 0 ... RINGBUF_TYPE_DATA_TYPE_LEN_MAX
213 RB_LEN_TIME_EXTEND = 8,
214 RB_LEN_TIME_STAMP = 16,
217 static inline int rb_null_event(struct ring_buffer_event *event)
219 return event->type_len == RINGBUF_TYPE_PADDING
220 && event->time_delta == 0;
223 static inline int rb_discarded_event(struct ring_buffer_event *event)
225 return event->type_len == RINGBUF_TYPE_PADDING && event->time_delta;
228 static void rb_event_set_padding(struct ring_buffer_event *event)
230 event->type_len = RINGBUF_TYPE_PADDING;
231 event->time_delta = 0;
235 rb_event_data_length(struct ring_buffer_event *event)
240 length = event->type_len * RB_ALIGNMENT;
242 length = event->array[0];
243 return length + RB_EVNT_HDR_SIZE;
246 /* inline for ring buffer fast paths */
248 rb_event_length(struct ring_buffer_event *event)
250 switch (event->type_len) {
251 case RINGBUF_TYPE_PADDING:
252 if (rb_null_event(event))
255 return event->array[0] + RB_EVNT_HDR_SIZE;
257 case RINGBUF_TYPE_TIME_EXTEND:
258 return RB_LEN_TIME_EXTEND;
260 case RINGBUF_TYPE_TIME_STAMP:
261 return RB_LEN_TIME_STAMP;
263 case RINGBUF_TYPE_DATA:
264 return rb_event_data_length(event);
273 * ring_buffer_event_length - return the length of the event
274 * @event: the event to get the length of
276 unsigned ring_buffer_event_length(struct ring_buffer_event *event)
278 unsigned length = rb_event_length(event);
279 if (event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
281 length -= RB_EVNT_HDR_SIZE;
282 if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0]))
283 length -= sizeof(event->array[0]);
286 EXPORT_SYMBOL_GPL(ring_buffer_event_length);
288 /* inline for ring buffer fast paths */
290 rb_event_data(struct ring_buffer_event *event)
292 BUG_ON(event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
293 /* If length is in len field, then array[0] has the data */
295 return (void *)&event->array[0];
296 /* Otherwise length is in array[0] and array[1] has the data */
297 return (void *)&event->array[1];
301 * ring_buffer_event_data - return the data of the event
302 * @event: the event to get the data from
304 void *ring_buffer_event_data(struct ring_buffer_event *event)
306 return rb_event_data(event);
308 EXPORT_SYMBOL_GPL(ring_buffer_event_data);
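/*
 * Example of how the two accessors above are typically used together on
 * an event handed out by the read side (struct my_entry is illustrative
 * only):
 *
 *	struct my_entry *entry = ring_buffer_event_data(event);
 *	unsigned len = ring_buffer_event_length(event);
 *
 * Note that 'len' is the usable payload size, which may be slightly
 * larger than what was asked for at reserve time because of RB_ALIGNMENT
 * rounding.
 */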
310 #define for_each_buffer_cpu(buffer, cpu) \
311 for_each_cpu(cpu, buffer->cpumask)
314 #define TS_MASK ((1ULL << TS_SHIFT) - 1)
315 #define TS_DELTA_TEST (~TS_MASK)
317 struct buffer_data_page {
318 u64 time_stamp; /* page time stamp */
319 local_t commit; /* write committed index */
320 unsigned char data[]; /* data of buffer page */
324 struct list_head list; /* list of buffer pages */
325 local_t write; /* index for next write */
326 unsigned read; /* index for next read */
327 local_t entries; /* entries on this page */
328 struct buffer_data_page *page; /* Actual data page */
331 static void rb_init_page(struct buffer_data_page *bpage)
333 local_set(&bpage->commit, 0);
337 * ring_buffer_page_len - the size of data on the page.
338 * @page: The page to read
340 * Returns the amount of data on the page, including buffer page header.
342 size_t ring_buffer_page_len(void *page)
344 return local_read(&((struct buffer_data_page *)page)->commit)
349 * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing
352 static void free_buffer_page(struct buffer_page *bpage)
354 free_page((unsigned long)bpage->page);
359 * We need to fit the time_stamp delta into 27 bits.
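 * With the default nanosecond resolution clock (trace_clock_local) this
 * allows roughly 2^27 ns, i.e. about 134 ms, between events before the
 * delta overflows; larger gaps are handled by inserting a
 * RINGBUF_TYPE_TIME_EXTEND event (see rb_add_time_stamp()).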
361 static inline int test_time_stamp(u64 delta)
363 if (delta & TS_DELTA_TEST)
368 #define BUF_PAGE_SIZE (PAGE_SIZE - BUF_PAGE_HDR_SIZE)
370 /* Max payload is BUF_PAGE_SIZE - header (8 bytes) */
371 #define BUF_MAX_DATA_SIZE (BUF_PAGE_SIZE - (sizeof(u32) * 2))
373 /* Max number of timestamps that can fit on a page */
374 #define RB_TIMESTAMPS_PER_PAGE (BUF_PAGE_SIZE / RB_LEN_TIME_STAMP)
376 int ring_buffer_print_page_header(struct trace_seq *s)
378 struct buffer_data_page field;
381 ret = trace_seq_printf(s, "\tfield: u64 timestamp;\t"
382 "offset:0;\tsize:%u;\n",
383 (unsigned int)sizeof(field.time_stamp));
385 ret = trace_seq_printf(s, "\tfield: local_t commit;\t"
386 "offset:%u;\tsize:%u;\n",
387 (unsigned int)offsetof(typeof(field), commit),
388 (unsigned int)sizeof(field.commit));
390 ret = trace_seq_printf(s, "\tfield: char data;\t"
391 "offset:%u;\tsize:%u;\n",
392 (unsigned int)offsetof(typeof(field), data),
393 (unsigned int)BUF_PAGE_SIZE);
399 * head_page == tail_page && head == tail then buffer is empty.
401 struct ring_buffer_per_cpu {
403 struct ring_buffer *buffer;
404 spinlock_t reader_lock; /* serialize readers */
406 struct lock_class_key lock_key;
407 struct list_head pages;
408 struct buffer_page *head_page; /* read from head */
409 struct buffer_page *tail_page; /* write to tail */
410 struct buffer_page *commit_page; /* committed pages */
411 struct buffer_page *reader_page;
412 unsigned long nmi_dropped;
413 unsigned long commit_overrun;
414 unsigned long overrun;
419 atomic_t record_disabled;
426 atomic_t record_disabled;
427 cpumask_var_t cpumask;
431 struct ring_buffer_per_cpu **buffers;
433 #ifdef CONFIG_HOTPLUG_CPU
434 struct notifier_block cpu_notify;
439 struct ring_buffer_iter {
440 struct ring_buffer_per_cpu *cpu_buffer;
442 struct buffer_page *head_page;
446 /* buffer may be either ring_buffer or ring_buffer_per_cpu */
447 #define RB_WARN_ON(buffer, cond) \
449 int _____ret = unlikely(cond); \
451 atomic_inc(&buffer->record_disabled); \
457 /* Up this if you want to test the TIME_EXTENTS and normalization */
458 #define DEBUG_SHIFT 0
460 static inline u64 rb_time_stamp(struct ring_buffer *buffer, int cpu)
462 /* shift to debug/test normalization and TIME_EXTENTS */
463 return buffer->clock() << DEBUG_SHIFT;
466 u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu)
470 preempt_disable_notrace();
471 time = rb_time_stamp(buffer, cpu);
472 preempt_enable_no_resched_notrace();
476 EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);
478 void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer,
481 /* Just stupid testing the normalize function and deltas */
484 EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);
487 * check_pages - integrity check of buffer pages
488 * @cpu_buffer: CPU buffer with pages to test
490 * As a safety measure we check to make sure the data pages have not
493 static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
495 struct list_head *head = &cpu_buffer->pages;
496 struct buffer_page *bpage, *tmp;
498 if (RB_WARN_ON(cpu_buffer, head->next->prev != head))
500 if (RB_WARN_ON(cpu_buffer, head->prev->next != head))
503 list_for_each_entry_safe(bpage, tmp, head, list) {
504 if (RB_WARN_ON(cpu_buffer,
505 bpage->list.next->prev != &bpage->list))
507 if (RB_WARN_ON(cpu_buffer,
508 bpage->list.prev->next != &bpage->list))
515 static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
518 struct list_head *head = &cpu_buffer->pages;
519 struct buffer_page *bpage, *tmp;
524 for (i = 0; i < nr_pages; i++) {
525 bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
526 GFP_KERNEL, cpu_to_node(cpu_buffer->cpu));
529 list_add(&bpage->list, &pages);
531 addr = __get_free_page(GFP_KERNEL);
534 bpage->page = (void *)addr;
535 rb_init_page(bpage->page);
538 list_splice(&pages, head);
540 rb_check_pages(cpu_buffer);
545 list_for_each_entry_safe(bpage, tmp, &pages, list) {
546 list_del_init(&bpage->list);
547 free_buffer_page(bpage);
552 static struct ring_buffer_per_cpu *
553 rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
555 struct ring_buffer_per_cpu *cpu_buffer;
556 struct buffer_page *bpage;
560 cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
561 GFP_KERNEL, cpu_to_node(cpu));
565 cpu_buffer->cpu = cpu;
566 cpu_buffer->buffer = buffer;
567 spin_lock_init(&cpu_buffer->reader_lock);
568 cpu_buffer->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
569 INIT_LIST_HEAD(&cpu_buffer->pages);
571 bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
572 GFP_KERNEL, cpu_to_node(cpu));
574 goto fail_free_buffer;
576 cpu_buffer->reader_page = bpage;
577 addr = __get_free_page(GFP_KERNEL);
579 goto fail_free_reader;
580 bpage->page = (void *)addr;
581 rb_init_page(bpage->page);
583 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
585 ret = rb_allocate_pages(cpu_buffer, buffer->pages);
587 goto fail_free_reader;
589 cpu_buffer->head_page
590 = list_entry(cpu_buffer->pages.next, struct buffer_page, list);
591 cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;
596 free_buffer_page(cpu_buffer->reader_page);
603 static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
605 struct list_head *head = &cpu_buffer->pages;
606 struct buffer_page *bpage, *tmp;
608 free_buffer_page(cpu_buffer->reader_page);
610 list_for_each_entry_safe(bpage, tmp, head, list) {
611 list_del_init(&bpage->list);
612 free_buffer_page(bpage);
618 * Causes compile errors if the struct buffer_page gets bigger
619 * than the struct page.
621 extern int ring_buffer_page_too_big(void);
623 #ifdef CONFIG_HOTPLUG_CPU
624 static int rb_cpu_notify(struct notifier_block *self,
625 unsigned long action, void *hcpu);
629 * ring_buffer_alloc - allocate a new ring_buffer
630 * @size: the size in bytes per cpu that is needed.
631 * @flags: attributes to set for the ring buffer.
633 * Currently the only flag that is available is the RB_FL_OVERWRITE
634 * flag. This flag means that the buffer will overwrite old data
635 * when the buffer wraps. If this flag is not set, the buffer will
636 * drop data when the tail hits the head.
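 *
 * A minimal allocation sketch (the size here is only an example); size
 * is given in bytes per cpu and is rounded up to whole buffer pages:
 *
 *	struct ring_buffer *buffer;
 *
 *	buffer = ring_buffer_alloc(1 << 20, RB_FL_OVERWRITE);
 *	if (!buffer)
 *		return -ENOMEM;
 *	// ... use the buffer ...
 *	ring_buffer_free(buffer);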
638 struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags)
640 struct ring_buffer *buffer;
644 /* Paranoid! Optimizes out when all is well */
645 if (sizeof(struct buffer_page) > sizeof(struct page))
646 ring_buffer_page_too_big();
649 /* keep it in its own cache line */
650 buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
655 if (!alloc_cpumask_var(&buffer->cpumask, GFP_KERNEL))
656 goto fail_free_buffer;
658 buffer->pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
659 buffer->flags = flags;
660 buffer->clock = trace_clock_local;
662 /* need at least two pages */
663 if (buffer->pages == 1)
667 * In case of non-hotplug cpu, if the ring-buffer is allocated
668 * in early initcall, it will not be notified of secondary cpus.
669 * In that case, we need to allocate for all possible cpus.
671 #ifdef CONFIG_HOTPLUG_CPU
673 cpumask_copy(buffer->cpumask, cpu_online_mask);
675 cpumask_copy(buffer->cpumask, cpu_possible_mask);
677 buffer->cpus = nr_cpu_ids;
679 bsize = sizeof(void *) * nr_cpu_ids;
680 buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
682 if (!buffer->buffers)
683 goto fail_free_cpumask;
685 for_each_buffer_cpu(buffer, cpu) {
686 buffer->buffers[cpu] =
687 rb_allocate_cpu_buffer(buffer, cpu);
688 if (!buffer->buffers[cpu])
689 goto fail_free_buffers;
692 #ifdef CONFIG_HOTPLUG_CPU
693 buffer->cpu_notify.notifier_call = rb_cpu_notify;
694 buffer->cpu_notify.priority = 0;
695 register_cpu_notifier(&buffer->cpu_notify);
699 mutex_init(&buffer->mutex);
704 for_each_buffer_cpu(buffer, cpu) {
705 if (buffer->buffers[cpu])
706 rb_free_cpu_buffer(buffer->buffers[cpu]);
708 kfree(buffer->buffers);
711 free_cpumask_var(buffer->cpumask);
718 EXPORT_SYMBOL_GPL(ring_buffer_alloc);
721 * ring_buffer_free - free a ring buffer.
722 * @buffer: the buffer to free.
725 ring_buffer_free(struct ring_buffer *buffer)
731 #ifdef CONFIG_HOTPLUG_CPU
732 unregister_cpu_notifier(&buffer->cpu_notify);
735 for_each_buffer_cpu(buffer, cpu)
736 rb_free_cpu_buffer(buffer->buffers[cpu]);
740 free_cpumask_var(buffer->cpumask);
744 EXPORT_SYMBOL_GPL(ring_buffer_free);
746 void ring_buffer_set_clock(struct ring_buffer *buffer,
749 buffer->clock = clock;
752 static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
755 rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
757 struct buffer_page *bpage;
761 atomic_inc(&cpu_buffer->record_disabled);
764 for (i = 0; i < nr_pages; i++) {
765 if (RB_WARN_ON(cpu_buffer, list_empty(&cpu_buffer->pages)))
767 p = cpu_buffer->pages.next;
768 bpage = list_entry(p, struct buffer_page, list);
769 list_del_init(&bpage->list);
770 free_buffer_page(bpage);
772 if (RB_WARN_ON(cpu_buffer, list_empty(&cpu_buffer->pages)))
775 rb_reset_cpu(cpu_buffer);
777 rb_check_pages(cpu_buffer);
779 atomic_dec(&cpu_buffer->record_disabled);
784 rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
785 struct list_head *pages, unsigned nr_pages)
787 struct buffer_page *bpage;
791 atomic_inc(&cpu_buffer->record_disabled);
794 for (i = 0; i < nr_pages; i++) {
795 if (RB_WARN_ON(cpu_buffer, list_empty(pages)))
798 bpage = list_entry(p, struct buffer_page, list);
799 list_del_init(&bpage->list);
800 list_add_tail(&bpage->list, &cpu_buffer->pages);
802 rb_reset_cpu(cpu_buffer);
804 rb_check_pages(cpu_buffer);
806 atomic_dec(&cpu_buffer->record_disabled);
810 * ring_buffer_resize - resize the ring buffer
811 * @buffer: the buffer to resize.
812 * @size: the new size.
814 * The tracer is responsible for making sure that the buffer is
815 * not being used while changing the size.
816 * Note: We may be able to change the above requirement by using
817 * RCU synchronizations.
819 * Minimum size is 2 * BUF_PAGE_SIZE.
821 * Returns -1 on failure.
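 *
 * For example (the new size is only illustrative), asking for one
 * megabyte per cpu:
 *
 *	ret = ring_buffer_resize(buffer, 1024 * 1024);
 *
 * allocates or frees pages on every per cpu buffer so that each one
 * ends up with DIV_ROUND_UP(1024 * 1024, BUF_PAGE_SIZE) pages.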
823 int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
825 struct ring_buffer_per_cpu *cpu_buffer;
826 unsigned nr_pages, rm_pages, new_pages;
827 struct buffer_page *bpage, *tmp;
828 unsigned long buffer_size;
834 * Always succeed at resizing a non-existent buffer:
839 size = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
840 size *= BUF_PAGE_SIZE;
841 buffer_size = buffer->pages * BUF_PAGE_SIZE;
843 /* we need a minimum of two pages */
844 if (size < BUF_PAGE_SIZE * 2)
845 size = BUF_PAGE_SIZE * 2;
847 if (size == buffer_size)
850 mutex_lock(&buffer->mutex);
853 nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
855 if (size < buffer_size) {
857 /* easy case, just free pages */
858 if (RB_WARN_ON(buffer, nr_pages >= buffer->pages))
861 rm_pages = buffer->pages - nr_pages;
863 for_each_buffer_cpu(buffer, cpu) {
864 cpu_buffer = buffer->buffers[cpu];
865 rb_remove_pages(cpu_buffer, rm_pages);
871 * This is a bit more difficult. We only want to add pages
872 * when we can allocate enough for all CPUs. We do this
873 * by allocating all the pages and storing them on a local
874 * link list. If we succeed in our allocation, then we
875 * add these pages to the cpu_buffers. Otherwise we just free
876 * them all and return -ENOMEM;
878 if (RB_WARN_ON(buffer, nr_pages <= buffer->pages))
881 new_pages = nr_pages - buffer->pages;
883 for_each_buffer_cpu(buffer, cpu) {
884 for (i = 0; i < new_pages; i++) {
885 bpage = kzalloc_node(ALIGN(sizeof(*bpage),
887 GFP_KERNEL, cpu_to_node(cpu));
890 list_add(&bpage->list, &pages);
891 addr = __get_free_page(GFP_KERNEL);
894 bpage->page = (void *)addr;
895 rb_init_page(bpage->page);
899 for_each_buffer_cpu(buffer, cpu) {
900 cpu_buffer = buffer->buffers[cpu];
901 rb_insert_pages(cpu_buffer, &pages, new_pages);
904 if (RB_WARN_ON(buffer, !list_empty(&pages)))
908 buffer->pages = nr_pages;
910 mutex_unlock(&buffer->mutex);
915 list_for_each_entry_safe(bpage, tmp, &pages, list) {
916 list_del_init(&bpage->list);
917 free_buffer_page(bpage);
920 mutex_unlock(&buffer->mutex);
924 * Something went totally wrong, and we are too paranoid
925 * to even clean up the mess.
929 mutex_unlock(&buffer->mutex);
932 EXPORT_SYMBOL_GPL(ring_buffer_resize);
935 __rb_data_page_index(struct buffer_data_page *bpage, unsigned index)
937 return bpage->data + index;
940 static inline void *__rb_page_index(struct buffer_page *bpage, unsigned index)
942 return bpage->page->data + index;
945 static inline struct ring_buffer_event *
946 rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
948 return __rb_page_index(cpu_buffer->reader_page,
949 cpu_buffer->reader_page->read);
952 static inline struct ring_buffer_event *
953 rb_head_event(struct ring_buffer_per_cpu *cpu_buffer)
955 return __rb_page_index(cpu_buffer->head_page,
956 cpu_buffer->head_page->read);
959 static inline struct ring_buffer_event *
960 rb_iter_head_event(struct ring_buffer_iter *iter)
962 return __rb_page_index(iter->head_page, iter->head);
965 static inline unsigned rb_page_write(struct buffer_page *bpage)
967 return local_read(&bpage->write);
970 static inline unsigned rb_page_commit(struct buffer_page *bpage)
972 return local_read(&bpage->page->commit);
975 /* Size is determined by what has been committed */
976 static inline unsigned rb_page_size(struct buffer_page *bpage)
978 return rb_page_commit(bpage);
981 static inline unsigned
982 rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer)
984 return rb_page_commit(cpu_buffer->commit_page);
987 static inline unsigned rb_head_size(struct ring_buffer_per_cpu *cpu_buffer)
989 return rb_page_commit(cpu_buffer->head_page);
992 static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer,
993 struct buffer_page **bpage)
995 struct list_head *p = (*bpage)->list.next;
997 if (p == &cpu_buffer->pages)
1000 *bpage = list_entry(p, struct buffer_page, list);
1003 static inline unsigned
1004 rb_event_index(struct ring_buffer_event *event)
1006 unsigned long addr = (unsigned long)event;
1008 return (addr & ~PAGE_MASK) - (PAGE_SIZE - BUF_PAGE_SIZE);
1012 rb_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
1013 struct ring_buffer_event *event)
1015 unsigned long addr = (unsigned long)event;
1016 unsigned long index;
1018 index = rb_event_index(event);
1021 return cpu_buffer->commit_page->page == (void *)addr &&
1022 rb_commit_index(cpu_buffer) == index;
1026 rb_set_commit_event(struct ring_buffer_per_cpu *cpu_buffer,
1027 struct ring_buffer_event *event)
1029 unsigned long addr = (unsigned long)event;
1030 unsigned long index;
1032 index = rb_event_index(event);
1035 while (cpu_buffer->commit_page->page != (void *)addr) {
1036 if (RB_WARN_ON(cpu_buffer,
1037 cpu_buffer->commit_page == cpu_buffer->tail_page))
1039 cpu_buffer->commit_page->page->commit =
1040 cpu_buffer->commit_page->write;
1041 rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
1042 cpu_buffer->write_stamp =
1043 cpu_buffer->commit_page->page->time_stamp;
1046 /* Now set the commit to the event's index */
1047 local_set(&cpu_buffer->commit_page->page->commit, index);
1051 rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
1054 * We only race with interrupts and NMIs on this CPU.
1055 * If we own the commit event, then we can commit
1056 * all others that interrupted us, since the interruptions
1057 * are in stack format (they finish before they come
1058 * back to us). This allows us to do a simple loop to
1059 * assign the commit to the tail.
1062 while (cpu_buffer->commit_page != cpu_buffer->tail_page) {
1063 cpu_buffer->commit_page->page->commit =
1064 cpu_buffer->commit_page->write;
1065 rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
1066 cpu_buffer->write_stamp =
1067 cpu_buffer->commit_page->page->time_stamp;
1068 /* add barrier to keep gcc from optimizing too much */
1071 while (rb_commit_index(cpu_buffer) !=
1072 rb_page_write(cpu_buffer->commit_page)) {
1073 cpu_buffer->commit_page->page->commit =
1074 cpu_buffer->commit_page->write;
1078 /* again, keep gcc from optimizing */
1082 * If an interrupt came in just after the first while loop
1083 * and pushed the tail page forward, we will be left with
1084 * a dangling commit that will never go forward.
1086 if (unlikely(cpu_buffer->commit_page != cpu_buffer->tail_page))
1090 static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
1092 cpu_buffer->read_stamp = cpu_buffer->reader_page->page->time_stamp;
1093 cpu_buffer->reader_page->read = 0;
1096 static void rb_inc_iter(struct ring_buffer_iter *iter)
1098 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
1101 * The iterator could be on the reader page (it starts there).
1102 * But the head could have moved, since the reader was
1103 * found. Check for this case and assign the iterator
1104 * to the head page instead of next.
1106 if (iter->head_page == cpu_buffer->reader_page)
1107 iter->head_page = cpu_buffer->head_page;
1109 rb_inc_page(cpu_buffer, &iter->head_page);
1111 iter->read_stamp = iter->head_page->page->time_stamp;
1116 * ring_buffer_update_event - update event type and data
1117 * @event: the event to update
1118 * @type: the type of event
1119 * @length: the size of the event field in the ring buffer
1121 * Update the type and data fields of the event. The length
1122 * is the actual size that is written to the ring buffer,
1123 * and with this, we can determine what to place into the data field.
1127 rb_update_event(struct ring_buffer_event *event,
1128 unsigned type, unsigned length)
1130 event->type_len = type;
1134 case RINGBUF_TYPE_PADDING:
1135 case RINGBUF_TYPE_TIME_EXTEND:
1136 case RINGBUF_TYPE_TIME_STAMP:
1140 length -= RB_EVNT_HDR_SIZE;
1141 if (length > RB_MAX_SMALL_DATA)
1142 event->array[0] = length;
1144 event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT);
1151 static unsigned rb_calculate_event_length(unsigned length)
1153 struct ring_buffer_event event; /* Used only for sizeof array */
1155 /* zero length can cause confusion */
1159 if (length > RB_MAX_SMALL_DATA)
1160 length += sizeof(event.array[0]);
1162 length += RB_EVNT_HDR_SIZE;
1163 length = ALIGN(length, RB_ALIGNMENT);
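/*
 * A worked example of the encoding (assuming the 4 byte event header
 * implied by the 5+27 bit header fields and RB_ALIGNMENT of 4):
 *
 * - A 12 byte payload: 12 <= RB_MAX_SMALL_DATA (112), so the event
 *   occupies 12 + 4 = 16 bytes on the page, rb_update_event() sets
 *   type_len = DIV_ROUND_UP(12, 4) = 3 and the data lives at array[0].
 *
 * - A 200 byte payload: 200 > RB_MAX_SMALL_DATA, so an extra 4 byte word
 *   is reserved to hold the length: the event occupies
 *   ALIGN(200 + 4 + 4, 4) = 208 bytes, type_len stays 0
 *   (RINGBUF_TYPE_DATA), array[0] = 204 and the data starts at array[1].
 */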
1169 static struct ring_buffer_event *
1170 rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
1171 unsigned long length, unsigned long tail,
1172 struct buffer_page *commit_page,
1173 struct buffer_page *tail_page, u64 *ts)
1175 struct buffer_page *next_page, *head_page, *reader_page;
1176 struct ring_buffer *buffer = cpu_buffer->buffer;
1177 struct ring_buffer_event *event;
1178 bool lock_taken = false;
1179 unsigned long flags;
1181 next_page = tail_page;
1183 local_irq_save(flags);
1185 * Since the write to the buffer is still not
1186 * fully lockless, we must be careful with NMIs.
1187 * The locks in the writers are taken when a write
1188 * crosses to a new page. The locks protect against
1189 * races with the readers (this will soon be fixed
1190 * with a lockless solution).
1192 * Because we can not protect against NMIs, and we
1193 * want to keep traces reentrant, we need to manage
1194 * what happens when we are in an NMI.
1196 * NMIs can happen after we take the lock.
1197 * If we are in an NMI, only take the lock
1198 * if it is not already taken. Otherwise
1201 if (unlikely(in_nmi())) {
1202 if (!__raw_spin_trylock(&cpu_buffer->lock)) {
1203 cpu_buffer->nmi_dropped++;
1207 __raw_spin_lock(&cpu_buffer->lock);
1211 rb_inc_page(cpu_buffer, &next_page);
1213 head_page = cpu_buffer->head_page;
1214 reader_page = cpu_buffer->reader_page;
1216 /* we grabbed the lock before incrementing */
1217 if (RB_WARN_ON(cpu_buffer, next_page == reader_page))
1221 * If for some reason, we had an interrupt storm that made
1222 * it all the way around the buffer, bail, and warn about it.
1225 if (unlikely(next_page == commit_page)) {
1226 cpu_buffer->commit_overrun++;
1230 if (next_page == head_page) {
1231 if (!(buffer->flags & RB_FL_OVERWRITE))
1234 /* tail_page has not moved yet? */
1235 if (tail_page == cpu_buffer->tail_page) {
1236 /* count overflows */
1237 cpu_buffer->overrun +=
1238 local_read(&head_page->entries);
1240 rb_inc_page(cpu_buffer, &head_page);
1241 cpu_buffer->head_page = head_page;
1242 cpu_buffer->head_page->read = 0;
1247 * If the tail page is still the same as what we think
1248 * it is, then it is up to us to update the tail pointer.
1251 if (tail_page == cpu_buffer->tail_page) {
1252 local_set(&next_page->write, 0);
1253 local_set(&next_page->entries, 0);
1254 local_set(&next_page->page->commit, 0);
1255 cpu_buffer->tail_page = next_page;
1257 /* reread the time stamp */
1258 *ts = rb_time_stamp(buffer, cpu_buffer->cpu);
1259 cpu_buffer->tail_page->page->time_stamp = *ts;
1263 * The actual tail page has moved forward.
1265 if (tail < BUF_PAGE_SIZE) {
1266 /* Mark the rest of the page with padding */
1267 event = __rb_page_index(tail_page, tail);
1268 rb_event_set_padding(event);
1271 /* Set the write back to the previous setting */
1272 local_sub(length, &tail_page->write);
1275 * If this was a commit entry that failed,
1276 * increment that too
1278 if (tail_page == cpu_buffer->commit_page &&
1279 tail == rb_commit_index(cpu_buffer)) {
1280 rb_set_commit_to_write(cpu_buffer);
1283 __raw_spin_unlock(&cpu_buffer->lock);
1284 local_irq_restore(flags);
1286 /* fail and let the caller try again */
1287 return ERR_PTR(-EAGAIN);
1291 local_sub(length, &tail_page->write);
1293 if (likely(lock_taken))
1294 __raw_spin_unlock(&cpu_buffer->lock);
1295 local_irq_restore(flags);
1299 static struct ring_buffer_event *
1300 __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
1301 unsigned type, unsigned long length, u64 *ts)
1303 struct buffer_page *tail_page, *commit_page;
1304 struct ring_buffer_event *event;
1305 unsigned long tail, write;
1307 commit_page = cpu_buffer->commit_page;
1308 /* we just need to protect against interrupts */
1310 tail_page = cpu_buffer->tail_page;
1311 write = local_add_return(length, &tail_page->write);
1312 tail = write - length;
1314 /* See if we shot past the end of this buffer page */
1315 if (write > BUF_PAGE_SIZE)
1316 return rb_move_tail(cpu_buffer, length, tail,
1317 commit_page, tail_page, ts);
1319 /* We reserved something on the buffer */
1321 if (RB_WARN_ON(cpu_buffer, write > BUF_PAGE_SIZE))
1324 event = __rb_page_index(tail_page, tail);
1325 rb_update_event(event, type, length);
1327 /* The passed in type is zero for DATA */
1329 local_inc(&tail_page->entries);
1332 * If this is a commit and the tail is zero, then update
1333 * this page's time stamp.
1335 if (!tail && rb_is_commit(cpu_buffer, event))
1336 cpu_buffer->commit_page->page->time_stamp = *ts;
1342 rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
1343 struct ring_buffer_event *event)
1345 unsigned long new_index, old_index;
1346 struct buffer_page *bpage;
1347 unsigned long index;
1350 new_index = rb_event_index(event);
1351 old_index = new_index + rb_event_length(event);
1352 addr = (unsigned long)event;
1355 bpage = cpu_buffer->tail_page;
1357 if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
1359 * This is on the tail page. It is possible that
1360 * a write could come in and move the tail page
1361 * and write to the next page. That is fine
1362 * because we just shorten what is on this page.
1364 index = local_cmpxchg(&bpage->write, old_index, new_index);
1365 if (index == old_index)
1369 /* could not discard */
1374 rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer,
1375 u64 *ts, u64 *delta)
1377 struct ring_buffer_event *event;
1381 if (unlikely(*delta > (1ULL << 59) && !once++)) {
1382 printk(KERN_WARNING "Delta way too big! %llu"
1383 " ts=%llu write stamp = %llu\n",
1384 (unsigned long long)*delta,
1385 (unsigned long long)*ts,
1386 (unsigned long long)cpu_buffer->write_stamp);
1391 * The delta is too big, we need to add a timestamp.
1394 event = __rb_reserve_next(cpu_buffer,
1395 RINGBUF_TYPE_TIME_EXTEND,
1401 if (PTR_ERR(event) == -EAGAIN)
1404 /* Only a committed time event can update the write stamp */
1405 if (rb_is_commit(cpu_buffer, event)) {
1407 * If this is the first on the page, then we need to
1408 * update the page itself, and just put in a zero.
1410 if (rb_event_index(event)) {
1411 event->time_delta = *delta & TS_MASK;
1412 event->array[0] = *delta >> TS_SHIFT;
1414 cpu_buffer->commit_page->page->time_stamp = *ts;
1415 /* try to discard, since we do not need this */
1416 if (!rb_try_to_discard(cpu_buffer, event)) {
1417 /* nope, just zero it */
1418 event->time_delta = 0;
1419 event->array[0] = 0;
1422 cpu_buffer->write_stamp = *ts;
1423 /* let the caller know this was the commit */
1426 /* Try to discard the event */
1427 if (!rb_try_to_discard(cpu_buffer, event)) {
1428 /* Darn, this is just wasted space */
1429 event->time_delta = 0;
1430 event->array[0] = 0;
1440 static struct ring_buffer_event *
1441 rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer,
1442 unsigned long length)
1444 struct ring_buffer_event *event;
1449 length = rb_calculate_event_length(length);
1452 * We allow for interrupts to reenter here and do a trace.
1453 * If one does, it will cause this original code to loop
1454 * back here. Even with heavy interrupts happening, this
1455 * should only happen a few times in a row. If this happens
1456 * 1000 times in a row, there must be either an interrupt
1457 * storm or we have something buggy.
1460 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))
1463 ts = rb_time_stamp(cpu_buffer->buffer, cpu_buffer->cpu);
1466 * Only the first commit can update the timestamp.
1467 * Yes there is a race here. If an interrupt comes in
1468 * just after the conditional and it traces too, then it
1469 * will also check the deltas. More than one timestamp may
1470 * also be made. But only the entry that did the actual
1471 * commit will be something other than zero.
1473 if (likely(cpu_buffer->tail_page == cpu_buffer->commit_page &&
1474 rb_page_write(cpu_buffer->tail_page) ==
1475 rb_commit_index(cpu_buffer))) {
1478 diff = ts - cpu_buffer->write_stamp;
1480 /* make sure this diff is calculated here */
1483 /* Did the write stamp get updated already? */
1484 if (unlikely(ts < cpu_buffer->write_stamp))
1488 if (unlikely(test_time_stamp(delta))) {
1490 commit = rb_add_time_stamp(cpu_buffer, &ts, &delta);
1491 if (commit == -EBUSY)
1494 if (commit == -EAGAIN)
1497 RB_WARN_ON(cpu_buffer, commit < 0);
1502 event = __rb_reserve_next(cpu_buffer, 0, length, &ts);
1503 if (unlikely(PTR_ERR(event) == -EAGAIN))
1507 if (unlikely(commit))
1509 * Ouch! We needed a timestamp and it was committed. But
1510 * we didn't get our event reserved.
1512 rb_set_commit_to_write(cpu_buffer);
1517 * If the timestamp was committed, make the commit our entry
1518 * now so that we will update it when needed.
1520 if (unlikely(commit))
1521 rb_set_commit_event(cpu_buffer, event);
1522 else if (!rb_is_commit(cpu_buffer, event))
1525 event->time_delta = delta;
1530 #define TRACE_RECURSIVE_DEPTH 16
1532 static int trace_recursive_lock(void)
1534 current->trace_recursion++;
1536 if (likely(current->trace_recursion < TRACE_RECURSIVE_DEPTH))
1539 /* Disable all tracing before we do anything else */
1540 tracing_off_permanent();
1542 printk_once(KERN_WARNING "Tracing recursion: depth[%ld]:"
1543 "HC[%lu]:SC[%lu]:NMI[%lu]\n",
1544 current->trace_recursion,
1545 hardirq_count() >> HARDIRQ_SHIFT,
1546 softirq_count() >> SOFTIRQ_SHIFT,
1553 static void trace_recursive_unlock(void)
1555 WARN_ON_ONCE(!current->trace_recursion);
1557 current->trace_recursion--;
1560 static DEFINE_PER_CPU(int, rb_need_resched);
1563 * ring_buffer_lock_reserve - reserve a part of the buffer
1564 * @buffer: the ring buffer to reserve from
1565 * @length: the length of the data to reserve (excluding event header)
1567 * Returns a reserved event on the ring buffer to copy directly to.
1568 * The user of this interface will need to get the body to write into
1569 * and can use the ring_buffer_event_data() interface.
1571 * The length is the length of the data needed, not the event length
1572 * which also includes the event header.
1574 * Must be paired with ring_buffer_unlock_commit, unless NULL is returned.
1575 * If NULL is returned, then nothing has been allocated or locked.
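 *
 * A minimal sketch of the reserve/commit cycle (struct my_entry and the
 * 'buffer' pointer are illustrative only):
 *
 *	struct ring_buffer_event *event;
 *	struct my_entry *entry;
 *
 *	event = ring_buffer_lock_reserve(buffer, sizeof(*entry));
 *	if (!event)
 *		return;
 *	entry = ring_buffer_event_data(event);
 *	entry->value = 42;
 *	ring_buffer_unlock_commit(buffer, event);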
1577 struct ring_buffer_event *
1578 ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
1580 struct ring_buffer_per_cpu *cpu_buffer;
1581 struct ring_buffer_event *event;
1584 if (ring_buffer_flags != RB_BUFFERS_ON)
1587 if (atomic_read(&buffer->record_disabled))
1590 /* If we are tracing schedule, we don't want to recurse */
1591 resched = ftrace_preempt_disable();
1593 if (trace_recursive_lock())
1596 cpu = raw_smp_processor_id();
1598 if (!cpumask_test_cpu(cpu, buffer->cpumask))
1601 cpu_buffer = buffer->buffers[cpu];
1603 if (atomic_read(&cpu_buffer->record_disabled))
1606 if (length > BUF_MAX_DATA_SIZE)
1609 event = rb_reserve_next_event(cpu_buffer, length);
1614 * Need to store resched state on this cpu.
1615 * Only the first needs to.
1618 if (preempt_count() == 1)
1619 per_cpu(rb_need_resched, cpu) = resched;
1624 trace_recursive_unlock();
1627 ftrace_preempt_enable(resched);
1630 EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve);
1632 static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
1633 struct ring_buffer_event *event)
1635 local_inc(&cpu_buffer->entries);
1637 /* Only process further if we own the commit */
1638 if (!rb_is_commit(cpu_buffer, event))
1641 cpu_buffer->write_stamp += event->time_delta;
1643 rb_set_commit_to_write(cpu_buffer);
1647 * ring_buffer_unlock_commit - commit a reserved event
1648 * @buffer: The buffer to commit to
1649 * @event: The event pointer to commit.
1651 * This commits the data to the ring buffer, and releases any locks held.
1653 * Must be paired with ring_buffer_lock_reserve.
1655 int ring_buffer_unlock_commit(struct ring_buffer *buffer,
1656 struct ring_buffer_event *event)
1658 struct ring_buffer_per_cpu *cpu_buffer;
1659 int cpu = raw_smp_processor_id();
1661 cpu_buffer = buffer->buffers[cpu];
1663 rb_commit(cpu_buffer, event);
1665 trace_recursive_unlock();
1668 * Only the last preempt count needs to restore preemption.
1670 if (preempt_count() == 1)
1671 ftrace_preempt_enable(per_cpu(rb_need_resched, cpu));
1673 preempt_enable_no_resched_notrace();
1677 EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit);
1679 static inline void rb_event_discard(struct ring_buffer_event *event)
1681 /* array[0] holds the actual length for the discarded event */
1682 event->array[0] = rb_event_data_length(event) - RB_EVNT_HDR_SIZE;
1683 event->type_len = RINGBUF_TYPE_PADDING;
1684 /* time delta must be non zero */
1685 if (!event->time_delta)
1686 event->time_delta = 1;
1690 * ring_buffer_event_discard - discard any event in the ring buffer
1691 * @event: the event to discard
1693 * Sometimes an event that is in the ring buffer needs to be ignored.
1694 * This function lets the user discard an event in the ring buffer
1695 * and then that event will not be read later.
1697 * Note, it is up to the user to be careful with this, and protect
1698 * against races. If the user discards an event that has been consumed
1699 * it is possible that it could corrupt the ring buffer.
1701 void ring_buffer_event_discard(struct ring_buffer_event *event)
1703 rb_event_discard(event);
1705 EXPORT_SYMBOL_GPL(ring_buffer_event_discard);
1708 * ring_buffer_discard_commit - discard an event that has not been committed
1709 * @buffer: the ring buffer
1710 * @event: non committed event to discard
1712 * This is similar to ring_buffer_event_discard but must only be
1713 * performed on an event that has not been committed yet. The difference
1714 * is that this will also try to free the event from the ring buffer
1715 * if another event has not been added behind it.
1717 * If another event has been added behind it, it will set the event
1718 * up as discarded, and perform the commit.
1720 * If this function is called, do not call ring_buffer_unlock_commit on the event.
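 *
 * Sketch of the intended pattern (struct my_entry and the decision
 * helper fill_entry() are illustrative only):
 *
 *	event = ring_buffer_lock_reserve(buffer, sizeof(*entry));
 *	if (!event)
 *		return;
 *	entry = ring_buffer_event_data(event);
 *	if (fill_entry(entry))
 *		ring_buffer_unlock_commit(buffer, event);
 *	else
 *		ring_buffer_discard_commit(buffer, event);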
1723 void ring_buffer_discard_commit(struct ring_buffer *buffer,
1724 struct ring_buffer_event *event)
1726 struct ring_buffer_per_cpu *cpu_buffer;
1729 /* The event is discarded regardless */
1730 rb_event_discard(event);
1733 * This must only be called if the event has not been
1734 * committed yet. Thus we can assume that preemption
1735 * is still disabled.
1737 RB_WARN_ON(buffer, preemptible());
1739 cpu = smp_processor_id();
1740 cpu_buffer = buffer->buffers[cpu];
1742 if (!rb_try_to_discard(cpu_buffer, event))
1746 * The commit is still visible to the reader, so we
1747 * must increment entries.
1749 local_inc(&cpu_buffer->entries);
1752 * If a write came in and pushed the tail page
1753 * we still need to update the commit pointer
1754 * if we were the commit.
1756 if (rb_is_commit(cpu_buffer, event))
1757 rb_set_commit_to_write(cpu_buffer);
1759 trace_recursive_unlock();
1762 * Only the last preempt count needs to restore preemption.
1764 if (preempt_count() == 1)
1765 ftrace_preempt_enable(per_cpu(rb_need_resched, cpu));
1767 preempt_enable_no_resched_notrace();
1770 EXPORT_SYMBOL_GPL(ring_buffer_discard_commit);
1773 * ring_buffer_write - write data to the buffer without reserving
1774 * @buffer: The ring buffer to write to.
1775 * @length: The length of the data being written (excluding the event header)
1776 * @data: The data to write to the buffer.
1778 * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as
1779 * one function. If you already have the data to write to the buffer, it
1780 * may be easier to simply call this function.
1782 * Note, like ring_buffer_lock_reserve, the length is the length of the data
1783 * and not the length of the event which would hold the header.
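 *
 * For example, to record a plain structure in one call (struct my_entry
 * and 'buffer' are illustrative only):
 *
 *	struct my_entry entry = { .value = 42 };
 *
 *	if (ring_buffer_write(buffer, sizeof(entry), &entry))
 *		return;	// a non-zero return is assumed to mean the write failed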
1785 int ring_buffer_write(struct ring_buffer *buffer,
1786 unsigned long length,
1789 struct ring_buffer_per_cpu *cpu_buffer;
1790 struct ring_buffer_event *event;
1795 if (ring_buffer_flags != RB_BUFFERS_ON)
1798 if (atomic_read(&buffer->record_disabled))
1801 resched = ftrace_preempt_disable();
1803 cpu = raw_smp_processor_id();
1805 if (!cpumask_test_cpu(cpu, buffer->cpumask))
1808 cpu_buffer = buffer->buffers[cpu];
1810 if (atomic_read(&cpu_buffer->record_disabled))
1813 if (length > BUF_MAX_DATA_SIZE)
1816 event = rb_reserve_next_event(cpu_buffer, length);
1820 body = rb_event_data(event);
1822 memcpy(body, data, length);
1824 rb_commit(cpu_buffer, event);
1828 ftrace_preempt_enable(resched);
1832 EXPORT_SYMBOL_GPL(ring_buffer_write);
1834 static int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
1836 struct buffer_page *reader = cpu_buffer->reader_page;
1837 struct buffer_page *head = cpu_buffer->head_page;
1838 struct buffer_page *commit = cpu_buffer->commit_page;
1840 return reader->read == rb_page_commit(reader) &&
1841 (commit == reader ||
1843 head->read == rb_page_commit(commit)));
1847 * ring_buffer_record_disable - stop all writes into the buffer
1848 * @buffer: The ring buffer to stop writes to.
1850 * This prevents all writes to the buffer. Any attempt to write
1851 * to the buffer after this will fail and return NULL.
1853 * The caller should call synchronize_sched() after this.
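 *
 * Typical use when a stable view of the buffer is needed:
 *
 *	ring_buffer_record_disable(buffer);
 *	synchronize_sched();	// wait for in-flight writers to finish
 *	// ... read the buffer ...
 *	ring_buffer_record_enable(buffer);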
1855 void ring_buffer_record_disable(struct ring_buffer *buffer)
1857 atomic_inc(&buffer->record_disabled);
1859 EXPORT_SYMBOL_GPL(ring_buffer_record_disable);
1862 * ring_buffer_record_enable - enable writes to the buffer
1863 * @buffer: The ring buffer to enable writes
1865 * Note, multiple disables will need the same number of enables
1866 * to truly enable the writing (much like preempt_disable).
1868 void ring_buffer_record_enable(struct ring_buffer *buffer)
1870 atomic_dec(&buffer->record_disabled);
1872 EXPORT_SYMBOL_GPL(ring_buffer_record_enable);
1875 * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
1876 * @buffer: The ring buffer to stop writes to.
1877 * @cpu: The CPU buffer to stop
1879 * This prevents all writes to the buffer. Any attempt to write
1880 * to the buffer after this will fail and return NULL.
1882 * The caller should call synchronize_sched() after this.
1884 void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
1886 struct ring_buffer_per_cpu *cpu_buffer;
1888 if (!cpumask_test_cpu(cpu, buffer->cpumask))
1891 cpu_buffer = buffer->buffers[cpu];
1892 atomic_inc(&cpu_buffer->record_disabled);
1894 EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu);
1897 * ring_buffer_record_enable_cpu - enable writes to the buffer
1898 * @buffer: The ring buffer to enable writes
1899 * @cpu: The CPU to enable.
1901 * Note, multiple disables will need the same number of enables
1902 * to truely enable the writing (much like preempt_disable).
1904 void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
1906 struct ring_buffer_per_cpu *cpu_buffer;
1908 if (!cpumask_test_cpu(cpu, buffer->cpumask))
1911 cpu_buffer = buffer->buffers[cpu];
1912 atomic_dec(&cpu_buffer->record_disabled);
1914 EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu);
1917 * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
1918 * @buffer: The ring buffer
1919 * @cpu: The per CPU buffer to get the entries from.
1921 unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
1923 struct ring_buffer_per_cpu *cpu_buffer;
1926 if (!cpumask_test_cpu(cpu, buffer->cpumask))
1929 cpu_buffer = buffer->buffers[cpu];
1930 ret = (local_read(&cpu_buffer->entries) - cpu_buffer->overrun)
1935 EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);
1938 * ring_buffer_overrun_cpu - get the number of overruns in a cpu_buffer
1939 * @buffer: The ring buffer
1940 * @cpu: The per CPU buffer to get the number of overruns from
1942 unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
1944 struct ring_buffer_per_cpu *cpu_buffer;
1947 if (!cpumask_test_cpu(cpu, buffer->cpumask))
1950 cpu_buffer = buffer->buffers[cpu];
1951 ret = cpu_buffer->overrun;
1955 EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);
1958 * ring_buffer_nmi_dropped_cpu - get the number of nmis that were dropped
1959 * @buffer: The ring buffer
1960 * @cpu: The per CPU buffer to get the number of overruns from
1962 unsigned long ring_buffer_nmi_dropped_cpu(struct ring_buffer *buffer, int cpu)
1964 struct ring_buffer_per_cpu *cpu_buffer;
1967 if (!cpumask_test_cpu(cpu, buffer->cpumask))
1970 cpu_buffer = buffer->buffers[cpu];
1971 ret = cpu_buffer->nmi_dropped;
1975 EXPORT_SYMBOL_GPL(ring_buffer_nmi_dropped_cpu);
1978 * ring_buffer_commit_overrun_cpu - get the number of overruns caused by commits
1979 * @buffer: The ring buffer
1980 * @cpu: The per CPU buffer to get the number of overruns from
1983 ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
1985 struct ring_buffer_per_cpu *cpu_buffer;
1988 if (!cpumask_test_cpu(cpu, buffer->cpumask))
1991 cpu_buffer = buffer->buffers[cpu];
1992 ret = cpu_buffer->commit_overrun;
1996 EXPORT_SYMBOL_GPL(ring_buffer_commit_overrun_cpu);
1999 * ring_buffer_entries - get the number of entries in a buffer
2000 * @buffer: The ring buffer
2002 * Returns the total number of entries in the ring buffer
2005 unsigned long ring_buffer_entries(struct ring_buffer *buffer)
2007 struct ring_buffer_per_cpu *cpu_buffer;
2008 unsigned long entries = 0;
2011 /* if you care about this being correct, lock the buffer */
2012 for_each_buffer_cpu(buffer, cpu) {
2013 cpu_buffer = buffer->buffers[cpu];
2014 entries += (local_read(&cpu_buffer->entries) -
2015 cpu_buffer->overrun) - cpu_buffer->read;
2020 EXPORT_SYMBOL_GPL(ring_buffer_entries);
2023 * ring_buffer_overruns - get the total number of overruns in the buffer
2024 * @buffer: The ring buffer
2026 * Returns the total number of overruns in the ring buffer
2029 unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
2031 struct ring_buffer_per_cpu *cpu_buffer;
2032 unsigned long overruns = 0;
2035 /* if you care about this being correct, lock the buffer */
2036 for_each_buffer_cpu(buffer, cpu) {
2037 cpu_buffer = buffer->buffers[cpu];
2038 overruns += cpu_buffer->overrun;
2043 EXPORT_SYMBOL_GPL(ring_buffer_overruns);
2045 static void rb_iter_reset(struct ring_buffer_iter *iter)
2047 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
2049 /* Iterator usage is expected to have record disabled */
2050 if (list_empty(&cpu_buffer->reader_page->list)) {
2051 iter->head_page = cpu_buffer->head_page;
2052 iter->head = cpu_buffer->head_page->read;
2054 iter->head_page = cpu_buffer->reader_page;
2055 iter->head = cpu_buffer->reader_page->read;
2058 iter->read_stamp = cpu_buffer->read_stamp;
2060 iter->read_stamp = iter->head_page->page->time_stamp;
2064 * ring_buffer_iter_reset - reset an iterator
2065 * @iter: The iterator to reset
2067 * Resets the iterator, so that it will start from the beginning
2070 void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
2072 struct ring_buffer_per_cpu *cpu_buffer;
2073 unsigned long flags;
2078 cpu_buffer = iter->cpu_buffer;
2080 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2081 rb_iter_reset(iter);
2082 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2084 EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);
2087 * ring_buffer_iter_empty - check if an iterator has no more to read
2088 * @iter: The iterator to check
2090 int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
2092 struct ring_buffer_per_cpu *cpu_buffer;
2094 cpu_buffer = iter->cpu_buffer;
2096 return iter->head_page == cpu_buffer->commit_page &&
2097 iter->head == rb_commit_index(cpu_buffer);
2099 EXPORT_SYMBOL_GPL(ring_buffer_iter_empty);
2102 rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
2103 struct ring_buffer_event *event)
2107 switch (event->type_len) {
2108 case RINGBUF_TYPE_PADDING:
2111 case RINGBUF_TYPE_TIME_EXTEND:
2112 delta = event->array[0];
2114 delta += event->time_delta;
2115 cpu_buffer->read_stamp += delta;
2118 case RINGBUF_TYPE_TIME_STAMP:
2119 /* FIXME: not implemented */
2122 case RINGBUF_TYPE_DATA:
2123 cpu_buffer->read_stamp += event->time_delta;
2133 rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
2134 struct ring_buffer_event *event)
2138 switch (event->type_len) {
2139 case RINGBUF_TYPE_PADDING:
2142 case RINGBUF_TYPE_TIME_EXTEND:
2143 delta = event->array[0];
2145 delta += event->time_delta;
2146 iter->read_stamp += delta;
2149 case RINGBUF_TYPE_TIME_STAMP:
2150 /* FIXME: not implemented */
2153 case RINGBUF_TYPE_DATA:
2154 iter->read_stamp += event->time_delta;
2163 static struct buffer_page *
2164 rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
2166 struct buffer_page *reader = NULL;
2167 unsigned long flags;
2170 local_irq_save(flags);
2171 __raw_spin_lock(&cpu_buffer->lock);
2175 * This should normally only loop twice. But because the
2176 * start of the reader inserts an empty page, it causes
2177 * a case where we will loop three times. There should be no
2178 * reason to loop four times (that I know of).
2180 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) {
2185 reader = cpu_buffer->reader_page;
2187 /* If there's more to read, return this page */
2188 if (cpu_buffer->reader_page->read < rb_page_size(reader))
2191 /* Never should we have an index greater than the size */
2192 if (RB_WARN_ON(cpu_buffer,
2193 cpu_buffer->reader_page->read > rb_page_size(reader)))
2196 /* check if we caught up to the tail */
2198 if (cpu_buffer->commit_page == cpu_buffer->reader_page)
2202 * Splice the empty reader page into the list around the head.
2203 * Reset the reader page to size zero.
2206 reader = cpu_buffer->head_page;
2207 cpu_buffer->reader_page->list.next = reader->list.next;
2208 cpu_buffer->reader_page->list.prev = reader->list.prev;
2210 local_set(&cpu_buffer->reader_page->write, 0);
2211 local_set(&cpu_buffer->reader_page->entries, 0);
2212 local_set(&cpu_buffer->reader_page->page->commit, 0);
2214 /* Make the reader page now replace the head */
2215 reader->list.prev->next = &cpu_buffer->reader_page->list;
2216 reader->list.next->prev = &cpu_buffer->reader_page->list;
2219 * If the tail is on the reader, then we must set the head
2220 * to the inserted page, otherwise we set it one before.
2222 cpu_buffer->head_page = cpu_buffer->reader_page;
2224 if (cpu_buffer->commit_page != reader)
2225 rb_inc_page(cpu_buffer, &cpu_buffer->head_page);
2227 /* Finally update the reader page to the new head */
2228 cpu_buffer->reader_page = reader;
2229 rb_reset_reader_page(cpu_buffer);
2234 __raw_spin_unlock(&cpu_buffer->lock);
2235 local_irq_restore(flags);
2240 static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
2242 struct ring_buffer_event *event;
2243 struct buffer_page *reader;
2246 reader = rb_get_reader_page(cpu_buffer);
2248 /* This function should not be called when buffer is empty */
2249 if (RB_WARN_ON(cpu_buffer, !reader))
2252 event = rb_reader_event(cpu_buffer);
2254 if (event->type_len <= RINGBUF_TYPE_DATA_TYPE_LEN_MAX
2255 || rb_discarded_event(event))
2258 rb_update_read_stamp(cpu_buffer, event);
2260 length = rb_event_length(event);
2261 cpu_buffer->reader_page->read += length;
2264 static void rb_advance_iter(struct ring_buffer_iter *iter)
2266 struct ring_buffer *buffer;
2267 struct ring_buffer_per_cpu *cpu_buffer;
2268 struct ring_buffer_event *event;
2271 cpu_buffer = iter->cpu_buffer;
2272 buffer = cpu_buffer->buffer;
2275 * Check if we are at the end of the buffer.
2277 if (iter->head >= rb_page_size(iter->head_page)) {
2278 /* discarded commits can make the page empty */
2279 if (iter->head_page == cpu_buffer->commit_page)
2285 event = rb_iter_head_event(iter);
2287 length = rb_event_length(event);
2290 * This should not be called to advance the header if we are
2291 * at the tail of the buffer.
2293 if (RB_WARN_ON(cpu_buffer,
2294 (iter->head_page == cpu_buffer->commit_page) &&
2295 (iter->head + length > rb_commit_index(cpu_buffer))))
2298 rb_update_iter_read_stamp(iter, event);
2300 iter->head += length;
2302 /* check for end of page padding */
2303 if ((iter->head >= rb_page_size(iter->head_page)) &&
2304 (iter->head_page != cpu_buffer->commit_page))
2305 rb_advance_iter(iter);
2308 static struct ring_buffer_event *
2309 rb_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
2311 struct ring_buffer_per_cpu *cpu_buffer;
2312 struct ring_buffer_event *event;
2313 struct buffer_page *reader;
2316 cpu_buffer = buffer->buffers[cpu];
2320 * We repeat when a timestamp is encountered. It is possible
2321 * to get multiple timestamps from an interrupt entering just
2322 * as one timestamp is about to be written, or from discarded
2323 * commits. The most that we can have is the number on a single page.
2325 if (RB_WARN_ON(cpu_buffer, ++nr_loops > RB_TIMESTAMPS_PER_PAGE))
2328 reader = rb_get_reader_page(cpu_buffer);
2332 event = rb_reader_event(cpu_buffer);
2334 switch (event->type_len) {
2335 case RINGBUF_TYPE_PADDING:
2336 if (rb_null_event(event))
2337 RB_WARN_ON(cpu_buffer, 1);
2339 * Because the writer could be discarding every
2340 * event it creates (which would probably be bad)
2341 * if we were to go back to "again" then we may never
2342 * catch up, and will trigger the warn on, or lock
2343 * the box. Return the padding, and we will release
2344 * the current locks, and try again.
2346 rb_advance_reader(cpu_buffer);
2349 case RINGBUF_TYPE_TIME_EXTEND:
2350 /* Internal data, OK to advance */
2351 rb_advance_reader(cpu_buffer);
2354 case RINGBUF_TYPE_TIME_STAMP:
2355 /* FIXME: not implemented */
2356 rb_advance_reader(cpu_buffer);
2359 case RINGBUF_TYPE_DATA:
2361 *ts = cpu_buffer->read_stamp + event->time_delta;
2362 ring_buffer_normalize_time_stamp(buffer,
2363 cpu_buffer->cpu, ts);
2373 EXPORT_SYMBOL_GPL(ring_buffer_peek);
2375 static struct ring_buffer_event *
2376 rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
2378 struct ring_buffer *buffer;
2379 struct ring_buffer_per_cpu *cpu_buffer;
2380 struct ring_buffer_event *event;
2383 if (ring_buffer_iter_empty(iter))
2386 cpu_buffer = iter->cpu_buffer;
2387 buffer = cpu_buffer->buffer;
2391 * We repeat when a timestamp is encountered.
2392 * We can get multiple timestamps by nested interrupts or also
2393 * if filtering is on (discarding commits). Since discarding
2394 * commits can be frequent we can get a lot of timestamps.
2395 * But we limit them by not adding timestamps if they begin
2396 * at the start of a page.
2398 if (RB_WARN_ON(cpu_buffer, ++nr_loops > RB_TIMESTAMPS_PER_PAGE))
2401 if (rb_per_cpu_empty(cpu_buffer))
2404 event = rb_iter_head_event(iter);
2406 switch (event->type_len) {
2407 case RINGBUF_TYPE_PADDING:
2408 if (rb_null_event(event)) {
2412 rb_advance_iter(iter);
2415 case RINGBUF_TYPE_TIME_EXTEND:
2416 /* Internal data, OK to advance */
2417 rb_advance_iter(iter);
2420 case RINGBUF_TYPE_TIME_STAMP:
2421 /* FIXME: not implemented */
2422 rb_advance_iter(iter);
2425 case RINGBUF_TYPE_DATA:
2427 *ts = iter->read_stamp + event->time_delta;
2428 ring_buffer_normalize_time_stamp(buffer,
2429 cpu_buffer->cpu, ts);
2439 EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);
2442 * ring_buffer_peek - peek at the next event to be read
2443 * @buffer: The ring buffer to read
2444 * @cpu: The cpu to peek at
2445 * @ts: The timestamp counter of this event.
2447 * This will return the event that will be read next, but does
2448 * not consume the data.
2450 struct ring_buffer_event *
2451 ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
2453 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
2454 struct ring_buffer_event *event;
2455 unsigned long flags;
2457 if (!cpumask_test_cpu(cpu, buffer->cpumask))
2461 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2462 event = rb_buffer_peek(buffer, cpu, ts);
2463 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2465 if (event && event->type_len == RINGBUF_TYPE_PADDING) {
2474 * ring_buffer_iter_peek - peek at the next event to be read
2475 * @iter: The ring buffer iterator
2476 * @ts: The timestamp counter of this event.
2478 * This will return the event that will be read next, but does
2479 * not increment the iterator.
2481 struct ring_buffer_event *
2482 ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
2484 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
2485 struct ring_buffer_event *event;
2486 unsigned long flags;
2489 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2490 event = rb_iter_peek(iter, ts);
2491 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2493 if (event && event->type_len == RINGBUF_TYPE_PADDING) {
2502 * ring_buffer_consume - return an event and consume it
2503 * @buffer: The ring buffer to get the next event from
2505 * Returns the next event in the ring buffer, and that event is consumed.
2506 * This means that sequential reads will keep returning different events,
2507 * and will eventually empty the ring buffer if the producer is slower.
2509 struct ring_buffer_event *
2510 ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
2512 struct ring_buffer_per_cpu *cpu_buffer;
2513 struct ring_buffer_event *event = NULL;
2514 unsigned long flags;
2517 /* might be called in atomic */
2520 if (!cpumask_test_cpu(cpu, buffer->cpumask))
2523 cpu_buffer = buffer->buffers[cpu];
2524 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2526 event = rb_buffer_peek(buffer, cpu, ts);
2530 rb_advance_reader(cpu_buffer);
2533 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2538 if (event && event->type_len == RINGBUF_TYPE_PADDING) {
2545 EXPORT_SYMBOL_GPL(ring_buffer_consume);
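/*
 * Illustrative sketch (editorial addition, not part of the original
 * source): a consuming read loop that drains one CPU's buffer.
 * ring_buffer_peek() could be used first to look at the next event
 * without consuming it. drain_cpu_buffer() and handle_event() are
 * hypothetical names.
 *
 *	static void drain_cpu_buffer(struct ring_buffer *buffer, int cpu)
 *	{
 *		struct ring_buffer_event *event;
 *		u64 ts;
 *
 *		while ((event = ring_buffer_consume(buffer, cpu, &ts)))
 *			handle_event(ring_buffer_event_data(event),
 *				     ring_buffer_event_length(event), ts);
 *	}
 */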
2548 * ring_buffer_read_start - start a non consuming read of the buffer
2549 * @buffer: The ring buffer to read from
2550 * @cpu: The cpu buffer to iterate over
2552 * This starts up an iteration through the buffer. It also disables
2553 * the recording to the buffer until the reading is finished.
2554 * This prevents the reading from being corrupted. This is not
2555 * a consuming read, so a producer is not expected.
2557 * Must be paired with ring_buffer_read_finish.
2559 struct ring_buffer_iter *
2560 ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
2562 struct ring_buffer_per_cpu *cpu_buffer;
2563 struct ring_buffer_iter *iter;
2564 unsigned long flags;
2566 if (!cpumask_test_cpu(cpu, buffer->cpumask))
2569 iter = kmalloc(sizeof(*iter), GFP_KERNEL);
2573 cpu_buffer = buffer->buffers[cpu];
2575 iter->cpu_buffer = cpu_buffer;
2577 atomic_inc(&cpu_buffer->record_disabled);
2578 synchronize_sched();
2580 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2581 __raw_spin_lock(&cpu_buffer->lock);
2582 rb_iter_reset(iter);
2583 __raw_spin_unlock(&cpu_buffer->lock);
2584 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2588 EXPORT_SYMBOL_GPL(ring_buffer_read_start);
2591 * ring_buffer_read_finish - finish reading the iterator of the buffer
2592 * @iter: The iterator retrieved by ring_buffer_read_start
2594 * This re-enables recording to the buffer, and frees the iterator.
2598 ring_buffer_read_finish(struct ring_buffer_iter *iter)
2600 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
2602 atomic_dec(&cpu_buffer->record_disabled);
2605 EXPORT_SYMBOL_GPL(ring_buffer_read_finish);
2608 * ring_buffer_read - read the next item in the ring buffer by the iterator
2609 * @iter: The ring buffer iterator
2610 * @ts: The time stamp of the event read.
2612 * This reads the next event in the ring buffer and increments the iterator.
2614 struct ring_buffer_event *
2615 ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
2617 struct ring_buffer_event *event;
2618 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
2619 unsigned long flags;
2622 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2623 event = rb_iter_peek(iter, ts);
2627 rb_advance_iter(iter);
2629 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2631 if (event && event->type_len == RINGBUF_TYPE_PADDING) {
2638 EXPORT_SYMBOL_GPL(ring_buffer_read);
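/*
 * Illustrative sketch (editorial addition): a non-consuming pass over one
 * CPU's buffer using the iterator API. Recording is disabled by
 * ring_buffer_read_start() and re-enabled by ring_buffer_read_finish().
 * dump_cpu_buffer() and dump_event() are hypothetical names.
 *
 *	static void dump_cpu_buffer(struct ring_buffer *buffer, int cpu)
 *	{
 *		struct ring_buffer_iter *iter;
 *		struct ring_buffer_event *event;
 *		u64 ts;
 *
 *		iter = ring_buffer_read_start(buffer, cpu);
 *		if (!iter)
 *			return;
 *
 *		while ((event = ring_buffer_read(iter, &ts)))
 *			dump_event(ring_buffer_event_data(event), ts);
 *
 *		ring_buffer_read_finish(iter);
 *	}
 *
 * ring_buffer_iter_peek() can be used inside such a loop to look at the
 * next event without advancing the iterator.
 */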
2641 * ring_buffer_size - return the size of the ring buffer (in bytes)
2642 * @buffer: The ring buffer.
2644 unsigned long ring_buffer_size(struct ring_buffer *buffer)
2646 return BUF_PAGE_SIZE * buffer->pages;
2648 EXPORT_SYMBOL_GPL(ring_buffer_size);
2651 rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
2653 cpu_buffer->head_page
2654 = list_entry(cpu_buffer->pages.next, struct buffer_page, list);
2655 local_set(&cpu_buffer->head_page->write, 0);
2656 local_set(&cpu_buffer->head_page->entries, 0);
2657 local_set(&cpu_buffer->head_page->page->commit, 0);
2659 cpu_buffer->head_page->read = 0;
2661 cpu_buffer->tail_page = cpu_buffer->head_page;
2662 cpu_buffer->commit_page = cpu_buffer->head_page;
2664 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
2665 local_set(&cpu_buffer->reader_page->write, 0);
2666 local_set(&cpu_buffer->reader_page->entries, 0);
2667 local_set(&cpu_buffer->reader_page->page->commit, 0);
2668 cpu_buffer->reader_page->read = 0;
2670 cpu_buffer->nmi_dropped = 0;
2671 cpu_buffer->commit_overrun = 0;
2672 cpu_buffer->overrun = 0;
2673 cpu_buffer->read = 0;
2674 local_set(&cpu_buffer->entries, 0);
2676 cpu_buffer->write_stamp = 0;
2677 cpu_buffer->read_stamp = 0;
2681 * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer
2682 * @buffer: The ring buffer to reset a per cpu buffer of
2683 * @cpu: The CPU buffer to be reset
2685 void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
2687 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
2688 unsigned long flags;
2690 if (!cpumask_test_cpu(cpu, buffer->cpumask))
2693 atomic_inc(&cpu_buffer->record_disabled);
2695 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2697 __raw_spin_lock(&cpu_buffer->lock);
2699 rb_reset_cpu(cpu_buffer);
2701 __raw_spin_unlock(&cpu_buffer->lock);
2703 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2705 atomic_dec(&cpu_buffer->record_disabled);
2707 EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);
2710 * ring_buffer_reset - reset a ring buffer
2711 * @buffer: The ring buffer to reset all cpu buffers
2713 void ring_buffer_reset(struct ring_buffer *buffer)
2717 for_each_buffer_cpu(buffer, cpu)
2718 ring_buffer_reset_cpu(buffer, cpu);
2720 EXPORT_SYMBOL_GPL(ring_buffer_reset);
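/*
 * Illustrative sketch (editorial addition): clearing all per-cpu buffers
 * between two trace sessions. Disabling recording around the reset is a
 * defensive choice here, not a requirement; start_new_session() is a
 * hypothetical name.
 *
 *	static void start_new_session(struct ring_buffer *buffer)
 *	{
 *		ring_buffer_record_disable(buffer);
 *		ring_buffer_reset(buffer);
 *		ring_buffer_record_enable(buffer);
 *	}
 */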
2723 * ring_buffer_empty - is the ring buffer empty?
2724 * @buffer: The ring buffer to test
2726 int ring_buffer_empty(struct ring_buffer *buffer)
2728 struct ring_buffer_per_cpu *cpu_buffer;
2731 /* yes this is racy, but if you don't like the race, lock the buffer */
2732 for_each_buffer_cpu(buffer, cpu) {
2733 cpu_buffer = buffer->buffers[cpu];
2734 if (!rb_per_cpu_empty(cpu_buffer))
2740 EXPORT_SYMBOL_GPL(ring_buffer_empty);
2743 * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
2744 * @buffer: The ring buffer
2745 * @cpu: The CPU buffer to test
2747 int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
2749 struct ring_buffer_per_cpu *cpu_buffer;
2752 if (!cpumask_test_cpu(cpu, buffer->cpumask))
2755 cpu_buffer = buffer->buffers[cpu];
2756 ret = rb_per_cpu_empty(cpu_buffer);
2761 EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu);
2764 * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
2765 * @buffer_a: One buffer to swap with
2766 * @buffer_b: The other buffer to swap with
2768 * This function is useful for tracers that want to take a "snapshot"
2769 * of a CPU buffer and have another backup buffer lying around.
2770 * It is expected that the tracer handles the cpu buffer not being
2771 * used at the moment.
2773 int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
2774 struct ring_buffer *buffer_b, int cpu)
2776 struct ring_buffer_per_cpu *cpu_buffer_a;
2777 struct ring_buffer_per_cpu *cpu_buffer_b;
2780 if (!cpumask_test_cpu(cpu, buffer_a->cpumask) ||
2781 !cpumask_test_cpu(cpu, buffer_b->cpumask))
2784 /* At least make sure the two buffers are somewhat the same */
2785 if (buffer_a->pages != buffer_b->pages)
2790 if (ring_buffer_flags != RB_BUFFERS_ON)
2793 if (atomic_read(&buffer_a->record_disabled))
2796 if (atomic_read(&buffer_b->record_disabled))
2799 cpu_buffer_a = buffer_a->buffers[cpu];
2800 cpu_buffer_b = buffer_b->buffers[cpu];
2802 if (atomic_read(&cpu_buffer_a->record_disabled))
2805 if (atomic_read(&cpu_buffer_b->record_disabled))
2809 * We can't do a synchronize_sched here because this
2810 * function can be called in atomic context.
2811 * Normally this will be called from the same CPU as cpu.
2812 * If not, it is up to the caller to protect this.
2814 atomic_inc(&cpu_buffer_a->record_disabled);
2815 atomic_inc(&cpu_buffer_b->record_disabled);
2817 buffer_a->buffers[cpu] = cpu_buffer_b;
2818 buffer_b->buffers[cpu] = cpu_buffer_a;
2820 cpu_buffer_b->buffer = buffer_a;
2821 cpu_buffer_a->buffer = buffer_b;
2823 atomic_dec(&cpu_buffer_a->record_disabled);
2824 atomic_dec(&cpu_buffer_b->record_disabled);
2830 EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
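/*
 * Illustrative sketch (editorial addition): the "snapshot" pattern the
 * comment above describes. A live buffer's CPU buffer is swapped with a
 * spare buffer of the same size, so the captured data can be read later
 * without stopping the live trace. take_snapshot(), live and spare are
 * hypothetical names.
 *
 *	static int take_snapshot(struct ring_buffer *live,
 *				 struct ring_buffer *spare, int cpu)
 *	{
 *		int ret;
 *
 *		ret = ring_buffer_swap_cpu(live, spare, cpu);
 *		if (ret < 0)
 *			return ret;
 *
 *		return 0;
 *	}
 *
 * On success, the data that was in the live buffer for this cpu now sits
 * in the spare buffer.
 */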
2833 * ring_buffer_alloc_read_page - allocate a page to read from buffer
2834 * @buffer: the buffer to allocate for.
2836 * This function is used in conjunction with ring_buffer_read_page.
2837 * When reading a full page from the ring buffer, these functions
2838 * can be used to speed up the process. The calling function should
2839 * allocate a few pages first with this function. Then when it
2840 * needs to get pages from the ring buffer, it passes the result
2841 * of this function into ring_buffer_read_page, which will swap
2842 * the page that was allocated with the read page of the buffer.
2845 * The page allocated, or NULL on error.
2847 void *ring_buffer_alloc_read_page(struct ring_buffer *buffer)
2849 struct buffer_data_page *bpage;
2852 addr = __get_free_page(GFP_KERNEL);
2856 bpage = (void *)addr;
2858 rb_init_page(bpage);
2862 EXPORT_SYMBOL_GPL(ring_buffer_alloc_read_page);
2865 * ring_buffer_free_read_page - free an allocated read page
2866 * @buffer: the buffer the page was allocated for
2867 * @data: the page to free
2869 * Free a page allocated from ring_buffer_alloc_read_page.
2871 void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data)
2873 free_page((unsigned long)data);
2875 EXPORT_SYMBOL_GPL(ring_buffer_free_read_page);
2878 * ring_buffer_read_page - extract a page from the ring buffer
2879 * @buffer: buffer to extract from
2880 * @data_page: the page to use allocated from ring_buffer_alloc_read_page
2881 * @len: amount to extract
2882 * @cpu: the cpu of the buffer to extract
2883 * @full: should the extraction only happen when the page is full.
2885 * This function will pull out a page from the ring buffer and consume it.
2886 * @data_page must be the address of the variable that was returned
2887 * from ring_buffer_alloc_read_page. This is because the page might be used
2888 * to swap with a page in the ring buffer.
2891 * rpage = ring_buffer_alloc_read_page(buffer);
2892 * if (!rpage)
2893 *     return error;
2894 * ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0);
2895 * if (ret >= 0)
2896 *     process_page(rpage, ret);
2898 * When @full is set, the read will not succeed unless
2899 * the writer is off the reader page.
2901 * Note: it is up to the calling functions to handle sleeps and wakeups.
2902 * The ring buffer can be used anywhere in the kernel and can not
2903 * blindly call wake_up. The layer that uses the ring buffer must be
2904 * responsible for that.
2907 * >=0 if data has been transferred; the value is the offset of the consumed data.
2908 * <0 if no data has been transferred.
2910 int ring_buffer_read_page(struct ring_buffer *buffer,
2911 void **data_page, size_t len, int cpu, int full)
2913 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
2914 struct ring_buffer_event *event;
2915 struct buffer_data_page *bpage;
2916 struct buffer_page *reader;
2917 unsigned long flags;
2918 unsigned int commit;
2923 if (!cpumask_test_cpu(cpu, buffer->cpumask))
2927 * If len is not big enough to hold the page header, then
2928 * we can not copy anything.
2930 if (len <= BUF_PAGE_HDR_SIZE)
2933 len -= BUF_PAGE_HDR_SIZE;
2942 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2944 reader = rb_get_reader_page(cpu_buffer);
2948 event = rb_reader_event(cpu_buffer);
2950 read = reader->read;
2951 commit = rb_page_commit(reader);
2954 * If this page has been partially read or
2955 * if len is not big enough to read the rest of the page or
2956 * a writer is still on the page, then
2957 * we must copy the data from the page to the buffer.
2958 * Otherwise, we can simply swap the page with the one passed in.
2960 if (read || (len < (commit - read)) ||
2961 cpu_buffer->reader_page == cpu_buffer->commit_page) {
2962 struct buffer_data_page *rpage = cpu_buffer->reader_page->page;
2963 unsigned int rpos = read;
2964 unsigned int pos = 0;
2970 if (len > (commit - read))
2971 len = (commit - read);
2973 size = rb_event_length(event);
2978 /* save the current timestamp, since the user will need it */
2979 save_timestamp = cpu_buffer->read_stamp;
2981 /* Need to copy one event at a time */
2983 memcpy(bpage->data + pos, rpage->data + rpos, size);
2987 rb_advance_reader(cpu_buffer);
2988 rpos = reader->read;
2991 event = rb_reader_event(cpu_buffer);
2992 size = rb_event_length(event);
2993 } while (len > size);
2996 local_set(&bpage->commit, pos);
2997 bpage->time_stamp = save_timestamp;
2999 /* we copied everything to the beginning */
3002 /* update the entry counter */
3003 cpu_buffer->read += local_read(&reader->entries);
3005 /* swap the pages */
3006 rb_init_page(bpage);
3007 bpage = reader->page;
3008 reader->page = *data_page;
3009 local_set(&reader->write, 0);
3010 local_set(&reader->entries, 0);
3017 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
3022 EXPORT_SYMBOL_GPL(ring_buffer_read_page);
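/*
 * Illustrative sketch (editorial addition): the full life cycle of a read
 * page: allocate it once, extract data repeatedly, then free it.
 * read_cpu_pages() and consume_page() are hypothetical names; PAGE_SIZE is
 * used for @len because the read page is a single page.
 *
 *	static void read_cpu_pages(struct ring_buffer *buffer, int cpu)
 *	{
 *		void *page;
 *		int ret;
 *
 *		page = ring_buffer_alloc_read_page(buffer);
 *		if (!page)
 *			return;
 *
 *		while ((ret = ring_buffer_read_page(buffer, &page,
 *						    PAGE_SIZE, cpu, 0)) >= 0)
 *			consume_page(page, ret);
 *
 *		ring_buffer_free_read_page(buffer, page);
 *	}
 */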
3025 rb_simple_read(struct file *filp, char __user *ubuf,
3026 size_t cnt, loff_t *ppos)
3028 unsigned long *p = filp->private_data;
3032 if (test_bit(RB_BUFFERS_DISABLED_BIT, p))
3033 r = sprintf(buf, "permanently disabled\n");
3035 r = sprintf(buf, "%d\n", test_bit(RB_BUFFERS_ON_BIT, p));
3037 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3041 rb_simple_write(struct file *filp, const char __user *ubuf,
3042 size_t cnt, loff_t *ppos)
3044 unsigned long *p = filp->private_data;
3049 if (cnt >= sizeof(buf))
3052 if (copy_from_user(&buf, ubuf, cnt))
3057 ret = strict_strtoul(buf, 10, &val);
3062 set_bit(RB_BUFFERS_ON_BIT, p);
3064 clear_bit(RB_BUFFERS_ON_BIT, p);
3071 static const struct file_operations rb_simple_fops = {
3072 .open = tracing_open_generic,
3073 .read = rb_simple_read,
3074 .write = rb_simple_write,
3078 static __init int rb_init_debugfs(void)
3080 struct dentry *d_tracer;
3082 d_tracer = tracing_init_dentry();
3084 trace_create_file("tracing_on", 0644, d_tracer,
3085 &ring_buffer_flags, &rb_simple_fops);
3090 fs_initcall(rb_init_debugfs);
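/*
 * Editorial note: the "tracing_on" file created above gives user space a
 * switch for all ring buffers. Assuming debugfs is mounted in the usual
 * place (the mount point may differ):
 *
 *	echo 0 > /sys/kernel/debug/tracing/tracing_on	(stop recording)
 *	echo 1 > /sys/kernel/debug/tracing/tracing_on	(resume recording)
 */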
3092 #ifdef CONFIG_HOTPLUG_CPU
3093 static int rb_cpu_notify(struct notifier_block *self,
3094 unsigned long action, void *hcpu)
3096 struct ring_buffer *buffer =
3097 container_of(self, struct ring_buffer, cpu_notify);
3098 long cpu = (long)hcpu;
3101 case CPU_UP_PREPARE:
3102 case CPU_UP_PREPARE_FROZEN:
3103 if (cpu_isset(cpu, *buffer->cpumask))
3106 buffer->buffers[cpu] =
3107 rb_allocate_cpu_buffer(buffer, cpu);
3108 if (!buffer->buffers[cpu]) {
3109 WARN(1, "failed to allocate ring buffer on CPU %ld\n",
3114 cpu_set(cpu, *buffer->cpumask);
3116 case CPU_DOWN_PREPARE:
3117 case CPU_DOWN_PREPARE_FROZEN:
3120 * If we were to free the buffer, then the user would
3121 * lose any trace that was in the buffer.