4 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
6 #include <linux/ring_buffer.h>
7 #include <linux/spinlock.h>
8 #include <linux/debugfs.h>
9 #include <linux/uaccess.h>
10 #include <linux/module.h>
11 #include <linux/percpu.h>
12 #include <linux/mutex.h>
13 #include <linux/sched.h> /* used for sched_clock() (for now) */
14 #include <linux/init.h>
15 #include <linux/hash.h>
16 #include <linux/list.h>
21 /* Global flag to disable all recording to ring buffers */
22 static int ring_buffers_off __read_mostly;
25 * tracing_on - enable all tracing buffers
27 * This function enables all tracing buffers that may have been
28 * disabled with tracing_off.
36 * tracing_off - turn off all tracing buffers
38 * This function stops all tracing buffers from recording data.
39 * It does not disable any overhead the tracers themselves may
40 * be causing. This function simply causes all recording to
41 * the ring buffers to fail.
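 *
 * Example (illustrative sketch, not part of the original file; the
 * condition check is an assumed helper): a subsystem can freeze the
 * trace as soon as it detects a problem, so that the events leading
 * up to the problem are preserved:
 *
 *	if (something_went_wrong())
 *		tracing_off();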
43 void tracing_off(void)
48 /* Up this if you want to test the TIME_EXTENTS and normalization */
52 u64 ring_buffer_time_stamp(int cpu)
56 preempt_disable_notrace();
57 /* shift to debug/test normalization and TIME_EXTENTS */
58 time = sched_clock() << DEBUG_SHIFT;
59 preempt_enable_notrace();
64 void ring_buffer_normalize_time_stamp(int cpu, u64 *ts)
66 /* Just stupid testing the normalize function and deltas */
70 #define RB_EVNT_HDR_SIZE (sizeof(struct ring_buffer_event))
71 #define RB_ALIGNMENT_SHIFT 2
72 #define RB_ALIGNMENT (1 << RB_ALIGNMENT_SHIFT)
73 #define RB_MAX_SMALL_DATA 28
76 RB_LEN_TIME_EXTEND = 8,
77 RB_LEN_TIME_STAMP = 16,
80 /* inline for ring buffer fast paths */
81 static inline unsigned
82 rb_event_length(struct ring_buffer_event *event)
86 switch (event->type) {
87 case RINGBUF_TYPE_PADDING:
91 case RINGBUF_TYPE_TIME_EXTEND:
92 return RB_LEN_TIME_EXTEND;
94 case RINGBUF_TYPE_TIME_STAMP:
95 return RB_LEN_TIME_STAMP;
97 case RINGBUF_TYPE_DATA:
99 length = event->len << RB_ALIGNMENT_SHIFT;
101 length = event->array[0];
102 return length + RB_EVNT_HDR_SIZE;
111 * ring_buffer_event_length - return the length of the event
112 * @event: the event to get the length of
114 unsigned ring_buffer_event_length(struct ring_buffer_event *event)
116 return rb_event_length(event);
119 /* inline for ring buffer fast paths */
121 rb_event_data(struct ring_buffer_event *event)
123 BUG_ON(event->type != RINGBUF_TYPE_DATA);
124 /* If length is in len field, then array[0] has the data */
126 return (void *)&event->array[0];
127 /* Otherwise length is in array[0] and array[1] has the data */
128 return (void *)&event->array[1];
132 * ring_buffer_event_data - return the data of the event
133 * @event: the event to get the data from
135 void *ring_buffer_event_data(struct ring_buffer_event *event)
137 return rb_event_data(event);
140 #define for_each_buffer_cpu(buffer, cpu) \
141 for_each_cpu_mask(cpu, buffer->cpumask)
144 #define TS_MASK ((1ULL << TS_SHIFT) - 1)
145 #define TS_DELTA_TEST (~TS_MASK)
148 * This hack stolen from mm/slob.c.
149 * We can store per page timing information in the page frame of the page.
150 * Thanks to Peter Zijlstra for suggesting this idea.
153 u64 time_stamp; /* page time stamp */
154 local_t write; /* index for next write */
155 local_t commit; /* write committed index */
156 unsigned read; /* index for next read */
157 struct list_head list; /* list of free pages */
158 void *page; /* Actual data page */
162 * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing
165 static inline void free_buffer_page(struct buffer_page *bpage)
168 free_page((unsigned long)bpage->page);
173 * We need to fit the time_stamp delta into 27 bits.
175 static inline int test_time_stamp(u64 delta)
177 if (delta & TS_DELTA_TEST)
182 #define BUF_PAGE_SIZE PAGE_SIZE
185 * head_page == tail_page && head == tail then buffer is empty.
187 struct ring_buffer_per_cpu {
189 struct ring_buffer *buffer;
191 struct lock_class_key lock_key;
192 struct list_head pages;
193 struct buffer_page *head_page; /* read from head */
194 struct buffer_page *tail_page; /* write to tail */
195 struct buffer_page *commit_page; /* committed pages */
196 struct buffer_page *reader_page;
197 unsigned long overrun;
198 unsigned long entries;
201 atomic_t record_disabled;
210 atomic_t record_disabled;
214 struct ring_buffer_per_cpu **buffers;
217 struct ring_buffer_iter {
218 struct ring_buffer_per_cpu *cpu_buffer;
220 struct buffer_page *head_page;
224 #define RB_WARN_ON(buffer, cond) \
226 if (unlikely(cond)) { \
227 atomic_inc(&buffer->record_disabled); \
232 #define RB_WARN_ON_RET(buffer, cond) \
234 if (unlikely(cond)) { \
235 atomic_inc(&buffer->record_disabled); \
241 #define RB_WARN_ON_ONCE(buffer, cond) \
244 if (unlikely(cond) && !once) { \
246 atomic_inc(&buffer->record_disabled); \
252 * check_pages - integrity check of buffer pages
253 * @cpu_buffer: CPU buffer with pages to test
255 * As a safety measure we check to make sure the data pages have not
258 static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
260 struct list_head *head = &cpu_buffer->pages;
261 struct buffer_page *page, *tmp;
263 RB_WARN_ON_RET(cpu_buffer, head->next->prev != head);
264 RB_WARN_ON_RET(cpu_buffer, head->prev->next != head);
266 list_for_each_entry_safe(page, tmp, head, list) {
267 RB_WARN_ON_RET(cpu_buffer,
268 page->list.next->prev != &page->list);
269 RB_WARN_ON_RET(cpu_buffer,
270 page->list.prev->next != &page->list);
276 static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
279 struct list_head *head = &cpu_buffer->pages;
280 struct buffer_page *page, *tmp;
285 for (i = 0; i < nr_pages; i++) {
286 page = kzalloc_node(ALIGN(sizeof(*page), cache_line_size()),
287 GFP_KERNEL, cpu_to_node(cpu_buffer->cpu));
290 list_add(&page->list, &pages);
292 addr = __get_free_page(GFP_KERNEL);
295 page->page = (void *)addr;
298 list_splice(&pages, head);
300 rb_check_pages(cpu_buffer);
305 list_for_each_entry_safe(page, tmp, &pages, list) {
306 list_del_init(&page->list);
307 free_buffer_page(page);
312 static struct ring_buffer_per_cpu *
313 rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
315 struct ring_buffer_per_cpu *cpu_buffer;
316 struct buffer_page *page;
320 cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
321 GFP_KERNEL, cpu_to_node(cpu));
325 cpu_buffer->cpu = cpu;
326 cpu_buffer->buffer = buffer;
327 spin_lock_init(&cpu_buffer->lock);
328 INIT_LIST_HEAD(&cpu_buffer->pages);
330 page = kzalloc_node(ALIGN(sizeof(*page), cache_line_size()),
331 GFP_KERNEL, cpu_to_node(cpu));
333 goto fail_free_buffer;
335 cpu_buffer->reader_page = page;
336 addr = __get_free_page(GFP_KERNEL);
338 goto fail_free_reader;
339 page->page = (void *)addr;
341 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
343 ret = rb_allocate_pages(cpu_buffer, buffer->pages);
345 goto fail_free_reader;
347 cpu_buffer->head_page
348 = list_entry(cpu_buffer->pages.next, struct buffer_page, list);
349 cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;
354 free_buffer_page(cpu_buffer->reader_page);
361 static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
363 struct list_head *head = &cpu_buffer->pages;
364 struct buffer_page *page, *tmp;
366 list_del_init(&cpu_buffer->reader_page->list);
367 free_buffer_page(cpu_buffer->reader_page);
369 list_for_each_entry_safe(page, tmp, head, list) {
370 list_del_init(&page->list);
371 free_buffer_page(page);
377 * Causes compile errors if the struct buffer_page gets bigger
378 * than the struct page.
380 extern int ring_buffer_page_too_big(void);
383 * ring_buffer_alloc - allocate a new ring_buffer
384 * @size: the size in bytes that is needed.
385 * @flags: attributes to set for the ring buffer.
387 * Currently the only flag that is available is the RB_FL_OVERWRITE
388 * flag. This flag means that the buffer will overwrite old data
389 * when the buffer wraps. If this flag is not set, the buffer will
390 * drop data when the tail hits the head.
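 *
 * Example (illustrative sketch, not part of the original file; the
 * one megabyte size and the error handling are assumptions):
 *
 *	struct ring_buffer *buffer;
 *
 *	buffer = ring_buffer_alloc(1 << 20, RB_FL_OVERWRITE);
 *	if (!buffer)
 *		return -ENOMEM;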
392 struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags)
394 struct ring_buffer *buffer;
398 /* Paranoid! Optimizes out when all is well */
399 if (sizeof(struct buffer_page) > sizeof(struct page))
400 ring_buffer_page_too_big();
403 /* keep it in its own cache line */
404 buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
409 buffer->pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
410 buffer->flags = flags;
412 /* need at least two pages */
413 if (buffer->pages == 1)
416 buffer->cpumask = cpu_possible_map;
417 buffer->cpus = nr_cpu_ids;
419 bsize = sizeof(void *) * nr_cpu_ids;
420 buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
422 if (!buffer->buffers)
423 goto fail_free_buffer;
425 for_each_buffer_cpu(buffer, cpu) {
426 buffer->buffers[cpu] =
427 rb_allocate_cpu_buffer(buffer, cpu);
428 if (!buffer->buffers[cpu])
429 goto fail_free_buffers;
432 mutex_init(&buffer->mutex);
437 for_each_buffer_cpu(buffer, cpu) {
438 if (buffer->buffers[cpu])
439 rb_free_cpu_buffer(buffer->buffers[cpu]);
441 kfree(buffer->buffers);
449 * ring_buffer_free - free a ring buffer.
450 * @buffer: the buffer to free.
453 ring_buffer_free(struct ring_buffer *buffer)
457 for_each_buffer_cpu(buffer, cpu)
458 rb_free_cpu_buffer(buffer->buffers[cpu]);
463 static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
466 rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
468 struct buffer_page *page;
472 atomic_inc(&cpu_buffer->record_disabled);
475 for (i = 0; i < nr_pages; i++) {
476 BUG_ON(list_empty(&cpu_buffer->pages));
477 p = cpu_buffer->pages.next;
478 page = list_entry(p, struct buffer_page, list);
479 list_del_init(&page->list);
480 free_buffer_page(page);
482 BUG_ON(list_empty(&cpu_buffer->pages));
484 rb_reset_cpu(cpu_buffer);
486 rb_check_pages(cpu_buffer);
488 atomic_dec(&cpu_buffer->record_disabled);
493 rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
494 struct list_head *pages, unsigned nr_pages)
496 struct buffer_page *page;
500 atomic_inc(&cpu_buffer->record_disabled);
503 for (i = 0; i < nr_pages; i++) {
504 BUG_ON(list_empty(pages));
506 page = list_entry(p, struct buffer_page, list);
507 list_del_init(&page->list);
508 list_add_tail(&page->list, &cpu_buffer->pages);
510 rb_reset_cpu(cpu_buffer);
512 rb_check_pages(cpu_buffer);
514 atomic_dec(&cpu_buffer->record_disabled);
518 * ring_buffer_resize - resize the ring buffer
519 * @buffer: the buffer to resize.
520 * @size: the new size.
522 * The tracer is responsible for making sure that the buffer is
523 * not being used while changing the size.
524 * Note: We may be able to change the above requirement by using
525 * RCU synchronizations.
527 * Minimum size is 2 * BUF_PAGE_SIZE.
529 * Returns -1 on failure.
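 *
 * Example (illustrative sketch, not part of the original file; the
 * new_size variable and the error path are assumptions):
 *
 *	if (ring_buffer_resize(buffer, new_size) < 0)
 *		return -ENOMEM;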
531 int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
533 struct ring_buffer_per_cpu *cpu_buffer;
534 unsigned nr_pages, rm_pages, new_pages;
535 struct buffer_page *page, *tmp;
536 unsigned long buffer_size;
542 * Always succeed at resizing a non-existent buffer:
547 size = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
548 size *= BUF_PAGE_SIZE;
549 buffer_size = buffer->pages * BUF_PAGE_SIZE;
551 /* we need a minimum of two pages */
552 if (size < BUF_PAGE_SIZE * 2)
553 size = BUF_PAGE_SIZE * 2;
555 if (size == buffer_size)
558 mutex_lock(&buffer->mutex);
560 nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
562 if (size < buffer_size) {
564 /* easy case, just free pages */
565 BUG_ON(nr_pages >= buffer->pages);
567 rm_pages = buffer->pages - nr_pages;
569 for_each_buffer_cpu(buffer, cpu) {
570 cpu_buffer = buffer->buffers[cpu];
571 rb_remove_pages(cpu_buffer, rm_pages);
577 * This is a bit more difficult. We only want to add pages
578 * when we can allocate enough for all CPUs. We do this
579 * by allocating all the pages and storing them on a local
580 * linked list. If we succeed in our allocation, then we
581 * add these pages to the cpu_buffers. Otherwise we just free
582 * them all and return -ENOMEM;
584 BUG_ON(nr_pages <= buffer->pages);
585 new_pages = nr_pages - buffer->pages;
587 for_each_buffer_cpu(buffer, cpu) {
588 for (i = 0; i < new_pages; i++) {
589 page = kzalloc_node(ALIGN(sizeof(*page),
591 GFP_KERNEL, cpu_to_node(cpu));
594 list_add(&page->list, &pages);
595 addr = __get_free_page(GFP_KERNEL);
598 page->page = (void *)addr;
602 for_each_buffer_cpu(buffer, cpu) {
603 cpu_buffer = buffer->buffers[cpu];
604 rb_insert_pages(cpu_buffer, &pages, new_pages);
607 BUG_ON(!list_empty(&pages));
610 buffer->pages = nr_pages;
611 mutex_unlock(&buffer->mutex);
616 list_for_each_entry_safe(page, tmp, &pages, list) {
617 list_del_init(&page->list);
618 free_buffer_page(page);
623 static inline int rb_null_event(struct ring_buffer_event *event)
625 return event->type == RINGBUF_TYPE_PADDING;
628 static inline void *__rb_page_index(struct buffer_page *page, unsigned index)
630 return page->page + index;
633 static inline struct ring_buffer_event *
634 rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
636 return __rb_page_index(cpu_buffer->reader_page,
637 cpu_buffer->reader_page->read);
640 static inline struct ring_buffer_event *
641 rb_head_event(struct ring_buffer_per_cpu *cpu_buffer)
643 return __rb_page_index(cpu_buffer->head_page,
644 cpu_buffer->head_page->read);
647 static inline struct ring_buffer_event *
648 rb_iter_head_event(struct ring_buffer_iter *iter)
650 return __rb_page_index(iter->head_page, iter->head);
653 static inline unsigned rb_page_write(struct buffer_page *bpage)
655 return local_read(&bpage->write);
658 static inline unsigned rb_page_commit(struct buffer_page *bpage)
660 return local_read(&bpage->commit);
663 /* Size is determined by what has been committed */
664 static inline unsigned rb_page_size(struct buffer_page *bpage)
666 return rb_page_commit(bpage);
669 static inline unsigned
670 rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer)
672 return rb_page_commit(cpu_buffer->commit_page);
675 static inline unsigned rb_head_size(struct ring_buffer_per_cpu *cpu_buffer)
677 return rb_page_commit(cpu_buffer->head_page);
681 * When the tail hits the head and the buffer is in overwrite mode,
682 * the head jumps to the next page and all content on the previous
683 * page is discarded. But before doing so, we update the overrun
684 * variable of the buffer.
686 static void rb_update_overflow(struct ring_buffer_per_cpu *cpu_buffer)
688 struct ring_buffer_event *event;
691 for (head = 0; head < rb_head_size(cpu_buffer);
692 head += rb_event_length(event)) {
694 event = __rb_page_index(cpu_buffer->head_page, head);
695 BUG_ON(rb_null_event(event));
696 /* Only count data entries */
697 if (event->type != RINGBUF_TYPE_DATA)
699 cpu_buffer->overrun++;
700 cpu_buffer->entries--;
704 static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer,
705 struct buffer_page **page)
707 struct list_head *p = (*page)->list.next;
709 if (p == &cpu_buffer->pages)
712 *page = list_entry(p, struct buffer_page, list);
715 static inline unsigned
716 rb_event_index(struct ring_buffer_event *event)
718 unsigned long addr = (unsigned long)event;
720 return (addr & ~PAGE_MASK) - (PAGE_SIZE - BUF_PAGE_SIZE);
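/*
 * Worked example (illustrative, not part of the original file): since
 * BUF_PAGE_SIZE equals PAGE_SIZE here, the second term above is zero,
 * so the index is simply the event's byte offset within its data page.
 * An event that starts 0x50 bytes into a page yields an index of 0x50.
 */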
724 rb_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
725 struct ring_buffer_event *event)
727 unsigned long addr = (unsigned long)event;
730 index = rb_event_index(event);
733 return cpu_buffer->commit_page->page == (void *)addr &&
734 rb_commit_index(cpu_buffer) == index;
738 rb_set_commit_event(struct ring_buffer_per_cpu *cpu_buffer,
739 struct ring_buffer_event *event)
741 unsigned long addr = (unsigned long)event;
744 index = rb_event_index(event);
747 while (cpu_buffer->commit_page->page != (void *)addr) {
748 RB_WARN_ON(cpu_buffer,
749 cpu_buffer->commit_page == cpu_buffer->tail_page);
750 cpu_buffer->commit_page->commit =
751 cpu_buffer->commit_page->write;
752 rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
753 cpu_buffer->write_stamp = cpu_buffer->commit_page->time_stamp;
756 /* Now set the commit to the event's index */
757 local_set(&cpu_buffer->commit_page->commit, index);
761 rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
764 * We only race with interrupts and NMIs on this CPU.
765 * If we own the commit event, then we can commit
766 * all others that interrupted us, since the interruptions
767 * are in stack format (they finish before they come
768 * back to us). This allows us to do a simple loop to
769 * assign the commit to the tail.
771 while (cpu_buffer->commit_page != cpu_buffer->tail_page) {
772 cpu_buffer->commit_page->commit =
773 cpu_buffer->commit_page->write;
774 rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
775 cpu_buffer->write_stamp = cpu_buffer->commit_page->time_stamp;
776 /* add barrier to keep gcc from optimizing too much */
779 while (rb_commit_index(cpu_buffer) !=
780 rb_page_write(cpu_buffer->commit_page)) {
781 cpu_buffer->commit_page->commit =
782 cpu_buffer->commit_page->write;
787 static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
789 cpu_buffer->read_stamp = cpu_buffer->reader_page->time_stamp;
790 cpu_buffer->reader_page->read = 0;
793 static inline void rb_inc_iter(struct ring_buffer_iter *iter)
795 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
798 * The iterator could be on the reader page (it starts there).
799 * But the head could have moved, since the reader was
800 * found. Check for this case and assign the iterator
801 * to the head page instead of next.
803 if (iter->head_page == cpu_buffer->reader_page)
804 iter->head_page = cpu_buffer->head_page;
806 rb_inc_page(cpu_buffer, &iter->head_page);
808 iter->read_stamp = iter->head_page->time_stamp;
813 * ring_buffer_update_event - update event type and data
814 * @event: the event to update
815 * @type: the type of event
816 * @length: the size of the event field in the ring buffer
818 * Update the type and data fields of the event. The length
819 * is the actual size that is written to the ring buffer,
820 * and with this, we can determine what to place into the
824 rb_update_event(struct ring_buffer_event *event,
825 unsigned type, unsigned length)
831 case RINGBUF_TYPE_PADDING:
834 case RINGBUF_TYPE_TIME_EXTEND:
836 (RB_LEN_TIME_EXTEND + (RB_ALIGNMENT-1))
837 >> RB_ALIGNMENT_SHIFT;
840 case RINGBUF_TYPE_TIME_STAMP:
842 (RB_LEN_TIME_STAMP + (RB_ALIGNMENT-1))
843 >> RB_ALIGNMENT_SHIFT;
846 case RINGBUF_TYPE_DATA:
847 length -= RB_EVNT_HDR_SIZE;
848 if (length > RB_MAX_SMALL_DATA) {
850 event->array[0] = length;
853 (length + (RB_ALIGNMENT-1))
854 >> RB_ALIGNMENT_SHIFT;
861 static inline unsigned rb_calculate_event_length(unsigned length)
863 struct ring_buffer_event event; /* Used only for sizeof array */
865 /* zero length can cause confusion */
869 if (length > RB_MAX_SMALL_DATA)
870 length += sizeof(event.array[0]);
872 length += RB_EVNT_HDR_SIZE;
873 length = ALIGN(length, RB_ALIGNMENT);
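/*
 * Worked example (illustrative, not part of the original file; it
 * assumes the event header and the array[0] slot are 4 bytes each).
 * A 10 byte payload is not larger than RB_MAX_SMALL_DATA, so the
 * reserved size is ALIGN(10 + 4, 4) = 16 bytes; rb_update_event()
 * later stores the 12 byte payload area in the len field as
 * 12 >> 2 = 3, and rb_event_length() decodes it back to 12 + 4 = 16.
 * A 40 byte payload is larger than RB_MAX_SMALL_DATA, so 4 extra
 * bytes are reserved for array[0] and the total becomes
 * ALIGN(40 + 4 + 4, 4) = 48 bytes, with the data length kept in
 * array[0] instead of the len field.
 */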
878 static struct ring_buffer_event *
879 __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
880 unsigned type, unsigned long length, u64 *ts)
882 struct buffer_page *tail_page, *head_page, *reader_page;
883 unsigned long tail, write;
884 struct ring_buffer *buffer = cpu_buffer->buffer;
885 struct ring_buffer_event *event;
888 tail_page = cpu_buffer->tail_page;
889 write = local_add_return(length, &tail_page->write);
890 tail = write - length;
892 /* See if we shot past the end of this buffer page */
893 if (write > BUF_PAGE_SIZE) {
894 struct buffer_page *next_page = tail_page;
896 spin_lock_irqsave(&cpu_buffer->lock, flags);
898 rb_inc_page(cpu_buffer, &next_page);
900 head_page = cpu_buffer->head_page;
901 reader_page = cpu_buffer->reader_page;
903 /* we grabbed the lock before incrementing */
904 RB_WARN_ON(cpu_buffer, next_page == reader_page);
907 * If for some reason, we had an interrupt storm that made
908 * it all the way around the buffer, bail, and warn
911 if (unlikely(next_page == cpu_buffer->commit_page)) {
916 if (next_page == head_page) {
917 if (!(buffer->flags & RB_FL_OVERWRITE)) {
919 if (tail <= BUF_PAGE_SIZE)
920 local_set(&tail_page->write, tail);
924 /* tail_page has not moved yet? */
925 if (tail_page == cpu_buffer->tail_page) {
926 /* count overflows */
927 rb_update_overflow(cpu_buffer);
929 rb_inc_page(cpu_buffer, &head_page);
930 cpu_buffer->head_page = head_page;
931 cpu_buffer->head_page->read = 0;
936 * If the tail page is still the same as what we think
937 * it is, then it is up to us to update the tail
940 if (tail_page == cpu_buffer->tail_page) {
941 local_set(&next_page->write, 0);
942 local_set(&next_page->commit, 0);
943 cpu_buffer->tail_page = next_page;
945 /* reread the time stamp */
946 *ts = ring_buffer_time_stamp(cpu_buffer->cpu);
947 cpu_buffer->tail_page->time_stamp = *ts;
951 * The actual tail page has moved forward.
953 if (tail < BUF_PAGE_SIZE) {
954 /* Mark the rest of the page with padding */
955 event = __rb_page_index(tail_page, tail);
956 event->type = RINGBUF_TYPE_PADDING;
959 if (tail <= BUF_PAGE_SIZE)
960 /* Set the write back to the previous setting */
961 local_set(&tail_page->write, tail);
964 * If this was a commit entry that failed,
967 if (tail_page == cpu_buffer->commit_page &&
968 tail == rb_commit_index(cpu_buffer)) {
969 rb_set_commit_to_write(cpu_buffer);
972 spin_unlock_irqrestore(&cpu_buffer->lock, flags);
974 /* fail and let the caller try again */
975 return ERR_PTR(-EAGAIN);
978 /* We reserved something on the buffer */
980 BUG_ON(write > BUF_PAGE_SIZE);
982 event = __rb_page_index(tail_page, tail);
983 rb_update_event(event, type, length);
986 * If this is a commit and the tail is zero, then update
987 * this page's time stamp.
989 if (!tail && rb_is_commit(cpu_buffer, event))
990 cpu_buffer->commit_page->time_stamp = *ts;
995 spin_unlock_irqrestore(&cpu_buffer->lock, flags);
1000 rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer,
1001 u64 *ts, u64 *delta)
1003 struct ring_buffer_event *event;
1007 if (unlikely(*delta > (1ULL << 59) && !once++)) {
1008 printk(KERN_WARNING "Delta way too big! %llu"
1009 " ts=%llu write stamp = %llu\n",
1010 (unsigned long long)*delta,
1011 (unsigned long long)*ts,
1012 (unsigned long long)cpu_buffer->write_stamp);
1017 * The delta is too big, we need to add a
1020 event = __rb_reserve_next(cpu_buffer,
1021 RINGBUF_TYPE_TIME_EXTEND,
1027 if (PTR_ERR(event) == -EAGAIN)
1030 /* Only a committed time event can update the write stamp */
1031 if (rb_is_commit(cpu_buffer, event)) {
1033 * If this is the first on the page, then we need to
1034 * update the page itself, and just put in a zero.
1036 if (rb_event_index(event)) {
1037 event->time_delta = *delta & TS_MASK;
1038 event->array[0] = *delta >> TS_SHIFT;
1040 cpu_buffer->commit_page->time_stamp = *ts;
1041 event->time_delta = 0;
1042 event->array[0] = 0;
1044 cpu_buffer->write_stamp = *ts;
1045 /* let the caller know this was the commit */
1048 /* Darn, this is just wasted space */
1049 event->time_delta = 0;
1050 event->array[0] = 0;
1059 static struct ring_buffer_event *
1060 rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer,
1061 unsigned type, unsigned long length)
1063 struct ring_buffer_event *event;
1070 * We allow for interrupts to reenter here and do a trace.
1071 * If one does, it will cause this original code to loop
1072 * back here. Even with heavy interrupts happening, this
1073 * should only happen a few times in a row. If this happens
1074 * 1000 times in a row, there must be either an interrupt
1075 * storm or we have something buggy.
1078 if (unlikely(++nr_loops > 1000)) {
1079 RB_WARN_ON(cpu_buffer, 1);
1083 ts = ring_buffer_time_stamp(cpu_buffer->cpu);
1086 * Only the first commit can update the timestamp.
1087 * Yes there is a race here. If an interrupt comes in
1088 * just after the conditional and it traces too, then it
1089 * will also check the deltas. More than one timestamp may
1090 * also be made. But only the entry that did the actual
1091 * commit will be something other than zero.
1093 if (cpu_buffer->tail_page == cpu_buffer->commit_page &&
1094 rb_page_write(cpu_buffer->tail_page) ==
1095 rb_commit_index(cpu_buffer)) {
1097 delta = ts - cpu_buffer->write_stamp;
1099 /* make sure this delta is calculated here */
1102 /* Did the write stamp get updated already? */
1103 if (unlikely(ts < cpu_buffer->write_stamp))
1106 if (test_time_stamp(delta)) {
1108 commit = rb_add_time_stamp(cpu_buffer, &ts, &delta);
1110 if (commit == -EBUSY)
1113 if (commit == -EAGAIN)
1116 RB_WARN_ON(cpu_buffer, commit < 0);
1119 /* Non-commits have zero deltas */
1122 event = __rb_reserve_next(cpu_buffer, type, length, &ts);
1123 if (PTR_ERR(event) == -EAGAIN)
1127 if (unlikely(commit))
1129 * Ouch! We needed a timestamp and it was committed. But
1130 * we didn't get our event reserved.
1132 rb_set_commit_to_write(cpu_buffer);
1137 * If the timestamp was committed, make the commit our entry
1138 * now so that we will update it when needed.
1141 rb_set_commit_event(cpu_buffer, event);
1142 else if (!rb_is_commit(cpu_buffer, event))
1145 event->time_delta = delta;
1150 static DEFINE_PER_CPU(int, rb_need_resched);
1153 * ring_buffer_lock_reserve - reserve a part of the buffer
1154 * @buffer: the ring buffer to reserve from
1155 * @length: the length of the data to reserve (excluding event header)
1156 * @flags: a pointer to save the interrupt flags
1158 * Returns a reserved event on the ring buffer to copy directly to.
1159 * The user of this interface will need to get the body to write into
1160 * and can use the ring_buffer_event_data() interface.
1162 * The length is the length of the data needed, not the event length
1163 * which also includes the event header.
1165 * Must be paired with ring_buffer_unlock_commit, unless NULL is returned.
1166 * If NULL is returned, then nothing has been allocated or locked.
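 *
 * Example (illustrative sketch, not part of the original file; the
 * struct my_entry type and the value being recorded are assumptions):
 *
 *	struct ring_buffer_event *event;
 *	struct my_entry *entry;
 *	unsigned long flags;
 *
 *	event = ring_buffer_lock_reserve(buffer, sizeof(*entry), &flags);
 *	if (!event)
 *		return;
 *	entry = ring_buffer_event_data(event);
 *	entry->value = value;
 *	ring_buffer_unlock_commit(buffer, event, flags);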
1168 struct ring_buffer_event *
1169 ring_buffer_lock_reserve(struct ring_buffer *buffer,
1170 unsigned long length,
1171 unsigned long *flags)
1173 struct ring_buffer_per_cpu *cpu_buffer;
1174 struct ring_buffer_event *event;
1177 if (ring_buffers_off)
1180 if (atomic_read(&buffer->record_disabled))
1183 /* If we are tracing schedule, we don't want to recurse */
1184 resched = need_resched();
1185 preempt_disable_notrace();
1187 cpu = raw_smp_processor_id();
1189 if (!cpu_isset(cpu, buffer->cpumask))
1192 cpu_buffer = buffer->buffers[cpu];
1194 if (atomic_read(&cpu_buffer->record_disabled))
1197 length = rb_calculate_event_length(length);
1198 if (length > BUF_PAGE_SIZE)
1201 event = rb_reserve_next_event(cpu_buffer, RINGBUF_TYPE_DATA, length);
1206 * Need to store resched state on this cpu.
1207 * Only the first needs to.
1210 if (preempt_count() == 1)
1211 per_cpu(rb_need_resched, cpu) = resched;
1217 preempt_enable_notrace();
1219 preempt_enable_notrace();
1223 static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
1224 struct ring_buffer_event *event)
1226 cpu_buffer->entries++;
1228 /* Only process further if we own the commit */
1229 if (!rb_is_commit(cpu_buffer, event))
1232 cpu_buffer->write_stamp += event->time_delta;
1234 rb_set_commit_to_write(cpu_buffer);
1238 * ring_buffer_unlock_commit - commit a reserved event
1239 * @buffer: The buffer to commit to
1240 * @event: The event pointer to commit.
1241 * @flags: the interrupt flags received from ring_buffer_lock_reserve.
1243 * This commits the data to the ring buffer, and releases any locks held.
1245 * Must be paired with ring_buffer_lock_reserve.
1247 int ring_buffer_unlock_commit(struct ring_buffer *buffer,
1248 struct ring_buffer_event *event,
1249 unsigned long flags)
1251 struct ring_buffer_per_cpu *cpu_buffer;
1252 int cpu = raw_smp_processor_id();
1254 cpu_buffer = buffer->buffers[cpu];
1256 rb_commit(cpu_buffer, event);
1259 * Only the last preempt count needs to restore preemption.
1261 if (preempt_count() == 1) {
1262 if (per_cpu(rb_need_resched, cpu))
1263 preempt_enable_no_resched_notrace();
1265 preempt_enable_notrace();
1267 preempt_enable_no_resched_notrace();
1273 * ring_buffer_write - write data to the buffer without reserving
1274 * @buffer: The ring buffer to write to.
1275 * @length: The length of the data being written (excluding the event header)
1276 * @data: The data to write to the buffer.
1278 * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as
1279 * one function. If you already have the data to write to the buffer, it
1280 * may be easier to simply call this function.
1282 * Note, like ring_buffer_lock_reserve, the length is the length of the data
1283 * and not the length of the event which would hold the header.
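 *
 * Example (illustrative sketch, not part of the original file; the
 * struct my_entry variable is an assumption):
 *
 *	struct my_entry entry;
 *
 *	entry.value = value;
 *	ring_buffer_write(buffer, sizeof(entry), &entry);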
1285 int ring_buffer_write(struct ring_buffer *buffer,
1286 unsigned long length,
1289 struct ring_buffer_per_cpu *cpu_buffer;
1290 struct ring_buffer_event *event;
1291 unsigned long event_length;
1296 if (ring_buffers_off)
1299 if (atomic_read(&buffer->record_disabled))
1302 resched = need_resched();
1303 preempt_disable_notrace();
1305 cpu = raw_smp_processor_id();
1307 if (!cpu_isset(cpu, buffer->cpumask))
1310 cpu_buffer = buffer->buffers[cpu];
1312 if (atomic_read(&cpu_buffer->record_disabled))
1315 event_length = rb_calculate_event_length(length);
1316 event = rb_reserve_next_event(cpu_buffer,
1317 RINGBUF_TYPE_DATA, event_length);
1321 body = rb_event_data(event);
1323 memcpy(body, data, length);
1325 rb_commit(cpu_buffer, event);
1330 preempt_enable_no_resched_notrace();
1332 preempt_enable_notrace();
1337 static inline int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
1339 struct buffer_page *reader = cpu_buffer->reader_page;
1340 struct buffer_page *head = cpu_buffer->head_page;
1341 struct buffer_page *commit = cpu_buffer->commit_page;
1343 return reader->read == rb_page_commit(reader) &&
1344 (commit == reader ||
1346 head->read == rb_page_commit(commit)));
1350 * ring_buffer_record_disable - stop all writes into the buffer
1351 * @buffer: The ring buffer to stop writes to.
1353 * This prevents all writes to the buffer. Any attempt to write
1354 * to the buffer after this will fail and return NULL.
1356 * The caller should call synchronize_sched() after this.
1358 void ring_buffer_record_disable(struct ring_buffer *buffer)
1360 atomic_inc(&buffer->record_disabled);
1364 * ring_buffer_record_enable - enable writes to the buffer
1365 * @buffer: The ring buffer to enable writes
1367 * Note, multiple disables will need the same number of enables
1368 * to truly enable the writing (much like preempt_disable).
1370 void ring_buffer_record_enable(struct ring_buffer *buffer)
1372 atomic_dec(&buffer->record_disabled);
1376 * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
1377 * @buffer: The ring buffer to stop writes to.
1378 * @cpu: The CPU buffer to stop
1380 * This prevents all writes to the buffer. Any attempt to write
1381 * to the buffer after this will fail and return NULL.
1383 * The caller should call synchronize_sched() after this.
1385 void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
1387 struct ring_buffer_per_cpu *cpu_buffer;
1389 if (!cpu_isset(cpu, buffer->cpumask))
1392 cpu_buffer = buffer->buffers[cpu];
1393 atomic_inc(&cpu_buffer->record_disabled);
1397 * ring_buffer_record_enable_cpu - enable writes to the buffer
1398 * @buffer: The ring buffer to enable writes
1399 * @cpu: The CPU to enable.
1401 * Note, multiple disables will need the same number of enables
1402 * to truly enable the writing (much like preempt_disable).
1404 void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
1406 struct ring_buffer_per_cpu *cpu_buffer;
1408 if (!cpu_isset(cpu, buffer->cpumask))
1411 cpu_buffer = buffer->buffers[cpu];
1412 atomic_dec(&cpu_buffer->record_disabled);
1416 * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
1417 * @buffer: The ring buffer
1418 * @cpu: The per CPU buffer to get the entries from.
1420 unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
1422 struct ring_buffer_per_cpu *cpu_buffer;
1424 if (!cpu_isset(cpu, buffer->cpumask))
1427 cpu_buffer = buffer->buffers[cpu];
1428 return cpu_buffer->entries;
1432 * ring_buffer_overrun_cpu - get the number of overruns in a cpu_buffer
1433 * @buffer: The ring buffer
1434 * @cpu: The per CPU buffer to get the number of overruns from
1436 unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
1438 struct ring_buffer_per_cpu *cpu_buffer;
1440 if (!cpu_isset(cpu, buffer->cpumask))
1443 cpu_buffer = buffer->buffers[cpu];
1444 return cpu_buffer->overrun;
1448 * ring_buffer_entries - get the number of entries in a buffer
1449 * @buffer: The ring buffer
1451 * Returns the total number of entries in the ring buffer
1454 unsigned long ring_buffer_entries(struct ring_buffer *buffer)
1456 struct ring_buffer_per_cpu *cpu_buffer;
1457 unsigned long entries = 0;
1460 /* if you care about this being correct, lock the buffer */
1461 for_each_buffer_cpu(buffer, cpu) {
1462 cpu_buffer = buffer->buffers[cpu];
1463 entries += cpu_buffer->entries;
1470 * ring_buffer_overruns - get the number of overruns in the buffer
1471 * @buffer: The ring buffer
1473 * Returns the total number of overruns in the ring buffer
1476 unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
1478 struct ring_buffer_per_cpu *cpu_buffer;
1479 unsigned long overruns = 0;
1482 /* if you care about this being correct, lock the buffer */
1483 for_each_buffer_cpu(buffer, cpu) {
1484 cpu_buffer = buffer->buffers[cpu];
1485 overruns += cpu_buffer->overrun;
1492 * ring_buffer_iter_reset - reset an iterator
1493 * @iter: The iterator to reset
1495 * Resets the iterator, so that it will start from the beginning
1498 void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
1500 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
1502 /* Iterator usage is expected to have record disabled */
1503 if (list_empty(&cpu_buffer->reader_page->list)) {
1504 iter->head_page = cpu_buffer->head_page;
1505 iter->head = cpu_buffer->head_page->read;
1507 iter->head_page = cpu_buffer->reader_page;
1508 iter->head = cpu_buffer->reader_page->read;
1511 iter->read_stamp = cpu_buffer->read_stamp;
1513 iter->read_stamp = iter->head_page->time_stamp;
1517 * ring_buffer_iter_empty - check if an iterator has no more to read
1518 * @iter: The iterator to check
1520 int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
1522 struct ring_buffer_per_cpu *cpu_buffer;
1524 cpu_buffer = iter->cpu_buffer;
1526 return iter->head_page == cpu_buffer->commit_page &&
1527 iter->head == rb_commit_index(cpu_buffer);
1531 rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
1532 struct ring_buffer_event *event)
1536 switch (event->type) {
1537 case RINGBUF_TYPE_PADDING:
1540 case RINGBUF_TYPE_TIME_EXTEND:
1541 delta = event->array[0];
1543 delta += event->time_delta;
1544 cpu_buffer->read_stamp += delta;
1547 case RINGBUF_TYPE_TIME_STAMP:
1548 /* FIXME: not implemented */
1551 case RINGBUF_TYPE_DATA:
1552 cpu_buffer->read_stamp += event->time_delta;
1562 rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
1563 struct ring_buffer_event *event)
1567 switch (event->type) {
1568 case RINGBUF_TYPE_PADDING:
1571 case RINGBUF_TYPE_TIME_EXTEND:
1572 delta = event->array[0];
1574 delta += event->time_delta;
1575 iter->read_stamp += delta;
1578 case RINGBUF_TYPE_TIME_STAMP:
1579 /* FIXME: not implemented */
1582 case RINGBUF_TYPE_DATA:
1583 iter->read_stamp += event->time_delta;
1592 static struct buffer_page *
1593 rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
1595 struct buffer_page *reader = NULL;
1596 unsigned long flags;
1599 spin_lock_irqsave(&cpu_buffer->lock, flags);
1603 * This should normally only loop twice. But because the
1604 * start of the reader inserts an empty page, it causes
1605 * a case where we will loop three times. There should be no
1606 * reason to loop four times (that I know of).
1608 if (unlikely(++nr_loops > 3)) {
1609 RB_WARN_ON(cpu_buffer, 1);
1614 reader = cpu_buffer->reader_page;
1616 /* If there's more to read, return this page */
1617 if (cpu_buffer->reader_page->read < rb_page_size(reader))
1620 /* Never should we have an index greater than the size */
1621 RB_WARN_ON(cpu_buffer,
1622 cpu_buffer->reader_page->read > rb_page_size(reader));
1624 /* check if we caught up to the tail */
1626 if (cpu_buffer->commit_page == cpu_buffer->reader_page)
1630 * Splice the empty reader page into the list around the head.
1631 * Reset the reader page to size zero.
1634 reader = cpu_buffer->head_page;
1635 cpu_buffer->reader_page->list.next = reader->list.next;
1636 cpu_buffer->reader_page->list.prev = reader->list.prev;
1638 local_set(&cpu_buffer->reader_page->write, 0);
1639 local_set(&cpu_buffer->reader_page->commit, 0);
1641 /* Make the reader page now replace the head */
1642 reader->list.prev->next = &cpu_buffer->reader_page->list;
1643 reader->list.next->prev = &cpu_buffer->reader_page->list;
1646 * If the tail is on the reader, then we must set the head
1647 * to the inserted page, otherwise we set it one before.
1649 cpu_buffer->head_page = cpu_buffer->reader_page;
1651 if (cpu_buffer->commit_page != reader)
1652 rb_inc_page(cpu_buffer, &cpu_buffer->head_page);
1654 /* Finally update the reader page to the new head */
1655 cpu_buffer->reader_page = reader;
1656 rb_reset_reader_page(cpu_buffer);
1661 spin_unlock_irqrestore(&cpu_buffer->lock, flags);
1666 static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
1668 struct ring_buffer_event *event;
1669 struct buffer_page *reader;
1672 reader = rb_get_reader_page(cpu_buffer);
1674 /* This function should not be called when the buffer is empty */
1677 event = rb_reader_event(cpu_buffer);
1679 if (event->type == RINGBUF_TYPE_DATA)
1680 cpu_buffer->entries--;
1682 rb_update_read_stamp(cpu_buffer, event);
1684 length = rb_event_length(event);
1685 cpu_buffer->reader_page->read += length;
1688 static void rb_advance_iter(struct ring_buffer_iter *iter)
1690 struct ring_buffer *buffer;
1691 struct ring_buffer_per_cpu *cpu_buffer;
1692 struct ring_buffer_event *event;
1695 cpu_buffer = iter->cpu_buffer;
1696 buffer = cpu_buffer->buffer;
1699 * Check if we are at the end of the buffer.
1701 if (iter->head >= rb_page_size(iter->head_page)) {
1702 BUG_ON(iter->head_page == cpu_buffer->commit_page);
1707 event = rb_iter_head_event(iter);
1709 length = rb_event_length(event);
1712 * This should not be called to advance the header if we are
1713 * at the tail of the buffer.
1715 BUG_ON((iter->head_page == cpu_buffer->commit_page) &&
1716 (iter->head + length > rb_commit_index(cpu_buffer)));
1718 rb_update_iter_read_stamp(iter, event);
1720 iter->head += length;
1722 /* check for end of page padding */
1723 if ((iter->head >= rb_page_size(iter->head_page)) &&
1724 (iter->head_page != cpu_buffer->commit_page))
1725 rb_advance_iter(iter);
1729 * ring_buffer_peek - peek at the next event to be read
1730 * @buffer: The ring buffer to read
1731 * @cpu: The cpu to peek at
1732 * @ts: The timestamp counter of this event.
1734 * This will return the event that will be read next, but does
1735 * not consume the data.
1737 struct ring_buffer_event *
1738 ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
1740 struct ring_buffer_per_cpu *cpu_buffer;
1741 struct ring_buffer_event *event;
1742 struct buffer_page *reader;
1745 if (!cpu_isset(cpu, buffer->cpumask))
1748 cpu_buffer = buffer->buffers[cpu];
1752 * We repeat when a timestamp is encountered. It is possible
1753 * to get multiple timestamps from an interrupt entering just
1754 * as one timestamp is about to be written. The max times
1755 * that this can happen is the number of nested interrupts we
1756 * can have. Nesting 10 deep of interrupts is clearly
1759 if (unlikely(++nr_loops > 10)) {
1760 RB_WARN_ON(cpu_buffer, 1);
1764 reader = rb_get_reader_page(cpu_buffer);
1768 event = rb_reader_event(cpu_buffer);
1770 switch (event->type) {
1771 case RINGBUF_TYPE_PADDING:
1772 RB_WARN_ON(cpu_buffer, 1);
1773 rb_advance_reader(cpu_buffer);
1776 case RINGBUF_TYPE_TIME_EXTEND:
1777 /* Internal data, OK to advance */
1778 rb_advance_reader(cpu_buffer);
1781 case RINGBUF_TYPE_TIME_STAMP:
1782 /* FIXME: not implemented */
1783 rb_advance_reader(cpu_buffer);
1786 case RINGBUF_TYPE_DATA:
1788 *ts = cpu_buffer->read_stamp + event->time_delta;
1789 ring_buffer_normalize_time_stamp(cpu_buffer->cpu, ts);
1801 * ring_buffer_iter_peek - peek at the next event to be read
1802 * @iter: The ring buffer iterator
1803 * @ts: The timestamp counter of this event.
1805 * This will return the event that will be read next, but does
1806 * not increment the iterator.
1808 struct ring_buffer_event *
1809 ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
1811 struct ring_buffer *buffer;
1812 struct ring_buffer_per_cpu *cpu_buffer;
1813 struct ring_buffer_event *event;
1816 if (ring_buffer_iter_empty(iter))
1819 cpu_buffer = iter->cpu_buffer;
1820 buffer = cpu_buffer->buffer;
1824 * We repeat when a timestamp is encountered. It is possible
1825 * to get multiple timestamps from an interrupt entering just
1826 * as one timestamp is about to be written. The max times
1827 * that this can happen is the number of nested interrupts we
1828 * can have. Nesting 10 deep of interrupts is clearly
1831 if (unlikely(++nr_loops > 10)) {
1832 RB_WARN_ON(cpu_buffer, 1);
1836 if (rb_per_cpu_empty(cpu_buffer))
1839 event = rb_iter_head_event(iter);
1841 switch (event->type) {
1842 case RINGBUF_TYPE_PADDING:
1846 case RINGBUF_TYPE_TIME_EXTEND:
1847 /* Internal data, OK to advance */
1848 rb_advance_iter(iter);
1851 case RINGBUF_TYPE_TIME_STAMP:
1852 /* FIXME: not implemented */
1853 rb_advance_iter(iter);
1856 case RINGBUF_TYPE_DATA:
1858 *ts = iter->read_stamp + event->time_delta;
1859 ring_buffer_normalize_time_stamp(cpu_buffer->cpu, ts);
1871 * ring_buffer_consume - return an event and consume it
1872 * @buffer: The ring buffer to get the next event from
1874 * Returns the next event in the ring buffer, and that event is consumed.
1875 * Meaning that sequential reads will keep returning a different event,
1876 * and eventually empty the ring buffer if the producer is slower.
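 *
 * Example (illustrative sketch, not part of the original file; the
 * process_entry() helper is an assumption):
 *
 *	struct ring_buffer_event *event;
 *	u64 ts;
 *
 *	while ((event = ring_buffer_consume(buffer, cpu, &ts)))
 *		process_entry(ring_buffer_event_data(event), ts);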
1878 struct ring_buffer_event *
1879 ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
1881 struct ring_buffer_per_cpu *cpu_buffer;
1882 struct ring_buffer_event *event;
1884 if (!cpu_isset(cpu, buffer->cpumask))
1887 event = ring_buffer_peek(buffer, cpu, ts);
1891 cpu_buffer = buffer->buffers[cpu];
1892 rb_advance_reader(cpu_buffer);
1898 * ring_buffer_read_start - start a non consuming read of the buffer
1899 * @buffer: The ring buffer to read from
1900 * @cpu: The cpu buffer to iterate over
1902 * This starts up an iteration through the buffer. It also disables
1903 * the recording to the buffer until the reading is finished.
1904 * This prevents the reading from being corrupted. This is not
1905 * a consuming read, so a producer is not expected.
1907 * Must be paired with ring_buffer_read_finish.
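 *
 * Example (illustrative sketch, not part of the original file; the
 * process_entry() helper is an assumption):
 *
 *	struct ring_buffer_iter *iter;
 *	struct ring_buffer_event *event;
 *	u64 ts;
 *
 *	iter = ring_buffer_read_start(buffer, cpu);
 *	if (!iter)
 *		return;
 *	while ((event = ring_buffer_read(iter, &ts)))
 *		process_entry(ring_buffer_event_data(event), ts);
 *	ring_buffer_read_finish(iter);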
1909 struct ring_buffer_iter *
1910 ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
1912 struct ring_buffer_per_cpu *cpu_buffer;
1913 struct ring_buffer_iter *iter;
1914 unsigned long flags;
1916 if (!cpu_isset(cpu, buffer->cpumask))
1919 iter = kmalloc(sizeof(*iter), GFP_KERNEL);
1923 cpu_buffer = buffer->buffers[cpu];
1925 iter->cpu_buffer = cpu_buffer;
1927 atomic_inc(&cpu_buffer->record_disabled);
1928 synchronize_sched();
1930 spin_lock_irqsave(&cpu_buffer->lock, flags);
1931 ring_buffer_iter_reset(iter);
1932 spin_unlock_irqrestore(&cpu_buffer->lock, flags);
1938 * ring_buffer_read_finish - finish reading the iterator of the buffer
1939 * @iter: The iterator retrieved by ring_buffer_read_start
1941 * This re-enables the recording to the buffer, and frees the
1945 ring_buffer_read_finish(struct ring_buffer_iter *iter)
1947 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
1949 atomic_dec(&cpu_buffer->record_disabled);
1954 * ring_buffer_read - read the next item in the ring buffer by the iterator
1955 * @iter: The ring buffer iterator
1956 * @ts: The time stamp of the event read.
1958 * This reads the next event in the ring buffer and increments the iterator.
1960 struct ring_buffer_event *
1961 ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
1963 struct ring_buffer_event *event;
1965 event = ring_buffer_iter_peek(iter, ts);
1969 rb_advance_iter(iter);
1975 * ring_buffer_size - return the size of the ring buffer (in bytes)
1976 * @buffer: The ring buffer.
1978 unsigned long ring_buffer_size(struct ring_buffer *buffer)
1980 return BUF_PAGE_SIZE * buffer->pages;
1984 rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
1986 cpu_buffer->head_page
1987 = list_entry(cpu_buffer->pages.next, struct buffer_page, list);
1988 local_set(&cpu_buffer->head_page->write, 0);
1989 local_set(&cpu_buffer->head_page->commit, 0);
1991 cpu_buffer->head_page->read = 0;
1993 cpu_buffer->tail_page = cpu_buffer->head_page;
1994 cpu_buffer->commit_page = cpu_buffer->head_page;
1996 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
1997 local_set(&cpu_buffer->reader_page->write, 0);
1998 local_set(&cpu_buffer->reader_page->commit, 0);
1999 cpu_buffer->reader_page->read = 0;
2001 cpu_buffer->overrun = 0;
2002 cpu_buffer->entries = 0;
2006 * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer
2007 * @buffer: The ring buffer to reset a per cpu buffer of
2008 * @cpu: The CPU buffer to be reset
2010 void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
2012 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
2013 unsigned long flags;
2015 if (!cpu_isset(cpu, buffer->cpumask))
2018 spin_lock_irqsave(&cpu_buffer->lock, flags);
2020 rb_reset_cpu(cpu_buffer);
2022 spin_unlock_irqrestore(&cpu_buffer->lock, flags);
2026 * ring_buffer_reset - reset a ring buffer
2027 * @buffer: The ring buffer to reset all cpu buffers
2029 void ring_buffer_reset(struct ring_buffer *buffer)
2033 for_each_buffer_cpu(buffer, cpu)
2034 ring_buffer_reset_cpu(buffer, cpu);
2038 * ring_buffer_empty - is the ring buffer empty?
2039 * @buffer: The ring buffer to test
2041 int ring_buffer_empty(struct ring_buffer *buffer)
2043 struct ring_buffer_per_cpu *cpu_buffer;
2046 /* yes this is racy, but if you don't like the race, lock the buffer */
2047 for_each_buffer_cpu(buffer, cpu) {
2048 cpu_buffer = buffer->buffers[cpu];
2049 if (!rb_per_cpu_empty(cpu_buffer))
2056 * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
2057 * @buffer: The ring buffer
2058 * @cpu: The CPU buffer to test
2060 int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
2062 struct ring_buffer_per_cpu *cpu_buffer;
2064 if (!cpu_isset(cpu, buffer->cpumask))
2067 cpu_buffer = buffer->buffers[cpu];
2068 return rb_per_cpu_empty(cpu_buffer);
2072 * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
2073 * @buffer_a: One buffer to swap with
2074 * @buffer_b: The other buffer to swap with
2076 * This function is useful for tracers that want to take a "snapshot"
2077 * of a CPU buffer and have another backup buffer lying around.
2078 * It is expected that the tracer handles the cpu buffer not being
2079 * used at the moment.
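 *
 * Example (illustrative sketch, not part of the original file; the
 * snapshot_buffer and live_buffer names, the read_snapshot() helper
 * and the zero-on-success return convention are assumptions):
 *
 *	if (ring_buffer_swap_cpu(snapshot_buffer, live_buffer, cpu) == 0)
 *		read_snapshot(snapshot_buffer, cpu);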
2081 int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
2082 struct ring_buffer *buffer_b, int cpu)
2084 struct ring_buffer_per_cpu *cpu_buffer_a;
2085 struct ring_buffer_per_cpu *cpu_buffer_b;
2087 if (!cpu_isset(cpu, buffer_a->cpumask) ||
2088 !cpu_isset(cpu, buffer_b->cpumask))
2091 /* At least make sure the two buffers are somewhat the same */
2092 if (buffer_a->size != buffer_b->size ||
2093 buffer_a->pages != buffer_b->pages)
2096 cpu_buffer_a = buffer_a->buffers[cpu];
2097 cpu_buffer_b = buffer_b->buffers[cpu];
2100 * We can't do a synchronize_sched here because this
2101 * function can be called in atomic context.
2102 * Normally this will be called from the same CPU as cpu.
2103 * If not it's up to the caller to protect this.
2105 atomic_inc(&cpu_buffer_a->record_disabled);
2106 atomic_inc(&cpu_buffer_b->record_disabled);
2108 buffer_a->buffers[cpu] = cpu_buffer_b;
2109 buffer_b->buffers[cpu] = cpu_buffer_a;
2111 cpu_buffer_b->buffer = buffer_a;
2112 cpu_buffer_a->buffer = buffer_b;
2114 atomic_dec(&cpu_buffer_a->record_disabled);
2115 atomic_dec(&cpu_buffer_b->record_disabled);
2121 rb_simple_read(struct file *filp, char __user *ubuf,
2122 size_t cnt, loff_t *ppos)
2124 int *p = filp->private_data;
2128 /* !ring_buffers_off == tracing_on */
2129 r = sprintf(buf, "%d\n", !*p);
2131 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
2135 rb_simple_write(struct file *filp, const char __user *ubuf,
2136 size_t cnt, loff_t *ppos)
2138 int *p = filp->private_data;
2143 if (cnt >= sizeof(buf))
2146 if (copy_from_user(&buf, ubuf, cnt))
2151 ret = strict_strtoul(buf, 10, &val);
2155 /* !ring_buffers_off == tracing_on */
2163 static struct file_operations rb_simple_fops = {
2164 .open = tracing_open_generic,
2165 .read = rb_simple_read,
2166 .write = rb_simple_write,
2170 static __init int rb_init_debugfs(void)
2172 struct dentry *d_tracer;
2173 struct dentry *entry;
2175 d_tracer = tracing_init_dentry();
2177 entry = debugfs_create_file("tracing_on", 0644, d_tracer,
2178 &ring_buffers_off, &rb_simple_fops);
2180 pr_warning("Could not create debugfs 'tracing_on' entry\n");
2185 fs_initcall(rb_init_debugfs);