1 /*
2  * Generic ring buffer
3  *
4  * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
5  */
6 #include <linux/ring_buffer.h>
7 #include <linux/spinlock.h>
8 #include <linux/debugfs.h>
9 #include <linux/uaccess.h>
10 #include <linux/module.h>
11 #include <linux/percpu.h>
12 #include <linux/mutex.h>
13 #include <linux/sched.h>        /* used for sched_clock() (for now) */
14 #include <linux/init.h>
15 #include <linux/hash.h>
16 #include <linux/list.h>
17 #include <linux/fs.h>
18
19 #include "trace.h"
20
21 /*
22  * A fast way to enable or disable all ring buffers is to
23  * call tracing_on or tracing_off. Turning off the ring buffers
24  * prevents all ring buffers from being recorded to.
25  * Turning this switch on makes it OK to write to the
26  * ring buffer, if the ring buffer is enabled itself.
27  *
28  * There are three layers that must be on in order to write
29  * to the ring buffer.
30  *
31  * 1) This global flag must be set.
32  * 2) The ring buffer must be enabled for recording.
33  * 3) The per cpu buffer must be enabled for recording.
34  *
35  * In case of an anomaly, this global flag has a bit set that
36  * will permanently disable all ring buffers.
37  */
38
39 /*
40  * Global flag to disable all recording to ring buffers
41  *  This has two bits: ON, DISABLED
42  *
43  *  ON   DISABLED
44  * ---- ----------
45  *   0      0        : ring buffers are off
46  *   1      0        : ring buffers are on
47  *   X      1        : ring buffers are permanently disabled
48  */
49
50 enum {
51         RB_BUFFERS_ON_BIT       = 0,
52         RB_BUFFERS_DISABLED_BIT = 1,
53 };
54
55 enum {
56         RB_BUFFERS_ON           = 1 << RB_BUFFERS_ON_BIT,
57         RB_BUFFERS_DISABLED     = 1 << RB_BUFFERS_DISABLED_BIT,
58 };
59
60 static long ring_buffer_flags __read_mostly = RB_BUFFERS_ON;
61
62 /**
63  * tracing_on - enable all tracing buffers
64  *
65  * This function enables all tracing buffers that may have been
66  * disabled with tracing_off.
67  */
68 void tracing_on(void)
69 {
70         set_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
71 }
72 EXPORT_SYMBOL_GPL(tracing_on);
73
74 /**
75  * tracing_off - turn off all tracing buffers
76  *
77  * This function stops all tracing buffers from recording data.
78  * It does not disable any overhead the tracers themselves may
79  * be causing. This function simply causes all recording to
80  * the ring buffers to fail.
81  */
82 void tracing_off(void)
83 {
84         clear_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
85 }
86 EXPORT_SYMBOL_GPL(tracing_off);
87
88 /**
89  * tracing_off_permanent - permanently disable ring buffers
90  *
91  * This function, once called, will disable all ring buffers
92  * permanently.
93  */
94 void tracing_off_permanent(void)
95 {
96         set_bit(RB_BUFFERS_DISABLED_BIT, &ring_buffer_flags);
97 }
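/*
 * A minimal usage sketch, assuming a hypothetical debug hook: freeze the
 * buffers while their contents are inspected, then either re-enable
 * recording or latch the DISABLED bit for good.  Only tracing_off(),
 * tracing_on() and tracing_off_permanent() above are real; the my_*()
 * helpers are placeholders.
 *
 *	void my_debug_hook(int fatal)
 *	{
 *		tracing_off();			// stop writes to all ring buffers
 *		my_collect_dump();		// read the buffers while quiescent
 *		if (fatal)
 *			tracing_off_permanent();
 *		else
 *			tracing_on();		// allow writes again
 *	}
 */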
98
99
100
101 /* Up this if you want to test the TIME_EXTENTS and normalization */
102 #define DEBUG_SHIFT 0
103
104 /* FIXME!!! */
105 u64 ring_buffer_time_stamp(int cpu)
106 {
107         u64 time;
108
109         preempt_disable_notrace();
110         /* shift to debug/test normalization and TIME_EXTENTS */
111         time = sched_clock() << DEBUG_SHIFT;
112         preempt_enable_no_resched_notrace();
113
114         return time;
115 }
116 EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);
117
118 void ring_buffer_normalize_time_stamp(int cpu, u64 *ts)
119 {
120         /* Just stupid testing the normalize function and deltas */
121         *ts >>= DEBUG_SHIFT;
122 }
123 EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);
124
125 #define RB_EVNT_HDR_SIZE (sizeof(struct ring_buffer_event))
126 #define RB_ALIGNMENT            4U
127 #define RB_MAX_SMALL_DATA       28
128
129 enum {
130         RB_LEN_TIME_EXTEND = 8,
131         RB_LEN_TIME_STAMP = 16,
132 };
133
134 /* inline for ring buffer fast paths */
135 static unsigned
136 rb_event_length(struct ring_buffer_event *event)
137 {
138         unsigned length;
139
140         switch (event->type) {
141         case RINGBUF_TYPE_PADDING:
142                 /* undefined */
143                 return -1;
144
145         case RINGBUF_TYPE_TIME_EXTEND:
146                 return RB_LEN_TIME_EXTEND;
147
148         case RINGBUF_TYPE_TIME_STAMP:
149                 return RB_LEN_TIME_STAMP;
150
151         case RINGBUF_TYPE_DATA:
152                 if (event->len)
153                         length = event->len * RB_ALIGNMENT;
154                 else
155                         length = event->array[0];
156                 return length + RB_EVNT_HDR_SIZE;
157         default:
158                 BUG();
159         }
160         /* not hit */
161         return 0;
162 }
163
164 /**
165  * ring_buffer_event_length - return the length of the event
166  * @event: the event to get the length of
167  */
168 unsigned ring_buffer_event_length(struct ring_buffer_event *event)
169 {
170         unsigned length = rb_event_length(event);
171         if (event->type != RINGBUF_TYPE_DATA)
172                 return length;
173         length -= RB_EVNT_HDR_SIZE;
174         if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0]))
175                 length -= sizeof(event->array[0]);
176         return length;
177 }
178 EXPORT_SYMBOL_GPL(ring_buffer_event_length);
179
180 /* inline for ring buffer fast paths */
181 static void *
182 rb_event_data(struct ring_buffer_event *event)
183 {
184         BUG_ON(event->type != RINGBUF_TYPE_DATA);
185         /* If length is in len field, then array[0] has the data */
186         if (event->len)
187                 return (void *)&event->array[0];
188         /* Otherwise length is in array[0] and array[1] has the data */
189         return (void *)&event->array[1];
190 }
191
192 /**
193  * ring_buffer_event_data - return the data of the event
194  * @event: the event to get the data from
195  */
196 void *ring_buffer_event_data(struct ring_buffer_event *event)
197 {
198         return rb_event_data(event);
199 }
200 EXPORT_SYMBOL_GPL(ring_buffer_event_data);
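/*
 * A consumer-side sketch: once an event pointer has been obtained (for
 * example from a consuming read such as ring_buffer_consume(), defined
 * later in this file), the payload and its size come from the two
 * accessors above.  struct my_entry and process() are illustrative
 * placeholders, not part of this API.
 *
 *	struct my_entry *entry;
 *	unsigned len;
 *
 *	entry = ring_buffer_event_data(event);		// payload, header skipped
 *	len   = ring_buffer_event_length(event);	// payload size in bytes
 *	if (len >= sizeof(*entry))
 *		process(entry);
 */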
201
202 #define for_each_buffer_cpu(buffer, cpu)                \
203         for_each_cpu(cpu, buffer->cpumask)
204
205 #define TS_SHIFT        27
206 #define TS_MASK         ((1ULL << TS_SHIFT) - 1)
207 #define TS_DELTA_TEST   (~TS_MASK)
208
209 struct buffer_data_page {
210         u64              time_stamp;    /* page time stamp */
211         local_t          commit;        /* write committed index */
212         unsigned char    data[];        /* data of buffer page */
213 };
214
215 struct buffer_page {
216         local_t          write;         /* index for next write */
217         unsigned         read;          /* index for next read */
218         struct list_head list;          /* list of free pages */
219         struct buffer_data_page *page;  /* Actual data page */
220 };
221
222 static void rb_init_page(struct buffer_data_page *bpage)
223 {
224         local_set(&bpage->commit, 0);
225 }
226
227 /*
228  * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing
229  * this issue out.
230  */
231 static void free_buffer_page(struct buffer_page *bpage)
232 {
233         free_page((unsigned long)bpage->page);
234         kfree(bpage);
235 }
236
237 /*
238  * We need to fit the time_stamp delta into 27 bits.
239  */
240 static inline int test_time_stamp(u64 delta)
241 {
242         if (delta & TS_DELTA_TEST)
243                 return 1;
244         return 0;
245 }
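/*
 * A worked example of the 27-bit limit: sched_clock() counts in
 * nanoseconds, so the largest delta that fits directly in an event is
 *
 *	2^27 ns = 134,217,728 ns, roughly 134 ms.
 *
 * A gap between events larger than that makes test_time_stamp() return 1,
 * and rb_add_time_stamp() below then emits a TIME_EXTEND event carrying
 * the low 27 bits in time_delta and the upper bits in array[0], so a
 * reader reassembles the full delta as
 *
 *	delta = ((u64)event->array[0] << TS_SHIFT) + event->time_delta;
 *
 * (see rb_update_read_stamp() further down).
 */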
246
247 #define BUF_PAGE_SIZE (PAGE_SIZE - offsetof(struct buffer_data_page, data))
248
249 /*
250  * head_page == tail_page && head == tail then buffer is empty.
251  */
252 struct ring_buffer_per_cpu {
253         int                             cpu;
254         struct ring_buffer              *buffer;
255         spinlock_t                      reader_lock; /* serialize readers */
256         raw_spinlock_t                  lock;
257         struct lock_class_key           lock_key;
258         struct list_head                pages;
259         struct buffer_page              *head_page;     /* read from head */
260         struct buffer_page              *tail_page;     /* write to tail */
261         struct buffer_page              *commit_page;   /* committed pages */
262         struct buffer_page              *reader_page;
263         unsigned long                   overrun;
264         unsigned long                   entries;
265         u64                             write_stamp;
266         u64                             read_stamp;
267         atomic_t                        record_disabled;
268 };
269
270 struct ring_buffer {
271         unsigned                        pages;
272         unsigned                        flags;
273         int                             cpus;
274         cpumask_var_t                   cpumask;
275         atomic_t                        record_disabled;
276
277         struct mutex                    mutex;
278
279         struct ring_buffer_per_cpu      **buffers;
280 };
281
282 struct ring_buffer_iter {
283         struct ring_buffer_per_cpu      *cpu_buffer;
284         unsigned long                   head;
285         struct buffer_page              *head_page;
286         u64                             read_stamp;
287 };
288
289 /* buffer may be either ring_buffer or ring_buffer_per_cpu */
290 #define RB_WARN_ON(buffer, cond)                                \
291         ({                                                      \
292                 int _____ret = unlikely(cond);                  \
293                 if (_____ret) {                                 \
294                         atomic_inc(&buffer->record_disabled);   \
295                         WARN_ON(1);                             \
296                 }                                               \
297                 _____ret;                                       \
298         })
299
300 /**
301  * check_pages - integrity check of buffer pages
302  * @cpu_buffer: CPU buffer with pages to test
303  *
304  * As a safety measure we check to make sure the data pages have not
305  * been corrupted.
306  */
307 static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
308 {
309         struct list_head *head = &cpu_buffer->pages;
310         struct buffer_page *bpage, *tmp;
311
312         if (RB_WARN_ON(cpu_buffer, head->next->prev != head))
313                 return -1;
314         if (RB_WARN_ON(cpu_buffer, head->prev->next != head))
315                 return -1;
316
317         list_for_each_entry_safe(bpage, tmp, head, list) {
318                 if (RB_WARN_ON(cpu_buffer,
319                                bpage->list.next->prev != &bpage->list))
320                         return -1;
321                 if (RB_WARN_ON(cpu_buffer,
322                                bpage->list.prev->next != &bpage->list))
323                         return -1;
324         }
325
326         return 0;
327 }
328
329 static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
330                              unsigned nr_pages)
331 {
332         struct list_head *head = &cpu_buffer->pages;
333         struct buffer_page *bpage, *tmp;
334         unsigned long addr;
335         LIST_HEAD(pages);
336         unsigned i;
337
338         for (i = 0; i < nr_pages; i++) {
339                 bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
340                                     GFP_KERNEL, cpu_to_node(cpu_buffer->cpu));
341                 if (!bpage)
342                         goto free_pages;
343                 list_add(&bpage->list, &pages);
344
345                 addr = __get_free_page(GFP_KERNEL);
346                 if (!addr)
347                         goto free_pages;
348                 bpage->page = (void *)addr;
349                 rb_init_page(bpage->page);
350         }
351
352         list_splice(&pages, head);
353
354         rb_check_pages(cpu_buffer);
355
356         return 0;
357
358  free_pages:
359         list_for_each_entry_safe(bpage, tmp, &pages, list) {
360                 list_del_init(&bpage->list);
361                 free_buffer_page(bpage);
362         }
363         return -ENOMEM;
364 }
365
366 static struct ring_buffer_per_cpu *
367 rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
368 {
369         struct ring_buffer_per_cpu *cpu_buffer;
370         struct buffer_page *bpage;
371         unsigned long addr;
372         int ret;
373
374         cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
375                                   GFP_KERNEL, cpu_to_node(cpu));
376         if (!cpu_buffer)
377                 return NULL;
378
379         cpu_buffer->cpu = cpu;
380         cpu_buffer->buffer = buffer;
381         spin_lock_init(&cpu_buffer->reader_lock);
382         cpu_buffer->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
383         INIT_LIST_HEAD(&cpu_buffer->pages);
384
385         bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
386                             GFP_KERNEL, cpu_to_node(cpu));
387         if (!bpage)
388                 goto fail_free_buffer;
389
390         cpu_buffer->reader_page = bpage;
391         addr = __get_free_page(GFP_KERNEL);
392         if (!addr)
393                 goto fail_free_reader;
394         bpage->page = (void *)addr;
395         rb_init_page(bpage->page);
396
397         INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
398
399         ret = rb_allocate_pages(cpu_buffer, buffer->pages);
400         if (ret < 0)
401                 goto fail_free_reader;
402
403         cpu_buffer->head_page
404                 = list_entry(cpu_buffer->pages.next, struct buffer_page, list);
405         cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;
406
407         return cpu_buffer;
408
409  fail_free_reader:
410         free_buffer_page(cpu_buffer->reader_page);
411
412  fail_free_buffer:
413         kfree(cpu_buffer);
414         return NULL;
415 }
416
417 static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
418 {
419         struct list_head *head = &cpu_buffer->pages;
420         struct buffer_page *bpage, *tmp;
421
422         list_del_init(&cpu_buffer->reader_page->list);
423         free_buffer_page(cpu_buffer->reader_page);
424
425         list_for_each_entry_safe(bpage, tmp, head, list) {
426                 list_del_init(&bpage->list);
427                 free_buffer_page(bpage);
428         }
429         kfree(cpu_buffer);
430 }
431
432 /*
433  * Causes compile errors if the struct buffer_page gets bigger
434  * than the struct page.
435  */
436 extern int ring_buffer_page_too_big(void);
437
438 /**
439  * ring_buffer_alloc - allocate a new ring_buffer
440  * @size: the size in bytes per cpu that is needed.
441  * @flags: attributes to set for the ring buffer.
442  *
443  * Currently the only flag that is available is the RB_FL_OVERWRITE
444  * flag. This flag means that the buffer will overwrite old data
445  * when the buffer wraps. If this flag is not set, the buffer will
446  * drop data when the tail hits the head.
447  */
448 struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags)
449 {
450         struct ring_buffer *buffer;
451         int bsize;
452         int cpu;
453
454         /* Paranoid! Optimizes out when all is well */
455         if (sizeof(struct buffer_page) > sizeof(struct page))
456                 ring_buffer_page_too_big();
457
458
459         /* keep it in its own cache line */
460         buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
461                          GFP_KERNEL);
462         if (!buffer)
463                 return NULL;
464
465         if (!alloc_cpumask_var(&buffer->cpumask, GFP_KERNEL))
466                 goto fail_free_buffer;
467
468         buffer->pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
469         buffer->flags = flags;
470
471         /* need at least two pages */
472         if (buffer->pages == 1)
473                 buffer->pages++;
474
475         cpumask_copy(buffer->cpumask, cpu_possible_mask);
476         buffer->cpus = nr_cpu_ids;
477
478         bsize = sizeof(void *) * nr_cpu_ids;
479         buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
480                                   GFP_KERNEL);
481         if (!buffer->buffers)
482                 goto fail_free_cpumask;
483
484         for_each_buffer_cpu(buffer, cpu) {
485                 buffer->buffers[cpu] =
486                         rb_allocate_cpu_buffer(buffer, cpu);
487                 if (!buffer->buffers[cpu])
488                         goto fail_free_buffers;
489         }
490
491         mutex_init(&buffer->mutex);
492
493         return buffer;
494
495  fail_free_buffers:
496         for_each_buffer_cpu(buffer, cpu) {
497                 if (buffer->buffers[cpu])
498                         rb_free_cpu_buffer(buffer->buffers[cpu]);
499         }
500         kfree(buffer->buffers);
501
502  fail_free_cpumask:
503         free_cpumask_var(buffer->cpumask);
504
505  fail_free_buffer:
506         kfree(buffer);
507         return NULL;
508 }
509 EXPORT_SYMBOL_GPL(ring_buffer_alloc);
510
511 /**
512  * ring_buffer_free - free a ring buffer.
513  * @buffer: the buffer to free.
514  */
515 void
516 ring_buffer_free(struct ring_buffer *buffer)
517 {
518         int cpu;
519
520         for_each_buffer_cpu(buffer, cpu)
521                 rb_free_cpu_buffer(buffer->buffers[cpu]);
522
523         free_cpumask_var(buffer->cpumask);
524
525         kfree(buffer);
526 }
527 EXPORT_SYMBOL_GPL(ring_buffer_free);
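/*
 * A possible allocate/use/free cycle for a client of this API; the size
 * and error handling are illustrative only.  ring_buffer_write() is
 * defined further below.
 *
 *	struct ring_buffer *rb;
 *
 *	rb = ring_buffer_alloc(1 << 20, RB_FL_OVERWRITE);	// ~1MB per cpu
 *	if (!rb)
 *		return -ENOMEM;
 *
 *	ring_buffer_write(rb, sizeof(data), &data);		// 'data' is a placeholder
 *
 *	ring_buffer_free(rb);
 */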
528
529 static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
530
531 static void
532 rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
533 {
534         struct buffer_page *bpage;
535         struct list_head *p;
536         unsigned i;
537
538         atomic_inc(&cpu_buffer->record_disabled);
539         synchronize_sched();
540
541         for (i = 0; i < nr_pages; i++) {
542                 if (RB_WARN_ON(cpu_buffer, list_empty(&cpu_buffer->pages)))
543                         return;
544                 p = cpu_buffer->pages.next;
545                 bpage = list_entry(p, struct buffer_page, list);
546                 list_del_init(&bpage->list);
547                 free_buffer_page(bpage);
548         }
549         if (RB_WARN_ON(cpu_buffer, list_empty(&cpu_buffer->pages)))
550                 return;
551
552         rb_reset_cpu(cpu_buffer);
553
554         rb_check_pages(cpu_buffer);
555
556         atomic_dec(&cpu_buffer->record_disabled);
557
558 }
559
560 static void
561 rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
562                 struct list_head *pages, unsigned nr_pages)
563 {
564         struct buffer_page *bpage;
565         struct list_head *p;
566         unsigned i;
567
568         atomic_inc(&cpu_buffer->record_disabled);
569         synchronize_sched();
570
571         for (i = 0; i < nr_pages; i++) {
572                 if (RB_WARN_ON(cpu_buffer, list_empty(pages)))
573                         return;
574                 p = pages->next;
575                 bpage = list_entry(p, struct buffer_page, list);
576                 list_del_init(&bpage->list);
577                 list_add_tail(&bpage->list, &cpu_buffer->pages);
578         }
579         rb_reset_cpu(cpu_buffer);
580
581         rb_check_pages(cpu_buffer);
582
583         atomic_dec(&cpu_buffer->record_disabled);
584 }
585
586 /**
587  * ring_buffer_resize - resize the ring buffer
588  * @buffer: the buffer to resize.
589  * @size: the new size.
590  *
591  * The tracer is responsible for making sure that the buffer is
592  * not being used while changing the size.
593  * Note: We may be able to change the above requirement by using
594  *  RCU synchronizations.
595  *
596  * Minimum size is 2 * BUF_PAGE_SIZE.
597  *
598  * Returns -1 on failure.
599  */
600 int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
601 {
602         struct ring_buffer_per_cpu *cpu_buffer;
603         unsigned nr_pages, rm_pages, new_pages;
604         struct buffer_page *bpage, *tmp;
605         unsigned long buffer_size;
606         unsigned long addr;
607         LIST_HEAD(pages);
608         int i, cpu;
609
610         /*
611          * Always succeed at resizing a non-existent buffer:
612          */
613         if (!buffer)
614                 return size;
615
616         size = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
617         size *= BUF_PAGE_SIZE;
618         buffer_size = buffer->pages * BUF_PAGE_SIZE;
619
620         /* we need a minimum of two pages */
621         if (size < BUF_PAGE_SIZE * 2)
622                 size = BUF_PAGE_SIZE * 2;
623
624         if (size == buffer_size)
625                 return size;
626
627         mutex_lock(&buffer->mutex);
628
629         nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
630
631         if (size < buffer_size) {
632
633                 /* easy case, just free pages */
634                 if (RB_WARN_ON(buffer, nr_pages >= buffer->pages)) {
635                         mutex_unlock(&buffer->mutex);
636                         return -1;
637                 }
638
639                 rm_pages = buffer->pages - nr_pages;
640
641                 for_each_buffer_cpu(buffer, cpu) {
642                         cpu_buffer = buffer->buffers[cpu];
643                         rb_remove_pages(cpu_buffer, rm_pages);
644                 }
645                 goto out;
646         }
647
648         /*
649          * This is a bit more difficult. We only want to add pages
650          * when we can allocate enough for all CPUs. We do this
651          * by allocating all the pages and storing them on a local
652  * linked list. If we succeed in our allocation, then we
653          * add these pages to the cpu_buffers. Otherwise we just free
654          * them all and return -ENOMEM;
655          */
656         if (RB_WARN_ON(buffer, nr_pages <= buffer->pages)) {
657                 mutex_unlock(&buffer->mutex);
658                 return -1;
659         }
660
661         new_pages = nr_pages - buffer->pages;
662
663         for_each_buffer_cpu(buffer, cpu) {
664                 for (i = 0; i < new_pages; i++) {
665                         bpage = kzalloc_node(ALIGN(sizeof(*bpage),
666                                                   cache_line_size()),
667                                             GFP_KERNEL, cpu_to_node(cpu));
668                         if (!bpage)
669                                 goto free_pages;
670                         list_add(&bpage->list, &pages);
671                         addr = __get_free_page(GFP_KERNEL);
672                         if (!addr)
673                                 goto free_pages;
674                         bpage->page = (void *)addr;
675                         rb_init_page(bpage->page);
676                 }
677         }
678
679         for_each_buffer_cpu(buffer, cpu) {
680                 cpu_buffer = buffer->buffers[cpu];
681                 rb_insert_pages(cpu_buffer, &pages, new_pages);
682         }
683
684         if (RB_WARN_ON(buffer, !list_empty(&pages))) {
685                 mutex_unlock(&buffer->mutex);
686                 return -1;
687         }
688
689  out:
690         buffer->pages = nr_pages;
691         mutex_unlock(&buffer->mutex);
692
693         return size;
694
695  free_pages:
696         list_for_each_entry_safe(bpage, tmp, &pages, list) {
697                 list_del_init(&bpage->list);
698                 free_buffer_page(bpage);
699         }
700         mutex_unlock(&buffer->mutex);
701         return -ENOMEM;
702 }
703 EXPORT_SYMBOL_GPL(ring_buffer_resize);
704
705 static inline int rb_null_event(struct ring_buffer_event *event)
706 {
707         return event->type == RINGBUF_TYPE_PADDING;
708 }
709
710 static inline void *
711 __rb_data_page_index(struct buffer_data_page *bpage, unsigned index)
712 {
713         return bpage->data + index;
714 }
715
716 static inline void *__rb_page_index(struct buffer_page *bpage, unsigned index)
717 {
718         return bpage->page->data + index;
719 }
720
721 static inline struct ring_buffer_event *
722 rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
723 {
724         return __rb_page_index(cpu_buffer->reader_page,
725                                cpu_buffer->reader_page->read);
726 }
727
728 static inline struct ring_buffer_event *
729 rb_head_event(struct ring_buffer_per_cpu *cpu_buffer)
730 {
731         return __rb_page_index(cpu_buffer->head_page,
732                                cpu_buffer->head_page->read);
733 }
734
735 static inline struct ring_buffer_event *
736 rb_iter_head_event(struct ring_buffer_iter *iter)
737 {
738         return __rb_page_index(iter->head_page, iter->head);
739 }
740
741 static inline unsigned rb_page_write(struct buffer_page *bpage)
742 {
743         return local_read(&bpage->write);
744 }
745
746 static inline unsigned rb_page_commit(struct buffer_page *bpage)
747 {
748         return local_read(&bpage->page->commit);
749 }
750
751 /* Size is determined by what has been committed */
752 static inline unsigned rb_page_size(struct buffer_page *bpage)
753 {
754         return rb_page_commit(bpage);
755 }
756
757 static inline unsigned
758 rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer)
759 {
760         return rb_page_commit(cpu_buffer->commit_page);
761 }
762
763 static inline unsigned rb_head_size(struct ring_buffer_per_cpu *cpu_buffer)
764 {
765         return rb_page_commit(cpu_buffer->head_page);
766 }
767
768 /*
769  * When the tail hits the head and the buffer is in overwrite mode,
770  * the head jumps to the next page and all content on the previous
771  * page is discarded. But before doing so, we update the overrun
772  * variable of the buffer.
773  */
774 static void rb_update_overflow(struct ring_buffer_per_cpu *cpu_buffer)
775 {
776         struct ring_buffer_event *event;
777         unsigned long head;
778
779         for (head = 0; head < rb_head_size(cpu_buffer);
780              head += rb_event_length(event)) {
781
782                 event = __rb_page_index(cpu_buffer->head_page, head);
783                 if (RB_WARN_ON(cpu_buffer, rb_null_event(event)))
784                         return;
785                 /* Only count data entries */
786                 if (event->type != RINGBUF_TYPE_DATA)
787                         continue;
788                 cpu_buffer->overrun++;
789                 cpu_buffer->entries--;
790         }
791 }
792
793 static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer,
794                                struct buffer_page **bpage)
795 {
796         struct list_head *p = (*bpage)->list.next;
797
798         if (p == &cpu_buffer->pages)
799                 p = p->next;
800
801         *bpage = list_entry(p, struct buffer_page, list);
802 }
803
804 static inline unsigned
805 rb_event_index(struct ring_buffer_event *event)
806 {
807         unsigned long addr = (unsigned long)event;
808
809         return (addr & ~PAGE_MASK) - (PAGE_SIZE - BUF_PAGE_SIZE);
810 }
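/*
 * A worked example of the index math: BUF_PAGE_SIZE is PAGE_SIZE minus
 * the buffer_data_page header, so (PAGE_SIZE - BUF_PAGE_SIZE) is exactly
 * that header's size.  Assuming a 4096-byte page and a 16-byte header
 * (u64 time_stamp + local_t commit on a 64-bit build):
 *
 *	event at page offset 0x50  ->  index = 0x50 - 0x10 = 0x40
 *
 * i.e. the returned value is the event's offset inside the page's data[]
 * array, the same index that __rb_page_index() takes.
 */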
811
812 static int
813 rb_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
814              struct ring_buffer_event *event)
815 {
816         unsigned long addr = (unsigned long)event;
817         unsigned long index;
818
819         index = rb_event_index(event);
820         addr &= PAGE_MASK;
821
822         return cpu_buffer->commit_page->page == (void *)addr &&
823                 rb_commit_index(cpu_buffer) == index;
824 }
825
826 static void
827 rb_set_commit_event(struct ring_buffer_per_cpu *cpu_buffer,
828                     struct ring_buffer_event *event)
829 {
830         unsigned long addr = (unsigned long)event;
831         unsigned long index;
832
833         index = rb_event_index(event);
834         addr &= PAGE_MASK;
835
836         while (cpu_buffer->commit_page->page != (void *)addr) {
837                 if (RB_WARN_ON(cpu_buffer,
838                           cpu_buffer->commit_page == cpu_buffer->tail_page))
839                         return;
840                 cpu_buffer->commit_page->page->commit =
841                         cpu_buffer->commit_page->write;
842                 rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
843                 cpu_buffer->write_stamp =
844                         cpu_buffer->commit_page->page->time_stamp;
845         }
846
847         /* Now set the commit to the event's index */
848         local_set(&cpu_buffer->commit_page->page->commit, index);
849 }
850
851 static void
852 rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
853 {
854         /*
855          * We only race with interrupts and NMIs on this CPU.
856          * If we own the commit event, then we can commit
857          * all others that interrupted us, since the interruptions
858          * are in stack format (they finish before they come
859          * back to us). This allows us to do a simple loop to
860          * assign the commit to the tail.
861          */
862  again:
863         while (cpu_buffer->commit_page != cpu_buffer->tail_page) {
864                 cpu_buffer->commit_page->page->commit =
865                         cpu_buffer->commit_page->write;
866                 rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
867                 cpu_buffer->write_stamp =
868                         cpu_buffer->commit_page->page->time_stamp;
869                 /* add barrier to keep gcc from optimizing too much */
870                 barrier();
871         }
872         while (rb_commit_index(cpu_buffer) !=
873                rb_page_write(cpu_buffer->commit_page)) {
874                 cpu_buffer->commit_page->page->commit =
875                         cpu_buffer->commit_page->write;
876                 barrier();
877         }
878
879         /* again, keep gcc from optimizing */
880         barrier();
881
882         /*
883          * If an interrupt came in just after the first while loop
884          * and pushed the tail page forward, we will be left with
885          * a dangling commit that will never go forward.
886          */
887         if (unlikely(cpu_buffer->commit_page != cpu_buffer->tail_page))
888                 goto again;
889 }
890
891 static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
892 {
893         cpu_buffer->read_stamp = cpu_buffer->reader_page->page->time_stamp;
894         cpu_buffer->reader_page->read = 0;
895 }
896
897 static void rb_inc_iter(struct ring_buffer_iter *iter)
898 {
899         struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
900
901         /*
902          * The iterator could be on the reader page (it starts there).
903          * But the head could have moved, since the reader was
904          * found. Check for this case and assign the iterator
905          * to the head page instead of next.
906          */
907         if (iter->head_page == cpu_buffer->reader_page)
908                 iter->head_page = cpu_buffer->head_page;
909         else
910                 rb_inc_page(cpu_buffer, &iter->head_page);
911
912         iter->read_stamp = iter->head_page->page->time_stamp;
913         iter->head = 0;
914 }
915
916 /**
917  * ring_buffer_update_event - update event type and data
918  * @event: the event to update
919  * @type: the type of event
920  * @length: the size of the event field in the ring buffer
921  *
922  * Update the type and data fields of the event. The length
923  * is the actual size that is written to the ring buffer,
924  * and with this, we can determine what to place into the
925  * data field.
926  */
927 static void
928 rb_update_event(struct ring_buffer_event *event,
929                          unsigned type, unsigned length)
930 {
931         event->type = type;
932
933         switch (type) {
934
935         case RINGBUF_TYPE_PADDING:
936                 break;
937
938         case RINGBUF_TYPE_TIME_EXTEND:
939                 event->len = DIV_ROUND_UP(RB_LEN_TIME_EXTEND, RB_ALIGNMENT);
940                 break;
941
942         case RINGBUF_TYPE_TIME_STAMP:
943                 event->len = DIV_ROUND_UP(RB_LEN_TIME_STAMP, RB_ALIGNMENT);
944                 break;
945
946         case RINGBUF_TYPE_DATA:
947                 length -= RB_EVNT_HDR_SIZE;
948                 if (length > RB_MAX_SMALL_DATA) {
949                         event->len = 0;
950                         event->array[0] = length;
951                 } else
952                         event->len = DIV_ROUND_UP(length, RB_ALIGNMENT);
953                 break;
954         default:
955                 BUG();
956         }
957 }
958
959 static unsigned rb_calculate_event_length(unsigned length)
960 {
961         struct ring_buffer_event event; /* Used only for sizeof array */
962
963         /* zero length can cause confusions */
964         if (!length)
965                 length = 1;
966
967         if (length > RB_MAX_SMALL_DATA)
968                 length += sizeof(event.array[0]);
969
970         length += RB_EVNT_HDR_SIZE;
971         length = ALIGN(length, RB_ALIGNMENT);
972
973         return length;
974 }
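/*
 * Worked examples, assuming the usual 4-byte event header and the
 * RB_ALIGNMENT of 4 defined above:
 *
 *	10-byte payload: 10 <= RB_MAX_SMALL_DATA
 *		-> 10 + 4 (header) = 14, rounded up to 16 bytes reserved;
 *		   rb_update_event() later stores len = 3 (3 * 4 = 12 data bytes).
 *
 *	40-byte payload: 40 > RB_MAX_SMALL_DATA
 *		-> 40 + 4 (array[0] slot) + 4 (header) = 48 bytes reserved;
 *		   len is set to 0 and the size is kept in array[0] instead.
 */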
975
976 static struct ring_buffer_event *
977 __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
978                   unsigned type, unsigned long length, u64 *ts)
979 {
980         struct buffer_page *tail_page, *head_page, *reader_page, *commit_page;
981         unsigned long tail, write;
982         struct ring_buffer *buffer = cpu_buffer->buffer;
983         struct ring_buffer_event *event;
984         unsigned long flags;
985
986         commit_page = cpu_buffer->commit_page;
987         /* we just need to protect against interrupts */
988         barrier();
989         tail_page = cpu_buffer->tail_page;
990         write = local_add_return(length, &tail_page->write);
991         tail = write - length;
992
993         /* See if we shot past the end of this buffer page */
994         if (write > BUF_PAGE_SIZE) {
995                 struct buffer_page *next_page = tail_page;
996
997                 local_irq_save(flags);
998                 __raw_spin_lock(&cpu_buffer->lock);
999
1000                 rb_inc_page(cpu_buffer, &next_page);
1001
1002                 head_page = cpu_buffer->head_page;
1003                 reader_page = cpu_buffer->reader_page;
1004
1005                 /* we grabbed the lock before incrementing */
1006                 if (RB_WARN_ON(cpu_buffer, next_page == reader_page))
1007                         goto out_unlock;
1008
1009                 /*
1010                  * If for some reason, we had an interrupt storm that made
1011                  * it all the way around the buffer, bail, and warn
1012                  * about it.
1013                  */
1014                 if (unlikely(next_page == commit_page)) {
1015                         WARN_ON_ONCE(1);
1016                         goto out_unlock;
1017                 }
1018
1019                 if (next_page == head_page) {
1020                         if (!(buffer->flags & RB_FL_OVERWRITE))
1021                                 goto out_unlock;
1022
1023                         /* tail_page has not moved yet? */
1024                         if (tail_page == cpu_buffer->tail_page) {
1025                                 /* count overflows */
1026                                 rb_update_overflow(cpu_buffer);
1027
1028                                 rb_inc_page(cpu_buffer, &head_page);
1029                                 cpu_buffer->head_page = head_page;
1030                                 cpu_buffer->head_page->read = 0;
1031                         }
1032                 }
1033
1034                 /*
1035                  * If the tail page is still the same as what we think
1036                  * it is, then it is up to us to update the tail
1037                  * pointer.
1038                  */
1039                 if (tail_page == cpu_buffer->tail_page) {
1040                         local_set(&next_page->write, 0);
1041                         local_set(&next_page->page->commit, 0);
1042                         cpu_buffer->tail_page = next_page;
1043
1044                         /* reread the time stamp */
1045                         *ts = ring_buffer_time_stamp(cpu_buffer->cpu);
1046                         cpu_buffer->tail_page->page->time_stamp = *ts;
1047                 }
1048
1049                 /*
1050                  * The actual tail page has moved forward.
1051                  */
1052                 if (tail < BUF_PAGE_SIZE) {
1053                         /* Mark the rest of the page with padding */
1054                         event = __rb_page_index(tail_page, tail);
1055                         event->type = RINGBUF_TYPE_PADDING;
1056                 }
1057
1058                 if (tail <= BUF_PAGE_SIZE)
1059                         /* Set the write back to the previous setting */
1060                         local_set(&tail_page->write, tail);
1061
1062                 /*
1063                  * If this was a commit entry that failed,
1064                  * increment that too
1065                  */
1066                 if (tail_page == cpu_buffer->commit_page &&
1067                     tail == rb_commit_index(cpu_buffer)) {
1068                         rb_set_commit_to_write(cpu_buffer);
1069                 }
1070
1071                 __raw_spin_unlock(&cpu_buffer->lock);
1072                 local_irq_restore(flags);
1073
1074                 /* fail and let the caller try again */
1075                 return ERR_PTR(-EAGAIN);
1076         }
1077
1078         /* We reserved something on the buffer */
1079
1080         if (RB_WARN_ON(cpu_buffer, write > BUF_PAGE_SIZE))
1081                 return NULL;
1082
1083         event = __rb_page_index(tail_page, tail);
1084         rb_update_event(event, type, length);
1085
1086         /*
1087          * If this is a commit and the tail is zero, then update
1088          * this page's time stamp.
1089          */
1090         if (!tail && rb_is_commit(cpu_buffer, event))
1091                 cpu_buffer->commit_page->page->time_stamp = *ts;
1092
1093         return event;
1094
1095  out_unlock:
1096         /* reset write */
1097         if (tail <= BUF_PAGE_SIZE)
1098                 local_set(&tail_page->write, tail);
1099
1100         __raw_spin_unlock(&cpu_buffer->lock);
1101         local_irq_restore(flags);
1102         return NULL;
1103 }
1104
1105 static int
1106 rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer,
1107                   u64 *ts, u64 *delta)
1108 {
1109         struct ring_buffer_event *event;
1110         static int once;
1111         int ret;
1112
1113         if (unlikely(*delta > (1ULL << 59) && !once++)) {
1114                 printk(KERN_WARNING "Delta way too big! %llu"
1115                        " ts=%llu write stamp = %llu\n",
1116                        (unsigned long long)*delta,
1117                        (unsigned long long)*ts,
1118                        (unsigned long long)cpu_buffer->write_stamp);
1119                 WARN_ON(1);
1120         }
1121
1122         /*
1123          * The delta is too big; we need to add a
1124          * new timestamp.
1125          */
1126         event = __rb_reserve_next(cpu_buffer,
1127                                   RINGBUF_TYPE_TIME_EXTEND,
1128                                   RB_LEN_TIME_EXTEND,
1129                                   ts);
1130         if (!event)
1131                 return -EBUSY;
1132
1133         if (PTR_ERR(event) == -EAGAIN)
1134                 return -EAGAIN;
1135
1136         /* Only a committed time event can update the write stamp */
1137         if (rb_is_commit(cpu_buffer, event)) {
1138                 /*
1139                  * If this is the first on the page, then we need to
1140                  * update the page itself, and just put in a zero.
1141                  */
1142                 if (rb_event_index(event)) {
1143                         event->time_delta = *delta & TS_MASK;
1144                         event->array[0] = *delta >> TS_SHIFT;
1145                 } else {
1146                         cpu_buffer->commit_page->page->time_stamp = *ts;
1147                         event->time_delta = 0;
1148                         event->array[0] = 0;
1149                 }
1150                 cpu_buffer->write_stamp = *ts;
1151                 /* let the caller know this was the commit */
1152                 ret = 1;
1153         } else {
1154                 /* Darn, this is just wasted space */
1155                 event->time_delta = 0;
1156                 event->array[0] = 0;
1157                 ret = 0;
1158         }
1159
1160         *delta = 0;
1161
1162         return ret;
1163 }
1164
1165 static struct ring_buffer_event *
1166 rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer,
1167                       unsigned type, unsigned long length)
1168 {
1169         struct ring_buffer_event *event;
1170         u64 ts, delta;
1171         int commit = 0;
1172         int nr_loops = 0;
1173
1174  again:
1175         /*
1176          * We allow for interrupts to reenter here and do a trace.
1177          * If one does, it will cause this original code to loop
1178          * back here. Even with heavy interrupts happening, this
1179          * should only happen a few times in a row. If this happens
1180          * 1000 times in a row, there must be either an interrupt
1181          * storm or we have something buggy.
1182          * Bail!
1183          */
1184         if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))
1185                 return NULL;
1186
1187         ts = ring_buffer_time_stamp(cpu_buffer->cpu);
1188
1189         /*
1190          * Only the first commit can update the timestamp.
1191          * Yes there is a race here. If an interrupt comes in
1192          * just after the conditional and it traces too, then it
1193          * will also check the deltas. More than one timestamp may
1194          * also be made. But only the entry that did the actual
1195          * commit will be something other than zero.
1196          */
1197         if (cpu_buffer->tail_page == cpu_buffer->commit_page &&
1198             rb_page_write(cpu_buffer->tail_page) ==
1199             rb_commit_index(cpu_buffer)) {
1200
1201                 delta = ts - cpu_buffer->write_stamp;
1202
1203                 /* make sure this delta is calculated here */
1204                 barrier();
1205
1206                 /* Did the write stamp get updated already? */
1207                 if (unlikely(ts < cpu_buffer->write_stamp))
1208                         delta = 0;
1209
1210                 if (test_time_stamp(delta)) {
1211
1212                         commit = rb_add_time_stamp(cpu_buffer, &ts, &delta);
1213
1214                         if (commit == -EBUSY)
1215                                 return NULL;
1216
1217                         if (commit == -EAGAIN)
1218                                 goto again;
1219
1220                         RB_WARN_ON(cpu_buffer, commit < 0);
1221                 }
1222         } else
1223                 /* Non commits have zero deltas */
1224                 delta = 0;
1225
1226         event = __rb_reserve_next(cpu_buffer, type, length, &ts);
1227         if (PTR_ERR(event) == -EAGAIN)
1228                 goto again;
1229
1230         if (!event) {
1231                 if (unlikely(commit))
1232                         /*
1233                          * Ouch! We needed a timestamp and it was committed. But
1234                          * we didn't get our event reserved.
1235                          */
1236                         rb_set_commit_to_write(cpu_buffer);
1237                 return NULL;
1238         }
1239
1240         /*
1241          * If the timestamp was committed, make the commit our entry
1242          * now so that we will update it when needed.
1243          */
1244         if (commit)
1245                 rb_set_commit_event(cpu_buffer, event);
1246         else if (!rb_is_commit(cpu_buffer, event))
1247                 delta = 0;
1248
1249         event->time_delta = delta;
1250
1251         return event;
1252 }
1253
1254 static DEFINE_PER_CPU(int, rb_need_resched);
1255
1256 /**
1257  * ring_buffer_lock_reserve - reserve a part of the buffer
1258  * @buffer: the ring buffer to reserve from
1259  * @length: the length of the data to reserve (excluding event header)
1260  *
1261  * Returns a reserved event on the ring buffer to copy directly to.
1262  * The user of this interface will need to get the body to write into
1263  * and can use the ring_buffer_event_data() interface.
1264  *
1265  * The length is the length of the data needed, not the event length
1266  * which also includes the event header.
1267  *
1268  * Must be paired with ring_buffer_unlock_commit, unless NULL is returned.
1269  * If NULL is returned, then nothing has been allocated or locked.
1270  */
1271 struct ring_buffer_event *
1272 ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
1273 {
1274         struct ring_buffer_per_cpu *cpu_buffer;
1275         struct ring_buffer_event *event;
1276         int cpu, resched;
1277
1278         if (ring_buffer_flags != RB_BUFFERS_ON)
1279                 return NULL;
1280
1281         if (atomic_read(&buffer->record_disabled))
1282                 return NULL;
1283
1284         /* If we are tracing schedule, we don't want to recurse */
1285         resched = ftrace_preempt_disable();
1286
1287         cpu = raw_smp_processor_id();
1288
1289         if (!cpumask_test_cpu(cpu, buffer->cpumask))
1290                 goto out;
1291
1292         cpu_buffer = buffer->buffers[cpu];
1293
1294         if (atomic_read(&cpu_buffer->record_disabled))
1295                 goto out;
1296
1297         length = rb_calculate_event_length(length);
1298         if (length > BUF_PAGE_SIZE)
1299                 goto out;
1300
1301         event = rb_reserve_next_event(cpu_buffer, RINGBUF_TYPE_DATA, length);
1302         if (!event)
1303                 goto out;
1304
1305         /*
1306          * Need to store resched state on this cpu.
1307          * Only the first needs to.
1308          */
1309
1310         if (preempt_count() == 1)
1311                 per_cpu(rb_need_resched, cpu) = resched;
1312
1313         return event;
1314
1315  out:
1316         ftrace_preempt_enable(resched);
1317         return NULL;
1318 }
1319 EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve);
1320
1321 static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
1322                       struct ring_buffer_event *event)
1323 {
1324         cpu_buffer->entries++;
1325
1326         /* Only process further if we own the commit */
1327         if (!rb_is_commit(cpu_buffer, event))
1328                 return;
1329
1330         cpu_buffer->write_stamp += event->time_delta;
1331
1332         rb_set_commit_to_write(cpu_buffer);
1333 }
1334
1335 /**
1336  * ring_buffer_unlock_commit - commit a reserved event
1337  * @buffer: The buffer to commit to
1338  * @event: The event pointer to commit.
1339  *
1340  * This commits the data to the ring buffer, and releases any locks held.
1341  *
1342  * Must be paired with ring_buffer_lock_reserve.
1343  */
1344 int ring_buffer_unlock_commit(struct ring_buffer *buffer,
1345                               struct ring_buffer_event *event)
1346 {
1347         struct ring_buffer_per_cpu *cpu_buffer;
1348         int cpu = raw_smp_processor_id();
1349
1350         cpu_buffer = buffer->buffers[cpu];
1351
1352         rb_commit(cpu_buffer, event);
1353
1354         /*
1355          * Only the last preempt count needs to restore preemption.
1356          */
1357         if (preempt_count() == 1)
1358                 ftrace_preempt_enable(per_cpu(rb_need_resched, cpu));
1359         else
1360                 preempt_enable_no_resched_notrace();
1361
1362         return 0;
1363 }
1364 EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit);
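/*
 * The reserve/copy/commit sequence described in the kernel-doc above, as
 * a sketch.  struct my_entry is a hypothetical payload type; the three
 * ring buffer calls are the ones defined in this file.
 *
 *	struct ring_buffer_event *event;
 *	struct my_entry *entry;
 *
 *	event = ring_buffer_lock_reserve(buffer, sizeof(*entry));
 *	if (!event)
 *		return;				// nothing reserved, nothing to commit
 *	entry = ring_buffer_event_data(event);
 *	entry->value = 42;			// fill the payload in place
 *	ring_buffer_unlock_commit(buffer, event);
 */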
1365
1366 /**
1367  * ring_buffer_write - write data to the buffer without reserving
1368  * @buffer: The ring buffer to write to.
1369  * @length: The length of the data being written (excluding the event header)
1370  * @data: The data to write to the buffer.
1371  *
1372  * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as
1373  * one function. If you already have the data to write to the buffer, it
1374  * may be easier to simply call this function.
1375  *
1376  * Note, like ring_buffer_lock_reserve, the length is the length of the data
1377  * and not the length of the event which would hold the header.
1378  */
1379 int ring_buffer_write(struct ring_buffer *buffer,
1380                         unsigned long length,
1381                         void *data)
1382 {
1383         struct ring_buffer_per_cpu *cpu_buffer;
1384         struct ring_buffer_event *event;
1385         unsigned long event_length;
1386         void *body;
1387         int ret = -EBUSY;
1388         int cpu, resched;
1389
1390         if (ring_buffer_flags != RB_BUFFERS_ON)
1391                 return -EBUSY;
1392
1393         if (atomic_read(&buffer->record_disabled))
1394                 return -EBUSY;
1395
1396         resched = ftrace_preempt_disable();
1397
1398         cpu = raw_smp_processor_id();
1399
1400         if (!cpumask_test_cpu(cpu, buffer->cpumask))
1401                 goto out;
1402
1403         cpu_buffer = buffer->buffers[cpu];
1404
1405         if (atomic_read(&cpu_buffer->record_disabled))
1406                 goto out;
1407
1408         event_length = rb_calculate_event_length(length);
1409         event = rb_reserve_next_event(cpu_buffer,
1410                                       RINGBUF_TYPE_DATA, event_length);
1411         if (!event)
1412                 goto out;
1413
1414         body = rb_event_data(event);
1415
1416         memcpy(body, data, length);
1417
1418         rb_commit(cpu_buffer, event);
1419
1420         ret = 0;
1421  out:
1422         ftrace_preempt_enable(resched);
1423
1424         return ret;
1425 }
1426 EXPORT_SYMBOL_GPL(ring_buffer_write);
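/*
 * The one-call variant when the data already exists in memory, as a
 * sketch; struct my_sample and its fields are hypothetical.
 *
 *	struct my_sample sample = { .seq = seq, .ts = now };
 *
 *	if (ring_buffer_write(buffer, sizeof(sample), &sample))
 *		pr_debug("ring buffer write failed (disabled or no space)\n");
 */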
1427
1428 static int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
1429 {
1430         struct buffer_page *reader = cpu_buffer->reader_page;
1431         struct buffer_page *head = cpu_buffer->head_page;
1432         struct buffer_page *commit = cpu_buffer->commit_page;
1433
1434         return reader->read == rb_page_commit(reader) &&
1435                 (commit == reader ||
1436                  (commit == head &&
1437                   head->read == rb_page_commit(commit)));
1438 }
1439
1440 /**
1441  * ring_buffer_record_disable - stop all writes into the buffer
1442  * @buffer: The ring buffer to stop writes to.
1443  *
1444  * This prevents all writes to the buffer. Any attempt to write
1445  * to the buffer after this will fail and return NULL.
1446  *
1447  * The caller should call synchronize_sched() after this.
1448  */
1449 void ring_buffer_record_disable(struct ring_buffer *buffer)
1450 {
1451         atomic_inc(&buffer->record_disabled);
1452 }
1453 EXPORT_SYMBOL_GPL(ring_buffer_record_disable);
1454
1455 /**
1456  * ring_buffer_record_enable - enable writes to the buffer
1457  * @buffer: The ring buffer to enable writes
1458  *
1459  * Note, multiple disables will need the same number of enables
1460  * to truly enable the writing (much like preempt_disable).
1461  */
1462 void ring_buffer_record_enable(struct ring_buffer *buffer)
1463 {
1464         atomic_dec(&buffer->record_disabled);
1465 }
1466 EXPORT_SYMBOL_GPL(ring_buffer_record_enable);
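/*
 * The disable/quiesce/enable pattern the kernel-doc above asks for:
 * synchronize_sched() lets any writer that started before the disable
 * finish before the buffer is inspected.  my_walk_buffer() stands in for
 * whatever reader runs while recording is off.
 *
 *	ring_buffer_record_disable(buffer);
 *	synchronize_sched();			// wait out in-flight writers
 *	my_walk_buffer(buffer);			// buffer is quiescent here
 *	ring_buffer_record_enable(buffer);
 */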
1467
1468 /**
1469  * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
1470  * @buffer: The ring buffer to stop writes to.
1471  * @cpu: The CPU buffer to stop
1472  *
1473  * This prevents all writes to the buffer. Any attempt to write
1474  * to the buffer after this will fail and return NULL.
1475  *
1476  * The caller should call synchronize_sched() after this.
1477  */
1478 void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
1479 {
1480         struct ring_buffer_per_cpu *cpu_buffer;
1481
1482         if (!cpumask_test_cpu(cpu, buffer->cpumask))
1483                 return;
1484
1485         cpu_buffer = buffer->buffers[cpu];
1486         atomic_inc(&cpu_buffer->record_disabled);
1487 }
1488 EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu);
1489
1490 /**
1491  * ring_buffer_record_enable_cpu - enable writes to the buffer
1492  * @buffer: The ring buffer to enable writes
1493  * @cpu: The CPU to enable.
1494  *
1495  * Note, multiple disables will need the same number of enables
1496  * to truly enable the writing (much like preempt_disable).
1497  */
1498 void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
1499 {
1500         struct ring_buffer_per_cpu *cpu_buffer;
1501
1502         if (!cpumask_test_cpu(cpu, buffer->cpumask))
1503                 return;
1504
1505         cpu_buffer = buffer->buffers[cpu];
1506         atomic_dec(&cpu_buffer->record_disabled);
1507 }
1508 EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu);
1509
1510 /**
1511  * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
1512  * @buffer: The ring buffer
1513  * @cpu: The per CPU buffer to get the entries from.
1514  */
1515 unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
1516 {
1517         struct ring_buffer_per_cpu *cpu_buffer;
1518
1519         if (!cpumask_test_cpu(cpu, buffer->cpumask))
1520                 return 0;
1521
1522         cpu_buffer = buffer->buffers[cpu];
1523         return cpu_buffer->entries;
1524 }
1525 EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);
1526
1527 /**
1528  * ring_buffer_overrun_cpu - get the number of overruns in a cpu_buffer
1529  * @buffer: The ring buffer
1530  * @cpu: The per CPU buffer to get the number of overruns from
1531  */
1532 unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
1533 {
1534         struct ring_buffer_per_cpu *cpu_buffer;
1535
1536         if (!cpumask_test_cpu(cpu, buffer->cpumask))
1537                 return 0;
1538
1539         cpu_buffer = buffer->buffers[cpu];
1540         return cpu_buffer->overrun;
1541 }
1542 EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);
1543
1544 /**
1545  * ring_buffer_entries - get the number of entries in a buffer
1546  * @buffer: The ring buffer
1547  *
1548  * Returns the total number of entries in the ring buffer
1549  * (all CPU entries)
1550  */
1551 unsigned long ring_buffer_entries(struct ring_buffer *buffer)
1552 {
1553         struct ring_buffer_per_cpu *cpu_buffer;
1554         unsigned long entries = 0;
1555         int cpu;
1556
1557         /* if you care about this being correct, lock the buffer */
1558         for_each_buffer_cpu(buffer, cpu) {
1559                 cpu_buffer = buffer->buffers[cpu];
1560                 entries += cpu_buffer->entries;
1561         }
1562
1563         return entries;
1564 }
1565 EXPORT_SYMBOL_GPL(ring_buffer_entries);
1566
1567 /**
1568  * ring_buffer_overruns - get the number of overruns in the buffer
1569  * @buffer: The ring buffer
1570  *
1571  * Returns the total number of overruns in the ring buffer
1572  * (all CPU entries)
1573  */
1574 unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
1575 {
1576         struct ring_buffer_per_cpu *cpu_buffer;
1577         unsigned long overruns = 0;
1578         int cpu;
1579
1580         /* if you care about this being correct, lock the buffer */
1581         for_each_buffer_cpu(buffer, cpu) {
1582                 cpu_buffer = buffer->buffers[cpu];
1583                 overruns += cpu_buffer->overrun;
1584         }
1585
1586         return overruns;
1587 }
1588 EXPORT_SYMBOL_GPL(ring_buffer_overruns);
1589
1590 static void rb_iter_reset(struct ring_buffer_iter *iter)
1591 {
1592         struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
1593
1594         /* Iterator usage is expected to have record disabled */
1595         if (list_empty(&cpu_buffer->reader_page->list)) {
1596                 iter->head_page = cpu_buffer->head_page;
1597                 iter->head = cpu_buffer->head_page->read;
1598         } else {
1599                 iter->head_page = cpu_buffer->reader_page;
1600                 iter->head = cpu_buffer->reader_page->read;
1601         }
1602         if (iter->head)
1603                 iter->read_stamp = cpu_buffer->read_stamp;
1604         else
1605                 iter->read_stamp = iter->head_page->page->time_stamp;
1606 }
1607
1608 /**
1609  * ring_buffer_iter_reset - reset an iterator
1610  * @iter: The iterator to reset
1611  *
1612  * Resets the iterator, so that it will start from the beginning
1613  * again.
1614  */
1615 void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
1616 {
1617         struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
1618         unsigned long flags;
1619
1620         spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
1621         rb_iter_reset(iter);
1622         spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
1623 }
1624 EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);
1625
1626 /**
1627  * ring_buffer_iter_empty - check if an iterator has no more to read
1628  * @iter: The iterator to check
1629  */
1630 int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
1631 {
1632         struct ring_buffer_per_cpu *cpu_buffer;
1633
1634         cpu_buffer = iter->cpu_buffer;
1635
1636         return iter->head_page == cpu_buffer->commit_page &&
1637                 iter->head == rb_commit_index(cpu_buffer);
1638 }
1639 EXPORT_SYMBOL_GPL(ring_buffer_iter_empty);
1640
1641 static void
1642 rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
1643                      struct ring_buffer_event *event)
1644 {
1645         u64 delta;
1646
1647         switch (event->type) {
1648         case RINGBUF_TYPE_PADDING:
1649                 return;
1650
1651         case RINGBUF_TYPE_TIME_EXTEND:
1652                 delta = event->array[0];
1653                 delta <<= TS_SHIFT;
1654                 delta += event->time_delta;
1655                 cpu_buffer->read_stamp += delta;
1656                 return;
1657
1658         case RINGBUF_TYPE_TIME_STAMP:
1659                 /* FIXME: not implemented */
1660                 return;
1661
1662         case RINGBUF_TYPE_DATA:
1663                 cpu_buffer->read_stamp += event->time_delta;
1664                 return;
1665
1666         default:
1667                 BUG();
1668         }
1669         return;
1670 }
1671
1672 static void
1673 rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
1674                           struct ring_buffer_event *event)
1675 {
1676         u64 delta;
1677
1678         switch (event->type) {
1679         case RINGBUF_TYPE_PADDING:
1680                 return;
1681
1682         case RINGBUF_TYPE_TIME_EXTEND:
1683                 delta = event->array[0];
1684                 delta <<= TS_SHIFT;
1685                 delta += event->time_delta;
1686                 iter->read_stamp += delta;
1687                 return;
1688
1689         case RINGBUF_TYPE_TIME_STAMP:
1690                 /* FIXME: not implemented */
1691                 return;
1692
1693         case RINGBUF_TYPE_DATA:
1694                 iter->read_stamp += event->time_delta;
1695                 return;
1696
1697         default:
1698                 BUG();
1699         }
1700         return;
1701 }
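
/*
 * A worked example of the TIME_EXTEND decoding used by both helpers
 * above: an event whose header carries time_delta = 5 and whose
 * array[0] holds 2 describes a delta of
 *
 *	delta = (2 << TS_SHIFT) + 5;
 *
 * that is, the low-order bits of the delta come from the event header
 * and the upper bits from array[0]. The reader simply adds that delta
 * to its running read_stamp.
 */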
1702
1703 static struct buffer_page *
1704 rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
1705 {
1706         struct buffer_page *reader = NULL;
1707         unsigned long flags;
1708         int nr_loops = 0;
1709
1710         local_irq_save(flags);
1711         __raw_spin_lock(&cpu_buffer->lock);
1712
1713  again:
1714         /*
1715          * This should normally only loop twice. But because the
1716          * start of the reader inserts an empty page, it causes
1717          * a case where we will loop three times. There should be no
1718          * reason to loop four times (that I know of).
1719          */
1720         if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) {
1721                 reader = NULL;
1722                 goto out;
1723         }
1724
1725         reader = cpu_buffer->reader_page;
1726
1727         /* If there's more to read, return this page */
1728         if (cpu_buffer->reader_page->read < rb_page_size(reader))
1729                 goto out;
1730
1731         /* Never should we have an index greater than the size */
1732         if (RB_WARN_ON(cpu_buffer,
1733                        cpu_buffer->reader_page->read > rb_page_size(reader)))
1734                 goto out;
1735
1736         /* check if we caught up to the tail */
1737         reader = NULL;
1738         if (cpu_buffer->commit_page == cpu_buffer->reader_page)
1739                 goto out;
1740
1741         /*
1742          * Splice the empty reader page into the list around the head.
1743          * Reset the reader page to size zero.
1744          */
1745
1746         reader = cpu_buffer->head_page;
1747         cpu_buffer->reader_page->list.next = reader->list.next;
1748         cpu_buffer->reader_page->list.prev = reader->list.prev;
1749
1750         local_set(&cpu_buffer->reader_page->write, 0);
1751         local_set(&cpu_buffer->reader_page->page->commit, 0);
1752
1753         /* Make the reader page now replace the head */
1754         reader->list.prev->next = &cpu_buffer->reader_page->list;
1755         reader->list.next->prev = &cpu_buffer->reader_page->list;
1756
1757         /*
1758          * If the tail is on the reader, then we must set the head
1759          * to the inserted page, otherwise we set it one before.
1760          */
1761         cpu_buffer->head_page = cpu_buffer->reader_page;
1762
1763         if (cpu_buffer->commit_page != reader)
1764                 rb_inc_page(cpu_buffer, &cpu_buffer->head_page);
1765
1766         /* Finally update the reader page to the new head */
1767         cpu_buffer->reader_page = reader;
1768         rb_reset_reader_page(cpu_buffer);
1769
1770         goto again;
1771
1772  out:
1773         __raw_spin_unlock(&cpu_buffer->lock);
1774         local_irq_restore(flags);
1775
1776         return reader;
1777 }
1778
1779 static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
1780 {
1781         struct ring_buffer_event *event;
1782         struct buffer_page *reader;
1783         unsigned length;
1784
1785         reader = rb_get_reader_page(cpu_buffer);
1786
1787         /* This function should not be called when buffer is empty */
1788         if (RB_WARN_ON(cpu_buffer, !reader))
1789                 return;
1790
1791         event = rb_reader_event(cpu_buffer);
1792
1793         if (event->type == RINGBUF_TYPE_DATA)
1794                 cpu_buffer->entries--;
1795
1796         rb_update_read_stamp(cpu_buffer, event);
1797
1798         length = rb_event_length(event);
1799         cpu_buffer->reader_page->read += length;
1800 }
1801
1802 static void rb_advance_iter(struct ring_buffer_iter *iter)
1803 {
1804         struct ring_buffer *buffer;
1805         struct ring_buffer_per_cpu *cpu_buffer;
1806         struct ring_buffer_event *event;
1807         unsigned length;
1808
1809         cpu_buffer = iter->cpu_buffer;
1810         buffer = cpu_buffer->buffer;
1811
1812         /*
1813          * Check if we are at the end of the buffer.
1814          */
1815         if (iter->head >= rb_page_size(iter->head_page)) {
1816                 if (RB_WARN_ON(buffer,
1817                                iter->head_page == cpu_buffer->commit_page))
1818                         return;
1819                 rb_inc_iter(iter);
1820                 return;
1821         }
1822
1823         event = rb_iter_head_event(iter);
1824
1825         length = rb_event_length(event);
1826
1827         /*
1828          * This should not be called to advance the head if we are
1829          * at the tail of the buffer.
1830          */
1831         if (RB_WARN_ON(cpu_buffer,
1832                        (iter->head_page == cpu_buffer->commit_page) &&
1833                        (iter->head + length > rb_commit_index(cpu_buffer))))
1834                 return;
1835
1836         rb_update_iter_read_stamp(iter, event);
1837
1838         iter->head += length;
1839
1840         /* check for end of page padding */
1841         if ((iter->head >= rb_page_size(iter->head_page)) &&
1842             (iter->head_page != cpu_buffer->commit_page))
1843                 rb_advance_iter(iter);
1844 }
1845
1846 static struct ring_buffer_event *
1847 rb_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
1848 {
1849         struct ring_buffer_per_cpu *cpu_buffer;
1850         struct ring_buffer_event *event;
1851         struct buffer_page *reader;
1852         int nr_loops = 0;
1853
1854         if (!cpumask_test_cpu(cpu, buffer->cpumask))
1855                 return NULL;
1856
1857         cpu_buffer = buffer->buffers[cpu];
1858
1859  again:
1860         /*
1861          * We repeat when a timestamp is encountered. It is possible
1862          * to get multiple timestamps from an interrupt entering just
1863          * as one timestamp is about to be written. The max times
1864          * that this can happen is the number of nested interrupts we
1865          * can have.  Nesting 10 deep of interrupts is clearly
1866          * an anomaly.
1867          */
1868         if (RB_WARN_ON(cpu_buffer, ++nr_loops > 10))
1869                 return NULL;
1870
1871         reader = rb_get_reader_page(cpu_buffer);
1872         if (!reader)
1873                 return NULL;
1874
1875         event = rb_reader_event(cpu_buffer);
1876
1877         switch (event->type) {
1878         case RINGBUF_TYPE_PADDING:
1879                 RB_WARN_ON(cpu_buffer, 1);
1880                 rb_advance_reader(cpu_buffer);
1881                 return NULL;
1882
1883         case RINGBUF_TYPE_TIME_EXTEND:
1884                 /* Internal data, OK to advance */
1885                 rb_advance_reader(cpu_buffer);
1886                 goto again;
1887
1888         case RINGBUF_TYPE_TIME_STAMP:
1889                 /* FIXME: not implemented */
1890                 rb_advance_reader(cpu_buffer);
1891                 goto again;
1892
1893         case RINGBUF_TYPE_DATA:
1894                 if (ts) {
1895                         *ts = cpu_buffer->read_stamp + event->time_delta;
1896                         ring_buffer_normalize_time_stamp(cpu_buffer->cpu, ts);
1897                 }
1898                 return event;
1899
1900         default:
1901                 BUG();
1902         }
1903
1904         return NULL;
1905 }
1906 EXPORT_SYMBOL_GPL(ring_buffer_peek);
1907
1908 static struct ring_buffer_event *
1909 rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
1910 {
1911         struct ring_buffer *buffer;
1912         struct ring_buffer_per_cpu *cpu_buffer;
1913         struct ring_buffer_event *event;
1914         int nr_loops = 0;
1915
1916         if (ring_buffer_iter_empty(iter))
1917                 return NULL;
1918
1919         cpu_buffer = iter->cpu_buffer;
1920         buffer = cpu_buffer->buffer;
1921
1922  again:
1923         /*
1924          * We repeat when a timestamp is encountered. It is possible
1925          * to get multiple timestamps from an interrupt entering just
1926          * as one timestamp is about to be written. The max times
1927          * that this can happen is the number of nested interrupts we
1928          * can have. Nesting 10 deep of interrupts is clearly
1929          * an anomaly.
1930          */
1931         if (RB_WARN_ON(cpu_buffer, ++nr_loops > 10))
1932                 return NULL;
1933
1934         if (rb_per_cpu_empty(cpu_buffer))
1935                 return NULL;
1936
1937         event = rb_iter_head_event(iter);
1938
1939         switch (event->type) {
1940         case RINGBUF_TYPE_PADDING:
1941                 rb_inc_iter(iter);
1942                 goto again;
1943
1944         case RINGBUF_TYPE_TIME_EXTEND:
1945                 /* Internal data, OK to advance */
1946                 rb_advance_iter(iter);
1947                 goto again;
1948
1949         case RINGBUF_TYPE_TIME_STAMP:
1950                 /* FIXME: not implemented */
1951                 rb_advance_iter(iter);
1952                 goto again;
1953
1954         case RINGBUF_TYPE_DATA:
1955                 if (ts) {
1956                         *ts = iter->read_stamp + event->time_delta;
1957                         ring_buffer_normalize_time_stamp(cpu_buffer->cpu, ts);
1958                 }
1959                 return event;
1960
1961         default:
1962                 BUG();
1963         }
1964
1965         return NULL;
1966 }
1967 EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);
1968
1969 /**
1970  * ring_buffer_peek - peek at the next event to be read
1971  * @buffer: The ring buffer to read
1972  * @cpu: The per CPU buffer to peek at
1973  * @ts: The timestamp counter of this event.
1974  *
1975  * This will return the event that will be read next, but does
1976  * not consume the data.
1977  */
1978 struct ring_buffer_event *
1979 ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
1980 {
1981         struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
1982         struct ring_buffer_event *event;
1983         unsigned long flags;
1984
1985         spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
1986         event = rb_buffer_peek(buffer, cpu, ts);
1987         spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
1988
1989         return event;
1990 }
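
/*
 * As a sketch, a reader that only wants to act on events older than some
 * deadline of its own can peek first and consume only when the timestamp
 * qualifies (the deadline policy is entirely up to the caller):
 *
 *	u64 ts;
 *	struct ring_buffer_event *event;
 *
 *	event = ring_buffer_peek(buffer, cpu, &ts);
 *	if (event && ts < deadline)
 *		event = ring_buffer_consume(buffer, cpu, &ts);
 */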
1991
1992 /**
1993  * ring_buffer_iter_peek - peek at the next event to be read
1994  * @iter: The ring buffer iterator
1995  * @ts: The timestamp counter of this event.
1996  *
1997  * This will return the event that will be read next, but does
1998  * not increment the iterator.
1999  */
2000 struct ring_buffer_event *
2001 ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
2002 {
2003         struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
2004         struct ring_buffer_event *event;
2005         unsigned long flags;
2006
2007         spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2008         event = rb_iter_peek(iter, ts);
2009         spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2010
2011         return event;
2012 }
2013
2014 /**
2015  * ring_buffer_consume - return an event and consume it
2016  * @buffer: The ring buffer to get the next event from
2017  * @cpu: The per CPU buffer to read from
2018  * @ts: Where to store the event's timestamp, may be NULL
2019  *
2020  * Returns the next event in the ring buffer and consumes it: sequential
2021  * reads eventually empty the ring buffer if the producer is slower.
2021  */
2022 struct ring_buffer_event *
2023 ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
2024 {
2025         struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
2026         struct ring_buffer_event *event;
2027         unsigned long flags;
2028
2029         if (!cpumask_test_cpu(cpu, buffer->cpumask))
2030                 return NULL;
2031
2032         spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2033
2034         event = rb_buffer_peek(buffer, cpu, ts);
2035         if (!event)
2036                 goto out;
2037
2038         rb_advance_reader(cpu_buffer);
2039
2040  out:
2041         spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2042
2043         return event;
2044 }
2045 EXPORT_SYMBOL_GPL(ring_buffer_consume);
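
/*
 * A typical consuming-read loop then looks like the sketch below, where
 * process() stands in for whatever the caller does with the payload:
 *
 *	u64 ts;
 *	struct ring_buffer_event *event;
 *
 *	while ((event = ring_buffer_consume(buffer, cpu, &ts)))
 *		process(ring_buffer_event_data(event), ts);
 *
 * The loop ends when the cpu buffer is empty; whether to sleep, poll or
 * give up at that point is the caller's business.
 */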
2046
2047 /**
2048  * ring_buffer_read_start - start a non consuming read of the buffer
2049  * @buffer: The ring buffer to read from
2050  * @cpu: The cpu buffer to iterate over
2051  *
2052  * This starts up an iteration through the buffer. It also disables
2053  * the recording to the buffer until the reading is finished.
2054  * This prevents the reading from being corrupted. This is not
2055  * a consuming read, so a producer is not expected.
2056  *
2057  * Must be paired with ring_buffer_read_finish.
2058  */
2059 struct ring_buffer_iter *
2060 ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
2061 {
2062         struct ring_buffer_per_cpu *cpu_buffer;
2063         struct ring_buffer_iter *iter;
2064         unsigned long flags;
2065
2066         if (!cpumask_test_cpu(cpu, buffer->cpumask))
2067                 return NULL;
2068
2069         iter = kmalloc(sizeof(*iter), GFP_KERNEL);
2070         if (!iter)
2071                 return NULL;
2072
2073         cpu_buffer = buffer->buffers[cpu];
2074
2075         iter->cpu_buffer = cpu_buffer;
2076
2077         atomic_inc(&cpu_buffer->record_disabled);
2078         synchronize_sched();
2079
2080         spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2081         __raw_spin_lock(&cpu_buffer->lock);
2082         rb_iter_reset(iter);
2083         __raw_spin_unlock(&cpu_buffer->lock);
2084         spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2085
2086         return iter;
2087 }
2088 EXPORT_SYMBOL_GPL(ring_buffer_read_start);
2089
2090 /**
2091  * ring_buffer_read_finish - finish reading the iterator of the buffer
2092  * @iter: The iterator retrieved by ring_buffer_read_start
2093  *
2094  * This re-enables the recording to the buffer, and frees the
2095  * iterator.
2096  */
2097 void
2098 ring_buffer_read_finish(struct ring_buffer_iter *iter)
2099 {
2100         struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
2101
2102         atomic_dec(&cpu_buffer->record_disabled);
2103         kfree(iter);
2104 }
2105 EXPORT_SYMBOL_GPL(ring_buffer_read_finish);
2106
2107 /**
2108  * ring_buffer_read - read the next item in the ring buffer by the iterator
2109  * @iter: The ring buffer iterator
2110  * @ts: The time stamp of the event read.
2111  *
2112  * This reads the next event in the ring buffer and increments the iterator.
2113  */
2114 struct ring_buffer_event *
2115 ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
2116 {
2117         struct ring_buffer_event *event;
2118         struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
2119         unsigned long flags;
2120
2121         spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2122         event = rb_iter_peek(iter, ts);
2123         if (!event)
2124                 goto out;
2125
2126         rb_advance_iter(iter);
2127  out:
2128         spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2129
2130         return event;
2131 }
2132 EXPORT_SYMBOL_GPL(ring_buffer_read);
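
/*
 * Putting the iterator calls together, a non consuming dump of one cpu
 * buffer might look like this sketch, with dump_event() standing in for
 * the caller's output routine:
 *
 *	u64 ts;
 *	struct ring_buffer_event *event;
 *	struct ring_buffer_iter *iter;
 *
 *	iter = ring_buffer_read_start(buffer, cpu);
 *	if (!iter)
 *		return;
 *	while ((event = ring_buffer_read(iter, &ts)))
 *		dump_event(event, ts);
 *	ring_buffer_read_finish(iter);
 *
 * Recording to this cpu buffer stays disabled for the whole walk, as
 * described at ring_buffer_read_start() above.
 */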
2133
2134 /**
2135  * ring_buffer_size - return the size of the ring buffer (in bytes)
2136  * @buffer: The ring buffer.
2137  */
2138 unsigned long ring_buffer_size(struct ring_buffer *buffer)
2139 {
2140         return BUF_PAGE_SIZE * buffer->pages;
2141 }
2142 EXPORT_SYMBOL_GPL(ring_buffer_size);
2143
2144 static void
2145 rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
2146 {
2147         cpu_buffer->head_page
2148                 = list_entry(cpu_buffer->pages.next, struct buffer_page, list);
2149         local_set(&cpu_buffer->head_page->write, 0);
2150         local_set(&cpu_buffer->head_page->page->commit, 0);
2151
2152         cpu_buffer->head_page->read = 0;
2153
2154         cpu_buffer->tail_page = cpu_buffer->head_page;
2155         cpu_buffer->commit_page = cpu_buffer->head_page;
2156
2157         INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
2158         local_set(&cpu_buffer->reader_page->write, 0);
2159         local_set(&cpu_buffer->reader_page->page->commit, 0);
2160         cpu_buffer->reader_page->read = 0;
2161
2162         cpu_buffer->overrun = 0;
2163         cpu_buffer->entries = 0;
2164
2165         cpu_buffer->write_stamp = 0;
2166         cpu_buffer->read_stamp = 0;
2167 }
2168
2169 /**
2170  * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer
2171  * @buffer: The ring buffer to reset a per cpu buffer of
2172  * @cpu: The CPU buffer to be reset
2173  */
2174 void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
2175 {
2176         struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
2177         unsigned long flags;
2178
2179         if (!cpumask_test_cpu(cpu, buffer->cpumask))
2180                 return;
2181
2182         spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2183
2184         __raw_spin_lock(&cpu_buffer->lock);
2185
2186         rb_reset_cpu(cpu_buffer);
2187
2188         __raw_spin_unlock(&cpu_buffer->lock);
2189
2190         spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2191 }
2192 EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);
2193
2194 /**
2195  * ring_buffer_reset - reset a ring buffer
2196  * @buffer: The ring buffer to reset all cpu buffers
2197  */
2198 void ring_buffer_reset(struct ring_buffer *buffer)
2199 {
2200         int cpu;
2201
2202         for_each_buffer_cpu(buffer, cpu)
2203                 ring_buffer_reset_cpu(buffer, cpu);
2204 }
2205 EXPORT_SYMBOL_GPL(ring_buffer_reset);
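
/*
 * Callers that reuse a buffer between runs typically quiesce the writers
 * around the reset, roughly along these lines (using the record
 * disable/enable helpers defined earlier in this file):
 *
 *	ring_buffer_record_disable(buffer);
 *	ring_buffer_reset(buffer);
 *	ring_buffer_record_enable(buffer);
 */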
2206
2207 /**
2208  * ring_buffer_empty - is the ring buffer empty?
2209  * @buffer: The ring buffer to test
2210  */
2211 int ring_buffer_empty(struct ring_buffer *buffer)
2212 {
2213         struct ring_buffer_per_cpu *cpu_buffer;
2214         int cpu;
2215
2216         /* yes this is racy, but if you don't like the race, lock the buffer */
2217         for_each_buffer_cpu(buffer, cpu) {
2218                 cpu_buffer = buffer->buffers[cpu];
2219                 if (!rb_per_cpu_empty(cpu_buffer))
2220                         return 0;
2221         }
2222         return 1;
2223 }
2224 EXPORT_SYMBOL_GPL(ring_buffer_empty);
2225
2226 /**
2227  * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
2228  * @buffer: The ring buffer
2229  * @cpu: The CPU buffer to test
2230  */
2231 int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
2232 {
2233         struct ring_buffer_per_cpu *cpu_buffer;
2234
2235         if (!cpumask_test_cpu(cpu, buffer->cpumask))
2236                 return 1;
2237
2238         cpu_buffer = buffer->buffers[cpu];
2239         return rb_per_cpu_empty(cpu_buffer);
2240 }
2241 EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu);
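
/*
 * These checks are only snapshots, so they are mostly useful for polling.
 * A sketch of a reader waiting for data to show up on any cpu:
 *
 *	while (ring_buffer_empty(buffer))
 *		msleep(10);
 *
 * A writer can of course race with the check, as the comment in
 * ring_buffer_empty() above points out.
 */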
2242
2243 /**
2244  * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
2245  * @buffer_a: One buffer to swap with
2246  * @buffer_b: The other buffer to swap with
2247  * @cpu: The per CPU buffer to swap between the two ring buffers
2248  *
2249  * This function is useful for tracers that want to take a "snapshot" of a
2250  * CPU buffer and have another backup buffer lying around. It is expected
2251  * that the tracer handles the cpu buffer not being used at the moment.
2252  */
2253 int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
2254                          struct ring_buffer *buffer_b, int cpu)
2255 {
2256         struct ring_buffer_per_cpu *cpu_buffer_a;
2257         struct ring_buffer_per_cpu *cpu_buffer_b;
2258
2259         if (!cpumask_test_cpu(cpu, buffer_a->cpumask) ||
2260             !cpumask_test_cpu(cpu, buffer_b->cpumask))
2261                 return -EINVAL;
2262
2263         /* At least make sure the two buffers are somewhat the same */
2264         if (buffer_a->pages != buffer_b->pages)
2265                 return -EINVAL;
2266
2267         if (ring_buffer_flags != RB_BUFFERS_ON)
2268                 return -EAGAIN;
2269
2270         if (atomic_read(&buffer_a->record_disabled))
2271                 return -EAGAIN;
2272
2273         if (atomic_read(&buffer_b->record_disabled))
2274                 return -EAGAIN;
2275
2276         cpu_buffer_a = buffer_a->buffers[cpu];
2277         cpu_buffer_b = buffer_b->buffers[cpu];
2278
2279         if (atomic_read(&cpu_buffer_a->record_disabled))
2280                 return -EAGAIN;
2281
2282         if (atomic_read(&cpu_buffer_b->record_disabled))
2283                 return -EAGAIN;
2284
2285         /*
2286          * We can't do a synchronize_sched here because this
2287          * function can be called in atomic context.
2288          * Normally this will be called from the same CPU as cpu.
2289          * If not it's up to the caller to protect this.
2290          */
2291         atomic_inc(&cpu_buffer_a->record_disabled);
2292         atomic_inc(&cpu_buffer_b->record_disabled);
2293
2294         buffer_a->buffers[cpu] = cpu_buffer_b;
2295         buffer_b->buffers[cpu] = cpu_buffer_a;
2296
2297         cpu_buffer_b->buffer = buffer_a;
2298         cpu_buffer_a->buffer = buffer_b;
2299
2300         atomic_dec(&cpu_buffer_a->record_disabled);
2301         atomic_dec(&cpu_buffer_b->record_disabled);
2302
2303         return 0;
2304 }
2305 EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
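
/*
 * The "snapshot" use mentioned above, as a sketch: a tracer keeps a spare
 * ring buffer of the same geometry and swaps one cpu's live buffer into
 * it, then reads the spare at leisure while the live buffer keeps
 * recording (read_snapshot() is a stand-in for the tracer's reader):
 *
 *	if (ring_buffer_swap_cpu(live_buffer, snap_buffer, cpu) == 0)
 *		read_snapshot(snap_buffer, cpu);
 */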
2306
2307 static void rb_remove_entries(struct ring_buffer_per_cpu *cpu_buffer,
2308                               struct buffer_data_page *bpage)
2309 {
2310         struct ring_buffer_event *event;
2311         unsigned long head;
2312
2313         __raw_spin_lock(&cpu_buffer->lock);
2314         for (head = 0; head < local_read(&bpage->commit);
2315              head += rb_event_length(event)) {
2316
2317                 event = __rb_data_page_index(bpage, head);
2318                 if (RB_WARN_ON(cpu_buffer, rb_null_event(event)))
2319                         return;
2320                 /* Only count data entries */
2321                 if (event->type != RINGBUF_TYPE_DATA)
2322                         continue;
2323                 cpu_buffer->entries--;
2324         }
2325         __raw_spin_unlock(&cpu_buffer->lock);
2326 }
2327
2328 /**
2329  * ring_buffer_alloc_read_page - allocate a page to read from buffer
2330  * @buffer: the buffer to allocate for.
2331  *
2332  * This function is used in conjunction with ring_buffer_read_page.
2333  * When reading a full page from the ring buffer, these functions
2334  * can be used to speed up the process. The calling function should
2335  * allocate a few pages first with this function. Then when it
2336  * needs to get pages from the ring buffer, it passes the result
2337  * of this function into ring_buffer_read_page, which will swap
2338  * the allocated page with the reader page of the buffer.
2339  *
2340  * Returns:
2341  *  The page allocated, or NULL on error.
2342  */
2343 void *ring_buffer_alloc_read_page(struct ring_buffer *buffer)
2344 {
2345         unsigned long addr;
2346         struct buffer_data_page *bpage;
2347
2348         addr = __get_free_page(GFP_KERNEL);
2349         if (!addr)
2350                 return NULL;
2351
2352         bpage = (void *)addr;
2353
2354         return bpage;
2355 }
2356
2357 /**
2358  * ring_buffer_free_read_page - free an allocated read page
2359  * @buffer: the buffer the page was allocated for
2360  * @data: the page to free
2361  *
2362  * Free a page allocated from ring_buffer_alloc_read_page.
2363  */
2364 void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data)
2365 {
2366         free_page((unsigned long)data);
2367 }
2368
2369 /**
2370  * ring_buffer_read_page - extract a page from the ring buffer
2371  * @buffer: buffer to extract from
2372  * @data_page: the page to use allocated from ring_buffer_alloc_read_page
2373  * @cpu: the cpu of the buffer to extract
2374  * @full: should the extraction only happen when the page is full.
2375  *
2376  * This function will pull out a page from the ring buffer and consume it.
2377  * @data_page must be the address of the variable that was returned
2378  * from ring_buffer_alloc_read_page. This is because the page might be used
2379  * to swap with a page in the ring buffer.
2380  *
2381  * for example:
2382  *      rpage = ring_buffer_alloc_read_page(buffer);
2383  *      if (!rpage)
2384  *              return error;
2385  *      ret = ring_buffer_read_page(buffer, &rpage, cpu, 0);
2386  *      if (ret)
2387  *              process_page(rpage);
2388  *
2389  * When @full is set, the function will not return true unless
2390  * the writer is off the reader page.
2391  *
2392  * Note: it is up to the calling functions to handle sleeps and wakeups.
2393  *  The ring buffer can be used anywhere in the kernel and can not
2394  *  blindly call wake_up. The layer that uses the ring buffer must be
2395  *  responsible for that.
2396  *
2397  * Returns:
2398  *  1 if data has been transferred
2399  *  0 if no data has been transferred.
2400  */
2401 int ring_buffer_read_page(struct ring_buffer *buffer,
2402                             void **data_page, int cpu, int full)
2403 {
2404         struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
2405         struct ring_buffer_event *event;
2406         struct buffer_data_page *bpage;
2407         unsigned long flags;
2408         int ret = 0;
2409
2410         if (!data_page)
2411                 return 0;
2412
2413         bpage = *data_page;
2414         if (!bpage)
2415                 return 0;
2416
2417         spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2418
2419         /*
2420          * rb_buffer_peek will get the next reader page if
2421          * the current reader page is empty.
2422          */
2423         event = rb_buffer_peek(buffer, cpu, NULL);
2424         if (!event)
2425                 goto out;
2426
2427         /* check for data */
2428         if (!local_read(&cpu_buffer->reader_page->page->commit))
2429                 goto out;
2430         /*
2431          * If the writer is already off of the read page, then simply
2432          * switch the read page with the given page. Otherwise
2433          * we need to copy the data from the reader to the writer.
2434          */
2435         if (cpu_buffer->reader_page == cpu_buffer->commit_page) {
2436                 unsigned int read = cpu_buffer->reader_page->read;
2437
2438                 if (full)
2439                         goto out;
2440                 /* The writer is still on the reader page, we must copy */
2441                 bpage = cpu_buffer->reader_page->page;
2442                 memcpy(bpage->data,
2443                        cpu_buffer->reader_page->page->data + read,
2444                        local_read(&bpage->commit) - read);
2445
2446                 /* consume what was read */
2447                 cpu_buffer->reader_page->read = local_read(&bpage->commit);
2448
2449         } else {
2450                 /* swap the pages */
2451                 rb_init_page(bpage);
2452                 bpage = cpu_buffer->reader_page->page;
2453                 cpu_buffer->reader_page->page = *data_page;
2454                 cpu_buffer->reader_page->read = 0;
2455                 *data_page = bpage;
2456         }
2457         ret = 1;
2458
2459         /* update the entry counter */
2460         rb_remove_entries(cpu_buffer, bpage);
2461  out:
2462         spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2463
2464         return ret;
2465 }
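
/*
 * Tying the three page helpers together, a sketch of draining one cpu's
 * data a page at a time (write_page() stands in for wherever the data
 * goes, e.g. a splice or relay path):
 *
 *	void *rpage = ring_buffer_alloc_read_page(buffer);
 *
 *	if (!rpage)
 *		return -ENOMEM;
 *	while (ring_buffer_read_page(buffer, &rpage, cpu, 0))
 *		write_page(rpage);
 *	ring_buffer_free_read_page(buffer, rpage);
 *
 * Passing 1 for @full instead only succeeds when the writer has moved off
 * the reader page, which suits readers that only want completed pages.
 */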
2466
2467 static ssize_t
2468 rb_simple_read(struct file *filp, char __user *ubuf,
2469                size_t cnt, loff_t *ppos)
2470 {
2471         long *p = filp->private_data;
2472         char buf[64];
2473         int r;
2474
2475         if (test_bit(RB_BUFFERS_DISABLED_BIT, p))
2476                 r = sprintf(buf, "permanently disabled\n");
2477         else
2478                 r = sprintf(buf, "%d\n", test_bit(RB_BUFFERS_ON_BIT, p));
2479
2480         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
2481 }
2482
2483 static ssize_t
2484 rb_simple_write(struct file *filp, const char __user *ubuf,
2485                 size_t cnt, loff_t *ppos)
2486 {
2487         long *p = filp->private_data;
2488         char buf[64];
2489         long val;
2490         int ret;
2491
2492         if (cnt >= sizeof(buf))
2493                 return -EINVAL;
2494
2495         if (copy_from_user(&buf, ubuf, cnt))
2496                 return -EFAULT;
2497
2498         buf[cnt] = 0;
2499
2500         ret = strict_strtoul(buf, 10, &val);
2501         if (ret < 0)
2502                 return ret;
2503
2504         if (val)
2505                 set_bit(RB_BUFFERS_ON_BIT, p);
2506         else
2507                 clear_bit(RB_BUFFERS_ON_BIT, p);
2508
2509         (*ppos)++;
2510
2511         return cnt;
2512 }
2513
2514 static struct file_operations rb_simple_fops = {
2515         .open           = tracing_open_generic,
2516         .read           = rb_simple_read,
2517         .write          = rb_simple_write,
2518 };
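
/*
 * The "tracing_on" file created below lives in the tracing debugfs
 * directory, so with debugfs mounted in the usual place it can be driven
 * from user space like this:
 *
 *	echo 0 > /sys/kernel/debug/tracing/tracing_on	(stop recording)
 *	echo 1 > /sys/kernel/debug/tracing/tracing_on	(allow recording)
 *	cat /sys/kernel/debug/tracing/tracing_on	(query the switch)
 *
 * Reading reports "permanently disabled" once tracing_off_permanent()
 * has been called.
 */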
2519
2520
2521 static __init int rb_init_debugfs(void)
2522 {
2523         struct dentry *d_tracer;
2524         struct dentry *entry;
2525
2526         d_tracer = tracing_init_dentry();
2527
2528         entry = debugfs_create_file("tracing_on", 0644, d_tracer,
2529                                     &ring_buffer_flags, &rb_simple_fops);
2530         if (!entry)
2531                 pr_warning("Could not create debugfs 'tracing_on' entry\n");
2532
2533         return 0;
2534 }
2535
2536 fs_initcall(rb_init_debugfs);