kernel/trace/ring_buffer.c
1 /*
2  * Generic ring buffer
3  *
4  * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
5  */
6 #include <linux/ring_buffer.h>
7 #include <linux/spinlock.h>
8 #include <linux/debugfs.h>
9 #include <linux/uaccess.h>
10 #include <linux/module.h>
11 #include <linux/percpu.h>
12 #include <linux/mutex.h>
13 #include <linux/sched.h>        /* used for sched_clock() (for now) */
14 #include <linux/init.h>
15 #include <linux/hash.h>
16 #include <linux/list.h>
17 #include <linux/fs.h>
18
19 #include "trace.h"
20
21 /*
22  * A fast way to enable or disable all ring buffers is to
23  * call tracing_on or tracing_off. Turning off the ring buffers
24  * prevents all ring buffers from being recorded to.
25  * Turning this switch on makes it OK to write to the
26  * ring buffer, provided the ring buffer itself is enabled.
27  *
28  * There are three layers that must be on in order to write
29  * to the ring buffer.
30  *
31  * 1) This global flag must be set.
32  * 2) The ring buffer must be enabled for recording.
33  * 3) The per cpu buffer must be enabled for recording.
34  *
35  * In case of an anomaly, this global flag has a bit set that
36  * will permanently disable all ring buffers.
37  */
38
39 /*
40  * Global flag to disable all recording to ring buffers
41  *  This has two bits: ON, DISABLED
42  *
43  *  ON   DISABLED
44  * ---- ----------
45  *   0      0        : ring buffers are off
46  *   1      0        : ring buffers are on
47  *   X      1        : ring buffers are permanently disabled
48  */
49
50 enum {
51         RB_BUFFERS_ON_BIT       = 0,
52         RB_BUFFERS_DISABLED_BIT = 1,
53 };
54
55 enum {
56         RB_BUFFERS_ON           = 1 << RB_BUFFERS_ON_BIT,
57         RB_BUFFERS_DISABLED     = 1 << RB_BUFFERS_DISABLED_BIT,
58 };
59
60 static long ring_buffer_flags __read_mostly = RB_BUFFERS_ON;
61
62 /**
63  * tracing_on - enable all tracing buffers
64  *
65  * This function enables all tracing buffers that may have been
66  * disabled with tracing_off.
67  */
68 void tracing_on(void)
69 {
70         set_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
71 }
72 EXPORT_SYMBOL_GPL(tracing_on);
73
74 /**
75  * tracing_off - turn off all tracing buffers
76  *
77  * This function stops all tracing buffers from recording data.
78  * It does not disable any overhead the tracers themselves may
79  * be causing. This function simply causes all recording to
80  * the ring buffers to fail.
81  */
82 void tracing_off(void)
83 {
84         clear_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
85 }
86 EXPORT_SYMBOL_GPL(tracing_off);
87
88 /**
89  * tracing_off_permanent - permanently disable ring buffers
90  *
91  * This function, once called, will disable all ring buffers
92  * permanently.
93  */
94 void tracing_off_permanent(void)
95 {
96         set_bit(RB_BUFFERS_DISABLED_BIT, &ring_buffer_flags);
97 }
98
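/*
 * Editor's sketch (illustrative, not part of the original file): the
 * global switch above is typically flipped around a window of interest,
 * e.g. from a debugging hook. Recording still requires the per-buffer
 * and per-cpu enables described at the top of this file.
 */
#if 0
static void example_capture_window(void)
{
        tracing_off();          /* stop all ring buffers recording */
        /* ... dump or inspect the captured data here ... */
        tracing_on();           /* resume recording */
}
#endif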
101 /* Up this if you want to test the TIME_EXTEND events and normalization */
102 #define DEBUG_SHIFT 0
103
104 /* FIXME!!! */
105 u64 ring_buffer_time_stamp(int cpu)
106 {
107         u64 time;
108
109         preempt_disable_notrace();
110         /* shift to debug/test normalization and TIME_EXTEND events */
111         time = sched_clock() << DEBUG_SHIFT;
112         preempt_enable_no_resched_notrace();
113
114         return time;
115 }
116 EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);
117
118 void ring_buffer_normalize_time_stamp(int cpu, u64 *ts)
119 {
120         /* Just for testing the normalize function and deltas */
121         *ts >>= DEBUG_SHIFT;
122 }
123 EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);
124
125 #define RB_EVNT_HDR_SIZE (sizeof(struct ring_buffer_event))
126 #define RB_ALIGNMENT_SHIFT      2
127 #define RB_ALIGNMENT            (1 << RB_ALIGNMENT_SHIFT)
128 #define RB_MAX_SMALL_DATA       28
129
130 enum {
131         RB_LEN_TIME_EXTEND = 8,
132         RB_LEN_TIME_STAMP = 16,
133 };
134
135 /* inline for ring buffer fast paths */
136 static inline unsigned
137 rb_event_length(struct ring_buffer_event *event)
138 {
139         unsigned length;
140
141         switch (event->type) {
142         case RINGBUF_TYPE_PADDING:
143                 /* undefined */
144                 return -1;
145
146         case RINGBUF_TYPE_TIME_EXTEND:
147                 return RB_LEN_TIME_EXTEND;
148
149         case RINGBUF_TYPE_TIME_STAMP:
150                 return RB_LEN_TIME_STAMP;
151
152         case RINGBUF_TYPE_DATA:
153                 if (event->len)
154                         length = event->len << RB_ALIGNMENT_SHIFT;
155                 else
156                         length = event->array[0];
157                 return length + RB_EVNT_HDR_SIZE;
158         default:
159                 BUG();
160         }
161         /* not hit */
162         return 0;
163 }
164
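/*
 * Worked example (editor's note, assuming the 4-byte event header): a
 * data event with a 5-byte payload stores len = (5 + 3) >> 2 = 2, so
 * rb_event_length() returns (2 << 2) + RB_EVNT_HDR_SIZE = 12 bytes.
 * A payload larger than RB_MAX_SMALL_DATA (28 bytes) sets len = 0 and
 * keeps the byte count in array[0] instead.
 */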
165 /**
166  * ring_buffer_event_length - return the length of the event
167  * @event: the event to get the length of
168  */
169 unsigned ring_buffer_event_length(struct ring_buffer_event *event)
170 {
171         return rb_event_length(event);
172 }
173 EXPORT_SYMBOL_GPL(ring_buffer_event_length);
174
175 /* inline for ring buffer fast paths */
176 static inline void *
177 rb_event_data(struct ring_buffer_event *event)
178 {
179         BUG_ON(event->type != RINGBUF_TYPE_DATA);
180         /* If length is in len field, then array[0] has the data */
181         if (event->len)
182                 return (void *)&event->array[0];
183         /* Otherwise length is in array[0] and array[1] has the data */
184         return (void *)&event->array[1];
185 }
186
187 /**
188  * ring_buffer_event_data - return the data of the event
189  * @event: the event to get the data from
190  */
191 void *ring_buffer_event_data(struct ring_buffer_event *event)
192 {
193         return rb_event_data(event);
194 }
195 EXPORT_SYMBOL_GPL(ring_buffer_event_data);
196
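/*
 * Editor's sketch (illustrative only): inspecting an event through the
 * accessors above. Note that for data events the returned length
 * includes the event header.
 */
#if 0
static void example_inspect(struct ring_buffer_event *event)
{
        void *body = ring_buffer_event_data(event);
        unsigned len = ring_buffer_event_length(event);

        printk(KERN_INFO "payload at %p, event length %u\n", body, len);
}
#endif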
197 #define for_each_buffer_cpu(buffer, cpu)                \
198         for_each_cpu_mask(cpu, buffer->cpumask)
199
200 #define TS_SHIFT        27
201 #define TS_MASK         ((1ULL << TS_SHIFT) - 1)
202 #define TS_DELTA_TEST   (~TS_MASK)
203
204 struct buffer_data_page {
205         u64              time_stamp;    /* page time stamp */
206         local_t          commit;        /* write committed index */
207         unsigned char    data[];        /* data of buffer page */
208 };
209
210 struct buffer_page {
211         local_t          write;         /* index for next write */
212         unsigned         read;          /* index for next read */
213         struct list_head list;          /* list of buffer pages */
214         struct buffer_data_page *page;  /* Actual data page */
215 };
216
217 static void rb_init_page(struct buffer_data_page *bpage)
218 {
219         local_set(&bpage->commit, 0);
220 }
221
222 /*
223  * Page-freeing logic borrowed from mm/slob.c. Thanks to Mathieu
224  * Desnoyers for pointing this issue out.
225  */
226 static inline void free_buffer_page(struct buffer_page *bpage)
227 {
228         if (bpage->page)
229                 free_page((unsigned long)bpage->page);
230         kfree(bpage);
231 }
232
233 /*
234  * We need to fit the time_stamp delta into 27 bits.
235  */
236 static inline int test_time_stamp(u64 delta)
237 {
238         if (delta & TS_DELTA_TEST)
239                 return 1;
240         return 0;
241 }
242
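/*
 * Editor's note: with sched_clock() counting nanoseconds, 27 bits cover
 * 2^27 ns, roughly 134 ms. Two events further apart than that force a
 * TIME_EXTEND event to carry the excess delta (see rb_add_time_stamp()
 * below).
 */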
243 #define BUF_PAGE_SIZE (PAGE_SIZE - sizeof(struct buffer_data_page))
244
245 /*
246  * head_page == tail_page && head == tail then buffer is empty.
247  */
248 struct ring_buffer_per_cpu {
249         int                             cpu;
250         struct ring_buffer              *buffer;
251         spinlock_t                      reader_lock; /* serialize readers */
252         raw_spinlock_t                  lock;
253         struct lock_class_key           lock_key;
254         struct list_head                pages;
255         struct buffer_page              *head_page;     /* read from head */
256         struct buffer_page              *tail_page;     /* write to tail */
257         struct buffer_page              *commit_page;   /* committed pages */
258         struct buffer_page              *reader_page;
259         unsigned long                   overrun;
260         unsigned long                   entries;
261         u64                             write_stamp;
262         u64                             read_stamp;
263         atomic_t                        record_disabled;
264 };
265
266 struct ring_buffer {
267         unsigned                        pages;
268         unsigned                        flags;
269         int                             cpus;
270         cpumask_t                       cpumask;
271         atomic_t                        record_disabled;
272
273         struct mutex                    mutex;
274
275         struct ring_buffer_per_cpu      **buffers;
276 };
277
278 struct ring_buffer_iter {
279         struct ring_buffer_per_cpu      *cpu_buffer;
280         unsigned long                   head;
281         struct buffer_page              *head_page;
282         u64                             read_stamp;
283 };
284
285 /* buffer may be either ring_buffer or ring_buffer_per_cpu */
286 #define RB_WARN_ON(buffer, cond)                                \
287         ({                                                      \
288                 int _____ret = unlikely(cond);                  \
289                 if (_____ret) {                                 \
290                         atomic_inc(&buffer->record_disabled);   \
291                         WARN_ON(1);                             \
292                 }                                               \
293                 _____ret;                                       \
294         })
295
296 /**
297  * rb_check_pages - integrity check of buffer pages
298  * @cpu_buffer: CPU buffer with pages to test
299  *
300  * As a safety measure we check to make sure the data pages have not
301  * been corrupted.
302  */
303 static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
304 {
305         struct list_head *head = &cpu_buffer->pages;
306         struct buffer_page *bpage, *tmp;
307
308         if (RB_WARN_ON(cpu_buffer, head->next->prev != head))
309                 return -1;
310         if (RB_WARN_ON(cpu_buffer, head->prev->next != head))
311                 return -1;
312
313         list_for_each_entry_safe(bpage, tmp, head, list) {
314                 if (RB_WARN_ON(cpu_buffer,
315                                bpage->list.next->prev != &bpage->list))
316                         return -1;
317                 if (RB_WARN_ON(cpu_buffer,
318                                bpage->list.prev->next != &bpage->list))
319                         return -1;
320         }
321
322         return 0;
323 }
324
325 static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
326                              unsigned nr_pages)
327 {
328         struct list_head *head = &cpu_buffer->pages;
329         struct buffer_page *bpage, *tmp;
330         unsigned long addr;
331         LIST_HEAD(pages);
332         unsigned i;
333
334         for (i = 0; i < nr_pages; i++) {
335                 bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
336                                     GFP_KERNEL, cpu_to_node(cpu_buffer->cpu));
337                 if (!bpage)
338                         goto free_pages;
339                 list_add(&bpage->list, &pages);
340
341                 addr = __get_free_page(GFP_KERNEL);
342                 if (!addr)
343                         goto free_pages;
344                 bpage->page = (void *)addr;
345                 rb_init_page(bpage->page);
346         }
347
348         list_splice(&pages, head);
349
350         rb_check_pages(cpu_buffer);
351
352         return 0;
353
354  free_pages:
355         list_for_each_entry_safe(bpage, tmp, &pages, list) {
356                 list_del_init(&bpage->list);
357                 free_buffer_page(bpage);
358         }
359         return -ENOMEM;
360 }
361
362 static struct ring_buffer_per_cpu *
363 rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
364 {
365         struct ring_buffer_per_cpu *cpu_buffer;
366         struct buffer_page *bpage;
367         unsigned long addr;
368         int ret;
369
370         cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
371                                   GFP_KERNEL, cpu_to_node(cpu));
372         if (!cpu_buffer)
373                 return NULL;
374
375         cpu_buffer->cpu = cpu;
376         cpu_buffer->buffer = buffer;
377         spin_lock_init(&cpu_buffer->reader_lock);
378         cpu_buffer->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
379         INIT_LIST_HEAD(&cpu_buffer->pages);
380
381         bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
382                             GFP_KERNEL, cpu_to_node(cpu));
383         if (!bpage)
384                 goto fail_free_buffer;
385
386         cpu_buffer->reader_page = bpage;
387         addr = __get_free_page(GFP_KERNEL);
388         if (!addr)
389                 goto fail_free_reader;
390         bpage->page = (void *)addr;
391         rb_init_page(bpage->page);
392
393         INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
394
395         ret = rb_allocate_pages(cpu_buffer, buffer->pages);
396         if (ret < 0)
397                 goto fail_free_reader;
398
399         cpu_buffer->head_page
400                 = list_entry(cpu_buffer->pages.next, struct buffer_page, list);
401         cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;
402
403         return cpu_buffer;
404
405  fail_free_reader:
406         free_buffer_page(cpu_buffer->reader_page);
407
408  fail_free_buffer:
409         kfree(cpu_buffer);
410         return NULL;
411 }
412
413 static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
414 {
415         struct list_head *head = &cpu_buffer->pages;
416         struct buffer_page *bpage, *tmp;
417
418         list_del_init(&cpu_buffer->reader_page->list);
419         free_buffer_page(cpu_buffer->reader_page);
420
421         list_for_each_entry_safe(bpage, tmp, head, list) {
422                 list_del_init(&bpage->list);
423                 free_buffer_page(bpage);
424         }
425         kfree(cpu_buffer);
426 }
427
428 /*
429  * Causes compile errors if the struct buffer_page gets bigger
430  * than the struct page.
431  */
432 extern int ring_buffer_page_too_big(void);
433
434 /**
435  * ring_buffer_alloc - allocate a new ring_buffer
436  * @size: the size in bytes per cpu that is needed.
437  * @flags: attributes to set for the ring buffer.
438  *
439  * Currently the only flag that is available is the RB_FL_OVERWRITE
440  * flag. This flag means that the buffer will overwrite old data
441  * when the buffer wraps. If this flag is not set, the buffer will
442  * drop data when the tail hits the head.
443  */
444 struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags)
445 {
446         struct ring_buffer *buffer;
447         int bsize;
448         int cpu;
449
450         /* Paranoid! Optimizes out when all is well */
451         if (sizeof(struct buffer_page) > sizeof(struct page))
452                 ring_buffer_page_too_big();
453
454
455         /* keep it in its own cache line */
456         buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
457                          GFP_KERNEL);
458         if (!buffer)
459                 return NULL;
460
461         buffer->pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
462         buffer->flags = flags;
463
464         /* need at least two pages */
465         if (buffer->pages == 1)
466                 buffer->pages++;
467
468         buffer->cpumask = cpu_possible_map;
469         buffer->cpus = nr_cpu_ids;
470
471         bsize = sizeof(void *) * nr_cpu_ids;
472         buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
473                                   GFP_KERNEL);
474         if (!buffer->buffers)
475                 goto fail_free_buffer;
476
477         for_each_buffer_cpu(buffer, cpu) {
478                 buffer->buffers[cpu] =
479                         rb_allocate_cpu_buffer(buffer, cpu);
480                 if (!buffer->buffers[cpu])
481                         goto fail_free_buffers;
482         }
483
484         mutex_init(&buffer->mutex);
485
486         return buffer;
487
488  fail_free_buffers:
489         for_each_buffer_cpu(buffer, cpu) {
490                 if (buffer->buffers[cpu])
491                         rb_free_cpu_buffer(buffer->buffers[cpu]);
492         }
493         kfree(buffer->buffers);
494
495  fail_free_buffer:
496         kfree(buffer);
497         return NULL;
498 }
499 EXPORT_SYMBOL_GPL(ring_buffer_alloc);
500
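/*
 * Editor's sketch (illustrative only): creating a buffer of roughly
 * 1 MB per cpu that overwrites old data once full.
 */
#if 0
static struct ring_buffer *example_buffer;

static int __init example_init(void)
{
        example_buffer = ring_buffer_alloc(1 << 20, RB_FL_OVERWRITE);
        return example_buffer ? 0 : -ENOMEM;
}
#endif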
501 /**
502  * ring_buffer_free - free a ring buffer.
503  * @buffer: the buffer to free.
504  */
505 void
506 ring_buffer_free(struct ring_buffer *buffer)
507 {
508         int cpu;
509
510         for_each_buffer_cpu(buffer, cpu)
511                 rb_free_cpu_buffer(buffer->buffers[cpu]);
512
513         kfree(buffer);
514 }
515 EXPORT_SYMBOL_GPL(ring_buffer_free);
516
517 static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
518
519 static void
520 rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
521 {
522         struct buffer_page *bpage;
523         struct list_head *p;
524         unsigned i;
525
526         atomic_inc(&cpu_buffer->record_disabled);
527         synchronize_sched();
528
529         for (i = 0; i < nr_pages; i++) {
530                 if (RB_WARN_ON(cpu_buffer, list_empty(&cpu_buffer->pages)))
531                         return;
532                 p = cpu_buffer->pages.next;
533                 bpage = list_entry(p, struct buffer_page, list);
534                 list_del_init(&bpage->list);
535                 free_buffer_page(bpage);
536         }
537         if (RB_WARN_ON(cpu_buffer, list_empty(&cpu_buffer->pages)))
538                 return;
539
540         rb_reset_cpu(cpu_buffer);
541
542         rb_check_pages(cpu_buffer);
543
544         atomic_dec(&cpu_buffer->record_disabled);
545
546 }
547
548 static void
549 rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
550                 struct list_head *pages, unsigned nr_pages)
551 {
552         struct buffer_page *bpage;
553         struct list_head *p;
554         unsigned i;
555
556         atomic_inc(&cpu_buffer->record_disabled);
557         synchronize_sched();
558
559         for (i = 0; i < nr_pages; i++) {
560                 if (RB_WARN_ON(cpu_buffer, list_empty(pages)))
561                         return;
562                 p = pages->next;
563                 bpage = list_entry(p, struct buffer_page, list);
564                 list_del_init(&bpage->list);
565                 list_add_tail(&bpage->list, &cpu_buffer->pages);
566         }
567         rb_reset_cpu(cpu_buffer);
568
569         rb_check_pages(cpu_buffer);
570
571         atomic_dec(&cpu_buffer->record_disabled);
572 }
573
574 /**
575  * ring_buffer_resize - resize the ring buffer
576  * @buffer: the buffer to resize.
577  * @size: the new size.
578  *
579  * The tracer is responsible for making sure that the buffer is
580  * not being used while changing the size.
581  * Note: We may be able to change the above requirement by using
582  *  RCU synchronizations.
583  *
584  * Minimum size is 2 * BUF_PAGE_SIZE.
585  *
586  * Returns the new size on success, or -1 / -ENOMEM on failure.
587  */
588 int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
589 {
590         struct ring_buffer_per_cpu *cpu_buffer;
591         unsigned nr_pages, rm_pages, new_pages;
592         struct buffer_page *bpage, *tmp;
593         unsigned long buffer_size;
594         unsigned long addr;
595         LIST_HEAD(pages);
596         int i, cpu;
597
598         /*
599          * Always succeed at resizing a non-existent buffer:
600          */
601         if (!buffer)
602                 return size;
603
604         size = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
605         size *= BUF_PAGE_SIZE;
606         buffer_size = buffer->pages * BUF_PAGE_SIZE;
607
608         /* we need a minimum of two pages */
609         if (size < BUF_PAGE_SIZE * 2)
610                 size = BUF_PAGE_SIZE * 2;
611
612         if (size == buffer_size)
613                 return size;
614
615         mutex_lock(&buffer->mutex);
616
617         nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
618
619         if (size < buffer_size) {
620
621                 /* easy case, just free pages */
622                 if (RB_WARN_ON(buffer, nr_pages >= buffer->pages)) {
623                         mutex_unlock(&buffer->mutex);
624                         return -1;
625                 }
626
627                 rm_pages = buffer->pages - nr_pages;
628
629                 for_each_buffer_cpu(buffer, cpu) {
630                         cpu_buffer = buffer->buffers[cpu];
631                         rb_remove_pages(cpu_buffer, rm_pages);
632                 }
633                 goto out;
634         }
635
636         /*
637          * This is a bit more difficult. We only want to add pages
638          * when we can allocate enough for all CPUs. We do this
639          * by allocating all the pages and storing them on a local
640          * linked list. If we succeed in our allocation, then we
641          * add these pages to the cpu_buffers. Otherwise we just free
642          * them all and return -ENOMEM.
643          */
644         if (RB_WARN_ON(buffer, nr_pages <= buffer->pages)) {
645                 mutex_unlock(&buffer->mutex);
646                 return -1;
647         }
648
649         new_pages = nr_pages - buffer->pages;
650
651         for_each_buffer_cpu(buffer, cpu) {
652                 for (i = 0; i < new_pages; i++) {
653                         bpage = kzalloc_node(ALIGN(sizeof(*bpage),
654                                                   cache_line_size()),
655                                             GFP_KERNEL, cpu_to_node(cpu));
656                         if (!bpage)
657                                 goto free_pages;
658                         list_add(&bpage->list, &pages);
659                         addr = __get_free_page(GFP_KERNEL);
660                         if (!addr)
661                                 goto free_pages;
662                         bpage->page = (void *)addr;
663                         rb_init_page(bpage->page);
664                 }
665         }
666
667         for_each_buffer_cpu(buffer, cpu) {
668                 cpu_buffer = buffer->buffers[cpu];
669                 rb_insert_pages(cpu_buffer, &pages, new_pages);
670         }
671
672         if (RB_WARN_ON(buffer, !list_empty(&pages))) {
673                 mutex_unlock(&buffer->mutex);
674                 return -1;
675         }
676
677  out:
678         buffer->pages = nr_pages;
679         mutex_unlock(&buffer->mutex);
680
681         return size;
682
683  free_pages:
684         list_for_each_entry_safe(bpage, tmp, &pages, list) {
685                 list_del_init(&bpage->list);
686                 free_buffer_page(bpage);
687         }
688         mutex_unlock(&buffer->mutex);
689         return -ENOMEM;
690 }
691 EXPORT_SYMBOL_GPL(ring_buffer_resize);
692
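/*
 * Editor's sketch (illustrative only): growing the buffer created
 * earlier. On success the return value is the new size rounded to whole
 * pages; on failure the old pages are left in place.
 */
#if 0
static int example_grow(struct ring_buffer *buffer)
{
        int ret = ring_buffer_resize(buffer, 2 << 20);

        return ret < 0 ? ret : 0;
}
#endif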
693 static inline int rb_null_event(struct ring_buffer_event *event)
694 {
695         return event->type == RINGBUF_TYPE_PADDING;
696 }
697
698 static inline void *
699 __rb_data_page_index(struct buffer_data_page *bpage, unsigned index)
700 {
701         return bpage->data + index;
702 }
703
704 static inline void *__rb_page_index(struct buffer_page *bpage, unsigned index)
705 {
706         return bpage->page->data + index;
707 }
708
709 static inline struct ring_buffer_event *
710 rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
711 {
712         return __rb_page_index(cpu_buffer->reader_page,
713                                cpu_buffer->reader_page->read);
714 }
715
716 static inline struct ring_buffer_event *
717 rb_head_event(struct ring_buffer_per_cpu *cpu_buffer)
718 {
719         return __rb_page_index(cpu_buffer->head_page,
720                                cpu_buffer->head_page->read);
721 }
722
723 static inline struct ring_buffer_event *
724 rb_iter_head_event(struct ring_buffer_iter *iter)
725 {
726         return __rb_page_index(iter->head_page, iter->head);
727 }
728
729 static inline unsigned rb_page_write(struct buffer_page *bpage)
730 {
731         return local_read(&bpage->write);
732 }
733
734 static inline unsigned rb_page_commit(struct buffer_page *bpage)
735 {
736         return local_read(&bpage->page->commit);
737 }
738
739 /* Size is determined by what has been committed */
740 static inline unsigned rb_page_size(struct buffer_page *bpage)
741 {
742         return rb_page_commit(bpage);
743 }
744
745 static inline unsigned
746 rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer)
747 {
748         return rb_page_commit(cpu_buffer->commit_page);
749 }
750
751 static inline unsigned rb_head_size(struct ring_buffer_per_cpu *cpu_buffer)
752 {
753         return rb_page_commit(cpu_buffer->head_page);
754 }
755
756 /*
757  * When the tail hits the head and the buffer is in overwrite mode,
758  * the head jumps to the next page and all content on the previous
759  * page is discarded. But before doing so, we update the overrun
760  * variable of the buffer.
761  */
762 static void rb_update_overflow(struct ring_buffer_per_cpu *cpu_buffer)
763 {
764         struct ring_buffer_event *event;
765         unsigned long head;
766
767         for (head = 0; head < rb_head_size(cpu_buffer);
768              head += rb_event_length(event)) {
769
770                 event = __rb_page_index(cpu_buffer->head_page, head);
771                 if (RB_WARN_ON(cpu_buffer, rb_null_event(event)))
772                         return;
773                 /* Only count data entries */
774                 if (event->type != RINGBUF_TYPE_DATA)
775                         continue;
776                 cpu_buffer->overrun++;
777                 cpu_buffer->entries--;
778         }
779 }
780
781 static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer,
782                                struct buffer_page **bpage)
783 {
784         struct list_head *p = (*bpage)->list.next;
785
786         if (p == &cpu_buffer->pages)
787                 p = p->next;
788
789         *bpage = list_entry(p, struct buffer_page, list);
790 }
791
792 static inline unsigned
793 rb_event_index(struct ring_buffer_event *event)
794 {
795         unsigned long addr = (unsigned long)event;
796
797         return (addr & ~PAGE_MASK) - (PAGE_SIZE - BUF_PAGE_SIZE);
798 }
799
800 static inline int
801 rb_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
802              struct ring_buffer_event *event)
803 {
804         unsigned long addr = (unsigned long)event;
805         unsigned long index;
806
807         index = rb_event_index(event);
808         addr &= PAGE_MASK;
809
810         return cpu_buffer->commit_page->page == (void *)addr &&
811                 rb_commit_index(cpu_buffer) == index;
812 }
813
814 static inline void
815 rb_set_commit_event(struct ring_buffer_per_cpu *cpu_buffer,
816                     struct ring_buffer_event *event)
817 {
818         unsigned long addr = (unsigned long)event;
819         unsigned long index;
820
821         index = rb_event_index(event);
822         addr &= PAGE_MASK;
823
824         while (cpu_buffer->commit_page->page != (void *)addr) {
825                 if (RB_WARN_ON(cpu_buffer,
826                           cpu_buffer->commit_page == cpu_buffer->tail_page))
827                         return;
828                 cpu_buffer->commit_page->page->commit =
829                         cpu_buffer->commit_page->write;
830                 rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
831                 cpu_buffer->write_stamp =
832                         cpu_buffer->commit_page->page->time_stamp;
833         }
834
835         /* Now set the commit to the event's index */
836         local_set(&cpu_buffer->commit_page->page->commit, index);
837 }
838
839 static inline void
840 rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
841 {
842         /*
843          * We only race with interrupts and NMIs on this CPU.
844          * If we own the commit event, then we can commit
845          * all others that interrupted us, since the interruptions
846          * are in stack format (they finish before they come
847          * back to us). This allows us to do a simple loop to
848          * assign the commit to the tail.
849          */
850  again:
851         while (cpu_buffer->commit_page != cpu_buffer->tail_page) {
852                 cpu_buffer->commit_page->page->commit =
853                         cpu_buffer->commit_page->write;
854                 rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
855                 cpu_buffer->write_stamp =
856                         cpu_buffer->commit_page->page->time_stamp;
857                 /* add barrier to keep gcc from optimizing too much */
858                 barrier();
859         }
860         while (rb_commit_index(cpu_buffer) !=
861                rb_page_write(cpu_buffer->commit_page)) {
862                 cpu_buffer->commit_page->page->commit =
863                         cpu_buffer->commit_page->write;
864                 barrier();
865         }
866
867         /* again, keep gcc from optimizing */
868         barrier();
869
870         /*
871          * If an interrupt came in just after the first while loop
872          * and pushed the tail page forward, we will be left with
873          * a dangling commit that will never go forward.
874          */
875         if (unlikely(cpu_buffer->commit_page != cpu_buffer->tail_page))
876                 goto again;
877 }
878
879 static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
880 {
881         cpu_buffer->read_stamp = cpu_buffer->reader_page->page->time_stamp;
882         cpu_buffer->reader_page->read = 0;
883 }
884
885 static inline void rb_inc_iter(struct ring_buffer_iter *iter)
886 {
887         struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
888
889         /*
890          * The iterator could be on the reader page (it starts there).
891          * But the head could have moved, since the reader was
892          * found. Check for this case and assign the iterator
893          * to the head page instead of next.
894          */
895         if (iter->head_page == cpu_buffer->reader_page)
896                 iter->head_page = cpu_buffer->head_page;
897         else
898                 rb_inc_page(cpu_buffer, &iter->head_page);
899
900         iter->read_stamp = iter->head_page->page->time_stamp;
901         iter->head = 0;
902 }
903
904 /**
905  * rb_update_event - update event type and data
906  * @event: the event to update
907  * @type: the type of event
908  * @length: the size of the event field in the ring buffer
909  *
910  * Update the type and data fields of the event. The length
911  * is the actual size that is written to the ring buffer,
912  * and with this, we can determine what to place into the
913  * data field.
914  */
915 static inline void
916 rb_update_event(struct ring_buffer_event *event,
917                          unsigned type, unsigned length)
918 {
919         event->type = type;
920
921         switch (type) {
922
923         case RINGBUF_TYPE_PADDING:
924                 break;
925
926         case RINGBUF_TYPE_TIME_EXTEND:
927                 event->len =
928                         (RB_LEN_TIME_EXTEND + (RB_ALIGNMENT-1))
929                         >> RB_ALIGNMENT_SHIFT;
930                 break;
931
932         case RINGBUF_TYPE_TIME_STAMP:
933                 event->len =
934                         (RB_LEN_TIME_STAMP + (RB_ALIGNMENT-1))
935                         >> RB_ALIGNMENT_SHIFT;
936                 break;
937
938         case RINGBUF_TYPE_DATA:
939                 length -= RB_EVNT_HDR_SIZE;
940                 if (length > RB_MAX_SMALL_DATA) {
941                         event->len = 0;
942                         event->array[0] = length;
943                 } else
944                         event->len =
945                                 (length + (RB_ALIGNMENT-1))
946                                 >> RB_ALIGNMENT_SHIFT;
947                 break;
948         default:
949                 BUG();
950         }
951 }
952
953 static inline unsigned rb_calculate_event_length(unsigned length)
954 {
955         struct ring_buffer_event event; /* Used only for sizeof array */
956
957         /* zero length can cause confusion */
958         if (!length)
959                 length = 1;
960
961         if (length > RB_MAX_SMALL_DATA)
962                 length += sizeof(event.array[0]);
963
964         length += RB_EVNT_HDR_SIZE;
965         length = ALIGN(length, RB_ALIGNMENT);
966
967         return length;
968 }
969
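/*
 * Worked examples for the above (editor's note, assuming the 4-byte
 * event header): length 5 -> 4 + 5 = 9, aligned up to 12 bytes
 * reserved; length 40 exceeds RB_MAX_SMALL_DATA, so array[0] is
 * consumed as well: 4 + 4 + 40 = 48 bytes, already aligned.
 */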
970 static struct ring_buffer_event *
971 __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
972                   unsigned type, unsigned long length, u64 *ts)
973 {
974         struct buffer_page *tail_page, *head_page, *reader_page, *commit_page;
975         unsigned long tail, write;
976         struct ring_buffer *buffer = cpu_buffer->buffer;
977         struct ring_buffer_event *event;
978         unsigned long flags;
979
980         commit_page = cpu_buffer->commit_page;
981         /* we just need to protect against interrupts */
982         barrier();
983         tail_page = cpu_buffer->tail_page;
984         write = local_add_return(length, &tail_page->write);
985         tail = write - length;
986
987         /* See if we shot past the end of this buffer page */
988         if (write > BUF_PAGE_SIZE) {
989                 struct buffer_page *next_page = tail_page;
990
991                 local_irq_save(flags);
992                 __raw_spin_lock(&cpu_buffer->lock);
993
994                 rb_inc_page(cpu_buffer, &next_page);
995
996                 head_page = cpu_buffer->head_page;
997                 reader_page = cpu_buffer->reader_page;
998
999                 /* we grabbed the lock before incrementing */
1000                 if (RB_WARN_ON(cpu_buffer, next_page == reader_page))
1001                         goto out_unlock;
1002
1003                 /*
1004                  * If, for some reason, we had an interrupt storm that made
1005                  * it all the way around the buffer, bail, and warn
1006                  * about it.
1007                  */
1008                 if (unlikely(next_page == commit_page)) {
1009                         WARN_ON_ONCE(1);
1010                         goto out_unlock;
1011                 }
1012
1013                 if (next_page == head_page) {
1014                         if (!(buffer->flags & RB_FL_OVERWRITE)) {
1015                                 /* reset write */
1016                                 if (tail <= BUF_PAGE_SIZE)
1017                                         local_set(&tail_page->write, tail);
1018                                 goto out_unlock;
1019                         }
1020
1021                         /* tail_page has not moved yet? */
1022                         if (tail_page == cpu_buffer->tail_page) {
1023                                 /* count overflows */
1024                                 rb_update_overflow(cpu_buffer);
1025
1026                                 rb_inc_page(cpu_buffer, &head_page);
1027                                 cpu_buffer->head_page = head_page;
1028                                 cpu_buffer->head_page->read = 0;
1029                         }
1030                 }
1031
1032                 /*
1033                  * If the tail page is still the same as what we think
1034                  * it is, then it is up to us to update the tail
1035                  * pointer.
1036                  */
1037                 if (tail_page == cpu_buffer->tail_page) {
1038                         local_set(&next_page->write, 0);
1039                         local_set(&next_page->page->commit, 0);
1040                         cpu_buffer->tail_page = next_page;
1041
1042                         /* reread the time stamp */
1043                         *ts = ring_buffer_time_stamp(cpu_buffer->cpu);
1044                         cpu_buffer->tail_page->page->time_stamp = *ts;
1045                 }
1046
1047                 /*
1048                  * The actual tail page has moved forward.
1049                  */
1050                 if (tail < BUF_PAGE_SIZE) {
1051                         /* Mark the rest of the page with padding */
1052                         event = __rb_page_index(tail_page, tail);
1053                         event->type = RINGBUF_TYPE_PADDING;
1054                 }
1055
1056                 if (tail <= BUF_PAGE_SIZE)
1057                         /* Set the write back to the previous setting */
1058                         local_set(&tail_page->write, tail);
1059
1060                 /*
1061                  * If this was a commit entry that failed,
1062                  * increment that too
1063                  */
1064                 if (tail_page == cpu_buffer->commit_page &&
1065                     tail == rb_commit_index(cpu_buffer)) {
1066                         rb_set_commit_to_write(cpu_buffer);
1067                 }
1068
1069                 __raw_spin_unlock(&cpu_buffer->lock);
1070                 local_irq_restore(flags);
1071
1072                 /* fail and let the caller try again */
1073                 return ERR_PTR(-EAGAIN);
1074         }
1075
1076         /* We reserved something on the buffer */
1077
1078         if (RB_WARN_ON(cpu_buffer, write > BUF_PAGE_SIZE))
1079                 return NULL;
1080
1081         event = __rb_page_index(tail_page, tail);
1082         rb_update_event(event, type, length);
1083
1084         /*
1085          * If this is a commit and the tail is zero, then update
1086          * this page's time stamp.
1087          */
1088         if (!tail && rb_is_commit(cpu_buffer, event))
1089                 cpu_buffer->commit_page->page->time_stamp = *ts;
1090
1091         return event;
1092
1093  out_unlock:
1094         __raw_spin_unlock(&cpu_buffer->lock);
1095         local_irq_restore(flags);
1096         return NULL;
1097 }
1098
1099 static int
1100 rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer,
1101                   u64 *ts, u64 *delta)
1102 {
1103         struct ring_buffer_event *event;
1104         static int once;
1105         int ret;
1106
1107         if (unlikely(*delta > (1ULL << 59) && !once++)) {
1108                 printk(KERN_WARNING "Delta way too big! %llu"
1109                        " ts=%llu write stamp = %llu\n",
1110                        (unsigned long long)*delta,
1111                        (unsigned long long)*ts,
1112                        (unsigned long long)cpu_buffer->write_stamp);
1113                 WARN_ON(1);
1114         }
1115
1116         /*
1117          * The delta is too big; we need to add a
1118          * new timestamp.
1119          */
1120         event = __rb_reserve_next(cpu_buffer,
1121                                   RINGBUF_TYPE_TIME_EXTEND,
1122                                   RB_LEN_TIME_EXTEND,
1123                                   ts);
1124         if (!event)
1125                 return -EBUSY;
1126
1127         if (PTR_ERR(event) == -EAGAIN)
1128                 return -EAGAIN;
1129
1130         /* Only a committed time event can update the write stamp */
1131         if (rb_is_commit(cpu_buffer, event)) {
1132                 /*
1133                  * If this is the first on the page, then we need to
1134                  * update the page itself, and just put in a zero.
1135                  */
1136                 if (rb_event_index(event)) {
1137                         event->time_delta = *delta & TS_MASK;
1138                         event->array[0] = *delta >> TS_SHIFT;
1139                 } else {
1140                         cpu_buffer->commit_page->page->time_stamp = *ts;
1141                         event->time_delta = 0;
1142                         event->array[0] = 0;
1143                 }
1144                 cpu_buffer->write_stamp = *ts;
1145                 /* let the caller know this was the commit */
1146                 ret = 1;
1147         } else {
1148                 /* Darn, this is just wasted space */
1149                 event->time_delta = 0;
1150                 event->array[0] = 0;
1151                 ret = 0;
1152         }
1153
1154         *delta = 0;
1155
1156         return ret;
1157 }
1158
1159 static struct ring_buffer_event *
1160 rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer,
1161                       unsigned type, unsigned long length)
1162 {
1163         struct ring_buffer_event *event;
1164         u64 ts, delta;
1165         int commit = 0;
1166         int nr_loops = 0;
1167
1168  again:
1169         /*
1170          * We allow for interrupts to reenter here and do a trace.
1171          * If one does, it will cause this original code to loop
1172          * back here. Even with heavy interrupts happening, this
1173          * should only happen a few times in a row. If this happens
1174          * 1000 times in a row, there must be either an interrupt
1175          * storm or we have something buggy.
1176          * Bail!
1177          */
1178         if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))
1179                 return NULL;
1180
1181         ts = ring_buffer_time_stamp(cpu_buffer->cpu);
1182
1183         /*
1184          * Only the first commit can update the timestamp.
1185          * Yes there is a race here. If an interrupt comes in
1186          * just after the conditional and it traces too, then it
1187          * will also check the deltas. More than one timestamp may
1188          * also be made. But only the entry that did the actual
1189          * commit will be something other than zero.
1190          */
1191         if (cpu_buffer->tail_page == cpu_buffer->commit_page &&
1192             rb_page_write(cpu_buffer->tail_page) ==
1193             rb_commit_index(cpu_buffer)) {
1194
1195                 delta = ts - cpu_buffer->write_stamp;
1196
1197                 /* make sure this delta is calculated here */
1198                 barrier();
1199
1200                 /* Did the write stamp get updated already? */
1201                 if (unlikely(ts < cpu_buffer->write_stamp))
1202                         delta = 0;
1203
1204                 if (test_time_stamp(delta)) {
1205
1206                         commit = rb_add_time_stamp(cpu_buffer, &ts, &delta);
1207
1208                         if (commit == -EBUSY)
1209                                 return NULL;
1210
1211                         if (commit == -EAGAIN)
1212                                 goto again;
1213
1214                         RB_WARN_ON(cpu_buffer, commit < 0);
1215                 }
1216         } else
1217                 /* Non commits have zero deltas */
1218                 delta = 0;
1219
1220         event = __rb_reserve_next(cpu_buffer, type, length, &ts);
1221         if (PTR_ERR(event) == -EAGAIN)
1222                 goto again;
1223
1224         if (!event) {
1225                 if (unlikely(commit))
1226                         /*
1227                          * Ouch! We needed a timestamp and it was committed. But
1228                          * we didn't get our event reserved.
1229                          */
1230                         rb_set_commit_to_write(cpu_buffer);
1231                 return NULL;
1232         }
1233
1234         /*
1235          * If the timestamp was committed, make the commit our entry
1236          * now so that we will update it when needed.
1237          */
1238         if (commit)
1239                 rb_set_commit_event(cpu_buffer, event);
1240         else if (!rb_is_commit(cpu_buffer, event))
1241                 delta = 0;
1242
1243         event->time_delta = delta;
1244
1245         return event;
1246 }
1247
1248 static DEFINE_PER_CPU(int, rb_need_resched);
1249
1250 /**
1251  * ring_buffer_lock_reserve - reserve a part of the buffer
1252  * @buffer: the ring buffer to reserve from
1253  * @length: the length of the data to reserve (excluding event header)
1254  * @flags: a pointer to save the interrupt flags
1255  *
1256  * Returns a reserved event on the ring buffer to copy directly to.
1257  * The user of this interface will need to get the body to write into
1258  * and can use the ring_buffer_event_data() interface.
1259  *
1260  * The length is the length of the data needed, not the event length
1261  * which also includes the event header.
1262  *
1263  * Must be paired with ring_buffer_unlock_commit, unless NULL is returned.
1264  * If NULL is returned, then nothing has been allocated or locked.
1265  */
1266 struct ring_buffer_event *
1267 ring_buffer_lock_reserve(struct ring_buffer *buffer,
1268                          unsigned long length,
1269                          unsigned long *flags)
1270 {
1271         struct ring_buffer_per_cpu *cpu_buffer;
1272         struct ring_buffer_event *event;
1273         int cpu, resched;
1274
1275         if (ring_buffer_flags != RB_BUFFERS_ON)
1276                 return NULL;
1277
1278         if (atomic_read(&buffer->record_disabled))
1279                 return NULL;
1280
1281         /* If we are tracing schedule, we don't want to recurse */
1282         resched = ftrace_preempt_disable();
1283
1284         cpu = raw_smp_processor_id();
1285
1286         if (!cpu_isset(cpu, buffer->cpumask))
1287                 goto out;
1288
1289         cpu_buffer = buffer->buffers[cpu];
1290
1291         if (atomic_read(&cpu_buffer->record_disabled))
1292                 goto out;
1293
1294         length = rb_calculate_event_length(length);
1295         if (length > BUF_PAGE_SIZE)
1296                 goto out;
1297
1298         event = rb_reserve_next_event(cpu_buffer, RINGBUF_TYPE_DATA, length);
1299         if (!event)
1300                 goto out;
1301
1302         /*
1303          * Need to store resched state on this cpu.
1304          * Only the first needs to.
1305          */
1306
1307         if (preempt_count() == 1)
1308                 per_cpu(rb_need_resched, cpu) = resched;
1309
1310         return event;
1311
1312  out:
1313         ftrace_preempt_enable(resched);
1314         return NULL;
1315 }
1316 EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve);
1317
1318 static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
1319                       struct ring_buffer_event *event)
1320 {
1321         cpu_buffer->entries++;
1322
1323         /* Only process further if we own the commit */
1324         if (!rb_is_commit(cpu_buffer, event))
1325                 return;
1326
1327         cpu_buffer->write_stamp += event->time_delta;
1328
1329         rb_set_commit_to_write(cpu_buffer);
1330 }
1331
1332 /**
1333  * ring_buffer_unlock_commit - commit a reserved event
1334  * @buffer: The buffer to commit to
1335  * @event: The event pointer to commit.
1336  * @flags: the interrupt flags received from ring_buffer_lock_reserve.
1337  *
1338  * This commits the data to the ring buffer, and releases any locks held.
1339  *
1340  * Must be paired with ring_buffer_lock_reserve.
1341  */
1342 int ring_buffer_unlock_commit(struct ring_buffer *buffer,
1343                               struct ring_buffer_event *event,
1344                               unsigned long flags)
1345 {
1346         struct ring_buffer_per_cpu *cpu_buffer;
1347         int cpu = raw_smp_processor_id();
1348
1349         cpu_buffer = buffer->buffers[cpu];
1350
1351         rb_commit(cpu_buffer, event);
1352
1353         /*
1354          * Only the outermost commit (preempt_count() == 1) restores preemption.
1355          */
1356         if (preempt_count() == 1)
1357                 ftrace_preempt_enable(per_cpu(rb_need_resched, cpu));
1358         else
1359                 preempt_enable_no_resched_notrace();
1360
1361         return 0;
1362 }
1363 EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit);
1364
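/*
 * Editor's sketch (illustrative only): the canonical
 * reserve/copy/commit sequence using the two calls above. This mirrors
 * what ring_buffer_write() does below.
 */
#if 0
static int example_record(struct ring_buffer *buffer,
                          void *data, unsigned long len)
{
        struct ring_buffer_event *event;
        unsigned long flags;

        event = ring_buffer_lock_reserve(buffer, len, &flags);
        if (!event)
                return -EBUSY;

        memcpy(ring_buffer_event_data(event), data, len);

        return ring_buffer_unlock_commit(buffer, event, flags);
}
#endif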
1365 /**
1366  * ring_buffer_write - write data to the buffer without reserving
1367  * @buffer: The ring buffer to write to.
1368  * @length: The length of the data being written (excluding the event header)
1369  * @data: The data to write to the buffer.
1370  *
1371  * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as
1372  * one function. If you already have the data to write to the buffer, it
1373  * may be easier to simply call this function.
1374  *
1375  * Note, like ring_buffer_lock_reserve, the length is the length of the data
1376  * and not the length of the event which would hold the header.
1377  */
1378 int ring_buffer_write(struct ring_buffer *buffer,
1379                         unsigned long length,
1380                         void *data)
1381 {
1382         struct ring_buffer_per_cpu *cpu_buffer;
1383         struct ring_buffer_event *event;
1384         unsigned long event_length;
1385         void *body;
1386         int ret = -EBUSY;
1387         int cpu, resched;
1388
1389         if (ring_buffer_flags != RB_BUFFERS_ON)
1390                 return -EBUSY;
1391
1392         if (atomic_read(&buffer->record_disabled))
1393                 return -EBUSY;
1394
1395         resched = ftrace_preempt_disable();
1396
1397         cpu = raw_smp_processor_id();
1398
1399         if (!cpu_isset(cpu, buffer->cpumask))
1400                 goto out;
1401
1402         cpu_buffer = buffer->buffers[cpu];
1403
1404         if (atomic_read(&cpu_buffer->record_disabled))
1405                 goto out;
1406
1407         event_length = rb_calculate_event_length(length);
1408         event = rb_reserve_next_event(cpu_buffer,
1409                                       RINGBUF_TYPE_DATA, event_length);
1410         if (!event)
1411                 goto out;
1412
1413         body = rb_event_data(event);
1414
1415         memcpy(body, data, length);
1416
1417         rb_commit(cpu_buffer, event);
1418
1419         ret = 0;
1420  out:
1421         ftrace_preempt_enable(resched);
1422
1423         return ret;
1424 }
1425 EXPORT_SYMBOL_GPL(ring_buffer_write);
1426
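/*
 * Editor's sketch (illustrative only): a one-shot record of a scalar,
 * with no explicit reserve/commit pairing.
 */
#if 0
static int example_log_value(struct ring_buffer *buffer, int value)
{
        return ring_buffer_write(buffer, sizeof(value), &value);
}
#endif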
1427 static inline int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
1428 {
1429         struct buffer_page *reader = cpu_buffer->reader_page;
1430         struct buffer_page *head = cpu_buffer->head_page;
1431         struct buffer_page *commit = cpu_buffer->commit_page;
1432
1433         return reader->read == rb_page_commit(reader) &&
1434                 (commit == reader ||
1435                  (commit == head &&
1436                   head->read == rb_page_commit(commit)));
1437 }
1438
1439 /**
1440  * ring_buffer_record_disable - stop all writes into the buffer
1441  * @buffer: The ring buffer to stop writes to.
1442  *
1443  * This prevents all writes to the buffer. Any attempt to write
1444  * to the buffer after this will fail and return NULL.
1445  *
1446  * The caller should call synchronize_sched() after this.
1447  */
1448 void ring_buffer_record_disable(struct ring_buffer *buffer)
1449 {
1450         atomic_inc(&buffer->record_disabled);
1451 }
1452 EXPORT_SYMBOL_GPL(ring_buffer_record_disable);
1453
1454 /**
1455  * ring_buffer_record_enable - enable writes to the buffer
1456  * @buffer: The ring buffer to enable writes
1457  *
1458  * Note, multiple disables will need the same number of enables
1459  * to truly enable the writing (much like preempt_disable).
1460  */
1461 void ring_buffer_record_enable(struct ring_buffer *buffer)
1462 {
1463         atomic_dec(&buffer->record_disabled);
1464 }
1465 EXPORT_SYMBOL_GPL(ring_buffer_record_enable);
1466
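/*
 * Editor's sketch (illustrative only): quiescing writers before walking
 * the buffer, per the synchronize_sched() note in
 * ring_buffer_record_disable() above.
 */
#if 0
static void example_quiesce(struct ring_buffer *buffer)
{
        ring_buffer_record_disable(buffer);
        synchronize_sched();    /* wait out writers already in flight */
        /* ... read the buffer contents safely here ... */
        ring_buffer_record_enable(buffer);
}
#endif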
1467 /**
1468  * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
1469  * @buffer: The ring buffer to stop writes to.
1470  * @cpu: The CPU buffer to stop
1471  *
1472  * This prevents all writes to the buffer. Any attempt to write
1473  * to the buffer after this will fail and return NULL.
1474  *
1475  * The caller should call synchronize_sched() after this.
1476  */
1477 void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
1478 {
1479         struct ring_buffer_per_cpu *cpu_buffer;
1480
1481         if (!cpu_isset(cpu, buffer->cpumask))
1482                 return;
1483
1484         cpu_buffer = buffer->buffers[cpu];
1485         atomic_inc(&cpu_buffer->record_disabled);
1486 }
1487 EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu);
1488
1489 /**
1490  * ring_buffer_record_enable_cpu - enable writes to the buffer
1491  * @buffer: The ring buffer to enable writes
1492  * @cpu: The CPU to enable.
1493  *
1494  * Note, multiple disables will need the same number of enables
1495  * to truly enable the writing (much like preempt_disable).
1496  */
1497 void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
1498 {
1499         struct ring_buffer_per_cpu *cpu_buffer;
1500
1501         if (!cpu_isset(cpu, buffer->cpumask))
1502                 return;
1503
1504         cpu_buffer = buffer->buffers[cpu];
1505         atomic_dec(&cpu_buffer->record_disabled);
1506 }
1507 EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu);
1508
1509 /**
1510  * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
1511  * @buffer: The ring buffer
1512  * @cpu: The per CPU buffer to get the entries from.
1513  */
1514 unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
1515 {
1516         struct ring_buffer_per_cpu *cpu_buffer;
1517
1518         if (!cpu_isset(cpu, buffer->cpumask))
1519                 return 0;
1520
1521         cpu_buffer = buffer->buffers[cpu];
1522         return cpu_buffer->entries;
1523 }
1524 EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);
1525
1526 /**
1527  * ring_buffer_overrun_cpu - get the number of overruns in a cpu_buffer
1528  * @buffer: The ring buffer
1529  * @cpu: The per CPU buffer to get the number of overruns from
1530  */
1531 unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
1532 {
1533         struct ring_buffer_per_cpu *cpu_buffer;
1534
1535         if (!cpu_isset(cpu, buffer->cpumask))
1536                 return 0;
1537
1538         cpu_buffer = buffer->buffers[cpu];
1539         return cpu_buffer->overrun;
1540 }
1541 EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);
1542
1543 /**
1544  * ring_buffer_entries - get the number of entries in a buffer
1545  * @buffer: The ring buffer
1546  *
1547  * Returns the total number of entries in the ring buffer
1548  * (all CPU entries)
1549  */
1550 unsigned long ring_buffer_entries(struct ring_buffer *buffer)
1551 {
1552         struct ring_buffer_per_cpu *cpu_buffer;
1553         unsigned long entries = 0;
1554         int cpu;
1555
1556         /* if you care about this being correct, lock the buffer */
1557         for_each_buffer_cpu(buffer, cpu) {
1558                 cpu_buffer = buffer->buffers[cpu];
1559                 entries += cpu_buffer->entries;
1560         }
1561
1562         return entries;
1563 }
1564 EXPORT_SYMBOL_GPL(ring_buffer_entries);
1565
1566 /**
1567  * ring_buffer_overruns - get the number of overruns in the buffer
1568  * @buffer: The ring buffer
1569  *
1570  * Returns the total number of overruns in the ring buffer
1571  * (all CPU entries)
1572  */
1573 unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
1574 {
1575         struct ring_buffer_per_cpu *cpu_buffer;
1576         unsigned long overruns = 0;
1577         int cpu;
1578
1579         /* if you care about this being correct, lock the buffer */
1580         for_each_buffer_cpu(buffer, cpu) {
1581                 cpu_buffer = buffer->buffers[cpu];
1582                 overruns += cpu_buffer->overrun;
1583         }
1584
1585         return overruns;
1586 }
1587 EXPORT_SYMBOL_GPL(ring_buffer_overruns);
1588
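/*
 * Editor's sketch (illustrative only): a snapshot of the aggregate
 * counters. The values are approximate unless writers are quiesced
 * first, as the comments above note.
 */
#if 0
static void example_stats(struct ring_buffer *buffer)
{
        printk(KERN_INFO "ring buffer: %lu entries, %lu overruns\n",
               ring_buffer_entries(buffer), ring_buffer_overruns(buffer));
}
#endif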
1589 static void rb_iter_reset(struct ring_buffer_iter *iter)
1590 {
1591         struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
1592
1593         /* Iterator users are expected to have recording disabled */
1594         if (list_empty(&cpu_buffer->reader_page->list)) {
1595                 iter->head_page = cpu_buffer->head_page;
1596                 iter->head = cpu_buffer->head_page->read;
1597         } else {
1598                 iter->head_page = cpu_buffer->reader_page;
1599                 iter->head = cpu_buffer->reader_page->read;
1600         }
1601         if (iter->head)
1602                 iter->read_stamp = cpu_buffer->read_stamp;
1603         else
1604                 iter->read_stamp = iter->head_page->page->time_stamp;
1605 }
1606
1607 /**
1608  * ring_buffer_iter_reset - reset an iterator
1609  * @iter: The iterator to reset
1610  *
1611  * Resets the iterator, so that it will start from the beginning
1612  * again.
1613  */
1614 void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
1615 {
1616         struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
1617         unsigned long flags;
1618
1619         spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
1620         rb_iter_reset(iter);
1621         spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
1622 }
1623 EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);
1624
1625 /**
1626  * ring_buffer_iter_empty - check if an iterator has no more to read
1627  * @iter: The iterator to check
1628  */
1629 int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
1630 {
1631         struct ring_buffer_per_cpu *cpu_buffer;
1632
1633         cpu_buffer = iter->cpu_buffer;
1634
1635         return iter->head_page == cpu_buffer->commit_page &&
1636                 iter->head == rb_commit_index(cpu_buffer);
1637 }
1638 EXPORT_SYMBOL_GPL(ring_buffer_iter_empty);
1639
1640 static void
1641 rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
1642                      struct ring_buffer_event *event)
1643 {
1644         u64 delta;
1645
1646         switch (event->type) {
1647         case RINGBUF_TYPE_PADDING:
1648                 return;
1649
1650         case RINGBUF_TYPE_TIME_EXTEND:
1651                 delta = event->array[0];
1652                 delta <<= TS_SHIFT;
1653                 delta += event->time_delta;
1654                 cpu_buffer->read_stamp += delta;
1655                 return;
1656
1657         case RINGBUF_TYPE_TIME_STAMP:
1658                 /* FIXME: not implemented */
1659                 return;
1660
1661         case RINGBUF_TYPE_DATA:
1662                 cpu_buffer->read_stamp += event->time_delta;
1663                 return;
1664
1665         default:
1666                 BUG();
1667         }
1668         return;
1669 }
1670
1671 static void
1672 rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
1673                           struct ring_buffer_event *event)
1674 {
1675         u64 delta;
1676
1677         switch (event->type) {
1678         case RINGBUF_TYPE_PADDING:
1679                 return;
1680
1681         case RINGBUF_TYPE_TIME_EXTEND:
1682                 delta = event->array[0];
1683                 delta <<= TS_SHIFT;
1684                 delta += event->time_delta;
1685                 iter->read_stamp += delta;
1686                 return;
1687
1688         case RINGBUF_TYPE_TIME_STAMP:
1689                 /* FIXME: not implemented */
1690                 return;
1691
1692         case RINGBUF_TYPE_DATA:
1693                 iter->read_stamp += event->time_delta;
1694                 return;
1695
1696         default:
1697                 BUG();
1698         }
1699         return;
1700 }
1701
1702 static struct buffer_page *
1703 rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
1704 {
1705         struct buffer_page *reader = NULL;
1706         unsigned long flags;
1707         int nr_loops = 0;
1708
1709         local_irq_save(flags);
1710         __raw_spin_lock(&cpu_buffer->lock);
1711
1712  again:
1713         /*
1714          * This should normally only loop twice. But because the
1715          * reader inserts an empty page at the start of a read, there
1716          * is a case where we will loop three times. There should be no
1717          * reason to loop four times (that I know of).
1718          */
1719         if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) {
1720                 reader = NULL;
1721                 goto out;
1722         }
1723
1724         reader = cpu_buffer->reader_page;
1725
1726         /* If there's more to read, return this page */
1727         if (cpu_buffer->reader_page->read < rb_page_size(reader))
1728                 goto out;
1729
1730         /* Never should we have an index greater than the size */
1731         if (RB_WARN_ON(cpu_buffer,
1732                        cpu_buffer->reader_page->read > rb_page_size(reader)))
1733                 goto out;
1734
1735         /* check if we caught up to the tail */
1736         reader = NULL;
1737         if (cpu_buffer->commit_page == cpu_buffer->reader_page)
1738                 goto out;
1739
1740         /*
1741          * Splice the empty reader page into the list around the head.
1742          * Reset the reader page to size zero.
1743          */
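        /*
         * Editorial illustration: the empty reader page takes the head
         * page's place in the list; the old head page is handed to the
         * reader:
         *
         *   before:  [prev] <-> [head] <-> [next]    reader (empty, detached)
         *   after:   [prev] <-> [reader] <-> [next]  old head -> new reader
         */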
1744
1745         reader = cpu_buffer->head_page;
1746         cpu_buffer->reader_page->list.next = reader->list.next;
1747         cpu_buffer->reader_page->list.prev = reader->list.prev;
1748
1749         local_set(&cpu_buffer->reader_page->write, 0);
1750         local_set(&cpu_buffer->reader_page->page->commit, 0);
1751
1752         /* Make the reader page now replace the head */
1753         reader->list.prev->next = &cpu_buffer->reader_page->list;
1754         reader->list.next->prev = &cpu_buffer->reader_page->list;
1755
1756         /*
1757          * If the tail is on the page being taken, the head must stay at
1758          * the inserted page; otherwise advance it to the following page.
1759          */
1760         cpu_buffer->head_page = cpu_buffer->reader_page;
1761
1762         if (cpu_buffer->commit_page != reader)
1763                 rb_inc_page(cpu_buffer, &cpu_buffer->head_page);
1764
1765         /* Finally update the reader page to the new head */
1766         cpu_buffer->reader_page = reader;
1767         rb_reset_reader_page(cpu_buffer);
1768
1769         goto again;
1770
1771  out:
1772         __raw_spin_unlock(&cpu_buffer->lock);
1773         local_irq_restore(flags);
1774
1775         return reader;
1776 }
1777
1778 static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
1779 {
1780         struct ring_buffer_event *event;
1781         struct buffer_page *reader;
1782         unsigned length;
1783
1784         reader = rb_get_reader_page(cpu_buffer);
1785
1786         /* This function should not be called when buffer is empty */
1787         if (RB_WARN_ON(cpu_buffer, !reader))
1788                 return;
1789
1790         event = rb_reader_event(cpu_buffer);
1791
1792         if (event->type == RINGBUF_TYPE_DATA)
1793                 cpu_buffer->entries--;
1794
1795         rb_update_read_stamp(cpu_buffer, event);
1796
1797         length = rb_event_length(event);
1798         cpu_buffer->reader_page->read += length;
1799 }
1800
1801 static void rb_advance_iter(struct ring_buffer_iter *iter)
1802 {
1803         struct ring_buffer *buffer;
1804         struct ring_buffer_per_cpu *cpu_buffer;
1805         struct ring_buffer_event *event;
1806         unsigned length;
1807
1808         cpu_buffer = iter->cpu_buffer;
1809         buffer = cpu_buffer->buffer;
1810
1811         /*
1812          * Check if we are at the end of the buffer.
1813          */
1814         if (iter->head >= rb_page_size(iter->head_page)) {
1815                 if (RB_WARN_ON(buffer,
1816                                iter->head_page == cpu_buffer->commit_page))
1817                         return;
1818                 rb_inc_iter(iter);
1819                 return;
1820         }
1821
1822         event = rb_iter_head_event(iter);
1823
1824         length = rb_event_length(event);
1825
1826         /*
1827          * This should not be called to advance the head if we are
1828          * at the tail of the buffer.
1829          */
1830         if (RB_WARN_ON(cpu_buffer,
1831                        (iter->head_page == cpu_buffer->commit_page) &&
1832                        (iter->head + length > rb_commit_index(cpu_buffer))))
1833                 return;
1834
1835         rb_update_iter_read_stamp(iter, event);
1836
1837         iter->head += length;
1838
1839         /* check for end of page padding */
1840         if ((iter->head >= rb_page_size(iter->head_page)) &&
1841             (iter->head_page != cpu_buffer->commit_page))
1842                 rb_advance_iter(iter);
1843 }
1844
1845 static struct ring_buffer_event *
1846 rb_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
1847 {
1848         struct ring_buffer_per_cpu *cpu_buffer;
1849         struct ring_buffer_event *event;
1850         struct buffer_page *reader;
1851         int nr_loops = 0;
1852
1853         if (!cpu_isset(cpu, buffer->cpumask))
1854                 return NULL;
1855
1856         cpu_buffer = buffer->buffers[cpu];
1857
1858  again:
1859         /*
1860          * We repeat when a timestamp is encountered. It is possible
1861          * to get multiple timestamps from an interrupt entering just
1862          * as one timestamp is about to be written. The max times
1863          * that this can happen is the number of nested interrupts we
1864          * can have.  Nesting 10 deep of interrupts is clearly
1865          * an anomaly.
1866          */
1867         if (RB_WARN_ON(cpu_buffer, ++nr_loops > 10))
1868                 return NULL;
1869
1870         reader = rb_get_reader_page(cpu_buffer);
1871         if (!reader)
1872                 return NULL;
1873
1874         event = rb_reader_event(cpu_buffer);
1875
1876         switch (event->type) {
1877         case RINGBUF_TYPE_PADDING:
1878                 RB_WARN_ON(cpu_buffer, 1);
1879                 rb_advance_reader(cpu_buffer);
1880                 return NULL;
1881
1882         case RINGBUF_TYPE_TIME_EXTEND:
1883                 /* Internal data, OK to advance */
1884                 rb_advance_reader(cpu_buffer);
1885                 goto again;
1886
1887         case RINGBUF_TYPE_TIME_STAMP:
1888                 /* FIXME: not implemented */
1889                 rb_advance_reader(cpu_buffer);
1890                 goto again;
1891
1892         case RINGBUF_TYPE_DATA:
1893                 if (ts) {
1894                         *ts = cpu_buffer->read_stamp + event->time_delta;
1895                         ring_buffer_normalize_time_stamp(cpu_buffer->cpu, ts);
1896                 }
1897                 return event;
1898
1899         default:
1900                 BUG();
1901         }
1902
1903         return NULL;
1904 }
1906
1907 static struct ring_buffer_event *
1908 rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
1909 {
1910         struct ring_buffer *buffer;
1911         struct ring_buffer_per_cpu *cpu_buffer;
1912         struct ring_buffer_event *event;
1913         int nr_loops = 0;
1914
1915         if (ring_buffer_iter_empty(iter))
1916                 return NULL;
1917
1918         cpu_buffer = iter->cpu_buffer;
1919         buffer = cpu_buffer->buffer;
1920
1921  again:
1922         /*
1923          * We repeat when a timestamp is encountered. It is possible
1924          * to get multiple timestamps from an interrupt entering just
1925          * as one timestamp is about to be written. The max times
1926          * that this can happen is the number of nested interrupts we
1927          * can have. Nesting 10 deep of interrupts is clearly
1928          * an anomaly.
1929          */
1930         if (RB_WARN_ON(cpu_buffer, ++nr_loops > 10))
1931                 return NULL;
1932
1933         if (rb_per_cpu_empty(cpu_buffer))
1934                 return NULL;
1935
1936         event = rb_iter_head_event(iter);
1937
1938         switch (event->type) {
1939         case RINGBUF_TYPE_PADDING:
1940                 rb_inc_iter(iter);
1941                 goto again;
1942
1943         case RINGBUF_TYPE_TIME_EXTEND:
1944                 /* Internal data, OK to advance */
1945                 rb_advance_iter(iter);
1946                 goto again;
1947
1948         case RINGBUF_TYPE_TIME_STAMP:
1949                 /* FIXME: not implemented */
1950                 rb_advance_iter(iter);
1951                 goto again;
1952
1953         case RINGBUF_TYPE_DATA:
1954                 if (ts) {
1955                         *ts = iter->read_stamp + event->time_delta;
1956                         ring_buffer_normalize_time_stamp(cpu_buffer->cpu, ts);
1957                 }
1958                 return event;
1959
1960         default:
1961                 BUG();
1962         }
1963
1964         return NULL;
1965 }
1967
1968 /**
1969  * ring_buffer_peek - peek at the next event to be read
1970  * @buffer: The ring buffer to read
1971  * @cpu: The cpu to peek at
1972  * @ts: The timestamp counter of this event.
1973  *
1974  * This will return the event that will be read next, but does
1975  * not consume the data.
1976  */
1977 struct ring_buffer_event *
1978 ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
1979 {
1980         struct ring_buffer_per_cpu *cpu_buffer;
1981         struct ring_buffer_event *event;
1982         unsigned long flags;
1983
         if (!cpu_isset(cpu, buffer->cpumask))
                 return NULL;

         cpu_buffer = buffer->buffers[cpu];

1984         spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
1985         event = rb_buffer_peek(buffer, cpu, ts);
1986         spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
1987
1988         return event;
1989 }
EXPORT_SYMBOL_GPL(ring_buffer_peek);
1990
1991 /**
1992  * ring_buffer_iter_peek - peek at the next event to be read
1993  * @iter: The ring buffer iterator
1994  * @ts: The timestamp counter of this event.
1995  *
1996  * This will return the event that will be read next, but does
1997  * not increment the iterator.
1998  */
1999 struct ring_buffer_event *
2000 ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
2001 {
2002         struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
2003         struct ring_buffer_event *event;
2004         unsigned long flags;
2005
2006         spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2007         event = rb_iter_peek(iter, ts);
2008         spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2009
2010         return event;
2011 }
EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);
2012
2013 /**
2014  * ring_buffer_consume - return an event and consume it
2015  * @buffer: The ring buffer to get the next event from
 * @cpu: The per CPU buffer to get the next event from
 * @ts: Where to store the timestamp of the event read (may be NULL)
2016  *
2017  * Returns the next event in the ring buffer, and that event is consumed.
2018  * Meaning that sequential reads will keep returning a different event,
2019  * and will eventually empty the ring buffer if the producer is slower.
2020  */
2021 struct ring_buffer_event *
2022 ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
2023 {
2024         struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
2025         struct ring_buffer_event *event;
2026         unsigned long flags;
2027
2028         if (!cpu_isset(cpu, buffer->cpumask))
2029                 return NULL;
2030
2031         spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2032
2033         event = rb_buffer_peek(buffer, cpu, ts);
2034         if (!event)
2035                 goto out;
2036
2037         rb_advance_reader(cpu_buffer);
2038
2039  out:
2040         spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2041
2042         return event;
2043 }
2044 EXPORT_SYMBOL_GPL(ring_buffer_consume);
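
/*
 * Example (illustrative sketch): draining one CPU with the consuming
 * interface. "my_buffer" is a hypothetical buffer created elsewhere with
 * ring_buffer_alloc(), and process_event() is a made-up consumer; the
 * event accessors are the ones defined earlier in this file.
 *
 *      struct ring_buffer_event *event;
 *      u64 ts;
 *
 *      while ((event = ring_buffer_consume(my_buffer, cpu, &ts)))
 *              process_event(ring_buffer_event_data(event),
 *                            ring_buffer_event_length(event), ts);
 */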
2045
2046 /**
2047  * ring_buffer_read_start - start a non consuming read of the buffer
2048  * @buffer: The ring buffer to read from
2049  * @cpu: The cpu buffer to iterate over
2050  *
2051  * This starts up an iteration through the buffer. It also disables
2052  * the recording to the buffer until the reading is finished.
2053  * This prevents the reading from being corrupted. This is not
2054  * a consuming read, so a producer is not expected.
2055  *
2056  * Must be paired with ring_buffer_read_finish.
2057  */
2058 struct ring_buffer_iter *
2059 ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
2060 {
2061         struct ring_buffer_per_cpu *cpu_buffer;
2062         struct ring_buffer_iter *iter;
2063         unsigned long flags;
2064
2065         if (!cpu_isset(cpu, buffer->cpumask))
2066                 return NULL;
2067
2068         iter = kmalloc(sizeof(*iter), GFP_KERNEL);
2069         if (!iter)
2070                 return NULL;
2071
2072         cpu_buffer = buffer->buffers[cpu];
2073
2074         iter->cpu_buffer = cpu_buffer;
2075
2076         atomic_inc(&cpu_buffer->record_disabled);
2077         synchronize_sched();
2078
2079         spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2080         __raw_spin_lock(&cpu_buffer->lock);
2081         rb_iter_reset(iter);
2082         __raw_spin_unlock(&cpu_buffer->lock);
2083         spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2084
2085         return iter;
2086 }
2087 EXPORT_SYMBOL_GPL(ring_buffer_read_start);
2088
2089 /**
2090  * ring_buffer_read_finish - finish reading the iterator of the buffer
2091  * @iter: The iterator retrieved by ring_buffer_read_start
2092  *
2093  * This re-enables the recording to the buffer, and frees the
2094  * iterator.
2095  */
2096 void
2097 ring_buffer_read_finish(struct ring_buffer_iter *iter)
2098 {
2099         struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
2100
2101         atomic_dec(&cpu_buffer->record_disabled);
2102         kfree(iter);
2103 }
2104 EXPORT_SYMBOL_GPL(ring_buffer_read_finish);
2105
2106 /**
2107  * ring_buffer_read - read the next item in the ring buffer by the iterator
2108  * @iter: The ring buffer iterator
2109  * @ts: The time stamp of the event read.
2110  *
2111  * This reads the next event in the ring buffer and increments the iterator.
2112  */
2113 struct ring_buffer_event *
2114 ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
2115 {
2116         struct ring_buffer_event *event;
2117         struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
2118         unsigned long flags;
2119
2120         spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2121         event = rb_iter_peek(iter, ts);
2122         if (!event)
2123                 goto out;
2124
2125         rb_advance_iter(iter);
2126  out:
2127         spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2128
2129         return event;
2130 }
2131 EXPORT_SYMBOL_GPL(ring_buffer_read);
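
/*
 * Example (illustrative sketch): a non-consuming dump of one CPU buffer
 * with the iterator interface. Recording to that CPU buffer is disabled
 * for the whole read, so this suits post-mortem style dumps.
 * "my_buffer" is a hypothetical buffer and print_event() a made-up
 * pretty printer.
 *
 *      struct ring_buffer_iter *iter;
 *      struct ring_buffer_event *event;
 *      u64 ts;
 *
 *      iter = ring_buffer_read_start(my_buffer, cpu);
 *      if (!iter)
 *              return;
 *      while ((event = ring_buffer_read(iter, &ts)))
 *              print_event(event, ts);
 *      ring_buffer_read_finish(iter);
 *
 * The events stay in the buffer; a later consuming read still sees them.
 */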
2132
2133 /**
2134  * ring_buffer_size - return the size of the ring buffer (in bytes)
2135  * @buffer: The ring buffer.
2136  */
2137 unsigned long ring_buffer_size(struct ring_buffer *buffer)
2138 {
2139         return BUF_PAGE_SIZE * buffer->pages;
2140 }
2141 EXPORT_SYMBOL_GPL(ring_buffer_size);
2142
2143 static void
2144 rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
2145 {
2146         cpu_buffer->head_page
2147                 = list_entry(cpu_buffer->pages.next, struct buffer_page, list);
2148         local_set(&cpu_buffer->head_page->write, 0);
2149         local_set(&cpu_buffer->head_page->page->commit, 0);
2150
2151         cpu_buffer->head_page->read = 0;
2152
2153         cpu_buffer->tail_page = cpu_buffer->head_page;
2154         cpu_buffer->commit_page = cpu_buffer->head_page;
2155
2156         INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
2157         local_set(&cpu_buffer->reader_page->write, 0);
2158         local_set(&cpu_buffer->reader_page->page->commit, 0);
2159         cpu_buffer->reader_page->read = 0;
2160
2161         cpu_buffer->overrun = 0;
2162         cpu_buffer->entries = 0;
2163 }
2164
2165 /**
2166  * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer
2167  * @buffer: The ring buffer to reset a per cpu buffer of
2168  * @cpu: The CPU buffer to be reset
2169  */
2170 void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
2171 {
2172         struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
2173         unsigned long flags;
2174
2175         if (!cpu_isset(cpu, buffer->cpumask))
2176                 return;
2177
2178         spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2179
2180         __raw_spin_lock(&cpu_buffer->lock);
2181
2182         rb_reset_cpu(cpu_buffer);
2183
2184         __raw_spin_unlock(&cpu_buffer->lock);
2185
2186         spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2187 }
2188 EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);
2189
2190 /**
2191  * ring_buffer_reset - reset a ring buffer
2192  * @buffer: The ring buffer to reset all cpu buffers
2193  */
2194 void ring_buffer_reset(struct ring_buffer *buffer)
2195 {
2196         int cpu;
2197
2198         for_each_buffer_cpu(buffer, cpu)
2199                 ring_buffer_reset_cpu(buffer, cpu);
2200 }
2201 EXPORT_SYMBOL_GPL(ring_buffer_reset);
2202
2203 /**
2204  * ring_buffer_empty - is the ring buffer empty?
2205  * @buffer: The ring buffer to test
2206  */
2207 int ring_buffer_empty(struct ring_buffer *buffer)
2208 {
2209         struct ring_buffer_per_cpu *cpu_buffer;
2210         int cpu;
2211
2212         /* yes this is racy, but if you don't like the race, lock the buffer */
2213         for_each_buffer_cpu(buffer, cpu) {
2214                 cpu_buffer = buffer->buffers[cpu];
2215                 if (!rb_per_cpu_empty(cpu_buffer))
2216                         return 0;
2217         }
2218         return 1;
2219 }
2220 EXPORT_SYMBOL_GPL(ring_buffer_empty);
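
/*
 * Example (illustrative sketch): the ring buffer never calls wake_up
 * itself (see the note in ring_buffer_read_page below), so a reader
 * with nothing better to do can poll. "my_buffer" is hypothetical.
 *
 *      while (ring_buffer_empty(my_buffer))
 *              msleep(10);
 */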
2221
2222 /**
2223  * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
2224  * @buffer: The ring buffer
2225  * @cpu: The CPU buffer to test
2226  */
2227 int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
2228 {
2229         struct ring_buffer_per_cpu *cpu_buffer;
2230
2231         if (!cpu_isset(cpu, buffer->cpumask))
2232                 return 1;
2233
2234         cpu_buffer = buffer->buffers[cpu];
2235         return rb_per_cpu_empty(cpu_buffer);
2236 }
2237 EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu);
2238
2239 /**
2240  * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
2241  * @buffer_a: One buffer to swap with
2242  * @buffer_b: The other buffer to swap with
2243  *
2244  * This function is useful for tracers that want to take a "snapshot"
2245  * of a CPU buffer and have another backup buffer lying around.
2246  * It is expected that the tracer handles the cpu buffer not being
2247  * used at the moment.
2248  */
2249 int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
2250                          struct ring_buffer *buffer_b, int cpu)
2251 {
2252         struct ring_buffer_per_cpu *cpu_buffer_a;
2253         struct ring_buffer_per_cpu *cpu_buffer_b;
2254
2255         if (!cpu_isset(cpu, buffer_a->cpumask) ||
2256             !cpu_isset(cpu, buffer_b->cpumask))
2257                 return -EINVAL;
2258
2259         /* At least make sure the two buffers are somewhat the same */
2260         if (buffer_a->pages != buffer_b->pages)
2261                 return -EINVAL;
2262
2263         cpu_buffer_a = buffer_a->buffers[cpu];
2264         cpu_buffer_b = buffer_b->buffers[cpu];
2265
2266         /*
2267          * We can't do a synchronize_sched here because this
2268          * function can be called in atomic context.
2269          * Normally this will be called from the same CPU as cpu.
2270          * If not it's up to the caller to protect this.
2271          */
2272         atomic_inc(&cpu_buffer_a->record_disabled);
2273         atomic_inc(&cpu_buffer_b->record_disabled);
2274
2275         buffer_a->buffers[cpu] = cpu_buffer_b;
2276         buffer_b->buffers[cpu] = cpu_buffer_a;
2277
2278         cpu_buffer_b->buffer = buffer_a;
2279         cpu_buffer_a->buffer = buffer_b;
2280
2281         atomic_dec(&cpu_buffer_a->record_disabled);
2282         atomic_dec(&cpu_buffer_b->record_disabled);
2283
2284         return 0;
2285 }
2286 EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
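
/*
 * Example (illustrative sketch): taking the "snapshot" described above.
 * "live" and "spare" are hypothetical buffers of the same size, both
 * created with ring_buffer_alloc(); after a successful swap, "spare"
 * holds the frozen per CPU data while "live" keeps recording.
 * dump_cpu_buffer() is a made-up reader.
 *
 *      if (ring_buffer_swap_cpu(live, spare, cpu) == 0)
 *              dump_cpu_buffer(spare, cpu);
 */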
2287
2288 static void rb_remove_entries(struct ring_buffer_per_cpu *cpu_buffer,
2289                               struct buffer_data_page *bpage)
2290 {
2291         struct ring_buffer_event *event;
2292         unsigned long head;
2293
2294         __raw_spin_lock(&cpu_buffer->lock);
2295         for (head = 0; head < local_read(&bpage->commit);
2296              head += rb_event_length(event)) {
2297
2298                 event = __rb_data_page_index(bpage, head);
2299                 if (RB_WARN_ON(cpu_buffer, rb_null_event(event)))
2300                         break;  /* don't return with cpu_buffer->lock held */
2301                 /* Only count data entries */
2302                 if (event->type != RINGBUF_TYPE_DATA)
2303                         continue;
2304                 cpu_buffer->entries--;
2305         }
2306         __raw_spin_unlock(&cpu_buffer->lock);
2307 }
2308
2309 /**
2310  * ring_buffer_alloc_read_page - allocate a page to read from buffer
2311  * @buffer: the buffer to allocate for.
2312  *
2313  * This function is used in conjunction with ring_buffer_read_page.
2314  * When reading a full page from the ring buffer, these functions
2315  * can be used to speed up the process. The calling function should
2316  * allocate a few pages first with this function. Then when it
2317  * needs to get pages from the ring buffer, it passes the result
2318  * of this function into ring_buffer_read_page, which will swap
2319  * the page that was allocated, with the read page of the buffer.
2320  *
2321  * Returns:
2322  *  The page allocated, or NULL on error.
2323  */
2324 void *ring_buffer_alloc_read_page(struct ring_buffer *buffer)
2325 {
2326         unsigned long addr;
2327         struct buffer_data_page *bpage;
2328
2329         addr = __get_free_page(GFP_KERNEL);
2330         if (!addr)
2331                 return NULL;
2332
2333         bpage = (void *)addr;
2334
2335         return bpage;
2336 }
2337
2338 /**
2339  * ring_buffer_free_read_page - free an allocated read page
2340  * @buffer: the buffer the page was allocated for
2341  * @data: the page to free
2342  *
2343  * Free a page allocated from ring_buffer_alloc_read_page.
2344  */
2345 void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data)
2346 {
2347         free_page((unsigned long)data);
2348 }
2349
2350 /**
2351  * ring_buffer_read_page - extract a page from the ring buffer
2352  * @buffer: buffer to extract from
2353  * @data_page: the page to use allocated from ring_buffer_alloc_read_page
2354  * @cpu: the cpu of the buffer to extract
2355  * @full: should the extraction only happen when the page is full.
2356  *
2357  * This function will pull out a page from the ring buffer and consume it.
2358  * @data_page must be the address of the variable that was returned
2359  * from ring_buffer_alloc_read_page. This is because the page might be used
2360  * to swap with a page in the ring buffer.
2361  *
2362  * for example:
2363  *      rpage = ring_buffer_alloc_read_page(buffer);
2364  *      if (!rpage)
2365  *              return error;
2366  *      ret = ring_buffer_read_page(buffer, &rpage, cpu, 0);
2367  *      if (ret)
2368  *              process_page(rpage);
 *      ring_buffer_free_read_page(buffer, rpage);
2369  *
2370  * When @full is set, the function will not return 1 unless
2371  * the writer is off the reader page.
2372  *
2373  * Note: it is up to the calling functions to handle sleeps and wakeups.
2374  *  The ring buffer can be used anywhere in the kernel and can not
2375  *  blindly call wake_up. The layer that uses the ring buffer must be
2376  *  responsible for that.
2377  *
2378  * Returns:
2379  *  1 if data has been transferred
2380  *  0 if no data has been transferred.
2381  */
2382 int ring_buffer_read_page(struct ring_buffer *buffer,
2383                             void **data_page, int cpu, int full)
2384 {
2385         struct ring_buffer_per_cpu *cpu_buffer;
2386         struct ring_buffer_event *event;
2387         struct buffer_data_page *bpage;
2388         unsigned long flags;
2389         int ret = 0;
2390
         if (!cpu_isset(cpu, buffer->cpumask))
                 return 0;

2391         if (!data_page)
2392                 return 0;
2393
2394         bpage = *data_page;
2395         if (!bpage)
2396                 return 0;
2397
         cpu_buffer = buffer->buffers[cpu];

2398         spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2399
2400         /*
2401          * rb_buffer_peek will get the next ring buffer if
2402          * the current reader page is empty.
2403          */
2404         event = rb_buffer_peek(buffer, cpu, NULL);
2405         if (!event)
2406                 goto out;
2407
2408         /* check for data */
2409         if (!local_read(&cpu_buffer->reader_page->page->commit))
2410                 goto out;
2411         /*
2412          * If the writer is already off of the read page, then simply
2413          * switch the read page with the given page. Otherwise
2414          * we need to copy the data from the reader to the writer.
2415          */
2416         if (cpu_buffer->reader_page == cpu_buffer->commit_page) {
                 struct buffer_data_page *rpage = cpu_buffer->reader_page->page;
2417                 unsigned int read = cpu_buffer->reader_page->read;
                 unsigned int commit = local_read(&rpage->commit);
2418
2419                 if (full)
2420                         goto out;
2421                 /* The writer is still on the reader page, we must copy */
                 memcpy(bpage->data, rpage->data + read, commit - read);

                 /* tell the caller (and rb_remove_entries) how much was copied */
                 local_set(&bpage->commit, commit - read);
                 bpage->time_stamp = rpage->time_stamp;
2426
2427                 /* consume what was read */
                 cpu_buffer->reader_page->read = commit;
2429
2430         } else {
2431                 /* swap the pages */
2432                 rb_init_page(bpage);
2433                 bpage = cpu_buffer->reader_page->page;
2434                 cpu_buffer->reader_page->page = *data_page;
2435                 cpu_buffer->reader_page->read = 0;
2436                 *data_page = bpage;
2437         }
2438         ret = 1;
2439
2440         /* update the entry counter */
2441         rb_remove_entries(cpu_buffer, bpage);
2442  out:
2443         spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2444
2445         return ret;
2446 }
2447
2448 static ssize_t
2449 rb_simple_read(struct file *filp, char __user *ubuf,
2450                size_t cnt, loff_t *ppos)
2451 {
2452         long *p = filp->private_data;
2453         char buf[64];
2454         int r;
2455
2456         if (test_bit(RB_BUFFERS_DISABLED_BIT, p))
2457                 r = sprintf(buf, "permanently disabled\n");
2458         else
2459                 r = sprintf(buf, "%d\n", test_bit(RB_BUFFERS_ON_BIT, p));
2460
2461         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
2462 }
2463
2464 static ssize_t
2465 rb_simple_write(struct file *filp, const char __user *ubuf,
2466                 size_t cnt, loff_t *ppos)
2467 {
2468         long *p = filp->private_data;
2469         char buf[64];
2470         long val;
2471         int ret;
2472
2473         if (cnt >= sizeof(buf))
2474                 return -EINVAL;
2475
2476         if (copy_from_user(&buf, ubuf, cnt))
2477                 return -EFAULT;
2478
2479         buf[cnt] = 0;
2480
2481         ret = strict_strtoul(buf, 10, &val);
2482         if (ret < 0)
2483                 return ret;
2484
2485         if (val)
2486                 set_bit(RB_BUFFERS_ON_BIT, p);
2487         else
2488                 clear_bit(RB_BUFFERS_ON_BIT, p);
2489
2490         (*ppos)++;
2491
2492         return cnt;
2493 }
2494
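/*
 * Debugfs control file ("tracing_on"): reading returns "1" or "0" for
 * the RB_BUFFERS_ON bit, or "permanently disabled" once
 * tracing_off_permanent() has been called.  Writing a nonzero value
 * sets the bit; writing zero clears it.
 */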
2495 static const struct file_operations rb_simple_fops = {
2496         .open           = tracing_open_generic,
2497         .read           = rb_simple_read,
2498         .write          = rb_simple_write,
2499 };
2500
2501
2502 static __init int rb_init_debugfs(void)
2503 {
2504         struct dentry *d_tracer;
2505         struct dentry *entry;
2506
2507         d_tracer = tracing_init_dentry();
2508
2509         entry = debugfs_create_file("tracing_on", 0644, d_tracer,
2510                                     &ring_buffer_flags, &rb_simple_fops);
2511         if (!entry)
2512                 pr_warning("Could not create debugfs 'tracing_on' entry\n");
2513
2514         return 0;
2515 }
2516
2517 fs_initcall(rb_init_debugfs);