4 * It is designed to allow debugging traces of kvm to be generated
5 * on UP / SMP machines. Each trace entry can be timestamped so that
6 * it's possible to reconstruct a chronological record of trace events.
7 * The implementation refers to blktrace kernel support.
9 * Copyright (c) 2008 Intel Corporation
10 * Copyright (C) 2006 Jens Axboe <axboe@kernel.dk>
12 * Authors: Feng(Eric) Liu, eric.e.liu@intel.com
17 #include <linux/module.h>
18 #include <linux/relay.h>
19 #include <linux/debugfs.h>
21 #include <linux/kvm_host.h>
/*
 * Tracing session states, stored in kvm_trace->trace_state:
 * RUNNING while probes fire, PAUSE when flushed but still registered,
 * CLEARUP while tearing the session down.
 */
#define KVM_TRACE_STATE_RUNNING (1 << 0)
#define KVM_TRACE_STATE_PAUSE (1 << 1)
#define KVM_TRACE_STATE_CLEARUP (1 << 2)

/* debugfs file exposing lost_records to user space (see lost_records_get) */
struct dentry *lost_file;
/* count of records dropped because a relay subbuffer was full */
atomic_t lost_records;

/* The single global tracing session; NULL until tracing is enabled. */
static struct kvm_trace *kvm_trace;
/*
 * Describes one kernel marker hooked by this module.
 * NOTE(review): only part of the definition is visible in this view;
 * initializers below suggest { name, format, cycle_in, probe_func } —
 * confirm against the full file.
 */
struct kvm_trace_probe {
	/* callback invoked when the marker fires (kvm_add_trace) */
	marker_probe_func *probe_func;
/*
 * Compute the byte size of a trace record: fixed header, plus @extra
 * payload bytes, plus the cycle-counter field when @cycle is nonzero.
 * NOTE(review): the line folding @extra into rec_size is not visible in
 * this view — confirm against the full file.
 */
static inline int calc_rec_size(int cycle, int extra)
	int rec_size = KVM_TRC_HEAD_SIZE;
	/* the += inside the ternary mutates rec_size before it is returned;
	 * confusing but equivalent to rec_size + KVM_TRC_CYCLE_SIZE */
	return cycle ? rec_size += KVM_TRC_CYCLE_SIZE : rec_size;
/*
 * Marker probe callback: build one kvm_trace_rec from the marker's
 * varargs and append it to the relay channel.
 *
 * Expected vararg order (per the "%u %p %u ..." format strings below):
 * event id (u32), vcpu pointer, extra-word count (u32), then up to
 * KVM_TRC_EXTRA_MAX extra u32 payload words.
 *
 * NOTE(review): several lines (local declarations of extra/i/size, the
 * early-return body, and the cycle/nocycle branch structure) are not
 * visible in this view.
 */
static void kvm_add_trace(void *probe_private, void *call_data,
			  const char *format, va_list *args)
	struct kvm_trace_probe *p = probe_private;
	struct kvm_trace *kt = kvm_trace;
	struct kvm_trace_rec rec;
	struct kvm_vcpu *vcpu;

	/* Drop events unless the session is actively running. */
	if (unlikely(kt->trace_state != KVM_TRACE_STATE_RUNNING))

	rec.event = va_arg(*args, u32);
	vcpu = va_arg(*args, struct kvm_vcpu *);
	rec.pid = current->tgid;	/* thread-group id of the current task */
	rec.vcpu_id = vcpu->vcpu_id;

	/* Caller-supplied extra-word count; clamp after warning once. */
	extra = va_arg(*args, u32);
	WARN_ON(!(extra <= KVM_TRC_EXTRA_MAX));
	extra = min_t(u32, extra, KVM_TRC_EXTRA_MAX);
	rec.extra_u32 = extra;

	/* Whether this probe records a timestamp (set per-probe in the table). */
	rec.cycle_in = p->cycle_in;

	/* Split the 64-bit cycle counter into two u32 halves for the record. */
	rec.u.cycle.cycle_lo = (u32)cycle;
	rec.u.cycle.cycle_hi = (u32)(cycle >> 32);

	/* Copy the extra payload words (timestamped record layout). */
	for (i = 0; i < rec.extra_u32; i++)
		rec.u.cycle.extra_u32[i] = va_arg(*args, u32);

	/* Copy the extra payload words (non-timestamped record layout). */
	for (i = 0; i < rec.extra_u32; i++)
		rec.u.nocycle.extra_u32[i] = va_arg(*args, u32);

	/* Write only the bytes actually used by this record. */
	size = calc_rec_size(rec.cycle_in, rec.extra_u32 * sizeof(u32));
	relay_write(kt->rchan, &rec, size);
/*
 * Markers hooked by this module. Fields: name, vararg format, cycle_in
 * (1 = record a timestamp), and the shared probe callback.
 * Entry/exit events are timestamped; handler events are not.
 */
static struct kvm_trace_probe kvm_trace_probes[] = {
	{ "kvm_trace_entryexit", "%u %p %u %u %u %u %u %u", 1, kvm_add_trace },
	{ "kvm_trace_handler", "%u %p %u %u %u %u %u %u", 0, kvm_add_trace },
/*
 * debugfs read callback for the "lost_records" file: report how many
 * trace records were dropped due to full subbuffers.
 */
static int lost_records_get(void *data, u64 *val)
	struct kvm_trace *kt = data;

	*val = atomic_read(&kt->lost_records);

/* Read-only attribute (no set handler), printed as an unsigned decimal. */
DEFINE_SIMPLE_ATTRIBUTE(kvm_trace_lost_ops, lost_records_get, NULL, "%llu\n");
 * The relay channel is used in "no-overwrite" mode; it keeps track of how
 * many times we encountered a full subbuffer, so the user-space app can be
 * told how many records were lost.
/*
 * Relay subbuf_start callback: called when relay switches to a new
 * subbuffer. If the channel is full (no-overwrite mode), count the
 * record about to be dropped.
 * NOTE(review): the return statements are not visible in this view.
 */
static int kvm_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
				     void *prev_subbuf, size_t prev_padding)
	struct kvm_trace *kt;

	if (!relay_buf_full(buf))

	/* Channel full: this write will be lost — account for it. */
	kt = buf->chan->private_data;
	atomic_inc(&kt->lost_records);
/*
 * Relay create_buf_file callback: expose each per-cpu relay buffer as a
 * debugfs file served by the stock relay file operations.
 * NOTE(review): "callack" is a typo for "callback"; kept because the
 * kvm_relay_callbacks initializer elsewhere references this exact name.
 */
static struct dentry *kvm_create_buf_file_callack(const char *filename,
						  struct dentry *parent,
						  struct rchan_buf *buf,
	return debugfs_create_file(filename, mode, parent, buf,
				   &relay_file_operations);
/*
 * Relay remove_buf_file callback: tear down the debugfs file created by
 * kvm_create_buf_file_callack when the channel buffer goes away.
 */
static int kvm_remove_buf_file_callback(struct dentry *dentry)
	debugfs_remove(dentry);
/* Callback table handed to relay_open() for the trace channel. */
static struct rchan_callbacks kvm_relay_callbacks = {
	.subbuf_start = kvm_subbuf_start_callback,
	.create_buf_file = kvm_create_buf_file_callack,
	.remove_buf_file = kvm_remove_buf_file_callback,
/*
 * Start a tracing session with user-supplied relay geometry: allocate
 * the session, create the lost_records debugfs file, open the relay
 * channel, register all marker probes, then mark the session RUNNING.
 *
 * NOTE(review): the error-check lines after kzalloc/debugfs/relay_open,
 * the assignment of the global kvm_trace pointer, and the full error
 * unwind path are not visible in this view — the cleanup labels near
 * the bottom are assumed to be that unwind path.
 */
static int do_kvm_trace_enable(struct kvm_user_trace_setup *kuts)
	struct kvm_trace *kt;

	/* Reject a zero-sized or zero-count buffer configuration. */
	if (!kuts->buf_size || !kuts->buf_nr)

	kt = kzalloc(sizeof(*kt), GFP_KERNEL);

	atomic_set(&kt->lost_records, 0);
	/* 0444: user space may read the lost-record count, never write it. */
	kt->lost_file = debugfs_create_file("lost_records", 0444, kvm_debugfs_dir,
					    kt, &kvm_trace_lost_ops);

	/* Per-cpu relay channel sized per the user's request. */
	kt->rchan = relay_open("trace", kvm_debugfs_dir, kuts->buf_size,
				kuts->buf_nr, &kvm_relay_callbacks, kt);

	/* Hook every marker; a single registration failure is only logged. */
	for (i = 0; i < ARRAY_SIZE(kvm_trace_probes); i++) {
		struct kvm_trace_probe *p = &kvm_trace_probes[i];

		r = marker_probe_register(p->name, p->format, p->probe_func, p);

			printk(KERN_INFO "Unable to register probe %s\n",

	kvm_trace->trace_state = KVM_TRACE_STATE_RUNNING;

	/* Error unwind: release whatever was set up before the failure. */
	debugfs_remove(kt->lost_file);

	relay_close(kt->rchan);
/*
 * ioctl helper for KVM_TRACE_ENABLE: copy the setup struct in from
 * user space and start the session.
 * NOTE(review): the copy_from_user failure check is not visible in this
 * view — confirm it returns -EFAULT on partial copy.
 */
static int kvm_trace_enable(char __user *arg)
	struct kvm_user_trace_setup kuts;

	ret = copy_from_user(&kuts, arg, sizeof(kuts));

	ret = do_kvm_trace_enable(&kuts);
/*
 * ioctl helper for KVM_TRACE_PAUSE: stop recording (probes stay
 * registered; kvm_add_trace drops events while not RUNNING) and flush
 * buffered records out to user space.
 */
static int kvm_trace_pause(void)
	struct kvm_trace *kt = kvm_trace;

	/* Only a RUNNING session can be paused. */
	if (kt->trace_state == KVM_TRACE_STATE_RUNNING) {
		kt->trace_state = KVM_TRACE_STATE_PAUSE;
		relay_flush(kt->rchan);
/*
 * Tear down the tracing session: unregister every marker probe, close
 * the relay channel, and remove the lost_records debugfs file. Safe to
 * call only for a session that is RUNNING or PAUSEd.
 * NOTE(review): the NULL check on kvm_trace and the kfree of the
 * session are not visible in this view.
 */
void kvm_trace_cleanup(void)
	struct kvm_trace *kt = kvm_trace;

	if (kt->trace_state == KVM_TRACE_STATE_RUNNING ||
	    kt->trace_state == KVM_TRACE_STATE_PAUSE) {

		/* Stop kvm_add_trace from writing while we unwind. */
		kt->trace_state = KVM_TRACE_STATE_CLEARUP;

		for (i = 0; i < ARRAY_SIZE(kvm_trace_probes); i++) {
			struct kvm_trace_probe *p = &kvm_trace_probes[i];
			marker_probe_unregister(p->name, p->probe_func, p);

		relay_close(kt->rchan);
		debugfs_remove(kt->lost_file);
254 int kvm_trace_ioctl(unsigned int ioctl, unsigned long arg)
256 void __user *argp = (void __user *)arg;
259 if (!capable(CAP_SYS_ADMIN))
263 case KVM_TRACE_ENABLE:
264 r = kvm_trace_enable(argp);
266 case KVM_TRACE_PAUSE:
267 r = kvm_trace_pause();
269 case KVM_TRACE_DISABLE: