/*
 * Copyright (C) 2004 Jeff Dike (jdike@addtoit.com)
 * Licensed under the GPL
 */
#include <sys/syscall.h>
struct aio_thread_req {
        enum aio_type type;
        int io_fd;
        unsigned long long offset;
        char *buf;
        int len;
        struct aio_context *aio;
};
static int aio_req_fd_r = -1;
static int aio_req_fd_w = -1;
#if defined(HAVE_AIO_ABI)
#include <linux/aio_abi.h>
/* If we have the headers, we are going to build with AIO enabled.
 * If we don't have aio in libc, we define the necessary stubs here.
 */
#if !defined(HAVE_AIO_LIBC)
static long io_setup(int n, aio_context_t *ctxp)
{
        return syscall(__NR_io_setup, n, ctxp);
}

static long io_submit(aio_context_t ctx, long nr, struct iocb **iocbpp)
{
        return syscall(__NR_io_submit, ctx, nr, iocbpp);
}

static long io_getevents(aio_context_t ctx_id, long min_nr, long nr,
                         struct io_event *events, struct timespec *timeout)
{
        return syscall(__NR_io_getevents, ctx_id, min_nr, nr, events, timeout);
}
#endif

/* The AIO_MMAP cases force the mmapped page into memory here
 * rather than in whatever place first touches the data.  I used
 * to do this by touching the page, but that's delicate because
 * gcc is prone to optimizing that away.  So, what's done here
 * is to read from the descriptor from which the page was
 * mapped.  The caller is required to pass an offset which is
 * inside the page that was mapped.  Thus, when the read
 * returns, we know that the page is in the page cache and
 * that it now backs the mmapped area.
 */
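/* Illustrative sketch (not part of the original file) of one way a caller
 * could honor the contract above.  submit_aio() is declared later in this
 * file; fd, page_size, page_offset, reply_fd and aio are hypothetical, and
 * in UML the mapping itself would normally already exist elsewhere.
 *
 *      void *addr = mmap(NULL, page_size, PROT_READ, MAP_SHARED, fd,
 *                        page_offset);
 *      if(addr != MAP_FAILED)
 *              // Any offset inside the mapped page will do - do_aio() reads
 *              // one byte there, pulling the page into the page cache.
 *              submit_aio(AIO_MMAP, fd, NULL, 0, page_offset, reply_fd, aio);
 */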
static int do_aio(aio_context_t ctx, enum aio_type type, int fd, char *buf,
                  int len, unsigned long long offset, struct aio_context *aio)
{
        struct iocb iocb, *iocbp = &iocb;
        char c;
        int err;
        iocb = ((struct iocb) { .aio_data      = (unsigned long) aio,
                                .aio_fildes    = fd,
                                .aio_buf       = (unsigned long) buf,
                                .aio_nbytes    = len,
                                .aio_offset    = offset,
                                .aio_reserved3 = 0 });
        switch(type){
        case AIO_READ:
                iocb.aio_lio_opcode = IOCB_CMD_PREAD;
                err = io_submit(ctx, 1, &iocbp);
                break;
        case AIO_WRITE:
                iocb.aio_lio_opcode = IOCB_CMD_PWRITE;
                err = io_submit(ctx, 1, &iocbp);
                break;
        case AIO_MMAP:
                iocb.aio_lio_opcode = IOCB_CMD_PREAD;
                iocb.aio_buf = (unsigned long) &c;
                iocb.aio_nbytes = sizeof(c);
                err = io_submit(ctx, 1, &iocbp);
                break;
        default:
                printk("Bogus op in do_aio - %d\n", type);
                err = -EINVAL;
        }
static aio_context_t ctx = 0;
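/* Event loop for the 2.6 host AIO path: waits for completions with
 * io_getevents() and reports each one by writing a struct aio_thread_reply
 * to the submitter's reply_fd, recovered from the aio_data pointer stored
 * in the iocb by do_aio(). */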
static int aio_thread(void *arg)
{
        struct aio_thread_reply reply;
        struct io_event event;
        int err, n, reply_fd;

        signal(SIGWINCH, SIG_IGN);
        n = io_getevents(ctx, 1, 1, &event, NULL);
        if(n < 0){
                printk("aio_thread - io_getevents failed, "
                       "errno = %d\n", errno);
        }
        else {
                reply = ((struct aio_thread_reply)
                         { .data = (void *) (long) event.data,
                           .err  = event.res });
                reply_fd = ((struct aio_context *) reply.data)->reply_fd;
                err = os_write_file(reply_fd, &reply, sizeof(reply));
                if(err != sizeof(reply))
                        printk("aio_thread - write failed, fd = %d, "
                               "err = %d\n", reply_fd, -err);
        }
#endif

/* Fallback request handler used when 2.6 host AIO is unavailable or has been
 * disabled: each request type is emulated synchronously with a seek followed
 * by an ordinary read or write (a one-byte read for the mmap case), switching
 * on req->type just as do_aio() switches on type. */
static int do_not_aio(struct aio_thread_req *req)
        err = os_seek_file(req->io_fd, req->offset);
        err = os_read_file(req->io_fd, req->buf, req->len);
        err = os_seek_file(req->io_fd, req->offset);
        err = os_write_file(req->io_fd, req->buf, req->len);
        err = os_seek_file(req->io_fd, req->offset);
        err = os_read_file(req->io_fd, &c, sizeof(c));
        printk("do_not_aio - bad request type : %d\n", req->type);
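/* Request loop for the fallback path: reads aio_thread_req structures from
 * the request pipe, services each one synchronously with do_not_aio() and
 * writes the result to the requester's reply_fd. */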
static int not_aio_thread(void *arg)
{
        struct aio_thread_req req;
        struct aio_thread_reply reply;
        int err;

        signal(SIGWINCH, SIG_IGN);
        err = os_read_file(aio_req_fd_r, &req, sizeof(req));
        if(err != sizeof(req)){
                if(err < 0)
                        printk("not_aio_thread - read failed, "
                               "fd = %d, err = %d\n", aio_req_fd_r,
                               -err);
                else printk("not_aio_thread - short read, fd = %d, "
                            "length = %d\n", aio_req_fd_r, err);
        }
        else {
                err = do_not_aio(&req);
                reply = ((struct aio_thread_reply) { .data = req.aio,
                                                     .err  = err });
                err = os_write_file(req.aio->reply_fd, &reply, sizeof(reply));
                if(err != sizeof(reply))
                        printk("not_aio_thread - write failed, fd = %d, "
                               "err = %d\n", req.aio->reply_fd, -err);
        }
static int aio_pid = -1;
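/* Set up the 2.4-style fallback: create the request pipe and start
 * not_aio_thread() to service the requests that submit_aio_24() writes
 * into it. */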
static int init_aio_24(void)
{
        unsigned long stack;
        int fds[2], err;

        err = os_pipe(fds, 1, 1);
        aio_req_fd_w = fds[0];
        aio_req_fd_r = fds[1];
        err = run_helper_thread(not_aio_thread, NULL,
                                CLONE_FILES | CLONE_VM | SIGCHLD, &stack, 0);
        if(err < 0){
                os_close_file(fds[0]);
                os_close_file(fds[1]);
        }
        else aio_pid = err;

#ifndef HAVE_AIO_ABI
        printk("/usr/include/linux/aio_abi.h not present during build\n");
#endif
        printk("2.6 host AIO support not used - falling back to I/O "
               "thread\n");
#ifdef HAVE_AIO_ABI
#define DEFAULT_24_AIO 0

static int init_aio_26(void)
{
        unsigned long stack;
        int err;

        if(io_setup(256, &ctx)){
                printk("aio_thread failed to initialize context, err = %d\n",
                       errno);
                return -errno;
        }

        err = run_helper_thread(aio_thread, NULL,
                                CLONE_FILES | CLONE_VM | SIGCHLD, &stack, 0);

        printk("Using 2.6 host AIO\n");
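/* Submit a request to the host's 2.6 AIO interface.  If the submission
 * itself fails, a reply carrying the error is written to reply_fd here, so
 * the caller sees every outcome through the same reply channel. */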
static int submit_aio_26(enum aio_type type, int io_fd, char *buf, int len,
                         unsigned long long offset, struct aio_context *aio)
{
        struct aio_thread_reply reply;
        int err;

        err = do_aio(ctx, type, io_fd, buf, len, offset, aio);
        if(err){
                reply = ((struct aio_thread_reply) { .data = aio,
                                                     .err  = err });
                err = os_write_file(aio->reply_fd, &reply, sizeof(reply));
                if(err != sizeof(reply))
                        printk("submit_aio_26 - write failed, "
                               "fd = %d, err = %d\n", aio->reply_fd, -err);
                else err = 0;
        }

        return err;
}
#else
#define DEFAULT_24_AIO 1

/* Stubs used when linux/aio_abi.h is not available at build time. */
static int init_aio_26(void)
{
        return -ENOSYS;
}

static int submit_aio_26(enum aio_type type, int io_fd, char *buf, int len,
                         unsigned long long offset, struct aio_context *aio)
{
        return -ENOSYS;
}
#endif
static int aio_24 = DEFAULT_24_AIO;

static int __init set_aio_24(char *name, int *add)
{
        aio_24 = 1;
        return 0;
}

__uml_setup("aio=2.4", set_aio_24,
"    This is used to force UML to use 2.4-style AIO even when 2.6 AIO is\n"
"    available.  2.4 AIO is a single thread that handles one request at a\n"
"    time, synchronously.  2.6 AIO is a thread which uses the 2.6 AIO\n"
"    interface to handle an arbitrary number of pending requests.  2.6 AIO\n"
"    is not available in tt mode, on 2.4 hosts, or when UML is built without\n"
"    /usr/include/linux/aio_abi.h.  Many distributions don't include\n"
"    aio_abi.h, so you will need to copy it from a kernel tree to your\n"
"    /usr/include/linux in order to build an AIO-capable UML.\n\n"
);
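/* Illustrative example (not from the original file): the switch is passed on
 * the UML command line, e.g. "linux ubd0=root_fs mem=128M aio=2.4"; the
 * device and memory arguments are hypothetical, only aio=2.4 matters here. */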
static int init_aio(void)
{
        int err;

        CHOOSE_MODE(({ if(!aio_24){
                        printk("Disabling 2.6 AIO in tt mode\n");
                        aio_24 = 1;
                } }), (void) 0);

        if(!aio_24){
                err = init_aio_26();
                if(err && (errno == ENOSYS)){
                        printk("2.6 AIO not supported on the host - "
                               "reverting to 2.4 AIO\n");
                        aio_24 = 1;
                }
                else return err;
        }

        return init_aio_24();
}
/* The reason for the __initcall/__uml_exitcall asymmetry is that init_aio
 * needs to be called when the kernel is running because it calls
 * run_helper_thread, which needs get_free_page.  exit_aio is a __uml_exitcall
 * because the generic kernel does not run __exitcalls on shutdown, and can't
 * because many of them break when called outside of module unloading.
 */
__initcall(init_aio);
static void exit_aio(void)
{
        if(aio_pid != -1)
                os_kill_process(aio_pid, 1);
}

__uml_exitcall(exit_aio);
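/* Queue a request for not_aio_thread by writing it to the request pipe;
 * the result comes back later on aio->reply_fd. */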
static int submit_aio_24(enum aio_type type, int io_fd, char *buf, int len,
                         unsigned long long offset, struct aio_context *aio)
{
        struct aio_thread_req req = { .type = type, .io_fd = io_fd,
                                      .offset = offset, .buf = buf,
                                      .len = len, .aio = aio };
        int err;

        err = os_write_file(aio_req_fd_w, &req, sizeof(req));
        if(err == sizeof(req))
                err = 0;

        return err;
}
int submit_aio(enum aio_type type, int io_fd, char *buf, int len,
               unsigned long long offset, int reply_fd,
               struct aio_context *aio)
{
        aio->reply_fd = reply_fd;
        if(aio_24)
                return submit_aio_24(type, io_fd, buf, len, offset, aio);
        else
                return submit_aio_26(type, io_fd, buf, len, offset, aio);
}
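/* Illustrative usage sketch (not part of the original file): both backends
 * are driven the same way from the caller's side.  The fds, io_fd, buffer,
 * buffer_len, file_offset and my_aio names are hypothetical; struct
 * aio_thread_reply's data/err layout is assumed from its use above.
 *
 *      struct aio_context my_aio;
 *      struct aio_thread_reply my_reply;
 *      int fds[2];
 *
 *      os_pipe(fds, 1, 1);
 *      submit_aio(AIO_READ, io_fd, buffer, buffer_len, file_offset,
 *                 fds[0], &my_aio);
 *      // One struct aio_thread_reply arrives on the other end of the pipe
 *      // when the request completes (or fails to be submitted).
 *      os_read_file(fds[1], &my_reply, sizeof(my_reply));
 *      // my_reply.data points back at &my_aio; my_reply.err is the result.
 */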