/*
 * Copyright (C) 2004 Jeff Dike (jdike@addtoit.com)
 * Licensed under the GPL
 */
#include <stdlib.h>
#include <unistd.h>
#include <signal.h>
#include <string.h>
#include <errno.h>
#include <sched.h>
#include <sys/syscall.h>
#include "os.h"
#include "aio.h"
#include "init.h"
#include "user.h"
#include "mode.h"
static int aio_req_fd_r = -1;
static int aio_req_fd_w = -1;
static int update_aio(struct aio_context *aio, int res)
{
	if(res < 0)
		aio->len = res;
	else if((res == 0) && (aio->type == AIO_READ)){
		/* This is the EOF case - we have hit the end of the file
		 * and it ends in a partial block, so we fill the end of
		 * the block with zeros and claim success.
		 */
		memset(aio->data, 0, aio->len);
		aio->len = 0;
	}
	else {
		/* Partial success - advance the buffer so that the
		 * remainder can be resubmitted.
		 */
		aio->data += res;
		aio->len -= res;
		aio->offset += res;
	}

	/* A non-zero return means the request is incomplete and must be
	 * resubmitted by the caller.
	 */
	return aio->len > 0;
}
#if defined(HAVE_AIO_ABI)
#include <linux/aio_abi.h>

/* If we have the headers, we are going to build with AIO enabled.
 * If we don't have aio in libc, we define the necessary stubs here.
 */

#if !defined(HAVE_AIO_LIBC)
static long io_setup(int n, aio_context_t *ctxp)
{
	return syscall(__NR_io_setup, n, ctxp);
}

static long io_submit(aio_context_t ctx, long nr, struct iocb **iocbpp)
{
	return syscall(__NR_io_submit, ctx, nr, iocbpp);
}
static long io_getevents(aio_context_t ctx_id, long min_nr, long nr,
			 struct io_event *events, struct timespec *timeout)
{
	return syscall(__NR_io_getevents, ctx_id, min_nr, nr, events, timeout);
}

#endif
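/* Illustrative sketch (not part of the original file): how the three wrappers
 * above combine for one blocking round trip - create a context, submit a
 * single read, then wait for its completion event.  The function and
 * parameter names here are hypothetical and error handling is deliberately
 * minimal.
 */
static long example_aio_round_trip(int example_fd, char *buf, size_t len)
{
	aio_context_t example_ctx = 0;
	struct iocb cb = ((struct iocb) { .aio_lio_opcode = IOCB_CMD_PREAD,
					  .aio_fildes = example_fd,
					  .aio_buf = (unsigned long) buf,
					  .aio_nbytes = len,
					  .aio_offset = 0 });
	struct iocb *cbp = &cb;
	struct io_event ev;
	long err;

	err = io_setup(1, &example_ctx);
	if(err < 0)
		return err;

	err = io_submit(example_ctx, 1, &cbp);
	if(err < 0)
		return err;

	/* Block until the one submitted request completes; ev.res is the
	 * byte count on success or a negative error code.
	 */
	err = io_getevents(example_ctx, 1, 1, &ev, NULL);
	if(err < 0)
		return err;

	return ev.res;
}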
/* The AIO_MMAP cases force the mmapped page into memory here
 * rather than in whatever place first touches the data. I used
 * to do this by touching the page, but that's delicate because
 * gcc is prone to optimizing that away. So, what's done here
 * is we read from the descriptor from which the page was
 * mapped. The caller is required to pass an offset which is
 * inside the page that was mapped. Thus, when the read
 * returns, we know that the page is in the page cache, and
 * that it now backs the mmapped area.
 */
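/* Concretely (hypothetical names): if the caller did
 *
 *	addr = mmap(NULL, PAGE_SIZE, PROT_READ, MAP_SHARED, fd, map_off);
 *
 * then an AIO_MMAP request on fd with an offset anywhere inside
 * [map_off, map_off + PAGE_SIZE) reads a single byte from that range, which
 * pulls the backing page into the page cache and therefore makes it resident
 * behind addr.
 */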
static int do_aio(aio_context_t ctx, struct aio_context *aio)
{
	struct iocb iocb, *iocbp = &iocb;
	char c;
	int err;

	iocb = ((struct iocb) { .aio_data = (unsigned long) aio,
				.aio_reqprio = 0,
				.aio_fildes = aio->fd,
				.aio_buf = (unsigned long) aio->data,
				.aio_nbytes = aio->len,
				.aio_offset = aio->offset,
				.aio_reserved1 = 0,
				.aio_reserved2 = 0,
				.aio_reserved3 = 0 });

	switch(aio->type){
	case AIO_READ:
		iocb.aio_lio_opcode = IOCB_CMD_PREAD;
		break;
	case AIO_WRITE:
		iocb.aio_lio_opcode = IOCB_CMD_PWRITE;
		break;
	case AIO_MMAP:
		/* Read one byte through the mapping's backing descriptor to
		 * force the page into the page cache.
		 */
		iocb.aio_lio_opcode = IOCB_CMD_PREAD;
		iocb.aio_buf = (unsigned long) &c;
		iocb.aio_nbytes = sizeof(c);
		break;
	default:
		printk("Bogus op in do_aio - %d\n", aio->type);
		err = -EINVAL;
		goto out;
	}

	err = io_submit(ctx, 1, &iocbp);
	if(err > 0)
		err = 0;
	else
		err = -errno;

 out:
	return err;
}
static aio_context_t ctx = 0;
static int aio_thread(void *arg)
{
	struct aio_thread_reply reply;
	struct aio_context *aio;
	struct io_event event;
	int err, n;

	signal(SIGWINCH, SIG_IGN);

	while(1){
		n = io_getevents(ctx, 1, 1, &event, NULL);
		if(n < 0){
			if(errno == EINTR)
				continue;
			printk("aio_thread - io_getevents failed, "
			       "errno = %d\n", errno);
			continue;
		}

		/* This is safe as we are just passing a pointer here. */
		aio = (struct aio_context *) (long) event.data;
		if(update_aio(aio, event.res)){
			/* Partial completion - resubmit the remainder. */
			do_aio(ctx, aio);
			continue;
		}

		reply = ((struct aio_thread_reply) { .data = aio,
						     .err = aio->len });
		err = os_write_file(aio->reply_fd, &reply,
				    sizeof(reply));
		if(err != sizeof(reply))
			printk("aio_thread - write failed, "
			       "fd = %d, err = %d\n", aio->reply_fd,
			       -err);
	}

	return 0;
}

#endif
static int do_not_aio(struct aio_context *aio)
{
	char c;
	int err;

	switch(aio->type){
	case AIO_READ:
		err = os_seek_file(aio->fd, aio->offset);
		if(err)
			goto out;

		err = os_read_file(aio->fd, aio->data, aio->len);
		break;
	case AIO_WRITE:
		err = os_seek_file(aio->fd, aio->offset);
		if(err)
			goto out;

		err = os_write_file(aio->fd, aio->data, aio->len);
		break;
	case AIO_MMAP:
		err = os_seek_file(aio->fd, aio->offset);
		if(err)
			goto out;

		/* Fault the mapped page in by reading one byte from the
		 * descriptor backing the mapping.
		 */
		err = os_read_file(aio->fd, &c, sizeof(c));
		break;
	default:
		printk("do_not_aio - bad request type : %d\n", aio->type);
		err = -EINVAL;
		break;
	}

 out:
	return err;
}
static int not_aio_thread(void *arg)
{
	struct aio_context *aio;
	struct aio_thread_reply reply;
	int err;

	signal(SIGWINCH, SIG_IGN);
	while(1){
		err = os_read_file(aio_req_fd_r, &aio, sizeof(aio));
		if(err != sizeof(aio)){
			if(err < 0)
				printk("not_aio_thread - read failed, "
				       "fd = %d, err = %d\n", aio_req_fd_r,
				       -err);
			else
				printk("not_aio_thread - short read, fd = %d, "
				       "length = %d\n", aio_req_fd_r, err);
			continue;
		}

	again:
		err = do_not_aio(aio);

		if(update_aio(aio, err))
			goto again;

		reply = ((struct aio_thread_reply) { .data = aio,
						     .err = aio->len });
		err = os_write_file(aio->reply_fd, &reply, sizeof(reply));
		if(err != sizeof(reply))
			printk("not_aio_thread - write failed, fd = %d, "
			       "err = %d\n", aio_req_fd_r, -err);
	}

	return 0;
}
static int submit_aio_24(struct aio_context *aio)
{
	int err;

	err = os_write_file(aio_req_fd_w, &aio, sizeof(aio));
	if(err == sizeof(aio))
		err = 0;

	return err;
}
static int aio_pid = -1;
static int (*submit_proc)(struct aio_context *aio);
static int init_aio_24(void)
{
	unsigned long stack;
	int fds[2], err;

	err = os_pipe(fds, 1, 1);
	if(err)
		goto out;

	aio_req_fd_w = fds[0];
	aio_req_fd_r = fds[1];
	err = run_helper_thread(not_aio_thread, NULL,
				CLONE_FILES | CLONE_VM | SIGCHLD, &stack, 0);
	if(err < 0)
		goto out_close_pipe;

	aio_pid = err;
	goto out;

 out_close_pipe:
	os_close_file(fds[0]);
	os_close_file(fds[1]);
	aio_req_fd_w = -1;
	aio_req_fd_r = -1;
 out:
#ifndef HAVE_AIO_ABI
	printk("/usr/include/linux/aio_abi.h not present during build\n");
#endif
	printk("2.6 host AIO support not used - falling back to I/O "
	       "thread\n");

	submit_proc = submit_aio_24;

	return 0;
}
#ifdef HAVE_AIO_ABI
#define DEFAULT_24_AIO 0

static int submit_aio_26(struct aio_context *aio)
{
	struct aio_thread_reply reply;
	int err;

	err = do_aio(ctx, aio);
	if(err){
		/* Submission failed, so hand the error straight back to the
		 * caller through the reply descriptor.
		 */
		reply = ((struct aio_thread_reply) { .data = aio,
						     .err = err });
		err = os_write_file(aio->reply_fd, &reply, sizeof(reply));
		if(err != sizeof(reply))
			printk("submit_aio_26 - write failed, "
			       "fd = %d, err = %d\n", aio->reply_fd, -err);
		else err = 0;
	}

	return err;
}
static int init_aio_26(void)
{
	unsigned long stack;
	int err;

	if(io_setup(256, &ctx)){
		err = -errno;
		printk("aio_thread failed to initialize context, err = %d\n",
		       errno);
		return err;
	}

	err = run_helper_thread(aio_thread, NULL,
				CLONE_FILES | CLONE_VM | SIGCHLD, &stack, 0);
	if(err < 0)
		return err;

	aio_pid = err;

	printk("Using 2.6 host AIO\n");

	submit_proc = submit_aio_26;

	return 0;
}
#else
#define DEFAULT_24_AIO 1

static int submit_aio_26(struct aio_context *aio)
{
	return -ENOSYS;
}

static int init_aio_26(void)
{
	submit_proc = submit_aio_26;

	return -ENOSYS;
}

#endif
static int aio_24 = DEFAULT_24_AIO;
static int __init set_aio_24(char *name, int *add)
{
	aio_24 = 1;
	return 0;
}
__uml_setup("aio=2.4", set_aio_24,
"aio=2.4\n"
"    This is used to force UML to use 2.4-style AIO even when 2.6 AIO is\n"
"    available. 2.4 AIO is a single thread that handles one request at a\n"
"    time, synchronously. 2.6 AIO is a thread which uses the 2.6 AIO\n"
"    interface to handle an arbitrary number of pending requests. 2.6 AIO\n"
"    is not available in tt mode, on 2.4 hosts, or when UML is built with\n"
"    /usr/include/linux/aio_abi.h not available. Many distributions don't\n"
"    include aio_abi.h, so you will need to copy it from a kernel tree to\n"
"    your /usr/include/linux in order to build an AIO-capable UML\n\n");
static int init_aio(void)
{
	int err;

	CHOOSE_MODE(({
		if(!aio_24){
			printk("Disabling 2.6 AIO in tt mode\n");
			aio_24 = 1;
		} }), (void) 0);

	if(!aio_24){
		err = init_aio_26();
		if(err && (errno == ENOSYS)){
			printk("2.6 AIO not supported on the host - "
			       "reverting to 2.4 AIO\n");
			aio_24 = 1;
		}
		else return err;
	}

	if(aio_24)
		return init_aio_24();

	return 0;
}
/* The reason for the __initcall/__uml_exitcall asymmetry is that init_aio
 * needs to be called when the kernel is running because it calls run_helper,
 * which needs get_free_page.  exit_aio is a __uml_exitcall because the
 * generic kernel does not run __exitcalls on shutdown, and can't because
 * many of them break when called outside of module unloading.
 */
__initcall(init_aio);
static void exit_aio(void)
{
	if(aio_pid != -1)
		os_kill_process(aio_pid, 1);
}

__uml_exitcall(exit_aio);
int submit_aio(struct aio_context *aio)
{
	return (*submit_proc)(aio);
}
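/* Illustrative sketch (not part of the original file): roughly how a caller
 * drives submit_aio(), assuming struct aio_context carries the fields used
 * above (type, fd, data, len, offset, reply_fd) and that struct
 * aio_thread_reply echoes the request back in .data.  The pipe handling and
 * all names local to this function are assumptions.
 */
static int example_submit_and_wait(int fd, char *buf, int len,
				   unsigned long long offset)
{
	struct aio_context aio;
	struct aio_thread_reply reply;
	int fds[2], err;

	/* The completion reply arrives on a descriptor chosen by the caller. */
	err = os_pipe(fds, 1, 0);
	if(err)
		return err;

	aio = ((struct aio_context) { .type = AIO_READ,
				      .fd = fd,
				      .data = buf,
				      .len = len,
				      .offset = offset,
				      .reply_fd = fds[1] });

	err = submit_aio(&aio);
	if(err)
		goto out;

	/* Block until the 2.4 or 2.6 backend writes the completion reply. */
	err = os_read_file(fds[0], &reply, sizeof(reply));
	if(err != sizeof(reply))
		err = (err < 0) ? err : -EINVAL;
	else err = 0;

 out:
	os_close_file(fds[0]);
	os_close_file(fds[1]);
	return err;
}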