/*
 * Copyright (C) 2004 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 */
#include <errno.h>
#include <sched.h>
#include <signal.h>
#include <time.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <asm/unistd.h>
#include "kern_constants.h"
18 struct aio_thread_req {
21 unsigned long long offset;
24 struct aio_context *aio;
27 #if defined(HAVE_AIO_ABI)
28 #include <linux/aio_abi.h>
/*
 * If we have the headers, we are going to build with AIO enabled.
 * If we don't have aio in libc, we define the necessary stubs here.
 */
#if !defined(HAVE_AIO_LIBC)

/*
 * Raw-syscall stubs for the three Linux AIO entry points, used when libc
 * does not provide wrappers.  Each returns the raw syscall result: >= 0 on
 * success, -1 with errno set on failure.
 */

/* Create an AIO context able to hold n in-flight requests; fills *ctxp */
static long io_setup(int n, aio_context_t *ctxp)
{
	return syscall(__NR_io_setup, n, ctxp);
}

/* Queue nr requests, given as an array of iocb pointers, on context ctx */
static long io_submit(aio_context_t ctx, long nr, struct iocb **iocbpp)
{
	return syscall(__NR_io_submit, ctx, nr, iocbpp);
}

/* Wait for between min_nr and nr completions; NULL timeout blocks forever */
static long io_getevents(aio_context_t ctx_id, long min_nr, long nr,
			 struct io_event *events, struct timespec *timeout)
{
	return syscall(__NR_io_getevents, ctx_id, min_nr, nr, events, timeout);
}

#endif
/*
 * The AIO_MMAP cases force the mmapped page into memory here
 * rather than in whatever place first touches the data.  I used
 * to do this by touching the page, but that's delicate because
 * gcc is prone to optimizing that away.  So, what's done here
 * is we read from the descriptor from which the page was
 * mapped.  The caller is required to pass an offset which is
 * inside the page that was mapped.  Thus, when the read
 * returns, we know that the page is in the page cache, and
 * that it now backs the mmapped area.
 */
67 static int do_aio(aio_context_t ctx, enum aio_type type, int fd, char *buf,
68 int len, unsigned long long offset, struct aio_context *aio)
70 struct iocb *iocbp = & ((struct iocb) {
71 .aio_data = (unsigned long) aio,
73 .aio_buf = (unsigned long) buf,
81 iocbp->aio_lio_opcode = IOCB_CMD_PREAD;
84 iocbp->aio_lio_opcode = IOCB_CMD_PWRITE;
87 iocbp->aio_lio_opcode = IOCB_CMD_PREAD;
88 iocbp->aio_buf = (unsigned long) &c;
89 iocbp->aio_nbytes = sizeof(c);
92 printk(UM_KERN_ERR "Bogus op in do_aio - %d\n", type);
96 return (io_submit(ctx, 1, &iocbp) > 0) ? 0 : -errno;
/* Initialized in an initcall and unchanged thereafter */
static aio_context_t ctx = 0;
102 static int aio_thread(void *arg)
104 struct aio_thread_reply reply;
105 struct io_event event;
106 int err, n, reply_fd;
108 signal(SIGWINCH, SIG_IGN);
111 n = io_getevents(ctx, 1, 1, &event, NULL);
115 printk(UM_KERN_ERR "aio_thread - io_getevents failed, "
116 "errno = %d\n", errno);
119 reply = ((struct aio_thread_reply)
120 { .data = (void *) (long) event.data,
122 reply_fd = ((struct aio_context *) reply.data)->reply_fd;
123 err = write(reply_fd, &reply, sizeof(reply));
124 if (err != sizeof(reply))
125 printk(UM_KERN_ERR "aio_thread - write failed, "
126 "fd = %d, err = %d\n", reply_fd, errno);
134 static int do_not_aio(struct aio_thread_req *req)
137 unsigned long long actual;
140 actual = lseek64(req->io_fd, req->offset, SEEK_SET);
141 if (actual != req->offset)
146 n = read(req->io_fd, req->buf, req->len);
149 n = write(req->io_fd, req->buf, req->len);
152 n = read(req->io_fd, &c, sizeof(c));
155 printk(UM_KERN_ERR "do_not_aio - bad request type : %d\n",
/* These are initialized in initcalls and not changed */
static int aio_req_fd_r = -1;	/* I/O thread's end of the request pipe */
static int aio_req_fd_w = -1;	/* submitters' end of the request pipe */
static int aio_pid = -1;	/* pid of the helper thread, -1 if none */
static unsigned long aio_stack;	/* stack allocated for the helper thread */
171 static int not_aio_thread(void *arg)
173 struct aio_thread_req req;
174 struct aio_thread_reply reply;
177 signal(SIGWINCH, SIG_IGN);
179 err = read(aio_req_fd_r, &req, sizeof(req));
180 if (err != sizeof(req)) {
182 printk(UM_KERN_ERR "not_aio_thread - "
183 "read failed, fd = %d, err = %d\n",
187 printk(UM_KERN_ERR "not_aio_thread - short "
188 "read, fd = %d, length = %d\n",
193 err = do_not_aio(&req);
194 reply = ((struct aio_thread_reply) { .data = req.aio,
196 err = write(req.aio->reply_fd, &reply, sizeof(reply));
197 if (err != sizeof(reply))
198 printk(UM_KERN_ERR "not_aio_thread - write failed, "
199 "fd = %d, err = %d\n", req.aio->reply_fd, errno);
205 static int init_aio_24(void)
209 err = os_pipe(fds, 1, 1);
213 aio_req_fd_w = fds[0];
214 aio_req_fd_r = fds[1];
216 err = os_set_fd_block(aio_req_fd_w, 0);
220 err = run_helper_thread(not_aio_thread, NULL,
221 CLONE_FILES | CLONE_VM | SIGCHLD, &aio_stack);
235 printk(UM_KERN_INFO "/usr/include/linux/aio_abi.h not present during "
238 printk(UM_KERN_INFO "2.6 host AIO support not used - falling back to "
244 #define DEFAULT_24_AIO 0
245 static int init_aio_26(void)
249 if (io_setup(256, &ctx)) {
251 printk(UM_KERN_ERR "aio_thread failed to initialize context, "
252 "err = %d\n", errno);
256 err = run_helper_thread(aio_thread, NULL,
257 CLONE_FILES | CLONE_VM | SIGCHLD, &aio_stack);
263 printk(UM_KERN_INFO "Using 2.6 host AIO\n");
267 static int submit_aio_26(enum aio_type type, int io_fd, char *buf, int len,
268 unsigned long long offset, struct aio_context *aio)
270 struct aio_thread_reply reply;
273 err = do_aio(ctx, type, io_fd, buf, len, offset, aio);
275 reply = ((struct aio_thread_reply) { .data = aio,
277 err = write(aio->reply_fd, &reply, sizeof(reply));
278 if (err != sizeof(reply)) {
280 printk(UM_KERN_ERR "submit_aio_26 - write failed, "
281 "fd = %d, err = %d\n", aio->reply_fd, -err);
290 #define DEFAULT_24_AIO 1
291 static int init_aio_26(void)
296 static int submit_aio_26(enum aio_type type, int io_fd, char *buf, int len,
297 unsigned long long offset, struct aio_context *aio)
303 /* Initialized in an initcall and unchanged thereafter */
304 static int aio_24 = DEFAULT_24_AIO;
306 static int __init set_aio_24(char *name, int *add)
312 __uml_setup("aio=2.4", set_aio_24,
314 " This is used to force UML to use 2.4-style AIO even when 2.6 AIO is\n"
315 " available. 2.4 AIO is a single thread that handles one request at a\n"
316 " time, synchronously. 2.6 AIO is a thread which uses the 2.6 AIO \n"
317 " interface to handle an arbitrary number of pending requests. 2.6 AIO \n"
318 " is not available in tt mode, on 2.4 hosts, or when UML is built with\n"
319 " /usr/include/linux/aio_abi.h not available. Many distributions don't\n"
320 " include aio_abi.h, so you will need to copy it from a kernel tree to\n"
321 " your /usr/include/linux in order to build an AIO-capable UML\n\n"
324 static int init_aio(void)
330 if (err && (errno == ENOSYS)) {
331 printk(UM_KERN_INFO "2.6 AIO not supported on the "
332 "host - reverting to 2.4 AIO\n");
339 return init_aio_24();
/*
 * The reason for the __initcall/__uml_exitcall asymmetry is that init_aio
 * needs to be called when the kernel is running because it calls run_helper,
 * which needs get_free_page.  exit_aio is a __uml_exitcall because the generic
 * kernel does not run __exitcalls on shutdown, and can't because many of them
 * break when called outside of module unloading.
 */
351 __initcall(init_aio);
353 static void exit_aio(void)
356 os_kill_process(aio_pid, 1);
357 free_stack(aio_stack, 0);
361 __uml_exitcall(exit_aio);
363 static int submit_aio_24(enum aio_type type, int io_fd, char *buf, int len,
364 unsigned long long offset, struct aio_context *aio)
366 struct aio_thread_req req = { .type = type,
375 err = write(aio_req_fd_w, &req, sizeof(req));
376 if (err == sizeof(req))
383 int submit_aio(enum aio_type type, int io_fd, char *buf, int len,
384 unsigned long long offset, int reply_fd,
385 struct aio_context *aio)
387 aio->reply_fd = reply_fd;
389 return submit_aio_24(type, io_fd, buf, len, offset, aio);
391 return submit_aio_26(type, io_fd, buf, len, offset, aio);