up_read(&current->mm->mmap_sem);
if (ret < 0 && dio->blocks_available && (dio->rw & WRITE)) {
- struct page *page = ZERO_PAGE(dio->curr_user_address);
+ struct page *page = ZERO_PAGE(0);
/*
* A memory fault, but the filesystem has some outstanding
* mapped blocks. We need to use those blocks up to avoid
* leaking stale data in the file.
*/
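For context, the lines that follow in the full dio_refill_pages() (elided by this hunk) substitute the zero page for the faulting user page instead of failing the whole write; a rough sketch, from memory of the surrounding code of this era:

	/* A fault during a write with blocks already mapped: record the
	 * error, then feed the zero page so those blocks are written as
	 * zeroes rather than left holding stale data. */
	if (dio->page_errors == 0)
		dio->page_errors = ret;
	page_cache_get(page);		/* reference held in dio->pages[] */
	dio->pages[0] = page;
	dio->head = 0;
	dio->tail = 1;
	ret = 0;			/* continue with the zero page */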
/*
* Asynchronous IO callback.
*/
-static int dio_bio_end_aio(struct bio *bio, unsigned int bytes_done, int error)
+static void dio_bio_end_aio(struct bio *bio, int error)
{
struct dio *dio = bio->bi_private;
unsigned long remaining;
unsigned long flags;
- if (bio->bi_size)
- return 1;
-
/* cleanup the bio */
dio_bio_complete(dio, bio);
aio_complete(dio->iocb, ret, 0);
kfree(dio);
}
-
- return 0;
}
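This follows the block layer's new completion contract: bi_end_io is now called exactly once, when the whole bio has completed, so the partial-completion check on bio->bi_size and the int return value are gone. (The ret passed to aio_complete() above comes from the elided refcount-drop block, where the thread dropping the last reference computes ret = dio_complete(...).) For reference, the new-style bio_endio() that invokes these handlers looks roughly like this:

	/* Roughly the new bio_endio(): no bytes_done argument; the handler
	 * runs once per bio and returns nothing. */
	void bio_endio(struct bio *bio, int error)
	{
		if (error)
			clear_bit(BIO_UPTODATE, &bio->bi_flags);
		else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
			error = -EIO;

		if (bio->bi_end_io)
			bio->bi_end_io(bio, error);
	}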
/*
* During I/O bi_private points at the dio. After I/O, bi_private is used to
* implement a singly-linked list of completed BIOs, at dio->bio_list.
*/
-static int dio_bio_end_io(struct bio *bio, unsigned int bytes_done, int error)
+static void dio_bio_end_io(struct bio *bio, int error)
{
struct dio *dio = bio->bi_private;
unsigned long flags;
- if (bio->bi_size)
- return 1;
-
spin_lock_irqsave(&dio->bio_lock, flags);
bio->bi_private = dio->bio_list;
dio->bio_list = bio;
if (--dio->refcount == 1 && dio->waiter)
wake_up_process(dio->waiter);
spin_unlock_irqrestore(&dio->bio_lock, flags);
- return 0;
}
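Once a bio is on dio->bio_list, bi_private no longer points at the dio; it serves as the "next" link of the list. A minimal sketch of the submitter-side consumer (the name dio_pop_completed is made up here; the real code is dio_await_one(), which additionally sleeps while the list is empty):

	/* Sketch: pop one completed bio off dio->bio_list under bio_lock. */
	static struct bio *dio_pop_completed(struct dio *dio)
	{
		unsigned long flags;
		struct bio *bio = NULL;

		spin_lock_irqsave(&dio->bio_lock, flags);
		if (dio->bio_list) {
			bio = dio->bio_list;
			dio->bio_list = bio->bi_private;	/* next link */
		}
		spin_unlock_irqrestore(&dio->bio_lock, flags);
		return bio;
	}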
static int

/*
* Wait on and process all in-flight BIOs. This must only be called once
* all bios have been issued so that the refcount can only decrease.
* This just waits for all bios to make it through dio_bio_complete. IO
- * errors are propogated through dio->io_error and should be propogated via
+ * errors are propagated through dio->io_error and should be propagated via
* dio_complete().
*/
static void dio_await_completion(struct dio *dio)
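The function body is not shown by this hunk; for reference, it simply reaps completed bios until the refcount indicates that submission and completion have both finished, roughly:

	/* Roughly the (unchanged) body of dio_await_completion(). */
	struct bio *bio;

	do {
		bio = dio_await_one(dio);
		if (bio)
			dio_bio_complete(dio, bio);
	} while (bio);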
this_chunk_bytes = this_chunk_blocks << dio->blkbits;
- page = ZERO_PAGE(dio->curr_user_address);
+ page = ZERO_PAGE(0);
if (submit_page_section(dio, page, 0, this_chunk_bytes,
dio->next_block_for_io))
return;
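Passing a user address to ZERO_PAGE() only ever mattered on architectures that color the zero page by virtual address (MIPS, for example); on most architectures the argument is ignored outright, so ZERO_PAGE(0) is equivalent. Roughly the i386 definition of the era:

	/* Roughly the i386 <asm/pgtable.h> definition: vaddr is unused. */
	extern unsigned long empty_zero_page[1024];
	#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))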
do_holes:
/* Handle holes */
if (!buffer_mapped(map_bh)) {
- char *kaddr;
loff_t i_size_aligned;
/* AKPM: eargh, -ENOTBLK is a hack */
page_cache_release(page);
goto out;
}
- kaddr = kmap_atomic(page, KM_USER0);
- memset(kaddr + (block_in_page << blkbits),
- 0, 1 << blkbits);
- flush_dcache_page(page);
- kunmap_atomic(kaddr, KM_USER0);
+ zero_user_page(page, block_in_page << blkbits,
+ 1 << blkbits, KM_USER0);
dio->block_in_file++;
block_in_page++;
goto next_block;
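zero_user_page() wraps exactly the kmap_atomic/memset/flush_dcache_page/kunmap_atomic sequence deleted above; roughly the <linux/highmem.h> helper this hunk switches to:

	/* Roughly the <linux/highmem.h> helper. */
	static inline void zero_user_page(struct page *page, unsigned offset,
					  unsigned size, int km_type)
	{
		void *kaddr = kmap_atomic(page, km_type);

		memset(kaddr + offset, 0, size);
		flush_dcache_page(page);
		kunmap_atomic(kaddr, km_type);
	}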
ssize_t ret2;
size_t bytes;
- dio->bio = NULL;
dio->inode = inode;
dio->rw = rw;
dio->blkbits = blkbits;
dio->blkfactor = inode->i_blkbits - blkbits;
- dio->start_zero_done = 0;
- dio->size = 0;
dio->block_in_file = offset >> blkbits;
- dio->blocks_available = 0;
- dio->cur_page = NULL;
- dio->boundary = 0;
- dio->reap_counter = 0;
dio->get_block = get_block;
dio->end_io = end_io;
- dio->map_bh.b_private = NULL;
dio->final_block_in_bio = -1;
dio->next_block_for_io = -1;
- dio->page_errors = 0;
- dio->io_error = 0;
- dio->result = 0;
dio->iocb = iocb;
dio->i_size = i_size_read(inode);
spin_lock_init(&dio->bio_lock);
dio->refcount = 1;
- dio->bio_list = NULL;
- dio->waiter = NULL;
/*
* In case of non-aligned buffers, we may need 2 more
* pages since we need to zero out first and last block.
*/
if (unlikely(dio->blkfactor))
dio->pages_in_io = 2;
- else
- dio->pages_in_io = 0;
for (seg = 0; seg < nr_segs; seg++) {
user_addr = (unsigned long)iov[seg].iov_base;
spin_lock_irqsave(&dio->bio_lock, flags);
ret2 = --dio->refcount;
spin_unlock_irqrestore(&dio->bio_lock, flags);
- BUG_ON(!dio->is_async && ret2 != 0);
+
if (ret2 == 0) {
ret = dio_complete(dio, offset, ret);
kfree(dio);
}
}
- dio = kmalloc(sizeof(*dio), GFP_KERNEL);
+ dio = kzalloc(sizeof(*dio), GFP_KERNEL);
retval = -ENOMEM;
if (!dio)
goto out;
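kzalloc() hands back zeroed memory, which is what makes all of the explicit zero/NULL member initializations removed above redundant; conceptually (and roughly as in mm/util.c of this era) it is just:

	/* Conceptually: kmalloc() followed by memset(). */
	void *kzalloc(size_t size, gfp_t flags)
	{
		void *ret = kmalloc(size, flags);

		if (ret)
			memset(ret, 0, size);
		return ret;
	}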