/*
 * Copyright (C) 2001-2002 Sistina Software (UK) Limited.
 *
 * This file is released under the GPL.
 */

#ifndef DM_SNAPSHOT_H
#define DM_SNAPSHOT_H
#include <linux/device-mapper.h>
#include "dm-bio-list.h"
#include <linux/blkdev.h>
#include <linux/workqueue.h>
struct exception_table {
	uint32_t hash_mask;
	unsigned hash_shift;
	struct list_head *table;
};
/*
 * The snapshot code deals with largish chunks of the disk at a
 * time. Typically 32k - 512k.
 */
typedef sector_t chunk_t;
/*
 * An exception is used where an old chunk of data has been
 * replaced by a new one.
 * If chunk_t is 64 bits in size, the top 8 bits of new_chunk hold the number
 * of chunks that follow contiguously.  Remaining bits hold the number of the
 * chunk within the device.
 */
struct dm_snap_exception {
	struct list_head hash_list;

	chunk_t old_chunk;
	chunk_t new_chunk;
};
/*
 * Functions to manipulate consecutive chunks
 */
# if defined(CONFIG_LBD) || (BITS_PER_LONG == 64)
#  define DM_CHUNK_CONSECUTIVE_BITS 8
#  define DM_CHUNK_NUMBER_BITS 56
static inline chunk_t dm_chunk_number(chunk_t chunk)
{
	return chunk & (chunk_t)((1ULL << DM_CHUNK_NUMBER_BITS) - 1ULL);
}
static inline unsigned dm_consecutive_chunk_count(struct dm_snap_exception *e)
{
	return e->new_chunk >> DM_CHUNK_NUMBER_BITS;
}
static inline void dm_consecutive_chunk_count_inc(struct dm_snap_exception *e)
{
	e->new_chunk += (1ULL << DM_CHUNK_NUMBER_BITS);

	BUG_ON(!dm_consecutive_chunk_count(e));
}
# else
#  define DM_CHUNK_CONSECUTIVE_BITS 0

static inline chunk_t dm_chunk_number(chunk_t chunk)
{
	return chunk;
}

static inline unsigned dm_consecutive_chunk_count(struct dm_snap_exception *e)
{
	return 0;
}

static inline void dm_consecutive_chunk_count_inc(struct dm_snap_exception *e)
{
}

# endif
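/*
 * Worked example (illustration only, not part of the original interface):
 * with the 64-bit layout above, new_chunk packs a run length into its top
 * DM_CHUNK_CONSECUTIVE_BITS bits and the starting chunk number into the
 * remaining bits.  The helper below is a hypothetical sketch of the
 * encoding that dm_chunk_number() and dm_consecutive_chunk_count() decode.
 */
#if 0
static inline chunk_t dm_pack_consecutive_example(chunk_t number,
						  unsigned extra)
{
	/* number = 100, extra = 2 => chunks 100, 101 and 102 are contiguous */
	return ((chunk_t)extra << DM_CHUNK_NUMBER_BITS) | number;
}
#endif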
/*
 * Abstraction to handle the meta/layout of exception stores (the
 * COW device).
 */
struct exception_store {

	/*
	 * Destroys this object when you've finished with it.
	 */
	void (*destroy) (struct exception_store *store);
	/*
	 * The target shouldn't read the COW device until this is
	 * called.
	 */
	int (*read_metadata) (struct exception_store *store);
	/*
	 * Find somewhere to store the next exception.
	 */
	int (*prepare_exception) (struct exception_store *store,
				  struct dm_snap_exception *e);
	/*
	 * Update the metadata with this exception.
	 */
	void (*commit_exception) (struct exception_store *store,
				  struct dm_snap_exception *e,
				  void (*callback) (void *, int success),
				  void *callback_context);
	/*
	 * The snapshot is invalid, note this in the metadata.
	 */
	void (*drop_snapshot) (struct exception_store *store);
	/*
	 * Return how full the snapshot is.
	 */
	void (*fraction_full) (struct exception_store *store,
			       sector_t *numerator,
			       sector_t *denominator);

	struct dm_snapshot *snap;
	void *context;
};
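/*
 * Minimal usage sketch (illustration only): a caller such as the snapshot
 * target would drive a store roughly as below when remapping a chunk.
 * The function and parameter names here are hypothetical.
 */
#if 0
static void example_remap_chunk(struct exception_store *store,
				struct dm_snap_exception *e,
				void (*done)(void *context, int success),
				void *context)
{
	/*
	 * Reserve a destination chunk on the COW device; a non-zero
	 * return means the store is full or the allocation failed.
	 */
	if (store->prepare_exception(store, e))
		return;

	/*
	 * Persist the mapping; 'done' runs once the metadata write
	 * completes (or fails).
	 */
	store->commit_exception(store, e, done, context);
}
#endif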
#define DM_TRACKED_CHUNK_HASH_SIZE	16
#define DM_TRACKED_CHUNK_HASH(x)	((unsigned long)(x) & \
					 (DM_TRACKED_CHUNK_HASH_SIZE - 1))
struct dm_snapshot {
	struct rw_semaphore lock;
	struct dm_target *ti;

	struct dm_dev *origin;
	struct dm_dev *cow;

	/* List of snapshots per Origin */
	struct list_head list;

	/* Size of data blocks saved - must be a power of 2 */
	chunk_t chunk_size;
	chunk_t chunk_mask;
	chunk_t chunk_shift;

	/* You can't use a snapshot if this is 0 (e.g. if full) */
	int valid;

	/* Origin writes don't trigger exceptions until this is set */
	int active;

	/* Used for display of table */
	char type;

	mempool_t *pending_pool;

	atomic_t pending_exceptions_count;

	struct exception_table pending;
	struct exception_table complete;

	/*
	 * pe_lock protects all pending_exception operations and access
	 * as well as the snapshot_bios list.
	 */
	spinlock_t pe_lock;

	/* The on disk metadata handler */
	struct exception_store store;

	struct dm_kcopyd_client *kcopyd_client;

	/* Queue of snapshot writes for ksnapd to flush */
	struct bio_list queued_bios;
	struct work_struct queued_bios_work;

	/* Chunks with outstanding reads (see the tracking sketch below) */
	mempool_t *tracked_chunk_pool;
	spinlock_t tracked_chunk_lock;
	struct hlist_head tracked_chunk_hash[DM_TRACKED_CHUNK_HASH_SIZE];
};
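/*
 * Tracking sketch (illustration only): dm-snap.c keeps a small private
 * structure per chunk with a read in flight and hangs it off
 * tracked_chunk_hash[] via DM_TRACKED_CHUNK_HASH().  The structure and
 * function below are hypothetical stand-ins for that private code.
 */
#if 0
struct example_tracked_chunk {
	struct hlist_node node;
	chunk_t chunk;
};

static void example_track_chunk(struct dm_snapshot *s,
				struct example_tracked_chunk *c, chunk_t chunk)
{
	c->chunk = chunk;

	spin_lock_irq(&s->tracked_chunk_lock);
	hlist_add_head(&c->node,
		       &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)]);
	spin_unlock_irq(&s->tracked_chunk_lock);
}
#endif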
/*
 * Used by the exception stores to load exceptions when
 * initialising.
 */
int dm_add_exception(struct dm_snapshot *s, chunk_t old, chunk_t new);
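/*
 * Usage sketch (illustration only): a store's read_metadata()
 * implementation is expected to call dm_add_exception() once per old/new
 * chunk pair it finds on the COW device, so the in-core exception table
 * matches the on-disk state before the snapshot goes active.  Names below
 * are hypothetical.
 */
#if 0
static int example_load_exceptions(struct dm_snapshot *s,
				   const chunk_t *old, const chunk_t *new,
				   unsigned count)
{
	unsigned i;
	int r;

	for (i = 0; i < count; i++) {
		r = dm_add_exception(s, old[i], new[i]);
		if (r)
			return r;
	}

	return 0;
}
#endif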
/*
 * Constructor and destructor for the default persistent
 * and transient implementations.
 */
int dm_create_persistent(struct exception_store *store);

int dm_create_transient(struct exception_store *store);
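/*
 * Selection sketch (illustration only): the snapshot constructor picks a
 * store type from its table argument, conventionally 'P' for persistent
 * and 'N' for transient.  The helper below is hypothetical.
 */
#if 0
static int example_create_store(struct dm_snapshot *s, char persistent)
{
	if (persistent == 'P')
		return dm_create_persistent(&s->store);

	return dm_create_transient(&s->store);
}
#endif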
/*
 * Return the number of sectors in the device.
 */
static inline sector_t get_dev_size(struct block_device *bdev)
{
	return bdev->bd_inode->i_size >> SECTOR_SHIFT;
}
static inline chunk_t sector_to_chunk(struct dm_snapshot *s, sector_t sector)
{
	return (sector & ~s->chunk_mask) >> s->chunk_shift;
}
static inline sector_t chunk_to_sector(struct dm_snapshot *s, chunk_t chunk)
{
	return chunk << s->chunk_shift;
}
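/*
 * Worked example (illustration only): with 64 KiB chunks, chunk_shift is 7
 * (128 sectors of 512 bytes) and chunk_mask is 127.  sector_to_chunk() then
 * maps sector 1000 to chunk (1000 & ~127) >> 7 = 7, and chunk_to_sector(s, 7)
 * returns that chunk's first sector, 7 << 7 = 896.
 */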
static inline int bdev_equal(struct block_device *lhs, struct block_device *rhs)
{
	/*
	 * There is only ever one instance of a particular block
	 * device so we can compare pointers safely.
	 */
	return lhs == rhs;
}

#endif /* DM_SNAPSHOT_H */