2 * Copyright (c) International Business Machines Corp., 2006
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
12 * the GNU General Public License for more details.
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 * Author: Artem Bityutskiy (Битюцкий Артём)
22 * This file includes implementation of UBI character device operations.
24 * There are two kinds of character devices in UBI: UBI character devices and
25 * UBI volume character devices. UBI character devices allow users to
26 * manipulate whole volumes: create, remove, and re-size them. Volume character
27 * devices provide volume I/O capabilities.
29 * Major and minor numbers are assigned dynamically to both UBI and volume
32 * Well, there is the third kind of character devices - the UBI control
33 * character device, which allows to manipulate by UBI devices - create and
34 * delete them. In other words, it is used for attaching and detaching MTD
38 #include <linux/module.h>
39 #include <linux/stat.h>
40 #include <linux/ioctl.h>
41 #include <linux/capability.h>
42 #include <linux/uaccess.h>
43 #include <linux/compat.h>
44 #include <linux/math64.h>
45 #include <mtd/ubi-user.h>
49 * get_exclusive - get exclusive access to an UBI volume.
50 * @desc: volume descriptor
52 * This function changes UBI volume open mode to "exclusive". Returns previous
53 * mode value (positive integer) in case of success and a negative error code
56 static int get_exclusive(struct ubi_volume_desc *desc)
59 struct ubi_volume *vol = desc->vol;
	/* Count every opener (readers + writers + exclusive) under the lock */
61 spin_lock(&vol->ubi->volumes_lock);
62 users = vol->readers + vol->writers + vol->exclusive;
63 ubi_assert(users > 0);
	/* NOTE(review): the "more than one user" error branch appears to be
	 * missing from this view of the file — confirm against full source. */
65 dbg_err("%d users for volume %d", users, vol->vol_id);
	/* Sole opener: convert its reader/writer reference into exclusive */
68 vol->readers = vol->writers = 0;
71 desc->mode = UBI_EXCLUSIVE;
73 spin_unlock(&vol->ubi->volumes_lock);
79 * revoke_exclusive - revoke exclusive mode.
80 * @desc: volume descriptor
81 * @mode: new mode to switch to
83 static void revoke_exclusive(struct ubi_volume_desc *desc, int mode)
85 struct ubi_volume *vol = desc->vol;
87 spin_lock(&vol->ubi->volumes_lock);
	/* Caller must currently hold the volume exclusively */
88 ubi_assert(vol->readers == 0 && vol->writers == 0);
89 ubi_assert(vol->exclusive == 1 && desc->mode == UBI_EXCLUSIVE);
	/* Downgrade: move the single exclusive reference to the new mode */
91 if (mode == UBI_READONLY)
93 else if (mode == UBI_READWRITE)
97 spin_unlock(&vol->ubi->volumes_lock);
102 static int vol_cdev_open(struct inode *inode, struct file *file)
104 struct ubi_volume_desc *desc;
	/* Volume minor numbers start at 1; minor 0 is the UBI device itself */
105 int vol_id = iminor(inode) - 1, mode, ubi_num;
107 ubi_num = ubi_major2num(imajor(inode));
	/* Map the file's access mode onto the UBI open mode */
111 if (file->f_mode & FMODE_WRITE)
112 mode = UBI_READWRITE;
116 dbg_gen("open volume %d, mode %d", vol_id, mode);
118 desc = ubi_open_volume(ubi_num, vol_id, mode);
120 return PTR_ERR(desc);
	/* Stash the descriptor for the other file operations */
122 file->private_data = desc;
126 static int vol_cdev_release(struct inode *inode, struct file *file)
128 struct ubi_volume_desc *desc = file->private_data;
129 struct ubi_volume *vol = desc->vol;
131 dbg_gen("release volume %d, mode %d", vol->vol_id, desc->mode);
	/* Closing mid-update leaves the volume with the update marker set */
134 ubi_warn("update of volume %d not finished, volume is damaged",
136 ubi_assert(!vol->changing_leb);
139 } else if (vol->changing_leb) {
	/* An atomic LEB change was started but not fully fed — cancel it */
140 dbg_gen("only %lld of %lld bytes received for atomic LEB change"
141 " for volume %d:%d, cancel", vol->upd_received,
142 vol->upd_bytes, vol->ubi->ubi_num, vol->vol_id);
143 vol->changing_leb = 0;
147 ubi_close_volume(desc);
151 static loff_t vol_cdev_llseek(struct file *file, loff_t offset, int origin)
153 struct ubi_volume_desc *desc = file->private_data;
154 struct ubi_volume *vol = desc->vol;
158 /* Update is in progress, seeking is prohibited */
164 case 0: /* SEEK_SET */
167 case 1: /* SEEK_CUR */
168 new_offset = file->f_pos + offset;
170 case 2: /* SEEK_END */
	/* SEEK_END is relative to the volume's used size, not its capacity */
171 new_offset = vol->used_bytes + offset;
	/* Reject seeks outside [0, used_bytes] */
177 if (new_offset < 0 || new_offset > vol->used_bytes) {
178 dbg_err("bad seek %lld", new_offset);
182 dbg_gen("seek volume %d, offset %lld, origin %d, new offset %lld",
183 vol->vol_id, offset, origin, new_offset);
185 file->f_pos = new_offset;
189 static int vol_cdev_fsync(struct file *file, struct dentry *dentry,
192 struct ubi_volume_desc *desc = file->private_data;
193 struct ubi_device *ubi = desc->vol->ubi;
	/* Flush the whole UBI device this volume belongs to */
195 return ubi_sync(ubi->ubi_num);
199 static ssize_t vol_cdev_read(struct file *file, __user char *buf, size_t count,
202 struct ubi_volume_desc *desc = file->private_data;
203 struct ubi_volume *vol = desc->vol;
204 struct ubi_device *ubi = vol->ubi;
205 int err, lnum, off, len, tbuf_size;
	/* Remember the requested size so we can report bytes actually read */
206 size_t count_save = count;
209 dbg_gen("read %zd bytes from offset %lld of volume %d",
210 count, *offp, vol->vol_id);
	/* Refuse reads from volumes with a pending (interrupted) update */
216 if (vol->upd_marker) {
217 dbg_err("damaged volume, update marker is set");
220 if (*offp == vol->used_bytes || count == 0)
224 dbg_gen("read from corrupted volume %d", vol->vol_id);
	/* Clamp the read to the end of the volume's used data */
226 if (*offp + count > vol->used_bytes)
227 count_save = count = vol->used_bytes - *offp;
	/* Bounce buffer: at most one LEB, rounded up to the min I/O unit */
229 tbuf_size = vol->usable_leb_size;
230 if (count < tbuf_size)
231 tbuf_size = ALIGN(count, ubi->min_io_size);
232 tbuf = vmalloc(tbuf_size);
236 len = count > tbuf_size ? tbuf_size : count;
	/* Translate the byte offset into (LEB number, offset within LEB) */
237 lnum = div_u64_rem(*offp, vol->usable_leb_size, &off);
	/* Do not read past the end of the current LEB in one chunk */
242 if (off + len >= vol->usable_leb_size)
243 len = vol->usable_leb_size - off;
245 err = ubi_eba_read_leb(ubi, vol, lnum, tbuf, off, len, 0);
	/* Chunk ended exactly at a LEB boundary — advance to the next LEB */
250 if (off == vol->usable_leb_size) {
252 off -= vol->usable_leb_size;
258 err = copy_to_user(buf, tbuf, len);
265 len = count > tbuf_size ? tbuf_size : count;
	/* Return bytes transferred, or the error if nothing was copied */
269 return err ? err : count_save - count;
273 * This function allows to directly write to dynamic UBI volumes, without
274 * issuing the volume update operation.
276 static ssize_t vol_cdev_direct_write(struct file *file, const char __user *buf,
277 size_t count, loff_t *offp)
279 struct ubi_volume_desc *desc = file->private_data;
280 struct ubi_volume *vol = desc->vol;
281 struct ubi_device *ubi = vol->ubi;
282 int lnum, off, len, tbuf_size, err = 0;
283 size_t count_save = count;
	/* Direct writes must be explicitly enabled via UBI_SET_PROP */
286 if (!vol->direct_writes)
289 dbg_gen("requested: write %zd bytes to offset %lld of volume %u",
290 count, *offp, vol->vol_id);
	/* Static volumes can only be written via the update operation */
292 if (vol->vol_type == UBI_STATIC_VOLUME)
295 lnum = div_u64_rem(*offp, vol->usable_leb_size, &off);
	/* Start position must be aligned to the minimum I/O unit */
296 if (off & (ubi->min_io_size - 1)) {
297 dbg_err("unaligned position");
301 if (*offp + count > vol->used_bytes)
302 count_save = count = vol->used_bytes - *offp;
304 /* We can write only in fractions of the minimum I/O unit */
305 if (count & (ubi->min_io_size - 1)) {
306 dbg_err("unaligned write length");
	/* Bounce buffer: at most one LEB, rounded up to the min I/O unit */
310 tbuf_size = vol->usable_leb_size;
311 if (count < tbuf_size)
312 tbuf_size = ALIGN(count, ubi->min_io_size);
313 tbuf = vmalloc(tbuf_size);
317 len = count > tbuf_size ? tbuf_size : count;
	/* Keep each chunk inside the current LEB */
322 if (off + len >= vol->usable_leb_size)
323 len = vol->usable_leb_size - off;
325 err = copy_from_user(tbuf, buf, len);
331 err = ubi_eba_write_leb(ubi, vol, lnum, tbuf, off, len,
	/* Chunk ended at a LEB boundary — move on to the next LEB */
337 if (off == vol->usable_leb_size) {
339 off -= vol->usable_leb_size;
345 len = count > tbuf_size ? tbuf_size : count;
349 return err ? err : count_save - count;
352 static ssize_t vol_cdev_write(struct file *file, const char __user *buf,
353 size_t count, loff_t *offp)
356 struct ubi_volume_desc *desc = file->private_data;
357 struct ubi_volume *vol = desc->vol;
358 struct ubi_device *ubi = vol->ubi;
	/* No update/LEB-change in progress: treat this as a direct write */
360 if (!vol->updating && !vol->changing_leb)
361 return vol_cdev_direct_write(file, buf, count, offp);
	/* Otherwise feed the data into the ongoing update or LEB change */
364 err = ubi_more_update_data(ubi, vol, buf, count);
366 err = ubi_more_leb_change_data(ubi, vol, buf, count);
369 ubi_err("cannot accept more %zd bytes of data, error %d",
376 * The operation is finished, @err contains number of actually
	/* LEB change complete — drop exclusive access taken at start */
381 if (vol->changing_leb) {
382 revoke_exclusive(desc, UBI_READWRITE);
	/* Update complete — verify static volumes before releasing */
386 err = ubi_check_volume(ubi, vol->vol_id);
391 ubi_warn("volume %d on UBI device %d is corrupted",
392 vol->vol_id, ubi->ubi_num);
	/* Propagate the update to gluebi and drop exclusive access */
396 ubi_gluebi_updated(vol);
397 revoke_exclusive(desc, UBI_READWRITE);
403 static long vol_cdev_ioctl(struct file *file, unsigned int cmd,
407 struct ubi_volume_desc *desc = file->private_data;
408 struct ubi_volume *vol = desc->vol;
409 struct ubi_device *ubi = vol->ubi;
410 void __user *argp = (void __user *)arg;
413 /* Volume update command */
416 int64_t bytes, rsvd_bytes;
	/* Volume updates require CAP_SYS_RESOURCE */
418 if (!capable(CAP_SYS_RESOURCE)) {
423 err = copy_from_user(&bytes, argp, sizeof(int64_t));
429 if (desc->mode == UBI_READONLY) {
	/* Maximum data the volume can hold, accounting for data padding */
434 rsvd_bytes = (long long)vol->reserved_pebs *
435 ubi->leb_size-vol->data_pad;
436 if (bytes < 0 || bytes > rsvd_bytes) {
	/* Updates need exclusive access for their whole duration */
441 err = get_exclusive(desc);
445 err = ubi_start_update(ubi, vol, bytes);
447 revoke_exclusive(desc, UBI_READWRITE);
451 /* Atomic logical eraseblock change command */
454 struct ubi_leb_change_req req;
456 err = copy_from_user(&req, argp,
457 sizeof(struct ubi_leb_change_req));
463 if (desc->mode == UBI_READONLY ||
464 vol->vol_type == UBI_STATIC_VOLUME) {
469 /* Validate the request */
	/* BUGFIX: the size check must test req.bytes against the usable
	 * LEB size; the old code re-tested req.lnum, so oversized byte
	 * counts slipped through validation. */
471 if (req.lnum < 0 || req.lnum >= vol->reserved_pebs ||
472 req.bytes < 0 || req.bytes > vol->usable_leb_size)
474 if (req.dtype != UBI_LONGTERM && req.dtype != UBI_SHORTTERM &&
475 req.dtype != UBI_UNKNOWN)
478 err = get_exclusive(desc);
482 err = ubi_start_leb_change(ubi, vol, &req);
484 revoke_exclusive(desc, UBI_READWRITE);
488 /* Logical eraseblock erasure command */
493 err = get_user(lnum, (__user int32_t *)argp);
499 if (desc->mode == UBI_READONLY ||
500 vol->vol_type == UBI_STATIC_VOLUME) {
505 if (lnum < 0 || lnum >= vol->reserved_pebs) {
510 dbg_gen("erase LEB %d:%d", vol->vol_id, lnum);
511 err = ubi_eba_unmap_leb(ubi, vol, lnum);
	/* Make sure the erasure really happened before returning */
515 err = ubi_wl_flush(ubi);
519 /* Logical eraseblock map command */
522 struct ubi_map_req req;
524 err = copy_from_user(&req, argp, sizeof(struct ubi_map_req));
529 err = ubi_leb_map(desc, req.lnum, req.dtype);
533 /* Logical eraseblock un-map command */
538 err = get_user(lnum, (__user int32_t *)argp);
543 err = ubi_leb_unmap(desc, lnum);
547 /* Check if logical eraseblock is mapped command */
552 err = get_user(lnum, (__user int32_t *)argp);
557 err = ubi_is_mapped(desc, lnum);
561 /* Set volume property command */
564 struct ubi_set_prop_req req;
566 err = copy_from_user(&req, argp,
567 sizeof(struct ubi_set_prop_req));
572 switch (req.property) {
573 case UBI_PROP_DIRECT_WRITE:
	/* Toggle direct writes under the volumes mutex */
574 mutex_lock(&ubi->volumes_mutex);
575 desc->vol->direct_writes = !!req.value;
576 mutex_unlock(&ubi->volumes_mutex);
593 * verify_mkvol_req - verify volume creation request.
594 * @ubi: UBI device description object
595 * @req: the request to check
597 * This function returns zero if the request is correct, and %-EINVAL if not.
599 static int verify_mkvol_req(const struct ubi_device *ubi,
600 const struct ubi_mkvol_req *req)
602 int n, err = -EINVAL;
	/* Basic sanity: sizes, alignment and type must be non-negative */
604 if (req->bytes < 0 || req->alignment < 0 || req->vol_type < 0 ||
	/* Volume ID must be in range or the "auto assign" sentinel */
608 if ((req->vol_id < 0 || req->vol_id >= ubi->vtbl_slots) &&
609 req->vol_id != UBI_VOL_NUM_AUTO)
612 if (req->alignment == 0)
618 if (req->vol_type != UBI_DYNAMIC_VOLUME &&
619 req->vol_type != UBI_STATIC_VOLUME)
622 if (req->alignment > ubi->leb_size)
	/* Alignment other than 1 must be a multiple of the min I/O unit */
625 n = req->alignment & (ubi->min_io_size - 1);
626 if (req->alignment != 1 && n)
629 if (req->name_len > UBI_VOL_NAME_MAX) {
	/* The name must be exactly name_len characters, NUL-terminated */
634 n = strnlen(req->name, req->name_len + 1);
635 if (n != req->name_len)
641 dbg_err("bad volume creation request");
642 ubi_dbg_dump_mkvol_req(req);
647 * verify_rsvol_req - verify volume re-size request.
648 * @ubi: UBI device description object
649 * @req: the request to check
651 * This function returns zero if the request is correct, and %-EINVAL if not.
653 static int verify_rsvol_req(const struct ubi_device *ubi,
654 const struct ubi_rsvol_req *req)
	/* The target volume ID must fit in the volume table */
659 if (req->vol_id < 0 || req->vol_id >= ubi->vtbl_slots)
666 * rename_volumes - rename UBI volumes.
667 * @ubi: UBI device description object
668 * @req: volumes re-name request
670 * This is a helper function for the volume re-name IOCTL which validates
671 * the request, opens the volume and calls corresponding volumes management
672 * function. Returns zero in case of success and a negative error code in case
675 static int rename_volumes(struct ubi_device *ubi,
676 struct ubi_rnvol_req *req)
679 struct list_head rename_list;
680 struct ubi_rename_entry *re, *re1;
682 if (req->count < 0 || req->count > UBI_MAX_RNVOL)
688 /* Validate volume IDs and names in the request */
689 for (i = 0; i < req->count; i++) {
690 if (req->ents[i].vol_id < 0 ||
691 req->ents[i].vol_id >= ubi->vtbl_slots)
693 if (req->ents[i].name_len < 0)
695 if (req->ents[i].name_len > UBI_VOL_NAME_MAX)
696 return -ENAMETOOLONG;
	/* Force NUL termination and reject embedded NULs in the name */
697 req->ents[i].name[req->ents[i].name_len] = '\0';
698 n = strlen(req->ents[i].name);
699 if (n != req->ents[i].name_len)
703 /* Make sure volume IDs and names are unique */
704 for (i = 0; i < req->count - 1; i++) {
705 for (n = i + 1; n < req->count; n++) {
706 if (req->ents[i].vol_id == req->ents[n].vol_id) {
707 dbg_err("duplicated volume id %d",
708 req->ents[i].vol_id);
711 if (!strcmp(req->ents[i].name, req->ents[n].name)) {
712 dbg_err("duplicated volume name \"%s\"",
719 /* Create the re-name list */
720 INIT_LIST_HEAD(&rename_list);
721 for (i = 0; i < req->count; i++) {
722 int vol_id = req->ents[i].vol_id;
723 int name_len = req->ents[i].name_len;
724 const char *name = req->ents[i].name;
726 re = kzalloc(sizeof(struct ubi_rename_entry), GFP_KERNEL);
	/* Hold each renamed volume exclusively for the whole operation */
732 re->desc = ubi_open_volume(ubi->ubi_num, vol_id, UBI_EXCLUSIVE);
733 if (IS_ERR(re->desc)) {
734 err = PTR_ERR(re->desc);
735 dbg_err("cannot open volume %d, error %d", vol_id, err);
740 /* Skip this re-naming if the name does not really change */
741 if (re->desc->vol->name_len == name_len &&
742 !memcmp(re->desc->vol->name, name, name_len)) {
743 ubi_close_volume(re->desc);
748 re->new_name_len = name_len;
749 memcpy(re->new_name, name, name_len);
750 list_add_tail(&re->list, &rename_list);
751 dbg_msg("will rename volume %d from \"%s\" to \"%s\"",
752 vol_id, re->desc->vol->name, name);
755 if (list_empty(&rename_list))
758 /* Find out the volumes which have to be removed */
759 list_for_each_entry(re, &rename_list, list) {
760 struct ubi_volume_desc *desc;
761 int no_remove_needed = 0;
764 * Volume @re->vol_id is going to be re-named to
765 * @re->new_name, while its current name is @name. If a volume
766 * with name @re->new_name currently exists, it has to be
767 * removed, unless it is also re-named in the request (@req).
769 list_for_each_entry(re1, &rename_list, list) {
770 if (re->new_name_len == re1->desc->vol->name_len &&
771 !memcmp(re->new_name, re1->desc->vol->name,
772 re1->desc->vol->name_len)) {
773 no_remove_needed = 1;
778 if (no_remove_needed)
782 * It seems we need to remove volume with name @re->new_name,
785 desc = ubi_open_volume_nm(ubi->ubi_num, re->new_name,
790 /* Re-naming into a non-existing volume name */
793 /* The volume exists but busy, or an error occurred */
794 dbg_err("cannot open volume \"%s\", error %d",
	/* Queue a removal entry for the clashing volume */
799 re = kzalloc(sizeof(struct ubi_rename_entry), GFP_KERNEL);
802 ubi_close_volume(desc);
808 list_add(&re->list, &rename_list);
809 dbg_msg("will remove volume %d, name \"%s\"",
810 re->desc->vol->vol_id, re->desc->vol->name);
	/* Perform all renames/removals atomically under the volumes mutex */
813 mutex_lock(&ubi->volumes_mutex);
814 err = ubi_rename_volumes(ubi, &rename_list);
815 mutex_unlock(&ubi->volumes_mutex);
	/* Release every opened volume and free the list entries */
818 list_for_each_entry_safe(re, re1, &rename_list, list) {
819 ubi_close_volume(re->desc);
826 static long ubi_cdev_ioctl(struct file *file, unsigned int cmd,
830 struct ubi_device *ubi;
831 struct ubi_volume_desc *desc;
832 void __user *argp = (void __user *)arg;
	/* All UBI device ioctls are privileged operations */
834 if (!capable(CAP_SYS_RESOURCE))
	/* Resolve the UBI device from the character device major number */
837 ubi = ubi_get_by_major(imajor(file->f_mapping->host));
842 /* Create volume command */
845 struct ubi_mkvol_req req;
847 dbg_gen("create volume");
848 err = copy_from_user(&req, argp, sizeof(struct ubi_mkvol_req));
	/* Ensure the name is NUL-terminated before validation */
854 req.name[req.name_len] = '\0';
855 err = verify_mkvol_req(ubi, &req);
859 mutex_lock(&ubi->volumes_mutex);
860 err = ubi_create_volume(ubi, &req);
861 mutex_unlock(&ubi->volumes_mutex);
	/* Report back the (possibly auto-assigned) volume ID */
865 err = put_user(req.vol_id, (__user int32_t *)argp);
872 /* Remove volume command */
877 dbg_gen("remove volume");
878 err = get_user(vol_id, (__user int32_t *)argp);
	/* Open exclusively so nobody else can use the volume meanwhile */
884 desc = ubi_open_volume(ubi->ubi_num, vol_id, UBI_EXCLUSIVE);
890 mutex_lock(&ubi->volumes_mutex);
891 err = ubi_remove_volume(desc, 0);
892 mutex_unlock(&ubi->volumes_mutex);
895 * The volume is deleted (unless an error occurred), and the
896 * 'struct ubi_volume' object will be freed when
897 * 'ubi_close_volume()' will call 'put_device()'.
899 ubi_close_volume(desc);
903 /* Re-size volume command */
907 struct ubi_rsvol_req req;
909 dbg_gen("re-size volume");
910 err = copy_from_user(&req, argp, sizeof(struct ubi_rsvol_req));
916 err = verify_rsvol_req(ubi, &req);
920 desc = ubi_open_volume(ubi->ubi_num, req.vol_id, UBI_EXCLUSIVE);
	/* Convert the requested byte size into LEBs, rounding up */
926 pebs = div_u64(req.bytes + desc->vol->usable_leb_size - 1,
927 desc->vol->usable_leb_size);
929 mutex_lock(&ubi->volumes_mutex);
930 err = ubi_resize_volume(desc, pebs);
931 mutex_unlock(&ubi->volumes_mutex);
932 ubi_close_volume(desc);
936 /* Re-name volumes command */
939 struct ubi_rnvol_req *req;
941 dbg_msg("re-name volumes");
	/* The request is large (UBI_MAX_RNVOL entries) — heap-allocate it */
942 req = kmalloc(sizeof(struct ubi_rnvol_req), GFP_KERNEL);
948 err = copy_from_user(req, argp, sizeof(struct ubi_rnvol_req));
	/* Serialize multi-volume operations on this UBI device */
955 mutex_lock(&ubi->mult_mutex);
956 err = rename_volumes(ubi, req);
957 mutex_unlock(&ubi->mult_mutex);
971 static long ctrl_cdev_ioctl(struct file *file, unsigned int cmd,
975 void __user *argp = (void __user *)arg;
	/* Attaching/detaching MTD devices is a privileged operation */
977 if (!capable(CAP_SYS_RESOURCE))
981 /* Attach an MTD device command */
984 struct ubi_attach_req req;
985 struct mtd_info *mtd;
987 dbg_gen("attach MTD device");
988 err = copy_from_user(&req, argp, sizeof(struct ubi_attach_req));
	/* UBI number must be valid or the "auto assign" sentinel */
994 if (req.mtd_num < 0 ||
995 (req.ubi_num < 0 && req.ubi_num != UBI_DEV_NUM_AUTO)) {
1000 mtd = get_mtd_device(NULL, req.mtd_num);
1007 * Note, further request verification is done by
1008 * 'ubi_attach_mtd_dev()'.
1010 mutex_lock(&ubi_devices_mutex);
1011 err = ubi_attach_mtd_dev(mtd, req.ubi_num, req.vid_hdr_offset);
1012 mutex_unlock(&ubi_devices_mutex);
	/* On failure, drop the MTD reference taken above */
1014 put_mtd_device(mtd);
1016 /* @err contains UBI device number */
1017 err = put_user(err, (__user int32_t *)argp);
1022 /* Detach an MTD device command */
	/* BUGFIX: corrected "dettach" typo in the debug message */
1027 dbg_gen("detach MTD device");
1028 err = get_user(ubi_num, (__user int32_t *)argp);
1034 mutex_lock(&ubi_devices_mutex);
1035 err = ubi_detach_mtd_dev(ubi_num, 0);
1036 mutex_unlock(&ubi_devices_mutex);
1048 #ifdef CONFIG_COMPAT
	/* 32-bit-compat wrappers: translate the compat pointer, then forward
	 * to the native ioctl handlers.  NOTE(review): the #else/#endif of
	 * this CONFIG_COMPAT guard are not visible in this view — confirm
	 * against the complete file. */
1049 static long vol_cdev_compat_ioctl(struct file *file, unsigned int cmd,
1052 unsigned long translated_arg = (unsigned long)compat_ptr(arg);
1054 return vol_cdev_ioctl(file, cmd, translated_arg);
1057 static long ubi_cdev_compat_ioctl(struct file *file, unsigned int cmd,
1060 unsigned long translated_arg = (unsigned long)compat_ptr(arg);
1062 return ubi_cdev_ioctl(file, cmd, translated_arg);
1065 static long ctrl_cdev_compat_ioctl(struct file *file, unsigned int cmd,
1068 unsigned long translated_arg = (unsigned long)compat_ptr(arg);
1070 return ctrl_cdev_ioctl(file, cmd, translated_arg);
	/* Without CONFIG_COMPAT, .compat_ioctl slots are simply NULL */
1073 #define vol_cdev_compat_ioctl NULL
1074 #define ubi_cdev_compat_ioctl NULL
1075 #define ctrl_cdev_compat_ioctl NULL
1078 /* UBI volume character device operations */
	/* Full file interface: volumes support seek/read/write/fsync/ioctl */
1079 const struct file_operations ubi_vol_cdev_operations = {
1080 .owner = THIS_MODULE,
1081 .open = vol_cdev_open,
1082 .release = vol_cdev_release,
1083 .llseek = vol_cdev_llseek,
1084 .read = vol_cdev_read,
1085 .write = vol_cdev_write,
1086 .fsync = vol_cdev_fsync,
1087 .unlocked_ioctl = vol_cdev_ioctl,
1088 .compat_ioctl = vol_cdev_compat_ioctl,
1091 /* UBI character device operations */
	/* UBI device node is ioctl-only; seeking is explicitly disallowed */
1092 const struct file_operations ubi_cdev_operations = {
1093 .owner = THIS_MODULE,
1094 .llseek = no_llseek,
1095 .unlocked_ioctl = ubi_cdev_ioctl,
1096 .compat_ioctl = ubi_cdev_compat_ioctl,
1099 /* UBI control character device operations */
	/* Control node only handles attach/detach ioctls */
1100 const struct file_operations ubi_ctrl_cdev_operations = {
1101 .owner = THIS_MODULE,
1102 .unlocked_ioctl = ctrl_cdev_ioctl,
1103 .compat_ioctl = ctrl_cdev_compat_ioctl,