/*
 * linux/ipc/shm.c
 * Copyright (C) 1992, 1993 Krishna Balasubramanian
 *	 Many improvements/fixes by Bruno Haible.
 * Replaced `struct shm_desc' by `struct vm_area_struct', July 1994.
 * Fixed the shm swap deallocation (shm_unuse()), August 1998 Andrea Arcangeli.
 *
 * /proc/sysvipc/shm support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 * BIGMEM support, Andrea Arcangeli <andrea@suse.de>
 * SMP thread shm, Jean-Luc Boyard <jean-luc.boyard@siemens.fr>
 * HIGHMEM support, Ingo Molnar <mingo@redhat.com>
 * Make shmmax, shmall, shmmni sysctl'able, Christoph Rohland <cr@sap.com>
 * Shared /dev/zero support, Kanoj Sarcar <kanoj@sgi.com>
 * Move the mm functionality over to mm/shmem.c, Christoph Rohland <cr@sap.com>
 *
 * support for audit of ipc object properties and permission changes
 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
 *
 * namespaces support
 * OpenVZ, SWsoft Inc.
 * Pavel Emelianov <xemul@openvz.org>
 */
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/shmem_fs.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>
#include <linux/rwsem.h>
#include <linux/nsproxy.h>
#include <linux/mount.h>
#include <linux/ipc_namespace.h>

#include <asm/uaccess.h>

#include "util.h"
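/*
 * Bookkeeping attached to every file created by shmat(): which segment
 * (id) in which ipc namespace it maps, the shmem/hugetlb file that
 * really backs it, and that file's vm_ops so we can forward to them.
 */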
struct shm_file_data {
        int id;
        struct ipc_namespace *ns;
        struct file *file;
        const struct vm_operations_struct *vm_ops;
};

#define shm_file_data(file) (*((struct shm_file_data **)&(file)->private_data))
static const struct file_operations shm_file_operations;
static struct vm_operations_struct shm_vm_ops;

#define shm_ids(ns)     ((ns)->ids[IPC_SHM_IDS])

#define shm_unlock(shp)                 \
        ipc_unlock(&(shp)->shm_perm)

static int newseg(struct ipc_namespace *, struct ipc_params *);
static void shm_open(struct vm_area_struct *vma);
static void shm_close(struct vm_area_struct *vma);
static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp);
#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
#endif
void shm_init_ns(struct ipc_namespace *ns)
{
        ns->shm_ctlmax = SHMMAX;
        ns->shm_ctlall = SHMALL;
        ns->shm_ctlmni = SHMMNI;
        ns->shm_tot = 0;
        ipc_init_ids(&ns->ids[IPC_SHM_IDS]);
}
/*
 * Called with shm_ids.rw_mutex (writer) and the shp structure locked.
 * Only shm_ids.rw_mutex remains locked on exit.
 */
static void do_shm_rmid(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
{
        struct shmid_kernel *shp;
        shp = container_of(ipcp, struct shmid_kernel, shm_perm);

        if (shp->shm_nattch) {
                shp->shm_perm.mode |= SHM_DEST;
                /* Do not find it any more */
                shp->shm_perm.key = IPC_PRIVATE;
                shm_unlock(shp);
        } else
                shm_destroy(ns, shp);
}
void shm_exit_ns(struct ipc_namespace *ns)
{
        free_ipcs(ns, &shm_ids(ns), do_shm_rmid);
}
void __init shm_init(void)
{
        shm_init_ns(&init_ipc_ns);
        ipc_init_proc_interface("sysvipc/shm",
                                "       key      shmid perms       size  cpid  lpid nattch   uid   gid  cuid  cgid      atime      dtime      ctime\n",
                                IPC_SHM_IDS, sysvipc_shm_proc_show);
}
/*
 * shm_lock_(check_) routines are called in the paths where the rw_mutex
 * is not necessarily held.
 */
static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id)
{
        struct kern_ipc_perm *ipcp = ipc_lock(&shm_ids(ns), id);

        if (IS_ERR(ipcp))
                return (struct shmid_kernel *)ipcp;
        return container_of(ipcp, struct shmid_kernel, shm_perm);
}

static inline struct shmid_kernel *shm_lock_check(struct ipc_namespace *ns,
                                                  int id)
{
        struct kern_ipc_perm *ipcp = ipc_lock_check(&shm_ids(ns), id);

        if (IS_ERR(ipcp))
                return (struct shmid_kernel *)ipcp;
        return container_of(ipcp, struct shmid_kernel, shm_perm);
}

static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s)
{
        ipc_rmid(&shm_ids(ns), &s->shm_perm);
}
/* This is called by fork, once for every shm attach. */
static void shm_open(struct vm_area_struct *vma)
{
        struct file *file = vma->vm_file;
        struct shm_file_data *sfd = shm_file_data(file);
        struct shmid_kernel *shp;

        shp = shm_lock(sfd->ns, sfd->id);
        BUG_ON(IS_ERR(shp));
        shp->shm_atim = get_seconds();
        shp->shm_lprid = task_tgid_vnr(current);
        shp->shm_nattch++;
        shm_unlock(shp);
}
/*
 * shm_destroy - free the struct shmid_kernel
 *
 * @ns: namespace
 * @shp: struct to free
 *
 * It has to be called with shp and shm_ids.rw_mutex (writer) locked,
 * but returns with shp unlocked and freed.
 */
static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
{
        ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
        shm_rmid(ns, shp);
        shm_unlock(shp);
        if (!is_file_hugepages(shp->shm_file))
                shmem_lock(shp->shm_file, 0, shp->mlock_user);
        else if (shp->mlock_user)
                user_shm_unlock(shp->shm_file->f_path.dentry->d_inode->i_size,
                                shp->mlock_user);
        fput(shp->shm_file);
        security_shm_free(shp);
        ipc_rcu_putref(shp);
}
/*
 * remove the attach descriptor vma.
 * free memory for segment if it is marked destroyed.
 * The descriptor has already been removed from the current->mm->mmap list
 * and will later be kfree()d.
 */
static void shm_close(struct vm_area_struct *vma)
{
        struct file *file = vma->vm_file;
        struct shm_file_data *sfd = shm_file_data(file);
        struct shmid_kernel *shp;
        struct ipc_namespace *ns = sfd->ns;

        down_write(&shm_ids(ns).rw_mutex);
        /* remove from the list of attaches of the shm segment */
        shp = shm_lock(ns, sfd->id);
        BUG_ON(IS_ERR(shp));
        shp->shm_lprid = task_tgid_vnr(current);
        shp->shm_dtim = get_seconds();
        shp->shm_nattch--;
        if (shp->shm_nattch == 0 &&
            shp->shm_perm.mode & SHM_DEST)
                shm_destroy(ns, shp);
        else
                shm_unlock(shp);
        up_write(&shm_ids(ns).rw_mutex);
}
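/*
 * Page faults on an attached segment are simply forwarded to the fault
 * handler of the backing shmem (or hugetlb) file.
 */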
static int shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct file *file = vma->vm_file;
        struct shm_file_data *sfd = shm_file_data(file);

        return sfd->vm_ops->fault(vma, vmf);
}
#ifdef CONFIG_NUMA
static int shm_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
{
        struct file *file = vma->vm_file;
        struct shm_file_data *sfd = shm_file_data(file);
        int err = 0;

        if (sfd->vm_ops->set_policy)
                err = sfd->vm_ops->set_policy(vma, new);
        return err;
}

static struct mempolicy *shm_get_policy(struct vm_area_struct *vma,
                                        unsigned long addr)
{
        struct file *file = vma->vm_file;
        struct shm_file_data *sfd = shm_file_data(file);
        struct mempolicy *pol = NULL;

        if (sfd->vm_ops->get_policy)
                pol = sfd->vm_ops->get_policy(vma, addr);
        else if (vma->vm_policy)
                pol = vma->vm_policy;
        return pol;
}
#endif
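/*
 * mmap() of the shmat() file: let the backing file build the mapping,
 * remember its vm_ops, then interpose shm_vm_ops so the attach/detach
 * accounting in shm_open()/shm_close() sees every map and unmap.
 */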
static int shm_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct shm_file_data *sfd = shm_file_data(file);
        int ret;

        ret = sfd->file->f_op->mmap(sfd->file, vma);
        if (ret != 0)
                return ret;
        sfd->vm_ops = vma->vm_ops;
        BUG_ON(!sfd->vm_ops->fault);
        vma->vm_ops = &shm_vm_ops;
        shm_open(vma);
        return ret;
}
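/* Last reference to the shmat() file is gone; drop the per-attach data. */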
static int shm_release(struct inode *ino, struct file *file)
{
        struct shm_file_data *sfd = shm_file_data(file);

        put_ipc_ns(sfd->ns);
        shm_file_data(file) = NULL;
        kfree(sfd);
        return 0;
}
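/* fsync() is forwarded to the backing file, if it implements it. */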
static int shm_fsync(struct file *file, struct dentry *dentry, int datasync)
{
        int (*fsync) (struct file *, struct dentry *, int datasync);
        struct shm_file_data *sfd = shm_file_data(file);
        int ret = -EINVAL;

        fsync = sfd->file->f_op->fsync;
        if (fsync)
                ret = fsync(sfd->file, sfd->file->f_path.dentry, datasync);
        return ret;
}
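/* Address-space searches are likewise delegated to the backing file. */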
static unsigned long shm_get_unmapped_area(struct file *file,
        unsigned long addr, unsigned long len, unsigned long pgoff,
        unsigned long flags)
{
        struct shm_file_data *sfd = shm_file_data(file);
        return get_unmapped_area(sfd->file, addr, len, pgoff, flags);
}
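/* True if this shmat() file is ultimately backed by hugetlbfs. */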
int is_file_shm_hugepages(struct file *file)
{
        int ret = 0;

        if (file->f_op == &shm_file_operations) {
                struct shm_file_data *sfd;
                sfd = shm_file_data(file);
                ret = is_file_hugepages(sfd->file);
        }
        return ret;
}
static const struct file_operations shm_file_operations = {
        .mmap           = shm_mmap,
        .fsync          = shm_fsync,
        .release        = shm_release,
        .get_unmapped_area      = shm_get_unmapped_area,
};

static struct vm_operations_struct shm_vm_ops = {
        .open   = shm_open,     /* callback for a new vm-area open */
        .close  = shm_close,    /* callback for when the vm-area is released */
        .fault  = shm_fault,
#if defined(CONFIG_NUMA)
        .set_policy = shm_set_policy,
        .get_policy = shm_get_policy,
#endif
};
/**
 * newseg - Create a new shared memory segment
 * @ns: namespace
 * @params: ptr to the structure that contains key, size and shmflg
 *
 * Called with shm_ids.rw_mutex held as a writer.
 */
static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
{
        key_t key = params->key;
        int shmflg = params->flg;
        size_t size = params->u.size;
        int error;
        struct shmid_kernel *shp;
        int numpages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
        struct file *file;
        char name[13];
        int id;

        if (size < SHMMIN || size > ns->shm_ctlmax)
                return -EINVAL;
        if (ns->shm_tot + numpages > ns->shm_ctlall)
                return -ENOSPC;

        shp = ipc_rcu_alloc(sizeof(*shp));
        if (!shp)
                return -ENOMEM;

        shp->shm_perm.key = key;
        shp->shm_perm.mode = (shmflg & S_IRWXUGO);
        shp->mlock_user = NULL;

        shp->shm_perm.security = NULL;
        error = security_shm_alloc(shp);
        if (error) {
                ipc_rcu_putref(shp);
                return error;
        }

        sprintf(name, "SYSV%08x", key);
        if (shmflg & SHM_HUGETLB) {
                /* hugetlb_file_setup takes care of mlock user accounting */
                file = hugetlb_file_setup(name, size);
                shp->mlock_user = current_user();
        } else {
                int acctflag = VM_ACCOUNT;
                /*
                 * Do not skip accounting under OVERCOMMIT_NEVER, even
                 * if SHM_NORESERVE asks for it.
                 */
                if ((shmflg & SHM_NORESERVE) &&
                                sysctl_overcommit_memory != OVERCOMMIT_NEVER)
                        acctflag = 0;
                file = shmem_file_setup(name, size, acctflag);
        }
        error = PTR_ERR(file);
        if (IS_ERR(file))
                goto no_file;

        id = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
        if (id < 0) {
                error = id;
                goto no_id;
        }

        shp->shm_cprid = task_tgid_vnr(current);
        shp->shm_lprid = 0;
        shp->shm_atim = shp->shm_dtim = 0;
        shp->shm_ctim = get_seconds();
        shp->shm_segsz = size;
        shp->shm_nattch = 0;
        shp->shm_file = file;
        /*
         * shmid gets reported as "inode#" in /proc/pid/maps.
         * proc-ps tools use this. Changing this will break them.
         */
        file->f_dentry->d_inode->i_ino = shp->shm_perm.id;

        ns->shm_tot += numpages;
        error = shp->shm_perm.id;
        shm_unlock(shp);
        return error;

no_id:
        fput(file);
no_file:
        security_shm_free(shp);
        ipc_rcu_putref(shp);
        return error;
}
/*
 * Called with shm_ids.rw_mutex and ipcp locked.
 */
static inline int shm_security(struct kern_ipc_perm *ipcp, int shmflg)
{
        struct shmid_kernel *shp;

        shp = container_of(ipcp, struct shmid_kernel, shm_perm);
        return security_shm_associate(shp, shmflg);
}
/*
 * Called with shm_ids.rw_mutex and ipcp locked.
 */
static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
                                  struct ipc_params *params)
{
        struct shmid_kernel *shp;

        shp = container_of(ipcp, struct shmid_kernel, shm_perm);
        if (shp->shm_segsz < params->u.size)
                return -EINVAL;
        return 0;
}
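/*
 * sys_shmget() itself only packages the arguments; ipcget() does the
 * namespace-wide key lookup and dispatches to newseg(), shm_security()
 * or shm_more_checks() through the ops table built below.
 */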
asmlinkage long sys_shmget(key_t key, size_t size, int shmflg)
{
        struct ipc_namespace *ns;
        struct ipc_ops shm_ops;
        struct ipc_params shm_params;

        ns = current->nsproxy->ipc_ns;

        shm_ops.getnew = newseg;
        shm_ops.associate = shm_security;
        shm_ops.more_checks = shm_more_checks;

        shm_params.key = key;
        shm_params.flg = shmflg;
        shm_params.u.size = size;

        return ipcget(ns, &shm_ids(ns), &shm_ops, &shm_params);
}
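/*
 * Copy a kernel shmid64_ds out to userspace, translating to the legacy
 * struct shmid_ds layout when the caller used the old IPC_OLD ABI.
 */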
static inline unsigned long copy_shmid_to_user(void __user *buf, struct shmid64_ds *in, int version)
{
        switch (version) {
        case IPC_64:
                return copy_to_user(buf, in, sizeof(*in));
        case IPC_OLD:
            {
                struct shmid_ds out;

                ipc64_perm_to_ipc_perm(&in->shm_perm, &out.shm_perm);
                out.shm_segsz   = in->shm_segsz;
                out.shm_atime   = in->shm_atime;
                out.shm_dtime   = in->shm_dtime;
                out.shm_ctime   = in->shm_ctime;
                out.shm_cpid    = in->shm_cpid;
                out.shm_lpid    = in->shm_lpid;
                out.shm_nattch  = in->shm_nattch;
                return copy_to_user(buf, &out, sizeof(out));
            }
        default:
                return -EINVAL;
        }
}
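/*
 * Fill a kernel shmid64_ds from userspace; only the fields IPC_SET
 * actually uses (uid, gid, mode) are pulled from the old-ABI layout.
 */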
static inline unsigned long
copy_shmid_from_user(struct shmid64_ds *out, void __user *buf, int version)
{
        switch (version) {
        case IPC_64:
                if (copy_from_user(out, buf, sizeof(*out)))
                        return -EFAULT;
                return 0;
        case IPC_OLD:
            {
                struct shmid_ds tbuf_old;

                if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
                        return -EFAULT;

                out->shm_perm.uid       = tbuf_old.shm_perm.uid;
                out->shm_perm.gid       = tbuf_old.shm_perm.gid;
                out->shm_perm.mode      = tbuf_old.shm_perm.mode;
                return 0;
            }
        default:
                return -EINVAL;
        }
}
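/*
 * As above, but for the IPC_INFO limits structure; the old ABI clamps
 * shmmax to INT_MAX because struct shminfo carries plain ints.
 */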
static inline unsigned long copy_shminfo_to_user(void __user *buf, struct shminfo64 *in, int version)
{
        switch (version) {
        case IPC_64:
                return copy_to_user(buf, in, sizeof(*in));
        case IPC_OLD:
            {
                struct shminfo out;

                if (in->shmmax > INT_MAX)
                        out.shmmax = INT_MAX;
                else
                        out.shmmax = (int)in->shmmax;

                out.shmmin      = in->shmmin;
                out.shmmni      = in->shmmni;
                out.shmseg      = in->shmseg;
                out.shmall      = in->shmall;
                return copy_to_user(buf, &out, sizeof(out));
            }
        default:
                return -EINVAL;
        }
}
/*
 * Called with shm_ids.rw_mutex held as a reader
 */
static void shm_get_stat(struct ipc_namespace *ns, unsigned long *rss,
                unsigned long *swp)
{
        int next_id;
        int total, in_use;

        *rss = 0;
        *swp = 0;

        in_use = shm_ids(ns).in_use;

        for (total = 0, next_id = 0; total < in_use; next_id++) {
                struct shmid_kernel *shp;
                struct inode *inode;

                shp = idr_find(&shm_ids(ns).ipcs_idr, next_id);
                if (shp == NULL)
                        continue;

                inode = shp->shm_file->f_path.dentry->d_inode;

                if (is_file_hugepages(shp->shm_file)) {
                        struct address_space *mapping = inode->i_mapping;
                        struct hstate *h = hstate_file(shp->shm_file);
                        *rss += pages_per_huge_page(h) * mapping->nrpages;
                } else {
                        struct shmem_inode_info *info = SHMEM_I(inode);
                        spin_lock(&info->lock);
                        *rss += inode->i_mapping->nrpages;
                        *swp += info->swapped;
                        spin_unlock(&info->lock);
                }
                total++;
        }
}
/*
 * This function handles some shmctl commands which require the rw_mutex
 * to be held in write mode.
 * NOTE: no locks must be held, the rw_mutex is taken inside this function.
 */
static int shmctl_down(struct ipc_namespace *ns, int shmid, int cmd,
                       struct shmid_ds __user *buf, int version)
{
        struct kern_ipc_perm *ipcp;
        struct shmid64_ds shmid64;
        struct shmid_kernel *shp;
        int err;

        if (cmd == IPC_SET) {
                if (copy_shmid_from_user(&shmid64, buf, version))
                        return -EFAULT;
        }

        ipcp = ipcctl_pre_down(&shm_ids(ns), shmid, cmd, &shmid64.shm_perm, 0);
        if (IS_ERR(ipcp))
                return PTR_ERR(ipcp);

        shp = container_of(ipcp, struct shmid_kernel, shm_perm);

        err = security_shm_shmctl(shp, cmd);
        if (err)
                goto out_unlock;
        switch (cmd) {
        case IPC_RMID:
                do_shm_rmid(ns, ipcp);
                goto out_up;
        case IPC_SET:
                ipc_update_perm(&shmid64.shm_perm, ipcp);
                shp->shm_ctim = get_seconds();
                break;
        default:
                err = -EINVAL;
        }

out_unlock:
        shm_unlock(shp);
out_up:
        up_write(&shm_ids(ns).rw_mutex);
        return err;
}
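/*
 * The read-only commands (IPC_INFO, SHM_INFO, SHM_STAT, IPC_STAT) are
 * handled inline below; IPC_RMID and IPC_SET are forwarded to
 * shmctl_down(), and SHM_LOCK/SHM_UNLOCK pin or unpin the backing
 * shmem file in memory.
 */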
asmlinkage long sys_shmctl(int shmid, int cmd, struct shmid_ds __user *buf)
{
        struct shmid_kernel *shp;
        int err, version;
        struct ipc_namespace *ns;

        if (cmd < 0 || shmid < 0) {
                err = -EINVAL;
                goto out;
        }

        version = ipc_parse_version(&cmd);
        ns = current->nsproxy->ipc_ns;

        switch (cmd) { /* replace with proc interface ? */
        case IPC_INFO:
        {
                struct shminfo64 shminfo;

                err = security_shm_shmctl(NULL, cmd);
                if (err)
                        return err;

                memset(&shminfo, 0, sizeof(shminfo));
                shminfo.shmmni = shminfo.shmseg = ns->shm_ctlmni;
                shminfo.shmmax = ns->shm_ctlmax;
                shminfo.shmall = ns->shm_ctlall;

                shminfo.shmmin = SHMMIN;
                if (copy_shminfo_to_user(buf, &shminfo, version))
                        return -EFAULT;

                down_read(&shm_ids(ns).rw_mutex);
                err = ipc_get_maxid(&shm_ids(ns));
                up_read(&shm_ids(ns).rw_mutex);

                if (err < 0)
                        err = 0;
                goto out;
        }
        case SHM_INFO:
        {
                struct shm_info shm_info;

                err = security_shm_shmctl(NULL, cmd);
                if (err)
                        return err;

                memset(&shm_info, 0, sizeof(shm_info));
                down_read(&shm_ids(ns).rw_mutex);
                shm_info.used_ids = shm_ids(ns).in_use;
                shm_get_stat(ns, &shm_info.shm_rss, &shm_info.shm_swp);
                shm_info.shm_tot = ns->shm_tot;
                shm_info.swap_attempts = 0;
                shm_info.swap_successes = 0;
                err = ipc_get_maxid(&shm_ids(ns));
                up_read(&shm_ids(ns).rw_mutex);
                if (copy_to_user(buf, &shm_info, sizeof(shm_info))) {
                        err = -EFAULT;
                        goto out;
                }

                err = err < 0 ? 0 : err;
                goto out;
        }
        case SHM_STAT:
        case IPC_STAT:
        {
                struct shmid64_ds tbuf;
                int result;

                if (cmd == SHM_STAT) {
                        shp = shm_lock(ns, shmid);
                        if (IS_ERR(shp)) {
                                err = PTR_ERR(shp);
                                goto out;
                        }
                        result = shp->shm_perm.id;
                } else {
                        shp = shm_lock_check(ns, shmid);
                        if (IS_ERR(shp)) {
                                err = PTR_ERR(shp);
                                goto out;
                        }
                        result = 0;
                }
                err = -EACCES;
                if (ipcperms(&shp->shm_perm, S_IRUGO))
                        goto out_unlock;
                err = security_shm_shmctl(shp, cmd);
                if (err)
                        goto out_unlock;
                memset(&tbuf, 0, sizeof(tbuf));
                kernel_to_ipc64_perm(&shp->shm_perm, &tbuf.shm_perm);
                tbuf.shm_segsz  = shp->shm_segsz;
                tbuf.shm_atime  = shp->shm_atim;
                tbuf.shm_dtime  = shp->shm_dtim;
                tbuf.shm_ctime  = shp->shm_ctim;
                tbuf.shm_cpid   = shp->shm_cprid;
                tbuf.shm_lpid   = shp->shm_lprid;
                tbuf.shm_nattch = shp->shm_nattch;
                shm_unlock(shp);
                if (copy_shmid_to_user(buf, &tbuf, version))
                        err = -EFAULT;
                else
                        err = result;
                goto out;
        }
        case SHM_LOCK:
        case SHM_UNLOCK:
        {
                struct file *uninitialized_var(shm_file);

                lru_add_drain_all();  /* drain pagevecs to lru lists */

                shp = shm_lock_check(ns, shmid);
                if (IS_ERR(shp)) {
                        err = PTR_ERR(shp);
                        goto out;
                }

                err = audit_ipc_obj(&(shp->shm_perm));
                if (err)
                        goto out_unlock;

                if (!capable(CAP_IPC_LOCK)) {
                        uid_t euid = current_euid();
                        err = -EPERM;
                        if (euid != shp->shm_perm.uid &&
                            euid != shp->shm_perm.cuid)
                                goto out_unlock;
                        if (cmd == SHM_LOCK &&
                            !current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur)
                                goto out_unlock;
                }

                err = security_shm_shmctl(shp, cmd);
                if (err)
                        goto out_unlock;

                if (cmd == SHM_LOCK) {
                        struct user_struct *user = current_user();
                        if (!is_file_hugepages(shp->shm_file)) {
                                err = shmem_lock(shp->shm_file, 1, user);
                                if (!err && !(shp->shm_perm.mode & SHM_LOCKED)) {
                                        shp->shm_perm.mode |= SHM_LOCKED;
                                        shp->mlock_user = user;
                                }
                        }
                } else if (!is_file_hugepages(shp->shm_file)) {
                        shmem_lock(shp->shm_file, 0, shp->mlock_user);
                        shp->shm_perm.mode &= ~SHM_LOCKED;
                        shp->mlock_user = NULL;
                }
                shm_unlock(shp);
                goto out;
        }
        case IPC_RMID:
        case IPC_SET:
                err = shmctl_down(ns, shmid, cmd, buf, version);
                return err;
        default:
                return -EINVAL;
        }

out_unlock:
        shm_unlock(shp);
out:
        return err;
}
/*
 * Fix shmaddr, allocate descriptor, map shm, add attach descriptor to lists.
 *
 * NOTE! Despite the name, this is NOT a direct system call entrypoint. The
 * "raddr" thing points to kernel space, and there has to be a wrapper around
 * this.
 */
long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
{
        struct shmid_kernel *shp;
        unsigned long addr, size, flags, prot, user_addr;
        struct file *file;
        int err, acc_mode;
        struct ipc_namespace *ns;
        struct shm_file_data *sfd;
        struct path path;
        fmode_t f_mode;

        err = -EINVAL;
        if (shmid < 0)
                goto out;
        else if ((addr = (ulong)shmaddr)) {
                if (addr & (SHMLBA-1)) {
                        if (shmflg & SHM_RND)
                                addr &= ~(SHMLBA-1);       /* round down */
                        else
#ifndef __ARCH_FORCE_SHMLBA
                                if (addr & ~PAGE_MASK)
#endif
                                        goto out;
                }
                flags = MAP_SHARED | MAP_FIXED;
        } else {
                if ((shmflg & SHM_REMAP))
                        goto out;
                flags = MAP_SHARED;
        }

        if (shmflg & SHM_RDONLY) {
                prot = PROT_READ;
                acc_mode = S_IRUGO;
                f_mode = FMODE_READ;
        } else {
                prot = PROT_READ | PROT_WRITE;
                acc_mode = S_IRUGO | S_IWUGO;
                f_mode = FMODE_READ | FMODE_WRITE;
        }
        if (shmflg & SHM_EXEC) {
                prot |= PROT_EXEC;
                acc_mode |= S_IXUGO;
        }

        /*
         * We cannot rely on the fs check since SYSV IPC does have an
         * additional creator id...
         */
        ns = current->nsproxy->ipc_ns;
        shp = shm_lock_check(ns, shmid);
        if (IS_ERR(shp)) {
                err = PTR_ERR(shp);
                goto out;
        }

        err = -EACCES;
        if (ipcperms(&shp->shm_perm, acc_mode))
                goto out_unlock;

        err = security_shm_shmat(shp, shmaddr, shmflg);
        if (err)
                goto out_unlock;

        path.dentry = dget(shp->shm_file->f_path.dentry);
        path.mnt    = shp->shm_file->f_path.mnt;
        shp->shm_nattch++;
        size = i_size_read(path.dentry->d_inode);
        shm_unlock(shp);

        err = -ENOMEM;
        sfd = kzalloc(sizeof(*sfd), GFP_KERNEL);
        if (!sfd)
                goto out_put_dentry;

        file = alloc_file(path.mnt, path.dentry, f_mode, &shm_file_operations);
        if (!file)
                goto out_free;

        file->private_data = sfd;
        file->f_mapping = shp->shm_file->f_mapping;
        sfd->id = shp->shm_perm.id;
        sfd->ns = get_ipc_ns(ns);
        sfd->file = shp->shm_file;
        sfd->vm_ops = NULL;

        down_write(&current->mm->mmap_sem);
        if (addr && !(shmflg & SHM_REMAP)) {
                err = -EINVAL;
                if (find_vma_intersection(current->mm, addr, addr + size))
                        goto invalid;
                /*
                 * If shm segment goes below stack, make sure there is some
                 * space left for the stack to grow (at least 4 pages).
                 */
                if (addr < current->mm->start_stack &&
                    addr > current->mm->start_stack - size - PAGE_SIZE * 5)
                        goto invalid;
        }

        user_addr = do_mmap(file, addr, size, prot, flags, 0);
        *raddr = user_addr;
        err = 0;
        if (IS_ERR_VALUE(user_addr))
                err = (long)user_addr;
invalid:
        up_write(&current->mm->mmap_sem);

        fput(file);

out_nattch:
        down_write(&shm_ids(ns).rw_mutex);
        shp = shm_lock(ns, shmid);
        BUG_ON(IS_ERR(shp));
        shp->shm_nattch--;
        if (shp->shm_nattch == 0 &&
            shp->shm_perm.mode & SHM_DEST)
                shm_destroy(ns, shp);
        else
                shm_unlock(shp);
        up_write(&shm_ids(ns).rw_mutex);

out:
        return err;

out_unlock:
        shm_unlock(shp);
        goto out;

out_free:
        kfree(sfd);
out_put_dentry:
        dput(path.dentry);
        goto out_nattch;
}
asmlinkage long sys_shmat(int shmid, char __user *shmaddr, int shmflg)
{
        unsigned long ret;
        long err;

        err = do_shmat(shmid, shmaddr, shmflg, &ret);
        if (err)
                return err;
        force_successful_syscall_return();
        return (long)ret;
}
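/*
 * For reference, a minimal userspace round trip through the entry points
 * above (illustrative only; error handling omitted):
 *
 *      int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
 *      char *p = shmat(id, NULL, 0);   -- maps via do_shmat()/shm_mmap()
 *      p[0] = 1;                       -- faults through shm_fault()
 *      shmdt(p);                       -- unmaps; shm_close() drops nattch
 *      shmctl(id, IPC_RMID, NULL);     -- destroyed once nattch hits 0
 */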
/*
 * detach and kill segment if marked destroyed.
 * The work is done in shm_close.
 */
asmlinkage long sys_shmdt(char __user *shmaddr)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma, *next;
        unsigned long addr = (unsigned long)shmaddr;
        loff_t size = 0;
        int retval = -EINVAL;

        if (addr & ~PAGE_MASK)
                return retval;

        down_write(&mm->mmap_sem);

        /*
         * This function tries to be smart and unmap shm segments that
         * were modified by partial mlock or munmap calls:
         * - It first determines the size of the shm segment that should be
         *   unmapped: It searches for a vma that is backed by shm and that
         *   started at address shmaddr. It records its size and then unmaps
         *   it.
         * - Then it unmaps all shm vmas that started at shmaddr and that
         *   are within the initially determined size.
         * Errors from do_munmap are ignored: the function only fails if
         * it's called with invalid parameters or if it's called to unmap
         * a part of a vma. Both calls in this function are for full vmas,
         * the parameters are directly copied from the vma itself and always
         * valid - therefore do_munmap cannot fail. (famous last words?)
         */
        /*
         * If it had been mremap()'d, the starting address would not
         * match the usual checks anyway. So assume all vma's are
         * above the starting address given.
         */
        vma = find_vma(mm, addr);

        while (vma) {
                next = vma->vm_next;

                /*
                 * Check if the starting address would match, i.e. it's
                 * a fragment created by mprotect() and/or munmap(), or it
                 * otherwise starts at this address with no hassles.
                 */
                if ((vma->vm_ops == &shm_vm_ops) &&
                        (vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) {

                        size = vma->vm_file->f_path.dentry->d_inode->i_size;
                        do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
                        /*
                         * We discovered the size of the shm segment, so
                         * break out of here and fall through to the next
                         * loop that uses the size information to stop
                         * searching for matching vma's.
                         */
                        retval = 0;
                        vma = next;
                        break;
                }
                vma = next;
        }

        /*
         * We need look no further than the maximum address a fragment
         * could possibly have landed at. Also cast things to loff_t to
         * prevent overflows and make comparisons vs. equal-width types.
         */
        size = PAGE_ALIGN(size);
        while (vma && (loff_t)(vma->vm_end - addr) <= size) {
                next = vma->vm_next;

                /* finding a matching vma now does not alter retval */
                if ((vma->vm_ops == &shm_vm_ops) &&
                        (vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff)
                        do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
                vma = next;
        }

        up_write(&mm->mmap_sem);
        return retval;
}
#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it)
{
        struct shmid_kernel *shp = it;

#if BITS_PER_LONG <= 32
#define SIZE_SPEC "%10lu"
#else
#define SIZE_SPEC "%21lu"
#endif

        return seq_printf(s,
                          "%10d %10d  %4o " SIZE_SPEC " %5u %5u  "
                          "%5lu %5u %5u %5u %5u %10lu %10lu %10lu\n",
                          shp->shm_perm.key, shp->shm_perm.id,
                          shp->shm_perm.mode, shp->shm_segsz,
                          shp->shm_cprid, shp->shm_lprid, shp->shm_nattch,
                          shp->shm_perm.uid, shp->shm_perm.gid,
                          shp->shm_perm.cuid, shp->shm_perm.cgid,
                          shp->shm_atim, shp->shm_dtim, shp->shm_ctim);
}
#endif