3 * Copyright (C) 1992, 1993 Krishna Balasubramanian
4 * Many improvements/fixes by Bruno Haible.
5 * Replaced `struct shm_desc' by `struct vm_area_struct', July 1994.
6 * Fixed the shm swap deallocation (shm_unuse()), August 1998 Andrea Arcangeli.
8 * /proc/sysvipc/shm support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
9 * BIGMEM support, Andrea Arcangeli <andrea@suse.de>
10 * SMP thread shm, Jean-Luc Boyard <jean-luc.boyard@siemens.fr>
11 * HIGHMEM support, Ingo Molnar <mingo@redhat.com>
12 * Make shmmax, shmall, shmmni sysctl'able, Christoph Rohland <cr@sap.com>
13 * Shared /dev/zero support, Kanoj Sarcar <kanoj@sgi.com>
14 * Move the mm functionality over to mm/shmem.c, Christoph Rohland <cr@sap.com>
16 * support for audit of ipc object properties and permission changes
17 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
21 * Pavel Emelianov <xemul@openvz.org>
24 #include <linux/slab.h>
26 #include <linux/hugetlb.h>
27 #include <linux/shm.h>
28 #include <linux/init.h>
29 #include <linux/file.h>
30 #include <linux/mman.h>
31 #include <linux/shmem_fs.h>
32 #include <linux/security.h>
33 #include <linux/syscalls.h>
34 #include <linux/audit.h>
35 #include <linux/capability.h>
36 #include <linux/ptrace.h>
37 #include <linux/seq_file.h>
38 #include <linux/rwsem.h>
39 #include <linux/nsproxy.h>
40 #include <linux/mount.h>
42 #include <asm/uaccess.h>
46 struct shm_file_data {
48 struct ipc_namespace *ns;
50 const struct vm_operations_struct *vm_ops;
53 #define shm_file_data(file) (*((struct shm_file_data **)&(file)->private_data))
55 static const struct file_operations shm_file_operations;
56 static struct vm_operations_struct shm_vm_ops;
58 static struct ipc_ids init_shm_ids;
60 #define shm_ids(ns) (*((ns)->ids[IPC_SHM_IDS]))
62 #define shm_unlock(shp) \
63 ipc_unlock(&(shp)->shm_perm)
64 #define shm_buildid(id, seq) ipc_buildid(id, seq)
66 static int newseg(struct ipc_namespace *, struct ipc_params *);
67 static void shm_open(struct vm_area_struct *vma);
68 static void shm_close(struct vm_area_struct *vma);
69 static void shm_destroy (struct ipc_namespace *ns, struct shmid_kernel *shp);
71 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
74 static void __shm_init_ns(struct ipc_namespace *ns, struct ipc_ids *ids)
76 ns->ids[IPC_SHM_IDS] = ids;
77 ns->shm_ctlmax = SHMMAX;
78 ns->shm_ctlall = SHMALL;
79 ns->shm_ctlmni = SHMMNI;
85 * Called with shm_ids.rw_mutex (writer) and the shp structure locked.
86 * Only shm_ids.rw_mutex remains locked on exit.
88 static void do_shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *shp)
91 shp->shm_perm.mode |= SHM_DEST;
92 /* Do not find it any more */
93 shp->shm_perm.key = IPC_PRIVATE;
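/*
 * Note on the two assignments above: setting SHM_DEST and resetting the key
 * to IPC_PRIVATE gives SysV's deferred-destruction semantics.  The segment
 * can no longer be looked up by key via shmget(), existing attaches keep
 * working, and the memory is only released by shm_destroy() once the attach
 * count drops to zero (see shm_close() below).
 */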
99 int shm_init_ns(struct ipc_namespace *ns)
103 ids = kmalloc(sizeof(struct ipc_ids), GFP_KERNEL);
107 __shm_init_ns(ns, ids);
111 void shm_exit_ns(struct ipc_namespace *ns)
113 struct shmid_kernel *shp;
114 struct kern_ipc_perm *perm;
118 down_write(&shm_ids(ns).rw_mutex);
120 in_use = shm_ids(ns).in_use;
122 for (total = 0, next_id = 0; total < in_use; next_id++) {
123 perm = idr_find(&shm_ids(ns).ipcs_idr, next_id);
126 ipc_lock_by_ptr(perm);
127 shp = container_of(perm, struct shmid_kernel, shm_perm);
128 do_shm_rmid(ns, shp);
131 up_write(&shm_ids(ns).rw_mutex);
133 kfree(ns->ids[IPC_SHM_IDS]);
134 ns->ids[IPC_SHM_IDS] = NULL;
137 void __init shm_init (void)
139 __shm_init_ns(&init_ipc_ns, &init_shm_ids);
140 ipc_init_proc_interface("sysvipc/shm",
141 " key shmid perms size cpid lpid nattch uid gid cuid cgid atime dtime ctime\n",
142 IPC_SHM_IDS, sysvipc_shm_proc_show);
146 * shm_lock_(check_)down routines are called in the paths where the rw_mutex
147 * is held to protect access to the idr tree.
149 static inline struct shmid_kernel *shm_lock_down(struct ipc_namespace *ns,
152 struct kern_ipc_perm *ipcp = ipc_lock_down(&shm_ids(ns), id);
155 return (struct shmid_kernel *)ipcp;
157 return container_of(ipcp, struct shmid_kernel, shm_perm);
160 static inline struct shmid_kernel *shm_lock_check_down(
161 struct ipc_namespace *ns,
164 struct kern_ipc_perm *ipcp = ipc_lock_check_down(&shm_ids(ns), id);
167 return (struct shmid_kernel *)ipcp;
169 return container_of(ipcp, struct shmid_kernel, shm_perm);
173 * shm_lock_(check_) routines are called in the paths where the rw_mutex is not held.
176 static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id)
178 struct kern_ipc_perm *ipcp = ipc_lock(&shm_ids(ns), id);
181 return (struct shmid_kernel *)ipcp;
183 return container_of(ipcp, struct shmid_kernel, shm_perm);
186 static inline struct shmid_kernel *shm_lock_check(struct ipc_namespace *ns,
189 struct kern_ipc_perm *ipcp = ipc_lock_check(&shm_ids(ns), id);
192 return (struct shmid_kernel *)ipcp;
194 return container_of(ipcp, struct shmid_kernel, shm_perm);
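/*
 * All four lock helpers above return either a locked shmid_kernel or an
 * ERR_PTR() value, so callers check the result before touching the segment.
 * A minimal sketch of the usual calling pattern (illustrative only):
 *
 *	shp = shm_lock_check(ns, shmid);
 *	if (IS_ERR(shp))
 *		return PTR_ERR(shp);
 *	... read or modify *shp under the per-segment lock ...
 *	shm_unlock(shp);
 */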
197 static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s)
199 ipc_rmid(&shm_ids(ns), &s->shm_perm);
202 static inline int shm_addid(struct ipc_namespace *ns, struct shmid_kernel *shp)
204 return ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
209 /* This is called by fork, once for every shm attach. */
210 static void shm_open(struct vm_area_struct *vma)
212 struct file *file = vma->vm_file;
213 struct shm_file_data *sfd = shm_file_data(file);
214 struct shmid_kernel *shp;
216 shp = shm_lock(sfd->ns, sfd->id);
218 shp->shm_atim = get_seconds();
219 shp->shm_lprid = task_tgid_vnr(current);
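/*
 * shm_open() is the counterpart of shm_close() below: together they keep
 * the segment's attach count (shm_nattch) in step with the number of live
 * mappings, which is what the SHM_DEST destroy-on-last-detach logic
 * depends on.
 */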
225 * shm_destroy - free the struct shmid_kernel
228 * @shp: struct to free
230 * It has to be called with shp and shm_ids.rw_mutex (writer) locked,
231 * but returns with shp unlocked and freed.
233 static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
235 ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
238 if (!is_file_hugepages(shp->shm_file))
239 shmem_lock(shp->shm_file, 0, shp->mlock_user);
241 user_shm_unlock(shp->shm_file->f_path.dentry->d_inode->i_size,
243 fput (shp->shm_file);
244 security_shm_free(shp);
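/*
 * The unlock path above is split because locked hugetlbfs segments charge
 * the pinned pages to the creating user in hugetlb_file_setup() (see the
 * comment in newseg()), so they are released through user_shm_unlock(),
 * while shmem-backed segments are unpinned via shmem_lock(..., 0, ...).
 */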
249 * remove the attach descriptor vma.
250 * free memory for segment if it is marked destroyed.
251 * The descriptor has already been removed from the current->mm->mmap list
252 * and will later be kfree()d.
254 static void shm_close(struct vm_area_struct *vma)
256 struct file * file = vma->vm_file;
257 struct shm_file_data *sfd = shm_file_data(file);
258 struct shmid_kernel *shp;
259 struct ipc_namespace *ns = sfd->ns;
261 down_write(&shm_ids(ns).rw_mutex);
262 /* remove from the list of attaches of the shm segment */
263 shp = shm_lock_down(ns, sfd->id);
265 shp->shm_lprid = task_tgid_vnr(current);
266 shp->shm_dtim = get_seconds();
268 if(shp->shm_nattch == 0 &&
269 shp->shm_perm.mode & SHM_DEST)
270 shm_destroy(ns, shp);
273 up_write(&shm_ids(ns).rw_mutex);
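/*
 * shm_close() takes shm_ids(ns).rw_mutex for writing before re-looking the
 * segment up because it may end up calling shm_destroy(), which (per the
 * comment above) must run with that rw_mutex held as a writer.
 */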
276 static int shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
278 struct file *file = vma->vm_file;
279 struct shm_file_data *sfd = shm_file_data(file);
281 return sfd->vm_ops->fault(vma, vmf);
285 static int shm_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
287 struct file *file = vma->vm_file;
288 struct shm_file_data *sfd = shm_file_data(file);
290 if (sfd->vm_ops->set_policy)
291 err = sfd->vm_ops->set_policy(vma, new);
295 static struct mempolicy *shm_get_policy(struct vm_area_struct *vma,
298 struct file *file = vma->vm_file;
299 struct shm_file_data *sfd = shm_file_data(file);
300 struct mempolicy *pol = NULL;
302 if (sfd->vm_ops->get_policy)
303 pol = sfd->vm_ops->get_policy(vma, addr);
304 else if (vma->vm_policy)
305 pol = vma->vm_policy;
307 pol = current->mempolicy;
312 static int shm_mmap(struct file * file, struct vm_area_struct * vma)
314 struct shm_file_data *sfd = shm_file_data(file);
317 ret = sfd->file->f_op->mmap(sfd->file, vma);
320 sfd->vm_ops = vma->vm_ops;
322 BUG_ON(!sfd->vm_ops->fault);
324 vma->vm_ops = &shm_vm_ops;
330 static int shm_release(struct inode *ino, struct file *file)
332 struct shm_file_data *sfd = shm_file_data(file);
335 shm_file_data(file) = NULL;
340 static int shm_fsync(struct file *file, struct dentry *dentry, int datasync)
342 int (*fsync) (struct file *, struct dentry *, int datasync);
343 struct shm_file_data *sfd = shm_file_data(file);
346 fsync = sfd->file->f_op->fsync;
348 ret = fsync(sfd->file, sfd->file->f_path.dentry, datasync);
352 static unsigned long shm_get_unmapped_area(struct file *file,
353 unsigned long addr, unsigned long len, unsigned long pgoff,
356 struct shm_file_data *sfd = shm_file_data(file);
357 return get_unmapped_area(sfd->file, addr, len, pgoff, flags);
360 int is_file_shm_hugepages(struct file *file)
364 if (file->f_op == &shm_file_operations) {
365 struct shm_file_data *sfd;
366 sfd = shm_file_data(file);
367 ret = is_file_hugepages(sfd->file);
372 static const struct file_operations shm_file_operations = {
375 .release = shm_release,
376 .get_unmapped_area = shm_get_unmapped_area,
379 static struct vm_operations_struct shm_vm_ops = {
380 .open = shm_open, /* callback for a new vm-area open */
381 .close = shm_close, /* callback for when the vm-area is released */
383 #if defined(CONFIG_NUMA)
384 .set_policy = shm_set_policy,
385 .get_policy = shm_get_policy,
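/*
 * Design note: every attach gets its own struct file whose private_data is
 * a struct shm_file_data wrapping the segment's real shmem or hugetlbfs
 * file.  shm_file_operations and shm_vm_ops simply forward mmap, fault,
 * fsync and the NUMA policy hooks to that underlying file, which lets this
 * layer maintain attach statistics (shm_open/shm_close) without
 * reimplementing any of the paging code.
 */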
390 * newseg - Create a new shared memory segment
392 * @params: ptr to the structure that contains key, size and shmflg
394 * Called with shm_ids.rw_mutex held as a writer.
397 static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
399 key_t key = params->key;
400 int shmflg = params->flg;
401 size_t size = params->u.size;
403 struct shmid_kernel *shp;
404 int numpages = (size + PAGE_SIZE -1) >> PAGE_SHIFT;
409 if (size < SHMMIN || size > ns->shm_ctlmax)
412 if (ns->shm_tot + numpages > ns->shm_ctlall)
415 shp = ipc_rcu_alloc(sizeof(*shp));
419 shp->shm_perm.key = key;
420 shp->shm_perm.mode = (shmflg & S_IRWXUGO);
421 shp->mlock_user = NULL;
423 shp->shm_perm.security = NULL;
424 error = security_shm_alloc(shp);
430 sprintf (name, "SYSV%08x", key);
431 if (shmflg & SHM_HUGETLB) {
432 /* hugetlb_file_setup takes care of mlock user accounting */
433 file = hugetlb_file_setup(name, size);
434 shp->mlock_user = current->user;
436 int acctflag = VM_ACCOUNT;
438 * Do not honour SHM_NORESERVE (i.e. no accounting) when the overcommit policy is OVERCOMMIT_NEVER.
441 if ((shmflg & SHM_NORESERVE) &&
442 sysctl_overcommit_memory != OVERCOMMIT_NEVER)
444 file = shmem_file_setup(name, size, acctflag);
446 error = PTR_ERR(file);
450 id = shm_addid(ns, shp);
456 shp->shm_cprid = task_tgid_vnr(current);
458 shp->shm_atim = shp->shm_dtim = 0;
459 shp->shm_ctim = get_seconds();
460 shp->shm_segsz = size;
462 shp->shm_perm.id = shm_buildid(id, shp->shm_perm.seq);
463 shp->shm_file = file;
465 * shmid gets reported as "inode#" in /proc/pid/maps.
466 * proc-ps tools use this. Changing this will break them.
468 file->f_dentry->d_inode->i_ino = shp->shm_perm.id;
470 ns->shm_tot += numpages;
471 error = shp->shm_perm.id;
478 security_shm_free(shp);
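/*
 * Size accounting in newseg() is done in pages: numpages rounds the request
 * up, so for example a 5000-byte request on a 4 KiB-page system consumes two
 * pages of the per-namespace quota (ns->shm_tot), and the request is refused
 * up front if it would exceed shm_ctlmax or push shm_tot past shm_ctlall.
 */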
484 * Called with shm_ids.rw_mutex and ipcp locked.
486 static inline int shm_security(struct kern_ipc_perm *ipcp, int shmflg)
488 struct shmid_kernel *shp;
490 shp = container_of(ipcp, struct shmid_kernel, shm_perm);
491 return security_shm_associate(shp, shmflg);
495 * Called with shm_ids.rw_mutex and ipcp locked.
497 static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
498 struct ipc_params *params)
500 struct shmid_kernel *shp;
502 shp = container_of(ipcp, struct shmid_kernel, shm_perm);
503 if (shp->shm_segsz < params->u.size)
509 asmlinkage long sys_shmget (key_t key, size_t size, int shmflg)
511 struct ipc_namespace *ns;
512 struct ipc_ops shm_ops;
513 struct ipc_params shm_params;
515 ns = current->nsproxy->ipc_ns;
517 shm_ops.getnew = newseg;
518 shm_ops.associate = shm_security;
519 shm_ops.more_checks = shm_more_checks;
521 shm_params.key = key;
522 shm_params.flg = shmflg;
523 shm_params.u.size = size;
525 return ipcget(ns, &shm_ids(ns), &shm_ops, &shm_params);
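/*
 * For reference, the path above is what a plain userspace shmget() call
 * exercises.  A minimal sketch using the usual libc wrappers (the key and
 * size values here are arbitrary, illustrative choices):
 *
 *	#include <stdio.h>
 *	#include <sys/ipc.h>
 *	#include <sys/shm.h>
 *
 *	int id = shmget((key_t)0x1234, 4096, IPC_CREAT | 0600);
 *	if (id < 0)
 *		perror("shmget");	/* EINVAL, ENOSPC, EEXIST, ... */
 */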
528 static inline unsigned long copy_shmid_to_user(void __user *buf, struct shmid64_ds *in, int version)
532 return copy_to_user(buf, in, sizeof(*in));
537 ipc64_perm_to_ipc_perm(&in->shm_perm, &out.shm_perm);
538 out.shm_segsz = in->shm_segsz;
539 out.shm_atime = in->shm_atime;
540 out.shm_dtime = in->shm_dtime;
541 out.shm_ctime = in->shm_ctime;
542 out.shm_cpid = in->shm_cpid;
543 out.shm_lpid = in->shm_lpid;
544 out.shm_nattch = in->shm_nattch;
546 return copy_to_user(buf, &out, sizeof(out));
559 static inline unsigned long copy_shmid_from_user(struct shm_setbuf *out, void __user *buf, int version)
564 struct shmid64_ds tbuf;
566 if (copy_from_user(&tbuf, buf, sizeof(tbuf)))
569 out->uid = tbuf.shm_perm.uid;
570 out->gid = tbuf.shm_perm.gid;
571 out->mode = tbuf.shm_perm.mode;
577 struct shmid_ds tbuf_old;
579 if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
582 out->uid = tbuf_old.shm_perm.uid;
583 out->gid = tbuf_old.shm_perm.gid;
584 out->mode = tbuf_old.shm_perm.mode;
593 static inline unsigned long copy_shminfo_to_user(void __user *buf, struct shminfo64 *in, int version)
597 return copy_to_user(buf, in, sizeof(*in));
602 if(in->shmmax > INT_MAX)
603 out.shmmax = INT_MAX;
605 out.shmmax = (int)in->shmmax;
607 out.shmmin = in->shmmin;
608 out.shmmni = in->shmmni;
609 out.shmseg = in->shmseg;
610 out.shmall = in->shmall;
612 return copy_to_user(buf, &out, sizeof(out));
620 * Called with shm_ids.rw_mutex held as a reader
622 static void shm_get_stat(struct ipc_namespace *ns, unsigned long *rss,
631 in_use = shm_ids(ns).in_use;
633 for (total = 0, next_id = 0; total < in_use; next_id++) {
634 struct shmid_kernel *shp;
637 shp = idr_find(&shm_ids(ns).ipcs_idr, next_id);
641 inode = shp->shm_file->f_path.dentry->d_inode;
643 if (is_file_hugepages(shp->shm_file)) {
644 struct address_space *mapping = inode->i_mapping;
645 *rss += (HPAGE_SIZE/PAGE_SIZE)*mapping->nrpages;
647 struct shmem_inode_info *info = SHMEM_I(inode);
648 spin_lock(&info->lock);
649 *rss += inode->i_mapping->nrpages;
650 *swp += info->swapped;
651 spin_unlock(&info->lock);
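/*
 * shm_get_stat() sums usage over all live segments: hugetlbfs-backed
 * segments contribute their page-cache pages scaled up to small-page units
 * (HPAGE_SIZE/PAGE_SIZE) and never contribute swap, while shmem-backed
 * segments report resident pages from the mapping and swapped-out pages
 * from the shmem inode, both sampled under info->lock.
 */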
658 asmlinkage long sys_shmctl (int shmid, int cmd, struct shmid_ds __user *buf)
660 struct shm_setbuf setbuf;
661 struct shmid_kernel *shp;
663 struct ipc_namespace *ns;
665 if (cmd < 0 || shmid < 0) {
670 version = ipc_parse_version(&cmd);
671 ns = current->nsproxy->ipc_ns;
673 switch (cmd) { /* replace with proc interface ? */
676 struct shminfo64 shminfo;
678 err = security_shm_shmctl(NULL, cmd);
682 memset(&shminfo,0,sizeof(shminfo));
683 shminfo.shmmni = shminfo.shmseg = ns->shm_ctlmni;
684 shminfo.shmmax = ns->shm_ctlmax;
685 shminfo.shmall = ns->shm_ctlall;
687 shminfo.shmmin = SHMMIN;
688 if(copy_shminfo_to_user (buf, &shminfo, version))
691 down_read(&shm_ids(ns).rw_mutex);
692 err = ipc_get_maxid(&shm_ids(ns));
693 up_read(&shm_ids(ns).rw_mutex);
701 struct shm_info shm_info;
703 err = security_shm_shmctl(NULL, cmd);
707 memset(&shm_info,0,sizeof(shm_info));
708 down_read(&shm_ids(ns).rw_mutex);
709 shm_info.used_ids = shm_ids(ns).in_use;
710 shm_get_stat (ns, &shm_info.shm_rss, &shm_info.shm_swp);
711 shm_info.shm_tot = ns->shm_tot;
712 shm_info.swap_attempts = 0;
713 shm_info.swap_successes = 0;
714 err = ipc_get_maxid(&shm_ids(ns));
715 up_read(&shm_ids(ns).rw_mutex);
716 if(copy_to_user (buf, &shm_info, sizeof(shm_info))) {
721 err = err < 0 ? 0 : err;
727 struct shmid64_ds tbuf;
735 if (cmd == SHM_STAT) {
736 shp = shm_lock(ns, shmid);
741 result = shp->shm_perm.id;
743 shp = shm_lock_check(ns, shmid);
751 if (ipcperms (&shp->shm_perm, S_IRUGO))
753 err = security_shm_shmctl(shp, cmd);
756 memset(&tbuf, 0, sizeof(tbuf));
757 kernel_to_ipc64_perm(&shp->shm_perm, &tbuf.shm_perm);
758 tbuf.shm_segsz = shp->shm_segsz;
759 tbuf.shm_atime = shp->shm_atim;
760 tbuf.shm_dtime = shp->shm_dtim;
761 tbuf.shm_ctime = shp->shm_ctim;
762 tbuf.shm_cpid = shp->shm_cprid;
763 tbuf.shm_lpid = shp->shm_lprid;
764 tbuf.shm_nattch = shp->shm_nattch;
766 if(copy_shmid_to_user (buf, &tbuf, version))
775 shp = shm_lock_check(ns, shmid);
781 err = audit_ipc_obj(&(shp->shm_perm));
785 if (!capable(CAP_IPC_LOCK)) {
787 if (current->euid != shp->shm_perm.uid &&
788 current->euid != shp->shm_perm.cuid)
790 if (cmd == SHM_LOCK &&
791 !current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur)
795 err = security_shm_shmctl(shp, cmd);
800 struct user_struct * user = current->user;
801 if (!is_file_hugepages(shp->shm_file)) {
802 err = shmem_lock(shp->shm_file, 1, user);
803 if (!err && !(shp->shm_perm.mode & SHM_LOCKED)){
804 shp->shm_perm.mode |= SHM_LOCKED;
805 shp->mlock_user = user;
808 } else if (!is_file_hugepages(shp->shm_file)) {
809 shmem_lock(shp->shm_file, 0, shp->mlock_user);
810 shp->shm_perm.mode &= ~SHM_LOCKED;
811 shp->mlock_user = NULL;
819 * We cannot simply remove the file. The SVID states
820 * that the block remains until the last person
821 * detaches from it, then is deleted. A shmat() on
822 * an RMID segment is legal in older Linux and if
823 * we change it apps break...
825 * Instead we set a destroyed flag, and then blow
826 * the name away when the usage hits zero.
828 down_write(&shm_ids(ns).rw_mutex);
829 shp = shm_lock_check_down(ns, shmid);
835 err = audit_ipc_obj(&(shp->shm_perm));
839 if (current->euid != shp->shm_perm.uid &&
840 current->euid != shp->shm_perm.cuid &&
841 !capable(CAP_SYS_ADMIN)) {
846 err = security_shm_shmctl(shp, cmd);
850 do_shm_rmid(ns, shp);
851 up_write(&shm_ids(ns).rw_mutex);
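/*
 * A minimal userspace sketch of the behaviour described in the comment
 * above (libc wrappers assumed; error handling omitted): the segment
 * remains usable after IPC_RMID until the last detach.
 *
 *	int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
 *	char *p = shmat(id, NULL, 0);
 *	shmctl(id, IPC_RMID, NULL);	/* marked SHM_DEST, key hidden  */
 *	p[0] = 1;			/* still valid, we are attached */
 *	shmdt(p);			/* last detach -> shm_destroy() */
 */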
862 if (copy_shmid_from_user (&setbuf, buf, version)) {
866 down_write(&shm_ids(ns).rw_mutex);
867 shp = shm_lock_check_down(ns, shmid);
872 err = audit_ipc_obj(&(shp->shm_perm));
875 err = audit_ipc_set_perm(0, setbuf.uid, setbuf.gid, setbuf.mode);
879 if (current->euid != shp->shm_perm.uid &&
880 current->euid != shp->shm_perm.cuid &&
881 !capable(CAP_SYS_ADMIN)) {
885 err = security_shm_shmctl(shp, cmd);
889 shp->shm_perm.uid = setbuf.uid;
890 shp->shm_perm.gid = setbuf.gid;
891 shp->shm_perm.mode = (shp->shm_perm.mode & ~S_IRWXUGO)
892 | (setbuf.mode & S_IRWXUGO);
893 shp->shm_ctim = get_seconds();
906 up_write(&shm_ids(ns).rw_mutex);
915 * Fix shmaddr, allocate descriptor, map shm, add attach descriptor to lists.
917 * NOTE! Despite the name, this is NOT a direct system call entrypoint. The
918 * "raddr" thing points to kernel space, and there has to be a wrapper around
921 long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
923 struct shmid_kernel *shp;
931 unsigned long user_addr;
932 struct ipc_namespace *ns;
933 struct shm_file_data *sfd;
940 else if ((addr = (ulong)shmaddr)) {
941 if (addr & (SHMLBA-1)) {
942 if (shmflg & SHM_RND)
943 addr &= ~(SHMLBA-1); /* round down */
945 #ifndef __ARCH_FORCE_SHMLBA
946 if (addr & ~PAGE_MASK)
950 flags = MAP_SHARED | MAP_FIXED;
952 if ((shmflg & SHM_REMAP))
958 if (shmflg & SHM_RDONLY) {
963 prot = PROT_READ | PROT_WRITE;
964 acc_mode = S_IRUGO | S_IWUGO;
965 f_mode = FMODE_READ | FMODE_WRITE;
967 if (shmflg & SHM_EXEC) {
973 * We cannot rely on the fs check since SYSV IPC does have an
974 * additional creator id...
976 ns = current->nsproxy->ipc_ns;
977 shp = shm_lock_check(ns, shmid);
984 if (ipcperms(&shp->shm_perm, acc_mode))
987 err = security_shm_shmat(shp, shmaddr, shmflg);
991 path.dentry = dget(shp->shm_file->f_path.dentry);
992 path.mnt = shp->shm_file->f_path.mnt;
994 size = i_size_read(path.dentry->d_inode);
998 sfd = kzalloc(sizeof(*sfd), GFP_KERNEL);
1000 goto out_put_dentry;
1004 file = alloc_file(path.mnt, path.dentry, f_mode, &shm_file_operations);
1008 file->private_data = sfd;
1009 file->f_mapping = shp->shm_file->f_mapping;
1010 sfd->id = shp->shm_perm.id;
1011 sfd->ns = get_ipc_ns(ns);
1012 sfd->file = shp->shm_file;
1015 down_write(&current->mm->mmap_sem);
1016 if (addr && !(shmflg & SHM_REMAP)) {
1018 if (find_vma_intersection(current->mm, addr, addr + size))
1021 * If shm segment goes below stack, make sure there is some
1022 * space left for the stack to grow (at least 4 pages).
1024 if (addr < current->mm->start_stack &&
1025 addr > current->mm->start_stack - size - PAGE_SIZE * 5)
1029 user_addr = do_mmap (file, addr, size, prot, flags, 0);
1032 if (IS_ERR_VALUE(user_addr))
1033 err = (long)user_addr;
1035 up_write(&current->mm->mmap_sem);
1040 down_write(&shm_ids(ns).rw_mutex);
1041 shp = shm_lock_down(ns, shmid);
1042 BUG_ON(IS_ERR(shp));
1044 if(shp->shm_nattch == 0 &&
1045 shp->shm_perm.mode & SHM_DEST)
1046 shm_destroy(ns, shp);
1049 up_write(&shm_ids(ns).rw_mutex);
1065 asmlinkage long sys_shmat(int shmid, char __user *shmaddr, int shmflg)
1070 err = do_shmat(shmid, shmaddr, shmflg, &ret);
1073 force_successful_syscall_return();
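/*
 * sys_shmat() stays a thin wrapper: do_shmat() reports the mapped address
 * through the kernel-space raddr pointer (see the note above do_shmat()),
 * and force_successful_syscall_return() is used so that a large attach
 * address in the return value is not misinterpreted as an errno by the
 * syscall exit path.
 */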
1078 * detach and kill segment if marked destroyed.
1079 * The work is done in shm_close.
1081 asmlinkage long sys_shmdt(char __user *shmaddr)
1083 struct mm_struct *mm = current->mm;
1084 struct vm_area_struct *vma, *next;
1085 unsigned long addr = (unsigned long)shmaddr;
1087 int retval = -EINVAL;
1089 if (addr & ~PAGE_MASK)
1092 down_write(&mm->mmap_sem);
1095 * This function tries to be smart and unmap shm segments that
1096 * were modified by partial mlock or munmap calls:
1097 * - It first determines the size of the shm segment that should be
1098 * unmapped: It searches for a vma that is backed by shm and that
1099 * started at address shmaddr. It records its size and then unmaps it.
1101 * - Then it unmaps all shm vmas that started at shmaddr and that
1102 * are within the initially determined size.
1103 * Errors from do_munmap are ignored: the function only fails if
1104 * it's called with invalid parameters or if it's called to unmap
1105 * a part of a vma. Both calls in this function are for full vmas,
1106 * the parameters are directly copied from the vma itself and always
1107 * valid - therefore do_munmap cannot fail. (famous last words?)
1110 * If it had been mremap()'d, the starting address would not
1111 * match the usual checks anyway. So assume all vma's are
1112 * above the starting address given.
1114 vma = find_vma(mm, addr);
1117 next = vma->vm_next;
1120 * Check if the starting address would match, i.e. it's
1121 * a fragment created by mprotect() and/or munmap(), or it
1122 * otherwise starts at this address with no hassles.
1124 if ((vma->vm_ops == &shm_vm_ops) &&
1125 (vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) {
1128 size = vma->vm_file->f_path.dentry->d_inode->i_size;
1129 do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
1131 * We discovered the size of the shm segment, so
1132 * break out of here and fall through to the next
1133 * loop that uses the size information to stop
1134 * searching for matching vma's.
1144 * We need look no further than the maximum address a fragment
1145 * could possibly have landed at. Also cast things to loff_t to
1146 * prevent overflows and make comparisons vs. equal-width types.
1148 size = PAGE_ALIGN(size);
1149 while (vma && (loff_t)(vma->vm_end - addr) <= size) {
1150 next = vma->vm_next;
1152 /* finding a matching vma now does not alter retval */
1153 if ((vma->vm_ops == &shm_vm_ops) &&
1154 (vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff)
1156 do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
1160 up_write(&mm->mmap_sem);
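/*
 * Concrete example of the two-pass scheme described above: if an attach at
 * shmaddr covering three pages was later split by mprotect() into three
 * vmas, the first pass finds the vma that still starts at shmaddr, reads
 * the full segment size from the backing inode and unmaps that piece; the
 * second pass then unmaps the remaining fragments, recognising them by the
 * (vm_start - shmaddr)/PAGE_SIZE == vm_pgoff test, i.e. their file offset
 * still matches their distance from the original attach address.
 */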
1164 #ifdef CONFIG_PROC_FS
1165 static int sysvipc_shm_proc_show(struct seq_file *s, void *it)
1167 struct shmid_kernel *shp = it;
1170 #define SMALL_STRING "%10d %10d %4o %10u %5u %5u %5d %5u %5u %5u %5u %10lu %10lu %10lu\n"
1171 #define BIG_STRING "%10d %10d %4o %21u %5u %5u %5d %5u %5u %5u %5u %10lu %10lu %10lu\n"
1173 if (sizeof(size_t) <= sizeof(int))
1174 format = SMALL_STRING;
1176 format = BIG_STRING;
1177 return seq_printf(s, format,