/*
 * linux/ipc/shm.c
 * Copyright (C) 1992, 1993 Krishna Balasubramanian
 *	 Many improvements/fixes by Bruno Haible.
 *	 Replaced `struct shm_desc' by `struct vm_area_struct', July 1994.
 *	 Fixed the shm swap deallocation (shm_unuse()), August 1998 Andrea Arcangeli.
 *
 * /proc/sysvipc/shm support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 * BIGMEM support, Andrea Arcangeli <andrea@suse.de>
 * SMP thread shm, Jean-Luc Boyard <jean-luc.boyard@siemens.fr>
 * HIGHMEM support, Ingo Molnar <mingo@redhat.com>
 * Make shmmax, shmall, shmmni sysctl'able, Christoph Rohland <cr@sap.com>
 * Shared /dev/zero support, Kanoj Sarcar <kanoj@sgi.com>
 * Move the mm functionality over to mm/shmem.c, Christoph Rohland <cr@sap.com>
 */
#include <linux/config.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/shmem_fs.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>
#include <linux/mutex.h>

#include <asm/uaccess.h>

#include "util.h"
static struct file_operations shm_file_operations;
static struct vm_operations_struct shm_vm_ops;

static struct ipc_ids shm_ids;

#define shm_lock(id)	((struct shmid_kernel *)ipc_lock(&shm_ids, id))
#define shm_unlock(shp)	ipc_unlock(&(shp)->shm_perm)
#define shm_get(id)	((struct shmid_kernel *)ipc_get(&shm_ids, id))
#define shm_buildid(id, seq) \
	ipc_buildid(&shm_ids, id, seq)
static int newseg (key_t key, int shmflg, size_t size);
static void shm_open (struct vm_area_struct *shmd);
static void shm_close (struct vm_area_struct *shmd);
#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
#endif
size_t	shm_ctlmax = SHMMAX;
size_t	shm_ctlall = SHMALL;
int	shm_ctlmni = SHMMNI;

static int shm_tot;	/* total number of shared memory pages */
void __init shm_init (void)
{
	ipc_init_ids(&shm_ids, 1);
	ipc_init_proc_interface("sysvipc/shm",
				"       key      shmid perms       size  cpid  lpid nattch   uid   gid  cuid  cgid      atime      dtime      ctime\n",
				&shm_ids,
				sysvipc_shm_proc_show);
}
static inline int shm_checkid(struct shmid_kernel *s, int id)
{
	if (ipc_checkid(&shm_ids, &s->shm_perm, id))
		return -EIDRM;
	return 0;
}

static inline struct shmid_kernel *shm_rmid(int id)
{
	return (struct shmid_kernel *)ipc_rmid(&shm_ids, id);
}

static inline int shm_addid(struct shmid_kernel *shp)
{
	return ipc_addid(&shm_ids, &shp->shm_perm, shm_ctlmni);
}
static inline void shm_inc (int id)
{
	struct shmid_kernel *shp;

	shp = shm_lock(id);
	BUG_ON(!shp);
	shp->shm_atim = get_seconds();
	shp->shm_lprid = current->tgid;
	shp->shm_nattch++;
	shm_unlock(shp);
}
/* This is called by fork, once for every shm attach. */
static void shm_open (struct vm_area_struct *shmd)
{
	shm_inc (shmd->vm_file->f_dentry->d_inode->i_ino);
}
/*
 * shm_destroy - free the struct shmid_kernel
 *
 * @shp: struct to free
 *
 * It has to be called with shp and shm_ids.mutex locked,
 * but returns with shp unlocked and freed.
 */
static void shm_destroy (struct shmid_kernel *shp)
{
	shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
	shm_rmid(shp->id);
	shm_unlock(shp);
	if (!is_file_hugepages(shp->shm_file))
		shmem_lock(shp->shm_file, 0, shp->mlock_user);
	else
		user_shm_unlock(shp->shm_file->f_dentry->d_inode->i_size,
				shp->mlock_user);
	fput (shp->shm_file);
	security_shm_free(shp);
	ipc_rcu_putref(shp);
}
/*
 * remove the attach descriptor shmd.
 * free memory for segment if it is marked destroyed.
 * The descriptor has already been removed from the current->mm->mmap list
 * and will later be kfree()d.
 */
static void shm_close (struct vm_area_struct *shmd)
{
	struct file * file = shmd->vm_file;
	int id = file->f_dentry->d_inode->i_ino;
	struct shmid_kernel *shp;

	mutex_lock(&shm_ids.mutex);
	/* remove from the list of attaches of the shm segment */
	shp = shm_lock(id);
	BUG_ON(!shp);
	shp->shm_lprid = current->tgid;
	shp->shm_dtim = get_seconds();
	shp->shm_nattch--;
	if (shp->shm_nattch == 0 &&
	    shp->shm_perm.mode & SHM_DEST)
		shm_destroy (shp);
	else
		shm_unlock(shp);
	mutex_unlock(&shm_ids.mutex);
}
static int shm_mmap(struct file * file, struct vm_area_struct * vma)
{
	int ret;

	ret = shmem_mmap(file, vma);
	if (ret == 0) {
		vma->vm_ops = &shm_vm_ops;
		if (!(vma->vm_flags & VM_WRITE))
			vma->vm_flags &= ~VM_MAYWRITE;
		shm_inc(file->f_dentry->d_inode->i_ino);
	}
	return ret;
}
static struct file_operations shm_file_operations = {
	.mmap	= shm_mmap,
#ifndef CONFIG_MMU
	.get_unmapped_area = shmem_get_unmapped_area,
#endif
};
static struct vm_operations_struct shm_vm_ops = {
	.open	= shm_open,	/* callback for a new vm-area open */
	.close	= shm_close,	/* callback for when the vm-area is released */
	.nopage	= shmem_nopage,
#if defined(CONFIG_NUMA) && defined(CONFIG_SHMEM)
	.set_policy = shmem_set_policy,
	.get_policy = shmem_get_policy,
#endif
};
static int newseg (key_t key, int shmflg, size_t size)
{
	int error;
	struct shmid_kernel *shp;
	int numpages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	struct file * file;
	char name[13];
	int id;

	if (size < SHMMIN || size > shm_ctlmax)
		return -EINVAL;

	if (shm_tot + numpages >= shm_ctlall)
		return -ENOSPC;

	shp = ipc_rcu_alloc(sizeof(*shp));
	if (!shp)
		return -ENOMEM;

	shp->shm_perm.key = key;
	shp->shm_perm.mode = (shmflg & S_IRWXUGO);
	shp->mlock_user = NULL;

	shp->shm_perm.security = NULL;
	error = security_shm_alloc(shp);
	if (error) {
		ipc_rcu_putref(shp);
		return error;
	}

	if (shmflg & SHM_HUGETLB) {
		/* hugetlb_zero_setup takes care of mlock user accounting */
		file = hugetlb_zero_setup(size);
		shp->mlock_user = current->user;
	} else {
		int acctflag = VM_ACCOUNT;
		/*
		 * Do not allow no accounting for OVERCOMMIT_NEVER, even
		 * if it's asked for.
		 */
		if ((shmflg & SHM_NORESERVE) &&
		    sysctl_overcommit_memory != OVERCOMMIT_NEVER)
			acctflag = 0;
		sprintf (name, "SYSV%08x", key);
		file = shmem_file_setup(name, size, acctflag);
	}
	error = PTR_ERR(file);
	if (IS_ERR(file))
		goto no_file;

	error = -ENOSPC;
	id = shm_addid(shp);
	if (id == -1)
		goto no_id;

	shp->shm_cprid = current->tgid;
	shp->shm_lprid = 0;
	shp->shm_atim = shp->shm_dtim = 0;
	shp->shm_ctim = get_seconds();
	shp->shm_segsz = size;
	shp->shm_nattch = 0;
	shp->id = shm_buildid(id, shp->shm_perm.seq);
	shp->shm_file = file;
	file->f_dentry->d_inode->i_ino = shp->id;

	/* Hugetlb ops would have already been assigned. */
	if (!(shmflg & SHM_HUGETLB))
		file->f_op = &shm_file_operations;

	shm_tot += numpages;
	shm_unlock(shp);
	return shp->id;

no_id:
	fput(file);
no_file:
	security_shm_free(shp);
	ipc_rcu_putref(shp);
	return error;
}
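/*
 * Worked example of the numpages rounding in newseg() above — a sketch
 * assuming the common PAGE_SIZE of 4096 (PAGE_SHIFT == 12):
 *
 *	size = 1     -> numpages = (1    + 4095) >> 12 = 1
 *	size = 4096  -> numpages = (4096 + 4095) >> 12 = 1
 *	size = 4097  -> numpages = (4097 + 4095) >> 12 = 2
 *
 * so shm_tot and the shm_ctlall limit account whole pages, not bytes.
 */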
asmlinkage long sys_shmget (key_t key, size_t size, int shmflg)
{
	struct shmid_kernel *shp;
	int err, id = 0;

	mutex_lock(&shm_ids.mutex);
	if (key == IPC_PRIVATE) {
		err = newseg(key, shmflg, size);
	} else if ((id = ipc_findkey(&shm_ids, key)) == -1) {
		if (!(shmflg & IPC_CREAT))
			err = -ENOENT;
		else
			err = newseg(key, shmflg, size);
	} else if ((shmflg & IPC_CREAT) && (shmflg & IPC_EXCL)) {
		err = -EEXIST;
	} else {
		shp = shm_lock(id);
		BUG_ON(shp == NULL);
		if (shp->shm_segsz < size)
			err = -EINVAL;
		else if (ipcperms(&shp->shm_perm, shmflg))
			err = -EACCES;
		else {
			int shmid = shm_buildid(id, shp->shm_perm.seq);
			err = security_shm_associate(shp, shmflg);
			if (!err)
				err = shmid;
		}
		shm_unlock(shp);
	}
	mutex_unlock(&shm_ids.mutex);
	return err;
}
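/*
 * Illustrative userspace sketch of the key-lookup branches above (a
 * minimal example, assuming <sys/ipc.h> and <sys/shm.h>; the key and
 * size are arbitrary):
 *
 *	int id = shmget(0x1234, 4096, IPC_CREAT | IPC_EXCL | 0600);
 *	// key unused            -> newseg() creates the segment
 *	// key exists            -> -1, errno EEXIST (IPC_CREAT|IPC_EXCL)
 *	// no IPC_CREAT, no key  -> -1, errno ENOENT
 */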
static inline unsigned long copy_shmid_to_user(void __user *buf, struct shmid64_ds *in, int version)
{
	switch (version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	    {
		struct shmid_ds out;

		ipc64_perm_to_ipc_perm(&in->shm_perm, &out.shm_perm);
		out.shm_segsz	= in->shm_segsz;
		out.shm_atime	= in->shm_atime;
		out.shm_dtime	= in->shm_dtime;
		out.shm_ctime	= in->shm_ctime;
		out.shm_cpid	= in->shm_cpid;
		out.shm_lpid	= in->shm_lpid;
		out.shm_nattch	= in->shm_nattch;

		return copy_to_user(buf, &out, sizeof(out));
	    }
	default:
		return -EINVAL;
	}
}

struct shm_setbuf {
	uid_t	uid;
	gid_t	gid;
	mode_t	mode;
};
static inline unsigned long copy_shmid_from_user(struct shm_setbuf *out, void __user *buf, int version)
{
	switch (version) {
	case IPC_64:
	    {
		struct shmid64_ds tbuf;

		if (copy_from_user(&tbuf, buf, sizeof(tbuf)))
			return -EFAULT;

		out->uid	= tbuf.shm_perm.uid;
		out->gid	= tbuf.shm_perm.gid;
		out->mode	= tbuf.shm_perm.mode;

		return 0;
	    }
	case IPC_OLD:
	    {
		struct shmid_ds tbuf_old;

		if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
			return -EFAULT;

		out->uid	= tbuf_old.shm_perm.uid;
		out->gid	= tbuf_old.shm_perm.gid;
		out->mode	= tbuf_old.shm_perm.mode;

		return 0;
	    }
	default:
		return -EINVAL;
	}
}
static inline unsigned long copy_shminfo_to_user(void __user *buf, struct shminfo64 *in, int version)
{
	switch (version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	    {
		struct shminfo out;

		if (in->shmmax > INT_MAX)
			out.shmmax = INT_MAX;
		else
			out.shmmax = (int)in->shmmax;

		out.shmmin	= in->shmmin;
		out.shmmni	= in->shmmni;
		out.shmseg	= in->shmseg;
		out.shmall	= in->shmall;

		return copy_to_user(buf, &out, sizeof(out));
	    }
	default:
		return -EINVAL;
	}
}
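/*
 * The copy helpers above are picked by the "version" value that
 * ipc_parse_version() computes in sys_shmctl() below. A sketch of the
 * intended behaviour (on architectures that define
 * __ARCH_WANT_IPC_PARSE_VERSION; the rest always get IPC_64):
 *
 *	int cmd = IPC_STAT | IPC_64;
 *	int version = ipc_parse_version(&cmd);
 *	// cmd is now plain IPC_STAT, version == IPC_64
 *	// -> shmid64_ds layout; without the flag, legacy shmid_ds
 */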
static void shm_get_stat(unsigned long *rss, unsigned long *swp)
{
	int i;

	*rss = 0;
	*swp = 0;

	for (i = 0; i <= shm_ids.max_id; i++) {
		struct shmid_kernel *shp;
		struct inode *inode;

		shp = shm_get(i);
		if (!shp)
			continue;

		inode = shp->shm_file->f_dentry->d_inode;

		if (is_file_hugepages(shp->shm_file)) {
			struct address_space *mapping = inode->i_mapping;
			*rss += (HPAGE_SIZE/PAGE_SIZE)*mapping->nrpages;
		} else {
			struct shmem_inode_info *info = SHMEM_I(inode);
			spin_lock(&info->lock);
			*rss += inode->i_mapping->nrpages;
			*swp += info->swapped;
			spin_unlock(&info->lock);
		}
	}
}
asmlinkage long sys_shmctl (int shmid, int cmd, struct shmid_ds __user *buf)
{
	struct shm_setbuf setbuf;
	struct shmid_kernel *shp;
	int err, version;

	if (cmd < 0 || shmid < 0) {
		err = -EINVAL;
		goto out;
	}

	version = ipc_parse_version(&cmd);

	switch (cmd) { /* replace with proc interface ? */
	case IPC_INFO:
	{
		struct shminfo64 shminfo;

		err = security_shm_shmctl(NULL, cmd);
		if (err)
			return err;

		memset(&shminfo, 0, sizeof(shminfo));
		shminfo.shmmni = shminfo.shmseg = shm_ctlmni;
		shminfo.shmmax = shm_ctlmax;
		shminfo.shmall = shm_ctlall;
		shminfo.shmmin = SHMMIN;
		if (copy_shminfo_to_user (buf, &shminfo, version))
			return -EFAULT;
		/* reading an integer is always atomic */
		err = shm_ids.max_id;
		if (err < 0)
			err = 0;
		goto out;
	}
	case SHM_INFO:
	{
		struct shm_info shm_info;

		err = security_shm_shmctl(NULL, cmd);
		if (err)
			return err;

		memset(&shm_info, 0, sizeof(shm_info));
		mutex_lock(&shm_ids.mutex);
		shm_info.used_ids = shm_ids.in_use;
		shm_get_stat (&shm_info.shm_rss, &shm_info.shm_swp);
		shm_info.shm_tot = shm_tot;
		shm_info.swap_attempts = 0;
		shm_info.swap_successes = 0;
		err = shm_ids.max_id;
		mutex_unlock(&shm_ids.mutex);
		if (copy_to_user (buf, &shm_info, sizeof(shm_info))) {
			err = -EFAULT;
			goto out;
		}

		err = err < 0 ? 0 : err;
		goto out;
	}
	case SHM_STAT:
	case IPC_STAT:
	{
		struct shmid64_ds tbuf;
		int result;

		memset(&tbuf, 0, sizeof(tbuf));
		shp = shm_lock(shmid);
		if (shp == NULL) {
			err = -EINVAL;
			goto out;
		} else if (cmd == SHM_STAT) {
			err = -EINVAL;
			if (shmid > shm_ids.max_id)
				goto out_unlock;
			result = shm_buildid(shmid, shp->shm_perm.seq);
		} else {
			err = shm_checkid(shp, shmid);
			if (err)
				goto out_unlock;
			result = 0;
		}
		err = -EACCES;
		if (ipcperms (&shp->shm_perm, S_IRUGO))
			goto out_unlock;
		err = security_shm_shmctl(shp, cmd);
		if (err)
			goto out_unlock;
		kernel_to_ipc64_perm(&shp->shm_perm, &tbuf.shm_perm);
		tbuf.shm_segsz	= shp->shm_segsz;
		tbuf.shm_atime	= shp->shm_atim;
		tbuf.shm_dtime	= shp->shm_dtim;
		tbuf.shm_ctime	= shp->shm_ctim;
		tbuf.shm_cpid	= shp->shm_cprid;
		tbuf.shm_lpid	= shp->shm_lprid;
		if (!is_file_hugepages(shp->shm_file))
			tbuf.shm_nattch = shp->shm_nattch;
		else
			tbuf.shm_nattch = file_count(shp->shm_file) - 1;
		shm_unlock(shp);
		if (copy_shmid_to_user (buf, &tbuf, version))
			err = -EFAULT;
		else
			err = result;
		goto out;
	}
	case SHM_LOCK:
	case SHM_UNLOCK:
	{
		shp = shm_lock(shmid);
		if (shp == NULL) {
			err = -EINVAL;
			goto out;
		}
		err = shm_checkid(shp, shmid);
		if (err)
			goto out_unlock;

		if (!capable(CAP_IPC_LOCK)) {
			err = -EPERM;
			if (current->euid != shp->shm_perm.uid &&
			    current->euid != shp->shm_perm.cuid)
				goto out_unlock;
			if (cmd == SHM_LOCK &&
			    !current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur)
				goto out_unlock;
		}

		err = security_shm_shmctl(shp, cmd);
		if (err)
			goto out_unlock;

		if (cmd == SHM_LOCK) {
			struct user_struct * user = current->user;
			if (!is_file_hugepages(shp->shm_file)) {
				err = shmem_lock(shp->shm_file, 1, user);
				if (!err) {
					shp->shm_perm.mode |= SHM_LOCKED;
					shp->mlock_user = user;
				}
			}
		} else if (!is_file_hugepages(shp->shm_file)) {
			shmem_lock(shp->shm_file, 0, shp->mlock_user);
			shp->shm_perm.mode &= ~SHM_LOCKED;
			shp->mlock_user = NULL;
		}
		shm_unlock(shp);
		goto out;
	}
	case IPC_RMID:
	{
		/*
		 *	We cannot simply remove the file. The SVID states
		 *	that the block remains until the last person
		 *	detaches from it, then is deleted. A shmat() on
		 *	an RMID segment is legal in older Linux and if
		 *	we change it apps break...
		 *
		 *	Instead we set a destroyed flag, and then blow
		 *	the name away when the usage hits zero.
		 */
		mutex_lock(&shm_ids.mutex);
		shp = shm_lock(shmid);
		err = -EINVAL;
		if (shp == NULL)
			goto out_up;
		err = shm_checkid(shp, shmid);
		if (err)
			goto out_unlock_up;

		if (current->euid != shp->shm_perm.uid &&
		    current->euid != shp->shm_perm.cuid &&
		    !capable(CAP_SYS_ADMIN)) {
			err = -EPERM;
			goto out_unlock_up;
		}

		err = security_shm_shmctl(shp, cmd);
		if (err)
			goto out_unlock_up;

		if (shp->shm_nattch) {
			shp->shm_perm.mode |= SHM_DEST;
			/* Do not find it any more */
			shp->shm_perm.key = IPC_PRIVATE;
			shm_unlock(shp);
		} else
			shm_destroy (shp);
		mutex_unlock(&shm_ids.mutex);
		goto out;
	}
	case IPC_SET:
	{
		if (copy_shmid_from_user (&setbuf, buf, version)) {
			err = -EFAULT;
			goto out;
		}
		mutex_lock(&shm_ids.mutex);
		shp = shm_lock(shmid);
		err = -EINVAL;
		if (shp == NULL)
			goto out_up;
		if ((err = audit_ipc_perms(0, setbuf.uid, setbuf.gid,
					   setbuf.mode, &(shp->shm_perm))))
			goto out_unlock_up;
		err = shm_checkid(shp, shmid);
		if (err)
			goto out_unlock_up;
		err = -EPERM;
		if (current->euid != shp->shm_perm.uid &&
		    current->euid != shp->shm_perm.cuid &&
		    !capable(CAP_SYS_ADMIN)) {
			goto out_unlock_up;
		}

		err = security_shm_shmctl(shp, cmd);
		if (err)
			goto out_unlock_up;

		shp->shm_perm.uid = setbuf.uid;
		shp->shm_perm.gid = setbuf.gid;
		shp->shm_perm.mode = (shp->shm_perm.mode & ~S_IRWXUGO)
				     | (setbuf.mode & S_IRWXUGO);
		shp->shm_ctim = get_seconds();
		break;
	}

	default:
		err = -EINVAL;
		goto out;
	}

	err = 0;
out_unlock_up:
	shm_unlock(shp);
out_up:
	mutex_unlock(&shm_ids.mutex);
	goto out;
out_unlock:
	shm_unlock(shp);
out:
	return err;
}
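/*
 * Illustrative userspace sketch of the deferred IPC_RMID semantics
 * implemented above (a minimal example, assuming <sys/shm.h>):
 *
 *	struct shmid_ds ds;
 *	shmctl(id, IPC_STAT, &ds);	// filled from tbuf in IPC_STAT
 *	shmctl(id, IPC_RMID, NULL);	// nattch > 0: only sets SHM_DEST
 *					// and hides the key; the segment
 *					// is destroyed at the last detach
 */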
/*
 * Fix shmaddr, allocate descriptor, map shm, add attach descriptor to lists.
 *
 * NOTE! Despite the name, this is NOT a direct system call entrypoint. The
 * "raddr" thing points to kernel space, and there has to be a wrapper around
 * this.
 */
long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
{
	struct shmid_kernel *shp;
	unsigned long addr;
	unsigned long size;
	struct file * file;
	int    err;
	unsigned long flags;
	unsigned long prot;
	unsigned long o_flags;
	int acc_mode;
	void *user_addr;

	if (shmid < 0) {
		err = -EINVAL;
		goto out;
	} else if ((addr = (ulong)shmaddr)) {
		if (addr & (SHMLBA-1)) {
			if (shmflg & SHM_RND)
				addr &= ~(SHMLBA-1);	/* round down */
			else
#ifndef __ARCH_FORCE_SHMLBA
				if (addr & ~PAGE_MASK)
#endif
					return -EINVAL;
		}
		flags = MAP_SHARED | MAP_FIXED;
	} else {
		if ((shmflg & SHM_REMAP))
			return -EINVAL;

		flags = MAP_SHARED;
	}

	if (shmflg & SHM_RDONLY) {
		prot = PROT_READ;
		o_flags = O_RDONLY;
		acc_mode = S_IRUGO;
	} else {
		prot = PROT_READ | PROT_WRITE;
		o_flags = O_RDWR;
		acc_mode = S_IRUGO | S_IWUGO;
	}
	if (shmflg & SHM_EXEC) {
		prot |= PROT_EXEC;
		acc_mode |= S_IXUGO;
	}

	/*
	 * We cannot rely on the fs check since SYSV IPC does have an
	 * additional creator id...
	 */
	shp = shm_lock(shmid);
	if (shp == NULL) {
		err = -EINVAL;
		goto out;
	}
	err = shm_checkid(shp, shmid);
	if (err) {
		shm_unlock(shp);
		goto out;
	}
	if (ipcperms(&shp->shm_perm, acc_mode)) {
		shm_unlock(shp);
		err = -EACCES;
		goto out;
	}

	err = security_shm_shmat(shp, shmaddr, shmflg);
	if (err) {
		shm_unlock(shp);
		return err;
	}

	file = shp->shm_file;
	size = i_size_read(file->f_dentry->d_inode);
	shp->shm_nattch++;
	shm_unlock(shp);
	down_write(&current->mm->mmap_sem);
	if (addr && !(shmflg & SHM_REMAP)) {
		user_addr = ERR_PTR(-EINVAL);
		if (find_vma_intersection(current->mm, addr, addr + size))
			goto invalid;
		/*
		 * If shm segment goes below stack, make sure there is some
		 * space left for the stack to grow (at least 4 pages).
		 */
		if (addr < current->mm->start_stack &&
		    addr > current->mm->start_stack - size - PAGE_SIZE * 5)
			goto invalid;
	}

	user_addr = (void*) do_mmap (file, addr, size, prot, flags, 0);

invalid:
	up_write(&current->mm->mmap_sem);

	mutex_lock(&shm_ids.mutex);
	shp = shm_lock(shmid);
	BUG_ON(!shp);
	shp->shm_nattch--;
	if (shp->shm_nattch == 0 &&
	    shp->shm_perm.mode & SHM_DEST)
		shm_destroy (shp);
	else
		shm_unlock(shp);
	mutex_unlock(&shm_ids.mutex);

	*raddr = (unsigned long) user_addr;
	err = 0;
	if (IS_ERR(user_addr))
		err = PTR_ERR(user_addr);
out:
	return err;
}
asmlinkage long sys_shmat(int shmid, char __user *shmaddr, int shmflg)
{
	unsigned long ret;
	long err;

	err = do_shmat(shmid, shmaddr, shmflg, &ret);
	if (err)
		return err;
	force_successful_syscall_return();
	return (long)ret;
}
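/*
 * Illustrative userspace sketch of the address handling in do_shmat()
 * above (a minimal example; the hint address is arbitrary):
 *
 *	void *p = shmat(id, (void *)0x40001234, SHM_RND);
 *	// misaligned hint rounded down to an SHMLBA boundary
 *	void *q = shmat(id, NULL, SHM_RDONLY);
 *	// NULL hint: the kernel picks the address (MAP_SHARED without
 *	// MAP_FIXED); SHM_RDONLY maps PROT_READ only
 */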
/*
 * detach and kill segment if marked destroyed.
 * The work is done in shm_close.
 */
asmlinkage long sys_shmdt(char __user *shmaddr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma, *next;
	unsigned long addr = (unsigned long)shmaddr;
	loff_t size = 0;
	int retval = -EINVAL;

	if (addr & ~PAGE_MASK)
		return retval;

	down_write(&mm->mmap_sem);
	/*
	 * This function tries to be smart and unmap shm segments that
	 * were modified by partial mlock or munmap calls:
	 * - It first determines the size of the shm segment that should be
	 *   unmapped: It searches for a vma that is backed by shm and that
	 *   started at address shmaddr. It records its size and then unmaps
	 *   it.
	 * - Then it unmaps all shm vmas that started at shmaddr and that
	 *   are within the initially determined size.
	 * Errors from do_munmap are ignored: the function only fails if
	 * it's called with invalid parameters or if it's called to unmap
	 * a part of a vma. Both calls in this function are for full vmas,
	 * the parameters are directly copied from the vma itself and always
	 * valid - therefore do_munmap cannot fail. (famous last words?)
	 *
	 * If it had been mremap()'d, the starting address would not
	 * match the usual checks anyway. So assume all vma's are
	 * above the starting address given.
	 */
	vma = find_vma(mm, addr);

	while (vma) {
		next = vma->vm_next;

		/*
		 * Check if the starting address would match, i.e. it's
		 * a fragment created by mprotect() and/or munmap(), or
		 * it otherwise starts at this address with no hassles.
		 */
		if ((vma->vm_ops == &shm_vm_ops || is_vm_hugetlb_page(vma)) &&
		    (vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) {

			size = vma->vm_file->f_dentry->d_inode->i_size;
			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
			/*
			 * We discovered the size of the shm segment, so
			 * break out of here and fall through to the next
			 * loop that uses the size information to stop
			 * searching for matching vma's.
			 */
			retval = 0;
			vma = next;
			break;
		}
		vma = next;
	}
	/*
	 * We need look no further than the maximum address a fragment
	 * could possibly have landed at. Also cast things to loff_t to
	 * prevent overflows and make comparisons vs. equal-width types.
	 */
	size = PAGE_ALIGN(size);
	while (vma && (loff_t)(vma->vm_end - addr) <= size) {
		next = vma->vm_next;

		/* finding a matching vma now does not alter retval */
		if ((vma->vm_ops == &shm_vm_ops || is_vm_hugetlb_page(vma)) &&
		    (vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff)
			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);

		vma = next;
	}

	up_write(&mm->mmap_sem);
	return retval;
}
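/*
 * Illustrative userspace sketch of the detach path above (a minimal
 * example; the two-pass vma walk means fragments created by later
 * mprotect()/munmap() calls are detached too):
 *
 *	void *p = shmat(id, NULL, 0);
 *	mprotect(p, 4096, PROT_READ);	// splits the mapping into two vmas
 *	shmdt(p);			// still unmaps both fragments
 */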
#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it)
{
	struct shmid_kernel *shp = it;
	char *format;

#define SMALL_STRING "%10d %10d  %4o %10u %5u %5u  %5d %5u %5u %5u %5u %10lu %10lu %10lu\n"
#define BIG_STRING   "%10d %10d  %4o %21u %5u %5u  %5d %5u %5u %5u %5u %10lu %10lu %10lu\n"

	if (sizeof(size_t) <= sizeof(int))
		format = SMALL_STRING;
	else
		format = BIG_STRING;
	return seq_printf(s, format,
			  shp->shm_perm.key,
			  shp->id,
			  shp->shm_perm.mode,
			  shp->shm_segsz,
			  shp->shm_cprid,
			  shp->shm_lprid,
			  is_file_hugepages(shp->shm_file) ? (file_count(shp->shm_file) - 1) : shp->shm_nattch,
			  shp->shm_perm.uid,
			  shp->shm_perm.gid,
			  shp->shm_perm.cuid,
			  shp->shm_perm.cgid,
			  shp->shm_atim,
			  shp->shm_dtim,
			  shp->shm_ctim);
}
#endif