/*
 *  linux/drivers/char/mem.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
 *  Shared /dev/zero mmapping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
 */
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mman.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/raw.h>
#include <linux/tty.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/crash_dump.h>
#include <linux/backing-dev.h>
#include <linux/bootmem.h>
#include <linux/pipe_fs_i.h>
#include <linux/pfn.h>

#include <asm/uaccess.h>

#ifdef CONFIG_IA64
# include <linux/efi.h>
#endif

/*
 * Architectures vary in how they handle caching for addresses
 * outside of main memory.
 */
static inline int uncached_access(struct file *file, unsigned long addr)
{
#if defined(__i386__)
	/*
	 * On the PPro and successors, the MTRRs are used to set
	 * memory types for physical addresses outside main memory,
	 * so blindly setting PCD or PWT on those pages is wrong.
	 * For Pentiums and earlier, the surround logic should disable
	 * caching for the high addresses through the KEN pin, but
	 * we maintain the tradition of paranoia in this code.
	 */
	if (file->f_flags & O_SYNC)
		return 1;
	return !(test_bit(X86_FEATURE_MTRR, boot_cpu_data.x86_capability) ||
		 test_bit(X86_FEATURE_K6_MTRR, boot_cpu_data.x86_capability) ||
		 test_bit(X86_FEATURE_CYRIX_ARR, boot_cpu_data.x86_capability) ||
		 test_bit(X86_FEATURE_CENTAUR_MCR, boot_cpu_data.x86_capability))
	       && addr >= __pa(high_memory);
#elif defined(__x86_64__)
	/*
	 * This is broken because it can generate memory type aliases,
	 * which can cause cache corruption.
	 * But it is only available to root and we have to be bug-to-bug
	 * compatible with i386.
	 */
	if (file->f_flags & O_SYNC)
		return 1;
	/* Same behaviour as i386: PAT is always set to cached and MTRRs
	   control the caching behaviour.
	   Hopefully a full PAT implementation will fix that soon. */
	return 0;
#elif defined(CONFIG_IA64)
	/*
	 * On ia64, we ignore O_SYNC because we cannot tolerate memory
	 * attribute aliases.
	 */
	return !(efi_mem_attributes(addr) & EFI_MEMORY_WB);
#else
	/*
	 * Accessing memory above the top of memory the kernel knows about,
	 * or through a file pointer that was marked O_SYNC, will be done
	 * non-cached.
	 */
	if (file->f_flags & O_SYNC)
		return 1;
	return addr >= __pa(high_memory);
#endif
}

#ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
static inline int valid_phys_addr_range(unsigned long addr, size_t count)
{
	if (addr + count > __pa(high_memory))
		return 0;
	return 1;
}

static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
	return 1;
}
#endif
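
/*
 * Illustrative helper (editorial sketch, not part of the original driver):
 * the read/write loops below repeatedly size their first chunk with the
 * expression "-p & (PAGE_SIZE - 1)", which is the distance from p up to
 * the next page boundary.  For example, with PAGE_SIZE == 4096 and
 * p == 0x12345 it yields 0xcbb (4096 - 0x345); a page-aligned p yields 0,
 * in which case a full PAGE_SIZE chunk is used instead.
 */
static inline unsigned long bytes_to_page_boundary(unsigned long p)
{
	return -p & (PAGE_SIZE - 1);	/* 0 when p is already page aligned */
}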

/*
 * This function reads the *physical* memory.  The f_pos points directly
 * to the memory location.
 */
static ssize_t read_mem(struct file * file, char __user * buf,
			size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;

	if (!valid_phys_addr_range(p, count))
		return -EFAULT;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (clear_user(buf, sz))
		return -EFAULT;
		/* Handle first page in case it's not aligned */
		if (-p & (PAGE_SIZE - 1))
			sz = -p & (PAGE_SIZE - 1);
		else
			sz = PAGE_SIZE;
		sz = min_t(unsigned long, sz, count);
		/*
		 * On ia64 if a page has been mapped somewhere as
		 * uncached, then it must also be accessed uncached
		 * by the kernel or data corruption may occur.
		 */
		ptr = xlate_dev_mem_ptr(p);
		if (copy_to_user(buf, ptr, sz))
			return -EFAULT;

static ssize_t write_mem(struct file * file, const char __user * buf,
			 size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	unsigned long copied;

	if (!valid_phys_addr_range(p, count))
		return -EFAULT;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (p < PAGE_SIZE) {
		unsigned long sz = PAGE_SIZE - p;
		/* Hmm. Do something? */
		/* Handle first page in case it's not aligned */
		if (-p & (PAGE_SIZE - 1))
			sz = -p & (PAGE_SIZE - 1);
		else
			sz = PAGE_SIZE;
		sz = min_t(unsigned long, sz, count);
		/*
		 * On ia64 if a page has been mapped somewhere as
		 * uncached, then it must also be accessed uncached
		 * by the kernel or data corruption may occur.
		 */
		ptr = xlate_dev_mem_ptr(p);
		copied = copy_from_user(ptr, buf, sz);
		if (copied) {
			written += sz - copied;

#ifndef __HAVE_PHYS_MEM_ACCESS_PROT
static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot)
{
#ifdef pgprot_noncached
	unsigned long offset = pfn << PAGE_SHIFT;

	if (uncached_access(file, offset))
		return pgprot_noncached(vma_prot);
#endif
	return vma_prot;
}
#endif

#ifndef CONFIG_MMU
static unsigned long get_unmapped_area_mem(struct file *file,
		unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	if (!valid_mmap_phys_addr_range(pgoff, len))
		return (unsigned long) -EINVAL;
	return pgoff << PAGE_SHIFT;
}

/* can't do an in-place private mapping if there's no MMU */
static inline int private_mapping_ok(struct vm_area_struct *vma)
{
	return vma->vm_flags & VM_MAYSHARE;
}
#else
#define get_unmapped_area_mem	NULL

static inline int private_mapping_ok(struct vm_area_struct *vma)
{
	return 1;
}
#endif

static int mmap_mem(struct file * file, struct vm_area_struct * vma)
{
	size_t size = vma->vm_end - vma->vm_start;

	if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
		return -EINVAL;
	if (!private_mapping_ok(vma))
		return -ENOSYS;

	vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
						 size, vma->vm_page_prot);

	/* Remap-pfn-range will mark the range VM_IO and VM_RESERVED */
	if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			    size, vma->vm_page_prot))
		return -EAGAIN;
	return 0;
}

static int mmap_kmem(struct file * file, struct vm_area_struct * vma)
{
	unsigned long pfn;

	/* Turn a kernel-virtual address into a physical page frame */
	pfn = __pa((u64)vma->vm_pgoff << PAGE_SHIFT) >> PAGE_SHIFT;
	/*
	 * RED-PEN: on some architectures there is more mapped memory
	 * than available in mem_map which pfn_valid checks
	 * for.  Perhaps should add a new macro here.
	 *
	 * RED-PEN: vmalloc is not supported right now.
	 */
	vma->vm_pgoff = pfn;
	return mmap_mem(file, vma);
}
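
/*
 * Editorial sketch (not in the original file): the pgoff rewrite that
 * mmap_kmem() performs above, pulled out as a stand-alone helper for
 * clarity.  Given a pgoff that encodes a kernel-virtual address, it
 * returns the physical page frame number, assuming the address lies in
 * the directly mapped region that __pa() can translate.
 */
static inline unsigned long kvirt_pgoff_to_pfn(unsigned long pgoff)
{
	unsigned long kvaddr = (unsigned long)((u64)pgoff << PAGE_SHIFT);

	return __pa(kvaddr) >> PAGE_SHIFT;	/* physical frame number */
}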

#ifdef CONFIG_CRASH_DUMP
/*
 * Read memory corresponding to the old kernel.
 */
static ssize_t read_oldmem(struct file *file, char __user *buf,
			   size_t count, loff_t *ppos)
{
	unsigned long pfn, offset;
	size_t read = 0, csize;
	int rc = 0;

	pfn = *ppos / PAGE_SIZE;
	if (pfn > saved_max_pfn)
		return read;

	offset = (unsigned long)(*ppos % PAGE_SIZE);
	if (count > PAGE_SIZE - offset)
		csize = PAGE_SIZE - offset;
	else
		csize = count;

	rc = copy_oldmem_page(pfn, buf, csize, offset, 1);

extern long vread(char *buf, char *addr, unsigned long count);
extern long vwrite(char *buf, char *addr, unsigned long count);
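
/*
 * Note: read_kmem() and write_kmem() below only need vread()/vwrite() for
 * the vmalloc range.  Directly mapped low memory is copied straight
 * through xlate_dev_kmem_ptr(); the vmalloc area is staged through a
 * single GFP_KERNEL bounce page, because vread()/vwrite() want a kernel
 * buffer while they walk the vmlist under vmlist_lock.
 */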

/*
 * This function reads the *virtual* memory as seen by the kernel.
 */
static ssize_t read_kmem(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t low_count, read, sz;
	char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */

	if (p < (unsigned long) high_memory) {
		low_count = count;
		if (count > (unsigned long) high_memory - p)
			low_count = (unsigned long) high_memory - p;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
		/* we don't have page 0 mapped on sparc and m68k.. */
		if (p < PAGE_SIZE && low_count > 0) {
			size_t tmp = PAGE_SIZE - p;
			if (tmp > low_count)
				tmp = low_count;
			if (clear_user(buf, tmp))
				return -EFAULT;
		while (low_count > 0) {
			/* Handle first page in case it's not aligned */
			if (-p & (PAGE_SIZE - 1))
				sz = -p & (PAGE_SIZE - 1);
			else
				sz = PAGE_SIZE;
			sz = min_t(unsigned long, sz, low_count);
			/*
			 * On ia64 if a page has been mapped somewhere as
			 * uncached, then it must also be accessed uncached
			 * by the kernel or data corruption may occur.
			 */
			kbuf = xlate_dev_kmem_ptr((char *)p);
			if (copy_to_user(buf, kbuf, sz))
				return -EFAULT;
	kbuf = (char *)__get_free_page(GFP_KERNEL);
	if (!kbuf)
		return -ENOMEM;
	len = vread(kbuf, (char *)p, len);
	if (copy_to_user(buf, kbuf, len)) {
		free_page((unsigned long)kbuf);
		return -EFAULT;
	}
	free_page((unsigned long)kbuf);

static inline ssize_t
do_write_kmem(void *p, unsigned long realp, const char __user * buf,
	      size_t count, loff_t *ppos)
{
	unsigned long copied;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (realp < PAGE_SIZE) {
		unsigned long sz = PAGE_SIZE - realp;
		/* Hmm. Do something? */
		/* Handle first page in case it's not aligned */
		if (-realp & (PAGE_SIZE - 1))
			sz = -realp & (PAGE_SIZE - 1);
		else
			sz = PAGE_SIZE;
		sz = min_t(unsigned long, sz, count);
		/*
		 * On ia64 if a page has been mapped somewhere as
		 * uncached, then it must also be accessed uncached
		 * by the kernel or data corruption may occur.
		 */
		ptr = xlate_dev_kmem_ptr(p);
		copied = copy_from_user(ptr, buf, sz);
		if (copied) {
			written += sz - copied;

/*
 * This function writes to the *virtual* memory as seen by the kernel.
 */
static ssize_t write_kmem(struct file * file, const char __user * buf,
			  size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	char * kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */

	if (p < (unsigned long) high_memory) {
		wrote = count;
		if (count > (unsigned long) high_memory - p)
			wrote = (unsigned long) high_memory - p;

		written = do_write_kmem((void*)p, p, buf, wrote, ppos);
		if (written != wrote)
			return written;

	kbuf = (char *)__get_free_page(GFP_KERNEL);
	if (!kbuf)
		return wrote ? wrote : -ENOMEM;
	written = copy_from_user(kbuf, buf, len);
	free_page((unsigned long)kbuf);
	len = vwrite(kbuf, (char *)p, len);
	free_page((unsigned long)kbuf);

	return virtr + wrote;
}

#ifdef CONFIG_DEVPORT
static ssize_t read_port(struct file * file, char __user * buf,
			 size_t count, loff_t *ppos)
{
	unsigned long i = *ppos;
	char __user *tmp = buf;

	if (!access_ok(VERIFY_WRITE, buf, count))
		return -EFAULT;
	while (count-- > 0 && i < 65536) {
		if (__put_user(inb(i), tmp) < 0)
			return -EFAULT;

static ssize_t write_port(struct file * file, const char __user * buf,
			  size_t count, loff_t *ppos)
{
	unsigned long i = *ppos;
	const char __user * tmp = buf;

	if (!access_ok(VERIFY_READ, buf, count))
		return -EFAULT;
	while (count-- > 0 && i < 65536) {
		char c;
		if (__get_user(c, tmp)) {

static ssize_t read_null(struct file * file, char __user * buf,
			 size_t count, loff_t *ppos)
{
	return 0;
}

static ssize_t write_null(struct file * file, const char __user * buf,
			  size_t count, loff_t *ppos)
{
	return count;
}

static int pipe_to_null(struct pipe_inode_info *info, struct pipe_buffer *buf,
			struct splice_desc *sd)
{
	return sd->len;
}

static ssize_t splice_write_null(struct pipe_inode_info *pipe, struct file *out,
				 loff_t *ppos, size_t len, unsigned int flags)
{
	return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_null);
}
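
/*
 * Usage note (editorial): with the splice_write hook above, userspace can
 * drain a pipe into /dev/null without copying the data around, e.g.
 *
 *	splice(pipe_fd, NULL, devnull_fd, NULL, len, 0);
 *
 * pipe_to_null() claims every buffer as consumed, so splice_from_pipe()
 * simply discards the pipe contents.  pipe_fd, devnull_fd and len are
 * placeholders.
 */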

#ifdef CONFIG_MMU
/*
 * For fun, we are using the MMU for this.
 */
static inline size_t read_zero_pagealigned(char __user * buf, size_t size)
{
	struct mm_struct *mm;
	struct vm_area_struct * vma;
	unsigned long addr = (unsigned long)buf;

	mm = current->mm;
	/* Oops, this was forgotten before. -ben */
	down_read(&mm->mmap_sem);

	/* For private mappings, just map in zero pages. */
	for (vma = find_vma(mm, addr); vma; vma = vma->vm_next) {
		unsigned long count;

		if (vma->vm_start > addr || (vma->vm_flags & VM_WRITE) == 0)
			goto out_up;
		if (vma->vm_flags & (VM_SHARED | VM_HUGETLB))
			break;
		count = vma->vm_end - addr;
		zap_page_range(vma, addr, count, NULL);
		if (zeromap_page_range(vma, addr, count, PAGE_COPY))
			break;
	}
	up_read(&mm->mmap_sem);

	/* The shared case is hard.  Let's do the conventional zeroing. */
	unsigned long unwritten = clear_user(buf, PAGE_SIZE);
	if (unwritten)
		return size + unwritten - PAGE_SIZE;

out_up:
	up_read(&mm->mmap_sem);
	return size;
}
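
/*
 * Note: for a private, writable, non-hugetlb VMA the function above does
 * not copy anything into the user pages; it drops whatever currently backs
 * the range (zap_page_range) and maps the zero page over it instead
 * (zeromap_page_range with PAGE_COPY), so the "read" is satisfied by
 * copy-on-write mappings of the zero page.  Shared and hugetlb mappings
 * fall back to the conventional clear_user() loop.
 */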

static ssize_t read_zero(struct file * file, char __user * buf,
			 size_t count, loff_t *ppos)
{
	unsigned long left, unwritten, written = 0;

	if (!access_ok(VERIFY_WRITE, buf, count))
		return -EFAULT;

	left = count;

	/* do we want to be clever? Arbitrary cut-off */
	if (count >= PAGE_SIZE*4) {
		unsigned long partial;

		/* How much left of the page? */
		partial = (PAGE_SIZE-1) & -(unsigned long) buf;
		unwritten = clear_user(buf, partial);
		written = partial - unwritten;

		unwritten = read_zero_pagealigned(buf, left & PAGE_MASK);
		written += (left & PAGE_MASK) - unwritten;

		buf += left & PAGE_MASK;
	}
	unwritten = clear_user(buf, left);
	written += left - unwritten;

	return written ? written : -EFAULT;
}
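
/*
 * Note: for large requests read_zero() works in three steps: clear_user()
 * up to the next page boundary (the "partial" head), read_zero_pagealigned()
 * for the whole pages in the middle, and a final clear_user() for the tail
 * shorter than a page.  For example, with 4 KiB pages a 20000-byte read
 * starting 0x345 bytes into a page splits into a 3259-byte head, 16384
 * bytes (4 pages) handed to read_zero_pagealigned(), and a 357-byte tail.
 */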

static int mmap_zero(struct file * file, struct vm_area_struct * vma)
{
	int err;

	if (vma->vm_flags & VM_SHARED)
		return shmem_zero_setup(vma);
	err = zeromap_page_range(vma, vma->vm_start,
				 vma->vm_end - vma->vm_start, vma->vm_page_prot);
	BUG_ON(err == -EEXIST);
	return err;
}

#else /* CONFIG_MMU */
static ssize_t read_zero(struct file * file, char * buf,
			 size_t count, loff_t *ppos)
{
	chunk = 4096;	/* Just for latency reasons */
	if (clear_user(buf, chunk))
		return -EFAULT;

static int mmap_zero(struct file * file, struct vm_area_struct * vma)
#endif /* CONFIG_MMU */

static ssize_t write_full(struct file * file, const char __user * buf,
			  size_t count, loff_t *ppos)
{
	return -ENOSPC;
}

/*
 * Special lseek() function for /dev/null and /dev/zero.  Most notably, you
 * can fopen() both devices with "a" now.  This was previously impossible.
 */
static loff_t null_lseek(struct file * file, loff_t offset, int orig)
{
	return file->f_pos = 0;
}

/*
 * The memory devices use the full 32/64 bits of the offset, and so we cannot
 * check against negative addresses: they are ok.  The return value is weird,
 * though, in that case (0).
 *
 * Also note that seeking relative to the "end of file" isn't supported:
 * it has no meaning, so it returns -EINVAL.
 */
static loff_t memory_lseek(struct file * file, loff_t offset, int orig)
{
	loff_t ret;

	mutex_lock(&file->f_path.dentry->d_inode->i_mutex);
	switch (orig) {
	case 0:
		file->f_pos = offset;
		ret = file->f_pos;
		force_successful_syscall_return();
		break;
	case 1:
		file->f_pos += offset;
		ret = file->f_pos;
		force_successful_syscall_return();
		break;
	default:
		ret = -EINVAL;
	}
	mutex_unlock(&file->f_path.dentry->d_inode->i_mutex);
	return ret;
}
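
/*
 * Usage sketch (editorial, not from the original source): since the file
 * position of /dev/mem is the physical address itself, a physical range
 * can be read with an ordinary lseek()+read() pair, e.g.
 *
 *	fd = open("/dev/mem", O_RDONLY | O_SYNC);
 *	lseek(fd, phys_addr, SEEK_SET);		handled by memory_lseek()
 *	read(fd, buf, len);			handled by read_mem()
 *
 * phys_addr, buf and len are placeholders; O_SYNC asks for uncached access
 * where uncached_access() honours it.
 */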

static int open_port(struct inode * inode, struct file * filp)
{
	return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
}

#define zero_lseek	null_lseek
#define full_lseek	null_lseek
#define write_zero	write_null
#define read_full	read_zero
#define open_mem	open_port
#define open_kmem	open_mem
#define open_oldmem	open_mem
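
/*
 * Note: the aliases above mean that /dev/zero and /dev/full seek like
 * /dev/null, writes to /dev/zero are discarded just like writes to
 * /dev/null, reads from /dev/full return zeros just like /dev/zero, and
 * opening /dev/mem, /dev/kmem or /dev/oldmem requires CAP_SYS_RAWIO,
 * exactly as /dev/port does via open_port().
 */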

static const struct file_operations mem_fops = {
	.llseek		= memory_lseek,
	.get_unmapped_area = get_unmapped_area_mem,
};

static const struct file_operations kmem_fops = {
	.llseek		= memory_lseek,
	.get_unmapped_area = get_unmapped_area_mem,
};

static const struct file_operations null_fops = {
	.llseek		= null_lseek,
	.splice_write	= splice_write_null,
};

#ifdef CONFIG_DEVPORT
static const struct file_operations port_fops = {
	.llseek		= memory_lseek,
};
#endif

static const struct file_operations zero_fops = {
	.llseek		= zero_lseek,
};

/*
 * capabilities for /dev/zero
 * - permits private mappings, "copies" are taken of the source of zeros
 */
static struct backing_dev_info zero_bdi = {
	.capabilities	= BDI_CAP_MAP_COPY,
};

static const struct file_operations full_fops = {
	.llseek		= full_lseek,
};

#ifdef CONFIG_CRASH_DUMP
static const struct file_operations oldmem_fops = {
	.read		= read_oldmem,
	.open		= open_oldmem,
};
#endif

static ssize_t kmsg_write(struct file * file, const char __user * buf,
			  size_t count, loff_t *ppos)
{
	tmp = kmalloc(count + 1, GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;
	if (!copy_from_user(tmp, buf, count)) {
		ret = printk("%s", tmp);
		if (ret > count)
			/* printk can add a prefix */
			ret = count;
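
/*
 * Usage note: writing to this device feeds the written string into the
 * kernel log via printk(), e.g. "echo hello > /dev/kmsg" from a shell.
 * The return value is clamped to the byte count written because printk()
 * may prepend a log prefix of its own.
 */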

static const struct file_operations kmsg_fops = {
	.write		= kmsg_write,
};

static int memory_open(struct inode * inode, struct file * filp)
{
	switch (iminor(inode)) {
		filp->f_op = &mem_fops;
		filp->f_mapping->backing_dev_info =
			&directly_mappable_cdev_bdi;

		filp->f_op = &kmem_fops;
		filp->f_mapping->backing_dev_info =
			&directly_mappable_cdev_bdi;

		filp->f_op = &null_fops;

#ifdef CONFIG_DEVPORT
		filp->f_op = &port_fops;
#endif

		filp->f_mapping->backing_dev_info = &zero_bdi;
		filp->f_op = &zero_fops;

		filp->f_op = &full_fops;

		filp->f_op = &random_fops;

		filp->f_op = &urandom_fops;

		filp->f_op = &kmsg_fops;

#ifdef CONFIG_CRASH_DUMP
		filp->f_op = &oldmem_fops;
#endif
	}
	if (filp->f_op && filp->f_op->open)
		return filp->f_op->open(inode, filp);
	return 0;
}

static const struct file_operations memory_fops = {
	.open		= memory_open,	/* just a selector for the real open */
};

static const struct {
	unsigned int		minor;
	char			*name;
	umode_t			mode;
	const struct file_operations	*fops;
} devlist[] = { /* list of minor devices */
	{1, "mem",     S_IRUSR | S_IWUSR | S_IRGRP, &mem_fops},
	{2, "kmem",    S_IRUSR | S_IWUSR | S_IRGRP, &kmem_fops},
	{3, "null",    S_IRUGO | S_IWUGO,           &null_fops},
#ifdef CONFIG_DEVPORT
	{4, "port",    S_IRUSR | S_IWUSR | S_IRGRP, &port_fops},
#endif
	{5, "zero",    S_IRUGO | S_IWUGO,           &zero_fops},
	{7, "full",    S_IRUGO | S_IWUGO,           &full_fops},
	{8, "random",  S_IRUGO | S_IWUSR,           &random_fops},
	{9, "urandom", S_IRUGO | S_IWUSR,           &urandom_fops},
	{11, "kmsg",   S_IRUGO | S_IWUSR,           &kmsg_fops},
#ifdef CONFIG_CRASH_DUMP
	{12, "oldmem", S_IRUSR | S_IWUSR | S_IRGRP, &oldmem_fops},
#endif
};
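
/*
 * Note: these minors are the conventional memory-device nodes under
 * major 1 (MEM_MAJOR), e.g.
 *
 *	mknod /dev/null c 1 3
 *	mknod /dev/zero c 1 5
 *	mknod /dev/urandom c 1 9
 *
 * chr_dev_init() below registers the major and creates a class device for
 * each entry, while memory_open() above picks the matching file_operations
 * from the minor of the inode being opened.
 */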

static struct class *mem_class;

static int __init chr_dev_init(void)
{
	int i;

	if (register_chrdev(MEM_MAJOR, "mem", &memory_fops))
		printk("unable to get major %d for memory devs\n", MEM_MAJOR);

	mem_class = class_create(THIS_MODULE, "mem");
	for (i = 0; i < ARRAY_SIZE(devlist); i++)
		device_create(mem_class, NULL,
			      MKDEV(MEM_MAJOR, devlist[i].minor),
			      devlist[i].name);
	return 0;
}

fs_initcall(chr_dev_init);