drivers/char/mem.c (linux-2.6)
/*
 *  linux/drivers/char/mem.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Added devfs support.
 *    Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
 *  Shared /dev/zero mmapping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
 */

#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mman.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/raw.h>
#include <linux/tty.h>
#include <linux/capability.h>
#include <linux/smp_lock.h>
#include <linux/ptrace.h>
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/crash_dump.h>
#include <linux/backing-dev.h>
#include <linux/bootmem.h>
#include <linux/pipe_fs_i.h>

#include <asm/uaccess.h>
#include <asm/io.h>

#ifdef CONFIG_IA64
# include <linux/efi.h>
#endif

/*
 * Architectures vary in how they handle caching for addresses
 * outside of main memory.
 */
static inline int uncached_access(struct file *file, unsigned long addr)
{
#if defined(__i386__)
        /*
         * On the PPro and successors, the MTRRs are used to set
         * memory types for physical addresses outside main memory,
         * so blindly setting PCD or PWT on those pages is wrong.
         * For Pentiums and earlier, the surround logic should disable
         * caching for the high addresses through the KEN pin, but
         * we maintain the tradition of paranoia in this code.
         */
        if (file->f_flags & O_SYNC)
                return 1;
        return !( test_bit(X86_FEATURE_MTRR, boot_cpu_data.x86_capability) ||
                  test_bit(X86_FEATURE_K6_MTRR, boot_cpu_data.x86_capability) ||
                  test_bit(X86_FEATURE_CYRIX_ARR, boot_cpu_data.x86_capability) ||
                  test_bit(X86_FEATURE_CENTAUR_MCR, boot_cpu_data.x86_capability) )
          && addr >= __pa(high_memory);
#elif defined(__x86_64__)
        /*
         * This is broken because it can generate memory type aliases,
         * which can cause cache corruptions.
         * But it is only available for root and we have to be bug-to-bug
         * compatible with i386.
         */
        if (file->f_flags & O_SYNC)
                return 1;
        /* same behaviour as i386. PAT always set to cached and MTRRs control the
           caching behaviour.
           Hopefully a full PAT implementation will fix that soon. */
        return 0;
#elif defined(CONFIG_IA64)
        /*
         * On ia64, we ignore O_SYNC because we cannot tolerate memory attribute aliases.
         */
        return !(efi_mem_attributes(addr) & EFI_MEMORY_WB);
#else
        /*
         * Accessing memory above the top of memory the kernel knows about,
         * or through a file pointer that was marked O_SYNC, will be done
         * non-cached.
         */
        if (file->f_flags & O_SYNC)
                return 1;
        return addr >= __pa(high_memory);
#endif
}

#ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
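/*
 * Default checks for architectures that do not define
 * ARCH_HAS_VALID_PHYS_ADDR_RANGE: /dev/mem reads and writes must stay
 * below high_memory, while any page frame may be mmap()ed.
 */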
static inline int valid_phys_addr_range(unsigned long addr, size_t count)
{
        if (addr + count > __pa(high_memory))
                return 0;

        return 1;
}

static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
        return 1;
}
#endif

/*
 * This function reads the *physical* memory. The f_pos points directly to the
 * memory location.
 */
static ssize_t read_mem(struct file * file, char __user * buf,
                        size_t count, loff_t *ppos)
{
        unsigned long p = *ppos;
        ssize_t read, sz;
        char *ptr;

        if (!valid_phys_addr_range(p, count))
                return -EFAULT;
        read = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
        /* we don't have page 0 mapped on sparc and m68k.. */
        if (p < PAGE_SIZE) {
                sz = PAGE_SIZE - p;
                if (sz > count)
                        sz = count;
                if (sz > 0) {
                        if (clear_user(buf, sz))
                                return -EFAULT;
                        buf += sz;
                        p += sz;
                        count -= sz;
                        read += sz;
                }
        }
#endif

        while (count > 0) {
                /*
                 * Handle first page in case it's not aligned
                 */
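                /* -p & (PAGE_SIZE - 1): bytes from p to the next page boundary */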
                if (-p & (PAGE_SIZE - 1))
                        sz = -p & (PAGE_SIZE - 1);
                else
                        sz = PAGE_SIZE;

                sz = min_t(unsigned long, sz, count);

                /*
                 * On ia64 if a page has been mapped somewhere as
                 * uncached, then it must also be accessed uncached
                 * by the kernel or data corruption may occur
                 */
                ptr = xlate_dev_mem_ptr(p);

                if (copy_to_user(buf, ptr, sz))
                        return -EFAULT;
                buf += sz;
                p += sz;
                count -= sz;
                read += sz;
        }

        *ppos += read;
        return read;
}

static ssize_t write_mem(struct file * file, const char __user * buf,
                         size_t count, loff_t *ppos)
{
        unsigned long p = *ppos;
        ssize_t written, sz;
        unsigned long copied;
        void *ptr;

        if (!valid_phys_addr_range(p, count))
                return -EFAULT;

        written = 0;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
        /* we don't have page 0 mapped on sparc and m68k.. */
        if (p < PAGE_SIZE) {
                unsigned long sz = PAGE_SIZE - p;
                if (sz > count)
                        sz = count;
                /* Hmm. Do something? */
                buf += sz;
                p += sz;
                count -= sz;
                written += sz;
        }
#endif

        while (count > 0) {
                /*
                 * Handle first page in case it's not aligned
                 */
                if (-p & (PAGE_SIZE - 1))
                        sz = -p & (PAGE_SIZE - 1);
                else
                        sz = PAGE_SIZE;

                sz = min_t(unsigned long, sz, count);

                /*
                 * On ia64 if a page has been mapped somewhere as
                 * uncached, then it must also be accessed uncached
                 * by the kernel or data corruption may occur
                 */
                ptr = xlate_dev_mem_ptr(p);

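                /*
                 * On a partial fault, report how many bytes did make it;
                 * return -EFAULT only if nothing was written at all.
                 */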
                copied = copy_from_user(ptr, buf, sz);
                if (copied) {
                        written += sz - copied;
                        if (written)
                                break;
                        return -EFAULT;
                }
                buf += sz;
                p += sz;
                count -= sz;
                written += sz;
        }

        *ppos += written;
        return written;
}

#ifndef __HAVE_PHYS_MEM_ACCESS_PROT
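/*
 * Fallback for architectures that do not provide their own
 * phys_mem_access_prot(): make the mapping uncached whenever
 * uncached_access() says the range should not be cached.
 */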
static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                                     unsigned long size, pgprot_t vma_prot)
{
#ifdef pgprot_noncached
        unsigned long offset = pfn << PAGE_SHIFT;

        if (uncached_access(file, offset))
                return pgprot_noncached(vma_prot);
#endif
        return vma_prot;
}
#endif

static int mmap_mem(struct file * file, struct vm_area_struct * vma)
{
        size_t size = vma->vm_end - vma->vm_start;

        if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
                return -EINVAL;

        vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
                                                 size,
                                                 vma->vm_page_prot);

        /* Remap-pfn-range will mark the range VM_IO and VM_RESERVED */
        if (remap_pfn_range(vma,
                            vma->vm_start,
                            vma->vm_pgoff,
                            size,
                            vma->vm_page_prot))
                return -EAGAIN;
        return 0;
}

static int mmap_kmem(struct file * file, struct vm_area_struct * vma)
{
        unsigned long pfn;

        /* Turn a kernel-virtual address into a physical page frame */
        pfn = __pa((u64)vma->vm_pgoff << PAGE_SHIFT) >> PAGE_SHIFT;

        /*
         * RED-PEN: on some architectures there is more mapped memory
         * than available in mem_map which pfn_valid checks
         * for. Perhaps should add a new macro here.
         *
         * RED-PEN: vmalloc is not supported right now.
         */
        if (!pfn_valid(pfn))
                return -EIO;

        vma->vm_pgoff = pfn;
        return mmap_mem(file, vma);
}

#ifdef CONFIG_CRASH_DUMP
/*
 * Read memory corresponding to the old kernel.
 */
static ssize_t read_oldmem(struct file *file, char __user *buf,
                                size_t count, loff_t *ppos)
{
        unsigned long pfn, offset;
        size_t read = 0, csize;
        int rc = 0;

        while (count) {
                pfn = *ppos / PAGE_SIZE;
                if (pfn > saved_max_pfn)
                        return read;

                offset = (unsigned long)(*ppos % PAGE_SIZE);
                if (count > PAGE_SIZE - offset)
                        csize = PAGE_SIZE - offset;
                else
                        csize = count;

                rc = copy_oldmem_page(pfn, buf, csize, offset, 1);
                if (rc < 0)
                        return rc;
                buf += csize;
                *ppos += csize;
                read += csize;
                count -= csize;
        }
        return read;
}
#endif

extern long vread(char *buf, char *addr, unsigned long count);
extern long vwrite(char *buf, char *addr, unsigned long count);

/*
 * This function reads the *virtual* memory as seen by the kernel.
 */
static ssize_t read_kmem(struct file *file, char __user *buf,
                         size_t count, loff_t *ppos)
{
        unsigned long p = *ppos;
        ssize_t low_count, read, sz;
        char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */

        read = 0;
        if (p < (unsigned long) high_memory) {
                low_count = count;
                if (count > (unsigned long) high_memory - p)
                        low_count = (unsigned long) high_memory - p;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
                /* we don't have page 0 mapped on sparc and m68k.. */
                if (p < PAGE_SIZE && low_count > 0) {
                        size_t tmp = PAGE_SIZE - p;
                        if (tmp > low_count) tmp = low_count;
                        if (clear_user(buf, tmp))
                                return -EFAULT;
                        buf += tmp;
                        p += tmp;
                        read += tmp;
                        low_count -= tmp;
                        count -= tmp;
                }
#endif
                while (low_count > 0) {
                        /*
                         * Handle first page in case it's not aligned
                         */
                        if (-p & (PAGE_SIZE - 1))
                                sz = -p & (PAGE_SIZE - 1);
                        else
                                sz = PAGE_SIZE;

                        sz = min_t(unsigned long, sz, low_count);

                        /*
                         * On ia64 if a page has been mapped somewhere as
                         * uncached, then it must also be accessed uncached
                         * by the kernel or data corruption may occur
                         */
                        kbuf = xlate_dev_kmem_ptr((char *)p);

                        if (copy_to_user(buf, kbuf, sz))
                                return -EFAULT;
                        buf += sz;
                        p += sz;
                        read += sz;
                        low_count -= sz;
                        count -= sz;
                }
        }

        if (count > 0) {
                kbuf = (char *)__get_free_page(GFP_KERNEL);
                if (!kbuf)
                        return -ENOMEM;
                while (count > 0) {
                        int len = count;

                        if (len > PAGE_SIZE)
                                len = PAGE_SIZE;
                        len = vread(kbuf, (char *)p, len);
                        if (!len)
                                break;
                        if (copy_to_user(buf, kbuf, len)) {
                                free_page((unsigned long)kbuf);
                                return -EFAULT;
                        }
                        count -= len;
                        buf += len;
                        read += len;
                        p += len;
                }
                free_page((unsigned long)kbuf);
        }
        *ppos = p;
        return read;
}


static inline ssize_t
do_write_kmem(void *p, unsigned long realp, const char __user * buf,
              size_t count, loff_t *ppos)
{
        ssize_t written, sz;
        unsigned long copied;

        written = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
        /* we don't have page 0 mapped on sparc and m68k.. */
        if (realp < PAGE_SIZE) {
                unsigned long sz = PAGE_SIZE - realp;
                if (sz > count)
                        sz = count;
                /* Hmm. Do something? */
                buf += sz;
                p += sz;
                realp += sz;
                count -= sz;
                written += sz;
        }
#endif

        while (count > 0) {
                char *ptr;
                /*
                 * Handle first page in case it's not aligned
                 */
                if (-realp & (PAGE_SIZE - 1))
                        sz = -realp & (PAGE_SIZE - 1);
                else
                        sz = PAGE_SIZE;

                sz = min_t(unsigned long, sz, count);

                /*
                 * On ia64 if a page has been mapped somewhere as
                 * uncached, then it must also be accessed uncached
                 * by the kernel or data corruption may occur
                 */
                ptr = xlate_dev_kmem_ptr(p);

                copied = copy_from_user(ptr, buf, sz);
                if (copied) {
                        written += sz - copied;
                        if (written)
                                break;
                        return -EFAULT;
                }
                buf += sz;
                p += sz;
                realp += sz;
                count -= sz;
                written += sz;
        }

        *ppos += written;
        return written;
}


/*
 * This function writes to the *virtual* memory as seen by the kernel.
 */
static ssize_t write_kmem(struct file * file, const char __user * buf,
                          size_t count, loff_t *ppos)
{
        unsigned long p = *ppos;
        ssize_t wrote = 0;
        ssize_t virtr = 0;
        ssize_t written;
        char * kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */

        if (p < (unsigned long) high_memory) {

                wrote = count;
                if (count > (unsigned long) high_memory - p)
                        wrote = (unsigned long) high_memory - p;

                written = do_write_kmem((void*)p, p, buf, wrote, ppos);
                if (written != wrote)
                        return written;
                wrote = written;
                p += wrote;
                buf += wrote;
                count -= wrote;
        }

        if (count > 0) {
                kbuf = (char *)__get_free_page(GFP_KERNEL);
                if (!kbuf)
                        return wrote ? wrote : -ENOMEM;
                while (count > 0) {
                        int len = count;

                        if (len > PAGE_SIZE)
                                len = PAGE_SIZE;
                        if (len) {
                                written = copy_from_user(kbuf, buf, len);
                                if (written) {
                                        if (wrote + virtr)
                                                break;
                                        free_page((unsigned long)kbuf);
                                        return -EFAULT;
                                }
                        }
                        len = vwrite(kbuf, (char *)p, len);
                        count -= len;
                        buf += len;
                        virtr += len;
                        p += len;
                }
                free_page((unsigned long)kbuf);
        }

        *ppos = p;
        return virtr + wrote;
}

#if defined(CONFIG_ISA) || !defined(__mc68000__)
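/*
 * /dev/port: byte-wide access to the 64K x86-style I/O port space,
 * with f_pos used as the port number.
 */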
static ssize_t read_port(struct file * file, char __user * buf,
                         size_t count, loff_t *ppos)
{
        unsigned long i = *ppos;
        char __user *tmp = buf;

        if (!access_ok(VERIFY_WRITE, buf, count))
                return -EFAULT;
        while (count-- > 0 && i < 65536) {
                if (__put_user(inb(i),tmp) < 0)
                        return -EFAULT;
                i++;
                tmp++;
        }
        *ppos = i;
        return tmp-buf;
}

static ssize_t write_port(struct file * file, const char __user * buf,
                          size_t count, loff_t *ppos)
{
        unsigned long i = *ppos;
        const char __user * tmp = buf;

        if (!access_ok(VERIFY_READ,buf,count))
                return -EFAULT;
        while (count-- > 0 && i < 65536) {
                char c;
                if (__get_user(c, tmp)) {
                        if (tmp > buf)
                                break;
                        return -EFAULT;
                }
                outb(c,i);
                i++;
                tmp++;
        }
        *ppos = i;
        return tmp-buf;
}
#endif

static ssize_t read_null(struct file * file, char __user * buf,
                         size_t count, loff_t *ppos)
{
        return 0;
}

static ssize_t write_null(struct file * file, const char __user * buf,
                          size_t count, loff_t *ppos)
{
        return count;
}

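/*
 * Splicing to /dev/null just discards the data: report each pipe
 * buffer as fully consumed.
 */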
static int pipe_to_null(struct pipe_inode_info *info, struct pipe_buffer *buf,
                        struct splice_desc *sd)
{
        return sd->len;
}

static ssize_t splice_write_null(struct pipe_inode_info *pipe,struct file *out,
                                 loff_t *ppos, size_t len, unsigned int flags)
{
        return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_null);
}

#ifdef CONFIG_MMU
/*
 * For fun, we are using the MMU for this.
 */
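/*
 * For private writable mappings we avoid copying zeroes altogether:
 * the destination pages are remapped to the zero page.  Shared and
 * hugetlb mappings fall back to the plain clear_user() loop below.
 */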
static inline size_t read_zero_pagealigned(char __user * buf, size_t size)
{
        struct mm_struct *mm;
        struct vm_area_struct * vma;
        unsigned long addr=(unsigned long)buf;

        mm = current->mm;
        /* Oops, this was forgotten before. -ben */
        down_read(&mm->mmap_sem);

        /* For private mappings, just map in zero pages. */
        for (vma = find_vma(mm, addr); vma; vma = vma->vm_next) {
                unsigned long count;

                if (vma->vm_start > addr || (vma->vm_flags & VM_WRITE) == 0)
                        goto out_up;
                if (vma->vm_flags & (VM_SHARED | VM_HUGETLB))
                        break;
                count = vma->vm_end - addr;
                if (count > size)
                        count = size;

                zap_page_range(vma, addr, count, NULL);
                zeromap_page_range(vma, addr, count, PAGE_COPY);

                size -= count;
                buf += count;
                addr += count;
                if (size == 0)
                        goto out_up;
        }

        up_read(&mm->mmap_sem);

        /* The shared case is hard. Let's do the conventional zeroing. */
        do {
                unsigned long unwritten = clear_user(buf, PAGE_SIZE);
                if (unwritten)
                        return size + unwritten - PAGE_SIZE;
                cond_resched();
                buf += PAGE_SIZE;
                size -= PAGE_SIZE;
        } while (size);

        return size;
out_up:
        up_read(&mm->mmap_sem);
        return size;
}

static ssize_t read_zero(struct file * file, char __user * buf,
                         size_t count, loff_t *ppos)
{
        unsigned long left, unwritten, written = 0;

        if (!count)
                return 0;

        if (!access_ok(VERIFY_WRITE, buf, count))
                return -EFAULT;

        left = count;

        /* do we want to be clever? Arbitrary cut-off */
        if (count >= PAGE_SIZE*4) {
                unsigned long partial;

                /* How much left of the page? */
                partial = (PAGE_SIZE-1) & -(unsigned long) buf;
                unwritten = clear_user(buf, partial);
                written = partial - unwritten;
                if (unwritten)
                        goto out;
                left -= partial;
                buf += partial;
                unwritten = read_zero_pagealigned(buf, left & PAGE_MASK);
                written += (left & PAGE_MASK) - unwritten;
                if (unwritten)
                        goto out;
                buf += left & PAGE_MASK;
                left &= ~PAGE_MASK;
        }
        unwritten = clear_user(buf, left);
        written += left - unwritten;
out:
        return written ? written : -EFAULT;
}

static int mmap_zero(struct file * file, struct vm_area_struct * vma)
{
        if (vma->vm_flags & VM_SHARED)
                return shmem_zero_setup(vma);
        if (zeromap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start, vma->vm_page_prot))
                return -EAGAIN;
        return 0;
}
#else /* CONFIG_MMU */
static ssize_t read_zero(struct file * file, char * buf,
                         size_t count, loff_t *ppos)
{
        size_t todo = count;

        while (todo) {
                size_t chunk = todo;

                if (chunk > 4096)
                        chunk = 4096;   /* Just for latency reasons */
                if (clear_user(buf, chunk))
                        return -EFAULT;
                buf += chunk;
                todo -= chunk;
                cond_resched();
        }
        return count;
}

static int mmap_zero(struct file * file, struct vm_area_struct * vma)
{
        return -ENOSYS;
}
#endif /* CONFIG_MMU */

static ssize_t write_full(struct file * file, const char __user * buf,
                          size_t count, loff_t *ppos)
{
        return -ENOSPC;
}

/*
 * Special lseek() function for /dev/null and /dev/zero.  Most notably, you
 * can fopen() both devices with "a" now.  This was previously impossible.
 * -- SRB.
 */

static loff_t null_lseek(struct file * file, loff_t offset, int orig)
{
        return file->f_pos = 0;
}

/*
 * The memory devices use the full 32/64 bits of the offset, and so we cannot
 * check against negative addresses: they are ok. The return value is weird,
 * though, in that case (0).
 *
 * also note that seeking relative to the "end of file" isn't supported:
 * it has no meaning, so it returns -EINVAL.
 */
static loff_t memory_lseek(struct file * file, loff_t offset, int orig)
{
        loff_t ret;

        mutex_lock(&file->f_dentry->d_inode->i_mutex);
        switch (orig) {
                case 0:
                        file->f_pos = offset;
                        ret = file->f_pos;
                        force_successful_syscall_return();
                        break;
                case 1:
                        file->f_pos += offset;
                        ret = file->f_pos;
                        force_successful_syscall_return();
                        break;
                default:
                        ret = -EINVAL;
        }
        mutex_unlock(&file->f_dentry->d_inode->i_mutex);
        return ret;
}

static int open_port(struct inode * inode, struct file * filp)
{
        return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
}

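/*
 * Several of the devices share implementations: /dev/zero and /dev/full
 * reuse the null lseek, writes to /dev/zero behave like /dev/null,
 * /dev/full reads as /dev/zero, and /dev/mem, /dev/kmem and /dev/oldmem
 * all require CAP_SYS_RAWIO to open, via open_port().
 */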
#define zero_lseek      null_lseek
#define full_lseek      null_lseek
#define write_zero      write_null
#define read_full       read_zero
#define open_mem        open_port
#define open_kmem       open_mem
#define open_oldmem     open_mem

static const struct file_operations mem_fops = {
        .llseek         = memory_lseek,
        .read           = read_mem,
        .write          = write_mem,
        .mmap           = mmap_mem,
        .open           = open_mem,
};

static const struct file_operations kmem_fops = {
        .llseek         = memory_lseek,
        .read           = read_kmem,
        .write          = write_kmem,
        .mmap           = mmap_kmem,
        .open           = open_kmem,
};

static const struct file_operations null_fops = {
        .llseek         = null_lseek,
        .read           = read_null,
        .write          = write_null,
        .splice_write   = splice_write_null,
};

#if defined(CONFIG_ISA) || !defined(__mc68000__)
static const struct file_operations port_fops = {
        .llseek         = memory_lseek,
        .read           = read_port,
        .write          = write_port,
        .open           = open_port,
};
#endif

static const struct file_operations zero_fops = {
        .llseek         = zero_lseek,
        .read           = read_zero,
        .write          = write_zero,
        .mmap           = mmap_zero,
};

static struct backing_dev_info zero_bdi = {
        .capabilities   = BDI_CAP_MAP_COPY,
};

static const struct file_operations full_fops = {
        .llseek         = full_lseek,
        .read           = read_full,
        .write          = write_full,
};

#ifdef CONFIG_CRASH_DUMP
static const struct file_operations oldmem_fops = {
        .read   = read_oldmem,
        .open   = open_oldmem,
};
#endif

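/*
 * Writing to /dev/kmsg injects the message into the kernel log via
 * printk().
 */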
static ssize_t kmsg_write(struct file * file, const char __user * buf,
                          size_t count, loff_t *ppos)
{
        char *tmp;
        ssize_t ret;

        tmp = kmalloc(count + 1, GFP_KERNEL);
        if (tmp == NULL)
                return -ENOMEM;
        ret = -EFAULT;
        if (!copy_from_user(tmp, buf, count)) {
                tmp[count] = 0;
                ret = printk("%s", tmp);
                if (ret > count)
                        /* printk can add a prefix */
                        ret = count;
        }
        kfree(tmp);
        return ret;
}

static const struct file_operations kmsg_fops = {
        .write =        kmsg_write,
};

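/*
 * The single open() entry point behind the memory device major: pick
 * the real file_operations based on the minor number (matching
 * devlist[] below) and then forward to its open routine, if any.
 */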
static int memory_open(struct inode * inode, struct file * filp)
{
        switch (iminor(inode)) {
                case 1:
                        filp->f_op = &mem_fops;
                        break;
                case 2:
                        filp->f_op = &kmem_fops;
                        break;
                case 3:
                        filp->f_op = &null_fops;
                        break;
#if defined(CONFIG_ISA) || !defined(__mc68000__)
                case 4:
                        filp->f_op = &port_fops;
                        break;
#endif
                case 5:
                        filp->f_mapping->backing_dev_info = &zero_bdi;
                        filp->f_op = &zero_fops;
                        break;
                case 7:
                        filp->f_op = &full_fops;
                        break;
                case 8:
                        filp->f_op = &random_fops;
                        break;
                case 9:
                        filp->f_op = &urandom_fops;
                        break;
                case 11:
                        filp->f_op = &kmsg_fops;
                        break;
#ifdef CONFIG_CRASH_DUMP
                case 12:
                        filp->f_op = &oldmem_fops;
                        break;
#endif
                default:
                        return -ENXIO;
        }
        if (filp->f_op && filp->f_op->open)
                return filp->f_op->open(inode,filp);
        return 0;
}

static const struct file_operations memory_fops = {
        .open           = memory_open,  /* just a selector for the real open */
};

static const struct {
        unsigned int            minor;
        char                    *name;
        umode_t                 mode;
        const struct file_operations    *fops;
} devlist[] = { /* list of minor devices */
        {1, "mem",     S_IRUSR | S_IWUSR | S_IRGRP, &mem_fops},
        {2, "kmem",    S_IRUSR | S_IWUSR | S_IRGRP, &kmem_fops},
        {3, "null",    S_IRUGO | S_IWUGO,           &null_fops},
#if defined(CONFIG_ISA) || !defined(__mc68000__)
        {4, "port",    S_IRUSR | S_IWUSR | S_IRGRP, &port_fops},
#endif
        {5, "zero",    S_IRUGO | S_IWUGO,           &zero_fops},
        {7, "full",    S_IRUGO | S_IWUGO,           &full_fops},
        {8, "random",  S_IRUGO | S_IWUSR,           &random_fops},
        {9, "urandom", S_IRUGO | S_IWUSR,           &urandom_fops},
        {11,"kmsg",    S_IRUGO | S_IWUSR,           &kmsg_fops},
#ifdef CONFIG_CRASH_DUMP
        {12,"oldmem",  S_IRUSR | S_IWUSR | S_IRGRP, &oldmem_fops},
#endif
};

static struct class *mem_class;

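/*
 * Register the character device major for the memory devices and create
 * a class device for each devlist[] entry so that userspace (udev) can
 * create the corresponding /dev nodes.
 */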
static int __init chr_dev_init(void)
{
        int i;

        if (register_chrdev(MEM_MAJOR,"mem",&memory_fops))
                printk("unable to get major %d for memory devs\n", MEM_MAJOR);

        mem_class = class_create(THIS_MODULE, "mem");
        for (i = 0; i < ARRAY_SIZE(devlist); i++)
                class_device_create(mem_class, NULL,
                                        MKDEV(MEM_MAJOR, devlist[i].minor),
                                        NULL, devlist[i].name);

        return 0;
}

fs_initcall(chr_dev_init);