devmem: add range_is_allowed() check to mmap of /dev/mem
/*
 *  linux/drivers/char/mem.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Added devfs support.
 *    Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
 *  Shared /dev/zero mmapping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
 */

#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mman.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/raw.h>
#include <linux/tty.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/crash_dump.h>
#include <linux/backing-dev.h>
#include <linux/bootmem.h>
#include <linux/splice.h>
#include <linux/pfn.h>

#include <asm/uaccess.h>
#include <asm/io.h>

#ifdef CONFIG_IA64
# include <linux/efi.h>
#endif

/*
 * Architectures vary in how they handle caching for addresses
 * outside of main memory.
 */
static inline int uncached_access(struct file *file, unsigned long addr)
{
#if defined(__i386__) && !defined(__arch_um__)
        /*
         * On the PPro and successors, the MTRRs are used to set
         * memory types for physical addresses outside main memory,
         * so blindly setting PCD or PWT on those pages is wrong.
         * For Pentiums and earlier, the surround logic should disable
         * caching for the high addresses through the KEN pin, but
         * we maintain the tradition of paranoia in this code.
         */
        if (file->f_flags & O_SYNC)
                return 1;
        return !(test_bit(X86_FEATURE_MTRR, boot_cpu_data.x86_capability) ||
                 test_bit(X86_FEATURE_K6_MTRR, boot_cpu_data.x86_capability) ||
                 test_bit(X86_FEATURE_CYRIX_ARR, boot_cpu_data.x86_capability) ||
                 test_bit(X86_FEATURE_CENTAUR_MCR, boot_cpu_data.x86_capability))
                && addr >= __pa(high_memory);
#elif defined(__x86_64__) && !defined(__arch_um__)
        /*
         * This is broken because it can generate memory type aliases,
         * which can cause cache corruption.  But it is only available
         * for root and we have to be bug-to-bug compatible with i386.
         */
        if (file->f_flags & O_SYNC)
                return 1;
        /*
         * Same behaviour as i386: PAT is always set to cached and the
         * MTRRs control the caching behaviour.  Hopefully a full PAT
         * implementation will fix that soon.
         */
        return 0;
#elif defined(CONFIG_IA64)
        /*
         * On ia64, we ignore O_SYNC because we cannot tolerate memory
         * attribute aliases.
         */
        return !(efi_mem_attributes(addr) & EFI_MEMORY_WB);
#elif defined(CONFIG_MIPS)
        {
                extern int __uncached_access(struct file *file,
                                             unsigned long addr);

                return __uncached_access(file, addr);
        }
#else
        /*
         * Accessing memory above the top of memory the kernel knows
         * about, or through a file pointer that was marked O_SYNC,
         * will be done non-cached.
         */
        if (file->f_flags & O_SYNC)
                return 1;
        return addr >= __pa(high_memory);
#endif
}

#ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
static inline int valid_phys_addr_range(unsigned long addr, size_t count)
{
        if (addr + count > __pa(high_memory))
                return 0;

        return 1;
}

static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
        return 1;
}
#endif

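/*
 * With CONFIG_NONPROMISC_DEVMEM, access through /dev/mem is filtered
 * page by page: every pfn in the requested range is passed to the
 * architecture's devmem_is_allowed() helper, and the access is refused
 * with -EPERM as soon as one page is off limits.  Without the option,
 * /dev/mem keeps its traditional promiscuous behaviour.
 */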
#ifdef CONFIG_NONPROMISC_DEVMEM
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
        u64 from = ((u64)pfn) << PAGE_SHIFT;
        u64 to = from + size;
        u64 cursor = from;

        while (cursor < to) {
                if (!devmem_is_allowed(pfn)) {
                        printk(KERN_INFO
                "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
                                current->comm, from, to);
                        return 0;
                }
                cursor += PAGE_SIZE;
                pfn++;
        }
        return 1;
}
#else
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
        return 1;
}
#endif
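
/*
 * For illustration only: an architecture's devmem_is_allowed() returns
 * non-zero for pfns userspace may touch.  A minimal x86-style policy
 * (a sketch, not necessarily the exact in-tree implementation) permits
 * the legacy low 1MB and anything that is not system RAM:
 *
 *	int devmem_is_allowed(unsigned long pagenr)
 *	{
 *		if (pagenr <= 256)		// low 1MB: BIOS, VGA
 *			return 1;
 *		if (!page_is_ram(pagenr))	// MMIO stays mappable
 *			return 1;
 *		return 0;			// ordinary RAM is refused
 *	}
 */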

/*
 * This function reads the *physical* memory.  The f_pos points directly
 * to the memory location.
 */
static ssize_t read_mem(struct file *file, char __user *buf,
                        size_t count, loff_t *ppos)
{
        unsigned long p = *ppos;
        ssize_t read, sz;
        char *ptr;

        if (!valid_phys_addr_range(p, count))
                return -EFAULT;
        read = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
        /* we don't have page 0 mapped on sparc and m68k.. */
        if (p < PAGE_SIZE) {
                sz = PAGE_SIZE - p;
                if (sz > count)
                        sz = count;
                if (sz > 0) {
                        if (clear_user(buf, sz))
                                return -EFAULT;
                        buf += sz;
                        p += sz;
                        count -= sz;
                        read += sz;
                }
        }
#endif

        while (count > 0) {
                /*
                 * Handle first page in case it's not aligned
                 */
                if (-p & (PAGE_SIZE - 1))
                        sz = -p & (PAGE_SIZE - 1);
                else
                        sz = PAGE_SIZE;

                sz = min_t(unsigned long, sz, count);

                /*
                 * On ia64 if a page has been mapped somewhere as
                 * uncached, then it must also be accessed uncached
                 * by the kernel or data corruption may occur
                 */
                ptr = xlate_dev_mem_ptr(p);

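                /*
                 * Bail out with -EPERM if any page in the remaining
                 * range is refused by the architecture's
                 * devmem_is_allowed() policy (see range_is_allowed()
                 * above).
                 */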
                if (!range_is_allowed(p >> PAGE_SHIFT, count))
                        return -EPERM;
                if (copy_to_user(buf, ptr, sz))
                        return -EFAULT;
                buf += sz;
                p += sz;
                count -= sz;
                read += sz;
        }

        *ppos += read;
        return read;
}

static ssize_t write_mem(struct file *file, const char __user *buf,
                         size_t count, loff_t *ppos)
{
        unsigned long p = *ppos;
        ssize_t written, sz;
        unsigned long copied;
        void *ptr;

        if (!valid_phys_addr_range(p, count))
                return -EFAULT;

        written = 0;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
        /* we don't have page 0 mapped on sparc and m68k.. */
        if (p < PAGE_SIZE) {
                unsigned long sz = PAGE_SIZE - p;
                if (sz > count)
                        sz = count;
                /* Hmm. Do something? */
                buf += sz;
                p += sz;
                count -= sz;
                written += sz;
        }
#endif

        while (count > 0) {
                /*
                 * Handle first page in case it's not aligned
                 */
                if (-p & (PAGE_SIZE - 1))
                        sz = -p & (PAGE_SIZE - 1);
                else
                        sz = PAGE_SIZE;

                sz = min_t(unsigned long, sz, count);

                /*
                 * On ia64 if a page has been mapped somewhere as
                 * uncached, then it must also be accessed uncached
                 * by the kernel or data corruption may occur
                 */
                ptr = xlate_dev_mem_ptr(p);

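                /*
                 * Unlike read_mem(), only the chunk about to be
                 * written is checked here; earlier allowed chunks may
                 * already have been committed to memory.
                 */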
                if (!range_is_allowed(p >> PAGE_SHIFT, sz))
                        return -EPERM;
                copied = copy_from_user(ptr, buf, sz);
                if (copied) {
                        written += sz - copied;
                        if (written)
                                break;
                        return -EFAULT;
                }
                buf += sz;
                p += sz;
                count -= sz;
                written += sz;
        }

        *ppos += written;
        return written;
}

#ifndef __HAVE_PHYS_MEM_ACCESS_PROT
static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                                     unsigned long size, pgprot_t vma_prot)
{
#ifdef pgprot_noncached
        unsigned long offset = pfn << PAGE_SHIFT;

        if (uncached_access(file, offset))
                return pgprot_noncached(vma_prot);
#endif
        return vma_prot;
}
#endif

#ifndef CONFIG_MMU
static unsigned long get_unmapped_area_mem(struct file *file,
                                           unsigned long addr,
                                           unsigned long len,
                                           unsigned long pgoff,
                                           unsigned long flags)
{
        if (!valid_mmap_phys_addr_range(pgoff, len))
                return (unsigned long) -EINVAL;
        return pgoff << PAGE_SHIFT;
}

/* can't do an in-place private mapping if there's no MMU */
static inline int private_mapping_ok(struct vm_area_struct *vma)
{
        return vma->vm_flags & VM_MAYSHARE;
}
#else
#define get_unmapped_area_mem   NULL

static inline int private_mapping_ok(struct vm_area_struct *vma)
{
        return 1;
}
#endif

static int mmap_mem(struct file *file, struct vm_area_struct *vma)
{
        size_t size = vma->vm_end - vma->vm_start;

        if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
                return -EINVAL;

        if (!private_mapping_ok(vma))
                return -ENOSYS;

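        /*
         * This is the check this patch adds: refuse to set up the
         * mapping at mmap() time if any page in it is disallowed,
         * instead of letting userspace map arbitrary physical memory.
         */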
        if (!range_is_allowed(vma->vm_pgoff, size))
                return -EPERM;

        vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
                                                 size,
                                                 vma->vm_page_prot);

        /* Remap-pfn-range will mark the range VM_IO and VM_RESERVED */
        if (remap_pfn_range(vma,
                            vma->vm_start,
                            vma->vm_pgoff,
                            size,
                            vma->vm_page_prot))
                return -EAGAIN;
        return 0;
}
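
/*
 * Userspace view (a sketch, not part of this file): with the range
 * check in place, an mmap() of a forbidden region now fails up front
 * with EPERM instead of handing out the pages.
 *
 *	int fd = open("/dev/mem", O_RDWR | O_SYNC);
 *	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, 0xa0000);	// legacy VGA window
 *	if (p == MAP_FAILED && errno == EPERM)
 *		// the range was refused by range_is_allowed()
 */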

static int mmap_kmem(struct file *file, struct vm_area_struct *vma)
{
        unsigned long pfn;

        /* Turn a kernel-virtual address into a physical page frame */
        pfn = __pa((u64)vma->vm_pgoff << PAGE_SHIFT) >> PAGE_SHIFT;

        /*
         * RED-PEN: on some architectures there is more mapped memory
         * than available in mem_map which pfn_valid checks
         * for. Perhaps should add a new macro here.
         *
         * RED-PEN: vmalloc is not supported right now.
         */
        if (!pfn_valid(pfn))
                return -EIO;

        vma->vm_pgoff = pfn;
        return mmap_mem(file, vma);
}

#ifdef CONFIG_CRASH_DUMP
/*
 * Read memory corresponding to the old kernel.
 */
static ssize_t read_oldmem(struct file *file, char __user *buf,
                                size_t count, loff_t *ppos)
{
        unsigned long pfn, offset;
        size_t read = 0, csize;
        int rc = 0;

        while (count) {
                pfn = *ppos / PAGE_SIZE;
                if (pfn > saved_max_pfn)
                        return read;

                offset = (unsigned long)(*ppos % PAGE_SIZE);
                if (count > PAGE_SIZE - offset)
                        csize = PAGE_SIZE - offset;
                else
                        csize = count;

                rc = copy_oldmem_page(pfn, buf, csize, offset, 1);
                if (rc < 0)
                        return rc;
                buf += csize;
                *ppos += csize;
                read += csize;
                count -= csize;
        }
        return read;
}
#endif

extern long vread(char *buf, char *addr, unsigned long count);
extern long vwrite(char *buf, char *addr, unsigned long count);

/*
 * This function reads the *virtual* memory as seen by the kernel.
 */
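/*
 * Low memory is copied straight out of the kernel's linear mapping;
 * the vmalloc range is staged through a bounce page with vread(),
 * which takes vmlist_lock and copes with holes in the vmalloc area.
 */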
static ssize_t read_kmem(struct file *file, char __user *buf,
                         size_t count, loff_t *ppos)
{
        unsigned long p = *ppos;
        ssize_t low_count, read, sz;
        char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */

        read = 0;
        if (p < (unsigned long) high_memory) {
                low_count = count;
                if (count > (unsigned long) high_memory - p)
                        low_count = (unsigned long) high_memory - p;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
                /* we don't have page 0 mapped on sparc and m68k.. */
                if (p < PAGE_SIZE && low_count > 0) {
                        size_t tmp = PAGE_SIZE - p;
                        if (tmp > low_count)
                                tmp = low_count;
                        if (clear_user(buf, tmp))
                                return -EFAULT;
                        buf += tmp;
                        p += tmp;
                        read += tmp;
                        low_count -= tmp;
                        count -= tmp;
                }
#endif
                while (low_count > 0) {
                        /*
                         * Handle first page in case it's not aligned
                         */
                        if (-p & (PAGE_SIZE - 1))
                                sz = -p & (PAGE_SIZE - 1);
                        else
                                sz = PAGE_SIZE;

                        sz = min_t(unsigned long, sz, low_count);

                        /*
                         * On ia64 if a page has been mapped somewhere as
                         * uncached, then it must also be accessed uncached
                         * by the kernel or data corruption may occur
                         */
                        kbuf = xlate_dev_kmem_ptr((char *)p);

                        if (copy_to_user(buf, kbuf, sz))
                                return -EFAULT;
                        buf += sz;
                        p += sz;
                        read += sz;
                        low_count -= sz;
                        count -= sz;
                }
        }

        if (count > 0) {
                kbuf = (char *)__get_free_page(GFP_KERNEL);
                if (!kbuf)
                        return -ENOMEM;
                while (count > 0) {
                        int len = count;

                        if (len > PAGE_SIZE)
                                len = PAGE_SIZE;
                        len = vread(kbuf, (char *)p, len);
                        if (!len)
                                break;
                        if (copy_to_user(buf, kbuf, len)) {
                                free_page((unsigned long)kbuf);
                                return -EFAULT;
                        }
                        count -= len;
                        buf += len;
                        read += len;
                        p += len;
                }
                free_page((unsigned long)kbuf);
        }
        *ppos = p;
        return read;
}

static inline ssize_t
do_write_kmem(void *p, unsigned long realp, const char __user *buf,
              size_t count, loff_t *ppos)
{
        ssize_t written, sz;
        unsigned long copied;

        written = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
        /* we don't have page 0 mapped on sparc and m68k.. */
        if (realp < PAGE_SIZE) {
                unsigned long sz = PAGE_SIZE - realp;
                if (sz > count)
                        sz = count;
                /* Hmm. Do something? */
                buf += sz;
                p += sz;
                realp += sz;
                count -= sz;
                written += sz;
        }
#endif

        while (count > 0) {
                char *ptr;
                /*
                 * Handle first page in case it's not aligned
                 */
                if (-realp & (PAGE_SIZE - 1))
                        sz = -realp & (PAGE_SIZE - 1);
                else
                        sz = PAGE_SIZE;

                sz = min_t(unsigned long, sz, count);

                /*
                 * On ia64 if a page has been mapped somewhere as
                 * uncached, then it must also be accessed uncached
                 * by the kernel or data corruption may occur
                 */
                ptr = xlate_dev_kmem_ptr(p);

                copied = copy_from_user(ptr, buf, sz);
                if (copied) {
                        written += sz - copied;
                        if (written)
                                break;
                        return -EFAULT;
                }
                buf += sz;
                p += sz;
                realp += sz;
                count -= sz;
                written += sz;
        }

        *ppos += written;
        return written;
}

/*
 * This function writes to the *virtual* memory as seen by the kernel.
 */
static ssize_t write_kmem(struct file *file, const char __user *buf,
                          size_t count, loff_t *ppos)
{
        unsigned long p = *ppos;
        ssize_t wrote = 0;
        ssize_t virtr = 0;
        ssize_t written;
        char *kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */

        if (p < (unsigned long) high_memory) {

                wrote = count;
                if (count > (unsigned long) high_memory - p)
                        wrote = (unsigned long) high_memory - p;

                written = do_write_kmem((void *)p, p, buf, wrote, ppos);
                if (written != wrote)
                        return written;
                wrote = written;
                p += wrote;
                buf += wrote;
                count -= wrote;
        }

        if (count > 0) {
                kbuf = (char *)__get_free_page(GFP_KERNEL);
                if (!kbuf)
                        return wrote ? wrote : -ENOMEM;
                while (count > 0) {
                        int len = count;

                        if (len > PAGE_SIZE)
                                len = PAGE_SIZE;
                        if (len) {
                                written = copy_from_user(kbuf, buf, len);
                                if (written) {
                                        if (wrote + virtr)
                                                break;
                                        free_page((unsigned long)kbuf);
                                        return -EFAULT;
                                }
                        }
                        len = vwrite(kbuf, (char *)p, len);
                        count -= len;
                        buf += len;
                        virtr += len;
                        p += len;
                }
                free_page((unsigned long)kbuf);
        }

        *ppos = p;
        return virtr + wrote;
}

#ifdef CONFIG_DEVPORT
static ssize_t read_port(struct file *file, char __user *buf,
                         size_t count, loff_t *ppos)
{
        unsigned long i = *ppos;
        char __user *tmp = buf;

        if (!access_ok(VERIFY_WRITE, buf, count))
                return -EFAULT;
        while (count-- > 0 && i < 65536) {
                if (__put_user(inb(i), tmp) < 0)
                        return -EFAULT;
                i++;
                tmp++;
        }
        *ppos = i;
        return tmp - buf;
}

static ssize_t write_port(struct file *file, const char __user *buf,
                          size_t count, loff_t *ppos)
{
        unsigned long i = *ppos;
        const char __user *tmp = buf;

        if (!access_ok(VERIFY_READ, buf, count))
                return -EFAULT;
        while (count-- > 0 && i < 65536) {
                char c;
                if (__get_user(c, tmp)) {
                        if (tmp > buf)
                                break;
                        return -EFAULT;
                }
                outb(c, i);
                i++;
                tmp++;
        }
        *ppos = i;
        return tmp - buf;
}
#endif

static ssize_t read_null(struct file *file, char __user *buf,
                         size_t count, loff_t *ppos)
{
        return 0;
}

static ssize_t write_null(struct file *file, const char __user *buf,
                          size_t count, loff_t *ppos)
{
        return count;
}

static int pipe_to_null(struct pipe_inode_info *info, struct pipe_buffer *buf,
                        struct splice_desc *sd)
{
        return sd->len;
}

static ssize_t splice_write_null(struct pipe_inode_info *pipe, struct file *out,
                                 loff_t *ppos, size_t len, unsigned int flags)
{
        return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_null);
}

static ssize_t read_zero(struct file *file, char __user *buf,
                         size_t count, loff_t *ppos)
{
        size_t written;

        if (!count)
                return 0;

        if (!access_ok(VERIFY_WRITE, buf, count))
                return -EFAULT;

        written = 0;
        while (count) {
                unsigned long unwritten;
                size_t chunk = count;

                if (chunk > PAGE_SIZE)
                        chunk = PAGE_SIZE;      /* Just for latency reasons */
                unwritten = clear_user(buf, chunk);
                written += chunk - unwritten;
                if (unwritten)
                        break;
                buf += chunk;
                count -= chunk;
                cond_resched();
        }
        return written ? written : -EFAULT;
}

static int mmap_zero(struct file *file, struct vm_area_struct *vma)
{
#ifndef CONFIG_MMU
        return -ENOSYS;
#endif
        if (vma->vm_flags & VM_SHARED)
                return shmem_zero_setup(vma);
        return 0;
}

static ssize_t write_full(struct file *file, const char __user *buf,
                          size_t count, loff_t *ppos)
{
        return -ENOSPC;
}

/*
 * Special lseek() function for /dev/null and /dev/zero.  Most notably, you
 * can fopen() both devices with "a" now.  This was previously impossible.
 * -- SRB.
 */

static loff_t null_lseek(struct file *file, loff_t offset, int orig)
{
        return file->f_pos = 0;
}

/*
 * The memory devices use the full 32/64 bits of the offset, and so we cannot
 * check against negative addresses: they are ok.  The return value is weird,
 * though, in that case (0).
 *
 * Also note that seeking relative to the "end of file" isn't supported:
 * it has no meaning, so it returns -EINVAL.
 */
static loff_t memory_lseek(struct file *file, loff_t offset, int orig)
{
        loff_t ret;

        mutex_lock(&file->f_path.dentry->d_inode->i_mutex);
        switch (orig) {
                case 0:
                        file->f_pos = offset;
                        ret = file->f_pos;
                        force_successful_syscall_return();
                        break;
                case 1:
                        file->f_pos += offset;
                        ret = file->f_pos;
                        force_successful_syscall_return();
                        break;
                default:
                        ret = -EINVAL;
        }
        mutex_unlock(&file->f_path.dentry->d_inode->i_mutex);
        return ret;
}
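
/*
 * Usage sketch (userspace, illustrative only): reading a physical
 * address through /dev/mem is a seek to that offset followed by a
 * read:
 *
 *	int fd = open("/dev/mem", O_RDONLY);
 *	lseek(fd, 0xf0000, SEEK_SET);	// e.g. the BIOS ROM shadow
 *	read(fd, buf, sizeof(buf));
 */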

static int open_port(struct inode *inode, struct file *filp)
{
        return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
}

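/*
 * /dev/mem, /dev/kmem, /dev/port and /dev/oldmem all share open_port(),
 * so opening any of them requires CAP_SYS_RAWIO.  The remaining aliases
 * below simply reuse the null/zero helpers.
 */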
#define zero_lseek      null_lseek
#define full_lseek      null_lseek
#define write_zero      write_null
#define read_full       read_zero
#define open_mem        open_port
#define open_kmem       open_mem
#define open_oldmem     open_mem

static const struct file_operations mem_fops = {
        .llseek         = memory_lseek,
        .read           = read_mem,
        .write          = write_mem,
        .mmap           = mmap_mem,
        .open           = open_mem,
        .get_unmapped_area = get_unmapped_area_mem,
};

static const struct file_operations kmem_fops = {
        .llseek         = memory_lseek,
        .read           = read_kmem,
        .write          = write_kmem,
        .mmap           = mmap_kmem,
        .open           = open_kmem,
        .get_unmapped_area = get_unmapped_area_mem,
};

static const struct file_operations null_fops = {
        .llseek         = null_lseek,
        .read           = read_null,
        .write          = write_null,
        .splice_write   = splice_write_null,
};

#ifdef CONFIG_DEVPORT
static const struct file_operations port_fops = {
        .llseek         = memory_lseek,
        .read           = read_port,
        .write          = write_port,
        .open           = open_port,
};
#endif

static const struct file_operations zero_fops = {
        .llseek         = zero_lseek,
        .read           = read_zero,
        .write          = write_zero,
        .mmap           = mmap_zero,
};

/*
 * capabilities for /dev/zero
 * - permits private mappings, "copies" are taken of the source of zeros
 */
static struct backing_dev_info zero_bdi = {
        .capabilities   = BDI_CAP_MAP_COPY,
};

static const struct file_operations full_fops = {
        .llseek         = full_lseek,
        .read           = read_full,
        .write          = write_full,
};

#ifdef CONFIG_CRASH_DUMP
static const struct file_operations oldmem_fops = {
        .read   = read_oldmem,
        .open   = open_oldmem,
};
#endif

static ssize_t kmsg_write(struct file *file, const char __user *buf,
                          size_t count, loff_t *ppos)
{
        char *tmp;
        ssize_t ret;

        tmp = kmalloc(count + 1, GFP_KERNEL);
        if (tmp == NULL)
                return -ENOMEM;
        ret = -EFAULT;
        if (!copy_from_user(tmp, buf, count)) {
                tmp[count] = 0;
                ret = printk("%s", tmp);
                if (ret > count)
                        /* printk can add a prefix */
                        ret = count;
        }
        kfree(tmp);
        return ret;
}

static const struct file_operations kmsg_fops = {
        .write =        kmsg_write,
};

static int memory_open(struct inode *inode, struct file *filp)
{
        switch (iminor(inode)) {
                case 1:
                        filp->f_op = &mem_fops;
                        filp->f_mapping->backing_dev_info =
                                &directly_mappable_cdev_bdi;
                        break;
                case 2:
                        filp->f_op = &kmem_fops;
                        filp->f_mapping->backing_dev_info =
                                &directly_mappable_cdev_bdi;
                        break;
                case 3:
                        filp->f_op = &null_fops;
                        break;
#ifdef CONFIG_DEVPORT
                case 4:
                        filp->f_op = &port_fops;
                        break;
#endif
                case 5:
                        filp->f_mapping->backing_dev_info = &zero_bdi;
                        filp->f_op = &zero_fops;
                        break;
                case 7:
                        filp->f_op = &full_fops;
                        break;
                case 8:
                        filp->f_op = &random_fops;
                        break;
                case 9:
                        filp->f_op = &urandom_fops;
                        break;
                case 11:
                        filp->f_op = &kmsg_fops;
                        break;
#ifdef CONFIG_CRASH_DUMP
                case 12:
                        filp->f_op = &oldmem_fops;
                        break;
#endif
                default:
                        return -ENXIO;
        }
        if (filp->f_op && filp->f_op->open)
                return filp->f_op->open(inode, filp);
        return 0;
}

static const struct file_operations memory_fops = {
        .open           = memory_open,  /* just a selector for the real open */
};

static const struct {
        unsigned int            minor;
        char                    *name;
        umode_t                 mode;
        const struct file_operations    *fops;
} devlist[] = { /* list of minor devices */
        {1,  "mem",     S_IRUSR | S_IWUSR | S_IRGRP, &mem_fops},
        {2,  "kmem",    S_IRUSR | S_IWUSR | S_IRGRP, &kmem_fops},
        {3,  "null",    S_IRUGO | S_IWUGO,           &null_fops},
#ifdef CONFIG_DEVPORT
        {4,  "port",    S_IRUSR | S_IWUSR | S_IRGRP, &port_fops},
#endif
        {5,  "zero",    S_IRUGO | S_IWUGO,           &zero_fops},
        {7,  "full",    S_IRUGO | S_IWUGO,           &full_fops},
        {8,  "random",  S_IRUGO | S_IWUSR,           &random_fops},
        {9,  "urandom", S_IRUGO | S_IWUSR,           &urandom_fops},
        {11, "kmsg",    S_IRUGO | S_IWUSR,           &kmsg_fops},
#ifdef CONFIG_CRASH_DUMP
        {12, "oldmem",  S_IRUSR | S_IWUSR | S_IRGRP, &oldmem_fops},
#endif
};
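
/*
 * The minors listed above must stay in sync with the switch in
 * memory_open(); note that 6 and 10 are absent from both.
 */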

static struct class *mem_class;

static int __init chr_dev_init(void)
{
        int i;
        int err;

        err = bdi_init(&zero_bdi);
        if (err)
                return err;

        if (register_chrdev(MEM_MAJOR, "mem", &memory_fops))
                printk("unable to get major %d for memory devs\n", MEM_MAJOR);

        mem_class = class_create(THIS_MODULE, "mem");
        for (i = 0; i < ARRAY_SIZE(devlist); i++)
                device_create(mem_class, NULL,
                              MKDEV(MEM_MAJOR, devlist[i].minor),
                              devlist[i].name);

        return 0;
}

fs_initcall(chr_dev_init);