x86: PAT phys_mem_access_prot_allowed for dev/mem mmap
/*
 *  linux/drivers/char/mem.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Added devfs support.
 *    Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
 *  Shared /dev/zero mmapping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
 */

#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mman.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/raw.h>
#include <linux/tty.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/crash_dump.h>
#include <linux/backing-dev.h>
#include <linux/bootmem.h>
#include <linux/splice.h>
#include <linux/pfn.h>

#include <asm/uaccess.h>
#include <asm/io.h>

#ifdef CONFIG_IA64
# include <linux/efi.h>
#endif

/*
 * Architectures vary in how they handle caching for addresses
 * outside of main memory.
 */
static inline int uncached_access(struct file *file, unsigned long addr)
{
#if defined(CONFIG_IA64)
        /*
         * On ia64, we ignore O_SYNC because we cannot tolerate memory
         * attribute aliases.
         */
        return !(efi_mem_attributes(addr) & EFI_MEMORY_WB);
#elif defined(CONFIG_MIPS)
        {
                extern int __uncached_access(struct file *file,
                                             unsigned long addr);

                return __uncached_access(file, addr);
        }
#else
        /*
         * Accessing memory above the top of memory the kernel knows about,
         * or through a file pointer that was marked O_SYNC, will be done
         * non-cached.
         */
        if (file->f_flags & O_SYNC)
                return 1;
        return addr >= __pa(high_memory);
#endif
}

#ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
static inline int valid_phys_addr_range(unsigned long addr, size_t count)
{
        if (addr + count > __pa(high_memory))
                return 0;

        return 1;
}

static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
        return 1;
}
#endif

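/*
 * With CONFIG_NONPROMISC_DEVMEM the architecture decides, page by page,
 * which physical addresses user space may reach through /dev/mem: every
 * page of a requested range is checked with devmem_is_allowed().
 */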
#ifdef CONFIG_NONPROMISC_DEVMEM
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
        u64 from = ((u64)pfn) << PAGE_SHIFT;
        u64 to = from + size;
        u64 cursor = from;

        while (cursor < to) {
                if (!devmem_is_allowed(pfn)) {
                        printk(KERN_INFO
                "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
                                current->comm, from, to);
                        return 0;
                }
                cursor += PAGE_SIZE;
                pfn++;
        }
        return 1;
}
#else
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
        return 1;
}
#endif

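/*
 * Default no-op: architectures whose xlate_dev_mem_ptr() sets up a
 * temporary mapping override this weak symbol to tear that mapping down.
 */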
void __attribute__((weak)) unxlate_dev_mem_ptr(unsigned long phys, void *addr)
{
}

/*
 * This function reads the *physical* memory. The f_pos points directly
 * to the memory location.
 */
static ssize_t read_mem(struct file * file, char __user * buf,
                        size_t count, loff_t *ppos)
{
        unsigned long p = *ppos;
        ssize_t read, sz;
        char *ptr;

        if (!valid_phys_addr_range(p, count))
                return -EFAULT;
        read = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
        /* we don't have page 0 mapped on sparc and m68k.. */
        if (p < PAGE_SIZE) {
                sz = PAGE_SIZE - p;
                if (sz > count)
                        sz = count;
                if (sz > 0) {
                        if (clear_user(buf, sz))
                                return -EFAULT;
                        buf += sz;
                        p += sz;
                        count -= sz;
                        read += sz;
                }
        }
#endif

        while (count > 0) {
                /*
                 * Handle first page in case it's not aligned
                 */
                if (-p & (PAGE_SIZE - 1))
                        sz = -p & (PAGE_SIZE - 1);
                else
                        sz = PAGE_SIZE;

                sz = min_t(unsigned long, sz, count);

                if (!range_is_allowed(p >> PAGE_SHIFT, count))
                        return -EPERM;

                /*
                 * On ia64 if a page has been mapped somewhere as
                 * uncached, then it must also be accessed uncached
                 * by the kernel or data corruption may occur
                 */
                ptr = xlate_dev_mem_ptr(p);
                if (!ptr)
                        return -EFAULT;

                if (copy_to_user(buf, ptr, sz)) {
                        unxlate_dev_mem_ptr(p, ptr);
                        return -EFAULT;
                }

                unxlate_dev_mem_ptr(p, ptr);

                buf += sz;
                p += sz;
                count -= sz;
                read += sz;
        }

        *ppos += read;
        return read;
}

static ssize_t write_mem(struct file * file, const char __user * buf,
                         size_t count, loff_t *ppos)
{
        unsigned long p = *ppos;
        ssize_t written, sz;
        unsigned long copied;
        void *ptr;

        if (!valid_phys_addr_range(p, count))
                return -EFAULT;

        written = 0;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
        /* we don't have page 0 mapped on sparc and m68k.. */
        if (p < PAGE_SIZE) {
                unsigned long sz = PAGE_SIZE - p;
                if (sz > count)
                        sz = count;
                /* Hmm. Do something? */
                buf += sz;
                p += sz;
                count -= sz;
                written += sz;
        }
#endif

        while (count > 0) {
                /*
                 * Handle first page in case it's not aligned
                 */
                if (-p & (PAGE_SIZE - 1))
                        sz = -p & (PAGE_SIZE - 1);
                else
                        sz = PAGE_SIZE;

                sz = min_t(unsigned long, sz, count);

                if (!range_is_allowed(p >> PAGE_SHIFT, sz))
                        return -EPERM;

                /*
                 * On ia64 if a page has been mapped somewhere as
                 * uncached, then it must also be accessed uncached
                 * by the kernel or data corruption may occur
                 */
                ptr = xlate_dev_mem_ptr(p);
                if (!ptr) {
                        if (written)
                                break;
                        return -EFAULT;
                }

                copied = copy_from_user(ptr, buf, sz);
                if (copied) {
                        written += sz - copied;
                        unxlate_dev_mem_ptr(p, ptr);
                        if (written)
                                break;
                        return -EFAULT;
                }

                unxlate_dev_mem_ptr(p, ptr);

                buf += sz;
                p += sz;
                count -= sz;
                written += sz;
        }

        *ppos += written;
        return written;
}

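/*
 * Weak default that lets any /dev/mem mapping through unchanged.
 * Architectures can override it to reject a mapping or to adjust the
 * page protections; on x86 the PAT code uses this hook for /dev/mem
 * mmap so that conflicting cache attributes are not handed out.
 */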
int __attribute__((weak)) phys_mem_access_prot_allowed(struct file *file,
        unsigned long pfn, unsigned long size, pgprot_t *vma_prot)
{
        return 1;
}

#ifndef __HAVE_PHYS_MEM_ACCESS_PROT
static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                                     unsigned long size, pgprot_t vma_prot)
{
#ifdef pgprot_noncached
        unsigned long offset = pfn << PAGE_SHIFT;

        if (uncached_access(file, offset))
                return pgprot_noncached(vma_prot);
#endif
        return vma_prot;
}
#endif

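/*
 * Without an MMU, /dev/mem can only be "mapped" by handing the physical
 * address straight back to the caller, and only for shared mappings;
 * a private, copy-on-write mapping needs an MMU.
 */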
#ifndef CONFIG_MMU
static unsigned long get_unmapped_area_mem(struct file *file,
                                           unsigned long addr,
                                           unsigned long len,
                                           unsigned long pgoff,
                                           unsigned long flags)
{
        if (!valid_mmap_phys_addr_range(pgoff, len))
                return (unsigned long) -EINVAL;
        return pgoff << PAGE_SHIFT;
}

/* can't do an in-place private mapping if there's no MMU */
static inline int private_mapping_ok(struct vm_area_struct *vma)
{
        return vma->vm_flags & VM_MAYSHARE;
}
#else
#define get_unmapped_area_mem   NULL

static inline int private_mapping_ok(struct vm_area_struct *vma)
{
        return 1;
}
#endif

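/*
 * mmap of /dev/mem: validate the physical range, check that the caller
 * is allowed to touch it, let the architecture veto or adjust the page
 * protections, and finally install the mapping with remap_pfn_range().
 */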
static int mmap_mem(struct file * file, struct vm_area_struct * vma)
{
        size_t size = vma->vm_end - vma->vm_start;

        if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
                return -EINVAL;

        if (!private_mapping_ok(vma))
                return -ENOSYS;

        if (!range_is_allowed(vma->vm_pgoff, size))
                return -EPERM;

        if (!phys_mem_access_prot_allowed(file, vma->vm_pgoff, size,
                                                &vma->vm_page_prot))
                return -EINVAL;

        vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
                                                 size,
                                                 vma->vm_page_prot);

        /* Remap-pfn-range will mark the range VM_IO and VM_RESERVED */
        if (remap_pfn_range(vma,
                            vma->vm_start,
                            vma->vm_pgoff,
                            size,
                            vma->vm_page_prot))
                return -EAGAIN;
        return 0;
}

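/* mmap of /dev/kmem: convert the kernel-virtual offset to a pfn and reuse mmap_mem(). */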
static int mmap_kmem(struct file * file, struct vm_area_struct * vma)
{
        unsigned long pfn;

        /* Turn a kernel-virtual address into a physical page frame */
        pfn = __pa((u64)vma->vm_pgoff << PAGE_SHIFT) >> PAGE_SHIFT;

        /*
         * RED-PEN: on some architectures there is more mapped memory
         * than available in mem_map which pfn_valid checks
         * for. Perhaps should add a new macro here.
         *
         * RED-PEN: vmalloc is not supported right now.
         */
        if (!pfn_valid(pfn))
                return -EIO;

        vma->vm_pgoff = pfn;
        return mmap_mem(file, vma);
}

#ifdef CONFIG_CRASH_DUMP
/*
 * Read memory corresponding to the old kernel.
 */
static ssize_t read_oldmem(struct file *file, char __user *buf,
                                size_t count, loff_t *ppos)
{
        unsigned long pfn, offset;
        size_t read = 0, csize;
        int rc = 0;

        while (count) {
                pfn = *ppos / PAGE_SIZE;
                if (pfn > saved_max_pfn)
                        return read;

                offset = (unsigned long)(*ppos % PAGE_SIZE);
                if (count > PAGE_SIZE - offset)
                        csize = PAGE_SIZE - offset;
                else
                        csize = count;

                rc = copy_oldmem_page(pfn, buf, csize, offset, 1);
                if (rc < 0)
                        return rc;
                buf += csize;
                *ppos += csize;
                read += csize;
                count -= csize;
        }
        return read;
}
#endif

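/* vread()/vwrite() are provided by mm/vmalloc.c for copying data in and out of vmalloc space. */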
extern long vread(char *buf, char *addr, unsigned long count);
extern long vwrite(char *buf, char *addr, unsigned long count);

/*
 * This function reads the *virtual* memory as seen by the kernel.
 */
static ssize_t read_kmem(struct file *file, char __user *buf,
                         size_t count, loff_t *ppos)
{
        unsigned long p = *ppos;
        ssize_t low_count, read, sz;
        char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */

        read = 0;
        if (p < (unsigned long) high_memory) {
                low_count = count;
                if (count > (unsigned long) high_memory - p)
                        low_count = (unsigned long) high_memory - p;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
                /* we don't have page 0 mapped on sparc and m68k.. */
                if (p < PAGE_SIZE && low_count > 0) {
                        size_t tmp = PAGE_SIZE - p;
                        if (tmp > low_count) tmp = low_count;
                        if (clear_user(buf, tmp))
                                return -EFAULT;
                        buf += tmp;
                        p += tmp;
                        read += tmp;
                        low_count -= tmp;
                        count -= tmp;
                }
#endif
                while (low_count > 0) {
                        /*
                         * Handle first page in case it's not aligned
                         */
                        if (-p & (PAGE_SIZE - 1))
                                sz = -p & (PAGE_SIZE - 1);
                        else
                                sz = PAGE_SIZE;

                        sz = min_t(unsigned long, sz, low_count);

                        /*
                         * On ia64 if a page has been mapped somewhere as
                         * uncached, then it must also be accessed uncached
                         * by the kernel or data corruption may occur
                         */
                        kbuf = xlate_dev_kmem_ptr((char *)p);

                        if (copy_to_user(buf, kbuf, sz))
                                return -EFAULT;
                        buf += sz;
                        p += sz;
                        read += sz;
                        low_count -= sz;
                        count -= sz;
                }
        }

        if (count > 0) {
                kbuf = (char *)__get_free_page(GFP_KERNEL);
                if (!kbuf)
                        return -ENOMEM;
                while (count > 0) {
                        int len = count;

                        if (len > PAGE_SIZE)
                                len = PAGE_SIZE;
                        len = vread(kbuf, (char *)p, len);
                        if (!len)
                                break;
                        if (copy_to_user(buf, kbuf, len)) {
                                free_page((unsigned long)kbuf);
                                return -EFAULT;
                        }
                        count -= len;
                        buf += len;
                        read += len;
                        p += len;
                }
                free_page((unsigned long)kbuf);
        }
        *ppos = p;
        return read;
}


static inline ssize_t
do_write_kmem(void *p, unsigned long realp, const char __user * buf,
              size_t count, loff_t *ppos)
{
        ssize_t written, sz;
        unsigned long copied;

        written = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
        /* we don't have page 0 mapped on sparc and m68k.. */
        if (realp < PAGE_SIZE) {
                unsigned long sz = PAGE_SIZE - realp;
                if (sz > count)
                        sz = count;
                /* Hmm. Do something? */
                buf += sz;
                p += sz;
                realp += sz;
                count -= sz;
                written += sz;
        }
#endif

        while (count > 0) {
                char *ptr;
                /*
                 * Handle first page in case it's not aligned
                 */
                if (-realp & (PAGE_SIZE - 1))
                        sz = -realp & (PAGE_SIZE - 1);
                else
                        sz = PAGE_SIZE;

                sz = min_t(unsigned long, sz, count);

                /*
                 * On ia64 if a page has been mapped somewhere as
                 * uncached, then it must also be accessed uncached
                 * by the kernel or data corruption may occur
                 */
                ptr = xlate_dev_kmem_ptr(p);

                copied = copy_from_user(ptr, buf, sz);
                if (copied) {
                        written += sz - copied;
                        if (written)
                                break;
                        return -EFAULT;
                }
                buf += sz;
                p += sz;
                realp += sz;
                count -= sz;
                written += sz;
        }

        *ppos += written;
        return written;
}


/*
 * This function writes to the *virtual* memory as seen by the kernel.
 */
static ssize_t write_kmem(struct file * file, const char __user * buf,
                          size_t count, loff_t *ppos)
{
        unsigned long p = *ppos;
        ssize_t wrote = 0;
        ssize_t virtr = 0;
        ssize_t written;
        char * kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */

        if (p < (unsigned long) high_memory) {

                wrote = count;
                if (count > (unsigned long) high_memory - p)
                        wrote = (unsigned long) high_memory - p;

                written = do_write_kmem((void*)p, p, buf, wrote, ppos);
                if (written != wrote)
                        return written;
                wrote = written;
                p += wrote;
                buf += wrote;
                count -= wrote;
        }

        if (count > 0) {
                kbuf = (char *)__get_free_page(GFP_KERNEL);
                if (!kbuf)
                        return wrote ? wrote : -ENOMEM;
                while (count > 0) {
                        int len = count;

                        if (len > PAGE_SIZE)
                                len = PAGE_SIZE;
                        if (len) {
                                written = copy_from_user(kbuf, buf, len);
                                if (written) {
                                        if (wrote + virtr)
                                                break;
                                        free_page((unsigned long)kbuf);
                                        return -EFAULT;
                                }
                        }
                        len = vwrite(kbuf, (char *)p, len);
                        count -= len;
                        buf += len;
                        virtr += len;
                        p += len;
                }
                free_page((unsigned long)kbuf);
        }

        *ppos = p;
        return virtr + wrote;
}

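/*
 * /dev/port: byte-at-a-time access to the x86-style I/O port space via
 * inb()/outb(), with the file offset used as the port number.
 */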
#ifdef CONFIG_DEVPORT
static ssize_t read_port(struct file * file, char __user * buf,
                         size_t count, loff_t *ppos)
{
        unsigned long i = *ppos;
        char __user *tmp = buf;

        if (!access_ok(VERIFY_WRITE, buf, count))
                return -EFAULT;
        while (count-- > 0 && i < 65536) {
                if (__put_user(inb(i), tmp) < 0)
                        return -EFAULT;
                i++;
                tmp++;
        }
        *ppos = i;
        return tmp-buf;
}

static ssize_t write_port(struct file * file, const char __user * buf,
                          size_t count, loff_t *ppos)
{
        unsigned long i = *ppos;
        const char __user * tmp = buf;

        if (!access_ok(VERIFY_READ, buf, count))
                return -EFAULT;
        while (count-- > 0 && i < 65536) {
                char c;
                if (__get_user(c, tmp)) {
                        if (tmp > buf)
                                break;
                        return -EFAULT;
                }
                outb(c, i);
                i++;
                tmp++;
        }
        *ppos = i;
        return tmp-buf;
}
#endif

static ssize_t read_null(struct file * file, char __user * buf,
                         size_t count, loff_t *ppos)
{
        return 0;
}

static ssize_t write_null(struct file * file, const char __user * buf,
                          size_t count, loff_t *ppos)
{
        return count;
}

static int pipe_to_null(struct pipe_inode_info *info, struct pipe_buffer *buf,
                        struct splice_desc *sd)
{
        return sd->len;
}

static ssize_t splice_write_null(struct pipe_inode_info *pipe, struct file *out,
                                 loff_t *ppos, size_t len, unsigned int flags)
{
        return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_null);
}

static ssize_t read_zero(struct file * file, char __user * buf,
                         size_t count, loff_t *ppos)
{
        size_t written;

        if (!count)
                return 0;

        if (!access_ok(VERIFY_WRITE, buf, count))
                return -EFAULT;

        written = 0;
        while (count) {
                unsigned long unwritten;
                size_t chunk = count;

                if (chunk > PAGE_SIZE)
                        chunk = PAGE_SIZE;      /* Just for latency reasons */
                unwritten = clear_user(buf, chunk);
                written += chunk - unwritten;
                if (unwritten)
                        break;
                buf += chunk;
                count -= chunk;
                cond_resched();
        }
        return written ? written : -EFAULT;
}

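/*
 * Shared mappings of /dev/zero are backed by a shmem object so that all
 * sharers see the same pages; private mappings fall through to the
 * normal anonymous-memory path.
 */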
static int mmap_zero(struct file * file, struct vm_area_struct * vma)
{
#ifndef CONFIG_MMU
        return -ENOSYS;
#endif
        if (vma->vm_flags & VM_SHARED)
                return shmem_zero_setup(vma);
        return 0;
}

static ssize_t write_full(struct file * file, const char __user * buf,
                          size_t count, loff_t *ppos)
{
        return -ENOSPC;
}

/*
 * Special lseek() function for /dev/null and /dev/zero.  Most notably, you
 * can fopen() both devices with "a" now.  This was previously impossible.
 * -- SRB.
 */

static loff_t null_lseek(struct file * file, loff_t offset, int orig)
{
        return file->f_pos = 0;
}

/*
 * The memory devices use the full 32/64 bits of the offset, and so we cannot
 * check against negative addresses: they are OK. The return value is weird,
 * though, in that case (0).
 *
 * Also note that seeking relative to the "end of file" isn't supported:
 * it has no meaning, so it returns -EINVAL.
 */
static loff_t memory_lseek(struct file * file, loff_t offset, int orig)
{
        loff_t ret;

        mutex_lock(&file->f_path.dentry->d_inode->i_mutex);
        switch (orig) {
                case 0:
                        file->f_pos = offset;
                        ret = file->f_pos;
                        force_successful_syscall_return();
                        break;
                case 1:
                        file->f_pos += offset;
                        ret = file->f_pos;
                        force_successful_syscall_return();
                        break;
                default:
                        ret = -EINVAL;
        }
        mutex_unlock(&file->f_path.dentry->d_inode->i_mutex);
        return ret;
}

static int open_port(struct inode * inode, struct file * filp)
{
        return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
}

#define zero_lseek      null_lseek
#define full_lseek      null_lseek
#define write_zero      write_null
#define read_full       read_zero
#define open_mem        open_port
#define open_kmem       open_mem
#define open_oldmem     open_mem

static const struct file_operations mem_fops = {
        .llseek         = memory_lseek,
        .read           = read_mem,
        .write          = write_mem,
        .mmap           = mmap_mem,
        .open           = open_mem,
        .get_unmapped_area = get_unmapped_area_mem,
};

static const struct file_operations kmem_fops = {
        .llseek         = memory_lseek,
        .read           = read_kmem,
        .write          = write_kmem,
        .mmap           = mmap_kmem,
        .open           = open_kmem,
        .get_unmapped_area = get_unmapped_area_mem,
};

static const struct file_operations null_fops = {
        .llseek         = null_lseek,
        .read           = read_null,
        .write          = write_null,
        .splice_write   = splice_write_null,
};

#ifdef CONFIG_DEVPORT
static const struct file_operations port_fops = {
        .llseek         = memory_lseek,
        .read           = read_port,
        .write          = write_port,
        .open           = open_port,
};
#endif

static const struct file_operations zero_fops = {
        .llseek         = zero_lseek,
        .read           = read_zero,
        .write          = write_zero,
        .mmap           = mmap_zero,
};

/*
 * capabilities for /dev/zero
 * - permits private mappings, "copies" are taken of the source of zeros
 */
static struct backing_dev_info zero_bdi = {
        .capabilities   = BDI_CAP_MAP_COPY,
};

static const struct file_operations full_fops = {
        .llseek         = full_lseek,
        .read           = read_full,
        .write          = write_full,
};

#ifdef CONFIG_CRASH_DUMP
static const struct file_operations oldmem_fops = {
        .read   = read_oldmem,
        .open   = open_oldmem,
};
#endif

static ssize_t kmsg_write(struct file * file, const char __user * buf,
                          size_t count, loff_t *ppos)
{
        char *tmp;
        ssize_t ret;

        tmp = kmalloc(count + 1, GFP_KERNEL);
        if (tmp == NULL)
                return -ENOMEM;
        ret = -EFAULT;
        if (!copy_from_user(tmp, buf, count)) {
                tmp[count] = 0;
                ret = printk("%s", tmp);
                if (ret > count)
                        /* printk can add a prefix */
                        ret = count;
        }
        kfree(tmp);
        return ret;
}

static const struct file_operations kmsg_fops = {
        .write =        kmsg_write,
};

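/*
 * Opening a minor of the mem character device selects the real
 * file_operations for that minor (see devlist[] below) and then calls
 * that open routine, if there is one.
 */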
static int memory_open(struct inode * inode, struct file * filp)
{
        switch (iminor(inode)) {
                case 1:
                        filp->f_op = &mem_fops;
                        filp->f_mapping->backing_dev_info =
                                &directly_mappable_cdev_bdi;
                        break;
                case 2:
                        filp->f_op = &kmem_fops;
                        filp->f_mapping->backing_dev_info =
                                &directly_mappable_cdev_bdi;
                        break;
                case 3:
                        filp->f_op = &null_fops;
                        break;
#ifdef CONFIG_DEVPORT
                case 4:
                        filp->f_op = &port_fops;
                        break;
#endif
                case 5:
                        filp->f_mapping->backing_dev_info = &zero_bdi;
                        filp->f_op = &zero_fops;
                        break;
                case 7:
                        filp->f_op = &full_fops;
                        break;
                case 8:
                        filp->f_op = &random_fops;
                        break;
                case 9:
                        filp->f_op = &urandom_fops;
                        break;
                case 11:
                        filp->f_op = &kmsg_fops;
                        break;
#ifdef CONFIG_CRASH_DUMP
                case 12:
                        filp->f_op = &oldmem_fops;
                        break;
#endif
                default:
                        return -ENXIO;
        }
        if (filp->f_op && filp->f_op->open)
                return filp->f_op->open(inode, filp);
        return 0;
}

static const struct file_operations memory_fops = {
        .open           = memory_open,  /* just a selector for the real open */
};

static const struct {
        unsigned int            minor;
        char                    *name;
        umode_t                 mode;
        const struct file_operations    *fops;
} devlist[] = { /* list of minor devices */
        {1, "mem",     S_IRUSR | S_IWUSR | S_IRGRP, &mem_fops},
        {2, "kmem",    S_IRUSR | S_IWUSR | S_IRGRP, &kmem_fops},
        {3, "null",    S_IRUGO | S_IWUGO,           &null_fops},
#ifdef CONFIG_DEVPORT
        {4, "port",    S_IRUSR | S_IWUSR | S_IRGRP, &port_fops},
#endif
        {5, "zero",    S_IRUGO | S_IWUGO,           &zero_fops},
        {7, "full",    S_IRUGO | S_IWUGO,           &full_fops},
        {8, "random",  S_IRUGO | S_IWUSR,           &random_fops},
        {9, "urandom", S_IRUGO | S_IWUSR,           &urandom_fops},
        {11, "kmsg",   S_IRUGO | S_IWUSR,           &kmsg_fops},
#ifdef CONFIG_CRASH_DUMP
        {12, "oldmem", S_IRUSR | S_IWUSR | S_IRGRP, &oldmem_fops},
#endif
};

static struct class *mem_class;

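/*
 * Boot-time setup: initialise the /dev/zero backing_dev_info, register
 * major MEM_MAJOR and create a device node for each entry in devlist[]
 * under the "mem" class.
 */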
static int __init chr_dev_init(void)
{
        int i;
        int err;

        err = bdi_init(&zero_bdi);
        if (err)
                return err;

        if (register_chrdev(MEM_MAJOR, "mem", &memory_fops))
                printk("unable to get major %d for memory devs\n", MEM_MAJOR);

        mem_class = class_create(THIS_MODULE, "mem");
        for (i = 0; i < ARRAY_SIZE(devlist); i++)
                device_create(mem_class, NULL,
                              MKDEV(MEM_MAJOR, devlist[i].minor),
                              devlist[i].name);

        return 0;
}

fs_initcall(chr_dev_init);