source: trunk/src/samhain_kmem.c@285

Last change on this file since 285 was 279, checked in by katerina, 15 years ago

Fix for tickets #200 to #206 (kernel check, login checks, bugfixes).

File size: 15.1 KB
/* Most of this code is ripped from the Linux kernel:
 *
 * linux/drivers/char/mem.c
 *
 * Copyright (C) 1991, 1992  Linus Torvalds
 *
 * Added devfs support.
 *   Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
 * Shared /dev/zero mmaping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
 */

#include "config.h"

#undef _
#define _(string) string

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/proc_fs.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/mman.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/raw.h>
#include <linux/tty.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/crash_dump.h>
#include <linux/backing-dev.h>
#include <linux/bootmem.h>
#include <linux/splice.h>
#include <linux/pfn.h>
#include <linux/version.h>

#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/pgtable.h>

#ifdef CONFIG_IA64
# include <linux/efi.h>
#endif

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("samhain_kmem Kernel Module");
MODULE_AUTHOR("Rainer Wichmann");

static int debug = 0;
#ifdef MODULE_PARM
MODULE_PARM (debug, "i");
#else
module_param(debug, int, 0444);
#endif

#ifdef MODULE_PARM_DESC
MODULE_PARM_DESC(debug, "Set to a non-zero value for debugging.");
#endif
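
/* Usage sketch for the parameter (the object name samhain_kmem.ko is an
 * assumption about the build, not something this file defines):
 *
 *   insmod ./samhain_kmem.ko debug=1
 */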

/* struct task_struct
 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29)
#define TASK_EUID euid
#else
#define TASK_EUID cred->euid
#endif
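
/* Background: as of kernel 2.6.29 the process credentials were moved into
 * a separate struct cred, so the effective UID is reached via
 * current->cred->euid rather than current->euid. The TASK_EUID macro
 * hides that difference.
 */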

static struct proc_dir_entry *proc_entry;

/*
 * Architectures vary in how they handle caching for addresses
 * outside of main memory.
 *
 */
static inline int uncached_access(struct file *file, unsigned long addr)
{
#if defined(__i386__) && !defined(__arch_um__)
  /*
   * On the PPro and successors, the MTRRs are used to set
   * memory types for physical addresses outside main memory,
   * so blindly setting PCD or PWT on those pages is wrong.
   * For Pentiums and earlier, the surround logic should disable
   * caching for the high addresses through the KEN pin, but
   * we maintain the tradition of paranoia in this code.
   */
  if (file->f_flags & O_SYNC)
    return 1;
  return !( test_bit(X86_FEATURE_MTRR,        (const void *) boot_cpu_data.x86_capability) ||
            test_bit(X86_FEATURE_K6_MTRR,     (const void *) boot_cpu_data.x86_capability) ||
            test_bit(X86_FEATURE_CYRIX_ARR,   (const void *) boot_cpu_data.x86_capability) ||
            test_bit(X86_FEATURE_CENTAUR_MCR, (const void *) boot_cpu_data.x86_capability) )
    && addr >= __pa(high_memory);
#elif defined(__x86_64__) && !defined(__arch_um__)
  /*
   * This is broken because it can generate memory type aliases,
   * which can cause cache corruption.
   * But it is only available for root, and we have to be bug-to-bug
   * compatible with i386.
   */
  if (file->f_flags & O_SYNC)
    return 1;
  /* Same behaviour as i386: PAT is always set to cached, and the MTRRs
     control the caching behaviour.
     Hopefully a full PAT implementation will fix that soon. */
  return 0;
#elif defined(CONFIG_IA64)
  /*
   * On ia64, we ignore O_SYNC because we cannot tolerate
   * memory attribute aliases.
   */
  return !(efi_mem_attributes(addr) & EFI_MEMORY_WB);
#elif defined(CONFIG_MIPS)
  {
    extern int __uncached_access(struct file *file,
                                 unsigned long addr);

    return __uncached_access(file, addr);
  }
#else
  /*
   * Accessing memory above the top of memory the kernel knows about,
   * or through a file pointer that was marked O_SYNC, will be done
   * non-cached.
   */
  if (file->f_flags & O_SYNC)
    return 1;
  return addr >= __pa(high_memory);
#endif
}

#ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
static inline int valid_phys_addr_range(unsigned long addr, size_t count)
{
  if (addr + count > __pa(high_memory))
    return 0;

  return 1;
}

static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
  return 1;
}
#endif


/* #ifndef __HAVE_PHYS_MEM_ACCESS_PROT */
static pgprot_t my_phys_mem_access_prot(struct file *file, unsigned long pfn,
                                        unsigned long size, pgprot_t vma_prot)
{
#ifdef pgprot_noncached
  unsigned long offset = pfn << PAGE_SHIFT;

  if (uncached_access(file, offset))
    return pgprot_noncached(vma_prot);
#else
#error pgtable
#endif
  return vma_prot;
}
/* #endif */
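
/* This mirrors the kernel's default phys_mem_access_prot(): mappings of
 * addresses that must not be cached (see uncached_access() above) are
 * downgraded to a non-cached page protection. The #error is a build-time
 * tripwire for architectures that lack pgprot_noncached.
 */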


#ifndef CONFIG_MMU
static unsigned long get_unmapped_area_mem(struct file *file,
                                           unsigned long addr,
                                           unsigned long len,
                                           unsigned long pgoff,
                                           unsigned long flags)
{
  if (!valid_mmap_phys_addr_range(pgoff, len))
    return (unsigned long) -EINVAL;
  return pgoff << PAGE_SHIFT;
}

/* can't do an in-place private mapping if there's no MMU */
static inline int private_mapping_ok(struct vm_area_struct *vma)
{
  return vma->vm_flags & VM_MAYSHARE;
}
#else
#define get_unmapped_area_mem NULL

static inline int private_mapping_ok(struct vm_area_struct *vma)
{
  return 1;
}
#endif

static int mmap_mem(struct file * file, struct vm_area_struct * vma)
{
  size_t size = vma->vm_end - vma->vm_start;

  if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
    return -EINVAL;

  if (!private_mapping_ok(vma))
    return -ENOSYS;

  vma->vm_page_prot = my_phys_mem_access_prot(file, vma->vm_pgoff,
                                              size,
                                              vma->vm_page_prot);

  /* remap_pfn_range() will mark the range VM_IO and VM_RESERVED */
  if (remap_pfn_range(vma,
                      vma->vm_start,
                      vma->vm_pgoff,
                      size,
                      vma->vm_page_prot))
    return -EAGAIN;
  return 0;
}

static int mmap_kmem(struct file * file, struct vm_area_struct * vma)
{
  unsigned long pfn;

  /* Turn a kernel-virtual address into a physical page frame */
  pfn = __pa((u64)vma->vm_pgoff << PAGE_SHIFT) >> PAGE_SHIFT;

  /*
   * RED-PEN: on some architectures there is more mapped memory
   * than available in mem_map which pfn_valid checks
   * for. Perhaps should add a new macro here.
   *
   * RED-PEN: vmalloc is not supported right now.
   */
  if (!pfn_valid(pfn))
    return -EIO;

  vma->vm_pgoff = pfn;
  return mmap_mem(file, vma);
}
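
/* For /proc/kmem the mmap offset is a kernel virtual address, not a
 * physical one: it is translated into a page frame number via __pa()
 * and the remaining work is delegated to mmap_mem() above.
 */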

static int my_permission(struct inode *inode, int op)
{
  /*
   * only root (uid 0) may read from it
   */
  if (debug)
    {
      printk(KERN_INFO "samhain_kmem: permission op = %d, current->euid = %d\n",
             op, (int)current->TASK_EUID );
    }

  if ((op & 4) != 0 && (op & 2) == 0 && current->TASK_EUID == 0)
    {
      if (debug)
        {
          printk(KERN_INFO "samhain_kmem: access granted\n" );
        }
      return 0;
    }

  /*
   * If it's anything else, access is denied
   */
  if ((op & 2) != 0)
    {
      printk(KERN_INFO "/proc/kmem: access denied, "
             "permission op = %d, current->euid = %d\n",
             op, (int)current->TASK_EUID );
    }
  else if (debug)
    {
      printk(KERN_INFO "samhain_kmem: access denied\n" );
    }
  return -EACCES;
}
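
/* The magic numbers above are the kernel's permission mask bits:
 * MAY_WRITE is 2 and MAY_READ is 4. The check therefore grants plain
 * reads to euid 0 and denies writes and everything else.
 */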

static struct inode_operations Inode_Ops_Kmem = {
  .permission = my_permission,     /* check for permissions */
};

static int open_kmem(struct inode * inode, struct file * filp)
{
  int ret = capable(CAP_SYS_RAWIO) ? 0 : -EPERM;

  if (debug)
    {
      printk(KERN_INFO "samhain_kmem: open_kmem retval = %d\n", ret);
    }

  if (ret == 0)
    try_module_get(THIS_MODULE);

  if (debug)
    {
      printk(KERN_INFO "samhain_kmem: open_kmem return\n");
    }

  return ret;
}

static int close_kmem(struct inode *inode, struct file *file)
{
  if (debug)
    {
      printk(KERN_INFO "samhain_kmem: close_kmem enter\n");
    }

  module_put(THIS_MODULE);

  if (debug)
    {
      printk(KERN_INFO "samhain_kmem: close_kmem return\n");
    }

  return 0; /* success */
}

/*********************************************************************
 *
 * >>> Required info from System.map: vmlist_lock, vmlist <<<
 */
static rwlock_t * sh_vmlist_lock_ptr = (rwlock_t *) SH_VMLIST_LOCK;

static struct vm_struct * sh_vmlist = (struct vm_struct *) SH_VMLIST;
/*
 *
 *********************************************************************/
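
/* SH_VMLIST_LOCK and SH_VMLIST are not defined in this file; presumably
 * the build extracts the addresses of the unexported vmlist_lock and
 * vmlist kernel symbols from System.map and passes them in (e.g. via
 * config.h), so the module can walk the vmalloc list without needing
 * exported symbols.
 */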

static long my_vread(char *buf, char *addr, unsigned long count)
{
  struct vm_struct *tmp;
  char *vaddr, *buf_start = buf;
  unsigned long n;

  /* Don't allow overflow */
  if ((unsigned long) addr + count < count)
    count = -(unsigned long) addr;

  read_lock(sh_vmlist_lock_ptr);
  for (tmp = sh_vmlist; tmp; tmp = tmp->next) {
    vaddr = (char *) tmp->addr;
    if (addr >= vaddr + tmp->size - PAGE_SIZE)
      continue;
    while (addr < vaddr) {
      if (count == 0)
        goto finished;
      *buf = '\0';
      buf++;
      addr++;
      count--;
    }
    n = vaddr + tmp->size - PAGE_SIZE - addr;
    do {
      if (count == 0)
        goto finished;
      *buf = *addr;
      buf++;
      addr++;
      count--;
    } while (--n > 0);
  }
finished:
  read_unlock(sh_vmlist_lock_ptr);
  if (debug)
    {
      printk(KERN_INFO "samhain_kmem: start %lu\n", (unsigned long) buf_start);
      printk(KERN_INFO "samhain_kmem: end   %lu\n", (unsigned long) buf);
      printk(KERN_INFO "samhain_kmem: size  %lu\n", (unsigned long) (buf - buf_start));
    }
  return buf - buf_start;
}
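
/* my_vread() is a clone of the kernel's vread(): it walks the vmalloc
 * area list, returns '\0' bytes for gaps between areas, and skips the
 * trailing guard page of each area (the "tmp->size - PAGE_SIZE"). The
 * only difference is that the list head and its lock are reached via
 * the System.map addresses above rather than exported symbols.
 */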

static ssize_t read_kmem(struct file *file, char __user *buf,
                         size_t count, loff_t *ppos)
{
  unsigned long p = *ppos;
  ssize_t low_count, read, sz;
  char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */

  if (debug) {
    printk(KERN_INFO "samhain_kmem: read_kmem entry\n");
    printk(KERN_INFO "samhain_kmem: p    %lu\n", (unsigned long) p);
    printk(KERN_INFO "samhain_kmem: high %lu\n", (unsigned long) high_memory);
  }

  read = 0;
  if (p < (unsigned long) high_memory) {
    low_count = count;

    if (debug) {
      printk(KERN_INFO "samhain_kmem: low_count(1) %ld\n", (long) low_count);
    }

    if (count > (unsigned long) high_memory - p)
      low_count = (unsigned long) high_memory - p;

    if (debug) {
      printk(KERN_INFO "samhain_kmem: low_count(2) %ld\n", (long) low_count);
    }

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
    /* we don't have page 0 mapped on sparc and m68k.. */
    if (p < PAGE_SIZE && low_count > 0) {
      size_t tmp = PAGE_SIZE - p;
      if (tmp > low_count) tmp = low_count;
      if (clear_user(buf, tmp))
        {
          if (debug) {
            printk(KERN_INFO "samhain_kmem: Bad address, line %d\n", __LINE__);
          }
          return -EFAULT;
        }
      buf += tmp;
      p += tmp;
      read += tmp;
      low_count -= tmp;
      count -= tmp;
    }
#endif

    if (debug) {
      printk(KERN_INFO "samhain_kmem: low_count(3) %ld\n", (long) low_count);
    }

    while (low_count > 0) {
      /*
       * Handle first page in case it's not aligned
       */
      if (-p & (PAGE_SIZE - 1))
        sz = -p & (PAGE_SIZE - 1);
      else
        sz = PAGE_SIZE;

      sz = min_t(unsigned long, sz, low_count);

      /*
       * On ia64 if a page has been mapped somewhere as
       * uncached, then it must also be accessed uncached
       * by the kernel or data corruption may occur
       */
      kbuf = xlate_dev_kmem_ptr((char *)p);

      if (copy_to_user(buf, kbuf, sz))
        {
          if (debug) {
            printk(KERN_INFO "samhain_kmem: Bad address, line %d\n", __LINE__);
            printk(KERN_INFO "samhain_kmem: size %ld\n", (long) sz);
            printk(KERN_INFO "samhain_kmem: kbuf %p\n", kbuf);
            printk(KERN_INFO "samhain_kmem: buf  %p\n", buf);
            printk(KERN_INFO "samhain_kmem: high %lu\n", (unsigned long) high_memory);
          }
          return -EFAULT;
        }
      buf += sz;
      p += sz;
      read += sz;
      low_count -= sz;
      count -= sz;
      if (debug) {
        printk(KERN_INFO "samhain_kmem: low_count(4) %ld\n", (long) low_count);
      }
    }
  }

  if (debug) {
    printk(KERN_INFO "samhain_kmem: read_kmem mid\n");
    printk(KERN_INFO "samhain_kmem: count %lu\n", (unsigned long) count);
  }

  if (count > 0) {
    kbuf = (char *)__get_free_page(GFP_KERNEL);
    if (!kbuf)
      {
        if (debug) {
          printk(KERN_INFO "samhain_kmem: out of memory\n");
        }
        return -ENOMEM;
      }
    while (count > 0) {
      int len = count;

      if (len > PAGE_SIZE)
        len = PAGE_SIZE;
      len = my_vread(kbuf, (char *)p, len);
      if (!len)
        break;
      if (copy_to_user(buf, kbuf, len)) {
        if (debug) {
          printk(KERN_INFO "samhain_kmem: Bad address, line %d\n", __LINE__);
          printk(KERN_INFO "samhain_kmem: size %ld\n", (long) len);
          printk(KERN_INFO "samhain_kmem: kbuf %p\n", kbuf);
          printk(KERN_INFO "samhain_kmem: buf  %p\n", buf);
          printk(KERN_INFO "samhain_kmem: high %lu\n", (unsigned long) high_memory);
        }
        free_page((unsigned long)kbuf);
        return -EFAULT;
      }
      count -= len;
      buf += len;
      read += len;
      p += len;
    }
    free_page((unsigned long)kbuf);
  }
  *ppos = p;
  if (debug) {
    printk(KERN_INFO "samhain_kmem: read_kmem end\n");
    printk(KERN_INFO "samhain_kmem: read %ld\n", (long) read);
  }
  return read;
}
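
/* read_kmem() works in two phases, like the kernel's own /dev/kmem read:
 * offsets below high_memory lie in the kernel's identity mapping and are
 * copied out directly, page by page; anything beyond is treated as
 * vmalloc space and bounced through a temporary page filled by
 * my_vread().
 */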


static loff_t memory_lseek(struct file * file, loff_t offset, int orig)
{
  loff_t ret;

  mutex_lock(&file->f_path.dentry->d_inode->i_mutex);
  switch (orig) {
  case 0:
    file->f_pos = offset;
    ret = file->f_pos;
    force_successful_syscall_return();
    break;
  case 1:
    file->f_pos += offset;
    ret = file->f_pos;
    force_successful_syscall_return();
    break;
  default:
    if (debug) {
      printk(KERN_INFO "samhain_kmem: invalid input %d\n", orig);
    }
    ret = -EINVAL;
  }
  mutex_unlock(&file->f_path.dentry->d_inode->i_mutex);
  return ret;
}
538
539static const struct file_operations File_Ops_Kmem = {
540 .llseek = memory_lseek,
541 .read = read_kmem,
542 .mmap = mmap_kmem,
543 .open = open_kmem,
544 .release = close_kmem,
545 .get_unmapped_area = get_unmapped_area_mem,
546};
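
/* Userspace sketch (hypothetical, not part of this file): reading a
 * kernel object through /proc/kmem, given its address from System.map:
 *
 *   int fd = open("/proc/kmem", O_RDONLY);
 *   lseek(fd, (off_t) addr_from_System_map, SEEK_SET);
 *   read(fd, buf, sizeof(buf));
 *
 * The open succeeds only with CAP_SYS_RAWIO (see open_kmem()), and
 * my_permission() restricts access to read-only for euid 0.
 */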


/* Init function called on module entry
 */
static int my_module_init( void )
{
  int ret = 0;

  proc_entry = create_proc_entry( "kmem", 0400, NULL );

  if (proc_entry == NULL) {

    ret = -ENOMEM;

    printk(KERN_INFO "samhain_kmem: Couldn't create proc entry\n");

  } else {

/* 2.6.30 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30)
    proc_entry->owner     = THIS_MODULE;
#endif
    proc_entry->proc_iops = &Inode_Ops_Kmem;
    proc_entry->proc_fops = &File_Ops_Kmem;

    proc_entry->uid       = 0;
    proc_entry->gid       = 0;
    proc_entry->mode      = S_IFREG | S_IRUSR;

    if (debug) {
      printk(KERN_INFO "samhain_kmem: module is now loaded.\n");
    }
  }

  return ret;
}

/* Cleanup function called on module exit */

static void my_module_cleanup( void )
{
  remove_proc_entry("kmem", NULL);

  if (debug) {
    printk(KERN_INFO "samhain_kmem: module is now unloaded.\n");
  }
  return;
}



/* Declare entry and exit functions */

module_init( my_module_init );

module_exit( my_module_cleanup );
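
/* Build/load sketch (assumes a standard kbuild makefile; the .ko name
 * follows from the source file name and is an assumption here):
 *
 *   make -C /lib/modules/$(uname -r)/build M=$PWD modules
 *   insmod ./samhain_kmem.ko
 *   ... samhain reads kernel memory from /proc/kmem in place of a
 *       missing or access-restricted /dev/kmem ...
 *   rmmod samhain_kmem
 */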