Home

File Index

All Tags

Tags by File

Tag referrers

file: main.c


  1 /*
  2 * main.c -- the bare scull char module
  3 *
  4 * Copyright (C) 2001 Alessandro Rubini and Jonathan Corbet
  5 * Copyright (C) 2001 O'Reilly & Associates
  6 *
  7 * The source code in this file can be freely used, adapted,
  8 * and redistributed in source or binary form, so long as an
  9 * acknowledgment appears in derived source files.  The citation
 10 * should list that the code comes from the book "Linux Device
 11 * Drivers" by Alessandro Rubini and Jonathan Corbet, published
 12 * by O'Reilly & Associates.   No warranty is attached;
 13 * we cannot take responsibility for errors or fitness for use.
 14 *
 15 */

 16
 17 #include <linux/module.h>
 18 #include <linux/moduleparam.h>
 19 #include <linux/init.h>
 20
 21 #include <linux/kernel.h>        /* printk() */
 22 #include <linux/slab.h>                /* kzalloc() */
 23 #include <linux/fs.h>                /* everything... */
 24 #include <linux/errno.h>        /* error codes */
 25 #include <linux/types.h>        /* size_t */
 26 #include <linux/proc_fs.h>
 27 #include <linux/fcntl.h>        /* O_ACCMODE */
 28 #include <linux/seq_file.h>
 29 #include <linux/cdev.h>
 30
 31 #include <asm/uaccess.h>        /* copy_*_user */
 32
 33 #include "scull.h"                /* local definitions */
 34
 35 /*
 36 * Our parameters which can be set at load time.
 37 */

/*
 * Our parameters which can be set at load time.
 */

int scull_major =   SCULL_MAJOR;	/* 0 requests a dynamic major at load time */
int scull_minor =   0;			/* first minor number to use */
int scull_nr_devs = SCULL_NR_DEVS;	/* number of bare scull devices */
int scull_quantum = SCULL_QUANTUM;	/* bytes per quantum (allocation unit) */
int scull_qset =    SCULL_QSET;		/* quantum pointers per quantum set */

/* All parameters are world-readable in sysfs but writable only at load time. */
module_param(scull_major, int, S_IRUGO);
module_param(scull_minor, int, S_IRUGO);
module_param(scull_nr_devs, int, S_IRUGO);
module_param(scull_quantum, int, S_IRUGO);
module_param(scull_qset, int, S_IRUGO);

MODULE_AUTHOR("Alessandro Rubini, Jonathan Corbet");
MODULE_LICENSE("Dual BSD/GPL");

/* Array of scull_nr_devs devices; allocated in scull_init_module. */
struct scull_dev *scull_devices;
 54
 55 /*
 56 * Empty out the scull device; must be called with the device
 57 * mutex held.
 58 */

 59 int /*X*/ scull_trim(struct scull_dev *dev)
 60 {
 61         struct scull_qset *next, *dptr;
 62         int qset = dev->qset;   /* "dev" is not-null */
 63         int i;
 64
 65         for (dptr = dev->data; dptr; dptr = next) { /* all the list items */
 66                 if (dptr->data) {
 67                         for (i = 0; i < qset; i++)
 68                                 kfree(dptr->data[i]);
 69                         kfree(dptr->data);
 70                         dptr->data = NULL;
 71                 }
 72                 next = dptr->next;
 73                 kfree(dptr);
 74         }
 75         dev->size = 0;
 76         dev->quantum = scull_quantum;
 77         dev->qset = scull_qset;
 78         dev->data = NULL;
 79         return 0;
 80 }
 81 #ifdef SCULL_DEBUG
 82 /*
 83 * The proc filesystem: function to read and entry
 84 */

 85
 86 static int /*X*/ scull_read_mem_proc_show(struct seq_file *m, void *v)
 87 {
 88         int i, j;
 89         int limit = m->size - 80; /* Don't print more than this */
 90
 91         for (i = 0; i < scull_nr_devs && m->count <= limit; i++) {
 92                 struct scull_dev *d = &scull_devices[i];
 93                 struct scull_qset *qs = d->data;
 94                 if (mutex_lock_interruptible(&d->mutex))
 95                         return -ERESTARTSYS;
 96                 seq_printf(m,"\nDevice %i: qset %i, q %i, sz %li\n",
 97                                 i, d->qset, d->quantum, d->size);
 98                 for (; qs && m->count <= limit; qs = qs->next) { /* scan the list */
 99                         seq_printf(m, "  item at %p, qset at %p\n",
100                                         qs, qs->data);
101                         if (qs->data && !qs->next) /* dump only the last item */
102                                 for (j = 0; j < d->qset; j++) {
103                                         if (qs->data[j])
104                                                 seq_printf(m,
105                                                                 "    % 4i: %8p\n",
106                                                                 j, qs->data[j]);
107                                 }
108                 }
109                 mutex_unlock(&scull_devices[i].mutex);
110         }
111         return 0;
112 }
113
114 /*
115 * For now, the seq_file implementation will exist in parallel.  The
116 * older read_procmem function should maybe go away, though.
117 */

118
119 /*
120 * Here are our sequence iteration methods.  Our "position" is
121 * simply the device number.
122 */

123 static void * /*X*/ scull_seq_start(struct seq_file *s, loff_t *pos)
124 {
125         if (*pos >= scull_nr_devs)
126                 return NULL;   /* No more to read */
127         return scull_devices + *pos;
128 }
129
130 static void * /*X*/ scull_seq_next(struct seq_file *s, void *v, loff_t *pos)
131 {
132         (*pos)++;
133         if (*pos >= scull_nr_devs)
134                 return NULL;
135         return scull_devices + *pos;
136 }
137
/*
 * End of a read cycle; we hold no iteration state and take no locks
 * across iterations, so there is nothing to release here.
 */
static void scull_seq_stop(struct seq_file *s, void *v)
{
	/* Actually, there's nothing to do here */
}
142
143 static int /*X*/ scull_seq_show(struct seq_file *s, void *v)
144 {
145         struct scull_dev *dev = (struct scull_dev *) v;
146         struct scull_qset *d;
147         int i;
148
149         if (mutex_lock_interruptible(&dev->mutex))
150                 return -ERESTARTSYS;
151         seq_printf(s, "\nDevice %i: qset %i, q %i, sz %li\n",
152                         (int) (dev - scull_devices), dev->qset,
153                         dev->quantum, dev->size);
154         for (d = dev->data; d; d = d->next) { /* scan the list */
155                 seq_printf(s, "  item at %p, qset at %p\n", d, d->data);
156                 if (d->data && !d->next) /* dump only the last item */
157                         for (i = 0; i < dev->qset; i++) {
158                                 if (d->data[i])
159                                         seq_printf(s, "    % 4i: %8p\n",
160                                                         i, d->data[i]);
161                         }
162         }
163         mutex_unlock(&dev->mutex);
164         return 0;
165 }
166         
167 /*
168 * Tie the sequence operators up.
169 */

/*
 * Tie the sequence operators up: one seq_file iterator over the
 * scull_devices array, used by /proc/scullseq.
 */
static struct seq_operations scull_seq_ops = {
	.start = scull_seq_start,
	.next  = scull_seq_next,
	.stop  = scull_seq_stop,
	.show  = scull_seq_show
};
176
177 /*
178 * Now to implement the /proc file we need only make an open
179 * method which sets up the sequence operators.
180 */

/*
 * Now to implement the /proc file we need only make an open
 * method which sets up the sequence operators.
 */
static int scull_proc_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &scull_seq_ops);
}
185
186 /*
187 * Create a set of file operations for our proc file.
188 */

/*
 * File operations for /proc/scullseq: open installs the seq_ops
 * iterator; read/llseek/release are the stock seq_file helpers.
 */
static struct file_operations scull_proc_ops = {
	.owner   = THIS_MODULE,
	.open    = scull_proc_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release
};
196         
/*
 * Generate the open method and file_operations for a single_open()
 * based proc file.  Given _name, expects a function
 * _name##_proc_show(struct seq_file *, void *) and emits
 * _name##_proc_open plus _name##_proc_fops.
 */
#define DEFINE_PROC_SEQ_FILE(_name) \
	static int _name##_proc_open(struct inode *inode, struct file *file)\
	{\
		return single_open(file, _name##_proc_show, NULL);\
	}\
	\
	static const struct file_operations _name##_proc_fops = {\
		.open		= _name##_proc_open,\
		.read		= seq_read,\
		.llseek		= seq_lseek,\
		.release	= single_release,\
	};

/* Instantiate fops for /proc/scullmem backed by scull_read_mem_proc_show. */
DEFINE_PROC_SEQ_FILE(scull_read_mem)
211
212 /*
213 * Actually create (and remove) the /proc file(s).
214 */

215
216 static void /*X*/ scull_create_proc(void)
217 {
218         struct proc_dir_entry *entry;
219         proc_create("scullmem", 0 /* default mode */,
220                         NULL /* parent dir */, &scull_read_mem_proc_fops);
221         entry = proc_create("scullseq", 0, NULL, &scull_proc_ops);
222         if (!entry) {
223                 printk(KERN_WARNING "proc_create scullseq failed\n");
224    }
225 }
226
227 static void /*X*/ scull_remove_proc(void)
228 {
229         /* no problem if it was not registered */
230         remove_proc_entry("scullmem", NULL /* parent dir */);
231         remove_proc_entry("scullseq", NULL);
232 }
233
234 #endif /* SCULL_DEBUG */
235
236 /*
237 * Open and close
238 */

239
240 int /*X*/ scull_open(struct inode *inode, struct file *filp)
241 {
242         struct scull_dev *dev; /* device information */
243
244         dev = container_of(inode->i_cdev, struct scull_dev, cdev);
245         filp->private_data = dev; /* for other methods */
246
247         /* now trim to 0 the length of the device if open was write-only */
248         if ( (filp->f_flags & O_ACCMODE) == O_WRONLY) {
249                 if (mutex_lock_interruptible(&dev->mutex))
250                         return -ERESTARTSYS;
251                 scull_trim(dev); /* ignore errors */
252                 mutex_unlock(&dev->mutex);
253         }
254         return 0;          /* success */
255 }
256
/*
 * Release: scull holds no per-open state beyond private_data and the
 * device persists across opens, so there is nothing to tear down.
 */
int scull_release(struct inode *inode, struct file *filp)
{
	return 0;
}
261
262 /*
263 * Follow the list
264 */

265 struct scull_qset * /*X*/ scull_follow(struct scull_dev *dev, int n)
266 {
267         struct scull_qset *qs = dev->data;
268
269        /* Allocate first qset explicitly if need be */
270         if (! qs) {
271                 qs = dev->data = kzalloc(sizeof(struct scull_qset), GFP_KERNEL);
272                 if (qs == NULL)
273                         return NULL;  /* Never mind */
274         }
275
276         /* Then follow the list */
277         while (n--) {
278                 if (!qs->next) {
279                         qs->next = kzalloc(sizeof(struct scull_qset), GFP_KERNEL);
280                         if (qs->next == NULL)
281                                 return NULL;  /* Never mind */
282                 }
283                 qs = qs->next;
284                 continue;
285         }
286         return qs;
287 }
288
289 /*
290 * Data management: read and write
291 */

292
293 ssize_t /*X*/ scull_read(struct file *filp, char __user *buf, size_t count,
294                loff_t *f_pos)
295 {
296         struct scull_dev *dev = filp->private_data;
297         struct scull_qset *dptr;        /* the first listitem */
298         int quantum = dev->quantum, qset = dev->qset;
299         int itemsize = quantum * qset; /* how many bytes in the listitem */
300         int item, s_pos, q_pos, rest;
301         ssize_t retval = 0;
302
303         if (mutex_lock_interruptible(&dev->mutex))
304                 return -ERESTARTSYS;
305         if (*f_pos >= dev->size)
306                 goto out;
307         if (*f_pos + count > dev->size)
308                 count = dev->size - *f_pos;
309
310         /* find listitem, qset index, and offset in the quantum */
311         item = (long)*f_pos / itemsize;
312         rest = (long)*f_pos % itemsize;
313         s_pos = rest / quantum; q_pos = rest % quantum;
314
315         /* follow the list up to the right position (defined elsewhere) */
316         dptr = scull_follow(dev, item);
317
318         if (dptr == NULL || !dptr->data || ! dptr->data[s_pos])
319                 goto out; /* don't fill holes */
320
321         /* read only up to the end of this quantum */
322         if (count > quantum - q_pos)
323                 count = quantum - q_pos;
324
325         if (copy_to_user(buf, dptr->data[s_pos] + q_pos, count)) {
326                 retval = -EFAULT;
327                 goto out;
328         }
329         *f_pos += count;
330         retval = count;
331
332  out:
333         mutex_unlock(&dev->mutex);
334         return retval;
335 }
336
337 ssize_t /*X*/ scull_write(struct file *filp, const char __user *buf, size_t count,
338                loff_t *f_pos)
339 {
340         struct scull_dev *dev = filp->private_data;
341         struct scull_qset *dptr;
342         int quantum = dev->quantum, qset = dev->qset;
343         int itemsize = quantum * qset;
344         int item, s_pos, q_pos, rest;
345         ssize_t retval = -ENOMEM; /* value used in "goto out" statements */
346
347         if (mutex_lock_interruptible(&dev->mutex))
348                 return -ERESTARTSYS;
349
350         /* find listitem, qset index and offset in the quantum */
351         item = (long)*f_pos / itemsize;
352         rest = (long)*f_pos % itemsize;
353         s_pos = rest / quantum;
354         q_pos = rest % quantum;
355
356         /* follow the list up to the right position */
357         dptr = scull_follow(dev, item);
358         if (dptr == NULL)
359                 goto out;
360         if (!dptr->data) {
361                 dptr->data = kzalloc(qset * sizeof(char *), GFP_KERNEL);
362                 if (!dptr->data)
363                         goto out;
364         }
365         if (!dptr->data[s_pos]) {
366                 /* zero memory since there is no guarantee that memory will be
367                  * initialzed before being read by userspace (e.g., user could
368                  * seek and read uninitialized memory) */

369                 dptr->data[s_pos] = kzalloc(quantum, GFP_KERNEL);
370                 if (!dptr->data[s_pos])
371                         goto out;
372         }
373         /* write only up to the end of this quantum */
374         if (count > quantum - q_pos)
375                 count = quantum - q_pos;
376
377         if (copy_from_user(dptr->data[s_pos]+q_pos, buf, count)) {
378                 retval = -EFAULT;
379                 goto out;
380         }
381         *f_pos += count;
382         retval = count;
383
384        /* update the size */
385         if (dev->size < *f_pos)
386                 dev->size = *f_pos;
387
388  out:
389         mutex_unlock(&dev->mutex);
390         return retval;
391 }
392
393 /*
394 * The ioctl() implementation
395 */

396 long /*X*/ scull_ioctl(struct file *filp,
397                 unsigned int cmd, unsigned long arg)
398 {
399         int err = 0, tmp;
400         int retval = 0;
401    
402         /*
403          * extract the type and number bitfields, and don't decode
404          * wrong cmds: return ENOTTY (inappropriate ioctl) before access_ok()
405          */

406         if (_IOC_TYPE(cmd) != SCULL_IOC_MAGIC) return -ENOTTY;
407         if (_IOC_NR(cmd) > SCULL_IOC_MAXNR) return -ENOTTY;
408
409         /*
410          * the direction is a bitmask, and VERIFY_WRITE catches R/W
411          * transfers. `Type' is user-oriented, while
412          * access_ok is kernel-oriented, so the concept of "read" and
413          * "write" is reversed
414          */

415         if (_IOC_DIR(cmd) & _IOC_READ)
416                 err = !access_ok(VERIFY_WRITE, (void __user *)arg, _IOC_SIZE(cmd));
417         else if (_IOC_DIR(cmd) & _IOC_WRITE)
418                 err =  !access_ok(VERIFY_READ, (void __user *)arg, _IOC_SIZE(cmd));
419         if (err) return -EFAULT;
420
421         switch(cmd) {
422
423           case SCULL_IOCRESET:
424                 scull_quantum = SCULL_QUANTUM;
425                 scull_qset = SCULL_QSET;
426                 break;
427        
428           case SCULL_IOCSQUANTUM: /* Set: arg points to the value */
429                 if (! capable (CAP_SYS_ADMIN))
430                         return -EPERM;
431                 retval = __get_user(scull_quantum, (int __user *)arg);
432                 break;
433
434           case SCULL_IOCTQUANTUM: /* Tell: arg is the value */
435                 if (! capable (CAP_SYS_ADMIN))
436                         return -EPERM;
437                 scull_quantum = arg;
438                 break;
439
440           case SCULL_IOCGQUANTUM: /* Get: arg is pointer to result */
441                 retval = __put_user(scull_quantum, (int __user *)arg);
442                 break;
443
444           case SCULL_IOCQQUANTUM: /* Query: return it (it's positive) */
445                 return scull_quantum;
446
447           case SCULL_IOCXQUANTUM: /* eXchange: use arg as pointer */
448                 if (! capable (CAP_SYS_ADMIN))
449                         return -EPERM;
450                 tmp = scull_quantum;
451                 retval = __get_user(scull_quantum, (int __user *)arg);
452                 if (retval == 0)
453                         retval = __put_user(tmp, (int __user *)arg);
454                 break;
455
456           case SCULL_IOCHQUANTUM: /* sHift: like Tell + Query */
457                 if (! capable (CAP_SYS_ADMIN))
458                         return -EPERM;
459                 tmp = scull_quantum;
460                 scull_quantum = arg;
461                 return tmp;
462        
463           case SCULL_IOCSQSET:
464                 if (! capable (CAP_SYS_ADMIN))
465                         return -EPERM;
466                 retval = __get_user(scull_qset, (int __user *)arg);
467                 break;
468
469           case SCULL_IOCTQSET:
470                 if (! capable (CAP_SYS_ADMIN))
471                         return -EPERM;
472                 scull_qset = arg;
473                 break;
474
475           case SCULL_IOCGQSET:
476                 retval = __put_user(scull_qset, (int __user *)arg);
477                 break;
478
479           case SCULL_IOCQQSET:
480                 return scull_qset;
481
482           case SCULL_IOCXQSET:
483                 if (! capable (CAP_SYS_ADMIN))
484                         return -EPERM;
485                 tmp = scull_qset;
486                 retval = __get_user(scull_qset, (int __user *)arg);
487                 if (retval == 0)
488                         retval = put_user(tmp, (int __user *)arg);
489                 break;
490
491           case SCULL_IOCHQSET:
492                 if (! capable (CAP_SYS_ADMIN))
493                         return -EPERM;
494                 tmp = scull_qset;
495                 scull_qset = arg;
496                 return tmp;
497
498        /*
499         * The following two change the buffer size for scullpipe.
500         * The scullpipe device uses this same ioctl method, just to
501         * write less code. Actually, it's the same driver, isn't it?
502         */

503
504           case SCULL_P_IOCTSIZE:
505                 scull_p_buffer = arg;
506                 break;
507
508           case SCULL_P_IOCQSIZE:
509                 return scull_p_buffer;
510
511
512           default:  /* redundant, as cmd was checked against MAXNR */
513                 return -ENOTTY;
514         }
515         return retval;
516
517 }
518
519 /*
520 * "extended" operations (only seek)
521 */

522 loff_t /*X*/ scull_llseek(struct file *filp, loff_t off, int whence)
523 {
524         struct scull_dev *dev = filp->private_data;
525         loff_t newpos;
526
527         switch(whence) {
528           case 0: /* SEEK_SET */
529                 newpos = off;
530                 break;
531
532           case 1: /* SEEK_CUR */
533                 newpos = filp->f_pos + off;
534                 break;
535
536           case 2: /* SEEK_END */
537                 newpos = dev->size + off;
538                 break;
539
540           default: /* can't happen */
541                 return -EINVAL;
542         }
543         if (newpos < 0) return -EINVAL;
544         filp->f_pos = newpos;
545         return newpos;
546 }
547
/*
 * The file operations for the bare scull devices; wired into each
 * device's cdev by scull_setup_cdev().
 */
struct file_operations scull_fops = {
	.owner =    THIS_MODULE,
	.llseek =   scull_llseek,
	.read =     scull_read,
	.write =    scull_write,
	.unlocked_ioctl =    scull_ioctl,
	.open =     scull_open,
	.release =  scull_release,
};
557
558 /*
559 * finally, the module stuff
560 */

561
562 /*
563 * The cleanup function is used to handle initialization failures as well.
564 * Thefore, it must be careful to work correctly even if some of the items
565 * have not been initialized
566 */

/*
 * The cleanup function is used to handle initialization failures as
 * well.  Therefore, it must be careful to work correctly even if some
 * of the items have not been initialized: every step below is safe
 * against its counterpart never having run.
 */
void scull_cleanup_module(void)
{
	int i;
	dev_t devno = MKDEV(scull_major, scull_minor);

	/* Get rid of our char dev entries (NULL if kzalloc never ran/failed) */
	if (scull_devices) {
		for (i = 0; i < scull_nr_devs; i++) {
			scull_trim(scull_devices + i);	/* free all stored data */
			cdev_del(&scull_devices[i].cdev);
		}
		kfree(scull_devices);
	}

#ifdef SCULL_DEBUG
	scull_remove_proc();
#endif

	/* cleanup_module is never called if registering failed */
	unregister_chrdev_region(devno, scull_nr_devs);

	/* and call the cleanup functions for friend devices */
	scull_p_cleanup();
	scull_access_cleanup();
}
592
593 /*
594 * Setup the char_dev structure for this device.
595 */

596 static void /*X*/ scull_setup_cdev(struct scull_dev *dev, int index)
597 {
598         int err, devno = MKDEV(scull_major, scull_minor + index);
599    
600         cdev_init(&dev->cdev, &scull_fops);
601         dev->cdev.owner = THIS_MODULE;
602         dev->cdev.ops = &scull_fops;
603         err = cdev_add (&dev->cdev, devno, 1);
604         if (err)
605                 /* fail gracefully */
606                 printk(KERN_NOTICE "Error %d adding scull%d", err, index);
607 }
608
/*
 * Module initialization: register a char device region, allocate and
 * initialize the device array, then bring up the friend devices and
 * (in debug builds) the /proc files.  On failure after registration,
 * scull_cleanup_module() unwinds whatever was set up.
 */
int scull_init_module(void)
{
	int i;
	int result;
	dev_t dev;

	/*
	 * Get a range of minor numbers to work with, asking for a dynamic
	 * major unless directed otherwise by a module parameter.
	 */
	if (scull_major) {
		dev = MKDEV(scull_major, scull_minor);
		result = register_chrdev_region(dev, scull_nr_devs, "scull");
	} else {
		result = alloc_chrdev_region(&dev, scull_minor, scull_nr_devs, "scull");
		scull_major = MAJOR(dev);
	}
	if (result < 0) {
		printk(KERN_WARNING "scull: can't get major %d\n", scull_major);
		return result;
	}

	/*
	 * allocate the devices -- we can't have them static, as the number
	 * can be specified at load time
	 */
	scull_devices = kzalloc(scull_nr_devs * sizeof(struct scull_dev), GFP_KERNEL);
	if (!scull_devices) {
		result = -ENOMEM;
		goto fail;  /* Make this more graceful */
	}

	/* Initialize each device (kzalloc already zeroed the array). */
	for (i = 0; i < scull_nr_devs; i++) {
		scull_devices[i].quantum = scull_quantum;
		scull_devices[i].qset = scull_qset;
		mutex_init(&scull_devices[i].mutex);
		scull_setup_cdev(&scull_devices[i], i);
	}

	/* At this point call the init function for any friend device;
	 * each returns how many minors it consumed. */
	dev = MKDEV(scull_major, scull_minor + scull_nr_devs);
	dev += scull_p_init(dev);
	dev += scull_access_init(dev);

#ifdef SCULL_DEBUG
	scull_create_proc();
#endif
	return 0; /* succeed */

 fail:
	scull_cleanup_module();
	return result;
}
663
/* Register entry/exit points with the module loader. */
module_init(scull_init_module);
module_exit(scull_cleanup_module);


Home

File Index

All Tags

Tags by File

Tags referrers

C to HTML Conversion by ctoohtml