file: pipe.c


/*
 * pipe.c -- fifo driver for scull
 *
 * Copyright (C) 2001 Alessandro Rubini and Jonathan Corbet
 * Copyright (C) 2001 O'Reilly & Associates
 *
 * The source code in this file can be freely used, adapted,
 * and redistributed in source or binary form, so long as an
 * acknowledgment appears in derived source files.  The citation
 * should list that the code comes from the book "Linux Device
 * Drivers" by Alessandro Rubini and Jonathan Corbet, published
 * by O'Reilly & Associates.  No warranty is attached;
 * we cannot take responsibility for errors or fitness for use.
 *
 */


#include <linux/module.h>
#include <linux/moduleparam.h>

#include <linux/kernel.h>       /* printk(), min() */
#include <linux/slab.h>         /* kzalloc() */
#include <linux/sched.h>
#include <linux/fs.h>           /* everything... */
#include <linux/proc_fs.h>
#include <linux/errno.h>        /* error codes */
#include <linux/types.h>        /* size_t */
#include <linux/fcntl.h>
#include <linux/poll.h>
#include <linux/cdev.h>
#include <linux/seq_file.h>
#include <asm/uaccess.h>

#include "scull.h"              /* local definitions */

struct scull_pipe {
        wait_queue_head_t inq, outq;       /* read and write queues */
        char *buffer, *end;                /* start of buf, end of buf */
        int buffersize;                    /* used in pointer arithmetic */
        char *rp, *wp;                     /* where to read, where to write */
        int nreaders, nwriters;            /* number of openings for r/w */
        struct fasync_struct *async_queue; /* asynchronous readers */
        struct mutex mutex;                /* mutual exclusion lock */
        struct cdev cdev;                  /* Char device structure */
};

/* parameters */
static int scull_p_nr_devs = SCULL_P_NR_DEVS;   /* number of pipe devices */
int scull_p_buffer = SCULL_P_BUFFER;            /* buffer size */
dev_t scull_p_devno;                            /* Our first device number */

module_param(scull_p_nr_devs, int, 0);  /* FIXME check perms */
module_param(scull_p_buffer, int, 0);

static struct scull_pipe *scull_p_devices;

static int scull_p_fasync(int fd, struct file *filp, int mode);
static int spacefree(struct scull_pipe *dev);

/*
 * Open and close
 */

static int scull_p_open(struct inode *inode, struct file *filp)
{
        struct scull_pipe *dev;

        dev = container_of(inode->i_cdev, struct scull_pipe, cdev);
        filp->private_data = dev;

        if (mutex_lock_interruptible(&dev->mutex))
                return -ERESTARTSYS;
        if (!dev->buffer) {
                /* allocate the buffer */
                dev->buffer = kzalloc(scull_p_buffer, GFP_KERNEL);
                if (!dev->buffer) {
                        mutex_unlock(&dev->mutex);
                        return -ENOMEM;
                }
        }
        dev->buffersize = scull_p_buffer;
        dev->end = dev->buffer + dev->buffersize;
        dev->rp = dev->wp = dev->buffer; /* rd and wr from the beginning */

        /* use f_mode, not f_flags: it's cleaner (fs/open.c tells why) */
        if (filp->f_mode & FMODE_READ)
                dev->nreaders++;
        if (filp->f_mode & FMODE_WRITE)
                dev->nwriters++;
        mutex_unlock(&dev->mutex);

        return nonseekable_open(inode, filp);
}

static int scull_p_release(struct inode *inode, struct file *filp)
{
        struct scull_pipe *dev = filp->private_data;

        /* remove this filp from the asynchronously notified filp's */
        scull_p_fasync(-1, filp, 0);
        mutex_lock(&dev->mutex);
        if (filp->f_mode & FMODE_READ)
                dev->nreaders--;
        if (filp->f_mode & FMODE_WRITE)
                dev->nwriters--;
        if (dev->nreaders + dev->nwriters == 0) {
                kfree(dev->buffer);
                dev->buffer = NULL; /* the other fields are not checked on open */
        }
        mutex_unlock(&dev->mutex);
        return 0;
}

/*
 * Data management: read and write
 */

static ssize_t scull_p_read(struct file *filp, char __user *buf, size_t count,
                loff_t *f_pos)
{
        struct scull_pipe *dev = filp->private_data;

        if (mutex_lock_interruptible(&dev->mutex))
                return -ERESTARTSYS;

        while (dev->rp == dev->wp) { /* nothing to read */
                mutex_unlock(&dev->mutex); /* release the lock */
                if (filp->f_flags & O_NONBLOCK)
                        return -EAGAIN;
                PDEBUG("\"%s\" reading: going to sleep\n", current->comm);
                if (wait_event_interruptible(dev->inq, (dev->rp != dev->wp)))
                        return -ERESTARTSYS; /* signal: tell the fs layer to handle it */
                /* otherwise loop, but first reacquire the lock */
                if (mutex_lock_interruptible(&dev->mutex))
                        return -ERESTARTSYS;
        }
        /* ok, data is there, return something */
        if (dev->wp > dev->rp)
                count = min(count, (size_t)(dev->wp - dev->rp));
        else /* the write pointer has wrapped, return data up to dev->end */
                count = min(count, (size_t)(dev->end - dev->rp));
        if (copy_to_user(buf, dev->rp, count)) {
                mutex_unlock(&dev->mutex);
                return -EFAULT;
        }
        dev->rp += count;
        if (dev->rp == dev->end)
                dev->rp = dev->buffer; /* wrapped */
        mutex_unlock(&dev->mutex);

        /* finally, awake any writers and return */
        wake_up_interruptible(&dev->outq);
        PDEBUG("\"%s\" did read %li bytes\n", current->comm, (long)count);
        return count;
}

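/*
 * Illustrative only: a minimal userspace sketch of a blocking read from
 * this device.  The node name /dev/scullpipe0 is an assumption (device
 * nodes are created by the scull load script or by hand, not by this
 * file), and error handling is trimmed for brevity.
 */
#if 0   /* userspace, not part of the driver */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        char buf[64];
        int fd = open("/dev/scullpipe0", O_RDONLY);     /* assumed node name */
        ssize_t n = read(fd, buf, sizeof(buf));         /* sleeps until a writer supplies data */

        if (n > 0)
                fwrite(buf, 1, n, stdout);
        close(fd);
        return 0;
}
#endif
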
/* Wait for space for writing; caller must hold device mutex.  On
 * error the mutex will be released before returning. */
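/*
 * The manual prepare_to_wait()/schedule()/finish_wait() sequence below is
 * used instead of wait_event_interruptible() so that the "buffer full"
 * test can be repeated after the task is already queued and marked as
 * sleeping; a wakeup that arrives between dropping the mutex and calling
 * schedule() is therefore not lost.
 */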

static int scull_getwritespace(struct scull_pipe *dev, struct file *filp)
{
        while (spacefree(dev) == 0) { /* full */
                DEFINE_WAIT(wait);

                mutex_unlock(&dev->mutex);
                if (filp->f_flags & O_NONBLOCK)
                        return -EAGAIN;
                PDEBUG("\"%s\" writing: going to sleep\n", current->comm);
                prepare_to_wait(&dev->outq, &wait, TASK_INTERRUPTIBLE);
                if (spacefree(dev) == 0)
                        schedule();
                finish_wait(&dev->outq, &wait);
                if (signal_pending(current))
                        return -ERESTARTSYS; /* signal: tell the fs layer to handle it */
                if (mutex_lock_interruptible(&dev->mutex))
                        return -ERESTARTSYS;
        }
        return 0;
}

/* How much space is free? */
static int spacefree(struct scull_pipe *dev)
{
        if (dev->rp == dev->wp)
                return dev->buffersize - 1;
        return ((dev->rp + dev->buffersize - dev->wp) % dev->buffersize) - 1;
}

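/*
 * Worked example of the formula above: with buffersize = 8, rp = buffer+2
 * and wp = buffer+5, free space is ((2 + 8 - 5) % 8) - 1 = 4 bytes.  One
 * slot is always left unused so that rp == wp can unambiguously mean
 * "empty" rather than "full".
 */
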
static ssize_t scull_p_write(struct file *filp, const char __user *buf, size_t count,
                loff_t *f_pos)
{
        struct scull_pipe *dev = filp->private_data;
        int result;

        if (mutex_lock_interruptible(&dev->mutex))
                return -ERESTARTSYS;

        /* Make sure there's space to write */
        result = scull_getwritespace(dev, filp);
        if (result)
                return result; /* scull_getwritespace called mutex_unlock(&dev->mutex) */

        /* ok, space is there, accept something */
        count = min(count, (size_t)spacefree(dev));
        if (dev->wp >= dev->rp)
                count = min(count, (size_t)(dev->end - dev->wp)); /* to end-of-buf */
        else /* the write pointer has wrapped, fill up to rp-1 */
                count = min(count, (size_t)(dev->rp - dev->wp - 1));
        PDEBUG("Going to accept %li bytes to %p from %p\n", (long)count, dev->wp, buf);
        if (copy_from_user(dev->wp, buf, count)) {
                mutex_unlock(&dev->mutex);
                return -EFAULT;
        }
        dev->wp += count;
        if (dev->wp == dev->end)
                dev->wp = dev->buffer; /* wrapped */
        mutex_unlock(&dev->mutex);

        /* finally, awake any reader */
        wake_up_interruptible(&dev->inq);  /* blocked in read() and select() */

        /* and signal asynchronous readers, explained late in chapter 5 */
        if (dev->async_queue)
                kill_fasync(&dev->async_queue, SIGIO, POLL_IN);
        PDEBUG("\"%s\" did write %li bytes\n", current->comm, (long)count);
        return count;
}

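/*
 * Illustrative only: a userspace sketch of a non-blocking write.  When the
 * circular buffer is full, scull_p_write() (via scull_getwritespace())
 * returns -EAGAIN instead of sleeping.  The node name is again an
 * assumption and error handling is trimmed.
 */
#if 0   /* userspace, not part of the driver */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        const char msg[] = "hello";
        int fd = open("/dev/scullpipe0", O_WRONLY | O_NONBLOCK);
        ssize_t n = write(fd, msg, strlen(msg));

        if (n < 0 && errno == EAGAIN)
                fprintf(stderr, "buffer full, try again later\n");
        close(fd);
        return 0;
}
#endif
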
static unsigned int scull_p_poll(struct file *filp, poll_table *wait)
{
        struct scull_pipe *dev = filp->private_data;
        unsigned int mask = 0;

        /*
         * The buffer is circular; it is considered full
         * if "wp" is right behind "rp" and empty if the
         * two are equal.
         */
        mutex_lock(&dev->mutex);
        poll_wait(filp, &dev->inq,  wait);
        poll_wait(filp, &dev->outq, wait);
        if (dev->rp != dev->wp)
                mask |= POLLIN | POLLRDNORM;    /* readable */
        if (spacefree(dev))
                mask |= POLLOUT | POLLWRNORM;   /* writable */
        mutex_unlock(&dev->mutex);
        return mask;
}

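/*
 * Illustrative only: how a userspace reader might multiplex on this device
 * with poll(2).  The node name and the 5-second timeout are assumptions.
 */
#if 0   /* userspace, not part of the driver */
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        struct pollfd pfd;
        int fd = open("/dev/scullpipe0", O_RDONLY | O_NONBLOCK);

        pfd.fd = fd;
        pfd.events = POLLIN;
        if (poll(&pfd, 1, 5000) > 0 && (pfd.revents & POLLIN)) {
                char buf[64];
                ssize_t n = read(fd, buf, sizeof(buf));

                if (n > 0)
                        fwrite(buf, 1, n, stdout);
        }
        close(fd);
        return 0;
}
#endif
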
static int scull_p_fasync(int fd, struct file *filp, int mode)
{
        struct scull_pipe *dev = filp->private_data;

        return fasync_helper(fd, filp, mode, &dev->async_queue);
}

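/*
 * Illustrative only: enabling asynchronous notification from userspace.
 * Setting O_ASYNC with F_SETFL invokes scull_p_fasync() above, after which
 * kill_fasync() in scull_p_write() delivers SIGIO to this process whenever
 * new data arrives.  Node name and signal-handling details are assumptions.
 */
#if 0   /* userspace, not part of the driver */
#include <fcntl.h>
#include <signal.h>
#include <unistd.h>

static void sigio_handler(int sig)
{
        /* data is available; a real program would read() outside the handler */
}

int main(void)
{
        int fd = open("/dev/scullpipe0", O_RDONLY);

        signal(SIGIO, sigio_handler);
        fcntl(fd, F_SETOWN, getpid());
        fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | O_ASYNC);
        pause();        /* wait for SIGIO */
        close(fd);
        return 0;
}
#endif
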
#ifdef SCULL_DEBUG
static int scull_read_p_mem_proc_show(struct seq_file *m, void *v)
{
        int i;
        struct scull_pipe *p;

#define LIMIT (m->size-200)     /* don't print any more after this size */
        seq_printf(m, "Default buffersize is %i\n", scull_p_buffer);
        for (i = 0; i < scull_p_nr_devs && m->count <= LIMIT; i++) {
                p = &scull_p_devices[i];
                if (mutex_lock_interruptible(&p->mutex))
                        return -ERESTARTSYS;
                seq_printf(m, "\nDevice %i: %p\n", i, p);
/*              seq_printf(m, "   Queues: %p %p\n", p->inq, p->outq);*/
                seq_printf(m, "   Buffer: %p to %p (%i bytes)\n", p->buffer, p->end, p->buffersize);
                seq_printf(m, "   rp %p   wp %p\n", p->rp, p->wp);
                seq_printf(m, "   readers %i   writers %i\n", p->nreaders, p->nwriters);
                mutex_unlock(&p->mutex);
        }
        return 0;
}

#define DEFINE_PROC_SEQ_FILE(_name) \
        static int _name##_proc_open(struct inode *inode, struct file *file)\
        {\
                return single_open(file, _name##_proc_show, NULL);\
        }\
        \
        static const struct file_operations _name##_proc_fops = {\
                .open    = _name##_proc_open,\
                .read    = seq_read,\
                .llseek  = seq_lseek,\
                .release = single_release,\
        };

DEFINE_PROC_SEQ_FILE(scull_read_p_mem)

#endif

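/*
 * When the module is built with SCULL_DEBUG, the state dumped by
 * scull_read_p_mem_proc_show() can be inspected with, for example,
 * "cat /proc/scullpipe" (the entry is created in scull_p_init() below).
 */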

/*
 * The file operations for the pipe device
 * (some are overlaid with bare scull)
 */

struct file_operations scull_pipe_fops = {
        .owner =          THIS_MODULE,
        .llseek =         no_llseek,
        .read =           scull_p_read,
        .write =          scull_p_write,
        .poll =           scull_p_poll,
        .unlocked_ioctl = scull_ioctl,
        .open =           scull_p_open,
        .release =        scull_p_release,
        .fasync =         scull_p_fasync,
};

/*
 * Set up a cdev entry.
 */

static void scull_p_setup_cdev(struct scull_pipe *dev, int index)
{
        int err, devno = scull_p_devno + index;

        cdev_init(&dev->cdev, &scull_pipe_fops);
        dev->cdev.owner = THIS_MODULE;
        err = cdev_add(&dev->cdev, devno, 1);
        if (err)
                /* fail gracefully */
                printk(KERN_NOTICE "Error %d adding scullpipe%d\n", err, index);
}

/*
 * Initialize the pipe devs; return how many we did.
 */

int scull_p_init(dev_t firstdev)
{
        int i, result;

        result = register_chrdev_region(firstdev, scull_p_nr_devs, "scullp");
        if (result < 0) {
                printk(KERN_NOTICE "Unable to get scullp region, error %d\n", result);
                return 0;
        }
        scull_p_devno = firstdev;
        scull_p_devices = kzalloc(scull_p_nr_devs * sizeof(struct scull_pipe), GFP_KERNEL);
        if (scull_p_devices == NULL) {
                unregister_chrdev_region(firstdev, scull_p_nr_devs);
                return 0;
        }
        for (i = 0; i < scull_p_nr_devs; i++) {
                init_waitqueue_head(&(scull_p_devices[i].inq));
                init_waitqueue_head(&(scull_p_devices[i].outq));
                mutex_init(&scull_p_devices[i].mutex);
                scull_p_setup_cdev(scull_p_devices + i, i);
        }
#ifdef SCULL_DEBUG
        proc_create("scullpipe", 0, NULL, &scull_read_p_mem_proc_fops);
#endif
        return scull_p_nr_devs;
}

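/*
 * Illustrative only: scull_p_init() is called from the main scull module's
 * init code, and the two module parameters above can be set at load time.
 * The module path, node name, and device numbers below are assumptions:
 *
 *      insmod ./scull.ko scull_p_nr_devs=4 scull_p_buffer=8192
 *      mknod /dev/scullpipe0 c <major> <minor>
 */
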
/*
 * This is called by cleanup_module or on failure.
 * It must never fail, even if nothing was initialized first.
 */

void scull_p_cleanup(void)
{
        int i;

#ifdef SCULL_DEBUG
        remove_proc_entry("scullpipe", NULL);
#endif

        if (!scull_p_devices)
                return; /* nothing else to release */

        for (i = 0; i < scull_p_nr_devs; i++) {
                cdev_del(&scull_p_devices[i].cdev);
                kfree(scull_p_devices[i].buffer);
        }
        kfree(scull_p_devices);
        unregister_chrdev_region(scull_p_devno, scull_p_nr_devs);
        scull_p_devices = NULL; /* pedantic */
}

