file: access.c
1
/*
2
* access.c -- the files with access control on open
3
*
4
* Copyright (C) 2001 Alessandro Rubini and Jonathan Corbet
5
* Copyright (C) 2001 O'Reilly & Associates
6
*
7
* The source code in this file can be freely used, adapted,
8
* and redistributed in source or binary form, so long as an
9
* acknowledgment appears in derived source files. The citation
10
* should list that the code comes from the book "Linux Device
11
* Drivers" by Alessandro Rubini and Jonathan Corbet, published
12
* by O'Reilly & Associates. No warranty is attached;
13
* we cannot take responsibility for errors or fitness for use.
14
*
15
* $Id: access.c,v 1.17 2004/09/26 07:29:56 gregkh Exp $
16
*/
17
18
/* FIXME: cloned devices as a use for kobjects? */
19
20
#include <linux/kernel.h> /* printk() */
21
#include <linux/module.h>
22
#include <linux/slab.h> /* kzalloc() */
23
#include <linux/sched.h>
24
#include <linux/fs.h> /* everything... */
25
#include <linux/errno.h> /* error codes */
26
#include <linux/types.h> /* size_t */
27
#include <linux/fcntl.h>
28
#include <linux/cdev.h>
29
#include <linux/tty.h>
30
#include <asm/atomic.h>
31
#include <linux/list.h>
32
33
#include "scull.h" /* local definitions */
34
35
static dev_t /*X*/ scull_a_firstdev; /* Where our range begins */
36
37
/*
38
* These devices fall back on the main scull operations. They only
39
* differ in the implementation of open() and close()
40
*/
41
42
/************************************************************************
43
* The first device is the single-open one,
44
* it has an hw structure and an open count
45
*/
46
static struct scull_dev /*X*/ scull_s_device;
47
static atomic_t /*X*/ scull_s_available = ATOMIC_INIT(1);
48
49
static int /*X*/ scull_s_open(struct inode *inode, struct file *filp)
50
{
51
struct scull_dev *dev = &scull_s_device; /* device information */
52
53
if (! atomic_dec_and_test (&scull_s_available)) {
54
atomic_inc(&scull_s_available);
55
return -EBUSY; /* already open */
56
}
57
58
/* then, everything else is copied from the bare scull device */
59
if ( (filp->f_flags & O_ACCMODE) == O_WRONLY)
60
scull_trim(dev);
61
filp->private_data = dev;
62
return 0; /* success */
63
}
64
65
static int /*X*/ scull_s_release(struct inode *inode, struct file *filp)
66
{
67
atomic_inc(&scull_s_available); /* release the device */
68
return 0;
69
}
70
71
/*
72
* The other operations for the single-open device come from the bare device
73
*/
74
struct file_operations /*X*/ scull_sngl_fops = {
75
.owner = THIS_MODULE,
76
.llseek = scull_llseek,
77
.read = scull_read,
78
.write = scull_write,
79
.unlocked_ioctl = scull_ioctl,
80
.open = scull_s_open,
81
.release = scull_s_release,
82
};
83
84
/************************************************************************
85
* Next, the "uid" device. It can be opened multiple times by the
86
* same user, but access is denied to other users if the device is open
87
*/
88
89
static struct scull_dev /*X*/ scull_u_device;
90
static int /*X*/ scull_u_count; /* initialized to 0 by default */
91
static kuid_t /*X*/ scull_u_owner; /* initialized to 0 by default */
92
static DEFINE_SPINLOCK(scull_u_lock);
93
94
static int /*X*/ scull_u_open(struct inode *inode, struct file *filp)
95
{
96
struct scull_dev *dev = &scull_u_device; /* device information */
97
98
spin_lock(&scull_u_lock);
99
if (scull_u_count &&
100
(!uid_eq(scull_u_owner , current_uid()) ) && /* allow user */
101
(!uid_eq(scull_u_owner , current_euid()) ) && /* allow whoever did su */
102
!capable(CAP_DAC_OVERRIDE)) { /* still allow root */
103
spin_unlock(&scull_u_lock);
104
return -EBUSY; /* -EPERM would confuse the user */
105
}
106
107
if (scull_u_count == 0)
108
scull_u_owner = current_uid(); /* grab it */
109
110
scull_u_count++;
111
spin_unlock(&scull_u_lock);
112
113
/* then, everything else is copied from the bare scull device */
114
115
if ((filp->f_flags & O_ACCMODE) == O_WRONLY)
116
scull_trim(dev);
117
filp->private_data = dev;
118
return 0; /* success */
119
}
120
121
static int /*X*/ scull_u_release(struct inode *inode, struct file *filp)
122
{
123
spin_lock(&scull_u_lock);
124
scull_u_count--; /* nothing else */
125
spin_unlock(&scull_u_lock);
126
return 0;
127
}
128
129
/*
130
* The other operations for the device come from the bare device
131
*/
132
struct file_operations /*X*/ scull_user_fops = {
133
.owner = THIS_MODULE,
134
.llseek = scull_llseek,
135
.read = scull_read,
136
.write = scull_write,
137
.unlocked_ioctl = scull_ioctl,
138
.open = scull_u_open,
139
.release = scull_u_release,
140
};
141
142
/************************************************************************
143
* Next, the device with blocking-open based on uid
144
*/
145
static struct scull_dev /*X*/ scull_w_device;
146
static int /*X*/ scull_w_count; /* initialized to 0 by default */
147
static kuid_t /*X*/ scull_w_owner; /* initialized to 0 by default */
148
static DECLARE_WAIT_QUEUE_HEAD(scull_w_wait);
149
static DEFINE_SPINLOCK(scull_w_lock);
150
151
static inline int /*X*/ scull_w_available(void)
152
{
153
return scull_w_count == 0 ||
154
uid_eq(scull_w_owner , current_uid()) ||
155
uid_eq(scull_w_owner , current_euid()) ||
156
capable(CAP_DAC_OVERRIDE);
157
}
158
159
160
static int /*X*/ scull_w_open(struct inode *inode, struct file *filp)
161
{
162
struct scull_dev *dev = &scull_w_device; /* device information */
163
164
spin_lock(&scull_w_lock);
165
while (! scull_w_available()) {
166
spin_unlock(&scull_w_lock);
167
if (filp->f_flags & O_NONBLOCK) return -EAGAIN;
168
if (wait_event_interruptible (scull_w_wait, scull_w_available()))
169
return -ERESTARTSYS; /* tell the fs layer to handle it */
170
spin_lock(&scull_w_lock);
171
}
172
if (scull_w_count == 0)
173
scull_w_owner = current_uid(); /* grab it */
174
scull_w_count++;
175
spin_unlock(&scull_w_lock);
176
177
/* then, everything else is copied from the bare scull device */
178
if ((filp->f_flags & O_ACCMODE) == O_WRONLY)
179
scull_trim(dev);
180
filp->private_data = dev;
181
return 0; /* success */
182
}
183
184
static int /*X*/ scull_w_release(struct inode *inode, struct file *filp)
185
{
186
int temp;
187
188
spin_lock(&scull_w_lock);
189
scull_w_count--;
190
temp = scull_w_count;
191
spin_unlock(&scull_w_lock);
192
193
if (temp == 0)
194
wake_up_interruptible_sync(&scull_w_wait); /* awake other uid's */
195
return 0;
196
}
197
198
199
/*
200
* The other operations for the device come from the bare device
201
*/
202
struct file_operations /*X*/ scull_wusr_fops = {
203
.owner = THIS_MODULE,
204
.llseek = scull_llseek,
205
.read = scull_read,
206
.write = scull_write,
207
.unlocked_ioctl = scull_ioctl,
208
.open = scull_w_open,
209
.release = scull_w_release,
210
};
211
212
/************************************************************************
213
*
214
* Finally the `cloned' private device. This is trickier because it
215
* involves list management, and dynamic allocation.
216
*/
217
218
/* The clone-specific data structure includes a key field */
219
220
/*
 * One cloned device: an embedded scull_dev plus the key that
 * identifies it and the node linking it into scull_c_list.
 */
struct /*X*/ scull_listitem {
	struct scull_dev device;	/* the cloned device proper */
	dev_t key;			/* lookup key (tty device number, per scull_c_open) */
	struct list_head list;		/* links into scull_c_list */

};
226
227
/* The list of devices, and a lock to protect it */
228
static LIST_HEAD(scull_c_list);
229
static DEFINE_SPINLOCK(scull_c_lock);
230
231
/* A placeholder scull_dev which really just holds the cdev stuff. */
232
static struct scull_dev /*X*/ scull_c_device;
233
234
/* Look for a device or create one if missing */
235
/*
 * Return the scull_dev associated with @key, allocating and listing
 * a new one if none exists yet.  Returns NULL on allocation failure.
 *
 * Called with scull_c_lock (a spinlock) held by scull_c_open(), so
 * the allocation must not sleep: GFP_ATOMIC is required here, not
 * GFP_KERNEL (which may block and would deadlock/oops in atomic
 * context).
 */
static struct scull_dev *scull_c_lookfor_device(dev_t key)
{
	struct scull_listitem *lptr;

	list_for_each_entry(lptr, &scull_c_list, list) {
		if (lptr->key == key)
			return &(lptr->device);
	}

	/* not found: allocate a fresh, zeroed item (atomically, see above) */
	lptr = kzalloc(sizeof(struct scull_listitem), GFP_ATOMIC);
	if (!lptr)
		return NULL;

	/* initialize the device */
	lptr->key = key;
	scull_trim(&(lptr->device)); /* initialize it */
	mutex_init(&(lptr->device.mutex));

	/* place it in the list */
	list_add(&lptr->list, &scull_c_list);

	return &(lptr->device);
}
259
260
static int /*X*/ scull_c_open(struct inode *inode, struct file *filp)
261
{
262
struct scull_dev *dev;
263
dev_t key;
264
265
if (!current->signal->tty) {
266
PDEBUG("Process \"%s\" has no ctl tty\n", current->comm);
267
return -EINVAL;
268
}
269
key = tty_devnum(current->signal->tty);
270
271
/* look for a scullc device in the list */
272
spin_lock(&scull_c_lock);
273
dev = scull_c_lookfor_device(key);
274
spin_unlock(&scull_c_lock);
275
276
if (!dev)
277
return -ENOMEM;
278
279
/* then, everything else is copied from the bare scull device */
280
if ( (filp->f_flags & O_ACCMODE) == O_WRONLY)
281
scull_trim(dev);
282
filp->private_data = dev;
283
return 0; /* success */
284
}
285
286
/* Release a cloned device: no per-open state to undo. */
static int /*X*/ scull_c_release(struct inode *inode, struct file *filp)
{
	/*
	 * Nothing to do, because the device is persistent.
	 * A `real' cloned device should be freed on last close
	 * (here, cloned devices live until scull_access_cleanup()).
	 */
	return 0;
}
294
295
/*
296
* The other operations for the device come from the bare device
297
*/
298
struct file_operations /*X*/ scull_priv_fops = {
299
.owner = THIS_MODULE,
300
.llseek = scull_llseek,
301
.read = scull_read,
302
.write = scull_write,
303
.unlocked_ioctl = scull_ioctl,
304
.open = scull_c_open,
305
.release = scull_c_release,
306
};
307
308
/************************************************************************
309
* And the init and cleanup functions come last
310
*/
311
static struct /*X*/ scull_adev_info {
312
char *name;
313
struct scull_dev *sculldev;
314
struct file_operations *fops;
315
} /*X*/ scull_access_devs[] = {
316
{ "scullsingle", &scull_s_device, &scull_sngl_fops },
317
{ "sculluid", &scull_u_device, &scull_user_fops },
318
{ "scullwuid", &scull_w_device, &scull_wusr_fops },
319
{ "sullpriv", &scull_c_device, &scull_priv_fops }
320
};
321
#define /*X*/ SCULL_N_ADEVS 4
322
323
/*
324
* Set up a single device.
325
*/
326
/*
 * Set up a single access-controlled device: initialize its scull_dev
 * fields and register its cdev at @devno.  On cdev_add() failure the
 * error is logged and the cdev's kobject reference is dropped; the
 * function itself cannot fail.
 */
static void scull_access_setup(dev_t devno, struct scull_adev_info *devinfo)
{
	struct scull_dev *dev = devinfo->sculldev;
	int result;

	/* fill in the scull_dev fields */
	dev->quantum = scull_quantum;
	dev->qset = scull_qset;
	mutex_init(&dev->mutex);

	/* register the char device */
	cdev_init(&dev->cdev, devinfo->fops);
	kobject_set_name(&dev->cdev.kobj, devinfo->name);
	dev->cdev.owner = THIS_MODULE;
	result = cdev_add(&dev->cdev, devno, 1);
	if (result) {
		/* fail gracefully: release the kobject taken by cdev_init */
		printk(KERN_NOTICE "Error %d adding %s\n", result, devinfo->name);
		kobject_put(&dev->cdev.kobj);
	} else {
		printk(KERN_NOTICE "%s registered at %x\n", devinfo->name, devno);
	}
}
348
349
/*
 * Register the device-number range and set up every access-controlled
 * device starting at @firstdev.  Returns the number of devices set up:
 * SCULL_N_ADEVS on success, 0 if the number registration failed (the
 * caller only needs the count, so no negative errno is returned).
 */
int scull_access_init(dev_t firstdev)
{
	int status;
	int i;

	/* claim our region of device numbers */
	status = register_chrdev_region(firstdev, SCULL_N_ADEVS, "sculla");
	if (status < 0) {
		printk(KERN_WARNING "sculla: device number registration failed\n");
		return 0;	/* zero devices were set up */
	}
	scull_a_firstdev = firstdev;

	/* set up each device on consecutive minors */
	for (i = 0; i < SCULL_N_ADEVS; i++)
		scull_access_setup(firstdev + i, scull_access_devs + i);

	return SCULL_N_ADEVS;
}
368
369
/*
370
* This is called by cleanup_module or on failure.
371
* It is required to never fail, even if nothing was initialized first
372
*/
373
void /*X*/ scull_access_cleanup(void)
374
{
375
struct scull_listitem *lptr, *next;
376
int i;
377
378
/* Clean up the static devs */
379
for (i = 0; i < SCULL_N_ADEVS; i++) {
380
struct scull_dev *dev = scull_access_devs[i].sculldev;
381
cdev_del(&dev->cdev);
382
scull_trim(scull_access_devs[i].sculldev);
383
}
384
385
/* And all the cloned devices */
386
list_for_each_entry_safe(lptr, next, &scull_c_list, list) {
387
list_del(&lptr->list);
388
scull_trim(&(lptr->device));
389
kfree(lptr);
390
}
391
392
/* Free up our number space */
393
unregister_chrdev_region(scull_a_firstdev, SCULL_N_ADEVS);
394
return;
395
}
C to HTML Conversion by ctoohtml