drivers/spi/spidev.c
/*
 * spidev.c -- simple synchronous userspace interface to SPI devices
 *
 * Copyright (C) 2006 SWAPP
 *      Andrea Paterniani <a.paterniani@swapp-eng.it>
 * Copyright (C) 2007 David Brownell (simplification, cleanup)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/ioctl.h>
#include <linux/fs.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/smp_lock.h>

#include <linux/spi/spi.h>
#include <linux/spi/spidev.h>

#include <asm/uaccess.h>

/*
 * This supports access to SPI devices using normal userspace I/O calls.
 * Note that while traditional UNIX/POSIX I/O semantics are half duplex,
 * and often mask message boundaries, full SPI support requires full duplex
 * transfers.  There are several kinds of internal message boundaries to
 * handle chipselect management and other protocol options.
 *
 * SPI has a character major number assigned.  We allocate minor numbers
 * dynamically using a bitmask.  You must use hotplug tools, such as udev
 * (or mdev with busybox) to create and destroy the /dev/spidevB.C device
 * nodes, since there is no fixed association of minor numbers with any
 * particular SPI bus or device.
 */
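
/*
 * For illustration only (not compiled into this driver): a userspace
 * client of this interface typically looks roughly like the sketch
 * below, using the ioctls declared in <linux/spi/spidev.h>.  The device
 * path and the settings shown are examples, not requirements.
 *
 *      int fd = open("/dev/spidev1.0", O_RDWR);
 *      uint8_t mode = SPI_MODE_0;
 *      uint32_t speed = 500000;
 *      uint8_t tx[4] = { 0x01, 0x02, 0x03, 0x04 }, rx[4];
 *      struct spi_ioc_transfer xfer = {
 *              .tx_buf = (unsigned long) tx,
 *              .rx_buf = (unsigned long) rx,
 *              .len    = sizeof(tx),
 *      };
 *
 *      ioctl(fd, SPI_IOC_WR_MODE, &mode);
 *      ioctl(fd, SPI_IOC_WR_MAX_SPEED_HZ, &speed);
 *      ioctl(fd, SPI_IOC_MESSAGE(1), &xfer);   /- full-duplex transfer -/
 *
 * Half-duplex I/O can instead use plain read()/write() on the fd.
 */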
#define SPIDEV_MAJOR                    153     /* assigned */
#define N_SPI_MINORS                    32      /* ... up to 256 */

static unsigned long    minors[N_SPI_MINORS / BITS_PER_LONG];


/* Bit masks for spi_device.mode management.  Note that incorrect
 * settings for CS_HIGH and 3WIRE can cause *lots* of trouble for other
 * devices on a shared bus:  CS_HIGH, because this device will be
 * active when it shouldn't be;  3WIRE, because when active it won't
 * behave as it should.
 *
 * REVISIT should changing those two modes be privileged?
 */
#define SPI_MODE_MASK           (SPI_CPHA | SPI_CPOL | SPI_CS_HIGH \
                                | SPI_LSB_FIRST | SPI_3WIRE | SPI_LOOP)

struct spidev_data {
        dev_t                   devt;
        spinlock_t              spi_lock;
        struct spi_device       *spi;
        struct list_head        device_entry;

        /* buffer is NULL unless this device is open (users > 0) */
        struct mutex            buf_lock;
        unsigned                users;
        u8                      *buffer;
};

static LIST_HEAD(device_list);
static DEFINE_MUTEX(device_list_lock);

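/* Size in bytes of the per-device bounce buffer.  It bounds both plain
 * read()/write() requests and the total length of all segments in one
 * SPI_IOC_MESSAGE ioctl.  Being read-only (S_IRUGO), it can only be set
 * when the module is loaded, e.g. "modprobe spidev bufsiz=8192".
 */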
static unsigned bufsiz = 4096;
module_param(bufsiz, uint, S_IRUGO);
MODULE_PARM_DESC(bufsiz, "data bytes in biggest supported SPI message");

/*-------------------------------------------------------------------------*/

/*
 * We can't use the standard synchronous wrappers for file I/O; we
 * need to protect against async removal of the underlying spi_device.
 */
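/*
 * Instead, spidev_sync() submits the message with spi_async() while
 * holding spi_lock, so spidev_remove() (which clears spidev->spi under
 * the same lock) can't yank the device out from under us, and then it
 * waits for the message's completion callback.
 */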
static void spidev_complete(void *arg)
{
        complete(arg);
}

static ssize_t
spidev_sync(struct spidev_data *spidev, struct spi_message *message)
{
        DECLARE_COMPLETION_ONSTACK(done);
        int status;

        message->complete = spidev_complete;
        message->context = &done;

        spin_lock_irq(&spidev->spi_lock);
        if (spidev->spi == NULL)
                status = -ESHUTDOWN;
        else
                status = spi_async(spidev->spi, message);
        spin_unlock_irq(&spidev->spi_lock);

        if (status == 0) {
                wait_for_completion(&done);
                status = message->status;
                if (status == 0)
                        status = message->actual_length;
        }
        return status;
}

static inline ssize_t
spidev_sync_write(struct spidev_data *spidev, size_t len)
{
        struct spi_transfer     t = {
                        .tx_buf         = spidev->buffer,
                        .len            = len,
                };
        struct spi_message      m;

        spi_message_init(&m);
        spi_message_add_tail(&t, &m);
        return spidev_sync(spidev, &m);
}

static inline ssize_t
spidev_sync_read(struct spidev_data *spidev, size_t len)
{
        struct spi_transfer     t = {
                        .rx_buf         = spidev->buffer,
                        .len            = len,
                };
        struct spi_message      m;

        spi_message_init(&m);
        spi_message_add_tail(&t, &m);
        return spidev_sync(spidev, &m);
}

/*-------------------------------------------------------------------------*/

/* Read-only message with current device setup */
static ssize_t
spidev_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos)
{
        struct spidev_data      *spidev;
        ssize_t                 status = 0;

        /* chipselect only toggles at start or end of operation */
        if (count > bufsiz)
                return -EMSGSIZE;

        spidev = filp->private_data;

        mutex_lock(&spidev->buf_lock);
        status = spidev_sync_read(spidev, count);
        if (status > 0) {
                unsigned long   missing;

                missing = copy_to_user(buf, spidev->buffer, status);
                if (missing == status)
                        status = -EFAULT;
                else
                        status = status - missing;
        }
        mutex_unlock(&spidev->buf_lock);

        return status;
}

/* Write-only message with current device setup */
static ssize_t
spidev_write(struct file *filp, const char __user *buf,
                size_t count, loff_t *f_pos)
{
        struct spidev_data      *spidev;
        ssize_t                 status = 0;
        unsigned long           missing;

        /* chipselect only toggles at start or end of operation */
        if (count > bufsiz)
                return -EMSGSIZE;

        spidev = filp->private_data;

        mutex_lock(&spidev->buf_lock);
        missing = copy_from_user(spidev->buffer, buf, count);
        if (missing == 0) {
                status = spidev_sync_write(spidev, count);
        } else
                status = -EFAULT;
        mutex_unlock(&spidev->buf_lock);

        return status;
}

static int spidev_message(struct spidev_data *spidev,
                struct spi_ioc_transfer *u_xfers, unsigned n_xfers)
{
        struct spi_message      msg;
        struct spi_transfer     *k_xfers;
        struct spi_transfer     *k_tmp;
        struct spi_ioc_transfer *u_tmp;
        unsigned                n, total;
        u8                      *buf;
        int                     status = -EFAULT;

        spi_message_init(&msg);
        k_xfers = kcalloc(n_xfers, sizeof(*k_tmp), GFP_KERNEL);
        if (k_xfers == NULL)
                return -ENOMEM;

        /* Construct spi_message, copying any tx data to bounce buffer.
         * We walk the array of user-provided transfers, using each one
         * to initialize a kernel version of the same transfer.
         */
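        /* Note: for a full-duplex segment, rx_buf and tx_buf point at the
         * same region of the bounce buffer, so data is transferred in
         * place; "buf" advances by each segment's length.
         */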
        buf = spidev->buffer;
        total = 0;
        for (n = n_xfers, k_tmp = k_xfers, u_tmp = u_xfers;
                        n;
                        n--, k_tmp++, u_tmp++) {
                k_tmp->len = u_tmp->len;

                total += k_tmp->len;
                if (total > bufsiz) {
                        status = -EMSGSIZE;
                        goto done;
                }

                if (u_tmp->rx_buf) {
                        k_tmp->rx_buf = buf;
                        if (!access_ok(VERIFY_WRITE, (u8 __user *)
                                                (uintptr_t) u_tmp->rx_buf,
                                                u_tmp->len))
                                goto done;
                }
                if (u_tmp->tx_buf) {
                        k_tmp->tx_buf = buf;
                        if (copy_from_user(buf, (const u8 __user *)
                                                (uintptr_t) u_tmp->tx_buf,
                                        u_tmp->len))
                                goto done;
                }
                buf += k_tmp->len;

                k_tmp->cs_change = !!u_tmp->cs_change;
                k_tmp->bits_per_word = u_tmp->bits_per_word;
                k_tmp->delay_usecs = u_tmp->delay_usecs;
                k_tmp->speed_hz = u_tmp->speed_hz;
#ifdef VERBOSE
                dev_dbg(&spidev->spi->dev,
                        "  xfer len %u %s%s%s%dbits %u usec %uHz\n",
                        u_tmp->len,
                        u_tmp->rx_buf ? "rx " : "",
                        u_tmp->tx_buf ? "tx " : "",
                        u_tmp->cs_change ? "cs " : "",
                        u_tmp->bits_per_word ? : spidev->spi->bits_per_word,
                        u_tmp->delay_usecs,
                        u_tmp->speed_hz ? : spidev->spi->max_speed_hz);
#endif
                spi_message_add_tail(k_tmp, &msg);
        }

        status = spidev_sync(spidev, &msg);
        if (status < 0)
                goto done;

        /* copy any rx data out of bounce buffer */
        buf = spidev->buffer;
        for (n = n_xfers, u_tmp = u_xfers; n; n--, u_tmp++) {
                if (u_tmp->rx_buf) {
                        if (__copy_to_user((u8 __user *)
                                        (uintptr_t) u_tmp->rx_buf, buf,
                                        u_tmp->len)) {
                                status = -EFAULT;
                                goto done;
                        }
                }
                buf += u_tmp->len;
        }
        status = total;

done:
        kfree(k_xfers);
        return status;
}

static long
spidev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
        int                     err = 0;
        int                     retval = 0;
        struct spidev_data      *spidev;
        struct spi_device       *spi;
        u32                     tmp;
        unsigned                n_ioc;
        struct spi_ioc_transfer *ioc;

        /* Check type and command number */
        if (_IOC_TYPE(cmd) != SPI_IOC_MAGIC)
                return -ENOTTY;

        /* Check access direction once here; don't repeat below.
         * IOC_DIR is from the user perspective, while access_ok is
         * from the kernel perspective; so they look reversed.
         */
        if (_IOC_DIR(cmd) & _IOC_READ)
                err = !access_ok(VERIFY_WRITE,
                                (void __user *)arg, _IOC_SIZE(cmd));
        if (err == 0 && _IOC_DIR(cmd) & _IOC_WRITE)
                err = !access_ok(VERIFY_READ,
                                (void __user *)arg, _IOC_SIZE(cmd));
        if (err)
                return -EFAULT;

        /* guard against device removal before, or while,
         * we issue this ioctl.
         */
        spidev = filp->private_data;
        spin_lock_irq(&spidev->spi_lock);
        spi = spi_dev_get(spidev->spi);
        spin_unlock_irq(&spidev->spi_lock);

        if (spi == NULL)
                return -ESHUTDOWN;

        /* use the buffer lock here for triple duty:
         *  - prevent I/O (from us) so calling spi_setup() is safe;
         *  - prevent concurrent SPI_IOC_WR_* from morphing
         *    data fields while SPI_IOC_RD_* reads them;
         *  - SPI_IOC_MESSAGE needs the buffer locked "normally".
         */
        mutex_lock(&spidev->buf_lock);

        switch (cmd) {
        /* read requests */
        case SPI_IOC_RD_MODE:
                retval = __put_user(spi->mode & SPI_MODE_MASK,
                                        (__u8 __user *)arg);
                break;
        case SPI_IOC_RD_LSB_FIRST:
                retval = __put_user((spi->mode & SPI_LSB_FIRST) ?  1 : 0,
                                        (__u8 __user *)arg);
                break;
        case SPI_IOC_RD_BITS_PER_WORD:
                retval = __put_user(spi->bits_per_word, (__u8 __user *)arg);
                break;
        case SPI_IOC_RD_MAX_SPEED_HZ:
                retval = __put_user(spi->max_speed_hz, (__u32 __user *)arg);
                break;

        /* write requests */
        case SPI_IOC_WR_MODE:
                retval = __get_user(tmp, (u8 __user *)arg);
                if (retval == 0) {
                        u8      save = spi->mode;

                        if (tmp & ~SPI_MODE_MASK) {
                                retval = -EINVAL;
                                break;
                        }

                        tmp |= spi->mode & ~SPI_MODE_MASK;
                        spi->mode = (u8)tmp;
                        retval = spi_setup(spi);
                        if (retval < 0)
                                spi->mode = save;
                        else
                                dev_dbg(&spi->dev, "spi mode %02x\n", tmp);
                }
                break;
        case SPI_IOC_WR_LSB_FIRST:
                retval = __get_user(tmp, (__u8 __user *)arg);
                if (retval == 0) {
                        u8      save = spi->mode;

                        if (tmp)
                                spi->mode |= SPI_LSB_FIRST;
                        else
                                spi->mode &= ~SPI_LSB_FIRST;
                        retval = spi_setup(spi);
                        if (retval < 0)
                                spi->mode = save;
                        else
                                dev_dbg(&spi->dev, "%csb first\n",
                                                tmp ? 'l' : 'm');
                }
                break;
        case SPI_IOC_WR_BITS_PER_WORD:
                retval = __get_user(tmp, (__u8 __user *)arg);
                if (retval == 0) {
                        u8      save = spi->bits_per_word;

                        spi->bits_per_word = tmp;
                        retval = spi_setup(spi);
                        if (retval < 0)
                                spi->bits_per_word = save;
                        else
                                dev_dbg(&spi->dev, "%d bits per word\n", tmp);
                }
                break;
        case SPI_IOC_WR_MAX_SPEED_HZ:
                retval = __get_user(tmp, (__u32 __user *)arg);
                if (retval == 0) {
                        u32     save = spi->max_speed_hz;

                        spi->max_speed_hz = tmp;
                        retval = spi_setup(spi);
                        if (retval < 0)
                                spi->max_speed_hz = save;
                        else
                                dev_dbg(&spi->dev, "%d Hz (max)\n", tmp);
                }
                break;

        default:
                /* segmented and/or full-duplex I/O request */
                if (_IOC_NR(cmd) != _IOC_NR(SPI_IOC_MESSAGE(0))
                                || _IOC_DIR(cmd) != _IOC_WRITE) {
                        retval = -ENOTTY;
                        break;
                }

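                /* SPI_IOC_MESSAGE(N) encodes N * sizeof(struct
                 * spi_ioc_transfer) in the ioctl size field, so the
                 * number of transfers is recovered from _IOC_SIZE().
                 */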
                tmp = _IOC_SIZE(cmd);
                if ((tmp % sizeof(struct spi_ioc_transfer)) != 0) {
                        retval = -EINVAL;
                        break;
                }
                n_ioc = tmp / sizeof(struct spi_ioc_transfer);
                if (n_ioc == 0)
                        break;

                /* copy into scratch area */
                ioc = kmalloc(tmp, GFP_KERNEL);
                if (!ioc) {
                        retval = -ENOMEM;
                        break;
                }
                if (__copy_from_user(ioc, (void __user *)arg, tmp)) {
                        kfree(ioc);
                        retval = -EFAULT;
                        break;
                }

                /* translate to spi_message, execute */
                retval = spidev_message(spidev, ioc, n_ioc);
                kfree(ioc);
                break;
        }

        mutex_unlock(&spidev->buf_lock);
        spi_dev_put(spi);
        return retval;
}

static int spidev_open(struct inode *inode, struct file *filp)
{
        struct spidev_data      *spidev;
        int                     status = -ENXIO;

        lock_kernel();
        mutex_lock(&device_list_lock);

        list_for_each_entry(spidev, &device_list, device_entry) {
                if (spidev->devt == inode->i_rdev) {
                        status = 0;
                        break;
                }
        }
        if (status == 0) {
                if (!spidev->buffer) {
                        spidev->buffer = kmalloc(bufsiz, GFP_KERNEL);
                        if (!spidev->buffer) {
                                dev_dbg(&spidev->spi->dev, "open/ENOMEM\n");
                                status = -ENOMEM;
                        }
                }
                if (status == 0) {
                        spidev->users++;
                        filp->private_data = spidev;
                        nonseekable_open(inode, filp);
                }
        } else
                pr_debug("spidev: nothing for minor %d\n", iminor(inode));

        mutex_unlock(&device_list_lock);
        unlock_kernel();
        return status;
}

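/* Teardown is expected to work in either order: if the SPI device is
 * unbound first, spidev_release() frees the spidev_data on last close;
 * if the last close comes first, spidev_remove() frees it at unbind
 * time.
 */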
static int spidev_release(struct inode *inode, struct file *filp)
{
        struct spidev_data      *spidev;
        int                     status = 0;

        mutex_lock(&device_list_lock);
        spidev = filp->private_data;
        filp->private_data = NULL;

        /* last close? */
        spidev->users--;
        if (!spidev->users) {
                int             dofree;

                kfree(spidev->buffer);
                spidev->buffer = NULL;

                /* ... after we unbound from the underlying device? */
                spin_lock_irq(&spidev->spi_lock);
                dofree = (spidev->spi == NULL);
                spin_unlock_irq(&spidev->spi_lock);

                if (dofree)
                        kfree(spidev);
        }
        mutex_unlock(&device_list_lock);

        return status;
}

static struct file_operations spidev_fops = {
        .owner =        THIS_MODULE,
        /* REVISIT switch to aio primitives, so that userspace
         * gets more complete API coverage.  It'll simplify things
         * too, except for the locking.
         */
        .write =        spidev_write,
        .read =         spidev_read,
        .unlocked_ioctl = spidev_ioctl,
        .open =         spidev_open,
        .release =      spidev_release,
};

/*-------------------------------------------------------------------------*/

/* The main reason to have this class is to make mdev/udev create the
 * /dev/spidevB.C character device nodes exposing our userspace API.
 * It also simplifies memory management.
 */

static struct class *spidev_class;

/*-------------------------------------------------------------------------*/

static int spidev_probe(struct spi_device *spi)
{
        struct spidev_data      *spidev;
        int                     status;
        unsigned long           minor;

        /* Allocate driver data */
        spidev = kzalloc(sizeof(*spidev), GFP_KERNEL);
        if (!spidev)
                return -ENOMEM;

        /* Initialize the driver data */
        spidev->spi = spi;
        spin_lock_init(&spidev->spi_lock);
        mutex_init(&spidev->buf_lock);

        INIT_LIST_HEAD(&spidev->device_entry);

        /* If we can allocate a minor number, hook up this device.
         * Reusing minors is fine so long as udev or mdev is working.
         */
        mutex_lock(&device_list_lock);
        minor = find_first_zero_bit(minors, N_SPI_MINORS);
        if (minor < N_SPI_MINORS) {
                struct device *dev;

                spidev->devt = MKDEV(SPIDEV_MAJOR, minor);
                dev = device_create(spidev_class, &spi->dev, spidev->devt,
                                    spidev, "spidev%d.%d",
                                    spi->master->bus_num, spi->chip_select);
                status = IS_ERR(dev) ? PTR_ERR(dev) : 0;
        } else {
                dev_dbg(&spi->dev, "no minor number available!\n");
                status = -ENODEV;
        }
        if (status == 0) {
                set_bit(minor, minors);
                list_add(&spidev->device_entry, &device_list);
        }
        mutex_unlock(&device_list_lock);

        if (status == 0)
                spi_set_drvdata(spi, spidev);
        else
                kfree(spidev);

        return status;
}

static int spidev_remove(struct spi_device *spi)
{
        struct spidev_data      *spidev = spi_get_drvdata(spi);

        /* make sure ops on existing fds can abort cleanly */
        spin_lock_irq(&spidev->spi_lock);
        spidev->spi = NULL;
        spi_set_drvdata(spi, NULL);
        spin_unlock_irq(&spidev->spi_lock);

        /* prevent new opens */
        mutex_lock(&device_list_lock);
        list_del(&spidev->device_entry);
        device_destroy(spidev_class, spidev->devt);
        clear_bit(MINOR(spidev->devt), minors);
        if (spidev->users == 0)
                kfree(spidev);
        mutex_unlock(&device_list_lock);

        return 0;
}

static struct spi_driver spidev_spi = {
        .driver = {
                .name =         "spidev",
                .owner =        THIS_MODULE,
        },
        .probe =        spidev_probe,
        .remove =       __devexit_p(spidev_remove),

        /* NOTE:  suspend/resume methods are not necessary here.
         * We don't do anything except pass the requests to/from
         * the underlying controller.  The refrigerator handles
         * most issues; the controller driver handles the rest.
         */
};

/*-------------------------------------------------------------------------*/

static int __init spidev_init(void)
{
        int status;

        /* Claim our 256 reserved device numbers.  Then register a class
         * that will key udev/mdev to add/remove /dev nodes.  Last, register
         * the driver which manages those device numbers.
         */
        BUILD_BUG_ON(N_SPI_MINORS > 256);
        status = register_chrdev(SPIDEV_MAJOR, "spi", &spidev_fops);
        if (status < 0)
                return status;

        spidev_class = class_create(THIS_MODULE, "spidev");
        if (IS_ERR(spidev_class)) {
                unregister_chrdev(SPIDEV_MAJOR, spidev_spi.driver.name);
                return PTR_ERR(spidev_class);
        }

        status = spi_register_driver(&spidev_spi);
        if (status < 0) {
                class_destroy(spidev_class);
                unregister_chrdev(SPIDEV_MAJOR, spidev_spi.driver.name);
        }
        return status;
}
module_init(spidev_init);

static void __exit spidev_exit(void)
{
        spi_unregister_driver(&spidev_spi);
        class_destroy(spidev_class);
        unregister_chrdev(SPIDEV_MAJOR, spidev_spi.driver.name);
}
module_exit(spidev_exit);

MODULE_AUTHOR("Andrea Paterniani, <a.paterniani@swapp-eng.it>");
MODULE_DESCRIPTION("User mode SPI device interface");
MODULE_LICENSE("GPL");