drivers/spi/spidev.c (linux-2.6)
/*
 * spidev.c -- simple synchronous userspace interface to SPI devices
 *
 * Copyright (C) 2006 SWAPP
 *      Andrea Paterniani <a.paterniani@swapp-eng.it>
 * Copyright (C) 2007 David Brownell (simplification, cleanup)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/ioctl.h>
#include <linux/fs.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/smp_lock.h>

#include <linux/spi/spi.h>
#include <linux/spi/spidev.h>

#include <asm/uaccess.h>


/*
 * This supports access to SPI devices using normal userspace I/O calls.
 * Note that while traditional UNIX/POSIX I/O semantics are half duplex,
 * and often mask message boundaries, full SPI support requires full duplex
 * transfers.  There are several kinds of internal message boundaries to
 * handle chipselect management and other protocol options.
 *
 * SPI has a character major number assigned.  We allocate minor numbers
 * dynamically using a bitmask.  You must use hotplug tools, such as udev
 * (or mdev with busybox) to create and destroy the /dev/spidevB.C device
 * nodes, since there is no fixed association of minor numbers with any
 * particular SPI bus or device.
 */
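
/*
 * Illustrative sketch (not part of this driver):  from userspace, the
 * half-duplex read()/write() interface might be used through a node such
 * as /dev/spidev0.0 (bus 0, chipselect 0, created by udev).  The device
 * path and the bytes sent are assumptions made up for this example, and
 * error checking is omitted for brevity; each write() or read() below
 * becomes one single-transfer spi_message.
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int fd = open("/dev/spidev0.0", O_RDWR);
 *	unsigned char cmd[2] = { 0x9f, 0x00 };
 *	unsigned char reply[4];
 *
 *	write(fd, cmd, sizeof cmd);	// one message:  tx only
 *	read(fd, reply, sizeof reply);	// a second message:  rx only
 *	close(fd);
 *
 * Full duplex or multi-transfer messages need the SPI_IOC_MESSAGE ioctl
 * handled further below.
 */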
#define SPIDEV_MAJOR                    153     /* assigned */
#define N_SPI_MINORS                    32      /* ... up to 256 */

static unsigned long    minors[N_SPI_MINORS / BITS_PER_LONG];


/* Bit masks for spi_device.mode management.  Note that incorrect
 * settings for CS_HIGH and 3WIRE can cause *lots* of trouble for other
 * devices on a shared bus:  CS_HIGH, because this device will be
 * active when it shouldn't be;  3WIRE, because when active it won't
 * behave as it should.
 *
 * REVISIT should changing those two modes be privileged?
 */
#define SPI_MODE_MASK           (SPI_CPHA | SPI_CPOL | SPI_CS_HIGH \
                                | SPI_LSB_FIRST | SPI_3WIRE | SPI_LOOP)
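
/*
 * Illustrative sketch (not part of this driver):  mode bits might be
 * inspected and changed from userspace with the SPI_IOC_RD_MODE and
 * SPI_IOC_WR_MODE requests handled below.  The descriptor "fd" is
 * assumed to be an open spidev node as in the earlier example.
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/spi/spidev.h>
 *
 *	__u8 mode;
 *
 *	ioctl(fd, SPI_IOC_RD_MODE, &mode);
 *	mode |= SPI_CPHA;			// e.g. switch to SPI mode 1
 *	ioctl(fd, SPI_IOC_WR_MODE, &mode);
 *
 * Setting SPI_CS_HIGH or SPI_3WIRE this way affects the shared bus,
 * which is exactly the hazard the comment above warns about.
 */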

struct spidev_data {
        dev_t                   devt;
        spinlock_t              spi_lock;
        struct spi_device       *spi;
        struct list_head        device_entry;

        /* buffer is NULL unless this device is open (users > 0) */
        struct mutex            buf_lock;
        unsigned                users;
        u8                      *buffer;
};

static LIST_HEAD(device_list);
static DEFINE_MUTEX(device_list_lock);

static unsigned bufsiz = 4096;
module_param(bufsiz, uint, S_IRUGO);
MODULE_PARM_DESC(bufsiz, "data bytes in biggest supported SPI message");
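
/*
 * Illustrative note (not part of this driver):  since bufsiz caps the
 * total payload of any one message, it may need raising for large
 * transfers when the module is loaded, e.g.
 *
 *	modprobe spidev bufsiz=65536
 *
 * or as spidev.bufsiz=65536 on the kernel command line when spidev is
 * built in.  S_IRUGO means the active value is readable through
 * /sys/module/spidev/parameters/bufsiz but not writable at runtime.
 */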

/*-------------------------------------------------------------------------*/

/*
 * We can't use the standard synchronous wrappers for file I/O; we
 * need to protect against async removal of the underlying spi_device.
 */
static void spidev_complete(void *arg)
{
        complete(arg);
}

static ssize_t
spidev_sync(struct spidev_data *spidev, struct spi_message *message)
{
        DECLARE_COMPLETION_ONSTACK(done);
        int status;

        message->complete = spidev_complete;
        message->context = &done;

        spin_lock_irq(&spidev->spi_lock);
        if (spidev->spi == NULL)
                status = -ESHUTDOWN;
        else
                status = spi_async(spidev->spi, message);
        spin_unlock_irq(&spidev->spi_lock);

        if (status == 0) {
                wait_for_completion(&done);
                status = message->status;
                if (status == 0)
                        status = message->actual_length;
        }
        return status;
}

static inline ssize_t
spidev_sync_write(struct spidev_data *spidev, size_t len)
{
        struct spi_transfer     t = {
                        .tx_buf         = spidev->buffer,
                        .len            = len,
                };
        struct spi_message      m;

        spi_message_init(&m);
        spi_message_add_tail(&t, &m);
        return spidev_sync(spidev, &m);
}

static inline ssize_t
spidev_sync_read(struct spidev_data *spidev, size_t len)
{
        struct spi_transfer     t = {
                        .rx_buf         = spidev->buffer,
                        .len            = len,
                };
        struct spi_message      m;

        spi_message_init(&m);
        spi_message_add_tail(&t, &m);
        return spidev_sync(spidev, &m);
}

/*-------------------------------------------------------------------------*/

/* Read-only message with current device setup */
static ssize_t
spidev_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos)
{
        struct spidev_data      *spidev;
        ssize_t                 status = 0;

        /* chipselect only toggles at start or end of operation */
        if (count > bufsiz)
                return -EMSGSIZE;

        spidev = filp->private_data;

        mutex_lock(&spidev->buf_lock);
        status = spidev_sync_read(spidev, count);
        if (status > 0) {
                unsigned long   missing;

                missing = copy_to_user(buf, spidev->buffer, status);
                if (missing == status)
                        status = -EFAULT;
                else
                        status = status - missing;
        }
        mutex_unlock(&spidev->buf_lock);

        return status;
}

/* Write-only message with current device setup */
static ssize_t
spidev_write(struct file *filp, const char __user *buf,
                size_t count, loff_t *f_pos)
{
        struct spidev_data      *spidev;
        ssize_t                 status = 0;
        unsigned long           missing;

        /* chipselect only toggles at start or end of operation */
        if (count > bufsiz)
                return -EMSGSIZE;

        spidev = filp->private_data;

        mutex_lock(&spidev->buf_lock);
        missing = copy_from_user(spidev->buffer, buf, count);
        if (missing == 0) {
                status = spidev_sync_write(spidev, count);
        } else
                status = -EFAULT;
        mutex_unlock(&spidev->buf_lock);

        return status;
}

static int spidev_message(struct spidev_data *spidev,
                struct spi_ioc_transfer *u_xfers, unsigned n_xfers)
{
        struct spi_message      msg;
        struct spi_transfer     *k_xfers;
        struct spi_transfer     *k_tmp;
        struct spi_ioc_transfer *u_tmp;
        unsigned                n, total;
        u8                      *buf;
        int                     status = -EFAULT;

        spi_message_init(&msg);
        k_xfers = kcalloc(n_xfers, sizeof(*k_tmp), GFP_KERNEL);
        if (k_xfers == NULL)
                return -ENOMEM;

        /* Construct spi_message, copying any tx data to bounce buffer.
         * We walk the array of user-provided transfers, using each one
         * to initialize a kernel version of the same transfer.
         */
        mutex_lock(&spidev->buf_lock);
        buf = spidev->buffer;
        total = 0;
        for (n = n_xfers, k_tmp = k_xfers, u_tmp = u_xfers;
                        n;
                        n--, k_tmp++, u_tmp++) {
                k_tmp->len = u_tmp->len;

                total += k_tmp->len;
                if (total > bufsiz) {
                        status = -EMSGSIZE;
                        goto done;
                }

                if (u_tmp->rx_buf) {
                        k_tmp->rx_buf = buf;
                        if (!access_ok(VERIFY_WRITE, (u8 __user *)
                                                (uintptr_t) u_tmp->rx_buf,
                                                u_tmp->len))
                                goto done;
                }
                if (u_tmp->tx_buf) {
                        k_tmp->tx_buf = buf;
                        if (copy_from_user(buf, (const u8 __user *)
                                                (uintptr_t) u_tmp->tx_buf,
                                        u_tmp->len))
                                goto done;
                }
                buf += k_tmp->len;

                k_tmp->cs_change = !!u_tmp->cs_change;
                k_tmp->bits_per_word = u_tmp->bits_per_word;
                k_tmp->delay_usecs = u_tmp->delay_usecs;
                k_tmp->speed_hz = u_tmp->speed_hz;
#ifdef VERBOSE
                dev_dbg(&spidev->spi->dev,
                        "  xfer len %zd %s%s%s%dbits %u usec %uHz\n",
                        u_tmp->len,
                        u_tmp->rx_buf ? "rx " : "",
                        u_tmp->tx_buf ? "tx " : "",
                        u_tmp->cs_change ? "cs " : "",
                        u_tmp->bits_per_word ? : spidev->spi->bits_per_word,
                        u_tmp->delay_usecs,
                        u_tmp->speed_hz ? : spidev->spi->max_speed_hz);
#endif
                spi_message_add_tail(k_tmp, &msg);
        }

        status = spidev_sync(spidev, &msg);
        if (status < 0)
                goto done;

        /* copy any rx data out of bounce buffer */
        buf = spidev->buffer;
        for (n = n_xfers, u_tmp = u_xfers; n; n--, u_tmp++) {
                if (u_tmp->rx_buf) {
                        if (__copy_to_user((u8 __user *)
                                        (uintptr_t) u_tmp->rx_buf, buf,
                                        u_tmp->len)) {
                                status = -EFAULT;
                                goto done;
                        }
                }
                buf += u_tmp->len;
        }
        status = total;

done:
        mutex_unlock(&spidev->buf_lock);
        kfree(k_xfers);
        return status;
}

static int
spidev_ioctl(struct inode *inode, struct file *filp,
                unsigned int cmd, unsigned long arg)
{
        int                     err = 0;
        int                     retval = 0;
        struct spidev_data      *spidev;
        struct spi_device       *spi;
        u32                     tmp;
        unsigned                n_ioc;
        struct spi_ioc_transfer *ioc;

        /* Check type and command number */
        if (_IOC_TYPE(cmd) != SPI_IOC_MAGIC)
                return -ENOTTY;

        /* Check access direction once here; don't repeat below.
         * IOC_DIR is from the user perspective, while access_ok is
         * from the kernel perspective; so they look reversed.
         */
        if (_IOC_DIR(cmd) & _IOC_READ)
                err = !access_ok(VERIFY_WRITE,
                                (void __user *)arg, _IOC_SIZE(cmd));
        if (err == 0 && _IOC_DIR(cmd) & _IOC_WRITE)
                err = !access_ok(VERIFY_READ,
                                (void __user *)arg, _IOC_SIZE(cmd));
        if (err)
                return -EFAULT;

        /* guard against device removal before, or while,
         * we issue this ioctl.
         */
        spidev = filp->private_data;
        spin_lock_irq(&spidev->spi_lock);
        spi = spi_dev_get(spidev->spi);
        spin_unlock_irq(&spidev->spi_lock);

        if (spi == NULL)
                return -ESHUTDOWN;

        switch (cmd) {
        /* read requests */
        case SPI_IOC_RD_MODE:
                retval = __put_user(spi->mode & SPI_MODE_MASK,
                                        (__u8 __user *)arg);
                break;
        case SPI_IOC_RD_LSB_FIRST:
                retval = __put_user((spi->mode & SPI_LSB_FIRST) ?  1 : 0,
                                        (__u8 __user *)arg);
                break;
        case SPI_IOC_RD_BITS_PER_WORD:
                retval = __put_user(spi->bits_per_word, (__u8 __user *)arg);
                break;
        case SPI_IOC_RD_MAX_SPEED_HZ:
                retval = __put_user(spi->max_speed_hz, (__u32 __user *)arg);
                break;

        /* write requests */
        case SPI_IOC_WR_MODE:
                retval = __get_user(tmp, (u8 __user *)arg);
                if (retval == 0) {
                        u8      save = spi->mode;

                        if (tmp & ~SPI_MODE_MASK) {
                                retval = -EINVAL;
                                break;
                        }

                        tmp |= spi->mode & ~SPI_MODE_MASK;
                        spi->mode = (u8)tmp;
                        retval = spi_setup(spi);
                        if (retval < 0)
                                spi->mode = save;
                        else
                                dev_dbg(&spi->dev, "spi mode %02x\n", tmp);
                }
                break;
        case SPI_IOC_WR_LSB_FIRST:
                retval = __get_user(tmp, (__u8 __user *)arg);
                if (retval == 0) {
                        u8      save = spi->mode;

                        if (tmp)
                                spi->mode |= SPI_LSB_FIRST;
                        else
                                spi->mode &= ~SPI_LSB_FIRST;
                        retval = spi_setup(spi);
                        if (retval < 0)
                                spi->mode = save;
                        else
                                dev_dbg(&spi->dev, "%csb first\n",
                                                tmp ? 'l' : 'm');
                }
                break;
        case SPI_IOC_WR_BITS_PER_WORD:
                retval = __get_user(tmp, (__u8 __user *)arg);
                if (retval == 0) {
                        u8      save = spi->bits_per_word;

                        spi->bits_per_word = tmp;
                        retval = spi_setup(spi);
                        if (retval < 0)
                                spi->bits_per_word = save;
                        else
                                dev_dbg(&spi->dev, "%d bits per word\n", tmp);
                }
                break;
        case SPI_IOC_WR_MAX_SPEED_HZ:
                retval = __get_user(tmp, (__u32 __user *)arg);
                if (retval == 0) {
                        u32     save = spi->max_speed_hz;

                        spi->max_speed_hz = tmp;
                        retval = spi_setup(spi);
                        if (retval < 0)
                                spi->max_speed_hz = save;
                        else
                                dev_dbg(&spi->dev, "%d Hz (max)\n", tmp);
                }
                break;

        default:
                /* segmented and/or full-duplex I/O request */
                if (_IOC_NR(cmd) != _IOC_NR(SPI_IOC_MESSAGE(0))
                                || _IOC_DIR(cmd) != _IOC_WRITE) {
                        retval = -ENOTTY;
                        break;
                }

                tmp = _IOC_SIZE(cmd);
                if ((tmp % sizeof(struct spi_ioc_transfer)) != 0) {
                        retval = -EINVAL;
                        break;
                }
                n_ioc = tmp / sizeof(struct spi_ioc_transfer);
                if (n_ioc == 0)
                        break;

                /* copy into scratch area */
                ioc = kmalloc(tmp, GFP_KERNEL);
                if (!ioc) {
                        retval = -ENOMEM;
                        break;
                }
                if (__copy_from_user(ioc, (void __user *)arg, tmp)) {
                        kfree(ioc);
                        retval = -EFAULT;
                        break;
                }

                /* translate to spi_message, execute */
                retval = spidev_message(spidev, ioc, n_ioc);
                kfree(ioc);
                break;
        }
        spi_dev_put(spi);
        return retval;
}
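
/*
 * Illustrative sketch (not part of this driver):  userspace might issue
 * a full-duplex, two-segment message with SPI_IOC_MESSAGE(), again over
 * an already-open spidev descriptor "fd".  The lengths and buffer
 * contents are assumptions made up for this example.
 *
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/spi/spidev.h>
 *
 *	struct spi_ioc_transfer xfer[2];
 *	unsigned char cmd[1] = { 0x05 };
 *	unsigned char resp[2];
 *
 *	memset(xfer, 0, sizeof xfer);
 *	xfer[0].tx_buf = (unsigned long) cmd;	// segment 1:  tx only
 *	xfer[0].len = sizeof cmd;
 *	xfer[1].rx_buf = (unsigned long) resp;	// segment 2:  rx only
 *	xfer[1].len = sizeof resp;
 *
 *	ioctl(fd, SPI_IOC_MESSAGE(2), xfer);
 *
 * With cs_change left at zero, both segments run under one chipselect
 * assertion; spidev_message() above maps each array element onto one
 * spi_transfer of a single spi_message.
 */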

static int spidev_open(struct inode *inode, struct file *filp)
{
        struct spidev_data      *spidev;
        int                     status = -ENXIO;

        lock_kernel();
        mutex_lock(&device_list_lock);

        list_for_each_entry(spidev, &device_list, device_entry) {
                if (spidev->devt == inode->i_rdev) {
                        status = 0;
                        break;
                }
        }
        if (status == 0) {
                if (!spidev->buffer) {
                        spidev->buffer = kmalloc(bufsiz, GFP_KERNEL);
                        if (!spidev->buffer) {
                                dev_dbg(&spidev->spi->dev, "open/ENOMEM\n");
                                status = -ENOMEM;
                        }
                }
                if (status == 0) {
                        spidev->users++;
                        filp->private_data = spidev;
                        nonseekable_open(inode, filp);
                }
        } else
                pr_debug("spidev: nothing for minor %d\n", iminor(inode));

        mutex_unlock(&device_list_lock);
        unlock_kernel();
        return status;
}

static int spidev_release(struct inode *inode, struct file *filp)
{
        struct spidev_data      *spidev;
        int                     status = 0;

        mutex_lock(&device_list_lock);
        spidev = filp->private_data;
        filp->private_data = NULL;

        /* last close? */
        spidev->users--;
        if (!spidev->users) {
                int             dofree;

                kfree(spidev->buffer);
                spidev->buffer = NULL;

                /* ... after we unbound from the underlying device? */
                spin_lock_irq(&spidev->spi_lock);
                dofree = (spidev->spi == NULL);
                spin_unlock_irq(&spidev->spi_lock);

                if (dofree)
                        kfree(spidev);
        }
        mutex_unlock(&device_list_lock);

        return status;
}

static struct file_operations spidev_fops = {
        .owner =        THIS_MODULE,
        /* REVISIT switch to aio primitives, so that userspace
         * gets more complete API coverage.  It'll simplify things
         * too, except for the locking.
         */
        .write =        spidev_write,
        .read =         spidev_read,
        .ioctl =        spidev_ioctl,
        .open =         spidev_open,
        .release =      spidev_release,
};

/*-------------------------------------------------------------------------*/

/* The main reason to have this class is to make mdev/udev create the
 * /dev/spidevB.C character device nodes exposing our userspace API.
 * It also simplifies memory management.
 */

static struct class *spidev_class;
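
/*
 * Illustrative note (not part of this driver):  with this class
 * registered, udev (or mdev) creates /dev/spidevB.C automatically when
 * spidev_probe() calls device_create() below.  A site-specific udev rule
 * such as the following (an assumption; adjust group and mode as needed)
 * could relax the default node permissions:
 *
 *	SUBSYSTEM=="spidev", MODE="0660", GROUP="spiusers"
 */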

/*-------------------------------------------------------------------------*/

static int spidev_probe(struct spi_device *spi)
{
        struct spidev_data      *spidev;
        int                     status;
        unsigned long           minor;

        /* Allocate driver data */
        spidev = kzalloc(sizeof(*spidev), GFP_KERNEL);
        if (!spidev)
                return -ENOMEM;

        /* Initialize the driver data */
        spidev->spi = spi;
        spin_lock_init(&spidev->spi_lock);
        mutex_init(&spidev->buf_lock);

        INIT_LIST_HEAD(&spidev->device_entry);

        /* If we can allocate a minor number, hook up this device.
         * Reusing minors is fine so long as udev or mdev is working.
         */
        mutex_lock(&device_list_lock);
        minor = find_first_zero_bit(minors, N_SPI_MINORS);
        if (minor < N_SPI_MINORS) {
                struct device *dev;

                spidev->devt = MKDEV(SPIDEV_MAJOR, minor);
                dev = device_create(spidev_class, &spi->dev, spidev->devt,
                                "spidev%d.%d",
                                spi->master->bus_num, spi->chip_select);
                status = IS_ERR(dev) ? PTR_ERR(dev) : 0;
        } else {
                dev_dbg(&spi->dev, "no minor number available!\n");
                status = -ENODEV;
        }
        if (status == 0) {
                set_bit(minor, minors);
                spi_set_drvdata(spi, spidev);
                list_add(&spidev->device_entry, &device_list);
        }
        mutex_unlock(&device_list_lock);

        if (status != 0)
                kfree(spidev);

        return status;
}

static int spidev_remove(struct spi_device *spi)
{
        struct spidev_data      *spidev = spi_get_drvdata(spi);

        /* make sure ops on existing fds can abort cleanly */
        spin_lock_irq(&spidev->spi_lock);
        spidev->spi = NULL;
        spi_set_drvdata(spi, NULL);
        spin_unlock_irq(&spidev->spi_lock);

        /* prevent new opens */
        mutex_lock(&device_list_lock);
        list_del(&spidev->device_entry);
        device_destroy(spidev_class, spidev->devt);
        clear_bit(MINOR(spidev->devt), minors);
        if (spidev->users == 0)
                kfree(spidev);
        mutex_unlock(&device_list_lock);

        return 0;
}

static struct spi_driver spidev_spi = {
        .driver = {
                .name =         "spidev",
                .owner =        THIS_MODULE,
        },
        .probe =        spidev_probe,
        .remove =       __devexit_p(spidev_remove),

        /* NOTE:  suspend/resume methods are not necessary here.
         * We don't do anything except pass the requests to/from
         * the underlying controller.  The refrigerator handles
         * most issues; the controller driver handles the rest.
         */
};

/*-------------------------------------------------------------------------*/

static int __init spidev_init(void)
{
        int status;

        /* Claim our 256 reserved device numbers.  Then register a class
         * that will key udev/mdev to add/remove /dev nodes.  Last, register
         * the driver which manages those device numbers.
         */
        BUILD_BUG_ON(N_SPI_MINORS > 256);
        status = register_chrdev(SPIDEV_MAJOR, "spi", &spidev_fops);
        if (status < 0)
                return status;

        spidev_class = class_create(THIS_MODULE, "spidev");
        if (IS_ERR(spidev_class)) {
                unregister_chrdev(SPIDEV_MAJOR, spidev_spi.driver.name);
                return PTR_ERR(spidev_class);
        }

        status = spi_register_driver(&spidev_spi);
        if (status < 0) {
                class_destroy(spidev_class);
                unregister_chrdev(SPIDEV_MAJOR, spidev_spi.driver.name);
        }
        return status;
}
module_init(spidev_init);

static void __exit spidev_exit(void)
{
        spi_unregister_driver(&spidev_spi);
        class_destroy(spidev_class);
        unregister_chrdev(SPIDEV_MAJOR, spidev_spi.driver.name);
}
module_exit(spidev_exit);

MODULE_AUTHOR("Andrea Paterniani, <a.paterniani@swapp-eng.it>");
MODULE_DESCRIPTION("User mode SPI device interface");
MODULE_LICENSE("GPL");