drivers/staging/vme/vme.c
1 /*
2  * VME Bridge Framework
3  *
4  * Author: Martyn Welch <martyn.welch@ge.com>
5  * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
6  *
7  * Based on work by Tom Armistead and Ajit Prem
8  * Copyright 2004 Motorola Inc.
9  *
10  * This program is free software; you can redistribute  it and/or modify it
11  * under  the terms of  the GNU General  Public License as published by the
12  * Free Software Foundation;  either version 2 of the  License, or (at your
13  * option) any later version.
14  */
15
16 #include <linux/module.h>
17 #include <linux/moduleparam.h>
18 #include <linux/mm.h>
19 #include <linux/types.h>
20 #include <linux/kernel.h>
21 #include <linux/errno.h>
22 #include <linux/pci.h>
23 #include <linux/poll.h>
24 #include <linux/highmem.h>
25 #include <linux/interrupt.h>
26 #include <linux/pagemap.h>
27 #include <linux/device.h>
28 #include <linux/dma-mapping.h>
29 #include <linux/syscalls.h>
30 #include <linux/mutex.h>
31 #include <linux/spinlock.h>
32
33 #include "vme.h"
34 #include "vme_bridge.h"
35
36 /* Bitmask and mutex to keep track of bridge numbers */
37 static unsigned int vme_bus_numbers;
38 static DEFINE_MUTEX(vme_bus_num_mtx);
39
40 static void __exit vme_exit(void);
41 static int __init vme_init(void);
42
43
44 /*
45  * Find the bridge resource associated with a specific device resource
46  */
47 static struct vme_bridge *dev_to_bridge(struct device *dev)
48 {
49         return dev->platform_data;
50 }
51
52 /*
53  * Find the bridge that the resource is associated with.
54  */
55 static struct vme_bridge *find_bridge(struct vme_resource *resource)
56 {
57         /* Get list to search */
58         switch (resource->type) {
59         case VME_MASTER:
60                 return list_entry(resource->entry, struct vme_master_resource,
61                         list)->parent;
62                 break;
63         case VME_SLAVE:
64                 return list_entry(resource->entry, struct vme_slave_resource,
65                         list)->parent;
66                 break;
67         case VME_DMA:
68                 return list_entry(resource->entry, struct vme_dma_resource,
69                         list)->parent;
70                 break;
71         case VME_LM:
72                 return list_entry(resource->entry, struct vme_lm_resource,
73                         list)->parent;
74                 break;
75         default:
76                 printk(KERN_ERR "Unknown resource type\n");
77                 return NULL;
78                 break;
79         }
80 }
81
82 /*
83  * Allocate a contiguous block of memory for use by the driver. This is used to
84  * create the buffers for the slave windows.
85  *
86  * XXX VME bridges could be available on buses other than PCI. At the moment
87  *     this framework only supports PCI devices.
88  */
89 void *vme_alloc_consistent(struct vme_resource *resource, size_t size,
90         dma_addr_t *dma)
91 {
92         struct vme_bridge *bridge;
93         struct pci_dev *pdev;
94
95         if (resource == NULL) {
96                 printk(KERN_ERR "No resource\n");
97                 return NULL;
98         }
99
100         bridge = find_bridge(resource);
101         if (bridge == NULL) {
102                 printk(KERN_ERR "Can't find bridge\n");
103                 return NULL;
104         }
105
106         /* Find pci_dev container of dev */
107         if (bridge->parent == NULL) {
108                 printk(KERN_ERR "Dev entry NULL\n");
109                 return NULL;
110         }
111         pdev = container_of(bridge->parent, struct pci_dev, dev);
112
113         return pci_alloc_consistent(pdev, size, dma);
114 }
115 EXPORT_SYMBOL(vme_alloc_consistent);
116
117 /*
118  * Free previously allocated contiguous block of memory.
119  *
120  * XXX VME bridges could be available on buses other than PCI. At the moment
121  *     this framework only supports PCI devices.
122  */
123 void vme_free_consistent(struct vme_resource *resource, size_t size,
124         void *vaddr, dma_addr_t dma)
125 {
126         struct vme_bridge *bridge;
127         struct pci_dev *pdev;
128
129         if (resource == NULL) {
130                 printk(KERN_ERR "No resource\n");
131                 return;
132         }
133
134         bridge = find_bridge(resource);
135         if (bridge == NULL) {
136                 printk(KERN_ERR "Can't find bridge\n");
137                 return;
138         }
139
140         /* Find pci_dev container of dev */
141         pdev = container_of(bridge->parent, struct pci_dev, dev);
142
143         pci_free_consistent(pdev, size, vaddr, dma);
144 }
145 EXPORT_SYMBOL(vme_free_consistent);
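/*
 * Example: allocating and releasing a coherent buffer through the functions
 * above. This is only an illustrative sketch; the 64KB size is arbitrary and
 * "resource" is assumed to have been obtained from vme_slave_request() below.
 *
 *	dma_addr_t buf_base;
 *	void *buf;
 *
 *	buf = vme_alloc_consistent(resource, 0x10000, &buf_base);
 *	if (buf == NULL)
 *		return -ENOMEM;
 *
 *	...
 *
 *	vme_free_consistent(resource, 0x10000, buf, buf_base);
 */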
146
147 size_t vme_get_size(struct vme_resource *resource)
148 {
149         int enabled, retval;
150         unsigned long long base, size;
151         dma_addr_t buf_base;
152         vme_address_t aspace;
153         vme_cycle_t cycle;
154         vme_width_t dwidth;
155
156         switch (resource->type) {
157         case VME_MASTER:
158                 retval = vme_master_get(resource, &enabled, &base, &size,
159                         &aspace, &cycle, &dwidth);
160
161                 return size;
162                 break;
163         case VME_SLAVE:
164                 retval = vme_slave_get(resource, &enabled, &base, &size,
165                         &buf_base, &aspace, &cycle);
166
167                 return size;
168                 break;
169         case VME_DMA:
170                 return 0;
171                 break;
172         default:
173                 printk(KERN_ERR "Unknown resource type\n");
174                 return 0;
175                 break;
176         }
177 }
178 EXPORT_SYMBOL(vme_get_size);
179
180 static int vme_check_window(vme_address_t aspace, unsigned long long vme_base,
181         unsigned long long size)
182 {
183         int retval = 0;
184
185         switch (aspace) {
186         case VME_A16:
187                 if (((vme_base + size) > VME_A16_MAX) ||
188                                 (vme_base > VME_A16_MAX))
189                         retval = -EFAULT;
190                 break;
191         case VME_A24:
192                 if (((vme_base + size) > VME_A24_MAX) ||
193                                 (vme_base > VME_A24_MAX))
194                         retval = -EFAULT;
195                 break;
196         case VME_A32:
197                 if (((vme_base + size) > VME_A32_MAX) ||
198                                 (vme_base > VME_A32_MAX))
199                         retval = -EFAULT;
200                 break;
201         case VME_A64:
202                 /*
203                  * Any value held in an unsigned long long can be used as the
204                  * base
205                  */
206                 break;
207         case VME_CRCSR:
208                 if (((vme_base + size) > VME_CRCSR_MAX) ||
209                                 (vme_base > VME_CRCSR_MAX))
210                         retval = -EFAULT;
211                 break;
212         case VME_USER1:
213         case VME_USER2:
214         case VME_USER3:
215         case VME_USER4:
216                 /* User Defined */
217                 break;
218         default:
219                 printk(KERN_ERR "Invalid address space\n");
220                 retval = -EINVAL;
221                 break;
222         }
223
224         return retval;
225 }
226
227 /*
228  * Request a slave image with specific attributes, return some unique
229  * identifier.
230  */
231 struct vme_resource *vme_slave_request(struct device *dev,
232         vme_address_t address, vme_cycle_t cycle)
233 {
234         struct vme_bridge *bridge;
235         struct list_head *slave_pos = NULL;
236         struct vme_slave_resource *allocated_image = NULL;
237         struct vme_slave_resource *slave_image = NULL;
238         struct vme_resource *resource = NULL;
239
240         bridge = dev_to_bridge(dev);
241         if (bridge == NULL) {
242                 printk(KERN_ERR "Can't find VME bus\n");
243                 goto err_bus;
244         }
245
246         /* Loop through slave resources */
247         list_for_each(slave_pos, &(bridge->slave_resources)) {
248                 slave_image = list_entry(slave_pos,
249                         struct vme_slave_resource, list);
250
251                 if (slave_image == NULL) {
252                         printk(KERN_ERR "Registered NULL Slave resource\n");
253                         continue;
254                 }
255
256                 /* Find an unlocked and compatible image */
257                 mutex_lock(&(slave_image->mtx));
258                 if (((slave_image->address_attr & address) == address) &&
259                         ((slave_image->cycle_attr & cycle) == cycle) &&
260                         (slave_image->locked == 0)) {
261
262                         slave_image->locked = 1;
263                         mutex_unlock(&(slave_image->mtx));
264                         allocated_image = slave_image;
265                         break;
266                 }
267                 mutex_unlock(&(slave_image->mtx));
268         }
269
270         /* No free image */
271         if (allocated_image == NULL)
272                 goto err_image;
273
274         resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
275         if (resource == NULL) {
276                 printk(KERN_WARNING "Unable to allocate resource structure\n");
277                 goto err_alloc;
278         }
279         resource->type = VME_SLAVE;
280         resource->entry = &(allocated_image->list);
281
282         return resource;
283
284 err_alloc:
285         /* Unlock image */
286         mutex_lock(&(slave_image->mtx));
287         slave_image->locked = 0;
288         mutex_unlock(&(slave_image->mtx));
289 err_image:
290 err_bus:
291         return NULL;
292 }
293 EXPORT_SYMBOL(vme_slave_request);
294
295 int vme_slave_set(struct vme_resource *resource, int enabled,
296         unsigned long long vme_base, unsigned long long size,
297         dma_addr_t buf_base, vme_address_t aspace, vme_cycle_t cycle)
298 {
299         struct vme_bridge *bridge = find_bridge(resource);
300         struct vme_slave_resource *image;
301         int retval;
302
303         if (resource->type != VME_SLAVE) {
304                 printk(KERN_ERR "Not a slave resource\n");
305                 return -EINVAL;
306         }
307
308         image = list_entry(resource->entry, struct vme_slave_resource, list);
309
310         if (bridge->slave_set == NULL) {
311                 printk(KERN_ERR "Function not supported\n");
312                 return -ENOSYS;
313         }
314
315         if (!(((image->address_attr & aspace) == aspace) &&
316                 ((image->cycle_attr & cycle) == cycle))) {
317                 printk(KERN_ERR "Invalid attributes\n");
318                 return -EINVAL;
319         }
320
321         retval = vme_check_window(aspace, vme_base, size);
322         if (retval)
323                 return retval;
324
325         return bridge->slave_set(image, enabled, vme_base, size, buf_base,
326                 aspace, cycle);
327 }
328 EXPORT_SYMBOL(vme_slave_set);
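/*
 * Example: requesting and configuring a slave window from a driver's probe
 * callback. A minimal sketch; the address space, cycle attributes and window
 * size are illustrative, and "dev" is the struct device handed to the probe
 * routine by the VME bus code.
 *
 *	struct vme_resource *res;
 *	dma_addr_t buf_base;
 *	void *buf;
 *	int retval;
 *
 *	res = vme_slave_request(dev, VME_A24, VME_SCT | VME_USER | VME_DATA);
 *	if (res == NULL)
 *		return -ENOMEM;
 *
 *	buf = vme_alloc_consistent(res, 0x10000, &buf_base);
 *	if (buf == NULL)
 *		goto err_buf;
 *
 *	retval = vme_slave_set(res, 1, 0x0, 0x10000, buf_base, VME_A24,
 *		VME_SCT | VME_USER | VME_DATA);
 */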
329
330 int vme_slave_get(struct vme_resource *resource, int *enabled,
331         unsigned long long *vme_base, unsigned long long *size,
332         dma_addr_t *buf_base, vme_address_t *aspace, vme_cycle_t *cycle)
333 {
334         struct vme_bridge *bridge = find_bridge(resource);
335         struct vme_slave_resource *image;
336
337         if (resource->type != VME_SLAVE) {
338                 printk(KERN_ERR "Not a slave resource\n");
339                 return -EINVAL;
340         }
341
342         image = list_entry(resource->entry, struct vme_slave_resource, list);
343
344         if (bridge->slave_get == NULL) {
345                 printk(KERN_ERR "vme_slave_get not supported\n");
346                 return -EINVAL;
347         }
348
349         return bridge->slave_get(image, enabled, vme_base, size, buf_base,
350                 aspace, cycle);
351 }
352 EXPORT_SYMBOL(vme_slave_get);
353
354 void vme_slave_free(struct vme_resource *resource)
355 {
356         struct vme_slave_resource *slave_image;
357
358         if (resource->type != VME_SLAVE) {
359                 printk(KERN_ERR "Not a slave resource\n");
360                 return;
361         }
362
363         slave_image = list_entry(resource->entry, struct vme_slave_resource,
364                 list);
365         if (slave_image == NULL) {
366                 printk(KERN_ERR "Can't find slave resource\n");
367                 return;
368         }
369
370         /* Unlock image */
371         mutex_lock(&(slave_image->mtx));
372         if (slave_image->locked == 0)
373                 printk(KERN_ERR "Image is already free\n");
374
375         slave_image->locked = 0;
376         mutex_unlock(&(slave_image->mtx));
377
378         /* Free up resource memory */
379         kfree(resource);
380 }
381 EXPORT_SYMBOL(vme_slave_free);
382
383 /*
384  * Request a master image with specific attributes, return some unique
385  * identifier.
386  */
387 struct vme_resource *vme_master_request(struct device *dev,
388         vme_address_t address, vme_cycle_t cycle, vme_width_t dwidth)
389 {
390         struct vme_bridge *bridge;
391         struct list_head *master_pos = NULL;
392         struct vme_master_resource *allocated_image = NULL;
393         struct vme_master_resource *master_image = NULL;
394         struct vme_resource *resource = NULL;
395
396         bridge = dev_to_bridge(dev);
397         if (bridge == NULL) {
398                 printk(KERN_ERR "Can't find VME bus\n");
399                 goto err_bus;
400         }
401
402         /* Loop through master resources */
403         list_for_each(master_pos, &(bridge->master_resources)) {
404                 master_image = list_entry(master_pos,
405                         struct vme_master_resource, list);
406
407                 if (master_image == NULL) {
408                         printk(KERN_WARNING "Registered NULL master resource\n");
409                         continue;
410                 }
411
412                 /* Find an unlocked and compatible image */
413                 spin_lock(&(master_image->lock));
414                 if (((master_image->address_attr & address) == address) &&
415                         ((master_image->cycle_attr & cycle) == cycle) &&
416                         ((master_image->width_attr & dwidth) == dwidth) &&
417                         (master_image->locked == 0)) {
418
419                         master_image->locked = 1;
420                         spin_unlock(&(master_image->lock));
421                         allocated_image = master_image;
422                         break;
423                 }
424                 spin_unlock(&(master_image->lock));
425         }
426
427         /* Check to see if we found a resource */
428         if (allocated_image == NULL) {
429                 printk(KERN_ERR "Can't find a suitable resource\n");
430                 goto err_image;
431         }
432
433         resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
434         if (resource == NULL) {
435                 printk(KERN_ERR "Unable to allocate resource structure\n");
436                 goto err_alloc;
437         }
438         resource->type = VME_MASTER;
439         resource->entry = &(allocated_image->list);
440
441         return resource;
442
443         kfree(resource);
444 err_alloc:
445         /* Unlock image */
446         spin_lock(&(master_image->lock));
447         master_image->locked = 0;
448         spin_unlock(&(master_image->lock));
449 err_image:
450 err_bus:
451         return NULL;
452 }
453 EXPORT_SYMBOL(vme_master_request);
454
455 int vme_master_set(struct vme_resource *resource, int enabled,
456         unsigned long long vme_base, unsigned long long size,
457         vme_address_t aspace, vme_cycle_t cycle, vme_width_t dwidth)
458 {
459         struct vme_bridge *bridge = find_bridge(resource);
460         struct vme_master_resource *image;
461         int retval;
462
463         if (resource->type != VME_MASTER) {
464                 printk(KERN_ERR "Not a master resource\n");
465                 return -EINVAL;
466         }
467
468         image = list_entry(resource->entry, struct vme_master_resource, list);
469
470         if (bridge->master_set == NULL) {
471                 printk(KERN_WARNING "vme_master_set not supported\n");
472                 return -EINVAL;
473         }
474
475         if (!(((image->address_attr & aspace) == aspace) &&
476                 ((image->cycle_attr & cycle) == cycle) &&
477                 ((image->width_attr & dwidth) == dwidth))) {
478                 printk(KERN_WARNING "Invalid attributes\n");
479                 return -EINVAL;
480         }
481
482         retval = vme_check_window(aspace, vme_base, size);
483         if (retval)
484                 return retval;
485
486         return bridge->master_set(image, enabled, vme_base, size, aspace,
487                 cycle, dwidth);
488 }
489 EXPORT_SYMBOL(vme_master_set);
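/*
 * Example: requesting a master window and mapping it onto a VME address
 * range. Illustrative only; the base address, size and attribute flags are
 * placeholders and "dev" is the device passed to the driver's probe routine.
 *
 *	struct vme_resource *res;
 *	int retval;
 *
 *	res = vme_master_request(dev, VME_A32, VME_SCT | VME_USER | VME_DATA,
 *		VME_D32);
 *	if (res == NULL)
 *		return -ENOMEM;
 *
 *	retval = vme_master_set(res, 1, 0x80000000, 0x10000, VME_A32,
 *		VME_SCT | VME_USER | VME_DATA, VME_D32);
 */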
490
491 int vme_master_get(struct vme_resource *resource, int *enabled,
492         unsigned long long *vme_base, unsigned long long *size,
493         vme_address_t *aspace, vme_cycle_t *cycle, vme_width_t *dwidth)
494 {
495         struct vme_bridge *bridge = find_bridge(resource);
496         struct vme_master_resource *image;
497
498         if (resource->type != VME_MASTER) {
499                 printk(KERN_ERR "Not a master resource\n");
500                 return -EINVAL;
501         }
502
503         image = list_entry(resource->entry, struct vme_master_resource, list);
504
505         if (bridge->master_get == NULL) {
506                 printk(KERN_WARNING "vme_master_get not supported\n");
507                 return -EINVAL;
508         }
509
510         return bridge->master_get(image, enabled, vme_base, size, aspace,
511                 cycle, dwidth);
512 }
513 EXPORT_SYMBOL(vme_master_get);
514
515 /*
516  * Read data out of VME space into a buffer.
517  */
518 ssize_t vme_master_read(struct vme_resource *resource, void *buf, size_t count,
519         loff_t offset)
520 {
521         struct vme_bridge *bridge = find_bridge(resource);
522         struct vme_master_resource *image;
523         size_t length;
524
525         if (bridge->master_read == NULL) {
526                 printk(KERN_WARNING "Reading from resource not supported\n");
527                 return -EINVAL;
528         }
529
530         if (resource->type != VME_MASTER) {
531                 printk(KERN_ERR "Not a master resource\n");
532                 return -EINVAL;
533         }
534
535         image = list_entry(resource->entry, struct vme_master_resource, list);
536
537         length = vme_get_size(resource);
538
539         if (offset > length) {
540                 printk(KERN_WARNING "Invalid Offset\n");
541                 return -EFAULT;
542         }
543
544         if ((offset + count) > length)
545                 count = length - offset;
546
547         return bridge->master_read(image, buf, count, offset);
548
549 }
550 EXPORT_SYMBOL(vme_master_read);
551
552 /*
553  * Write data out to VME space from a buffer.
554  */
555 ssize_t vme_master_write(struct vme_resource *resource, void *buf,
556         size_t count, loff_t offset)
557 {
558         struct vme_bridge *bridge = find_bridge(resource);
559         struct vme_master_resource *image;
560         size_t length;
561
562         if (bridge->master_write == NULL) {
563                 printk(KERN_WARNING "Writing to resource not supported\n");
564                 return -EINVAL;
565         }
566
567         if (resource->type != VME_MASTER) {
568                 printk(KERN_ERR "Not a master resource\n");
569                 return -EINVAL;
570         }
571
572         image = list_entry(resource->entry, struct vme_master_resource, list);
573
574         length = vme_get_size(resource);
575
576         if (offset > length) {
577                 printk(KERN_WARNING "Invalid Offset\n");
578                 return -EFAULT;
579         }
580
581         if ((offset + count) > length)
582                 count = length - offset;
583
584         return bridge->master_write(image, buf, count, offset);
585 }
586 EXPORT_SYMBOL(vme_master_write);
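/*
 * Example: transferring data through an enabled master window. A sketch
 * only; "res" is assumed to be a configured master resource and the register
 * offset is arbitrary.
 *
 *	u32 reg;
 *	ssize_t count;
 *
 *	count = vme_master_read(res, &reg, sizeof(reg), 0x100);
 *	if (count < 0)
 *		return count;
 *
 *	reg |= 0x1;
 *	count = vme_master_write(res, &reg, sizeof(reg), 0x100);
 */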
587
588 /*
589  * Perform RMW cycle to provided location.
590  */
591 unsigned int vme_master_rmw(struct vme_resource *resource, unsigned int mask,
592         unsigned int compare, unsigned int swap, loff_t offset)
593 {
594         struct vme_bridge *bridge = find_bridge(resource);
595         struct vme_master_resource *image;
596
597         if (bridge->master_rmw == NULL) {
598                 printk(KERN_WARNING "RMW to resource not supported\n");
599                 return -EINVAL;
600         }
601
602         if (resource->type != VME_MASTER) {
603                 printk(KERN_ERR "Not a master resource\n");
604                 return -EINVAL;
605         }
606
607         image = list_entry(resource->entry, struct vme_master_resource, list);
608
609         return bridge->master_rmw(image, mask, compare, swap, offset);
610 }
611 EXPORT_SYMBOL(vme_master_rmw);
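/*
 * Example: claiming a lock bit with an RMW cycle. Illustrative only; the
 * mask, compare and swap values are placeholders and the exact semantics of
 * the cycle are bridge specific.
 *
 *	result = vme_master_rmw(res, 0x1, 0x0, 0x1, 0x0);
 */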
612
613 void vme_master_free(struct vme_resource *resource)
614 {
615         struct vme_master_resource *master_image;
616
617         if (resource->type != VME_MASTER) {
618                 printk(KERN_ERR "Not a master resource\n");
619                 return;
620         }
621
622         master_image = list_entry(resource->entry, struct vme_master_resource,
623                 list);
624         if (master_image == NULL) {
625                 printk(KERN_ERR "Can't find master resource\n");
626                 return;
627         }
628
629         /* Unlock image */
630         spin_lock(&(master_image->lock));
631         if (master_image->locked == 0)
632                 printk(KERN_ERR "Image is already free\n");
633
634         master_image->locked = 0;
635         spin_unlock(&(master_image->lock));
636
637         /* Free up resource memory */
638         kfree(resource);
639 }
640 EXPORT_SYMBOL(vme_master_free);
641
642 /*
643  * Request a DMA controller with specific attributes, return some unique
644  * identifier.
645  */
646 struct vme_resource *vme_dma_request(struct device *dev, vme_dma_route_t route)
647 {
648         struct vme_bridge *bridge;
649         struct list_head *dma_pos = NULL;
650         struct vme_dma_resource *allocated_ctrlr = NULL;
651         struct vme_dma_resource *dma_ctrlr = NULL;
652         struct vme_resource *resource = NULL;
653
654         /* XXX Not checking resource attributes */
655         printk(KERN_ERR "No VME resource Attribute tests done\n");
656
657         bridge = dev_to_bridge(dev);
658         if (bridge == NULL) {
659                 printk(KERN_ERR "Can't find VME bus\n");
660                 goto err_bus;
661         }
662
663         /* Loop through DMA resources */
664         list_for_each(dma_pos, &(bridge->dma_resources)) {
665                 dma_ctrlr = list_entry(dma_pos,
666                         struct vme_dma_resource, list);
667
668                 if (dma_ctrlr == NULL) {
669                         printk(KERN_ERR "Registered NULL DMA resource\n");
670                         continue;
671                 }
672
673                 /* Find an unlocked and compatible controller */
674                 mutex_lock(&(dma_ctrlr->mtx));
675                 if (((dma_ctrlr->route_attr & route) == route) &&
676                         (dma_ctrlr->locked == 0)) {
677
678                         dma_ctrlr->locked = 1;
679                         mutex_unlock(&(dma_ctrlr->mtx));
680                         allocated_ctrlr = dma_ctrlr;
681                         break;
682                 }
683                 mutex_unlock(&(dma_ctrlr->mtx));
684         }
685
686         /* Check to see if we found a resource */
687         if (allocated_ctrlr == NULL)
688                 goto err_ctrlr;
689
690         resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
691         if (resource == NULL) {
692                 printk(KERN_WARNING "Unable to allocate resource structure\n");
693                 goto err_alloc;
694         }
695         resource->type = VME_DMA;
696         resource->entry = &(allocated_ctrlr->list);
697
698         return resource;
699
700 err_alloc:
701         /* Unlock image */
702         mutex_lock(&(dma_ctrlr->mtx));
703         dma_ctrlr->locked = 0;
704         mutex_unlock(&(dma_ctrlr->mtx));
705 err_ctrlr:
706 err_bus:
707         return NULL;
708 }
709 EXPORT_SYMBOL(vme_dma_request);
710
711 /*
712  * Start new list
713  */
714 struct vme_dma_list *vme_new_dma_list(struct vme_resource *resource)
715 {
716         struct vme_dma_resource *ctrlr;
717         struct vme_dma_list *dma_list;
718
719         if (resource->type != VME_DMA) {
720                 printk(KERN_ERR "Not a DMA resource\n");
721                 return NULL;
722         }
723
724         ctrlr = list_entry(resource->entry, struct vme_dma_resource, list);
725
726         dma_list = kmalloc(sizeof(struct vme_dma_list), GFP_KERNEL);
727         if (dma_list == NULL) {
728                 printk(KERN_ERR "Unable to allocate memory for new dma list\n");
729                 return NULL;
730         }
731         INIT_LIST_HEAD(&(dma_list->entries));
732         dma_list->parent = ctrlr;
733         mutex_init(&(dma_list->mtx));
734
735         return dma_list;
736 }
737 EXPORT_SYMBOL(vme_new_dma_list);
738
739 /*
740  * Create "Pattern" type attributes
741  */
742 struct vme_dma_attr *vme_dma_pattern_attribute(u32 pattern,
743         vme_pattern_t type)
744 {
745         struct vme_dma_attr *attributes;
746         struct vme_dma_pattern *pattern_attr;
747
748         attributes = kmalloc(sizeof(struct vme_dma_attr), GFP_KERNEL);
749         if (attributes == NULL) {
750                 printk(KERN_ERR "Unable to allocate memory for attributes "
751                         "structure\n");
752                 goto err_attr;
753         }
754
755         pattern_attr = kmalloc(sizeof(struct vme_dma_pattern), GFP_KERNEL);
756         if (pattern_attr == NULL) {
757                 printk(KERN_ERR "Unable to allocate memory for pattern "
758                         "attributes\n");
759                 goto err_pat;
760         }
761
762         attributes->type = VME_DMA_PATTERN;
763         attributes->private = (void *)pattern_attr;
764
765         pattern_attr->pattern = pattern;
766         pattern_attr->type = type;
767
768         return attributes;
769
770         kfree(pattern_attr);
771 err_pat:
772         kfree(attributes);
773 err_attr:
774         return NULL;
775 }
776 EXPORT_SYMBOL(vme_dma_pattern_attribute);
777
778 /*
779  * Create "PCI" type attributes
780  */
781 struct vme_dma_attr *vme_dma_pci_attribute(dma_addr_t address)
782 {
783         struct vme_dma_attr *attributes;
784         struct vme_dma_pci *pci_attr;
785
786         /* XXX Run some sanity checks here */
787
788         attributes = kmalloc(sizeof(struct vme_dma_attr), GFP_KERNEL);
789         if (attributes == NULL) {
790                 printk(KERN_ERR "Unable to allocate memory for attributes "
791                         "structure\n");
792                 goto err_attr;
793         }
794
795         pci_attr = kmalloc(sizeof(struct vme_dma_pci), GFP_KERNEL);
796         if (pci_attr == NULL) {
797                 printk(KERN_ERR "Unable to allocate memory for pci "
798                         "attributes\n");
799                 goto err_pci;
800         }
801
802
803
804         attributes->type = VME_DMA_PCI;
805         attributes->private = (void *)pci_attr;
806
807         pci_attr->address = address;
808
809         return attributes;
810
811         kfree(pci_attr);
812 err_pci:
813         kfree(attributes);
814 err_attr:
815         return NULL;
816 }
817 EXPORT_SYMBOL(vme_dma_pci_attribute);
818
819 /*
820  * Create "VME" type attributes
821  */
822 struct vme_dma_attr *vme_dma_vme_attribute(unsigned long long address,
823         vme_address_t aspace, vme_cycle_t cycle, vme_width_t dwidth)
824 {
825         struct vme_dma_attr *attributes;
826         struct vme_dma_vme *vme_attr;
827
828         /* XXX Run some sanity checks here */
829
830         attributes = kmalloc(
831                 sizeof(struct vme_dma_attr), GFP_KERNEL);
832         if (attributes == NULL) {
833                 printk(KERN_ERR "Unable to allocate memory for attributes "
834                         "structure\n");
835                 goto err_attr;
836         }
837
838         vme_attr = kmalloc(sizeof(struct vme_dma_vme), GFP_KERNEL);
839         if (vme_attr == NULL) {
840                 printk(KERN_ERR "Unable to allocate memory for vme "
841                         "attributes\n");
842                 goto err_vme;
843         }
844
845         attributes->type = VME_DMA_VME;
846         attributes->private = (void *)vme_attr;
847
848         vme_attr->address = address;
849         vme_attr->aspace = aspace;
850         vme_attr->cycle = cycle;
851         vme_attr->dwidth = dwidth;
852
853         return attributes;
854
855         kfree(vme_attr);
856 err_vme:
857         kfree(attributes);
858 err_attr:
859         return NULL;
860 }
861 EXPORT_SYMBOL(vme_dma_vme_attribute);
862
863 /*
864  * Free attribute
865  */
866 void vme_dma_free_attribute(struct vme_dma_attr *attributes)
867 {
868         kfree(attributes->private);
869         kfree(attributes);
870 }
871 EXPORT_SYMBOL(vme_dma_free_attribute);
872
873 int vme_dma_list_add(struct vme_dma_list *list, struct vme_dma_attr *src,
874         struct vme_dma_attr *dest, size_t count)
875 {
876         struct vme_bridge *bridge = list->parent->parent;
877         int retval;
878
879         if (bridge->dma_list_add == NULL) {
880                 printk(KERN_WARNING "Link List DMA generation not supported\n");
881                 return -EINVAL;
882         }
883
884         if (!mutex_trylock(&(list->mtx))) {
885                 printk(KERN_ERR "Link List already submitted\n");
886                 return -EINVAL;
887         }
888
889         retval = bridge->dma_list_add(list, src, dest, count);
890
891         mutex_unlock(&(list->mtx));
892
893         return retval;
894 }
895 EXPORT_SYMBOL(vme_dma_list_add);
896
897 int vme_dma_list_exec(struct vme_dma_list *list)
898 {
899         struct vme_bridge *bridge = list->parent->parent;
900         int retval;
901
902         if (bridge->dma_list_exec == NULL) {
903                 printk(KERN_ERR "Link List DMA execution not supported\n");
904                 return -EINVAL;
905         }
906
907         mutex_lock(&(list->mtx));
908
909         retval = bridge->dma_list_exec(list);
910
911         mutex_unlock(&(list->mtx));
912
913         return retval;
914 }
915 EXPORT_SYMBOL(vme_dma_list_exec);
916
917 int vme_dma_list_free(struct vme_dma_list *list)
918 {
919         struct vme_bridge *bridge = list->parent->parent;
920         int retval;
921
922         if (bridge->dma_list_empty == NULL) {
923                 printk(KERN_WARNING "Emptying of Link Lists not supported\n");
924                 return -EINVAL;
925         }
926
927         if (!mutex_trylock(&(list->mtx))) {
928                 printk(KERN_ERR "Link List in use\n");
929                 return -EINVAL;
930         }
931
932         /*
933          * Empty out all of the entries from the dma list. We need to go to the
934          * low level driver as dma entries are driver specific.
935          */
936         retval = bridge->dma_list_empty(list);
937         if (retval) {
938                 printk(KERN_ERR "Unable to empty link-list entries\n");
939                 mutex_unlock(&(list->mtx));
940                 return retval;
941         }
942         mutex_unlock(&(list->mtx));
943         kfree(list);
944
945         return retval;
946 }
947 EXPORT_SYMBOL(vme_dma_list_free);
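/*
 * Example: building and running a single-entry DMA list that copies a block
 * of PCI memory out to the VME bus. A sketch under assumed attribute values;
 * error handling is abbreviated and the addresses, route and sizes are
 * placeholders.
 *
 *	struct vme_resource *dma_res;
 *	struct vme_dma_list *list;
 *	struct vme_dma_attr *src, *dest;
 *	int retval;
 *
 *	dma_res = vme_dma_request(dev, VME_DMA_MEM_TO_VME);
 *	list = vme_new_dma_list(dma_res);
 *	src = vme_dma_pci_attribute(buf_base);
 *	dest = vme_dma_vme_attribute(0x80000000, VME_A32,
 *		VME_SCT | VME_USER | VME_DATA, VME_D32);
 *
 *	retval = vme_dma_list_add(list, src, dest, 0x10000);
 *	if (retval == 0)
 *		retval = vme_dma_list_exec(list);
 *
 *	vme_dma_free_attribute(src);
 *	vme_dma_free_attribute(dest);
 *	vme_dma_list_free(list);
 *	vme_dma_free(dma_res);
 */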
948
949 int vme_dma_free(struct vme_resource *resource)
950 {
951         struct vme_dma_resource *ctrlr;
952
953         if (resource->type != VME_DMA) {
954                 printk(KERN_ERR "Not a DMA resource\n");
955                 return -EINVAL;
956         }
957
958         ctrlr = list_entry(resource->entry, struct vme_dma_resource, list);
959
960         if (!mutex_trylock(&(ctrlr->mtx))) {
961                 printk(KERN_ERR "Resource busy, can't free\n");
962                 return -EBUSY;
963         }
964
965         if (!(list_empty(&(ctrlr->pending)) && list_empty(&(ctrlr->running)))) {
966                 printk(KERN_WARNING "Resource still processing transfers\n");
967                 mutex_unlock(&(ctrlr->mtx));
968                 return -EBUSY;
969         }
970
971         ctrlr->locked = 0;
972
973         mutex_unlock(&(ctrlr->mtx));
974
975         return 0;
976 }
977 EXPORT_SYMBOL(vme_dma_free);
978
979 void vme_irq_handler(struct vme_bridge *bridge, int level, int statid)
980 {
981         void (*call)(int, int, void *);
982         void *priv_data;
983
984         call = bridge->irq[level - 1].callback[statid].func;
985         priv_data = bridge->irq[level - 1].callback[statid].priv_data;
986
987         if (call != NULL)
988                 call(level, statid, priv_data);
989         else
990                 printk(KERN_WARNING "Spurious VME interrupt, level:%x, "
991                         "vector:%x\n", level, statid);
992 }
993 EXPORT_SYMBOL(vme_irq_handler);
994
995 int vme_irq_request(struct device *dev, int level, int statid,
996         void (*callback)(int level, int vector, void *priv_data),
997         void *priv_data)
998 {
999         struct vme_bridge *bridge;
1000
1001         bridge = dev_to_bridge(dev);
1002         if (bridge == NULL) {
1003                 printk(KERN_ERR "Can't find VME bus\n");
1004                 return -EINVAL;
1005         }
1006
1007         if ((level < 1) || (level > 7)) {
1008                 printk(KERN_ERR "Invalid interrupt level\n");
1009                 return -EINVAL;
1010         }
1011
1012         if (bridge->irq_set == NULL) {
1013                 printk(KERN_ERR "Configuring interrupts not supported\n");
1014                 return -EINVAL;
1015         }
1016
1017         mutex_lock(&(bridge->irq_mtx));
1018
1019         if (bridge->irq[level - 1].callback[statid].func) {
1020                 mutex_unlock(&(bridge->irq_mtx));
1021                 printk(KERN_WARNING "VME Interrupt already taken\n");
1022                 return -EBUSY;
1023         }
1024
1025         bridge->irq[level - 1].count++;
1026         bridge->irq[level - 1].callback[statid].priv_data = priv_data;
1027         bridge->irq[level - 1].callback[statid].func = callback;
1028
1029         /* Enable IRQ level */
1030         bridge->irq_set(level, 1, 1);
1031
1032         mutex_unlock(&(bridge->irq_mtx));
1033
1034         return 0;
1035 }
1036 EXPORT_SYMBOL(vme_irq_request);
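/*
 * Example: attaching a handler to VME interrupt level 3, status/ID 0x20.
 * Illustrative only; the level, vector and callback are placeholders.
 *
 *	static void my_isr(int level, int statid, void *priv_data)
 *	{
 *		... acknowledge the device ...
 *	}
 *
 *	retval = vme_irq_request(dev, 3, 0x20, my_isr, my_priv);
 *	...
 *	vme_irq_free(dev, 3, 0x20);
 */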
1037
1038 void vme_irq_free(struct device *dev, int level, int statid)
1039 {
1040         struct vme_bridge *bridge;
1041
1042         bridge = dev_to_bridge(dev);
1043         if (bridge == NULL) {
1044                 printk(KERN_ERR "Can't find VME bus\n");
1045                 return;
1046         }
1047
1048         if ((level < 1) || (level > 7)) {
1049                 printk(KERN_ERR "Invalid interrupt level\n");
1050                 return;
1051         }
1052
1053         if (bridge->irq_set == NULL) {
1054                 printk(KERN_ERR "Configuring interrupts not supported\n");
1055                 return;
1056         }
1057
1058         mutex_lock(&(bridge->irq_mtx));
1059
1060         bridge->irq[level - 1].count--;
1061
1062         /* Disable IRQ level if no more interrupts attached at this level */
1063         if (bridge->irq[level - 1].count == 0)
1064                 bridge->irq_set(level, 0, 1);
1065
1066         bridge->irq[level - 1].callback[statid].func = NULL;
1067         bridge->irq[level - 1].callback[statid].priv_data = NULL;
1068
1069         mutex_unlock(&(bridge->irq_mtx));
1070 }
1071 EXPORT_SYMBOL(vme_irq_free);
1072
1073 int vme_irq_generate(struct device *dev, int level, int statid)
1074 {
1075         struct vme_bridge *bridge;
1076
1077         bridge = dev_to_bridge(dev);
1078         if (bridge == NULL) {
1079                 printk(KERN_ERR "Can't find VME bus\n");
1080                 return -EINVAL;
1081         }
1082
1083         if ((level < 1) || (level > 7)) {
1084                 printk(KERN_WARNING "Invalid interrupt level\n");
1085                 return -EINVAL;
1086         }
1087
1088         if (bridge->irq_generate == NULL) {
1089                 printk(KERN_WARNING "Interrupt generation not supported\n");
1090                 return -EINVAL;
1091         }
1092
1093         return bridge->irq_generate(level, statid);
1094 }
1095 EXPORT_SYMBOL(vme_irq_generate);
1096
1097 /*
1098  * Request the location monitor, return resource or NULL
1099  */
1100 struct vme_resource *vme_lm_request(struct device *dev)
1101 {
1102         struct vme_bridge *bridge;
1103         struct list_head *lm_pos = NULL;
1104         struct vme_lm_resource *allocated_lm = NULL;
1105         struct vme_lm_resource *lm = NULL;
1106         struct vme_resource *resource = NULL;
1107
1108         bridge = dev_to_bridge(dev);
1109         if (bridge == NULL) {
1110                 printk(KERN_ERR "Can't find VME bus\n");
1111                 goto err_bus;
1112         }
1113
1114         /* Loop through location monitor resources */
1115         list_for_each(lm_pos, &(bridge->lm_resources)) {
1116                 lm = list_entry(lm_pos,
1117                         struct vme_lm_resource, list);
1118
1119                 if (lm == NULL) {
1120                         printk(KERN_ERR "Registered NULL Location Monitor "
1121                                 "resource\n");
1122                         continue;
1123                 }
1124
1125                 /* Find an unlocked controller */
1126                 mutex_lock(&(lm->mtx));
1127                 if (lm->locked == 0) {
1128                         lm->locked = 1;
1129                         mutex_unlock(&(lm->mtx));
1130                         allocated_lm = lm;
1131                         break;
1132                 }
1133                 mutex_unlock(&(lm->mtx));
1134         }
1135
1136         /* Check to see if we found a resource */
1137         if (allocated_lm == NULL)
1138                 goto err_lm;
1139
1140         resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
1141         if (resource == NULL) {
1142                 printk(KERN_ERR "Unable to allocate resource structure\n");
1143                 goto err_alloc;
1144         }
1145         resource->type = VME_LM;
1146         resource->entry = &(allocated_lm->list);
1147
1148         return resource;
1149
1150 err_alloc:
1151         /* Unlock image */
1152         mutex_lock(&(lm->mtx));
1153         lm->locked = 0;
1154         mutex_unlock(&(lm->mtx));
1155 err_lm:
1156 err_bus:
1157         return NULL;
1158 }
1159 EXPORT_SYMBOL(vme_lm_request);
1160
1161 int vme_lm_count(struct vme_resource *resource)
1162 {
1163         struct vme_lm_resource *lm;
1164
1165         if (resource->type != VME_LM) {
1166                 printk(KERN_ERR "Not a Location Monitor resource\n");
1167                 return -EINVAL;
1168         }
1169
1170         lm = list_entry(resource->entry, struct vme_lm_resource, list);
1171
1172         return lm->monitors;
1173 }
1174 EXPORT_SYMBOL(vme_lm_count);
1175
1176 int vme_lm_set(struct vme_resource *resource, unsigned long long lm_base,
1177         vme_address_t aspace, vme_cycle_t cycle)
1178 {
1179         struct vme_bridge *bridge = find_bridge(resource);
1180         struct vme_lm_resource *lm;
1181
1182         if (resource->type != VME_LM) {
1183                 printk(KERN_ERR "Not a Location Monitor resource\n");
1184                 return -EINVAL;
1185         }
1186
1187         lm = list_entry(resource->entry, struct vme_lm_resource, list);
1188
1189         if (bridge->lm_set == NULL) {
1190                 printk(KERN_ERR "vme_lm_set not supported\n");
1191                 return -EINVAL;
1192         }
1193
1194         /* XXX Check parameters */
1195
1196         return bridge->lm_set(lm, lm_base, aspace, cycle);
1197 }
1198 EXPORT_SYMBOL(vme_lm_set);
1199
1200 int vme_lm_get(struct vme_resource *resource, unsigned long long *lm_base,
1201         vme_address_t *aspace, vme_cycle_t *cycle)
1202 {
1203         struct vme_bridge *bridge = find_bridge(resource);
1204         struct vme_lm_resource *lm;
1205
1206         if (resource->type != VME_LM) {
1207                 printk(KERN_ERR "Not a Location Monitor resource\n");
1208                 return -EINVAL;
1209         }
1210
1211         lm = list_entry(resource->entry, struct vme_lm_resource, list);
1212
1213         if (bridge->lm_get == NULL) {
1214                 printk(KERN_ERR "vme_lm_get not supported\n");
1215                 return -EINVAL;
1216         }
1217
1218         return bridge->lm_get(lm, lm_base, aspace, cycle);
1219 }
1220 EXPORT_SYMBOL(vme_lm_get);
1221
1222 int vme_lm_attach(struct vme_resource *resource, int monitor,
1223         void (*callback)(int))
1224 {
1225         struct vme_bridge *bridge = find_bridge(resource);
1226         struct vme_lm_resource *lm;
1227
1228         if (resource->type != VME_LM) {
1229                 printk(KERN_ERR "Not a Location Monitor resource\n");
1230                 return -EINVAL;
1231         }
1232
1233         lm = list_entry(resource->entry, struct vme_lm_resource, list);
1234
1235         if (bridge->lm_attach == NULL) {
1236                 printk(KERN_ERR "vme_lm_attach not supported\n");
1237                 return -EINVAL;
1238         }
1239
1240         return bridge->lm_attach(lm, monitor, callback);
1241 }
1242 EXPORT_SYMBOL(vme_lm_attach);
1243
1244 int vme_lm_detach(struct vme_resource *resource, int monitor)
1245 {
1246         struct vme_bridge *bridge = find_bridge(resource);
1247         struct vme_lm_resource *lm;
1248
1249         if (resource->type != VME_LM) {
1250                 printk(KERN_ERR "Not a Location Monitor resource\n");
1251                 return -EINVAL;
1252         }
1253
1254         lm = list_entry(resource->entry, struct vme_lm_resource, list);
1255
1256         if (bridge->lm_detach == NULL) {
1257                 printk(KERN_ERR "vme_lm_detach not supported\n");
1258                 return -EINVAL;
1259         }
1260
1261         return bridge->lm_detach(lm, monitor);
1262 }
1263 EXPORT_SYMBOL(vme_lm_detach);
1264
1265 void vme_lm_free(struct vme_resource *resource)
1266 {
1267         struct vme_lm_resource *lm;
1268
1269         if (resource->type != VME_LM) {
1270                 printk(KERN_ERR "Not a Location Monitor resource\n");
1271                 return;
1272         }
1273
1274         lm = list_entry(resource->entry, struct vme_lm_resource, list);
1275
1276         mutex_lock(&(lm->mtx));
1277
1278         /* XXX
1279          * Check to see that there aren't any callbacks still attached, if
1280          * there are we should probably be detaching them!
1281          */
1282
1283         lm->locked = 0;
1284
1285         mutex_unlock(&(lm->mtx));
1286
1287         kfree(resource);
1288 }
1289 EXPORT_SYMBOL(vme_lm_free);
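/*
 * Example: monitoring accesses to a VME address with a location monitor.
 * A sketch; the monitored base address, attributes and callback are
 * illustrative.
 *
 *	struct vme_resource *lm_res;
 *	int retval;
 *
 *	lm_res = vme_lm_request(dev);
 *	if (lm_res == NULL)
 *		return -ENODEV;
 *
 *	retval = vme_lm_set(lm_res, 0x1000, VME_A16, VME_SCT | VME_DATA);
 *	retval = vme_lm_attach(lm_res, 0, my_lm_callback);
 *	...
 *	vme_lm_detach(lm_res, 0);
 *	vme_lm_free(lm_res);
 */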
1290
1291 int vme_slot_get(struct device *bus)
1292 {
1293         struct vme_bridge *bridge;
1294
1295         bridge = dev_to_bridge(bus);
1296         if (bridge == NULL) {
1297                 printk(KERN_ERR "Can't find VME bus\n");
1298                 return -EINVAL;
1299         }
1300
1301         if (bridge->slot_get == NULL) {
1302                 printk(KERN_WARNING "vme_slot_get not supported\n");
1303                 return -EINVAL;
1304         }
1305
1306         return bridge->slot_get();
1307 }
1308 EXPORT_SYMBOL(vme_slot_get);
1309
1310
1311 /* - Bridge Registration --------------------------------------------------- */
1312
1313 static int vme_alloc_bus_num(void)
1314 {
1315         int i;
1316
1317         mutex_lock(&vme_bus_num_mtx);
1318         for (i = 0; i < sizeof(vme_bus_numbers) * 8; i++) {
1319                 if (((vme_bus_numbers >> i) & 0x1) == 0) {
1320                         vme_bus_numbers |= (0x1 << i);
1321                         break;
1322                 }
1323         }
1324         mutex_unlock(&vme_bus_num_mtx);
1325
1326         return i;
1327 }
1328
1329 static void vme_free_bus_num(int bus)
1330 {
1331         mutex_lock(&vme_bus_num_mtx);
1332         vme_bus_numbers &= ~(0x1 << bus);
1333         mutex_unlock(&vme_bus_num_mtx);
1334 }
1335
1336 int vme_register_bridge(struct vme_bridge *bridge)
1337 {
1338         struct device *dev;
1339         int retval;
1340         int i;
1341
1342         bridge->num = vme_alloc_bus_num();
1343
1344         /* This creates 32 vme "slot" devices. This equates to a slot for each
1345          * ID available in a system conforming to the ANSI/VITA 1-1994
1346          * specification.
1347          */
1348         for (i = 0; i < VME_SLOTS_MAX; i++) {
1349                 dev = &(bridge->dev[i]);
1350                 memset(dev, 0, sizeof(struct device));
1351
1352                 dev->parent = bridge->parent;
1353                 dev->bus = &(vme_bus_type);
1354                 /*
1355                  * We save a pointer to the bridge in platform_data so that we
1356                  * can get to it later. We keep driver_data for use by the
1357                  * driver that binds against the slot
1358                  */
1359                 dev->platform_data = bridge;
1360                 dev_set_name(dev, "vme-%x.%x", bridge->num, i + 1);
1361
1362                 retval = device_register(dev);
1363                 if (retval)
1364                         goto err_reg;
1365         }
1366
1367         return retval;
1368
1369         i = VME_SLOTS_MAX;
1370 err_reg:
1371         while (--i >= 0) {
1372                 dev = &(bridge->dev[i]);
1373                 device_unregister(dev);
1374         }
1375         vme_free_bus_num(bridge->num);
1376         return retval;
1377 }
1378 EXPORT_SYMBOL(vme_register_bridge);
1379
1380 void vme_unregister_bridge(struct vme_bridge *bridge)
1381 {
1382         int i;
1383         struct device *dev;
1384
1385
1386         for (i = 0; i < VME_SLOTS_MAX; i++) {
1387                 dev = &(bridge->dev[i]);
1388                 device_unregister(dev);
1389         }
1390         vme_free_bus_num(bridge->num);
1391 }
1392 EXPORT_SYMBOL(vme_unregister_bridge);
1393
1394
1395 /* - Driver Registration --------------------------------------------------- */
1396
1397 int vme_register_driver(struct vme_driver *drv)
1398 {
1399         drv->driver.name = drv->name;
1400         drv->driver.bus = &vme_bus_type;
1401
1402         return driver_register(&drv->driver);
1403 }
1404 EXPORT_SYMBOL(vme_register_driver);
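/*
 * Example: registering a driver that binds to every slot on the first VME
 * bus. A sketch assuming the bind table entry type declared in vme.h
 * (struct vme_device_id, terminated by a zeroed entry, as walked by
 * vme_bus_match() below); the name and callbacks are placeholders.
 *
 *	static struct vme_device_id my_ids[] = {
 *		{ .bus = 0, .slot = VME_SLOT_ALL },
 *		{ },
 *	};
 *
 *	static struct vme_driver my_driver = {
 *		.name = "my_vme_device",
 *		.bind_table = my_ids,
 *		.probe = my_probe,
 *		.remove = my_remove,
 *	};
 *
 *	retval = vme_register_driver(&my_driver);
 */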
1405
1406 void vme_unregister_driver(struct vme_driver *drv)
1407 {
1408         driver_unregister(&drv->driver);
1409 }
1410 EXPORT_SYMBOL(vme_unregister_driver);
1411
1412 /* - Bus Registration ------------------------------------------------------ */
1413
1414 int vme_calc_slot(struct device *dev)
1415 {
1416         struct vme_bridge *bridge;
1417         int num;
1418
1419         bridge = dev_to_bridge(dev);
1420
1421         /* Determine slot number */
1422         num = 0;
1423         while (num < VME_SLOTS_MAX) {
1424                 if (&(bridge->dev[num]) == dev)
1425                         break;
1426
1427                 num++;
1428         }
1429         if (num == VME_SLOTS_MAX) {
1430                 dev_err(dev, "Failed to identify slot\n");
1431                 num = 0;
1432                 goto err_dev;
1433         }
1434         num++;
1435
1436 err_dev:
1437         return num;
1438 }
1439
1440 static struct vme_driver *dev_to_vme_driver(struct device *dev)
1441 {
1442         if (dev->driver == NULL)
1443                 printk(KERN_ERR "dev->driver is NULL\n");
1444
1445         return container_of(dev->driver, struct vme_driver, driver);
1446 }
1447
1448 static int vme_bus_match(struct device *dev, struct device_driver *drv)
1449 {
1450         struct vme_bridge *bridge;
1451         struct vme_driver *driver;
1452         int i, num;
1453
1454         bridge = dev_to_bridge(dev);
1455         driver = container_of(drv, struct vme_driver, driver);
1456
1457         num = vme_calc_slot(dev);
1458         if (!num)
1459                 goto err_dev;
1460
1461         if (driver->bind_table == NULL) {
1462                 dev_err(dev, "Bind table NULL\n");
1463                 goto err_table;
1464         }
1465
1466         i = 0;
1467         while ((driver->bind_table[i].bus != 0) ||
1468                 (driver->bind_table[i].slot != 0)) {
1469
1470                 if (bridge->num == driver->bind_table[i].bus) {
1471                         if (num == driver->bind_table[i].slot)
1472                                 return 1;
1473
1474                         if (driver->bind_table[i].slot == VME_SLOT_ALL)
1475                                 return 1;
1476
1477                         if ((driver->bind_table[i].slot == VME_SLOT_CURRENT) &&
1478                                 (num == vme_slot_get(dev)))
1479                                 return 1;
1480                 }
1481                 i++;
1482         }
1483
1484 err_dev:
1485 err_table:
1486         return 0;
1487 }
1488
1489 static int vme_bus_probe(struct device *dev)
1490 {
1491         struct vme_bridge *bridge;
1492         struct vme_driver *driver;
1493         int retval = -ENODEV;
1494
1495         driver = dev_to_vme_driver(dev);
1496         bridge = dev_to_bridge(dev);
1497
1498         if (driver->probe != NULL)
1499                 retval = driver->probe(dev, bridge->num, vme_calc_slot(dev));
1500
1501         return retval;
1502 }
1503
1504 static int vme_bus_remove(struct device *dev)
1505 {
1506         struct vme_bridge *bridge;
1507         struct vme_driver *driver;
1508         int retval = -ENODEV;
1509
1510         driver = dev_to_vme_driver(dev);
1511         bridge = dev_to_bridge(dev);
1512
1513         if (driver->remove != NULL)
1514                 retval = driver->remove(dev, bridge->num, vme_calc_slot(dev));
1515
1516         return retval;
1517 }
1518
1519 struct bus_type vme_bus_type = {
1520         .name = "vme",
1521         .match = vme_bus_match,
1522         .probe = vme_bus_probe,
1523         .remove = vme_bus_remove,
1524 };
1525 EXPORT_SYMBOL(vme_bus_type);
1526
1527 static int __init vme_init(void)
1528 {
1529         return bus_register(&vme_bus_type);
1530 }
1531
1532 static void __exit vme_exit(void)
1533 {
1534         bus_unregister(&vme_bus_type);
1535 }
1536
1537 MODULE_DESCRIPTION("VME bridge driver framework");
1538 MODULE_AUTHOR("Martyn Welch <martyn.welch@ge.com>");
1539 MODULE_LICENSE("GPL");
1540
1541 module_init(vme_init);
1542 module_exit(vme_exit);