4 * Author: Martyn Welch <martyn.welch@ge.com>
5 * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
7 * Based on work by Tom Armistead and Ajit Prem
8 * Copyright 2004 Motorola Inc.
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the
12 * Free Software Foundation; either version 2 of the License, or (at your
13 * option) any later version.
16 #include <linux/module.h>
17 #include <linux/moduleparam.h>
19 #include <linux/types.h>
20 #include <linux/kernel.h>
21 #include <linux/errno.h>
22 #include <linux/pci.h>
23 #include <linux/poll.h>
24 #include <linux/highmem.h>
25 #include <linux/interrupt.h>
26 #include <linux/pagemap.h>
27 #include <linux/device.h>
28 #include <linux/dma-mapping.h>
29 #include <linux/syscalls.h>
30 #include <linux/mutex.h>
31 #include <linux/spinlock.h>
34 #include "vme_bridge.h"
36 /* Bitmask and mutex to keep track of bridge numbers */
37 static unsigned int vme_bus_numbers;
38 DEFINE_MUTEX(vme_bus_num_mtx);
40 static void __exit vme_exit(void);
41 static int __init vme_init(void);
45 * Find the bridge resource associated with a specific device resource
/* Return the vme_bridge stashed in dev->platform_data when the slot
 * device was registered (see vme_register_bridge). */
47 static struct vme_bridge *dev_to_bridge(struct device *dev)
49 return dev->platform_data;
53 * Find the bridge that the resource is associated with.
/* Up-cast resource->entry to the container *_resource for the resource's
 * type and return that container's parent bridge; logs and fails on an
 * unknown type. (The switch case labels fall between the lines shown.) */
55 static struct vme_bridge *find_bridge(struct vme_resource *resource)
57 /* Get list to search */
58 switch (resource->type) {
60 return list_entry(resource->entry, struct vme_master_resource,
64 return list_entry(resource->entry, struct vme_slave_resource,
68 return list_entry(resource->entry, struct vme_dma_resource,
72 return list_entry(resource->entry, struct vme_lm_resource,
76 printk(KERN_ERR "Unknown resource type\n");
83 * Allocate a contiguous block of memory for use by the driver. This is used to
84 * create the buffers for the slave windows.
86 * XXX VME bridges could be available on buses other than PCI. At the moment
87 * this framework only supports PCI devices.
/* Allocate a DMA-coherent buffer (for slave windows) through the PCI
 * device that hosts the resource's bridge; errors are logged and the
 * function bails out early on a NULL resource, bridge, or parent dev. */
89 void *vme_alloc_consistent(struct vme_resource *resource, size_t size,
92 struct vme_bridge *bridge;
95 if (resource == NULL) {
96 printk(KERN_ERR "No resource\n");
100 bridge = find_bridge(resource);
101 if (bridge == NULL) {
102 printk(KERN_ERR "Can't find bridge\n");
106 /* Find pci_dev container of dev */
107 if (bridge->parent == NULL) {
108 printk(KERN_ERR "Dev entry NULL\n");
111 pdev = container_of(bridge->parent, struct pci_dev, dev);
113 return pci_alloc_consistent(pdev, size, dma);
118 * Free previously allocated contiguous block of memory.
120 * XXX VME bridges could be available on buses other than PCI. At the moment
121 * this framework only supports PCI devices.
/* Release a buffer obtained from vme_alloc_consistent().
 * NOTE(review): unlike the alloc path, bridge->parent is not checked
 * for NULL before container_of() — confirm callers guarantee it. */
123 void vme_free_consistent(struct vme_resource *resource, size_t size,
124 void *vaddr, dma_addr_t dma)
126 struct vme_bridge *bridge;
127 struct pci_dev *pdev;
129 if (resource == NULL) {
130 printk(KERN_ERR "No resource\n");
134 bridge = find_bridge(resource);
135 if (bridge == NULL) {
136 printk(KERN_ERR "Can't find bridge\n");
140 /* Find pci_dev container of dev */
141 pdev = container_of(bridge->parent, struct pci_dev, dev);
143 pci_free_consistent(pdev, size, vaddr, dma);
/* Return the configured window size of a master or slave resource by
 * querying the appropriate *_get accessor; unknown types are logged. */
147 size_t vme_get_size(struct vme_resource *resource)
150 unsigned long long base, size;
152 vme_address_t aspace;
156 switch (resource->type) {
158 retval = vme_master_get(resource, &enabled, &base, &size,
159 &aspace, &cycle, &dwidth);
164 retval = vme_slave_get(resource, &enabled, &base, &size,
165 &buf_base, &aspace, &cycle);
173 printk(KERN_ERR "Unknown resource type\n");
/* Validate that [vme_base, vme_base + size) fits within the addressable
 * range of the requested VME address space (A16/A24/A32/CRCSR...);
 * both the base and the end of the window are bounds-checked. */
180 static int vme_check_window(vme_address_t aspace, unsigned long long vme_base,
181 unsigned long long size)
187 if (((vme_base + size) > VME_A16_MAX) ||
188 (vme_base > VME_A16_MAX))
192 if (((vme_base + size) > VME_A24_MAX) ||
193 (vme_base > VME_A24_MAX))
197 if (((vme_base + size) > VME_A32_MAX) ||
198 (vme_base > VME_A32_MAX))
203 * Any value held in an unsigned long long can be used as the
208 if (((vme_base + size) > VME_CRCSR_MAX) ||
209 (vme_base > VME_CRCSR_MAX))
219 printk(KERN_ERR "Invalid address space\n");
228 * Request a slave image with specific attributes, return some unique
/* Claim the first unlocked slave image on the device's bridge whose
 * address/cycle attributes cover the request, mark it locked under its
 * mutex, and wrap it in a freshly allocated vme_resource.  On resource
 * allocation failure the image lock is rolled back. */
231 struct vme_resource *vme_slave_request(struct device *dev,
232 vme_address_t address, vme_cycle_t cycle)
234 struct vme_bridge *bridge;
235 struct list_head *slave_pos = NULL;
236 struct vme_slave_resource *allocated_image = NULL;
237 struct vme_slave_resource *slave_image = NULL;
238 struct vme_resource *resource = NULL;
240 bridge = dev_to_bridge(dev);
241 if (bridge == NULL) {
242 printk(KERN_ERR "Can't find VME bus\n");
246 /* Loop through slave resources */
247 list_for_each(slave_pos, &(bridge->slave_resources)) {
248 slave_image = list_entry(slave_pos,
249 struct vme_slave_resource, list);
251 if (slave_image == NULL) {
252 printk(KERN_ERR "Registered NULL Slave resource\n");
256 /* Find an unlocked and compatible image */
257 mutex_lock(&(slave_image->mtx));
258 if (((slave_image->address_attr & address) == address) &&
259 ((slave_image->cycle_attr & cycle) == cycle) &&
260 (slave_image->locked == 0)) {
262 slave_image->locked = 1;
263 mutex_unlock(&(slave_image->mtx));
264 allocated_image = slave_image;
267 mutex_unlock(&(slave_image->mtx));
271 if (allocated_image == NULL)
274 resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
275 if (resource == NULL) {
276 printk(KERN_WARNING "Unable to allocate resource structure\n");
279 resource->type = VME_SLAVE;
280 resource->entry = &(allocated_image->list);
/* Error path: release the image we locked above. */
286 mutex_lock(&(slave_image->mtx));
287 slave_image->locked = 0;
288 mutex_unlock(&(slave_image->mtx));
/* Configure a slave window: verify the resource type, that the bridge
 * implements slave_set, that the image supports the requested
 * address/cycle attributes, and that the window fits the address space,
 * then delegate to the bridge driver. */
295 int vme_slave_set(struct vme_resource *resource, int enabled,
296 unsigned long long vme_base, unsigned long long size,
297 dma_addr_t buf_base, vme_address_t aspace, vme_cycle_t cycle)
299 struct vme_bridge *bridge = find_bridge(resource);
300 struct vme_slave_resource *image;
303 if (resource->type != VME_SLAVE) {
304 printk(KERN_ERR "Not a slave resource\n");
308 image = list_entry(resource->entry, struct vme_slave_resource, list);
310 if (bridge->slave_set == NULL) {
311 printk(KERN_ERR "Function not supported\n");
315 if (!(((image->address_attr & aspace) == aspace) &&
316 ((image->cycle_attr & cycle) == cycle))) {
317 printk(KERN_ERR "Invalid attributes\n");
321 retval = vme_check_window(aspace, vme_base, size);
325 return bridge->slave_set(image, enabled, vme_base, size, buf_base,
/* Read back the current configuration of a slave window through the
 * bridge driver's slave_get hook; all out-parameters are filled by it. */
330 int vme_slave_get(struct vme_resource *resource, int *enabled,
331 unsigned long long *vme_base, unsigned long long *size,
332 dma_addr_t *buf_base, vme_address_t *aspace, vme_cycle_t *cycle)
334 struct vme_bridge *bridge = find_bridge(resource);
335 struct vme_slave_resource *image;
337 if (resource->type != VME_SLAVE) {
338 printk(KERN_ERR "Not a slave resource\n");
342 image = list_entry(resource->entry, struct vme_slave_resource, list);
344 if (bridge->slave_get == NULL) {
345 printk(KERN_ERR "vme_slave_get not supported\n");
349 return bridge->slave_get(image, enabled, vme_base, size, buf_base,
/* Release a slave window claimed by vme_slave_request(): clear the
 * locked flag under the image mutex (warning on double-free) and free
 * the vme_resource wrapper. */
354 void vme_slave_free(struct vme_resource *resource)
356 struct vme_slave_resource *slave_image;
358 if (resource->type != VME_SLAVE) {
359 printk(KERN_ERR "Not a slave resource\n");
363 slave_image = list_entry(resource->entry, struct vme_slave_resource,
365 if (slave_image == NULL) {
366 printk(KERN_ERR "Can't find slave resource\n");
/* Unlock image under its mutex */
371 mutex_lock(&(slave_image->mtx));
372 if (slave_image->locked == 0)
373 printk(KERN_ERR "Image is already free\n");
375 slave_image->locked = 0;
376 mutex_unlock(&(slave_image->mtx));
378 /* Free up resource memory */
384 * Request a master image with specific attributes, return some unique
/* Claim the first unlocked master image matching the requested address,
 * cycle and data-width attributes.  Master images are guarded by a
 * spinlock (not a mutex like slave images); the lock is rolled back if
 * the resource wrapper cannot be allocated. */
387 struct vme_resource *vme_master_request(struct device *dev,
388 vme_address_t address, vme_cycle_t cycle, vme_width_t dwidth)
390 struct vme_bridge *bridge;
391 struct list_head *master_pos = NULL;
392 struct vme_master_resource *allocated_image = NULL;
393 struct vme_master_resource *master_image = NULL;
394 struct vme_resource *resource = NULL;
396 bridge = dev_to_bridge(dev);
397 if (bridge == NULL) {
398 printk(KERN_ERR "Can't find VME bus\n");
402 /* Loop through master resources */
403 list_for_each(master_pos, &(bridge->master_resources)) {
404 master_image = list_entry(master_pos,
405 struct vme_master_resource, list);
407 if (master_image == NULL) {
408 printk(KERN_WARNING "Registered NULL master resource\n");
412 /* Find an unlocked and compatible image */
413 spin_lock(&(master_image->lock));
414 if (((master_image->address_attr & address) == address) &&
415 ((master_image->cycle_attr & cycle) == cycle) &&
416 ((master_image->width_attr & dwidth) == dwidth) &&
417 (master_image->locked == 0)) {
419 master_image->locked = 1;
420 spin_unlock(&(master_image->lock));
421 allocated_image = master_image;
424 spin_unlock(&(master_image->lock));
427 /* Check to see if we found a resource */
428 if (allocated_image == NULL) {
429 printk(KERN_ERR "Can't find a suitable resource\n");
433 resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
434 if (resource == NULL) {
435 printk(KERN_ERR "Unable to allocate resource structure\n");
438 resource->type = VME_MASTER;
439 resource->entry = &(allocated_image->list);
/* Error path: release the image we locked above. */
446 spin_lock(&(master_image->lock));
447 master_image->locked = 0;
448 spin_unlock(&(master_image->lock));
/* Configure a master window: validate resource type, bridge support,
 * image attribute compatibility (address/cycle/width) and window
 * bounds, then delegate to the bridge driver's master_set hook. */
455 int vme_master_set(struct vme_resource *resource, int enabled,
456 unsigned long long vme_base, unsigned long long size,
457 vme_address_t aspace, vme_cycle_t cycle, vme_width_t dwidth)
459 struct vme_bridge *bridge = find_bridge(resource);
460 struct vme_master_resource *image;
463 if (resource->type != VME_MASTER) {
464 printk(KERN_ERR "Not a master resource\n");
468 image = list_entry(resource->entry, struct vme_master_resource, list);
470 if (bridge->master_set == NULL) {
471 printk(KERN_WARNING "vme_master_set not supported\n");
475 if (!(((image->address_attr & aspace) == aspace) &&
476 ((image->cycle_attr & cycle) == cycle) &&
477 ((image->width_attr & dwidth) == dwidth))) {
478 printk(KERN_WARNING "Invalid attributes\n");
482 retval = vme_check_window(aspace, vme_base, size);
486 return bridge->master_set(image, enabled, vme_base, size, aspace,
/* Read back the current configuration of a master window through the
 * bridge driver's master_get hook; all out-parameters are filled by it.
 * Fix: the "not supported" message previously said "vme_master_set" —
 * a copy-paste error from the setter above. */
491 int vme_master_get(struct vme_resource *resource, int *enabled,
492 unsigned long long *vme_base, unsigned long long *size,
493 vme_address_t *aspace, vme_cycle_t *cycle, vme_width_t *dwidth)
495 struct vme_bridge *bridge = find_bridge(resource);
496 struct vme_master_resource *image;
498 if (resource->type != VME_MASTER) {
499 printk(KERN_ERR "Not a master resource\n");
503 image = list_entry(resource->entry, struct vme_master_resource, list);
505 if (bridge->master_get == NULL) {
506 printk(KERN_WARNING "vme_master_get not supported\n");
510 return bridge->master_get(image, enabled, vme_base, size, aspace,
516 * Read data out of VME space into a buffer.
/* Read from VME space through a master window into buf, clamping the
 * transfer so it does not run past the end of the window. */
518 ssize_t vme_master_read(struct vme_resource *resource, void *buf, size_t count,
521 struct vme_bridge *bridge = find_bridge(resource);
522 struct vme_master_resource *image;
525 if (bridge->master_read == NULL) {
526 printk(KERN_WARNING "Reading from resource not supported\n");
530 if (resource->type != VME_MASTER) {
531 printk(KERN_ERR "Not a master resource\n");
535 image = list_entry(resource->entry, struct vme_master_resource, list);
537 length = vme_get_size(resource);
539 if (offset > length) {
540 printk(KERN_WARNING "Invalid Offset\n");
/* Clamp count so offset + count stays within the window. */
544 if ((offset + count) > length)
545 count = length - offset;
547 return bridge->master_read(image, buf, count, offset);
553 * Write data out to VME space from a buffer.
/* Write buf out to VME space through a master window, clamping the
 * transfer so it does not run past the end of the window. */
555 ssize_t vme_master_write(struct vme_resource *resource, void *buf,
556 size_t count, loff_t offset)
558 struct vme_bridge *bridge = find_bridge(resource);
559 struct vme_master_resource *image;
562 if (bridge->master_write == NULL) {
563 printk(KERN_WARNING "Writing to resource not supported\n");
567 if (resource->type != VME_MASTER) {
568 printk(KERN_ERR "Not a master resource\n");
572 image = list_entry(resource->entry, struct vme_master_resource, list);
574 length = vme_get_size(resource);
576 if (offset > length) {
577 printk(KERN_WARNING "Invalid Offset\n");
/* Clamp count so offset + count stays within the window. */
581 if ((offset + count) > length)
582 count = length - offset;
584 return bridge->master_write(image, buf, count, offset);
589 * Perform RMW cycle to provided location.
/* Perform an atomic read-modify-write cycle at offset within the master
 * window, delegating mask/compare/swap semantics to the bridge driver. */
591 unsigned int vme_master_rmw(struct vme_resource *resource, unsigned int mask,
592 unsigned int compare, unsigned int swap, loff_t offset)
594 struct vme_bridge *bridge = find_bridge(resource);
595 struct vme_master_resource *image;
597 if (bridge->master_rmw == NULL) {
598 printk(KERN_WARNING "Writing to resource not supported\n");
602 if (resource->type != VME_MASTER) {
603 printk(KERN_ERR "Not a master resource\n");
607 image = list_entry(resource->entry, struct vme_master_resource, list);
609 return bridge->master_rmw(image, mask, compare, swap, offset);
/* Release a master window claimed by vme_master_request(): clear the
 * locked flag under the image spinlock (warning on double-free) and
 * free the vme_resource wrapper. */
613 void vme_master_free(struct vme_resource *resource)
615 struct vme_master_resource *master_image;
617 if (resource->type != VME_MASTER) {
618 printk(KERN_ERR "Not a master resource\n");
622 master_image = list_entry(resource->entry, struct vme_master_resource,
624 if (master_image == NULL) {
625 printk(KERN_ERR "Can't find master resource\n");
/* Unlock image under its spinlock */
630 spin_lock(&(master_image->lock));
631 if (master_image->locked == 0)
632 printk(KERN_ERR "Image is already free\n");
634 master_image->locked = 0;
635 spin_unlock(&(master_image->lock));
637 /* Free up resource memory */
643 * Request a DMA controller with specific attributes, return some unique
/* Claim the first unlocked DMA controller on the device's bridge whose
 * route attributes cover the request; the controller lock is rolled
 * back if the resource wrapper cannot be allocated. */
646 struct vme_resource *vme_dma_request(struct device *dev, vme_dma_route_t route)
648 struct vme_bridge *bridge;
649 struct list_head *dma_pos = NULL;
650 struct vme_dma_resource *allocated_ctrlr = NULL;
651 struct vme_dma_resource *dma_ctrlr = NULL;
652 struct vme_resource *resource = NULL;
654 /* XXX Not checking resource attributes */
655 printk(KERN_ERR "No VME resource Attribute tests done\n");
657 bridge = dev_to_bridge(dev);
658 if (bridge == NULL) {
659 printk(KERN_ERR "Can't find VME bus\n");
663 /* Loop through DMA resources */
664 list_for_each(dma_pos, &(bridge->dma_resources)) {
665 dma_ctrlr = list_entry(dma_pos,
666 struct vme_dma_resource, list);
668 if (dma_ctrlr == NULL) {
669 printk(KERN_ERR "Registered NULL DMA resource\n");
673 /* Find an unlocked and compatible controller */
674 mutex_lock(&(dma_ctrlr->mtx));
675 if (((dma_ctrlr->route_attr & route) == route) &&
676 (dma_ctrlr->locked == 0)) {
678 dma_ctrlr->locked = 1;
679 mutex_unlock(&(dma_ctrlr->mtx));
680 allocated_ctrlr = dma_ctrlr;
683 mutex_unlock(&(dma_ctrlr->mtx));
686 /* Check to see if we found a resource */
687 if (allocated_ctrlr == NULL)
690 resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
691 if (resource == NULL) {
692 printk(KERN_WARNING "Unable to allocate resource structure\n");
695 resource->type = VME_DMA;
696 resource->entry = &(allocated_ctrlr->list);
/* Error path: release the controller we locked above. */
702 mutex_lock(&(dma_ctrlr->mtx));
703 dma_ctrlr->locked = 0;
704 mutex_unlock(&(dma_ctrlr->mtx));
/* Allocate and initialise an empty DMA link list bound to the DMA
 * controller behind the given resource.  Caller owns the returned list
 * (freed via vme_dma_list_free). */
714 struct vme_dma_list *vme_new_dma_list(struct vme_resource *resource)
716 struct vme_dma_resource *ctrlr;
717 struct vme_dma_list *dma_list;
719 if (resource->type != VME_DMA) {
720 printk(KERN_ERR "Not a DMA resource\n");
724 ctrlr = list_entry(resource->entry, struct vme_dma_resource, list);
726 dma_list = kmalloc(sizeof(struct vme_dma_list), GFP_KERNEL);
727 if (dma_list == NULL) {
728 printk(KERN_ERR "Unable to allocate memory for new dma list\n");
731 INIT_LIST_HEAD(&(dma_list->entries));
732 dma_list->parent = ctrlr;
733 mutex_init(&(dma_list->mtx));
740 * Create "Pattern" type attributes
/* Build a VME_DMA_PATTERN attribute describing a pattern source for a
 * DMA transfer.  Allocates the generic attribute plus its private
 * pattern payload; caller frees with vme_dma_free_attribute(). */
742 struct vme_dma_attr *vme_dma_pattern_attribute(u32 pattern,
745 struct vme_dma_attr *attributes;
746 struct vme_dma_pattern *pattern_attr;
748 attributes = kmalloc(sizeof(struct vme_dma_attr), GFP_KERNEL);
749 if (attributes == NULL) {
750 printk(KERN_ERR "Unable to allocate memory for attributes "
755 pattern_attr = kmalloc(sizeof(struct vme_dma_pattern), GFP_KERNEL);
756 if (pattern_attr == NULL) {
757 printk(KERN_ERR "Unable to allocate memory for pattern "
762 attributes->type = VME_DMA_PATTERN;
763 attributes->private = (void *)pattern_attr;
765 pattern_attr->pattern = pattern;
766 pattern_attr->type = type;
779 * Create "PCI" type attributes
/* Build a VME_DMA_PCI attribute describing a PCI bus address endpoint
 * for a DMA transfer; caller frees with vme_dma_free_attribute(). */
781 struct vme_dma_attr *vme_dma_pci_attribute(dma_addr_t address)
783 struct vme_dma_attr *attributes;
784 struct vme_dma_pci *pci_attr;
786 /* XXX Run some sanity checks here */
788 attributes = kmalloc(sizeof(struct vme_dma_attr), GFP_KERNEL);
789 if (attributes == NULL) {
790 printk(KERN_ERR "Unable to allocate memory for attributes "
795 pci_attr = kmalloc(sizeof(struct vme_dma_pci), GFP_KERNEL);
796 if (pci_attr == NULL) {
797 printk(KERN_ERR "Unable to allocate memory for pci "
804 attributes->type = VME_DMA_PCI;
805 attributes->private = (void *)pci_attr;
807 pci_attr->address = address;
820 * Create "VME" type attributes
/* Build a VME_DMA_VME attribute describing a VME bus endpoint (address,
 * address space, cycle type, data width) for a DMA transfer; caller
 * frees with vme_dma_free_attribute(). */
822 struct vme_dma_attr *vme_dma_vme_attribute(unsigned long long address,
823 vme_address_t aspace, vme_cycle_t cycle, vme_width_t dwidth)
825 struct vme_dma_attr *attributes;
826 struct vme_dma_vme *vme_attr;
828 /* XXX Run some sanity checks here */
830 attributes = kmalloc(
831 sizeof(struct vme_dma_attr), GFP_KERNEL);
832 if (attributes == NULL) {
833 printk(KERN_ERR "Unable to allocate memory for attributes "
838 vme_attr = kmalloc(sizeof(struct vme_dma_vme), GFP_KERNEL);
839 if (vme_attr == NULL) {
840 printk(KERN_ERR "Unable to allocate memory for vme "
845 attributes->type = VME_DMA_VME;
846 attributes->private = (void *)vme_attr;
848 vme_attr->address = address;
849 vme_attr->aspace = aspace;
850 vme_attr->cycle = cycle;
851 vme_attr->dwidth = dwidth;
/* Free a DMA attribute created by one of the vme_dma_*_attribute
 * helpers: the private payload first, then (beyond the lines shown)
 * the attribute wrapper itself. */
866 void vme_dma_free_attribute(struct vme_dma_attr *attributes)
868 kfree(attributes->private);
/* Append a src->dest transfer of count bytes to a DMA link list via the
 * bridge driver.  Uses mutex_trylock so an already-submitted list is
 * rejected rather than blocked on. */
873 int vme_dma_list_add(struct vme_dma_list *list, struct vme_dma_attr *src,
874 struct vme_dma_attr *dest, size_t count)
876 struct vme_bridge *bridge = list->parent->parent;
879 if (bridge->dma_list_add == NULL) {
880 printk(KERN_WARNING "Link List DMA generation not supported\n");
884 if (!mutex_trylock(&(list->mtx))) {
885 printk(KERN_ERR "Link List already submitted\n");
889 retval = bridge->dma_list_add(list, src, dest, count);
891 mutex_unlock(&(list->mtx));
/* Execute a previously built DMA link list; the list mutex is held for
 * the duration of the (blocking) bridge-driver call. */
897 int vme_dma_list_exec(struct vme_dma_list *list)
899 struct vme_bridge *bridge = list->parent->parent;
902 if (bridge->dma_list_exec == NULL) {
903 printk(KERN_ERR "Link List DMA execution not supported\n");
907 mutex_lock(&(list->mtx));
909 retval = bridge->dma_list_exec(list);
911 mutex_unlock(&(list->mtx));
/* Tear down a DMA link list: refuse if the list is in use (trylock),
 * have the bridge driver empty its driver-specific entries, then free
 * the list.  On empty failure the list is left intact and locked state
 * restored. */
917 int vme_dma_list_free(struct vme_dma_list *list)
919 struct vme_bridge *bridge = list->parent->parent;
922 if (bridge->dma_list_empty == NULL) {
923 printk(KERN_WARNING "Emptying of Link Lists not supported\n");
927 if (!mutex_trylock(&(list->mtx))) {
928 printk(KERN_ERR "Link List in use\n");
933 * Empty out all of the entries from the dma list. We need to go to the
934 * low level driver as dma entries are driver specific.
936 retval = bridge->dma_list_empty(list);
938 printk(KERN_ERR "Unable to empty link-list entries\n");
939 mutex_unlock(&(list->mtx));
942 mutex_unlock(&(list->mtx));
/* Release a DMA controller claimed by vme_dma_request().  Fails if the
 * controller mutex is contended or transfers are still pending/running. */
949 int vme_dma_free(struct vme_resource *resource)
951 struct vme_dma_resource *ctrlr;
953 if (resource->type != VME_DMA) {
954 printk(KERN_ERR "Not a DMA resource\n");
958 ctrlr = list_entry(resource->entry, struct vme_dma_resource, list);
960 if (!mutex_trylock(&(ctrlr->mtx))) {
961 printk(KERN_ERR "Resource busy, can't free\n");
965 if (!(list_empty(&(ctrlr->pending)) && list_empty(&(ctrlr->running)))) {
966 printk(KERN_WARNING "Resource still processing transfers\n");
967 mutex_unlock(&(ctrlr->mtx));
973 mutex_unlock(&(ctrlr->mtx));
/* Dispatch a VME interrupt (level 1-7, status/ID vector) to the handler
 * registered via vme_irq_request(); an interrupt with no registered
 * handler is logged as spurious.
 * Fix: corrected the "Spurilous" typo in the warning message. */
979 void vme_irq_handler(struct vme_bridge *bridge, int level, int statid)
981 void (*call)(int, int, void *);
984 call = bridge->irq[level - 1].callback[statid].func;
985 priv_data = bridge->irq[level - 1].callback[statid].priv_data;
988 call(level, statid, priv_data);
990 printk(KERN_WARNING "Spurious VME interrupt, level:%x, "
991 "vector:%x\n", level, statid);
/* Register a callback for a VME interrupt level (1-7) and status/ID.
 * Rejects an already-claimed vector, bumps the per-level refcount, and
 * enables the level in hardware via the bridge's irq_set hook, all
 * under the bridge irq mutex. */
995 int vme_irq_request(struct device *dev, int level, int statid,
996 void (*callback)(int, int, void *),
999 struct vme_bridge *bridge;
1001 bridge = dev_to_bridge(dev);
1002 if (bridge == NULL) {
1003 printk(KERN_ERR "Can't find VME bus\n");
1007 if ((level < 1) || (level > 7)) {
1008 printk(KERN_ERR "Invalid interrupt level\n");
1012 if (bridge->irq_set == NULL) {
1013 printk(KERN_ERR "Configuring interrupts not supported\n");
1017 mutex_lock(&(bridge->irq_mtx));
1019 if (bridge->irq[level - 1].callback[statid].func) {
1020 mutex_unlock(&(bridge->irq_mtx));
1021 printk(KERN_WARNING "VME Interrupt already taken\n");
1025 bridge->irq[level - 1].count++;
1026 bridge->irq[level - 1].callback[statid].priv_data = priv_data;
1027 bridge->irq[level - 1].callback[statid].func = callback;
1029 /* Enable IRQ level */
1030 bridge->irq_set(bridge, level, 1, 1);
1032 mutex_unlock(&(bridge->irq_mtx));
/* Unregister an interrupt callback: drop the per-level refcount,
 * disable the level in hardware when no handlers remain, and clear the
 * vector's callback slot, all under the bridge irq mutex. */
1038 void vme_irq_free(struct device *dev, int level, int statid)
1040 struct vme_bridge *bridge;
1042 bridge = dev_to_bridge(dev);
1043 if (bridge == NULL) {
1044 printk(KERN_ERR "Can't find VME bus\n");
1048 if ((level < 1) || (level > 7)) {
1049 printk(KERN_ERR "Invalid interrupt level\n");
1053 if (bridge->irq_set == NULL) {
1054 printk(KERN_ERR "Configuring interrupts not supported\n");
1058 mutex_lock(&(bridge->irq_mtx));
1060 bridge->irq[level - 1].count--;
1062 /* Disable IRQ level if no more interrupts attached at this level*/
1063 if (bridge->irq[level - 1].count == 0)
1064 bridge->irq_set(bridge, level, 0, 1);
1066 bridge->irq[level - 1].callback[statid].func = NULL;
1067 bridge->irq[level - 1].callback[statid].priv_data = NULL;
1069 mutex_unlock(&(bridge->irq_mtx));
/* Generate (assert) a VME interrupt at the given level (1-7) with the
 * given status/ID, via the bridge driver's irq_generate hook. */
1073 int vme_irq_generate(struct device *dev, int level, int statid)
1075 struct vme_bridge *bridge;
1077 bridge = dev_to_bridge(dev);
1078 if (bridge == NULL) {
1079 printk(KERN_ERR "Can't find VME bus\n");
1083 if ((level < 1) || (level > 7)) {
1084 printk(KERN_WARNING "Invalid interrupt level\n");
1088 if (bridge->irq_generate == NULL) {
1089 printk(KERN_WARNING "Interrupt generation not supported\n");
1093 return bridge->irq_generate(bridge, level, statid);
1098 * Request the location monitor, return resource or NULL
/* Claim the first unlocked location monitor on the device's bridge and
 * wrap it in a vme_resource; the lock is rolled back if the wrapper
 * allocation fails.  Locking mirrors vme_dma_request(). */
1100 struct vme_resource *vme_lm_request(struct device *dev)
1102 struct vme_bridge *bridge;
1103 struct list_head *lm_pos = NULL;
1104 struct vme_lm_resource *allocated_lm = NULL;
1105 struct vme_lm_resource *lm = NULL;
1106 struct vme_resource *resource = NULL;
1108 bridge = dev_to_bridge(dev);
1109 if (bridge == NULL) {
1110 printk(KERN_ERR "Can't find VME bus\n");
1114 /* Loop through DMA resources */
1115 list_for_each(lm_pos, &(bridge->lm_resources)) {
1116 lm = list_entry(lm_pos,
1117 struct vme_lm_resource, list);
1120 printk(KERN_ERR "Registered NULL Location Monitor "
1125 /* Find an unlocked controller */
1126 mutex_lock(&(lm->mtx));
1127 if (lm->locked == 0) {
1129 mutex_unlock(&(lm->mtx));
1133 mutex_unlock(&(lm->mtx));
1136 /* Check to see if we found a resource */
1137 if (allocated_lm == NULL)
1140 resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
1141 if (resource == NULL) {
1142 printk(KERN_ERR "Unable to allocate resource structure\n");
1145 resource->type = VME_LM;
1146 resource->entry = &(allocated_lm->list);
/* Error path: release the monitor we locked above. */
1152 mutex_lock(&(lm->mtx));
1154 mutex_unlock(&(lm->mtx));
/* Return the number of monitor slots provided by this location monitor
 * resource (lm->monitors). */
1161 int vme_lm_count(struct vme_resource *resource)
1163 struct vme_lm_resource *lm;
1165 if (resource->type != VME_LM) {
1166 printk(KERN_ERR "Not a Location Monitor resource\n");
1170 lm = list_entry(resource->entry, struct vme_lm_resource, list);
1172 return lm->monitors;
/* Configure the location monitor's base address, address space and
 * cycle types via the bridge driver's lm_set hook. */
1176 int vme_lm_set(struct vme_resource *resource, unsigned long long lm_base,
1177 vme_address_t aspace, vme_cycle_t cycle)
1179 struct vme_bridge *bridge = find_bridge(resource);
1180 struct vme_lm_resource *lm;
1182 if (resource->type != VME_LM) {
1183 printk(KERN_ERR "Not a Location Monitor resource\n");
1187 lm = list_entry(resource->entry, struct vme_lm_resource, list);
1189 if (bridge->lm_set == NULL) {
1190 printk(KERN_ERR "vme_lm_set not supported\n");
1194 /* XXX Check parameters */
1196 return bridge->lm_set(lm, lm_base, aspace, cycle);
/* Read back the location monitor's configuration via the bridge
 * driver's lm_get hook; out-parameters are filled by it. */
1200 int vme_lm_get(struct vme_resource *resource, unsigned long long *lm_base,
1201 vme_address_t *aspace, vme_cycle_t *cycle)
1203 struct vme_bridge *bridge = find_bridge(resource);
1204 struct vme_lm_resource *lm;
1206 if (resource->type != VME_LM) {
1207 printk(KERN_ERR "Not a Location Monitor resource\n");
1211 lm = list_entry(resource->entry, struct vme_lm_resource, list);
1213 if (bridge->lm_get == NULL) {
1214 printk(KERN_ERR "vme_lm_get not supported\n");
1218 return bridge->lm_get(lm, lm_base, aspace, cycle);
/* Attach a callback to one monitor slot of the location monitor via
 * the bridge driver's lm_attach hook. */
1222 int vme_lm_attach(struct vme_resource *resource, int monitor,
1223 void (*callback)(int))
1225 struct vme_bridge *bridge = find_bridge(resource);
1226 struct vme_lm_resource *lm;
1228 if (resource->type != VME_LM) {
1229 printk(KERN_ERR "Not a Location Monitor resource\n");
1233 lm = list_entry(resource->entry, struct vme_lm_resource, list);
1235 if (bridge->lm_attach == NULL) {
1236 printk(KERN_ERR "vme_lm_attach not supported\n");
1240 return bridge->lm_attach(lm, monitor, callback);
/* Detach the callback from one monitor slot of the location monitor
 * via the bridge driver's lm_detach hook. */
1244 int vme_lm_detach(struct vme_resource *resource, int monitor)
1246 struct vme_bridge *bridge = find_bridge(resource);
1247 struct vme_lm_resource *lm;
1249 if (resource->type != VME_LM) {
1250 printk(KERN_ERR "Not a Location Monitor resource\n");
1254 lm = list_entry(resource->entry, struct vme_lm_resource, list);
1256 if (bridge->lm_detach == NULL) {
1257 printk(KERN_ERR "vme_lm_detach not supported\n");
1261 return bridge->lm_detach(lm, monitor);
/* Release a location monitor claimed by vme_lm_request().  The XXX
 * below notes that attached callbacks are not verified/detached first. */
1265 void vme_lm_free(struct vme_resource *resource)
1267 struct vme_lm_resource *lm;
1269 if (resource->type != VME_LM) {
1270 printk(KERN_ERR "Not a Location Monitor resource\n");
1274 lm = list_entry(resource->entry, struct vme_lm_resource, list);
1276 mutex_lock(&(lm->mtx));
1279 * Check to see that there aren't any callbacks still attached, if
1280 * there are we should probably be detaching them!
1285 mutex_unlock(&(lm->mtx));
/* Ask the bridge driver which physical VME slot this bridge occupies;
 * negative/error paths are logged. */
1291 int vme_slot_get(struct device *bus)
1293 struct vme_bridge *bridge;
1295 bridge = dev_to_bridge(bus);
1296 if (bridge == NULL) {
1297 printk(KERN_ERR "Can't find VME bus\n");
1301 if (bridge->slot_get == NULL) {
1302 printk(KERN_WARNING "vme_slot_get not supported\n");
1306 return bridge->slot_get(bridge);
1311 /* - Bridge Registration --------------------------------------------------- */
/* Allocate the lowest free bus number by scanning the vme_bus_numbers
 * bitmask under vme_bus_num_mtx and setting the first clear bit. */
1313 static int vme_alloc_bus_num(void)
1317 mutex_lock(&vme_bus_num_mtx);
1318 for (i = 0; i < sizeof(vme_bus_numbers) * 8; i++) {
1319 if (((vme_bus_numbers >> i) & 0x1) == 0) {
1320 vme_bus_numbers |= (0x1 << i);
1324 mutex_unlock(&vme_bus_num_mtx);
/* Return a bus number to the vme_bus_numbers bitmask.
 * Fix: the original used "|= ~(0x1 << bus)", which sets every bit
 * except the one being freed — marking all other bus numbers as in use
 * and leaking them permanently.  Clearing the bit requires "&= ~". */
1329 static void vme_free_bus_num(int bus)
1331 mutex_lock(&vme_bus_num_mtx);
1332 vme_bus_numbers &= ~(0x1 << bus);
1333 mutex_unlock(&vme_bus_num_mtx);
/* Register a bridge with the framework: allocate a bus number, then
 * create and register one struct device per VME slot (VME_SLOTS_MAX,
 * per ANSI/VITA 1-1994 IDs), stashing the bridge in platform_data for
 * dev_to_bridge().  On failure, already-registered slot devices are
 * unwound and the bus number released. */
1336 int vme_register_bridge(struct vme_bridge *bridge)
1342 bridge->num = vme_alloc_bus_num();
1344 /* This creates 32 vme "slot" devices. This equates to a slot for each
1345 * ID available in a system conforming to the ANSI/VITA 1-1994
1348 for (i = 0; i < VME_SLOTS_MAX; i++) {
1349 dev = &(bridge->dev[i]);
1350 memset(dev, 0, sizeof(struct device));
1352 dev->parent = bridge->parent;
1353 dev->bus = &(vme_bus_type);
1355 * We save a pointer to the bridge in platform_data so that we
1356 * can get to it later. We keep driver_data for use by the
1357 * driver that binds against the slot
1359 dev->platform_data = bridge;
1360 dev_set_name(dev, "vme-%x.%x", bridge->num, i + 1);
1362 retval = device_register(dev);
/* Error unwind: unregister the slot devices created so far. */
1372 dev = &(bridge->dev[i]);
1373 device_unregister(dev);
1375 vme_free_bus_num(bridge->num);
/* Unregister a bridge: remove every slot device created by
 * vme_register_bridge() and release its bus number. */
1380 void vme_unregister_bridge(struct vme_bridge *bridge)
1386 for (i = 0; i < VME_SLOTS_MAX; i++) {
1387 dev = &(bridge->dev[i]);
1388 device_unregister(dev);
1390 vme_free_bus_num(bridge->num);
1395 /* - Driver Registration --------------------------------------------------- */
/* Register a VME driver with the driver core on the vme bus. */
1397 int vme_register_driver(struct vme_driver *drv)
1399 drv->driver.name = drv->name;
1400 drv->driver.bus = &vme_bus_type;
1402 return driver_register(&drv->driver);
/* Remove a VME driver from the driver core. */
1406 void vme_unregister_driver(struct vme_driver *drv)
1408 driver_unregister(&drv->driver);
1412 /* - Bus Registration ------------------------------------------------------ */
/* Determine which slot index a slot device corresponds to by locating
 * it in its bridge's dev[] array; errors if the device isn't found. */
1414 int vme_calc_slot(struct device *dev)
1416 struct vme_bridge *bridge;
1419 bridge = dev_to_bridge(dev);
1421 /* Determine slot number */
1423 while (num < VME_SLOTS_MAX) {
1424 if (&(bridge->dev[num]) == dev)
1429 if (num == VME_SLOTS_MAX) {
1430 dev_err(dev, "Failed to identify slot\n");
/* Up-cast dev->driver to its containing vme_driver.
 * NOTE(review): a NULL dev->driver is only logged, then still passed to
 * container_of() — the caller appears to rely on it being non-NULL. */
1440 static struct vme_driver *dev_to_vme_driver(struct device *dev)
1442 if (dev->driver == NULL)
1443 printk(KERN_ERR "Bugger dev->driver is NULL\n");
1445 return container_of(dev->driver, struct vme_driver, driver);
/* Bus-type match callback: walk the driver's bind_table (terminated by
 * a bus==0 && slot==0 entry) and match on exact slot, VME_SLOT_ALL, or
 * VME_SLOT_CURRENT (the slot the bridge itself occupies). */
1448 static int vme_bus_match(struct device *dev, struct device_driver *drv)
1450 struct vme_bridge *bridge;
1451 struct vme_driver *driver;
1454 bridge = dev_to_bridge(dev);
1455 driver = container_of(drv, struct vme_driver, driver);
1457 num = vme_calc_slot(dev);
1461 if (driver->bind_table == NULL) {
1462 dev_err(dev, "Bind table NULL\n");
1467 while ((driver->bind_table[i].bus != 0) ||
1468 (driver->bind_table[i].slot != 0)) {
1470 if (bridge->num == driver->bind_table[i].bus) {
1471 if (num == driver->bind_table[i].slot)
1474 if (driver->bind_table[i].slot == VME_SLOT_ALL)
1477 if ((driver->bind_table[i].slot == VME_SLOT_CURRENT) &&
1478 (num == vme_slot_get(dev)))
/* Bus-type probe callback: forward to the matched vme_driver's probe
 * with the bus number and slot; -ENODEV if the driver has no probe. */
1489 static int vme_bus_probe(struct device *dev)
1491 struct vme_bridge *bridge;
1492 struct vme_driver *driver;
1493 int retval = -ENODEV;
1495 driver = dev_to_vme_driver(dev);
1496 bridge = dev_to_bridge(dev);
1498 if (driver->probe != NULL)
1499 retval = driver->probe(dev, bridge->num, vme_calc_slot(dev));
/* Bus-type remove callback: forward to the matched vme_driver's remove
 * with the bus number and slot; -ENODEV if the driver has no remove. */
1504 static int vme_bus_remove(struct device *dev)
1506 struct vme_bridge *bridge;
1507 struct vme_driver *driver;
1508 int retval = -ENODEV;
1510 driver = dev_to_vme_driver(dev);
1511 bridge = dev_to_bridge(dev);
1513 if (driver->remove != NULL)
1514 retval = driver->remove(dev, bridge->num, vme_calc_slot(dev));
/* The "vme" bus type; ties the match/probe/remove callbacks above into
 * the Linux driver model. */
1519 struct bus_type vme_bus_type = {
1521 .match = vme_bus_match,
1522 .probe = vme_bus_probe,
1523 .remove = vme_bus_remove,
/* Module init: register the vme bus type with the driver core. */
1527 static int __init vme_init(void)
1529 return bus_register(&vme_bus_type);
/* Module exit: unregister the vme bus type. */
1532 static void __exit vme_exit(void)
1534 bus_unregister(&vme_bus_type);
/* Module metadata and entry points.
 * Fix: the author email was missing its closing '>'. */
1537 MODULE_DESCRIPTION("VME bridge driver framework");
1538 MODULE_AUTHOR("Martyn Welch <martyn.welch@ge.com>");
1539 MODULE_LICENSE("GPL");
1541 module_init(vme_init);
1542 module_exit(vme_exit);