/*
 * VME Bridge Framework
 *
 * Author: Martyn Welch <martyn.welch@ge.com>
 * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
 *
 * Based on work by Tom Armistead and Ajit Prem
 * Copyright 2004 Motorola Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
  15. #include <linux/module.h>
  16. #include <linux/moduleparam.h>
  17. #include <linux/mm.h>
  18. #include <linux/types.h>
  19. #include <linux/kernel.h>
  20. #include <linux/errno.h>
  21. #include <linux/pci.h>
  22. #include <linux/poll.h>
  23. #include <linux/highmem.h>
  24. #include <linux/interrupt.h>
  25. #include <linux/pagemap.h>
  26. #include <linux/device.h>
  27. #include <linux/dma-mapping.h>
  28. #include <linux/syscalls.h>
  29. #include <linux/mutex.h>
  30. #include <linux/spinlock.h>
  31. #include <linux/slab.h>
  32. #include <linux/vme.h>
  33. #include "vme_bridge.h"
  34. /* Bitmask and list of registered buses both protected by common mutex */
  35. static unsigned int vme_bus_numbers;
  36. static LIST_HEAD(vme_bus_list);
  37. static DEFINE_MUTEX(vme_buses_lock);
  38. static void __exit vme_exit(void);
  39. static int __init vme_init(void);
/* Map a generic struct device embedded in a vme_dev back to the containing
 * vme_dev. */
static struct vme_dev *dev_to_vme_dev(struct device *dev)
{
	return container_of(dev, struct vme_dev, dev);
}
  44. /*
  45. * Find the bridge that the resource is associated with.
  46. */
  47. static struct vme_bridge *find_bridge(struct vme_resource *resource)
  48. {
  49. /* Get list to search */
  50. switch (resource->type) {
  51. case VME_MASTER:
  52. return list_entry(resource->entry, struct vme_master_resource,
  53. list)->parent;
  54. break;
  55. case VME_SLAVE:
  56. return list_entry(resource->entry, struct vme_slave_resource,
  57. list)->parent;
  58. break;
  59. case VME_DMA:
  60. return list_entry(resource->entry, struct vme_dma_resource,
  61. list)->parent;
  62. break;
  63. case VME_LM:
  64. return list_entry(resource->entry, struct vme_lm_resource,
  65. list)->parent;
  66. break;
  67. default:
  68. printk(KERN_ERR "Unknown resource type\n");
  69. return NULL;
  70. break;
  71. }
  72. }
  73. /*
  74. * Allocate a contiguous block of memory for use by the driver. This is used to
  75. * create the buffers for the slave windows.
  76. */
  77. void *vme_alloc_consistent(struct vme_resource *resource, size_t size,
  78. dma_addr_t *dma)
  79. {
  80. struct vme_bridge *bridge;
  81. if (resource == NULL) {
  82. printk(KERN_ERR "No resource\n");
  83. return NULL;
  84. }
  85. bridge = find_bridge(resource);
  86. if (bridge == NULL) {
  87. printk(KERN_ERR "Can't find bridge\n");
  88. return NULL;
  89. }
  90. if (bridge->parent == NULL) {
  91. printk(KERN_ERR "Dev entry NULL for bridge %s\n", bridge->name);
  92. return NULL;
  93. }
  94. if (bridge->alloc_consistent == NULL) {
  95. printk(KERN_ERR "alloc_consistent not supported by bridge %s\n",
  96. bridge->name);
  97. return NULL;
  98. }
  99. return bridge->alloc_consistent(bridge->parent, size, dma);
  100. }
  101. EXPORT_SYMBOL(vme_alloc_consistent);
  102. /*
  103. * Free previously allocated contiguous block of memory.
  104. */
  105. void vme_free_consistent(struct vme_resource *resource, size_t size,
  106. void *vaddr, dma_addr_t dma)
  107. {
  108. struct vme_bridge *bridge;
  109. if (resource == NULL) {
  110. printk(KERN_ERR "No resource\n");
  111. return;
  112. }
  113. bridge = find_bridge(resource);
  114. if (bridge == NULL) {
  115. printk(KERN_ERR "Can't find bridge\n");
  116. return;
  117. }
  118. if (bridge->parent == NULL) {
  119. printk(KERN_ERR "Dev entry NULL for bridge %s\n", bridge->name);
  120. return;
  121. }
  122. if (bridge->free_consistent == NULL) {
  123. printk(KERN_ERR "free_consistent not supported by bridge %s\n",
  124. bridge->name);
  125. return;
  126. }
  127. bridge->free_consistent(bridge->parent, size, vaddr, dma);
  128. }
  129. EXPORT_SYMBOL(vme_free_consistent);
  130. size_t vme_get_size(struct vme_resource *resource)
  131. {
  132. int enabled, retval;
  133. unsigned long long base, size;
  134. dma_addr_t buf_base;
  135. u32 aspace, cycle, dwidth;
  136. switch (resource->type) {
  137. case VME_MASTER:
  138. retval = vme_master_get(resource, &enabled, &base, &size,
  139. &aspace, &cycle, &dwidth);
  140. return size;
  141. break;
  142. case VME_SLAVE:
  143. retval = vme_slave_get(resource, &enabled, &base, &size,
  144. &buf_base, &aspace, &cycle);
  145. return size;
  146. break;
  147. case VME_DMA:
  148. return 0;
  149. break;
  150. default:
  151. printk(KERN_ERR "Unknown resource type\n");
  152. return 0;
  153. break;
  154. }
  155. }
  156. EXPORT_SYMBOL(vme_get_size);
  157. static int vme_check_window(u32 aspace, unsigned long long vme_base,
  158. unsigned long long size)
  159. {
  160. int retval = 0;
  161. switch (aspace) {
  162. case VME_A16:
  163. if (((vme_base + size) > VME_A16_MAX) ||
  164. (vme_base > VME_A16_MAX))
  165. retval = -EFAULT;
  166. break;
  167. case VME_A24:
  168. if (((vme_base + size) > VME_A24_MAX) ||
  169. (vme_base > VME_A24_MAX))
  170. retval = -EFAULT;
  171. break;
  172. case VME_A32:
  173. if (((vme_base + size) > VME_A32_MAX) ||
  174. (vme_base > VME_A32_MAX))
  175. retval = -EFAULT;
  176. break;
  177. case VME_A64:
  178. /*
  179. * Any value held in an unsigned long long can be used as the
  180. * base
  181. */
  182. break;
  183. case VME_CRCSR:
  184. if (((vme_base + size) > VME_CRCSR_MAX) ||
  185. (vme_base > VME_CRCSR_MAX))
  186. retval = -EFAULT;
  187. break;
  188. case VME_USER1:
  189. case VME_USER2:
  190. case VME_USER3:
  191. case VME_USER4:
  192. /* User Defined */
  193. break;
  194. default:
  195. printk(KERN_ERR "Invalid address space\n");
  196. retval = -EINVAL;
  197. break;
  198. }
  199. return retval;
  200. }
/*
 * Request a slave image with specific attributes, return some unique
 * identifier.
 *
 * Walks the bridge's slave window list and claims the first window that
 * supports all of the requested address-space and cycle attributes and
 * is not already in use.  Returns a heap-allocated vme_resource handle
 * (freed by vme_slave_free()) or NULL on failure.
 */
struct vme_resource *vme_slave_request(struct vme_dev *vdev, u32 address,
	u32 cycle)
{
	struct vme_bridge *bridge;
	struct list_head *slave_pos = NULL;
	struct vme_slave_resource *allocated_image = NULL;
	struct vme_slave_resource *slave_image = NULL;
	struct vme_resource *resource = NULL;

	bridge = vdev->bridge;
	if (bridge == NULL) {
		printk(KERN_ERR "Can't find VME bus\n");
		goto err_bus;
	}

	/* Loop through slave resources */
	list_for_each(slave_pos, &bridge->slave_resources) {
		slave_image = list_entry(slave_pos,
			struct vme_slave_resource, list);

		if (slave_image == NULL) {
			printk(KERN_ERR "Registered NULL Slave resource\n");
			continue;
		}

		/* Find an unlocked and compatible image */
		/* The test-and-claim is done under the image mutex so a
		 * concurrent request cannot grab the same window between
		 * the check and the locked = 1 assignment. */
		mutex_lock(&slave_image->mtx);
		if (((slave_image->address_attr & address) == address) &&
			((slave_image->cycle_attr & cycle) == cycle) &&
			(slave_image->locked == 0)) {

			slave_image->locked = 1;
			mutex_unlock(&slave_image->mtx);
			allocated_image = slave_image;
			break;
		}
		mutex_unlock(&slave_image->mtx);
	}

	/* No free image */
	if (allocated_image == NULL)
		goto err_image;

	resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
	if (resource == NULL) {
		printk(KERN_WARNING "Unable to allocate resource structure\n");
		goto err_alloc;
	}
	resource->type = VME_SLAVE;
	resource->entry = &allocated_image->list;

	return resource;

err_alloc:
	/* Unlock image */
	mutex_lock(&slave_image->mtx);
	slave_image->locked = 0;
	mutex_unlock(&slave_image->mtx);
err_image:
err_bus:
	return NULL;
}
EXPORT_SYMBOL(vme_slave_request);
  259. int vme_slave_set(struct vme_resource *resource, int enabled,
  260. unsigned long long vme_base, unsigned long long size,
  261. dma_addr_t buf_base, u32 aspace, u32 cycle)
  262. {
  263. struct vme_bridge *bridge = find_bridge(resource);
  264. struct vme_slave_resource *image;
  265. int retval;
  266. if (resource->type != VME_SLAVE) {
  267. printk(KERN_ERR "Not a slave resource\n");
  268. return -EINVAL;
  269. }
  270. image = list_entry(resource->entry, struct vme_slave_resource, list);
  271. if (bridge->slave_set == NULL) {
  272. printk(KERN_ERR "Function not supported\n");
  273. return -ENOSYS;
  274. }
  275. if (!(((image->address_attr & aspace) == aspace) &&
  276. ((image->cycle_attr & cycle) == cycle))) {
  277. printk(KERN_ERR "Invalid attributes\n");
  278. return -EINVAL;
  279. }
  280. retval = vme_check_window(aspace, vme_base, size);
  281. if (retval)
  282. return retval;
  283. return bridge->slave_set(image, enabled, vme_base, size, buf_base,
  284. aspace, cycle);
  285. }
  286. EXPORT_SYMBOL(vme_slave_set);
  287. int vme_slave_get(struct vme_resource *resource, int *enabled,
  288. unsigned long long *vme_base, unsigned long long *size,
  289. dma_addr_t *buf_base, u32 *aspace, u32 *cycle)
  290. {
  291. struct vme_bridge *bridge = find_bridge(resource);
  292. struct vme_slave_resource *image;
  293. if (resource->type != VME_SLAVE) {
  294. printk(KERN_ERR "Not a slave resource\n");
  295. return -EINVAL;
  296. }
  297. image = list_entry(resource->entry, struct vme_slave_resource, list);
  298. if (bridge->slave_get == NULL) {
  299. printk(KERN_ERR "vme_slave_get not supported\n");
  300. return -EINVAL;
  301. }
  302. return bridge->slave_get(image, enabled, vme_base, size, buf_base,
  303. aspace, cycle);
  304. }
  305. EXPORT_SYMBOL(vme_slave_get);
  306. void vme_slave_free(struct vme_resource *resource)
  307. {
  308. struct vme_slave_resource *slave_image;
  309. if (resource->type != VME_SLAVE) {
  310. printk(KERN_ERR "Not a slave resource\n");
  311. return;
  312. }
  313. slave_image = list_entry(resource->entry, struct vme_slave_resource,
  314. list);
  315. if (slave_image == NULL) {
  316. printk(KERN_ERR "Can't find slave resource\n");
  317. return;
  318. }
  319. /* Unlock image */
  320. mutex_lock(&slave_image->mtx);
  321. if (slave_image->locked == 0)
  322. printk(KERN_ERR "Image is already free\n");
  323. slave_image->locked = 0;
  324. mutex_unlock(&slave_image->mtx);
  325. /* Free up resource memory */
  326. kfree(resource);
  327. }
  328. EXPORT_SYMBOL(vme_slave_free);
/*
 * Request a master image with specific attributes, return some unique
 * identifier.
 *
 * Walks the bridge's master window list and claims the first window
 * supporting the requested address-space, cycle and data-width
 * attributes.  Returns a heap-allocated vme_resource handle (freed by
 * vme_master_free()) or NULL on failure.
 */
struct vme_resource *vme_master_request(struct vme_dev *vdev, u32 address,
	u32 cycle, u32 dwidth)
{
	struct vme_bridge *bridge;
	struct list_head *master_pos = NULL;
	struct vme_master_resource *allocated_image = NULL;
	struct vme_master_resource *master_image = NULL;
	struct vme_resource *resource = NULL;

	bridge = vdev->bridge;
	if (bridge == NULL) {
		printk(KERN_ERR "Can't find VME bus\n");
		goto err_bus;
	}

	/* Loop through master resources */
	list_for_each(master_pos, &bridge->master_resources) {
		master_image = list_entry(master_pos,
			struct vme_master_resource, list);

		if (master_image == NULL) {
			printk(KERN_WARNING "Registered NULL master resource\n");
			continue;
		}

		/* Find an unlocked and compatible image */
		/* Test and claim under the image spinlock so a concurrent
		 * request cannot win the same window. */
		spin_lock(&master_image->lock);
		if (((master_image->address_attr & address) == address) &&
			((master_image->cycle_attr & cycle) == cycle) &&
			((master_image->width_attr & dwidth) == dwidth) &&
			(master_image->locked == 0)) {

			master_image->locked = 1;
			spin_unlock(&master_image->lock);
			allocated_image = master_image;
			break;
		}
		spin_unlock(&master_image->lock);
	}

	/* Check to see if we found a resource */
	if (allocated_image == NULL) {
		printk(KERN_ERR "Can't find a suitable resource\n");
		goto err_image;
	}

	resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
	if (resource == NULL) {
		printk(KERN_ERR "Unable to allocate resource structure\n");
		goto err_alloc;
	}
	resource->type = VME_MASTER;
	resource->entry = &allocated_image->list;

	return resource;

err_alloc:
	/* Unlock image */
	spin_lock(&master_image->lock);
	master_image->locked = 0;
	spin_unlock(&master_image->lock);
err_image:
err_bus:
	return NULL;
}
EXPORT_SYMBOL(vme_master_request);
  390. int vme_master_set(struct vme_resource *resource, int enabled,
  391. unsigned long long vme_base, unsigned long long size, u32 aspace,
  392. u32 cycle, u32 dwidth)
  393. {
  394. struct vme_bridge *bridge = find_bridge(resource);
  395. struct vme_master_resource *image;
  396. int retval;
  397. if (resource->type != VME_MASTER) {
  398. printk(KERN_ERR "Not a master resource\n");
  399. return -EINVAL;
  400. }
  401. image = list_entry(resource->entry, struct vme_master_resource, list);
  402. if (bridge->master_set == NULL) {
  403. printk(KERN_WARNING "vme_master_set not supported\n");
  404. return -EINVAL;
  405. }
  406. if (!(((image->address_attr & aspace) == aspace) &&
  407. ((image->cycle_attr & cycle) == cycle) &&
  408. ((image->width_attr & dwidth) == dwidth))) {
  409. printk(KERN_WARNING "Invalid attributes\n");
  410. return -EINVAL;
  411. }
  412. retval = vme_check_window(aspace, vme_base, size);
  413. if (retval)
  414. return retval;
  415. return bridge->master_set(image, enabled, vme_base, size, aspace,
  416. cycle, dwidth);
  417. }
  418. EXPORT_SYMBOL(vme_master_set);
  419. int vme_master_get(struct vme_resource *resource, int *enabled,
  420. unsigned long long *vme_base, unsigned long long *size, u32 *aspace,
  421. u32 *cycle, u32 *dwidth)
  422. {
  423. struct vme_bridge *bridge = find_bridge(resource);
  424. struct vme_master_resource *image;
  425. if (resource->type != VME_MASTER) {
  426. printk(KERN_ERR "Not a master resource\n");
  427. return -EINVAL;
  428. }
  429. image = list_entry(resource->entry, struct vme_master_resource, list);
  430. if (bridge->master_get == NULL) {
  431. printk(KERN_WARNING "vme_master_set not supported\n");
  432. return -EINVAL;
  433. }
  434. return bridge->master_get(image, enabled, vme_base, size, aspace,
  435. cycle, dwidth);
  436. }
  437. EXPORT_SYMBOL(vme_master_get);
  438. /*
  439. * Read data out of VME space into a buffer.
  440. */
  441. ssize_t vme_master_read(struct vme_resource *resource, void *buf, size_t count,
  442. loff_t offset)
  443. {
  444. struct vme_bridge *bridge = find_bridge(resource);
  445. struct vme_master_resource *image;
  446. size_t length;
  447. if (bridge->master_read == NULL) {
  448. printk(KERN_WARNING "Reading from resource not supported\n");
  449. return -EINVAL;
  450. }
  451. if (resource->type != VME_MASTER) {
  452. printk(KERN_ERR "Not a master resource\n");
  453. return -EINVAL;
  454. }
  455. image = list_entry(resource->entry, struct vme_master_resource, list);
  456. length = vme_get_size(resource);
  457. if (offset > length) {
  458. printk(KERN_WARNING "Invalid Offset\n");
  459. return -EFAULT;
  460. }
  461. if ((offset + count) > length)
  462. count = length - offset;
  463. return bridge->master_read(image, buf, count, offset);
  464. }
  465. EXPORT_SYMBOL(vme_master_read);
  466. /*
  467. * Write data out to VME space from a buffer.
  468. */
  469. ssize_t vme_master_write(struct vme_resource *resource, void *buf,
  470. size_t count, loff_t offset)
  471. {
  472. struct vme_bridge *bridge = find_bridge(resource);
  473. struct vme_master_resource *image;
  474. size_t length;
  475. if (bridge->master_write == NULL) {
  476. printk(KERN_WARNING "Writing to resource not supported\n");
  477. return -EINVAL;
  478. }
  479. if (resource->type != VME_MASTER) {
  480. printk(KERN_ERR "Not a master resource\n");
  481. return -EINVAL;
  482. }
  483. image = list_entry(resource->entry, struct vme_master_resource, list);
  484. length = vme_get_size(resource);
  485. if (offset > length) {
  486. printk(KERN_WARNING "Invalid Offset\n");
  487. return -EFAULT;
  488. }
  489. if ((offset + count) > length)
  490. count = length - offset;
  491. return bridge->master_write(image, buf, count, offset);
  492. }
  493. EXPORT_SYMBOL(vme_master_write);
  494. /*
  495. * Perform RMW cycle to provided location.
  496. */
  497. unsigned int vme_master_rmw(struct vme_resource *resource, unsigned int mask,
  498. unsigned int compare, unsigned int swap, loff_t offset)
  499. {
  500. struct vme_bridge *bridge = find_bridge(resource);
  501. struct vme_master_resource *image;
  502. if (bridge->master_rmw == NULL) {
  503. printk(KERN_WARNING "Writing to resource not supported\n");
  504. return -EINVAL;
  505. }
  506. if (resource->type != VME_MASTER) {
  507. printk(KERN_ERR "Not a master resource\n");
  508. return -EINVAL;
  509. }
  510. image = list_entry(resource->entry, struct vme_master_resource, list);
  511. return bridge->master_rmw(image, mask, compare, swap, offset);
  512. }
  513. EXPORT_SYMBOL(vme_master_rmw);
  514. void vme_master_free(struct vme_resource *resource)
  515. {
  516. struct vme_master_resource *master_image;
  517. if (resource->type != VME_MASTER) {
  518. printk(KERN_ERR "Not a master resource\n");
  519. return;
  520. }
  521. master_image = list_entry(resource->entry, struct vme_master_resource,
  522. list);
  523. if (master_image == NULL) {
  524. printk(KERN_ERR "Can't find master resource\n");
  525. return;
  526. }
  527. /* Unlock image */
  528. spin_lock(&master_image->lock);
  529. if (master_image->locked == 0)
  530. printk(KERN_ERR "Image is already free\n");
  531. master_image->locked = 0;
  532. spin_unlock(&master_image->lock);
  533. /* Free up resource memory */
  534. kfree(resource);
  535. }
  536. EXPORT_SYMBOL(vme_master_free);
/*
 * Request a DMA controller with specific attributes, return some unique
 * identifier.
 *
 * Claims the first controller on the bridge's DMA list that supports
 * the requested routing and is not in use.  Returns a heap-allocated
 * vme_resource handle (freed by vme_dma_free()) or NULL on failure.
 */
struct vme_resource *vme_dma_request(struct vme_dev *vdev, u32 route)
{
	struct vme_bridge *bridge;
	struct list_head *dma_pos = NULL;
	struct vme_dma_resource *allocated_ctrlr = NULL;
	struct vme_dma_resource *dma_ctrlr = NULL;
	struct vme_resource *resource = NULL;

	/* XXX Not checking resource attributes */
	printk(KERN_ERR "No VME resource Attribute tests done\n");

	bridge = vdev->bridge;
	if (bridge == NULL) {
		printk(KERN_ERR "Can't find VME bus\n");
		goto err_bus;
	}

	/* Loop through DMA resources */
	list_for_each(dma_pos, &bridge->dma_resources) {
		dma_ctrlr = list_entry(dma_pos,
			struct vme_dma_resource, list);

		if (dma_ctrlr == NULL) {
			printk(KERN_ERR "Registered NULL DMA resource\n");
			continue;
		}

		/* Find an unlocked and compatible controller */
		/* Test-and-claim under the controller mutex so two
		 * requesters cannot both win the same controller. */
		mutex_lock(&dma_ctrlr->mtx);
		if (((dma_ctrlr->route_attr & route) == route) &&
			(dma_ctrlr->locked == 0)) {

			dma_ctrlr->locked = 1;
			mutex_unlock(&dma_ctrlr->mtx);
			allocated_ctrlr = dma_ctrlr;
			break;
		}
		mutex_unlock(&dma_ctrlr->mtx);
	}

	/* Check to see if we found a resource */
	if (allocated_ctrlr == NULL)
		goto err_ctrlr;

	resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
	if (resource == NULL) {
		printk(KERN_WARNING "Unable to allocate resource structure\n");
		goto err_alloc;
	}
	resource->type = VME_DMA;
	resource->entry = &allocated_ctrlr->list;

	return resource;

err_alloc:
	/* Unlock image */
	mutex_lock(&dma_ctrlr->mtx);
	dma_ctrlr->locked = 0;
	mutex_unlock(&dma_ctrlr->mtx);
err_ctrlr:
err_bus:
	return NULL;
}
EXPORT_SYMBOL(vme_dma_request);
  595. /*
  596. * Start new list
  597. */
  598. struct vme_dma_list *vme_new_dma_list(struct vme_resource *resource)
  599. {
  600. struct vme_dma_resource *ctrlr;
  601. struct vme_dma_list *dma_list;
  602. if (resource->type != VME_DMA) {
  603. printk(KERN_ERR "Not a DMA resource\n");
  604. return NULL;
  605. }
  606. ctrlr = list_entry(resource->entry, struct vme_dma_resource, list);
  607. dma_list = kmalloc(sizeof(struct vme_dma_list), GFP_KERNEL);
  608. if (dma_list == NULL) {
  609. printk(KERN_ERR "Unable to allocate memory for new dma list\n");
  610. return NULL;
  611. }
  612. INIT_LIST_HEAD(&dma_list->entries);
  613. dma_list->parent = ctrlr;
  614. mutex_init(&dma_list->mtx);
  615. return dma_list;
  616. }
  617. EXPORT_SYMBOL(vme_new_dma_list);
  618. /*
  619. * Create "Pattern" type attributes
  620. */
  621. struct vme_dma_attr *vme_dma_pattern_attribute(u32 pattern, u32 type)
  622. {
  623. struct vme_dma_attr *attributes;
  624. struct vme_dma_pattern *pattern_attr;
  625. attributes = kmalloc(sizeof(struct vme_dma_attr), GFP_KERNEL);
  626. if (attributes == NULL) {
  627. printk(KERN_ERR "Unable to allocate memory for attributes structure\n");
  628. goto err_attr;
  629. }
  630. pattern_attr = kmalloc(sizeof(struct vme_dma_pattern), GFP_KERNEL);
  631. if (pattern_attr == NULL) {
  632. printk(KERN_ERR "Unable to allocate memory for pattern attributes\n");
  633. goto err_pat;
  634. }
  635. attributes->type = VME_DMA_PATTERN;
  636. attributes->private = (void *)pattern_attr;
  637. pattern_attr->pattern = pattern;
  638. pattern_attr->type = type;
  639. return attributes;
  640. err_pat:
  641. kfree(attributes);
  642. err_attr:
  643. return NULL;
  644. }
  645. EXPORT_SYMBOL(vme_dma_pattern_attribute);
  646. /*
  647. * Create "PCI" type attributes
  648. */
  649. struct vme_dma_attr *vme_dma_pci_attribute(dma_addr_t address)
  650. {
  651. struct vme_dma_attr *attributes;
  652. struct vme_dma_pci *pci_attr;
  653. /* XXX Run some sanity checks here */
  654. attributes = kmalloc(sizeof(struct vme_dma_attr), GFP_KERNEL);
  655. if (attributes == NULL) {
  656. printk(KERN_ERR "Unable to allocate memory for attributes structure\n");
  657. goto err_attr;
  658. }
  659. pci_attr = kmalloc(sizeof(struct vme_dma_pci), GFP_KERNEL);
  660. if (pci_attr == NULL) {
  661. printk(KERN_ERR "Unable to allocate memory for pci attributes\n");
  662. goto err_pci;
  663. }
  664. attributes->type = VME_DMA_PCI;
  665. attributes->private = (void *)pci_attr;
  666. pci_attr->address = address;
  667. return attributes;
  668. err_pci:
  669. kfree(attributes);
  670. err_attr:
  671. return NULL;
  672. }
  673. EXPORT_SYMBOL(vme_dma_pci_attribute);
  674. /*
  675. * Create "VME" type attributes
  676. */
  677. struct vme_dma_attr *vme_dma_vme_attribute(unsigned long long address,
  678. u32 aspace, u32 cycle, u32 dwidth)
  679. {
  680. struct vme_dma_attr *attributes;
  681. struct vme_dma_vme *vme_attr;
  682. attributes = kmalloc(
  683. sizeof(struct vme_dma_attr), GFP_KERNEL);
  684. if (attributes == NULL) {
  685. printk(KERN_ERR "Unable to allocate memory for attributes structure\n");
  686. goto err_attr;
  687. }
  688. vme_attr = kmalloc(sizeof(struct vme_dma_vme), GFP_KERNEL);
  689. if (vme_attr == NULL) {
  690. printk(KERN_ERR "Unable to allocate memory for vme attributes\n");
  691. goto err_vme;
  692. }
  693. attributes->type = VME_DMA_VME;
  694. attributes->private = (void *)vme_attr;
  695. vme_attr->address = address;
  696. vme_attr->aspace = aspace;
  697. vme_attr->cycle = cycle;
  698. vme_attr->dwidth = dwidth;
  699. return attributes;
  700. err_vme:
  701. kfree(attributes);
  702. err_attr:
  703. return NULL;
  704. }
  705. EXPORT_SYMBOL(vme_dma_vme_attribute);
/*
 * Free attribute
 *
 * Releases both the type-specific private data and the wrapper that
 * were allocated by one of the vme_dma_*_attribute() constructors.
 */
void vme_dma_free_attribute(struct vme_dma_attr *attributes)
{
	kfree(attributes->private);
	kfree(attributes);
}
EXPORT_SYMBOL(vme_dma_free_attribute);
/*
 * Append a src->dest transfer of "count" bytes to a DMA link list via
 * the bridge driver.  Fails with -EINVAL if the list mutex is already
 * held (list submitted for execution) or link-list DMA is unsupported.
 */
int vme_dma_list_add(struct vme_dma_list *list, struct vme_dma_attr *src,
	struct vme_dma_attr *dest, size_t count)
{
	struct vme_bridge *bridge = list->parent->parent;
	int retval;

	if (bridge->dma_list_add == NULL) {
		printk(KERN_WARNING "Link List DMA generation not supported\n");
		return -EINVAL;
	}

	/* trylock: a held mutex means the list is (or may be) in flight,
	 * so entries must not be appended to it. */
	if (!mutex_trylock(&list->mtx)) {
		printk(KERN_ERR "Link List already submitted\n");
		return -EINVAL;
	}

	retval = bridge->dma_list_add(list, src, dest, count);

	mutex_unlock(&list->mtx);

	return retval;
}
EXPORT_SYMBOL(vme_dma_list_add);
  733. int vme_dma_list_exec(struct vme_dma_list *list)
  734. {
  735. struct vme_bridge *bridge = list->parent->parent;
  736. int retval;
  737. if (bridge->dma_list_exec == NULL) {
  738. printk(KERN_ERR "Link List DMA execution not supported\n");
  739. return -EINVAL;
  740. }
  741. mutex_lock(&list->mtx);
  742. retval = bridge->dma_list_exec(list);
  743. mutex_unlock(&list->mtx);
  744. return retval;
  745. }
  746. EXPORT_SYMBOL(vme_dma_list_exec);
  747. int vme_dma_list_free(struct vme_dma_list *list)
  748. {
  749. struct vme_bridge *bridge = list->parent->parent;
  750. int retval;
  751. if (bridge->dma_list_empty == NULL) {
  752. printk(KERN_WARNING "Emptying of Link Lists not supported\n");
  753. return -EINVAL;
  754. }
  755. if (!mutex_trylock(&list->mtx)) {
  756. printk(KERN_ERR "Link List in use\n");
  757. return -EINVAL;
  758. }
  759. /*
  760. * Empty out all of the entries from the dma list. We need to go to the
  761. * low level driver as dma entries are driver specific.
  762. */
  763. retval = bridge->dma_list_empty(list);
  764. if (retval) {
  765. printk(KERN_ERR "Unable to empty link-list entries\n");
  766. mutex_unlock(&list->mtx);
  767. return retval;
  768. }
  769. mutex_unlock(&list->mtx);
  770. kfree(list);
  771. return retval;
  772. }
  773. EXPORT_SYMBOL(vme_dma_list_free);
  774. int vme_dma_free(struct vme_resource *resource)
  775. {
  776. struct vme_dma_resource *ctrlr;
  777. if (resource->type != VME_DMA) {
  778. printk(KERN_ERR "Not a DMA resource\n");
  779. return -EINVAL;
  780. }
  781. ctrlr = list_entry(resource->entry, struct vme_dma_resource, list);
  782. if (!mutex_trylock(&ctrlr->mtx)) {
  783. printk(KERN_ERR "Resource busy, can't free\n");
  784. return -EBUSY;
  785. }
  786. if (!(list_empty(&ctrlr->pending) && list_empty(&ctrlr->running))) {
  787. printk(KERN_WARNING "Resource still processing transfers\n");
  788. mutex_unlock(&ctrlr->mtx);
  789. return -EBUSY;
  790. }
  791. ctrlr->locked = 0;
  792. mutex_unlock(&ctrlr->mtx);
  793. kfree(resource);
  794. return 0;
  795. }
  796. EXPORT_SYMBOL(vme_dma_free);
  797. void vme_irq_handler(struct vme_bridge *bridge, int level, int statid)
  798. {
  799. void (*call)(int, int, void *);
  800. void *priv_data;
  801. call = bridge->irq[level - 1].callback[statid].func;
  802. priv_data = bridge->irq[level - 1].callback[statid].priv_data;
  803. if (call != NULL)
  804. call(level, statid, priv_data);
  805. else
  806. printk(KERN_WARNING "Spurilous VME interrupt, level:%x, vector:%x\n",
  807. level, statid);
  808. }
  809. EXPORT_SYMBOL(vme_irq_handler);
  810. int vme_irq_request(struct vme_dev *vdev, int level, int statid,
  811. void (*callback)(int, int, void *),
  812. void *priv_data)
  813. {
  814. struct vme_bridge *bridge;
  815. bridge = vdev->bridge;
  816. if (bridge == NULL) {
  817. printk(KERN_ERR "Can't find VME bus\n");
  818. return -EINVAL;
  819. }
  820. if ((level < 1) || (level > 7)) {
  821. printk(KERN_ERR "Invalid interrupt level\n");
  822. return -EINVAL;
  823. }
  824. if (bridge->irq_set == NULL) {
  825. printk(KERN_ERR "Configuring interrupts not supported\n");
  826. return -EINVAL;
  827. }
  828. mutex_lock(&bridge->irq_mtx);
  829. if (bridge->irq[level - 1].callback[statid].func) {
  830. mutex_unlock(&bridge->irq_mtx);
  831. printk(KERN_WARNING "VME Interrupt already taken\n");
  832. return -EBUSY;
  833. }
  834. bridge->irq[level - 1].count++;
  835. bridge->irq[level - 1].callback[statid].priv_data = priv_data;
  836. bridge->irq[level - 1].callback[statid].func = callback;
  837. /* Enable IRQ level */
  838. bridge->irq_set(bridge, level, 1, 1);
  839. mutex_unlock(&bridge->irq_mtx);
  840. return 0;
  841. }
  842. EXPORT_SYMBOL(vme_irq_request);
  843. void vme_irq_free(struct vme_dev *vdev, int level, int statid)
  844. {
  845. struct vme_bridge *bridge;
  846. bridge = vdev->bridge;
  847. if (bridge == NULL) {
  848. printk(KERN_ERR "Can't find VME bus\n");
  849. return;
  850. }
  851. if ((level < 1) || (level > 7)) {
  852. printk(KERN_ERR "Invalid interrupt level\n");
  853. return;
  854. }
  855. if (bridge->irq_set == NULL) {
  856. printk(KERN_ERR "Configuring interrupts not supported\n");
  857. return;
  858. }
  859. mutex_lock(&bridge->irq_mtx);
  860. bridge->irq[level - 1].count--;
  861. /* Disable IRQ level if no more interrupts attached at this level*/
  862. if (bridge->irq[level - 1].count == 0)
  863. bridge->irq_set(bridge, level, 0, 1);
  864. bridge->irq[level - 1].callback[statid].func = NULL;
  865. bridge->irq[level - 1].callback[statid].priv_data = NULL;
  866. mutex_unlock(&bridge->irq_mtx);
  867. }
  868. EXPORT_SYMBOL(vme_irq_free);
  869. int vme_irq_generate(struct vme_dev *vdev, int level, int statid)
  870. {
  871. struct vme_bridge *bridge;
  872. bridge = vdev->bridge;
  873. if (bridge == NULL) {
  874. printk(KERN_ERR "Can't find VME bus\n");
  875. return -EINVAL;
  876. }
  877. if ((level < 1) || (level > 7)) {
  878. printk(KERN_WARNING "Invalid interrupt level\n");
  879. return -EINVAL;
  880. }
  881. if (bridge->irq_generate == NULL) {
  882. printk(KERN_WARNING "Interrupt generation not supported\n");
  883. return -EINVAL;
  884. }
  885. return bridge->irq_generate(bridge, level, statid);
  886. }
  887. EXPORT_SYMBOL(vme_irq_generate);
  888. /*
  889. * Request the location monitor, return resource or NULL
  890. */
  891. struct vme_resource *vme_lm_request(struct vme_dev *vdev)
  892. {
  893. struct vme_bridge *bridge;
  894. struct list_head *lm_pos = NULL;
  895. struct vme_lm_resource *allocated_lm = NULL;
  896. struct vme_lm_resource *lm = NULL;
  897. struct vme_resource *resource = NULL;
  898. bridge = vdev->bridge;
  899. if (bridge == NULL) {
  900. printk(KERN_ERR "Can't find VME bus\n");
  901. goto err_bus;
  902. }
  903. /* Loop through DMA resources */
  904. list_for_each(lm_pos, &bridge->lm_resources) {
  905. lm = list_entry(lm_pos,
  906. struct vme_lm_resource, list);
  907. if (lm == NULL) {
  908. printk(KERN_ERR "Registered NULL Location Monitor resource\n");
  909. continue;
  910. }
  911. /* Find an unlocked controller */
  912. mutex_lock(&lm->mtx);
  913. if (lm->locked == 0) {
  914. lm->locked = 1;
  915. mutex_unlock(&lm->mtx);
  916. allocated_lm = lm;
  917. break;
  918. }
  919. mutex_unlock(&lm->mtx);
  920. }
  921. /* Check to see if we found a resource */
  922. if (allocated_lm == NULL)
  923. goto err_lm;
  924. resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
  925. if (resource == NULL) {
  926. printk(KERN_ERR "Unable to allocate resource structure\n");
  927. goto err_alloc;
  928. }
  929. resource->type = VME_LM;
  930. resource->entry = &allocated_lm->list;
  931. return resource;
  932. err_alloc:
  933. /* Unlock image */
  934. mutex_lock(&lm->mtx);
  935. lm->locked = 0;
  936. mutex_unlock(&lm->mtx);
  937. err_lm:
  938. err_bus:
  939. return NULL;
  940. }
  941. EXPORT_SYMBOL(vme_lm_request);
  942. int vme_lm_count(struct vme_resource *resource)
  943. {
  944. struct vme_lm_resource *lm;
  945. if (resource->type != VME_LM) {
  946. printk(KERN_ERR "Not a Location Monitor resource\n");
  947. return -EINVAL;
  948. }
  949. lm = list_entry(resource->entry, struct vme_lm_resource, list);
  950. return lm->monitors;
  951. }
  952. EXPORT_SYMBOL(vme_lm_count);
  953. int vme_lm_set(struct vme_resource *resource, unsigned long long lm_base,
  954. u32 aspace, u32 cycle)
  955. {
  956. struct vme_bridge *bridge = find_bridge(resource);
  957. struct vme_lm_resource *lm;
  958. if (resource->type != VME_LM) {
  959. printk(KERN_ERR "Not a Location Monitor resource\n");
  960. return -EINVAL;
  961. }
  962. lm = list_entry(resource->entry, struct vme_lm_resource, list);
  963. if (bridge->lm_set == NULL) {
  964. printk(KERN_ERR "vme_lm_set not supported\n");
  965. return -EINVAL;
  966. }
  967. return bridge->lm_set(lm, lm_base, aspace, cycle);
  968. }
  969. EXPORT_SYMBOL(vme_lm_set);
  970. int vme_lm_get(struct vme_resource *resource, unsigned long long *lm_base,
  971. u32 *aspace, u32 *cycle)
  972. {
  973. struct vme_bridge *bridge = find_bridge(resource);
  974. struct vme_lm_resource *lm;
  975. if (resource->type != VME_LM) {
  976. printk(KERN_ERR "Not a Location Monitor resource\n");
  977. return -EINVAL;
  978. }
  979. lm = list_entry(resource->entry, struct vme_lm_resource, list);
  980. if (bridge->lm_get == NULL) {
  981. printk(KERN_ERR "vme_lm_get not supported\n");
  982. return -EINVAL;
  983. }
  984. return bridge->lm_get(lm, lm_base, aspace, cycle);
  985. }
  986. EXPORT_SYMBOL(vme_lm_get);
  987. int vme_lm_attach(struct vme_resource *resource, int monitor,
  988. void (*callback)(int))
  989. {
  990. struct vme_bridge *bridge = find_bridge(resource);
  991. struct vme_lm_resource *lm;
  992. if (resource->type != VME_LM) {
  993. printk(KERN_ERR "Not a Location Monitor resource\n");
  994. return -EINVAL;
  995. }
  996. lm = list_entry(resource->entry, struct vme_lm_resource, list);
  997. if (bridge->lm_attach == NULL) {
  998. printk(KERN_ERR "vme_lm_attach not supported\n");
  999. return -EINVAL;
  1000. }
  1001. return bridge->lm_attach(lm, monitor, callback);
  1002. }
  1003. EXPORT_SYMBOL(vme_lm_attach);
  1004. int vme_lm_detach(struct vme_resource *resource, int monitor)
  1005. {
  1006. struct vme_bridge *bridge = find_bridge(resource);
  1007. struct vme_lm_resource *lm;
  1008. if (resource->type != VME_LM) {
  1009. printk(KERN_ERR "Not a Location Monitor resource\n");
  1010. return -EINVAL;
  1011. }
  1012. lm = list_entry(resource->entry, struct vme_lm_resource, list);
  1013. if (bridge->lm_detach == NULL) {
  1014. printk(KERN_ERR "vme_lm_detach not supported\n");
  1015. return -EINVAL;
  1016. }
  1017. return bridge->lm_detach(lm, monitor);
  1018. }
  1019. EXPORT_SYMBOL(vme_lm_detach);
  1020. void vme_lm_free(struct vme_resource *resource)
  1021. {
  1022. struct vme_lm_resource *lm;
  1023. if (resource->type != VME_LM) {
  1024. printk(KERN_ERR "Not a Location Monitor resource\n");
  1025. return;
  1026. }
  1027. lm = list_entry(resource->entry, struct vme_lm_resource, list);
  1028. mutex_lock(&lm->mtx);
  1029. /* XXX
  1030. * Check to see that there aren't any callbacks still attached, if
  1031. * there are we should probably be detaching them!
  1032. */
  1033. lm->locked = 0;
  1034. mutex_unlock(&lm->mtx);
  1035. kfree(resource);
  1036. }
  1037. EXPORT_SYMBOL(vme_lm_free);
  1038. int vme_slot_get(struct vme_dev *vdev)
  1039. {
  1040. struct vme_bridge *bridge;
  1041. bridge = vdev->bridge;
  1042. if (bridge == NULL) {
  1043. printk(KERN_ERR "Can't find VME bus\n");
  1044. return -EINVAL;
  1045. }
  1046. if (bridge->slot_get == NULL) {
  1047. printk(KERN_WARNING "vme_slot_get not supported\n");
  1048. return -EINVAL;
  1049. }
  1050. return bridge->slot_get(bridge);
  1051. }
  1052. EXPORT_SYMBOL(vme_slot_get);
/* - Bridge Registration --------------------------------------------------- */

/*
 * Device-model release callback: frees the vme_dev embedding this struct
 * device.  Installed as dev.release in __vme_register_driver_bus(), so it
 * runs when the last reference to the device is dropped.
 */
static void vme_dev_release(struct device *dev)
{
	kfree(dev_to_vme_dev(dev));
}
  1058. int vme_register_bridge(struct vme_bridge *bridge)
  1059. {
  1060. int i;
  1061. int ret = -1;
  1062. mutex_lock(&vme_buses_lock);
  1063. for (i = 0; i < sizeof(vme_bus_numbers) * 8; i++) {
  1064. if ((vme_bus_numbers & (1 << i)) == 0) {
  1065. vme_bus_numbers |= (1 << i);
  1066. bridge->num = i;
  1067. INIT_LIST_HEAD(&bridge->devices);
  1068. list_add_tail(&bridge->bus_list, &vme_bus_list);
  1069. ret = 0;
  1070. break;
  1071. }
  1072. }
  1073. mutex_unlock(&vme_buses_lock);
  1074. return ret;
  1075. }
  1076. EXPORT_SYMBOL(vme_register_bridge);
  1077. void vme_unregister_bridge(struct vme_bridge *bridge)
  1078. {
  1079. struct vme_dev *vdev;
  1080. struct vme_dev *tmp;
  1081. mutex_lock(&vme_buses_lock);
  1082. vme_bus_numbers &= ~(1 << bridge->num);
  1083. list_for_each_entry_safe(vdev, tmp, &bridge->devices, bridge_list) {
  1084. list_del(&vdev->drv_list);
  1085. list_del(&vdev->bridge_list);
  1086. device_unregister(&vdev->dev);
  1087. }
  1088. list_del(&bridge->bus_list);
  1089. mutex_unlock(&vme_buses_lock);
  1090. }
  1091. EXPORT_SYMBOL(vme_unregister_bridge);
  1092. /* - Driver Registration --------------------------------------------------- */
  1093. static int __vme_register_driver_bus(struct vme_driver *drv,
  1094. struct vme_bridge *bridge, unsigned int ndevs)
  1095. {
  1096. int err;
  1097. unsigned int i;
  1098. struct vme_dev *vdev;
  1099. struct vme_dev *tmp;
  1100. for (i = 0; i < ndevs; i++) {
  1101. vdev = kzalloc(sizeof(struct vme_dev), GFP_KERNEL);
  1102. if (!vdev) {
  1103. err = -ENOMEM;
  1104. goto err_devalloc;
  1105. }
  1106. vdev->num = i;
  1107. vdev->bridge = bridge;
  1108. vdev->dev.platform_data = drv;
  1109. vdev->dev.release = vme_dev_release;
  1110. vdev->dev.parent = bridge->parent;
  1111. vdev->dev.bus = &vme_bus_type;
  1112. dev_set_name(&vdev->dev, "%s.%u-%u", drv->name, bridge->num,
  1113. vdev->num);
  1114. err = device_register(&vdev->dev);
  1115. if (err)
  1116. goto err_reg;
  1117. if (vdev->dev.platform_data) {
  1118. list_add_tail(&vdev->drv_list, &drv->devices);
  1119. list_add_tail(&vdev->bridge_list, &bridge->devices);
  1120. } else
  1121. device_unregister(&vdev->dev);
  1122. }
  1123. return 0;
  1124. err_reg:
  1125. put_device(&vdev->dev);
  1126. kfree(vdev);
  1127. err_devalloc:
  1128. list_for_each_entry_safe(vdev, tmp, &drv->devices, drv_list) {
  1129. list_del(&vdev->drv_list);
  1130. list_del(&vdev->bridge_list);
  1131. device_unregister(&vdev->dev);
  1132. }
  1133. return err;
  1134. }
/*
 * Register a driver's devices on every bridge currently on the VME bus
 * list.  Stops and returns the first error reported by a bridge; returns
 * 0 when all bridges succeed (or the list is empty).
 */
static int __vme_register_driver(struct vme_driver *drv, unsigned int ndevs)
{
	struct vme_bridge *bridge;
	int err = 0;

	mutex_lock(&vme_buses_lock);
	list_for_each_entry(bridge, &vme_bus_list, bus_list) {
		/*
		 * This cannot cause trouble as we already have vme_buses_lock
		 * and if the bridge is removed, it will have to go through
		 * vme_unregister_bridge() to do it (which calls remove() on
		 * the bridge which in turn tries to acquire vme_buses_lock and
		 * will have to wait).
		 */
		err = __vme_register_driver_bus(drv, bridge, ndevs);
		if (err)
			break;
	}
	mutex_unlock(&vme_buses_lock);
	return err;
}
  1155. int vme_register_driver(struct vme_driver *drv, unsigned int ndevs)
  1156. {
  1157. int err;
  1158. drv->driver.name = drv->name;
  1159. drv->driver.bus = &vme_bus_type;
  1160. INIT_LIST_HEAD(&drv->devices);
  1161. err = driver_register(&drv->driver);
  1162. if (err)
  1163. return err;
  1164. err = __vme_register_driver(drv, ndevs);
  1165. if (err)
  1166. driver_unregister(&drv->driver);
  1167. return err;
  1168. }
  1169. EXPORT_SYMBOL(vme_register_driver);
  1170. void vme_unregister_driver(struct vme_driver *drv)
  1171. {
  1172. struct vme_dev *dev, *dev_tmp;
  1173. mutex_lock(&vme_buses_lock);
  1174. list_for_each_entry_safe(dev, dev_tmp, &drv->devices, drv_list) {
  1175. list_del(&dev->drv_list);
  1176. list_del(&dev->bridge_list);
  1177. device_unregister(&dev->dev);
  1178. }
  1179. mutex_unlock(&vme_buses_lock);
  1180. driver_unregister(&drv->driver);
  1181. }
  1182. EXPORT_SYMBOL(vme_unregister_driver);
/* - Bus Registration ------------------------------------------------------ */

/*
 * Bus match callback.  A device matches only the driver it was created
 * for (platform_data points at that driver) and only if the driver's
 * match() hook accepts it.  On rejection platform_data is cleared, which
 * makes __vme_register_driver_bus() unregister the device after
 * device_register() returns.
 */
static int vme_bus_match(struct device *dev, struct device_driver *drv)
{
	struct vme_driver *vme_drv;

	vme_drv = container_of(drv, struct vme_driver, driver);

	if (dev->platform_data == vme_drv) {
		struct vme_dev *vdev = dev_to_vme_dev(dev);

		if (vme_drv->match && vme_drv->match(vdev))
			return 1;

		dev->platform_data = NULL;
	}
	return 0;
}
  1196. static int vme_bus_probe(struct device *dev)
  1197. {
  1198. int retval = -ENODEV;
  1199. struct vme_driver *driver;
  1200. struct vme_dev *vdev = dev_to_vme_dev(dev);
  1201. driver = dev->platform_data;
  1202. if (driver->probe != NULL)
  1203. retval = driver->probe(vdev);
  1204. return retval;
  1205. }
  1206. static int vme_bus_remove(struct device *dev)
  1207. {
  1208. int retval = -ENODEV;
  1209. struct vme_driver *driver;
  1210. struct vme_dev *vdev = dev_to_vme_dev(dev);
  1211. driver = dev->platform_data;
  1212. if (driver->remove != NULL)
  1213. retval = driver->remove(vdev);
  1214. return retval;
  1215. }
/* The VME bus type registered with the driver core in vme_init(). */
struct bus_type vme_bus_type = {
	.name = "vme",
	.match = vme_bus_match,
	.probe = vme_bus_probe,
	.remove = vme_bus_remove,
};
EXPORT_SYMBOL(vme_bus_type);
  1223. static int __init vme_init(void)
  1224. {
  1225. return bus_register(&vme_bus_type);
  1226. }
  1227. static void __exit vme_exit(void)
  1228. {
  1229. bus_unregister(&vme_bus_type);
  1230. }
  1231. MODULE_DESCRIPTION("VME bridge driver framework");
  1232. MODULE_AUTHOR("Martyn Welch <martyn.welch@ge.com");
  1233. MODULE_LICENSE("GPL");
  1234. module_init(vme_init);
  1235. module_exit(vme_exit);