vme.c 34 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518
  1. /*
  2. * VME Bridge Framework
  3. *
  4. * Author: Martyn Welch <martyn.welch@ge.com>
  5. * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
  6. *
  7. * Based on work by Tom Armistead and Ajit Prem
  8. * Copyright 2004 Motorola Inc.
  9. *
  10. * This program is free software; you can redistribute it and/or modify it
  11. * under the terms of the GNU General Public License as published by the
  12. * Free Software Foundation; either version 2 of the License, or (at your
  13. * option) any later version.
  14. */
  15. #include <linux/module.h>
  16. #include <linux/moduleparam.h>
  17. #include <linux/mm.h>
  18. #include <linux/types.h>
  19. #include <linux/kernel.h>
  20. #include <linux/errno.h>
  21. #include <linux/pci.h>
  22. #include <linux/poll.h>
  23. #include <linux/highmem.h>
  24. #include <linux/interrupt.h>
  25. #include <linux/pagemap.h>
  26. #include <linux/device.h>
  27. #include <linux/dma-mapping.h>
  28. #include <linux/syscalls.h>
  29. #include <linux/mutex.h>
  30. #include <linux/spinlock.h>
  31. #include <linux/slab.h>
  32. #include <linux/vme.h>
  33. #include "vme_bridge.h"
  34. /* Bitmask and list of registered buses both protected by common mutex */
  35. static unsigned int vme_bus_numbers;
  36. static LIST_HEAD(vme_bus_list);
  37. static DEFINE_MUTEX(vme_buses_lock);
  38. static void __exit vme_exit(void);
  39. static int __init vme_init(void);
  40. static struct vme_dev *dev_to_vme_dev(struct device *dev)
  41. {
  42. return container_of(dev, struct vme_dev, dev);
  43. }
  44. /*
  45. * Find the bridge that the resource is associated with.
  46. */
  47. static struct vme_bridge *find_bridge(struct vme_resource *resource)
  48. {
  49. /* Get list to search */
  50. switch (resource->type) {
  51. case VME_MASTER:
  52. return list_entry(resource->entry, struct vme_master_resource,
  53. list)->parent;
  54. break;
  55. case VME_SLAVE:
  56. return list_entry(resource->entry, struct vme_slave_resource,
  57. list)->parent;
  58. break;
  59. case VME_DMA:
  60. return list_entry(resource->entry, struct vme_dma_resource,
  61. list)->parent;
  62. break;
  63. case VME_LM:
  64. return list_entry(resource->entry, struct vme_lm_resource,
  65. list)->parent;
  66. break;
  67. default:
  68. printk(KERN_ERR "Unknown resource type\n");
  69. return NULL;
  70. break;
  71. }
  72. }
  73. /*
  74. * Allocate a contiguous block of memory for use by the driver. This is used to
  75. * create the buffers for the slave windows.
  76. */
  77. void *vme_alloc_consistent(struct vme_resource *resource, size_t size,
  78. dma_addr_t *dma)
  79. {
  80. struct vme_bridge *bridge;
  81. if (resource == NULL) {
  82. printk(KERN_ERR "No resource\n");
  83. return NULL;
  84. }
  85. bridge = find_bridge(resource);
  86. if (bridge == NULL) {
  87. printk(KERN_ERR "Can't find bridge\n");
  88. return NULL;
  89. }
  90. if (bridge->parent == NULL) {
  91. printk(KERN_ERR "Dev entry NULL for bridge %s\n", bridge->name);
  92. return NULL;
  93. }
  94. if (bridge->alloc_consistent == NULL) {
  95. printk(KERN_ERR "alloc_consistent not supported by bridge %s\n",
  96. bridge->name);
  97. return NULL;
  98. }
  99. return bridge->alloc_consistent(bridge->parent, size, dma);
  100. }
  101. EXPORT_SYMBOL(vme_alloc_consistent);
  102. /*
  103. * Free previously allocated contiguous block of memory.
  104. */
  105. void vme_free_consistent(struct vme_resource *resource, size_t size,
  106. void *vaddr, dma_addr_t dma)
  107. {
  108. struct vme_bridge *bridge;
  109. if (resource == NULL) {
  110. printk(KERN_ERR "No resource\n");
  111. return;
  112. }
  113. bridge = find_bridge(resource);
  114. if (bridge == NULL) {
  115. printk(KERN_ERR "Can't find bridge\n");
  116. return;
  117. }
  118. if (bridge->parent == NULL) {
  119. printk(KERN_ERR "Dev entry NULL for bridge %s\n", bridge->name);
  120. return;
  121. }
  122. if (bridge->free_consistent == NULL) {
  123. printk(KERN_ERR "free_consistent not supported by bridge %s\n",
  124. bridge->name);
  125. return;
  126. }
  127. bridge->free_consistent(bridge->parent, size, vaddr, dma);
  128. }
  129. EXPORT_SYMBOL(vme_free_consistent);
  130. size_t vme_get_size(struct vme_resource *resource)
  131. {
  132. int enabled, retval;
  133. unsigned long long base, size;
  134. dma_addr_t buf_base;
  135. u32 aspace, cycle, dwidth;
  136. switch (resource->type) {
  137. case VME_MASTER:
  138. retval = vme_master_get(resource, &enabled, &base, &size,
  139. &aspace, &cycle, &dwidth);
  140. return size;
  141. break;
  142. case VME_SLAVE:
  143. retval = vme_slave_get(resource, &enabled, &base, &size,
  144. &buf_base, &aspace, &cycle);
  145. return size;
  146. break;
  147. case VME_DMA:
  148. return 0;
  149. break;
  150. default:
  151. printk(KERN_ERR "Unknown resource type\n");
  152. return 0;
  153. break;
  154. }
  155. }
  156. EXPORT_SYMBOL(vme_get_size);
  157. static int vme_check_window(u32 aspace, unsigned long long vme_base,
  158. unsigned long long size)
  159. {
  160. int retval = 0;
  161. switch (aspace) {
  162. case VME_A16:
  163. if (((vme_base + size) > VME_A16_MAX) ||
  164. (vme_base > VME_A16_MAX))
  165. retval = -EFAULT;
  166. break;
  167. case VME_A24:
  168. if (((vme_base + size) > VME_A24_MAX) ||
  169. (vme_base > VME_A24_MAX))
  170. retval = -EFAULT;
  171. break;
  172. case VME_A32:
  173. if (((vme_base + size) > VME_A32_MAX) ||
  174. (vme_base > VME_A32_MAX))
  175. retval = -EFAULT;
  176. break;
  177. case VME_A64:
  178. /*
  179. * Any value held in an unsigned long long can be used as the
  180. * base
  181. */
  182. break;
  183. case VME_CRCSR:
  184. if (((vme_base + size) > VME_CRCSR_MAX) ||
  185. (vme_base > VME_CRCSR_MAX))
  186. retval = -EFAULT;
  187. break;
  188. case VME_USER1:
  189. case VME_USER2:
  190. case VME_USER3:
  191. case VME_USER4:
  192. /* User Defined */
  193. break;
  194. default:
  195. printk(KERN_ERR "Invalid address space\n");
  196. retval = -EINVAL;
  197. break;
  198. }
  199. return retval;
  200. }
  201. /*
  202. * Request a slave image with specific attributes, return some unique
  203. * identifier.
  204. */
  205. struct vme_resource *vme_slave_request(struct vme_dev *vdev, u32 address,
  206. u32 cycle)
  207. {
  208. struct vme_bridge *bridge;
  209. struct list_head *slave_pos = NULL;
  210. struct vme_slave_resource *allocated_image = NULL;
  211. struct vme_slave_resource *slave_image = NULL;
  212. struct vme_resource *resource = NULL;
  213. bridge = vdev->bridge;
  214. if (bridge == NULL) {
  215. printk(KERN_ERR "Can't find VME bus\n");
  216. goto err_bus;
  217. }
  218. /* Loop through slave resources */
  219. list_for_each(slave_pos, &bridge->slave_resources) {
  220. slave_image = list_entry(slave_pos,
  221. struct vme_slave_resource, list);
  222. if (slave_image == NULL) {
  223. printk(KERN_ERR "Registered NULL Slave resource\n");
  224. continue;
  225. }
  226. /* Find an unlocked and compatible image */
  227. mutex_lock(&slave_image->mtx);
  228. if (((slave_image->address_attr & address) == address) &&
  229. ((slave_image->cycle_attr & cycle) == cycle) &&
  230. (slave_image->locked == 0)) {
  231. slave_image->locked = 1;
  232. mutex_unlock(&slave_image->mtx);
  233. allocated_image = slave_image;
  234. break;
  235. }
  236. mutex_unlock(&slave_image->mtx);
  237. }
  238. /* No free image */
  239. if (allocated_image == NULL)
  240. goto err_image;
  241. resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
  242. if (resource == NULL) {
  243. printk(KERN_WARNING "Unable to allocate resource structure\n");
  244. goto err_alloc;
  245. }
  246. resource->type = VME_SLAVE;
  247. resource->entry = &allocated_image->list;
  248. return resource;
  249. err_alloc:
  250. /* Unlock image */
  251. mutex_lock(&slave_image->mtx);
  252. slave_image->locked = 0;
  253. mutex_unlock(&slave_image->mtx);
  254. err_image:
  255. err_bus:
  256. return NULL;
  257. }
  258. EXPORT_SYMBOL(vme_slave_request);
  259. int vme_slave_set(struct vme_resource *resource, int enabled,
  260. unsigned long long vme_base, unsigned long long size,
  261. dma_addr_t buf_base, u32 aspace, u32 cycle)
  262. {
  263. struct vme_bridge *bridge = find_bridge(resource);
  264. struct vme_slave_resource *image;
  265. int retval;
  266. if (resource->type != VME_SLAVE) {
  267. printk(KERN_ERR "Not a slave resource\n");
  268. return -EINVAL;
  269. }
  270. image = list_entry(resource->entry, struct vme_slave_resource, list);
  271. if (bridge->slave_set == NULL) {
  272. printk(KERN_ERR "Function not supported\n");
  273. return -ENOSYS;
  274. }
  275. if (!(((image->address_attr & aspace) == aspace) &&
  276. ((image->cycle_attr & cycle) == cycle))) {
  277. printk(KERN_ERR "Invalid attributes\n");
  278. return -EINVAL;
  279. }
  280. retval = vme_check_window(aspace, vme_base, size);
  281. if (retval)
  282. return retval;
  283. return bridge->slave_set(image, enabled, vme_base, size, buf_base,
  284. aspace, cycle);
  285. }
  286. EXPORT_SYMBOL(vme_slave_set);
  287. int vme_slave_get(struct vme_resource *resource, int *enabled,
  288. unsigned long long *vme_base, unsigned long long *size,
  289. dma_addr_t *buf_base, u32 *aspace, u32 *cycle)
  290. {
  291. struct vme_bridge *bridge = find_bridge(resource);
  292. struct vme_slave_resource *image;
  293. if (resource->type != VME_SLAVE) {
  294. printk(KERN_ERR "Not a slave resource\n");
  295. return -EINVAL;
  296. }
  297. image = list_entry(resource->entry, struct vme_slave_resource, list);
  298. if (bridge->slave_get == NULL) {
  299. printk(KERN_ERR "vme_slave_get not supported\n");
  300. return -EINVAL;
  301. }
  302. return bridge->slave_get(image, enabled, vme_base, size, buf_base,
  303. aspace, cycle);
  304. }
  305. EXPORT_SYMBOL(vme_slave_get);
  306. void vme_slave_free(struct vme_resource *resource)
  307. {
  308. struct vme_slave_resource *slave_image;
  309. if (resource->type != VME_SLAVE) {
  310. printk(KERN_ERR "Not a slave resource\n");
  311. return;
  312. }
  313. slave_image = list_entry(resource->entry, struct vme_slave_resource,
  314. list);
  315. if (slave_image == NULL) {
  316. printk(KERN_ERR "Can't find slave resource\n");
  317. return;
  318. }
  319. /* Unlock image */
  320. mutex_lock(&slave_image->mtx);
  321. if (slave_image->locked == 0)
  322. printk(KERN_ERR "Image is already free\n");
  323. slave_image->locked = 0;
  324. mutex_unlock(&slave_image->mtx);
  325. /* Free up resource memory */
  326. kfree(resource);
  327. }
  328. EXPORT_SYMBOL(vme_slave_free);
  329. /*
  330. * Request a master image with specific attributes, return some unique
  331. * identifier.
  332. */
  333. struct vme_resource *vme_master_request(struct vme_dev *vdev, u32 address,
  334. u32 cycle, u32 dwidth)
  335. {
  336. struct vme_bridge *bridge;
  337. struct list_head *master_pos = NULL;
  338. struct vme_master_resource *allocated_image = NULL;
  339. struct vme_master_resource *master_image = NULL;
  340. struct vme_resource *resource = NULL;
  341. bridge = vdev->bridge;
  342. if (bridge == NULL) {
  343. printk(KERN_ERR "Can't find VME bus\n");
  344. goto err_bus;
  345. }
  346. /* Loop through master resources */
  347. list_for_each(master_pos, &bridge->master_resources) {
  348. master_image = list_entry(master_pos,
  349. struct vme_master_resource, list);
  350. if (master_image == NULL) {
  351. printk(KERN_WARNING "Registered NULL master resource\n");
  352. continue;
  353. }
  354. /* Find an unlocked and compatible image */
  355. spin_lock(&master_image->lock);
  356. if (((master_image->address_attr & address) == address) &&
  357. ((master_image->cycle_attr & cycle) == cycle) &&
  358. ((master_image->width_attr & dwidth) == dwidth) &&
  359. (master_image->locked == 0)) {
  360. master_image->locked = 1;
  361. spin_unlock(&master_image->lock);
  362. allocated_image = master_image;
  363. break;
  364. }
  365. spin_unlock(&master_image->lock);
  366. }
  367. /* Check to see if we found a resource */
  368. if (allocated_image == NULL) {
  369. printk(KERN_ERR "Can't find a suitable resource\n");
  370. goto err_image;
  371. }
  372. resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
  373. if (resource == NULL) {
  374. printk(KERN_ERR "Unable to allocate resource structure\n");
  375. goto err_alloc;
  376. }
  377. resource->type = VME_MASTER;
  378. resource->entry = &allocated_image->list;
  379. return resource;
  380. err_alloc:
  381. /* Unlock image */
  382. spin_lock(&master_image->lock);
  383. master_image->locked = 0;
  384. spin_unlock(&master_image->lock);
  385. err_image:
  386. err_bus:
  387. return NULL;
  388. }
  389. EXPORT_SYMBOL(vme_master_request);
  390. int vme_master_set(struct vme_resource *resource, int enabled,
  391. unsigned long long vme_base, unsigned long long size, u32 aspace,
  392. u32 cycle, u32 dwidth)
  393. {
  394. struct vme_bridge *bridge = find_bridge(resource);
  395. struct vme_master_resource *image;
  396. int retval;
  397. if (resource->type != VME_MASTER) {
  398. printk(KERN_ERR "Not a master resource\n");
  399. return -EINVAL;
  400. }
  401. image = list_entry(resource->entry, struct vme_master_resource, list);
  402. if (bridge->master_set == NULL) {
  403. printk(KERN_WARNING "vme_master_set not supported\n");
  404. return -EINVAL;
  405. }
  406. if (!(((image->address_attr & aspace) == aspace) &&
  407. ((image->cycle_attr & cycle) == cycle) &&
  408. ((image->width_attr & dwidth) == dwidth))) {
  409. printk(KERN_WARNING "Invalid attributes\n");
  410. return -EINVAL;
  411. }
  412. retval = vme_check_window(aspace, vme_base, size);
  413. if (retval)
  414. return retval;
  415. return bridge->master_set(image, enabled, vme_base, size, aspace,
  416. cycle, dwidth);
  417. }
  418. EXPORT_SYMBOL(vme_master_set);
  419. int vme_master_get(struct vme_resource *resource, int *enabled,
  420. unsigned long long *vme_base, unsigned long long *size, u32 *aspace,
  421. u32 *cycle, u32 *dwidth)
  422. {
  423. struct vme_bridge *bridge = find_bridge(resource);
  424. struct vme_master_resource *image;
  425. if (resource->type != VME_MASTER) {
  426. printk(KERN_ERR "Not a master resource\n");
  427. return -EINVAL;
  428. }
  429. image = list_entry(resource->entry, struct vme_master_resource, list);
  430. if (bridge->master_get == NULL) {
  431. printk(KERN_WARNING "vme_master_set not supported\n");
  432. return -EINVAL;
  433. }
  434. return bridge->master_get(image, enabled, vme_base, size, aspace,
  435. cycle, dwidth);
  436. }
  437. EXPORT_SYMBOL(vme_master_get);
  438. /*
  439. * Read data out of VME space into a buffer.
  440. */
  441. ssize_t vme_master_read(struct vme_resource *resource, void *buf, size_t count,
  442. loff_t offset)
  443. {
  444. struct vme_bridge *bridge = find_bridge(resource);
  445. struct vme_master_resource *image;
  446. size_t length;
  447. if (bridge->master_read == NULL) {
  448. printk(KERN_WARNING "Reading from resource not supported\n");
  449. return -EINVAL;
  450. }
  451. if (resource->type != VME_MASTER) {
  452. printk(KERN_ERR "Not a master resource\n");
  453. return -EINVAL;
  454. }
  455. image = list_entry(resource->entry, struct vme_master_resource, list);
  456. length = vme_get_size(resource);
  457. if (offset > length) {
  458. printk(KERN_WARNING "Invalid Offset\n");
  459. return -EFAULT;
  460. }
  461. if ((offset + count) > length)
  462. count = length - offset;
  463. return bridge->master_read(image, buf, count, offset);
  464. }
  465. EXPORT_SYMBOL(vme_master_read);
  466. /*
  467. * Write data out to VME space from a buffer.
  468. */
  469. ssize_t vme_master_write(struct vme_resource *resource, void *buf,
  470. size_t count, loff_t offset)
  471. {
  472. struct vme_bridge *bridge = find_bridge(resource);
  473. struct vme_master_resource *image;
  474. size_t length;
  475. if (bridge->master_write == NULL) {
  476. printk(KERN_WARNING "Writing to resource not supported\n");
  477. return -EINVAL;
  478. }
  479. if (resource->type != VME_MASTER) {
  480. printk(KERN_ERR "Not a master resource\n");
  481. return -EINVAL;
  482. }
  483. image = list_entry(resource->entry, struct vme_master_resource, list);
  484. length = vme_get_size(resource);
  485. if (offset > length) {
  486. printk(KERN_WARNING "Invalid Offset\n");
  487. return -EFAULT;
  488. }
  489. if ((offset + count) > length)
  490. count = length - offset;
  491. return bridge->master_write(image, buf, count, offset);
  492. }
  493. EXPORT_SYMBOL(vme_master_write);
  494. /*
  495. * Perform RMW cycle to provided location.
  496. */
  497. unsigned int vme_master_rmw(struct vme_resource *resource, unsigned int mask,
  498. unsigned int compare, unsigned int swap, loff_t offset)
  499. {
  500. struct vme_bridge *bridge = find_bridge(resource);
  501. struct vme_master_resource *image;
  502. if (bridge->master_rmw == NULL) {
  503. printk(KERN_WARNING "Writing to resource not supported\n");
  504. return -EINVAL;
  505. }
  506. if (resource->type != VME_MASTER) {
  507. printk(KERN_ERR "Not a master resource\n");
  508. return -EINVAL;
  509. }
  510. image = list_entry(resource->entry, struct vme_master_resource, list);
  511. return bridge->master_rmw(image, mask, compare, swap, offset);
  512. }
  513. EXPORT_SYMBOL(vme_master_rmw);
  514. void vme_master_free(struct vme_resource *resource)
  515. {
  516. struct vme_master_resource *master_image;
  517. if (resource->type != VME_MASTER) {
  518. printk(KERN_ERR "Not a master resource\n");
  519. return;
  520. }
  521. master_image = list_entry(resource->entry, struct vme_master_resource,
  522. list);
  523. if (master_image == NULL) {
  524. printk(KERN_ERR "Can't find master resource\n");
  525. return;
  526. }
  527. /* Unlock image */
  528. spin_lock(&master_image->lock);
  529. if (master_image->locked == 0)
  530. printk(KERN_ERR "Image is already free\n");
  531. master_image->locked = 0;
  532. spin_unlock(&master_image->lock);
  533. /* Free up resource memory */
  534. kfree(resource);
  535. }
  536. EXPORT_SYMBOL(vme_master_free);
  537. /*
  538. * Request a DMA controller with specific attributes, return some unique
  539. * identifier.
  540. */
  541. struct vme_resource *vme_dma_request(struct vme_dev *vdev, u32 route)
  542. {
  543. struct vme_bridge *bridge;
  544. struct list_head *dma_pos = NULL;
  545. struct vme_dma_resource *allocated_ctrlr = NULL;
  546. struct vme_dma_resource *dma_ctrlr = NULL;
  547. struct vme_resource *resource = NULL;
  548. /* XXX Not checking resource attributes */
  549. printk(KERN_ERR "No VME resource Attribute tests done\n");
  550. bridge = vdev->bridge;
  551. if (bridge == NULL) {
  552. printk(KERN_ERR "Can't find VME bus\n");
  553. goto err_bus;
  554. }
  555. /* Loop through DMA resources */
  556. list_for_each(dma_pos, &bridge->dma_resources) {
  557. dma_ctrlr = list_entry(dma_pos,
  558. struct vme_dma_resource, list);
  559. if (dma_ctrlr == NULL) {
  560. printk(KERN_ERR "Registered NULL DMA resource\n");
  561. continue;
  562. }
  563. /* Find an unlocked and compatible controller */
  564. mutex_lock(&dma_ctrlr->mtx);
  565. if (((dma_ctrlr->route_attr & route) == route) &&
  566. (dma_ctrlr->locked == 0)) {
  567. dma_ctrlr->locked = 1;
  568. mutex_unlock(&dma_ctrlr->mtx);
  569. allocated_ctrlr = dma_ctrlr;
  570. break;
  571. }
  572. mutex_unlock(&dma_ctrlr->mtx);
  573. }
  574. /* Check to see if we found a resource */
  575. if (allocated_ctrlr == NULL)
  576. goto err_ctrlr;
  577. resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
  578. if (resource == NULL) {
  579. printk(KERN_WARNING "Unable to allocate resource structure\n");
  580. goto err_alloc;
  581. }
  582. resource->type = VME_DMA;
  583. resource->entry = &allocated_ctrlr->list;
  584. return resource;
  585. err_alloc:
  586. /* Unlock image */
  587. mutex_lock(&dma_ctrlr->mtx);
  588. dma_ctrlr->locked = 0;
  589. mutex_unlock(&dma_ctrlr->mtx);
  590. err_ctrlr:
  591. err_bus:
  592. return NULL;
  593. }
  594. EXPORT_SYMBOL(vme_dma_request);
  595. /*
  596. * Start new list
  597. */
  598. struct vme_dma_list *vme_new_dma_list(struct vme_resource *resource)
  599. {
  600. struct vme_dma_resource *ctrlr;
  601. struct vme_dma_list *dma_list;
  602. if (resource->type != VME_DMA) {
  603. printk(KERN_ERR "Not a DMA resource\n");
  604. return NULL;
  605. }
  606. ctrlr = list_entry(resource->entry, struct vme_dma_resource, list);
  607. dma_list = kmalloc(sizeof(struct vme_dma_list), GFP_KERNEL);
  608. if (dma_list == NULL) {
  609. printk(KERN_ERR "Unable to allocate memory for new dma list\n");
  610. return NULL;
  611. }
  612. INIT_LIST_HEAD(&dma_list->entries);
  613. dma_list->parent = ctrlr;
  614. mutex_init(&dma_list->mtx);
  615. return dma_list;
  616. }
  617. EXPORT_SYMBOL(vme_new_dma_list);
  618. /*
  619. * Create "Pattern" type attributes
  620. */
  621. struct vme_dma_attr *vme_dma_pattern_attribute(u32 pattern, u32 type)
  622. {
  623. struct vme_dma_attr *attributes;
  624. struct vme_dma_pattern *pattern_attr;
  625. attributes = kmalloc(sizeof(struct vme_dma_attr), GFP_KERNEL);
  626. if (attributes == NULL) {
  627. printk(KERN_ERR "Unable to allocate memory for attributes structure\n");
  628. goto err_attr;
  629. }
  630. pattern_attr = kmalloc(sizeof(struct vme_dma_pattern), GFP_KERNEL);
  631. if (pattern_attr == NULL) {
  632. printk(KERN_ERR "Unable to allocate memory for pattern attributes\n");
  633. goto err_pat;
  634. }
  635. attributes->type = VME_DMA_PATTERN;
  636. attributes->private = (void *)pattern_attr;
  637. pattern_attr->pattern = pattern;
  638. pattern_attr->type = type;
  639. return attributes;
  640. err_pat:
  641. kfree(attributes);
  642. err_attr:
  643. return NULL;
  644. }
  645. EXPORT_SYMBOL(vme_dma_pattern_attribute);
  646. /*
  647. * Create "PCI" type attributes
  648. */
  649. struct vme_dma_attr *vme_dma_pci_attribute(dma_addr_t address)
  650. {
  651. struct vme_dma_attr *attributes;
  652. struct vme_dma_pci *pci_attr;
  653. /* XXX Run some sanity checks here */
  654. attributes = kmalloc(sizeof(struct vme_dma_attr), GFP_KERNEL);
  655. if (attributes == NULL) {
  656. printk(KERN_ERR "Unable to allocate memory for attributes structure\n");
  657. goto err_attr;
  658. }
  659. pci_attr = kmalloc(sizeof(struct vme_dma_pci), GFP_KERNEL);
  660. if (pci_attr == NULL) {
  661. printk(KERN_ERR "Unable to allocate memory for pci attributes\n");
  662. goto err_pci;
  663. }
  664. attributes->type = VME_DMA_PCI;
  665. attributes->private = (void *)pci_attr;
  666. pci_attr->address = address;
  667. return attributes;
  668. err_pci:
  669. kfree(attributes);
  670. err_attr:
  671. return NULL;
  672. }
  673. EXPORT_SYMBOL(vme_dma_pci_attribute);
  674. /*
  675. * Create "VME" type attributes
  676. */
  677. struct vme_dma_attr *vme_dma_vme_attribute(unsigned long long address,
  678. u32 aspace, u32 cycle, u32 dwidth)
  679. {
  680. struct vme_dma_attr *attributes;
  681. struct vme_dma_vme *vme_attr;
  682. attributes = kmalloc(
  683. sizeof(struct vme_dma_attr), GFP_KERNEL);
  684. if (attributes == NULL) {
  685. printk(KERN_ERR "Unable to allocate memory for attributes structure\n");
  686. goto err_attr;
  687. }
  688. vme_attr = kmalloc(sizeof(struct vme_dma_vme), GFP_KERNEL);
  689. if (vme_attr == NULL) {
  690. printk(KERN_ERR "Unable to allocate memory for vme attributes\n");
  691. goto err_vme;
  692. }
  693. attributes->type = VME_DMA_VME;
  694. attributes->private = (void *)vme_attr;
  695. vme_attr->address = address;
  696. vme_attr->aspace = aspace;
  697. vme_attr->cycle = cycle;
  698. vme_attr->dwidth = dwidth;
  699. return attributes;
  700. err_vme:
  701. kfree(attributes);
  702. err_attr:
  703. return NULL;
  704. }
  705. EXPORT_SYMBOL(vme_dma_vme_attribute);
  706. /*
  707. * Free attribute
  708. */
  709. void vme_dma_free_attribute(struct vme_dma_attr *attributes)
  710. {
  711. kfree(attributes->private);
  712. kfree(attributes);
  713. }
  714. EXPORT_SYMBOL(vme_dma_free_attribute);
  715. int vme_dma_list_add(struct vme_dma_list *list, struct vme_dma_attr *src,
  716. struct vme_dma_attr *dest, size_t count)
  717. {
  718. struct vme_bridge *bridge = list->parent->parent;
  719. int retval;
  720. if (bridge->dma_list_add == NULL) {
  721. printk(KERN_WARNING "Link List DMA generation not supported\n");
  722. return -EINVAL;
  723. }
  724. if (!mutex_trylock(&list->mtx)) {
  725. printk(KERN_ERR "Link List already submitted\n");
  726. return -EINVAL;
  727. }
  728. retval = bridge->dma_list_add(list, src, dest, count);
  729. mutex_unlock(&list->mtx);
  730. return retval;
  731. }
  732. EXPORT_SYMBOL(vme_dma_list_add);
  733. int vme_dma_list_exec(struct vme_dma_list *list)
  734. {
  735. struct vme_bridge *bridge = list->parent->parent;
  736. int retval;
  737. if (bridge->dma_list_exec == NULL) {
  738. printk(KERN_ERR "Link List DMA execution not supported\n");
  739. return -EINVAL;
  740. }
  741. mutex_lock(&list->mtx);
  742. retval = bridge->dma_list_exec(list);
  743. mutex_unlock(&list->mtx);
  744. return retval;
  745. }
  746. EXPORT_SYMBOL(vme_dma_list_exec);
  747. int vme_dma_list_free(struct vme_dma_list *list)
  748. {
  749. struct vme_bridge *bridge = list->parent->parent;
  750. int retval;
  751. if (bridge->dma_list_empty == NULL) {
  752. printk(KERN_WARNING "Emptying of Link Lists not supported\n");
  753. return -EINVAL;
  754. }
  755. if (!mutex_trylock(&list->mtx)) {
  756. printk(KERN_ERR "Link List in use\n");
  757. return -EINVAL;
  758. }
  759. /*
  760. * Empty out all of the entries from the dma list. We need to go to the
  761. * low level driver as dma entries are driver specific.
  762. */
  763. retval = bridge->dma_list_empty(list);
  764. if (retval) {
  765. printk(KERN_ERR "Unable to empty link-list entries\n");
  766. mutex_unlock(&list->mtx);
  767. return retval;
  768. }
  769. mutex_unlock(&list->mtx);
  770. kfree(list);
  771. return retval;
  772. }
  773. EXPORT_SYMBOL(vme_dma_list_free);
  774. int vme_dma_free(struct vme_resource *resource)
  775. {
  776. struct vme_dma_resource *ctrlr;
  777. if (resource->type != VME_DMA) {
  778. printk(KERN_ERR "Not a DMA resource\n");
  779. return -EINVAL;
  780. }
  781. ctrlr = list_entry(resource->entry, struct vme_dma_resource, list);
  782. if (!mutex_trylock(&ctrlr->mtx)) {
  783. printk(KERN_ERR "Resource busy, can't free\n");
  784. return -EBUSY;
  785. }
  786. if (!(list_empty(&ctrlr->pending) && list_empty(&ctrlr->running))) {
  787. printk(KERN_WARNING "Resource still processing transfers\n");
  788. mutex_unlock(&ctrlr->mtx);
  789. return -EBUSY;
  790. }
  791. ctrlr->locked = 0;
  792. mutex_unlock(&ctrlr->mtx);
  793. return 0;
  794. }
  795. EXPORT_SYMBOL(vme_dma_free);
  796. void vme_irq_handler(struct vme_bridge *bridge, int level, int statid)
  797. {
  798. void (*call)(int, int, void *);
  799. void *priv_data;
  800. call = bridge->irq[level - 1].callback[statid].func;
  801. priv_data = bridge->irq[level - 1].callback[statid].priv_data;
  802. if (call != NULL)
  803. call(level, statid, priv_data);
  804. else
  805. printk(KERN_WARNING "Spurilous VME interrupt, level:%x, vector:%x\n",
  806. level, statid);
  807. }
  808. EXPORT_SYMBOL(vme_irq_handler);
  809. int vme_irq_request(struct vme_dev *vdev, int level, int statid,
  810. void (*callback)(int, int, void *),
  811. void *priv_data)
  812. {
  813. struct vme_bridge *bridge;
  814. bridge = vdev->bridge;
  815. if (bridge == NULL) {
  816. printk(KERN_ERR "Can't find VME bus\n");
  817. return -EINVAL;
  818. }
  819. if ((level < 1) || (level > 7)) {
  820. printk(KERN_ERR "Invalid interrupt level\n");
  821. return -EINVAL;
  822. }
  823. if (bridge->irq_set == NULL) {
  824. printk(KERN_ERR "Configuring interrupts not supported\n");
  825. return -EINVAL;
  826. }
  827. mutex_lock(&bridge->irq_mtx);
  828. if (bridge->irq[level - 1].callback[statid].func) {
  829. mutex_unlock(&bridge->irq_mtx);
  830. printk(KERN_WARNING "VME Interrupt already taken\n");
  831. return -EBUSY;
  832. }
  833. bridge->irq[level - 1].count++;
  834. bridge->irq[level - 1].callback[statid].priv_data = priv_data;
  835. bridge->irq[level - 1].callback[statid].func = callback;
  836. /* Enable IRQ level */
  837. bridge->irq_set(bridge, level, 1, 1);
  838. mutex_unlock(&bridge->irq_mtx);
  839. return 0;
  840. }
  841. EXPORT_SYMBOL(vme_irq_request);
  842. void vme_irq_free(struct vme_dev *vdev, int level, int statid)
  843. {
  844. struct vme_bridge *bridge;
  845. bridge = vdev->bridge;
  846. if (bridge == NULL) {
  847. printk(KERN_ERR "Can't find VME bus\n");
  848. return;
  849. }
  850. if ((level < 1) || (level > 7)) {
  851. printk(KERN_ERR "Invalid interrupt level\n");
  852. return;
  853. }
  854. if (bridge->irq_set == NULL) {
  855. printk(KERN_ERR "Configuring interrupts not supported\n");
  856. return;
  857. }
  858. mutex_lock(&bridge->irq_mtx);
  859. bridge->irq[level - 1].count--;
  860. /* Disable IRQ level if no more interrupts attached at this level*/
  861. if (bridge->irq[level - 1].count == 0)
  862. bridge->irq_set(bridge, level, 0, 1);
  863. bridge->irq[level - 1].callback[statid].func = NULL;
  864. bridge->irq[level - 1].callback[statid].priv_data = NULL;
  865. mutex_unlock(&bridge->irq_mtx);
  866. }
  867. EXPORT_SYMBOL(vme_irq_free);
  868. int vme_irq_generate(struct vme_dev *vdev, int level, int statid)
  869. {
  870. struct vme_bridge *bridge;
  871. bridge = vdev->bridge;
  872. if (bridge == NULL) {
  873. printk(KERN_ERR "Can't find VME bus\n");
  874. return -EINVAL;
  875. }
  876. if ((level < 1) || (level > 7)) {
  877. printk(KERN_WARNING "Invalid interrupt level\n");
  878. return -EINVAL;
  879. }
  880. if (bridge->irq_generate == NULL) {
  881. printk(KERN_WARNING "Interrupt generation not supported\n");
  882. return -EINVAL;
  883. }
  884. return bridge->irq_generate(bridge, level, statid);
  885. }
  886. EXPORT_SYMBOL(vme_irq_generate);
  887. /*
  888. * Request the location monitor, return resource or NULL
  889. */
  890. struct vme_resource *vme_lm_request(struct vme_dev *vdev)
  891. {
  892. struct vme_bridge *bridge;
  893. struct list_head *lm_pos = NULL;
  894. struct vme_lm_resource *allocated_lm = NULL;
  895. struct vme_lm_resource *lm = NULL;
  896. struct vme_resource *resource = NULL;
  897. bridge = vdev->bridge;
  898. if (bridge == NULL) {
  899. printk(KERN_ERR "Can't find VME bus\n");
  900. goto err_bus;
  901. }
  902. /* Loop through DMA resources */
  903. list_for_each(lm_pos, &bridge->lm_resources) {
  904. lm = list_entry(lm_pos,
  905. struct vme_lm_resource, list);
  906. if (lm == NULL) {
  907. printk(KERN_ERR "Registered NULL Location Monitor resource\n");
  908. continue;
  909. }
  910. /* Find an unlocked controller */
  911. mutex_lock(&lm->mtx);
  912. if (lm->locked == 0) {
  913. lm->locked = 1;
  914. mutex_unlock(&lm->mtx);
  915. allocated_lm = lm;
  916. break;
  917. }
  918. mutex_unlock(&lm->mtx);
  919. }
  920. /* Check to see if we found a resource */
  921. if (allocated_lm == NULL)
  922. goto err_lm;
  923. resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
  924. if (resource == NULL) {
  925. printk(KERN_ERR "Unable to allocate resource structure\n");
  926. goto err_alloc;
  927. }
  928. resource->type = VME_LM;
  929. resource->entry = &allocated_lm->list;
  930. return resource;
  931. err_alloc:
  932. /* Unlock image */
  933. mutex_lock(&lm->mtx);
  934. lm->locked = 0;
  935. mutex_unlock(&lm->mtx);
  936. err_lm:
  937. err_bus:
  938. return NULL;
  939. }
  940. EXPORT_SYMBOL(vme_lm_request);
  941. int vme_lm_count(struct vme_resource *resource)
  942. {
  943. struct vme_lm_resource *lm;
  944. if (resource->type != VME_LM) {
  945. printk(KERN_ERR "Not a Location Monitor resource\n");
  946. return -EINVAL;
  947. }
  948. lm = list_entry(resource->entry, struct vme_lm_resource, list);
  949. return lm->monitors;
  950. }
  951. EXPORT_SYMBOL(vme_lm_count);
  952. int vme_lm_set(struct vme_resource *resource, unsigned long long lm_base,
  953. u32 aspace, u32 cycle)
  954. {
  955. struct vme_bridge *bridge = find_bridge(resource);
  956. struct vme_lm_resource *lm;
  957. if (resource->type != VME_LM) {
  958. printk(KERN_ERR "Not a Location Monitor resource\n");
  959. return -EINVAL;
  960. }
  961. lm = list_entry(resource->entry, struct vme_lm_resource, list);
  962. if (bridge->lm_set == NULL) {
  963. printk(KERN_ERR "vme_lm_set not supported\n");
  964. return -EINVAL;
  965. }
  966. return bridge->lm_set(lm, lm_base, aspace, cycle);
  967. }
  968. EXPORT_SYMBOL(vme_lm_set);
  969. int vme_lm_get(struct vme_resource *resource, unsigned long long *lm_base,
  970. u32 *aspace, u32 *cycle)
  971. {
  972. struct vme_bridge *bridge = find_bridge(resource);
  973. struct vme_lm_resource *lm;
  974. if (resource->type != VME_LM) {
  975. printk(KERN_ERR "Not a Location Monitor resource\n");
  976. return -EINVAL;
  977. }
  978. lm = list_entry(resource->entry, struct vme_lm_resource, list);
  979. if (bridge->lm_get == NULL) {
  980. printk(KERN_ERR "vme_lm_get not supported\n");
  981. return -EINVAL;
  982. }
  983. return bridge->lm_get(lm, lm_base, aspace, cycle);
  984. }
  985. EXPORT_SYMBOL(vme_lm_get);
  986. int vme_lm_attach(struct vme_resource *resource, int monitor,
  987. void (*callback)(int))
  988. {
  989. struct vme_bridge *bridge = find_bridge(resource);
  990. struct vme_lm_resource *lm;
  991. if (resource->type != VME_LM) {
  992. printk(KERN_ERR "Not a Location Monitor resource\n");
  993. return -EINVAL;
  994. }
  995. lm = list_entry(resource->entry, struct vme_lm_resource, list);
  996. if (bridge->lm_attach == NULL) {
  997. printk(KERN_ERR "vme_lm_attach not supported\n");
  998. return -EINVAL;
  999. }
  1000. return bridge->lm_attach(lm, monitor, callback);
  1001. }
  1002. EXPORT_SYMBOL(vme_lm_attach);
  1003. int vme_lm_detach(struct vme_resource *resource, int monitor)
  1004. {
  1005. struct vme_bridge *bridge = find_bridge(resource);
  1006. struct vme_lm_resource *lm;
  1007. if (resource->type != VME_LM) {
  1008. printk(KERN_ERR "Not a Location Monitor resource\n");
  1009. return -EINVAL;
  1010. }
  1011. lm = list_entry(resource->entry, struct vme_lm_resource, list);
  1012. if (bridge->lm_detach == NULL) {
  1013. printk(KERN_ERR "vme_lm_detach not supported\n");
  1014. return -EINVAL;
  1015. }
  1016. return bridge->lm_detach(lm, monitor);
  1017. }
  1018. EXPORT_SYMBOL(vme_lm_detach);
  1019. void vme_lm_free(struct vme_resource *resource)
  1020. {
  1021. struct vme_lm_resource *lm;
  1022. if (resource->type != VME_LM) {
  1023. printk(KERN_ERR "Not a Location Monitor resource\n");
  1024. return;
  1025. }
  1026. lm = list_entry(resource->entry, struct vme_lm_resource, list);
  1027. mutex_lock(&lm->mtx);
  1028. /* XXX
  1029. * Check to see that there aren't any callbacks still attached, if
  1030. * there are we should probably be detaching them!
  1031. */
  1032. lm->locked = 0;
  1033. mutex_unlock(&lm->mtx);
  1034. kfree(resource);
  1035. }
  1036. EXPORT_SYMBOL(vme_lm_free);
  1037. int vme_slot_get(struct vme_dev *vdev)
  1038. {
  1039. struct vme_bridge *bridge;
  1040. bridge = vdev->bridge;
  1041. if (bridge == NULL) {
  1042. printk(KERN_ERR "Can't find VME bus\n");
  1043. return -EINVAL;
  1044. }
  1045. if (bridge->slot_get == NULL) {
  1046. printk(KERN_WARNING "vme_slot_get not supported\n");
  1047. return -EINVAL;
  1048. }
  1049. return bridge->slot_get(bridge);
  1050. }
  1051. EXPORT_SYMBOL(vme_slot_get);
  1052. /* - Bridge Registration --------------------------------------------------- */
/*
 * Device release callback: frees the containing vme_dev once the last
 * reference to its embedded struct device is dropped.
 */
static void vme_dev_release(struct device *dev)
{
	kfree(dev_to_vme_dev(dev));
}
  1057. int vme_register_bridge(struct vme_bridge *bridge)
  1058. {
  1059. int i;
  1060. int ret = -1;
  1061. mutex_lock(&vme_buses_lock);
  1062. for (i = 0; i < sizeof(vme_bus_numbers) * 8; i++) {
  1063. if ((vme_bus_numbers & (1 << i)) == 0) {
  1064. vme_bus_numbers |= (1 << i);
  1065. bridge->num = i;
  1066. INIT_LIST_HEAD(&bridge->devices);
  1067. list_add_tail(&bridge->bus_list, &vme_bus_list);
  1068. ret = 0;
  1069. break;
  1070. }
  1071. }
  1072. mutex_unlock(&vme_buses_lock);
  1073. return ret;
  1074. }
  1075. EXPORT_SYMBOL(vme_register_bridge);
  1076. void vme_unregister_bridge(struct vme_bridge *bridge)
  1077. {
  1078. struct vme_dev *vdev;
  1079. struct vme_dev *tmp;
  1080. mutex_lock(&vme_buses_lock);
  1081. vme_bus_numbers &= ~(1 << bridge->num);
  1082. list_for_each_entry_safe(vdev, tmp, &bridge->devices, bridge_list) {
  1083. list_del(&vdev->drv_list);
  1084. list_del(&vdev->bridge_list);
  1085. device_unregister(&vdev->dev);
  1086. }
  1087. list_del(&bridge->bus_list);
  1088. mutex_unlock(&vme_buses_lock);
  1089. }
  1090. EXPORT_SYMBOL(vme_unregister_bridge);
  1091. /* - Driver Registration --------------------------------------------------- */
  1092. static int __vme_register_driver_bus(struct vme_driver *drv,
  1093. struct vme_bridge *bridge, unsigned int ndevs)
  1094. {
  1095. int err;
  1096. unsigned int i;
  1097. struct vme_dev *vdev;
  1098. struct vme_dev *tmp;
  1099. for (i = 0; i < ndevs; i++) {
  1100. vdev = kzalloc(sizeof(struct vme_dev), GFP_KERNEL);
  1101. if (!vdev) {
  1102. err = -ENOMEM;
  1103. goto err_devalloc;
  1104. }
  1105. vdev->num = i;
  1106. vdev->bridge = bridge;
  1107. vdev->dev.platform_data = drv;
  1108. vdev->dev.release = vme_dev_release;
  1109. vdev->dev.parent = bridge->parent;
  1110. vdev->dev.bus = &vme_bus_type;
  1111. dev_set_name(&vdev->dev, "%s.%u-%u", drv->name, bridge->num,
  1112. vdev->num);
  1113. err = device_register(&vdev->dev);
  1114. if (err)
  1115. goto err_reg;
  1116. if (vdev->dev.platform_data) {
  1117. list_add_tail(&vdev->drv_list, &drv->devices);
  1118. list_add_tail(&vdev->bridge_list, &bridge->devices);
  1119. } else
  1120. device_unregister(&vdev->dev);
  1121. }
  1122. return 0;
  1123. err_reg:
  1124. put_device(&vdev->dev);
  1125. kfree(vdev);
  1126. err_devalloc:
  1127. list_for_each_entry_safe(vdev, tmp, &drv->devices, drv_list) {
  1128. list_del(&vdev->drv_list);
  1129. list_del(&vdev->bridge_list);
  1130. device_unregister(&vdev->dev);
  1131. }
  1132. return err;
  1133. }
  1134. static int __vme_register_driver(struct vme_driver *drv, unsigned int ndevs)
  1135. {
  1136. struct vme_bridge *bridge;
  1137. int err = 0;
  1138. mutex_lock(&vme_buses_lock);
  1139. list_for_each_entry(bridge, &vme_bus_list, bus_list) {
  1140. /*
  1141. * This cannot cause trouble as we already have vme_buses_lock
  1142. * and if the bridge is removed, it will have to go through
  1143. * vme_unregister_bridge() to do it (which calls remove() on
  1144. * the bridge which in turn tries to acquire vme_buses_lock and
  1145. * will have to wait).
  1146. */
  1147. err = __vme_register_driver_bus(drv, bridge, ndevs);
  1148. if (err)
  1149. break;
  1150. }
  1151. mutex_unlock(&vme_buses_lock);
  1152. return err;
  1153. }
  1154. int vme_register_driver(struct vme_driver *drv, unsigned int ndevs)
  1155. {
  1156. int err;
  1157. drv->driver.name = drv->name;
  1158. drv->driver.bus = &vme_bus_type;
  1159. INIT_LIST_HEAD(&drv->devices);
  1160. err = driver_register(&drv->driver);
  1161. if (err)
  1162. return err;
  1163. err = __vme_register_driver(drv, ndevs);
  1164. if (err)
  1165. driver_unregister(&drv->driver);
  1166. return err;
  1167. }
  1168. EXPORT_SYMBOL(vme_register_driver);
  1169. void vme_unregister_driver(struct vme_driver *drv)
  1170. {
  1171. struct vme_dev *dev, *dev_tmp;
  1172. mutex_lock(&vme_buses_lock);
  1173. list_for_each_entry_safe(dev, dev_tmp, &drv->devices, drv_list) {
  1174. list_del(&dev->drv_list);
  1175. list_del(&dev->bridge_list);
  1176. device_unregister(&dev->dev);
  1177. }
  1178. mutex_unlock(&vme_buses_lock);
  1179. driver_unregister(&drv->driver);
  1180. }
  1181. EXPORT_SYMBOL(vme_unregister_driver);
  1182. /* - Bus Registration ------------------------------------------------------ */
/*
 * vme_bus_match - driver-core match callback for the VME bus.
 *
 * A device is a candidate for a driver only when its platform_data
 * points at that driver (set in __vme_register_driver_bus()).  The
 * driver's own match() hook then has the final say; on rejection the
 * platform_data link is cleared, which __vme_register_driver_bus()
 * detects after device_register() to drop the device again.
 * NOTE(review): a driver with no match() hook rejects every device -
 * confirm that is the intended default.
 */
static int vme_bus_match(struct device *dev, struct device_driver *drv)
{
	struct vme_driver *vme_drv;

	vme_drv = container_of(drv, struct vme_driver, driver);

	if (dev->platform_data == vme_drv) {
		struct vme_dev *vdev = dev_to_vme_dev(dev);

		if (vme_drv->match && vme_drv->match(vdev))
			return 1;

		/* Rejected: break the device<->driver link. */
		dev->platform_data = NULL;
	}
	return 0;
}
  1195. static int vme_bus_probe(struct device *dev)
  1196. {
  1197. int retval = -ENODEV;
  1198. struct vme_driver *driver;
  1199. struct vme_dev *vdev = dev_to_vme_dev(dev);
  1200. driver = dev->platform_data;
  1201. if (driver->probe != NULL)
  1202. retval = driver->probe(vdev);
  1203. return retval;
  1204. }
  1205. static int vme_bus_remove(struct device *dev)
  1206. {
  1207. int retval = -ENODEV;
  1208. struct vme_driver *driver;
  1209. struct vme_dev *vdev = dev_to_vme_dev(dev);
  1210. driver = dev->platform_data;
  1211. if (driver->remove != NULL)
  1212. retval = driver->remove(vdev);
  1213. return retval;
  1214. }
/*
 * The VME bus type: connects the driver core to the VME-specific
 * match/probe/remove callbacks defined above.
 */
struct bus_type vme_bus_type = {
	.name = "vme",
	.match = vme_bus_match,
	.probe = vme_bus_probe,
	.remove = vme_bus_remove,
};
EXPORT_SYMBOL(vme_bus_type);
/* Module init: register the VME bus type with the driver core. */
static int __init vme_init(void)
{
	return bus_register(&vme_bus_type);
}
/* Module exit: remove the VME bus type from the driver core. */
static void __exit vme_exit(void)
{
	bus_unregister(&vme_bus_type);
}
  1230. MODULE_DESCRIPTION("VME bridge driver framework");
  1231. MODULE_AUTHOR("Martyn Welch <martyn.welch@ge.com");
  1232. MODULE_LICENSE("GPL");
  1233. module_init(vme_init);
  1234. module_exit(vme_exit);