/* $Id: parport_share.c,v 1.15 1998/01/11 12:06:17 philip Exp $
 * Parallel-port resource manager code.
 *
 * Authors: David Campbell <campbell@tirian.che.curtin.edu.au>
 *          Tim Waugh <tim@cyberelk.demon.co.uk>
 *          Jose Renau <renau@acm.org>
 *          Philip Blundell <philb@gnu.org>
 *          Andrea Arcangeli
 *
 * based on work by Grant Guenther <grant@torque.net>
 * and Philip Blundell
 *
 * Any part of this program may be used in documents licensed under
 * the GNU Free Documentation License, Version 1.1 or any later version
 * published by the Free Software Foundation.
 */
#undef PARPORT_DEBUG_SHARING		/* undef for production */

#include <linux/module.h>
#include <linux/string.h>
#include <linux/threads.h>
#include <linux/parport.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/kmod.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>

#include <asm/irq.h>

#undef PARPORT_PARANOID

/* Default timeslice a device may hold the port before it can be preempted. */
#define PARPORT_DEFAULT_TIMESLICE	(HZ/5)

unsigned long parport_default_timeslice = PARPORT_DEFAULT_TIMESLICE;
int parport_default_spintime = DEFAULT_SPIN_TIME;

/* list of announced ports; walked under parportlist_lock */
static LIST_HEAD(portlist);
static DEFINE_SPINLOCK(parportlist_lock);

/* list of all allocated ports, sorted by ->number */
static LIST_HEAD(all_ports);
static DEFINE_SPINLOCK(full_list_lock);

/* registered device drivers; additions/removals and the attach/detach
 * callbacks are serialised by registration_lock */
static LIST_HEAD(drivers);

static DEFINE_MUTEX(registration_lock);
/* What you can do to a port that's gone away..  These stubs are swapped
 * in by parport_remove_port() so that device drivers still holding a
 * reference to the port call harmless no-ops (reads return 0, writes
 * transfer nothing) instead of calling into an unloaded lowlevel driver. */
static void dead_write_lines (struct parport *p, unsigned char b){}
static unsigned char dead_read_lines (struct parport *p) { return 0; }
static unsigned char dead_frob_lines (struct parport *p, unsigned char b,
			     unsigned char c) { return 0; }
static void dead_onearg (struct parport *p){}
static void dead_initstate (struct pardevice *d, struct parport_state *s) { }
static void dead_state (struct parport *p, struct parport_state *s) { }
static size_t dead_write (struct parport *p, const void *b, size_t l, int f)
{ return 0; }
static size_t dead_read (struct parport *p, void *b, size_t l, int f)
{ return 0; }

/* Replacement operations table installed on a dead port. */
static struct parport_operations dead_ops = {
	.write_data	= dead_write_lines,	/* data */
	.read_data	= dead_read_lines,

	.write_control	= dead_write_lines,	/* control */
	.read_control	= dead_read_lines,
	.frob_control	= dead_frob_lines,

	.read_status	= dead_read_lines,	/* status */

	.enable_irq	= dead_onearg,		/* enable_irq */
	.disable_irq	= dead_onearg,		/* disable_irq */

	.data_forward	= dead_onearg,		/* data_forward */
	.data_reverse	= dead_onearg,		/* data_reverse */

	.init_state	= dead_initstate,	/* init_state */
	.save_state	= dead_state,
	.restore_state	= dead_state,

	.epp_write_data	= dead_write,		/* epp */
	.epp_read_data	= dead_read,
	.epp_write_addr	= dead_write,
	.epp_read_addr	= dead_read,

	.ecp_write_data	= dead_write,		/* ecp */
	.ecp_read_data	= dead_read,
	.ecp_write_addr	= dead_write,

	.compat_write_data	= dead_write,	/* compat */
	.nibble_read_data	= dead_read,	/* nibble */
	.byte_read_data		= dead_read,	/* byte */

	.owner		= NULL,
};
  82. /* Call attach(port) for each registered driver. */
  83. static void attach_driver_chain(struct parport *port)
  84. {
  85. /* caller has exclusive registration_lock */
  86. struct parport_driver *drv;
  87. list_for_each_entry(drv, &drivers, list)
  88. drv->attach(port);
  89. }
  90. /* Call detach(port) for each registered driver. */
  91. static void detach_driver_chain(struct parport *port)
  92. {
  93. struct parport_driver *drv;
  94. /* caller has exclusive registration_lock */
  95. list_for_each_entry(drv, &drivers, list)
  96. drv->detach (port);
  97. }
/* Ask kmod for some lowlevel drivers. */
static void get_lowlevel_driver (void)
{
	/* There is no actual module called this: you should set
	 * up an alias for modutils. */
	request_module ("parport_lowlevel");
}
/**
 *	parport_register_driver - register a parallel port device driver
 *	@drv: structure describing the driver
 *
 *	This can be called by a parallel port device driver in order
 *	to receive notifications about ports being found in the
 *	system, as well as ports no longer available.
 *
 *	The @drv structure is allocated by the caller and must not be
 *	deallocated until after calling parport_unregister_driver().
 *
 *	The driver's attach() function may block.  The port that
 *	attach() is given will be valid for the duration of the
 *	callback, but if the driver wants to take a copy of the
 *	pointer it must call parport_get_port() to do so.  Calling
 *	parport_register_device() on that port will do this for you.
 *
 *	The driver's detach() function may block.  The port that
 *	detach() is given will be valid for the duration of the
 *	callback, but if the driver wants to take a copy of the
 *	pointer it must call parport_get_port() to do so.
 *
 *	Returns 0 on success.  Currently it always succeeds.
 **/
int parport_register_driver (struct parport_driver *drv)
{
	struct parport *port;

	/* If no ports exist yet, ask kmod to load a lowlevel driver;
	 * it may populate portlist before we take the lock below. */
	if (list_empty(&portlist))
		get_lowlevel_driver ();

	mutex_lock(&registration_lock);
	/* Tell the new driver about every port already announced... */
	list_for_each_entry(port, &portlist, list)
		drv->attach(port);
	/* ...then make it visible so future announcements reach it. */
	list_add(&drv->list, &drivers);
	mutex_unlock(&registration_lock);

	return 0;
}
/**
 *	parport_unregister_driver - deregister a parallel port device driver
 *	@drv: structure describing the driver that was given to
 *	      parport_register_driver()
 *
 *	This should be called by a parallel port device driver that
 *	has registered itself using parport_register_driver() when it
 *	is about to be unloaded.
 *
 *	When it returns, the driver's attach() routine will no longer
 *	be called, and for each port that attach() was called for, the
 *	detach() routine will have been called.
 *
 *	All the driver's attach() and detach() calls are guaranteed to have
 *	finished by the time this function returns.
 **/
void parport_unregister_driver (struct parport_driver *drv)
{
	struct parport *port;

	mutex_lock(&registration_lock);
	/* Unhook first so no new attach() can reach this driver, then
	 * balance every attach() with a detach() while still locked. */
	list_del_init(&drv->list);
	list_for_each_entry(port, &portlist, list)
		drv->detach(port);
	mutex_unlock(&registration_lock);
}
  166. static void free_port (struct parport *port)
  167. {
  168. int d;
  169. spin_lock(&full_list_lock);
  170. list_del(&port->full_list);
  171. spin_unlock(&full_list_lock);
  172. for (d = 0; d < 5; d++) {
  173. kfree(port->probe_info[d].class_name);
  174. kfree(port->probe_info[d].mfr);
  175. kfree(port->probe_info[d].model);
  176. kfree(port->probe_info[d].cmdset);
  177. kfree(port->probe_info[d].description);
  178. }
  179. kfree(port->name);
  180. kfree(port);
  181. }
/**
 *	parport_get_port - increment a port's reference count
 *	@port: the port
 *
 *	This ensures that a struct parport pointer remains valid
 *	until the matching parport_put_port() call.
 **/
struct parport *parport_get_port (struct parport *port)
{
	atomic_inc (&port->ref_count);
	return port;
}
  194. /**
  195. * parport_put_port - decrement a port's reference count
  196. * @port: the port
  197. *
  198. * This should be called once for each call to parport_get_port(),
  199. * once the port is no longer needed.
  200. **/
  201. void parport_put_port (struct parport *port)
  202. {
  203. if (atomic_dec_and_test (&port->ref_count))
  204. /* Can destroy it now. */
  205. free_port (port);
  206. return;
  207. }
  208. /**
  209. * parport_register_port - register a parallel port
  210. * @base: base I/O address
  211. * @irq: IRQ line
  212. * @dma: DMA channel
  213. * @ops: pointer to the port driver's port operations structure
  214. *
  215. * When a parallel port (lowlevel) driver finds a port that
  216. * should be made available to parallel port device drivers, it
  217. * should call parport_register_port(). The @base, @irq, and
  218. * @dma parameters are for the convenience of port drivers, and
  219. * for ports where they aren't meaningful needn't be set to
  220. * anything special. They can be altered afterwards by adjusting
  221. * the relevant members of the parport structure that is returned
  222. * and represents the port. They should not be tampered with
  223. * after calling parport_announce_port, however.
  224. *
  225. * If there are parallel port device drivers in the system that
  226. * have registered themselves using parport_register_driver(),
  227. * they are not told about the port at this time; that is done by
  228. * parport_announce_port().
  229. *
  230. * The @ops structure is allocated by the caller, and must not be
  231. * deallocated before calling parport_remove_port().
  232. *
  233. * If there is no memory to allocate a new parport structure,
  234. * this function will return %NULL.
  235. **/
  236. struct parport *parport_register_port(unsigned long base, int irq, int dma,
  237. struct parport_operations *ops)
  238. {
  239. struct list_head *l;
  240. struct parport *tmp;
  241. int num;
  242. int device;
  243. char *name;
  244. tmp = kmalloc(sizeof(struct parport), GFP_KERNEL);
  245. if (!tmp) {
  246. printk(KERN_WARNING "parport: memory squeeze\n");
  247. return NULL;
  248. }
  249. /* Init our structure */
  250. memset(tmp, 0, sizeof(struct parport));
  251. tmp->base = base;
  252. tmp->irq = irq;
  253. tmp->dma = dma;
  254. tmp->muxport = tmp->daisy = tmp->muxsel = -1;
  255. tmp->modes = 0;
  256. INIT_LIST_HEAD(&tmp->list);
  257. tmp->devices = tmp->cad = NULL;
  258. tmp->flags = 0;
  259. tmp->ops = ops;
  260. tmp->physport = tmp;
  261. memset (tmp->probe_info, 0, 5 * sizeof (struct parport_device_info));
  262. rwlock_init(&tmp->cad_lock);
  263. spin_lock_init(&tmp->waitlist_lock);
  264. spin_lock_init(&tmp->pardevice_lock);
  265. tmp->ieee1284.mode = IEEE1284_MODE_COMPAT;
  266. tmp->ieee1284.phase = IEEE1284_PH_FWD_IDLE;
  267. init_MUTEX_LOCKED (&tmp->ieee1284.irq); /* actually a semaphore at 0 */
  268. tmp->spintime = parport_default_spintime;
  269. atomic_set (&tmp->ref_count, 1);
  270. INIT_LIST_HEAD(&tmp->full_list);
  271. name = kmalloc(15, GFP_KERNEL);
  272. if (!name) {
  273. printk(KERN_ERR "parport: memory squeeze\n");
  274. kfree(tmp);
  275. return NULL;
  276. }
  277. /* Search for the lowest free parport number. */
  278. spin_lock(&full_list_lock);
  279. for (l = all_ports.next, num = 0; l != &all_ports; l = l->next, num++) {
  280. struct parport *p = list_entry(l, struct parport, full_list);
  281. if (p->number != num)
  282. break;
  283. }
  284. tmp->portnum = tmp->number = num;
  285. list_add_tail(&tmp->full_list, l);
  286. spin_unlock(&full_list_lock);
  287. /*
  288. * Now that the portnum is known finish doing the Init.
  289. */
  290. sprintf(name, "parport%d", tmp->portnum = tmp->number);
  291. tmp->name = name;
  292. for (device = 0; device < 5; device++)
  293. /* assume the worst */
  294. tmp->probe_info[device].class = PARPORT_CLASS_LEGACY;
  295. tmp->waithead = tmp->waittail = NULL;
  296. return tmp;
  297. }
/**
 *	parport_announce_port - tell device drivers about a parallel port
 *	@port: parallel port to announce
 *
 *	After a port driver has registered a parallel port with
 *	parport_register_port, and performed any necessary
 *	initialisation or adjustments, it should call
 *	parport_announce_port() in order to notify all device drivers
 *	that have called parport_register_driver().  Their attach()
 *	functions will be called, with @port as the parameter.
 **/
void parport_announce_port (struct parport *port)
{
	int i;

#ifdef CONFIG_PARPORT_1284
	/* Analyse the IEEE1284.3 topology of the port. */
	parport_daisy_init(port);
#endif

	if (!port->dev)
		printk(KERN_WARNING "%s: fix this legacy "
				"no-device port driver!\n",
				port->name);

	parport_proc_register(port);

	/* Lock order: registration_lock outside parportlist_lock. */
	mutex_lock(&registration_lock);
	spin_lock_irq(&parportlist_lock);
	list_add_tail(&port->list, &portlist);
	/* Mux slaves 1 and 2 (if present) become visible as ports too. */
	for (i = 1; i < 3; i++) {
		struct parport *slave = port->slaves[i-1];
		if (slave)
			list_add_tail(&slave->list, &portlist);
	}
	spin_unlock_irq(&parportlist_lock);

	/* Let drivers know that new port(s) has arrived. */
	attach_driver_chain (port);
	for (i = 1; i < 3; i++) {
		struct parport *slave = port->slaves[i-1];
		if (slave)
			attach_driver_chain(slave);
	}
	mutex_unlock(&registration_lock);
}
/**
 *	parport_remove_port - deregister a parallel port
 *	@port: parallel port to deregister
 *
 *	When a parallel port driver is forcibly unloaded, or a
 *	parallel port becomes inaccessible, the port driver must call
 *	this function in order to deal with device drivers that still
 *	want to use it.
 *
 *	The parport structure associated with the port has its
 *	operations structure replaced with one containing 'null'
 *	operations that return errors or just don't do anything.
 *
 *	Any drivers that have registered themselves using
 *	parport_register_driver() are notified that the port is no
 *	longer accessible by having their detach() routines called
 *	with @port as the parameter.
 **/
void parport_remove_port(struct parport *port)
{
	int i;

	mutex_lock(&registration_lock);

	/* Spread the word. */
	detach_driver_chain (port);

#ifdef CONFIG_PARPORT_1284
	/* Forget the IEEE1284.3 topology of the port. */
	parport_daisy_fini(port);
	for (i = 1; i < 3; i++) {
		struct parport *slave = port->slaves[i-1];
		if (!slave)
			continue;
		detach_driver_chain(slave);
		parport_daisy_fini(slave);
	}
#endif

	/* From here on, any driver still holding the port hits the
	 * harmless dead_ops stubs instead of the lowlevel driver. */
	port->ops = &dead_ops;
	spin_lock(&parportlist_lock);
	list_del_init(&port->list);
	for (i = 1; i < 3; i++) {
		struct parport *slave = port->slaves[i-1];
		if (slave)
			list_del_init(&slave->list);
	}
	spin_unlock(&parportlist_lock);

	mutex_unlock(&registration_lock);

	parport_proc_unregister(port);

	/* Drop the references the slaves held; the caller still owns
	 * its reference to @port itself. */
	for (i = 1; i < 3; i++) {
		struct parport *slave = port->slaves[i-1];
		if (slave)
			parport_put_port(slave);
	}
}
/**
 *	parport_register_device - register a device on a parallel port
 *	@port: port to which the device is attached
 *	@name: a name to refer to the device
 *	@pf: preemption callback
 *	@kf: kick callback (wake-up)
 *	@irq_func: interrupt handler
 *	@flags: registration flags
 *	@handle: data for callback functions
 *
 *	This function, called by parallel port device drivers,
 *	declares that a device is connected to a port, and tells the
 *	system all it needs to know.
 *
 *	The @name is allocated by the caller and must not be
 *	deallocated until the caller calls @parport_unregister_device
 *	for that device.
 *
 *	The preemption callback function, @pf, is called when this
 *	device driver has claimed access to the port but another
 *	device driver wants to use it.  It is given @handle as its
 *	parameter, and should return zero if it is willing for the
 *	system to release the port to another driver on its behalf.
 *	If it wants to keep control of the port it should return
 *	non-zero, and no action will be taken.  It is good manners for
 *	the driver to try to release the port at the earliest
 *	opportunity after its preemption callback rejects a preemption
 *	attempt.  Note that if a preemption callback is happy for
 *	preemption to go ahead, there is no need to release the port;
 *	it is done automatically.  This function may not block, as it
 *	may be called from interrupt context.  If the device driver
 *	does not support preemption, @pf can be %NULL.
 *
 *	The wake-up ("kick") callback function, @kf, is called when
 *	the port is available to be claimed for exclusive access; that
 *	is, parport_claim() is guaranteed to succeed when called from
 *	inside the wake-up callback function.  If the driver wants to
 *	claim the port it should do so; otherwise, it need not take
 *	any action.  This function may not block, as it may be called
 *	from interrupt context.  If the device driver does not want to
 *	be explicitly invited to claim the port in this way, @kf can
 *	be %NULL.
 *
 *	The interrupt handler, @irq_func, is called when an interrupt
 *	arrives from the parallel port.  Note that if a device driver
 *	wants to use interrupts it should use parport_enable_irq(),
 *	and can also check the irq member of the parport structure
 *	representing the port.
 *
 *	The parallel port (lowlevel) driver is the one that has called
 *	request_irq() and whose interrupt handler is called first.
 *	This handler does whatever needs to be done to the hardware to
 *	acknowledge the interrupt (for PC-style ports there is nothing
 *	special to be done).  It then tells the IEEE 1284 code about
 *	the interrupt, which may involve reacting to an IEEE 1284
 *	event depending on the current IEEE 1284 phase.  After this,
 *	it calls @irq_func.  Needless to say, @irq_func will be called
 *	from interrupt context, and may not block.
 *
 *	The %PARPORT_DEV_EXCL flag is for preventing port sharing, and
 *	so should only be used when sharing the port with other device
 *	drivers is impossible and would lead to incorrect behaviour.
 *	Use it sparingly!  Normally, @flags will be zero.
 *
 *	This function returns a pointer to a structure that represents
 *	the device on the port, or %NULL if there is not enough memory
 *	to allocate space for that structure.
 **/
struct pardevice *
parport_register_device(struct parport *port, const char *name,
			int (*pf)(void *), void (*kf)(void *),
			void (*irq_func)(void *),
			int flags, void *handle)
{
	struct pardevice *tmp;

	if (port->physport->flags & PARPORT_FLAG_EXCL) {
		/* An exclusive device is registered. */
		printk (KERN_DEBUG "%s: no more devices allowed\n",
			port->name);
		return NULL;
	}

	if (flags & PARPORT_DEV_LURK) {
		/* A lurking device must be able to be preempted and
		 * kicked, otherwise it could wedge the port forever. */
		if (!pf || !kf) {
			printk(KERN_INFO "%s: refused to register lurking device (%s) without callbacks\n", port->name, name);
			return NULL;
		}
	}

	/* We up our own module reference count, and that of the port
	   on which a device is to be registered, to ensure that
	   neither of us gets unloaded while we sleep in (e.g.)
	   kmalloc.
	 */
	if (!try_module_get(port->ops->owner)) {
		return NULL;
	}

	parport_get_port (port);

	tmp = kmalloc(sizeof(struct pardevice), GFP_KERNEL);
	if (tmp == NULL) {
		printk(KERN_WARNING "%s: memory squeeze, couldn't register %s.\n", port->name, name);
		goto out;
	}

	tmp->state = kmalloc(sizeof(struct parport_state), GFP_KERNEL);
	if (tmp->state == NULL) {
		printk(KERN_WARNING "%s: memory squeeze, couldn't register %s.\n", port->name, name);
		goto out_free_pardevice;
	}

	tmp->name = name;
	tmp->port = port;
	tmp->daisy = -1;
	tmp->preempt = pf;
	tmp->wakeup = kf;
	tmp->private = handle;
	tmp->flags = flags;
	tmp->irq_func = irq_func;
	tmp->waiting = 0;
	tmp->timeout = 5 * HZ;

	/* Chain this onto the list */
	tmp->prev = NULL;
	/*
	 * This function must not run from an irq handler so we don' t need
	 * to clear irq on the local CPU. -arca
	 */
	spin_lock(&port->physport->pardevice_lock);

	if (flags & PARPORT_DEV_EXCL) {
		/* Exclusive access is only grantable while the port has
		 * no other devices at all. */
		if (port->physport->devices) {
			spin_unlock (&port->physport->pardevice_lock);
			printk (KERN_DEBUG
				"%s: cannot grant exclusive access for "
				"device %s\n", port->name, name);
			goto out_free_all;
		}
		port->flags |= PARPORT_FLAG_EXCL;
	}

	tmp->next = port->physport->devices;
	wmb(); /* Make sure that tmp->next is written before it's
                  added to the list; see comments marked 'no locking
                  required' */
	if (port->physport->devices)
		port->physport->devices->prev = tmp;
	port->physport->devices = tmp;
	spin_unlock(&port->physport->pardevice_lock);

	init_waitqueue_head(&tmp->wait_q);
	tmp->timeslice = parport_default_timeslice;
	tmp->waitnext = tmp->waitprev = NULL;

	/*
	 * This has to be run as last thing since init_state may need other
	 * pardevice fields. -arca
	 */
	port->ops->init_state(tmp, tmp->state);
	parport_device_proc_register(tmp);
	return tmp;

 out_free_all:
	kfree(tmp->state);
 out_free_pardevice:
	kfree(tmp);
 out:
	/* Undo the refcounts taken before the allocations. */
	parport_put_port (port);
	module_put(port->ops->owner);

	return NULL;
}
/**
 *	parport_unregister_device - deregister a device on a parallel port
 *	@dev: pointer to structure representing device
 *
 *	This undoes the effect of parport_register_device().
 **/
void parport_unregister_device(struct pardevice *dev)
{
	struct parport *port;

#ifdef PARPORT_PARANOID
	if (dev == NULL) {
		printk(KERN_ERR "parport_unregister_device: passed NULL\n");
		return;
	}
#endif

	parport_device_proc_unregister(dev);

	port = dev->port->physport;

	/* A well-behaved driver releases before unregistering; cover
	 * for one that forgot. */
	if (port->cad == dev) {
		printk(KERN_DEBUG "%s: %s forgot to release port\n",
		       port->name, dev->name);
		parport_release (dev);
	}

	/* Unlink from the port's doubly-linked device list. */
	spin_lock(&port->pardevice_lock);
	if (dev->next)
		dev->next->prev = dev->prev;
	if (dev->prev)
		dev->prev->next = dev->next;
	else
		port->devices = dev->next;

	if (dev->flags & PARPORT_DEV_EXCL)
		port->flags &= ~PARPORT_FLAG_EXCL;

	spin_unlock(&port->pardevice_lock);

	/* Make sure we haven't left any pointers around in the wait
	 * list. */
	spin_lock (&port->waitlist_lock);
	if (dev->waitprev || dev->waitnext || port->waithead == dev) {
		if (dev->waitprev)
			dev->waitprev->waitnext = dev->waitnext;
		else
			port->waithead = dev->waitnext;
		if (dev->waitnext)
			dev->waitnext->waitprev = dev->waitprev;
		else
			port->waittail = dev->waitprev;
	}
	spin_unlock (&port->waitlist_lock);

	kfree(dev->state);
	kfree(dev);

	/* Release what parport_register_device() took. */
	module_put(port->ops->owner);
	parport_put_port (port);
}
  602. /**
  603. * parport_find_number - find a parallel port by number
  604. * @number: parallel port number
  605. *
  606. * This returns the parallel port with the specified number, or
  607. * %NULL if there is none.
  608. *
  609. * There is an implicit parport_get_port() done already; to throw
  610. * away the reference to the port that parport_find_number()
  611. * gives you, use parport_put_port().
  612. */
  613. struct parport *parport_find_number (int number)
  614. {
  615. struct parport *port, *result = NULL;
  616. if (list_empty(&portlist))
  617. get_lowlevel_driver ();
  618. spin_lock (&parportlist_lock);
  619. list_for_each_entry(port, &portlist, list) {
  620. if (port->number == number) {
  621. result = parport_get_port (port);
  622. break;
  623. }
  624. }
  625. spin_unlock (&parportlist_lock);
  626. return result;
  627. }
  628. /**
  629. * parport_find_base - find a parallel port by base address
  630. * @base: base I/O address
  631. *
  632. * This returns the parallel port with the specified base
  633. * address, or %NULL if there is none.
  634. *
  635. * There is an implicit parport_get_port() done already; to throw
  636. * away the reference to the port that parport_find_base()
  637. * gives you, use parport_put_port().
  638. */
  639. struct parport *parport_find_base (unsigned long base)
  640. {
  641. struct parport *port, *result = NULL;
  642. if (list_empty(&portlist))
  643. get_lowlevel_driver ();
  644. spin_lock (&parportlist_lock);
  645. list_for_each_entry(port, &portlist, list) {
  646. if (port->base == base) {
  647. result = parport_get_port (port);
  648. break;
  649. }
  650. }
  651. spin_unlock (&parportlist_lock);
  652. return result;
  653. }
/**
 *	parport_claim - claim access to a parallel port device
 *	@dev: pointer to structure representing a device on the port
 *
 *	This function will not block and so can be used from interrupt
 *	context.  If parport_claim() succeeds in claiming access to
 *	the port it returns zero and the port is available to use.  It
 *	may fail (returning non-zero) if the port is in use by another
 *	driver and that driver is not willing to relinquish control of
 *	the port.
 **/
int parport_claim(struct pardevice *dev)
{
	struct pardevice *oldcad;
	struct parport *port = dev->port->physport;
	unsigned long flags;

	if (port->cad == dev) {
		printk(KERN_INFO "%s: %s already owner\n",
		       dev->port->name,dev->name);
		return 0;
	}

	/* Preempt any current device */
	write_lock_irqsave (&port->cad_lock, flags);
	if ((oldcad = port->cad) != NULL) {
		if (oldcad->preempt) {
			if (oldcad->preempt(oldcad->private))
				goto blocked;
			/* NOTE(review): the hardware state is saved into the
			 * claimant's (dev's) state area, which is what the
			 * restore below reads back -- confirm this is the
			 * intended buffer rather than oldcad->state. */
			port->ops->save_state(port, dev->state);
		} else
			goto blocked;

		if (port->cad != oldcad) {
			/* I think we'll actually deadlock rather than
                           get here, but just in case.. */
			printk(KERN_WARNING
			       "%s: %s released port when preempted!\n",
			       port->name, oldcad->name);
			if (port->cad)
				goto blocked;
		}
	}

	/* Can't fail from now on, so mark ourselves as no longer waiting.  */
	if (dev->waiting & 1) {
		dev->waiting = 0;

		/* Take ourselves out of the wait list again.  */
		spin_lock_irq (&port->waitlist_lock);
		if (dev->waitprev)
			dev->waitprev->waitnext = dev->waitnext;
		else
			port->waithead = dev->waitnext;
		if (dev->waitnext)
			dev->waitnext->waitprev = dev->waitprev;
		else
			port->waittail = dev->waitprev;
		spin_unlock_irq (&port->waitlist_lock);
		dev->waitprev = dev->waitnext = NULL;
	}

	/* Now we do the change of devices */
	port->cad = dev;

#ifdef CONFIG_PARPORT_1284
	/* If it's a mux port, select it. */
	if (dev->port->muxport >= 0) {
		/* FIXME */
		port->muxsel = dev->port->muxport;
	}

	/* If it's a daisy chain device, select it. */
	if (dev->daisy >= 0) {
		/* This could be lazier. */
		if (!parport_daisy_select (port, dev->daisy,
					   IEEE1284_MODE_COMPAT))
			port->daisy = dev->daisy;
	}
#endif /* IEEE1284.3 support */

	/* Restore control registers */
	port->ops->restore_state(port, dev->state);
	write_unlock_irqrestore(&port->cad_lock, flags);
	dev->time = jiffies;
	return 0;

blocked:
	/* If this is the first time we tried to claim the port, register an
	   interest.  This is only allowed for devices sleeping in
	   parport_claim_or_block(), or those with a wakeup function.  */

	/* The cad_lock is still held for writing here */
	if (dev->waiting & 2 || dev->wakeup) {
		spin_lock (&port->waitlist_lock);
		if (test_and_set_bit(0, &dev->waiting) == 0) {
			/* First add ourselves to the end of the wait list. */
			dev->waitnext = NULL;
			dev->waitprev = port->waittail;
			if (port->waittail) {
				port->waittail->waitnext = dev;
				port->waittail = dev;
			} else
				port->waithead = port->waittail = dev;
		}
		spin_unlock (&port->waitlist_lock);
	}
	write_unlock_irqrestore (&port->cad_lock, flags);
	return -EAGAIN;
}
  753. /**
  754. * parport_claim_or_block - claim access to a parallel port device
  755. * @dev: pointer to structure representing a device on the port
  756. *
  757. * This behaves like parport_claim(), but will block if necessary
  758. * to wait for the port to be free. A return value of 1
  759. * indicates that it slept; 0 means that it succeeded without
  760. * needing to sleep. A negative error code indicates failure.
  761. **/
  762. int parport_claim_or_block(struct pardevice *dev)
  763. {
  764. int r;
  765. /* Signal to parport_claim() that we can wait even without a
  766. wakeup function. */
  767. dev->waiting = 2;
  768. /* Try to claim the port. If this fails, we need to sleep. */
  769. r = parport_claim(dev);
  770. if (r == -EAGAIN) {
  771. #ifdef PARPORT_DEBUG_SHARING
  772. printk(KERN_DEBUG "%s: parport_claim() returned -EAGAIN\n", dev->name);
  773. #endif
  774. /*
  775. * FIXME!!! Use the proper locking for dev->waiting,
  776. * and make this use the "wait_event_interruptible()"
  777. * interfaces. The cli/sti that used to be here
  778. * did nothing.
  779. *
  780. * See also parport_release()
  781. */
  782. /* If dev->waiting is clear now, an interrupt
  783. gave us the port and we would deadlock if we slept. */
  784. if (dev->waiting) {
  785. interruptible_sleep_on (&dev->wait_q);
  786. if (signal_pending (current)) {
  787. return -EINTR;
  788. }
  789. r = 1;
  790. } else {
  791. r = 0;
  792. #ifdef PARPORT_DEBUG_SHARING
  793. printk(KERN_DEBUG "%s: didn't sleep in parport_claim_or_block()\n",
  794. dev->name);
  795. #endif
  796. }
  797. #ifdef PARPORT_DEBUG_SHARING
  798. if (dev->port->physport->cad != dev)
  799. printk(KERN_DEBUG "%s: exiting parport_claim_or_block "
  800. "but %s owns port!\n", dev->name,
  801. dev->port->physport->cad ?
  802. dev->port->physport->cad->name:"nobody");
  803. #endif
  804. }
  805. dev->waiting = 0;
  806. return r;
  807. }
/**
 * parport_release - give up access to a parallel port device
 * @dev: pointer to structure representing parallel port device
 *
 * This function cannot fail, but it should not be called without
 * the port claimed.  Similarly, if the port is already claimed
 * you should not try claiming it again.
 **/
void parport_release(struct pardevice *dev)
{
	struct parport *port = dev->port->physport;
	struct pardevice *pd;
	unsigned long flags;

	/* Make sure that dev is the current device (cad == "current
	   active device").  Bail out noisily if a non-owner calls us. */
	write_lock_irqsave(&port->cad_lock, flags);
	if (port->cad != dev) {
		write_unlock_irqrestore (&port->cad_lock, flags);
		printk(KERN_WARNING "%s: %s tried to release parport "
		       "when not owner\n", port->name, dev->name);
		return;
	}

#ifdef CONFIG_PARPORT_1284
	/* If this is on a mux port, deselect it. */
	if (dev->port->muxport >= 0) {
		/* FIXME */
		port->muxsel = -1;
	}

	/* If this is a daisy device, deselect it. */
	if (dev->daisy >= 0) {
		parport_daisy_deselect_all (port);
		port->daisy = -1;
	}
#endif

	/* The port now has no owner; drop the lock before touching
	   hardware state and waking successors. */
	port->cad = NULL;
	write_unlock_irqrestore(&port->cad_lock, flags);

	/* Save control registers so they can be restored when this
	   device claims the port again. */
	port->ops->save_state(port, dev->state);

	/* If anybody is waiting, find out who's been there longest and
	   then wake them up. (Note: no locking required) */
	/* !!! LOCKING IS NEEDED HERE
	   NOTE(review): the wait list is walked without waitlist_lock,
	   contradicting the lock use in parport_claim() — pre-existing
	   race acknowledged by the original author. */
	for (pd = port->waithead; pd; pd = pd->waitnext) {
		if (pd->waiting & 2) { /* sleeping in claim_or_block */
			/* Claim on the sleeper's behalf, then wake it. */
			parport_claim(pd);
			if (waitqueue_active(&pd->wait_q))
				wake_up_interruptible(&pd->wait_q);
			return;
		} else if (pd->wakeup) {
			/* Driver supplied a wakeup callback; it is expected
			   to claim the port itself.  If someone claimed it,
			   we are done. */
			pd->wakeup(pd->private);
			if (dev->port->cad) /* racy but no matter */
				return;
		} else {
			/* On the wait list with neither a sleeper flag nor
			   a wakeup callback — should not happen. */
			printk(KERN_ERR "%s: don't know how to wake %s\n", port->name, pd->name);
		}
	}

	/* Nobody was waiting, so walk the list to see if anyone is
	   interested in being woken up. (Note: no locking required) */
	/* !!! LOCKING IS NEEDED HERE — same unlocked-walk caveat as above. */
	for (pd = port->devices; (port->cad == NULL) && pd; pd = pd->next) {
		if (pd->wakeup && pd != dev)
			pd->wakeup(pd->private);
	}
}
  870. irqreturn_t parport_irq_handler(int irq, void *dev_id)
  871. {
  872. struct parport *port = dev_id;
  873. parport_generic_irq(port);
  874. return IRQ_HANDLED;
  875. }
/* Exported symbols for modules: the public sharing, registration and
   lookup API used by low-level port drivers and device drivers. */
EXPORT_SYMBOL(parport_claim);
EXPORT_SYMBOL(parport_claim_or_block);
EXPORT_SYMBOL(parport_release);
EXPORT_SYMBOL(parport_register_port);
EXPORT_SYMBOL(parport_announce_port);
EXPORT_SYMBOL(parport_remove_port);
EXPORT_SYMBOL(parport_register_driver);
EXPORT_SYMBOL(parport_unregister_driver);
EXPORT_SYMBOL(parport_register_device);
EXPORT_SYMBOL(parport_unregister_device);
EXPORT_SYMBOL(parport_get_port);
EXPORT_SYMBOL(parport_put_port);
EXPORT_SYMBOL(parport_find_number);
EXPORT_SYMBOL(parport_find_base);
EXPORT_SYMBOL(parport_irq_handler);

MODULE_LICENSE("GPL");