/* drivers/parport/share.c */
/*
 * Parallel-port resource manager code.
 *
 * Authors: David Campbell <campbell@tirian.che.curtin.edu.au>
 *          Tim Waugh <tim@cyberelk.demon.co.uk>
 *          Jose Renau <renau@acm.org>
 *          Philip Blundell <philb@gnu.org>
 *          Andrea Arcangeli
 *
 * based on work by Grant Guenther <grant@torque.net>
 *          and Philip Blundell
 *
 * Any part of this program may be used in documents licensed under
 * the GNU Free Documentation License, Version 1.1 or any later version
 * published by the Free Software Foundation.
 */
#undef PARPORT_DEBUG_SHARING /* undef for production */

#include <linux/module.h>
#include <linux/string.h>
#include <linux/threads.h>
#include <linux/parport.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/kmod.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <asm/irq.h>

#undef PARPORT_PARANOID

/* Default time-slice a driver gets before it may be preempted (jiffies). */
#define PARPORT_DEFAULT_TIMESLICE	(HZ/5)

unsigned long parport_default_timeslice = PARPORT_DEFAULT_TIMESLICE;
int parport_default_spintime = DEFAULT_SPIN_TIME;

/* Announced ports; traversed/modified under parportlist_lock. */
static LIST_HEAD(portlist);
static DEFINE_SPINLOCK(parportlist_lock);

/* list of all allocated ports, sorted by ->number; under full_list_lock. */
static LIST_HEAD(all_ports);
static DEFINE_SPINLOCK(full_list_lock);

/* Registered device drivers; walked with registration_lock held. */
static LIST_HEAD(drivers);

/* Serialises driver/port (un)registration and attach/detach callbacks. */
static DEFINE_MUTEX(registration_lock);
  44. /* What you can do to a port that's gone away.. */
  45. static void dead_write_lines (struct parport *p, unsigned char b){}
  46. static unsigned char dead_read_lines (struct parport *p) { return 0; }
  47. static unsigned char dead_frob_lines (struct parport *p, unsigned char b,
  48. unsigned char c) { return 0; }
  49. static void dead_onearg (struct parport *p){}
  50. static void dead_initstate (struct pardevice *d, struct parport_state *s) { }
  51. static void dead_state (struct parport *p, struct parport_state *s) { }
  52. static size_t dead_write (struct parport *p, const void *b, size_t l, int f)
  53. { return 0; }
  54. static size_t dead_read (struct parport *p, void *b, size_t l, int f)
  55. { return 0; }
/* Operations vector for a dead port: every entry is a harmless stub. */
static struct parport_operations dead_ops = {
	.write_data	= dead_write_lines,	/* data */
	.read_data	= dead_read_lines,

	.write_control	= dead_write_lines,	/* control */
	.read_control	= dead_read_lines,
	.frob_control	= dead_frob_lines,

	.read_status	= dead_read_lines,	/* status */

	.enable_irq	= dead_onearg,		/* enable_irq */
	.disable_irq	= dead_onearg,		/* disable_irq */

	.data_forward	= dead_onearg,		/* data_forward */
	.data_reverse	= dead_onearg,		/* data_reverse */

	.init_state	= dead_initstate,	/* init_state */
	.save_state	= dead_state,
	.restore_state	= dead_state,

	.epp_write_data	= dead_write,		/* epp */
	.epp_read_data	= dead_read,
	.epp_write_addr	= dead_write,
	.epp_read_addr	= dead_read,

	.ecp_write_data	= dead_write,		/* ecp */
	.ecp_read_data	= dead_read,
	.ecp_write_addr	= dead_write,

	.compat_write_data	= dead_write,	/* compat */
	.nibble_read_data	= dead_read,	/* nibble */
	.byte_read_data		= dead_read,	/* byte */

	.owner		= NULL,
};
/* Call attach(port) for each registered driver. */
static void attach_driver_chain(struct parport *port)
{
	/* caller has exclusive registration_lock */
	struct parport_driver *drv;

	list_for_each_entry(drv, &drivers, list)
		drv->attach(port);
}
/* Call detach(port) for each registered driver. */
static void detach_driver_chain(struct parport *port)
{
	struct parport_driver *drv;

	/* caller has exclusive registration_lock */
	list_for_each_entry(drv, &drivers, list)
		drv->detach (port);
}
/* Ask kmod for some lowlevel drivers.  Called when a lookup finds the
 * port list empty, in the hope a module alias will populate it. */
static void get_lowlevel_driver (void)
{
	/* There is no actual module called this: you should set
	 * up an alias for modutils. */
	request_module ("parport_lowlevel");
}
/**
 * parport_register_driver - register a parallel port device driver
 * @drv: structure describing the driver
 *
 * This can be called by a parallel port device driver in order
 * to receive notifications about ports being found in the
 * system, as well as ports no longer available.
 *
 * The @drv structure is allocated by the caller and must not be
 * deallocated until after calling parport_unregister_driver().
 *
 * The driver's attach() function may block.  The port that
 * attach() is given will be valid for the duration of the
 * callback, but if the driver wants to take a copy of the
 * pointer it must call parport_get_port() to do so.  Calling
 * parport_register_device() on that port will do this for you.
 *
 * The driver's detach() function may block.  The port that
 * detach() is given will be valid for the duration of the
 * callback, but if the driver wants to take a copy of the
 * pointer it must call parport_get_port() to do so.
 *
 * Returns 0 on success.  Currently it always succeeds.
 **/
int parport_register_driver (struct parport_driver *drv)
{
	struct parport *port;

	/* No ports yet: give a lowlevel driver a chance to load. */
	if (list_empty(&portlist))
		get_lowlevel_driver ();

	mutex_lock(&registration_lock);
	/* Tell the new driver about every port already announced,
	 * then make it visible for future announcements. */
	list_for_each_entry(port, &portlist, list)
		drv->attach(port);
	list_add(&drv->list, &drivers);
	mutex_unlock(&registration_lock);

	return 0;
}
/**
 * parport_unregister_driver - deregister a parallel port device driver
 * @drv: structure describing the driver that was given to
 *       parport_register_driver()
 *
 * This should be called by a parallel port device driver that
 * has registered itself using parport_register_driver() when it
 * is about to be unloaded.
 *
 * When it returns, the driver's attach() routine will no longer
 * be called, and for each port that attach() was called for, the
 * detach() routine will have been called.
 *
 * All the driver's attach() and detach() calls are guaranteed to have
 * finished by the time this function returns.
 **/
void parport_unregister_driver (struct parport_driver *drv)
{
	struct parport *port;

	mutex_lock(&registration_lock);
	/* Remove from the driver list first, then run detach() for
	 * every port it was told about -- all under the lock, so no
	 * attach() can race with these detach() calls. */
	list_del_init(&drv->list);
	list_for_each_entry(port, &portlist, list)
		drv->detach(port);
	mutex_unlock(&registration_lock);
}
/* Final teardown once the last reference to @port is dropped: unlink
 * it from the global numbering list and release all owned memory. */
static void free_port (struct parport *port)
{
	int d;

	spin_lock(&full_list_lock);
	list_del(&port->full_list);
	spin_unlock(&full_list_lock);

	/* 5 presumably matches the size of ->probe_info[] (also used
	 * in parport_register_port) -- TODO confirm against the
	 * struct parport definition. */
	for (d = 0; d < 5; d++) {
		kfree(port->probe_info[d].class_name);
		kfree(port->probe_info[d].mfr);
		kfree(port->probe_info[d].model);
		kfree(port->probe_info[d].cmdset);
		kfree(port->probe_info[d].description);
	}

	kfree(port->name);
	kfree(port);
}
/**
 * parport_get_port - increment a port's reference count
 * @port: the port
 *
 * This ensures that a struct parport pointer remains valid
 * until the matching parport_put_port() call.
 **/
struct parport *parport_get_port (struct parport *port)
{
	atomic_inc (&port->ref_count);
	/* Returns @port so callers can use it inline. */
	return port;
}
  194. /**
  195. * parport_put_port - decrement a port's reference count
  196. * @port: the port
  197. *
  198. * This should be called once for each call to parport_get_port(),
  199. * once the port is no longer needed.
  200. **/
  201. void parport_put_port (struct parport *port)
  202. {
  203. if (atomic_dec_and_test (&port->ref_count))
  204. /* Can destroy it now. */
  205. free_port (port);
  206. return;
  207. }
  208. /**
  209. * parport_register_port - register a parallel port
  210. * @base: base I/O address
  211. * @irq: IRQ line
  212. * @dma: DMA channel
  213. * @ops: pointer to the port driver's port operations structure
  214. *
  215. * When a parallel port (lowlevel) driver finds a port that
  216. * should be made available to parallel port device drivers, it
  217. * should call parport_register_port(). The @base, @irq, and
  218. * @dma parameters are for the convenience of port drivers, and
  219. * for ports where they aren't meaningful needn't be set to
  220. * anything special. They can be altered afterwards by adjusting
  221. * the relevant members of the parport structure that is returned
  222. * and represents the port. They should not be tampered with
  223. * after calling parport_announce_port, however.
  224. *
  225. * If there are parallel port device drivers in the system that
  226. * have registered themselves using parport_register_driver(),
  227. * they are not told about the port at this time; that is done by
  228. * parport_announce_port().
  229. *
  230. * The @ops structure is allocated by the caller, and must not be
  231. * deallocated before calling parport_remove_port().
  232. *
  233. * If there is no memory to allocate a new parport structure,
  234. * this function will return %NULL.
  235. **/
  236. struct parport *parport_register_port(unsigned long base, int irq, int dma,
  237. struct parport_operations *ops)
  238. {
  239. struct list_head *l;
  240. struct parport *tmp;
  241. int num;
  242. int device;
  243. char *name;
  244. tmp = kzalloc(sizeof(struct parport), GFP_KERNEL);
  245. if (!tmp) {
  246. printk(KERN_WARNING "parport: memory squeeze\n");
  247. return NULL;
  248. }
  249. /* Init our structure */
  250. tmp->base = base;
  251. tmp->irq = irq;
  252. tmp->dma = dma;
  253. tmp->muxport = tmp->daisy = tmp->muxsel = -1;
  254. tmp->modes = 0;
  255. INIT_LIST_HEAD(&tmp->list);
  256. tmp->devices = tmp->cad = NULL;
  257. tmp->flags = 0;
  258. tmp->ops = ops;
  259. tmp->physport = tmp;
  260. memset (tmp->probe_info, 0, 5 * sizeof (struct parport_device_info));
  261. rwlock_init(&tmp->cad_lock);
  262. spin_lock_init(&tmp->waitlist_lock);
  263. spin_lock_init(&tmp->pardevice_lock);
  264. tmp->ieee1284.mode = IEEE1284_MODE_COMPAT;
  265. tmp->ieee1284.phase = IEEE1284_PH_FWD_IDLE;
  266. sema_init(&tmp->ieee1284.irq, 0);
  267. tmp->spintime = parport_default_spintime;
  268. atomic_set (&tmp->ref_count, 1);
  269. INIT_LIST_HEAD(&tmp->full_list);
  270. name = kmalloc(15, GFP_KERNEL);
  271. if (!name) {
  272. printk(KERN_ERR "parport: memory squeeze\n");
  273. kfree(tmp);
  274. return NULL;
  275. }
  276. /* Search for the lowest free parport number. */
  277. spin_lock(&full_list_lock);
  278. for (l = all_ports.next, num = 0; l != &all_ports; l = l->next, num++) {
  279. struct parport *p = list_entry(l, struct parport, full_list);
  280. if (p->number != num)
  281. break;
  282. }
  283. tmp->portnum = tmp->number = num;
  284. list_add_tail(&tmp->full_list, l);
  285. spin_unlock(&full_list_lock);
  286. /*
  287. * Now that the portnum is known finish doing the Init.
  288. */
  289. sprintf(name, "parport%d", tmp->portnum = tmp->number);
  290. tmp->name = name;
  291. for (device = 0; device < 5; device++)
  292. /* assume the worst */
  293. tmp->probe_info[device].class = PARPORT_CLASS_LEGACY;
  294. tmp->waithead = tmp->waittail = NULL;
  295. return tmp;
  296. }
/**
 * parport_announce_port - tell device drivers about a parallel port
 * @port: parallel port to announce
 *
 * After a port driver has registered a parallel port with
 * parport_register_port, and performed any necessary
 * initialisation or adjustments, it should call
 * parport_announce_port() in order to notify all device drivers
 * that have called parport_register_driver().  Their attach()
 * functions will be called, with @port as the parameter.
 **/
void parport_announce_port (struct parport *port)
{
	int i;

#ifdef CONFIG_PARPORT_1284
	/* Analyse the IEEE1284.3 topology of the port. */
	parport_daisy_init(port);
#endif

	if (!port->dev)
		printk(KERN_WARNING "%s: fix this legacy "
				"no-device port driver!\n",
				port->name);

	parport_proc_register(port);

	mutex_lock(&registration_lock);
	spin_lock_irq(&parportlist_lock);
	list_add_tail(&port->list, &portlist);
	/* A mux port may carry slave ports; publish those too.
	 * NOTE(review): the loop assumes slaves[] holds 2 entries --
	 * confirm against struct parport. */
	for (i = 1; i < 3; i++) {
		struct parport *slave = port->slaves[i-1];
		if (slave)
			list_add_tail(&slave->list, &portlist);
	}
	spin_unlock_irq(&parportlist_lock);

	/* Let drivers know that new port(s) has arrived. */
	attach_driver_chain (port);
	for (i = 1; i < 3; i++) {
		struct parport *slave = port->slaves[i-1];
		if (slave)
			attach_driver_chain(slave);
	}
	mutex_unlock(&registration_lock);
}
/**
 * parport_remove_port - deregister a parallel port
 * @port: parallel port to deregister
 *
 * When a parallel port driver is forcibly unloaded, or a
 * parallel port becomes inaccessible, the port driver must call
 * this function in order to deal with device drivers that still
 * want to use it.
 *
 * The parport structure associated with the port has its
 * operations structure replaced with one containing 'null'
 * operations that return errors or just don't do anything.
 *
 * Any drivers that have registered themselves using
 * parport_register_driver() are notified that the port is no
 * longer accessible by having their detach() routines called
 * with @port as the parameter.
 **/
void parport_remove_port(struct parport *port)
{
	int i;

	mutex_lock(&registration_lock);

	/* Spread the word. */
	detach_driver_chain (port);

#ifdef CONFIG_PARPORT_1284
	/* Forget the IEEE1284.3 topology of the port. */
	parport_daisy_fini(port);
	for (i = 1; i < 3; i++) {
		struct parport *slave = port->slaves[i-1];
		if (!slave)
			continue;
		detach_driver_chain(slave);
		parport_daisy_fini(slave);
	}
#endif

	/* From now on any stale user of ->ops hits harmless stubs. */
	port->ops = &dead_ops;
	spin_lock(&parportlist_lock);
	list_del_init(&port->list);
	for (i = 1; i < 3; i++) {
		struct parport *slave = port->slaves[i-1];
		if (slave)
			list_del_init(&slave->list);
	}
	spin_unlock(&parportlist_lock);

	mutex_unlock(&registration_lock);

	parport_proc_unregister(port);

	/* Drop the references held on any slave ports. */
	for (i = 1; i < 3; i++) {
		struct parport *slave = port->slaves[i-1];
		if (slave)
			parport_put_port(slave);
	}
}
/**
 * parport_register_device - register a device on a parallel port
 * @port: port to which the device is attached
 * @name: a name to refer to the device
 * @pf: preemption callback
 * @kf: kick callback (wake-up)
 * @irq_func: interrupt handler
 * @flags: registration flags
 * @handle: data for callback functions
 *
 * This function, called by parallel port device drivers,
 * declares that a device is connected to a port, and tells the
 * system all it needs to know.
 *
 * The @name is allocated by the caller and must not be
 * deallocated until the caller calls @parport_unregister_device
 * for that device.
 *
 * The preemption callback function, @pf, is called when this
 * device driver has claimed access to the port but another
 * device driver wants to use it.  It is given @handle as its
 * parameter, and should return zero if it is willing for the
 * system to release the port to another driver on its behalf.
 * If it wants to keep control of the port it should return
 * non-zero, and no action will be taken.  It is good manners for
 * the driver to try to release the port at the earliest
 * opportunity after its preemption callback rejects a preemption
 * attempt.  Note that if a preemption callback is happy for
 * preemption to go ahead, there is no need to release the port;
 * it is done automatically.  This function may not block, as it
 * may be called from interrupt context.  If the device driver
 * does not support preemption, @pf can be %NULL.
 *
 * The wake-up ("kick") callback function, @kf, is called when
 * the port is available to be claimed for exclusive access; that
 * is, parport_claim() is guaranteed to succeed when called from
 * inside the wake-up callback function.  If the driver wants to
 * claim the port it should do so; otherwise, it need not take
 * any action.  This function may not block, as it may be called
 * from interrupt context.  If the device driver does not want to
 * be explicitly invited to claim the port in this way, @kf can
 * be %NULL.
 *
 * The interrupt handler, @irq_func, is called when an interrupt
 * arrives from the parallel port.  Note that if a device driver
 * wants to use interrupts it should use parport_enable_irq(),
 * and can also check the irq member of the parport structure
 * representing the port.
 *
 * The parallel port (lowlevel) driver is the one that has called
 * request_irq() and whose interrupt handler is called first.
 * This handler does whatever needs to be done to the hardware to
 * acknowledge the interrupt (for PC-style ports there is nothing
 * special to be done).  It then tells the IEEE 1284 code about
 * the interrupt, which may involve reacting to an IEEE 1284
 * event depending on the current IEEE 1284 phase.  After this,
 * it calls @irq_func.  Needless to say, @irq_func will be called
 * from interrupt context, and may not block.
 *
 * The %PARPORT_DEV_EXCL flag is for preventing port sharing, and
 * so should only be used when sharing the port with other device
 * drivers is impossible and would lead to incorrect behaviour.
 * Use it sparingly!  Normally, @flags will be zero.
 *
 * This function returns a pointer to a structure that represents
 * the device on the port, or %NULL if there is not enough memory
 * to allocate space for that structure.
 **/
struct pardevice *
parport_register_device(struct parport *port, const char *name,
			int (*pf)(void *), void (*kf)(void *),
			void (*irq_func)(void *),
			int flags, void *handle)
{
	struct pardevice *tmp;

	if (port->physport->flags & PARPORT_FLAG_EXCL) {
		/* An exclusive device is registered. */
		printk (KERN_DEBUG "%s: no more devices allowed\n",
			port->name);
		return NULL;
	}

	/* A lurking device must be able to be preempted and kicked. */
	if (flags & PARPORT_DEV_LURK) {
		if (!pf || !kf) {
			printk(KERN_INFO "%s: refused to register lurking device (%s) without callbacks\n", port->name, name);
			return NULL;
		}
	}

	/* We up our own module reference count, and that of the port
	   on which a device is to be registered, to ensure that
	   neither of us gets unloaded while we sleep in (e.g.)
	   kmalloc.
	 */
	if (!try_module_get(port->ops->owner)) {
		return NULL;
	}

	parport_get_port (port);

	tmp = kmalloc(sizeof(struct pardevice), GFP_KERNEL);
	if (tmp == NULL) {
		printk(KERN_WARNING "%s: memory squeeze, couldn't register %s.\n", port->name, name);
		goto out;
	}

	tmp->state = kmalloc(sizeof(struct parport_state), GFP_KERNEL);
	if (tmp->state == NULL) {
		printk(KERN_WARNING "%s: memory squeeze, couldn't register %s.\n", port->name, name);
		goto out_free_pardevice;
	}

	tmp->name = name;
	tmp->port = port;
	tmp->daisy = -1;
	tmp->preempt = pf;
	tmp->wakeup = kf;
	tmp->private = handle;
	tmp->flags = flags;
	tmp->irq_func = irq_func;
	tmp->waiting = 0;
	tmp->timeout = 5 * HZ;

	/* Chain this onto the list */
	tmp->prev = NULL;
	/*
	 * This function must not run from an irq handler so we don' t need
	 * to clear irq on the local CPU. -arca
	 */
	spin_lock(&port->physport->pardevice_lock);

	if (flags & PARPORT_DEV_EXCL) {
		if (port->physport->devices) {
			spin_unlock (&port->physport->pardevice_lock);
			printk (KERN_DEBUG
				"%s: cannot grant exclusive access for "
				"device %s\n", port->name, name);
			goto out_free_all;
		}
		/* NOTE(review): the check above reads physport->flags
		 * but the flag is set on port->flags -- for a mux slave
		 * these may differ; confirm this is intentional. */
		port->flags |= PARPORT_FLAG_EXCL;
	}

	tmp->next = port->physport->devices;
	wmb(); /* Make sure that tmp->next is written before it's
	          added to the list; see comments marked 'no locking
	          required' */
	if (port->physport->devices)
		port->physport->devices->prev = tmp;
	port->physport->devices = tmp;
	spin_unlock(&port->physport->pardevice_lock);

	init_waitqueue_head(&tmp->wait_q);
	tmp->timeslice = parport_default_timeslice;
	tmp->waitnext = tmp->waitprev = NULL;

	/*
	 * This has to be run as last thing since init_state may need other
	 * pardevice fields. -arca
	 */
	port->ops->init_state(tmp, tmp->state);
	/* First device on the port also becomes the /proc device. */
	if (!test_and_set_bit(PARPORT_DEVPROC_REGISTERED, &port->devflags)) {
		port->proc_device = tmp;
		parport_device_proc_register(tmp);
	}
	return tmp;

 out_free_all:
	kfree(tmp->state);
 out_free_pardevice:
	kfree(tmp);
 out:
	parport_put_port (port);
	module_put(port->ops->owner);

	return NULL;
}
/**
 * parport_unregister_device - deregister a device on a parallel port
 * @dev: pointer to structure representing device
 *
 * This undoes the effect of parport_register_device().
 **/
void parport_unregister_device(struct pardevice *dev)
{
	struct parport *port;

#ifdef PARPORT_PARANOID
	if (dev == NULL) {
		printk(KERN_ERR "parport_unregister_device: passed NULL\n");
		return;
	}
#endif

	port = dev->port->physport;

	/* Drop the /proc entry if this device owned it. */
	if (port->proc_device == dev) {
		port->proc_device = NULL;
		clear_bit(PARPORT_DEVPROC_REGISTERED, &port->devflags);
		parport_device_proc_unregister(dev);
	}

	/* Force a release if the driver forgot to do it. */
	if (port->cad == dev) {
		printk(KERN_DEBUG "%s: %s forgot to release port\n",
		       port->name, dev->name);
		parport_release (dev);
	}

	/* Unlink from the port's device list. */
	spin_lock(&port->pardevice_lock);
	if (dev->next)
		dev->next->prev = dev->prev;
	if (dev->prev)
		dev->prev->next = dev->next;
	else
		port->devices = dev->next;

	if (dev->flags & PARPORT_DEV_EXCL)
		port->flags &= ~PARPORT_FLAG_EXCL;

	spin_unlock(&port->pardevice_lock);

	/* Make sure we haven't left any pointers around in the wait
	 * list. */
	spin_lock_irq(&port->waitlist_lock);
	if (dev->waitprev || dev->waitnext || port->waithead == dev) {
		if (dev->waitprev)
			dev->waitprev->waitnext = dev->waitnext;
		else
			port->waithead = dev->waitnext;
		if (dev->waitnext)
			dev->waitnext->waitprev = dev->waitprev;
		else
			port->waittail = dev->waitprev;
	}
	spin_unlock_irq(&port->waitlist_lock);

	kfree(dev->state);
	kfree(dev);

	module_put(port->ops->owner);
	parport_put_port (port);
}
  608. /**
  609. * parport_find_number - find a parallel port by number
  610. * @number: parallel port number
  611. *
  612. * This returns the parallel port with the specified number, or
  613. * %NULL if there is none.
  614. *
  615. * There is an implicit parport_get_port() done already; to throw
  616. * away the reference to the port that parport_find_number()
  617. * gives you, use parport_put_port().
  618. */
  619. struct parport *parport_find_number (int number)
  620. {
  621. struct parport *port, *result = NULL;
  622. if (list_empty(&portlist))
  623. get_lowlevel_driver ();
  624. spin_lock (&parportlist_lock);
  625. list_for_each_entry(port, &portlist, list) {
  626. if (port->number == number) {
  627. result = parport_get_port (port);
  628. break;
  629. }
  630. }
  631. spin_unlock (&parportlist_lock);
  632. return result;
  633. }
  634. /**
  635. * parport_find_base - find a parallel port by base address
  636. * @base: base I/O address
  637. *
  638. * This returns the parallel port with the specified base
  639. * address, or %NULL if there is none.
  640. *
  641. * There is an implicit parport_get_port() done already; to throw
  642. * away the reference to the port that parport_find_base()
  643. * gives you, use parport_put_port().
  644. */
  645. struct parport *parport_find_base (unsigned long base)
  646. {
  647. struct parport *port, *result = NULL;
  648. if (list_empty(&portlist))
  649. get_lowlevel_driver ();
  650. spin_lock (&parportlist_lock);
  651. list_for_each_entry(port, &portlist, list) {
  652. if (port->base == base) {
  653. result = parport_get_port (port);
  654. break;
  655. }
  656. }
  657. spin_unlock (&parportlist_lock);
  658. return result;
  659. }
/**
 * parport_claim - claim access to a parallel port device
 * @dev: pointer to structure representing a device on the port
 *
 * This function will not block and so can be used from interrupt
 * context.  If parport_claim() succeeds in claiming access to
 * the port it returns zero and the port is available to use.  It
 * may fail (returning non-zero) if the port is in use by another
 * driver and that driver is not willing to relinquish control of
 * the port.
 **/
int parport_claim(struct pardevice *dev)
{
	struct pardevice *oldcad;
	struct parport *port = dev->port->physport;
	unsigned long flags;

	if (port->cad == dev) {
		printk(KERN_INFO "%s: %s already owner\n",
		       dev->port->name,dev->name);
		return 0;
	}

	/* Preempt any current device */
	write_lock_irqsave (&port->cad_lock, flags);
	if ((oldcad = port->cad) != NULL) {
		if (oldcad->preempt) {
			/* Non-zero from preempt() means the current
			 * owner refuses to give up the port. */
			if (oldcad->preempt(oldcad->private))
				goto blocked;
			port->ops->save_state(port, dev->state);
		} else
			goto blocked;

		if (port->cad != oldcad) {
			/* I think we'll actually deadlock rather than
			   get here, but just in case.. */
			printk(KERN_WARNING
			       "%s: %s released port when preempted!\n",
			       port->name, oldcad->name);
			if (port->cad)
				goto blocked;
		}
	}

	/* Can't fail from now on, so mark ourselves as no longer waiting.  */
	if (dev->waiting & 1) {
		dev->waiting = 0;

		/* Take ourselves out of the wait list again.  */
		spin_lock_irq (&port->waitlist_lock);
		if (dev->waitprev)
			dev->waitprev->waitnext = dev->waitnext;
		else
			port->waithead = dev->waitnext;
		if (dev->waitnext)
			dev->waitnext->waitprev = dev->waitprev;
		else
			port->waittail = dev->waitprev;
		spin_unlock_irq (&port->waitlist_lock);
		dev->waitprev = dev->waitnext = NULL;
	}

	/* Now we do the change of devices */
	port->cad = dev;

#ifdef CONFIG_PARPORT_1284
	/* If it's a mux port, select it. */
	if (dev->port->muxport >= 0) {
		/* FIXME */
		port->muxsel = dev->port->muxport;
	}

	/* If it's a daisy chain device, select it. */
	if (dev->daisy >= 0) {
		/* This could be lazier. */
		if (!parport_daisy_select (port, dev->daisy,
					   IEEE1284_MODE_COMPAT))
			port->daisy = dev->daisy;
	}
#endif /* IEEE1284.3 support */

	/* Restore control registers */
	port->ops->restore_state(port, dev->state);
	write_unlock_irqrestore(&port->cad_lock, flags);
	dev->time = jiffies;
	return 0;

blocked:
	/* If this is the first time we tried to claim the port, register an
	   interest.  This is only allowed for devices sleeping in
	   parport_claim_or_block(), or those with a wakeup function.  */

	/* The cad_lock is still held for writing here */
	if (dev->waiting & 2 || dev->wakeup) {
		spin_lock (&port->waitlist_lock);
		if (test_and_set_bit(0, &dev->waiting) == 0) {
			/* First add ourselves to the end of the wait list. */
			dev->waitnext = NULL;
			dev->waitprev = port->waittail;
			if (port->waittail) {
				port->waittail->waitnext = dev;
				port->waittail = dev;
			} else
				port->waithead = port->waittail = dev;
		}
		spin_unlock (&port->waitlist_lock);
	}
	write_unlock_irqrestore (&port->cad_lock, flags);
	return -EAGAIN;
}
  759. /**
  760. * parport_claim_or_block - claim access to a parallel port device
  761. * @dev: pointer to structure representing a device on the port
  762. *
  763. * This behaves like parport_claim(), but will block if necessary
  764. * to wait for the port to be free. A return value of 1
  765. * indicates that it slept; 0 means that it succeeded without
  766. * needing to sleep. A negative error code indicates failure.
  767. **/
  768. int parport_claim_or_block(struct pardevice *dev)
  769. {
  770. int r;
  771. /* Signal to parport_claim() that we can wait even without a
  772. wakeup function. */
  773. dev->waiting = 2;
  774. /* Try to claim the port. If this fails, we need to sleep. */
  775. r = parport_claim(dev);
  776. if (r == -EAGAIN) {
  777. #ifdef PARPORT_DEBUG_SHARING
  778. printk(KERN_DEBUG "%s: parport_claim() returned -EAGAIN\n", dev->name);
  779. #endif
  780. /*
  781. * FIXME!!! Use the proper locking for dev->waiting,
  782. * and make this use the "wait_event_interruptible()"
  783. * interfaces. The cli/sti that used to be here
  784. * did nothing.
  785. *
  786. * See also parport_release()
  787. */
  788. /* If dev->waiting is clear now, an interrupt
  789. gave us the port and we would deadlock if we slept. */
  790. if (dev->waiting) {
  791. interruptible_sleep_on (&dev->wait_q);
  792. if (signal_pending (current)) {
  793. return -EINTR;
  794. }
  795. r = 1;
  796. } else {
  797. r = 0;
  798. #ifdef PARPORT_DEBUG_SHARING
  799. printk(KERN_DEBUG "%s: didn't sleep in parport_claim_or_block()\n",
  800. dev->name);
  801. #endif
  802. }
  803. #ifdef PARPORT_DEBUG_SHARING
  804. if (dev->port->physport->cad != dev)
  805. printk(KERN_DEBUG "%s: exiting parport_claim_or_block "
  806. "but %s owns port!\n", dev->name,
  807. dev->port->physport->cad ?
  808. dev->port->physport->cad->name:"nobody");
  809. #endif
  810. }
  811. dev->waiting = 0;
  812. return r;
  813. }
  814. /**
  815. * parport_release - give up access to a parallel port device
  816. * @dev: pointer to structure representing parallel port device
  817. *
  818. * This function cannot fail, but it should not be called without
  819. * the port claimed. Similarly, if the port is already claimed
  820. * you should not try claiming it again.
  821. **/
void parport_release(struct pardevice *dev)
{
	struct parport *port = dev->port->physport;
	struct pardevice *pd;
	unsigned long flags;

	/* Make sure that dev is the current device: only the current
	   owner (port->cad) may release the port. */
	write_lock_irqsave(&port->cad_lock, flags);
	if (port->cad != dev) {
		write_unlock_irqrestore (&port->cad_lock, flags);
		printk(KERN_WARNING "%s: %s tried to release parport "
		       "when not owner\n", port->name, dev->name);
		return;
	}

#ifdef CONFIG_PARPORT_1284
	/* If this is on a mux port, deselect it. */
	if (dev->port->muxport >= 0) {
		/* FIXME */
		port->muxsel = -1;
	}

	/* If this is a daisy device, deselect it. */
	if (dev->daisy >= 0) {
		parport_daisy_deselect_all (port);
		port->daisy = -1;
	}
#endif

	/* Drop ownership while still holding cad_lock, so a concurrent
	   parport_claim() sees the port as free. */
	port->cad = NULL;
	write_unlock_irqrestore(&port->cad_lock, flags);

	/* Save control registers so they can be restored when this
	   device next claims the port. */
	port->ops->save_state(port, dev->state);

	/* If anybody is waiting, find out who's been there longest and
	   then wake them up. (Note: no locking required) */
	/* !!! LOCKING IS NEEDED HERE */
	for (pd = port->waithead; pd; pd = pd->waitnext) {
		if (pd->waiting & 2) { /* sleeping in claim_or_block */
			/* Hand the port over directly, then wake the
			   sleeper in parport_claim_or_block(). */
			parport_claim(pd);
			if (waitqueue_active(&pd->wait_q))
				wake_up_interruptible(&pd->wait_q);
			return;
		} else if (pd->wakeup) {
			/* The waiter has a callback: invoke it and let it
			   claim the port itself. */
			pd->wakeup(pd->private);
			if (dev->port->cad) /* racy but no matter */
				return;
		} else {
			/* Should not happen: a waiter with neither flag
			   nor callback cannot be woken. */
			printk(KERN_ERR "%s: don't know how to wake %s\n", port->name, pd->name);
		}
	}

	/* Nobody was waiting, so walk the list to see if anyone is
	   interested in being woken up. (Note: no locking required) */
	/* !!! LOCKING IS NEEDED HERE */
	for (pd = port->devices; (port->cad == NULL) && pd; pd = pd->next) {
		/* Stop offering as soon as somebody takes the port
		   (port->cad becomes non-NULL); skip the releaser itself. */
		if (pd->wakeup && pd != dev)
			pd->wakeup(pd->private);
	}
}
  876. irqreturn_t parport_irq_handler(int irq, void *dev_id)
  877. {
  878. struct parport *port = dev_id;
  879. parport_generic_irq(port);
  880. return IRQ_HANDLED;
  881. }
/* Exported symbols for modules. */

/* Port claiming and releasing (defined above). */
EXPORT_SYMBOL(parport_claim);
EXPORT_SYMBOL(parport_claim_or_block);
EXPORT_SYMBOL(parport_release);

/* Port, driver and device registration/lookup API (presumably defined
   earlier in this file — confirm against the full source). */
EXPORT_SYMBOL(parport_register_port);
EXPORT_SYMBOL(parport_announce_port);
EXPORT_SYMBOL(parport_remove_port);
EXPORT_SYMBOL(parport_register_driver);
EXPORT_SYMBOL(parport_unregister_driver);
EXPORT_SYMBOL(parport_register_device);
EXPORT_SYMBOL(parport_unregister_device);
EXPORT_SYMBOL(parport_get_port);
EXPORT_SYMBOL(parport_put_port);
EXPORT_SYMBOL(parport_find_number);
EXPORT_SYMBOL(parport_find_base);

/* Shared IRQ handler (defined above). */
EXPORT_SYMBOL(parport_irq_handler);

MODULE_LICENSE("GPL");