/* share.c */
  1. /* $Id: parport_share.c,v 1.15 1998/01/11 12:06:17 philip Exp $
  2. * Parallel-port resource manager code.
  3. *
  4. * Authors: David Campbell <campbell@tirian.che.curtin.edu.au>
  5. * Tim Waugh <tim@cyberelk.demon.co.uk>
  6. * Jose Renau <renau@acm.org>
  7. * Philip Blundell <philb@gnu.org>
  8. * Andrea Arcangeli
  9. *
  10. * based on work by Grant Guenther <grant@torque.net>
  11. * and Philip Blundell
  12. *
  13. * Any part of this program may be used in documents licensed under
  14. * the GNU Free Documentation License, Version 1.1 or any later version
  15. * published by the Free Software Foundation.
  16. */
  17. #undef PARPORT_DEBUG_SHARING /* undef for production */
  18. #include <linux/config.h>
  19. #include <linux/module.h>
  20. #include <linux/string.h>
  21. #include <linux/threads.h>
  22. #include <linux/parport.h>
  23. #include <linux/delay.h>
  24. #include <linux/errno.h>
  25. #include <linux/interrupt.h>
  26. #include <linux/ioport.h>
  27. #include <linux/kernel.h>
  28. #include <linux/slab.h>
  29. #include <linux/sched.h>
  30. #include <linux/kmod.h>
  31. #include <linux/spinlock.h>
  32. #include <asm/irq.h>
  33. #undef PARPORT_PARANOID
  34. #define PARPORT_DEFAULT_TIMESLICE (HZ/5)
  35. unsigned long parport_default_timeslice = PARPORT_DEFAULT_TIMESLICE;
  36. int parport_default_spintime = DEFAULT_SPIN_TIME;
  37. static LIST_HEAD(portlist);
  38. static DEFINE_SPINLOCK(parportlist_lock);
  39. /* list of all allocated ports, sorted by ->number */
  40. static LIST_HEAD(all_ports);
  41. static DEFINE_SPINLOCK(full_list_lock);
  42. static LIST_HEAD(drivers);
  43. static DECLARE_MUTEX(registration_lock);
  44. /* What you can do to a port that's gone away.. */
/* Stub operations substituted for a port whose lowlevel driver has gone
 * away (installed via dead_ops in parport_remove_port).  They discard
 * writes and report zero on reads so that device drivers still holding
 * references to the port fail gracefully instead of dereferencing a
 * stale operations table. */
static void dead_write_lines (struct parport *p, unsigned char b){}
static unsigned char dead_read_lines (struct parport *p) { return 0; }
static unsigned char dead_frob_lines (struct parport *p, unsigned char b,
			     unsigned char c) { return 0; }
static void dead_onearg (struct parport *p){}
static void dead_initstate (struct pardevice *d, struct parport_state *s) { }
static void dead_state (struct parport *p, struct parport_state *s) { }
/* Block transfer stubs: report "zero bytes transferred". */
static size_t dead_write (struct parport *p, const void *b, size_t l, int f)
{ return 0; }
static size_t dead_read (struct parport *p, void *b, size_t l, int f)
{ return 0; }
/* Operations table installed on a port that has been removed; every
 * entry points at one of the dead_* stubs above.  .owner is NULL so no
 * module reference is taken on the dead table. */
static struct parport_operations dead_ops = {
	.write_data	= dead_write_lines,	/* data */
	.read_data	= dead_read_lines,

	.write_control	= dead_write_lines,	/* control */
	.read_control	= dead_read_lines,
	.frob_control	= dead_frob_lines,

	.read_status	= dead_read_lines,	/* status */

	.enable_irq	= dead_onearg,		/* enable_irq */
	.disable_irq	= dead_onearg,		/* disable_irq */

	.data_forward	= dead_onearg,		/* data_forward */
	.data_reverse	= dead_onearg,		/* data_reverse */

	.init_state	= dead_initstate,	/* init_state */
	.save_state	= dead_state,
	.restore_state	= dead_state,

	.epp_write_data	= dead_write,		/* epp */
	.epp_read_data	= dead_read,
	.epp_write_addr	= dead_write,
	.epp_read_addr	= dead_read,

	.ecp_write_data	= dead_write,		/* ecp */
	.ecp_read_data	= dead_read,
	.ecp_write_addr	= dead_write,

	.compat_write_data	= dead_write,	/* compat */
	.nibble_read_data	= dead_read,	/* nibble */
	.byte_read_data		= dead_read,	/* byte */

	.owner		= NULL,
};
  82. /* Call attach(port) for each registered driver. */
  83. static void attach_driver_chain(struct parport *port)
  84. {
  85. /* caller has exclusive registration_lock */
  86. struct parport_driver *drv;
  87. list_for_each_entry(drv, &drivers, list)
  88. drv->attach(port);
  89. }
  90. /* Call detach(port) for each registered driver. */
  91. static void detach_driver_chain(struct parport *port)
  92. {
  93. struct parport_driver *drv;
  94. /* caller has exclusive registration_lock */
  95. list_for_each_entry(drv, &drivers, list)
  96. drv->detach (port);
  97. }
/* Ask kmod to load a lowlevel port driver for us. */
static void get_lowlevel_driver (void)
{
	/* There is no actual module called this: you should set
	 * up an alias for modutils. */
	request_module ("parport_lowlevel");
}
  105. /**
  106. * parport_register_driver - register a parallel port device driver
  107. * @drv: structure describing the driver
  108. *
  109. * This can be called by a parallel port device driver in order
  110. * to receive notifications about ports being found in the
  111. * system, as well as ports no longer available.
  112. *
  113. * The @drv structure is allocated by the caller and must not be
  114. * deallocated until after calling parport_unregister_driver().
  115. *
  116. * The driver's attach() function may block. The port that
  117. * attach() is given will be valid for the duration of the
  118. * callback, but if the driver wants to take a copy of the
  119. * pointer it must call parport_get_port() to do so. Calling
  120. * parport_register_device() on that port will do this for you.
  121. *
  122. * The driver's detach() function may block. The port that
  123. * detach() is given will be valid for the duration of the
  124. * callback, but if the driver wants to take a copy of the
  125. * pointer it must call parport_get_port() to do so.
  126. *
  127. * Returns 0 on success. Currently it always succeeds.
  128. **/
  129. int parport_register_driver (struct parport_driver *drv)
  130. {
  131. struct parport *port;
  132. if (list_empty(&portlist))
  133. get_lowlevel_driver ();
  134. down(&registration_lock);
  135. list_for_each_entry(port, &portlist, list)
  136. drv->attach(port);
  137. list_add(&drv->list, &drivers);
  138. up(&registration_lock);
  139. return 0;
  140. }
  141. /**
  142. * parport_unregister_driver - deregister a parallel port device driver
  143. * @drv: structure describing the driver that was given to
  144. * parport_register_driver()
  145. *
  146. * This should be called by a parallel port device driver that
  147. * has registered itself using parport_register_driver() when it
  148. * is about to be unloaded.
  149. *
  150. * When it returns, the driver's attach() routine will no longer
  151. * be called, and for each port that attach() was called for, the
  152. * detach() routine will have been called.
  153. *
  154. * All the driver's attach() and detach() calls are guaranteed to have
  155. * finished by the time this function returns.
  156. **/
  157. void parport_unregister_driver (struct parport_driver *drv)
  158. {
  159. struct parport *port;
  160. down(&registration_lock);
  161. list_del_init(&drv->list);
  162. list_for_each_entry(port, &portlist, list)
  163. drv->detach(port);
  164. up(&registration_lock);
  165. }
  166. static void free_port (struct parport *port)
  167. {
  168. int d;
  169. spin_lock(&full_list_lock);
  170. list_del(&port->full_list);
  171. spin_unlock(&full_list_lock);
  172. for (d = 0; d < 5; d++) {
  173. if (port->probe_info[d].class_name)
  174. kfree (port->probe_info[d].class_name);
  175. if (port->probe_info[d].mfr)
  176. kfree (port->probe_info[d].mfr);
  177. if (port->probe_info[d].model)
  178. kfree (port->probe_info[d].model);
  179. if (port->probe_info[d].cmdset)
  180. kfree (port->probe_info[d].cmdset);
  181. if (port->probe_info[d].description)
  182. kfree (port->probe_info[d].description);
  183. }
  184. kfree(port->name);
  185. kfree(port);
  186. }
  187. /**
  188. * parport_get_port - increment a port's reference count
  189. * @port: the port
  190. *
 * This ensures that a struct parport pointer remains valid
  192. * until the matching parport_put_port() call.
  193. **/
struct parport *parport_get_port (struct parport *port)
{
	/* Bump the reference count; balanced by parport_put_port(). */
	atomic_inc (&port->ref_count);
	return port;
}
  199. /**
  200. * parport_put_port - decrement a port's reference count
  201. * @port: the port
  202. *
  203. * This should be called once for each call to parport_get_port(),
  204. * once the port is no longer needed.
  205. **/
  206. void parport_put_port (struct parport *port)
  207. {
  208. if (atomic_dec_and_test (&port->ref_count))
  209. /* Can destroy it now. */
  210. free_port (port);
  211. return;
  212. }
  213. /**
  214. * parport_register_port - register a parallel port
  215. * @base: base I/O address
  216. * @irq: IRQ line
  217. * @dma: DMA channel
  218. * @ops: pointer to the port driver's port operations structure
  219. *
  220. * When a parallel port (lowlevel) driver finds a port that
  221. * should be made available to parallel port device drivers, it
  222. * should call parport_register_port(). The @base, @irq, and
  223. * @dma parameters are for the convenience of port drivers, and
  224. * for ports where they aren't meaningful needn't be set to
  225. * anything special. They can be altered afterwards by adjusting
  226. * the relevant members of the parport structure that is returned
  227. * and represents the port. They should not be tampered with
  228. * after calling parport_announce_port, however.
  229. *
  230. * If there are parallel port device drivers in the system that
  231. * have registered themselves using parport_register_driver(),
  232. * they are not told about the port at this time; that is done by
  233. * parport_announce_port().
  234. *
  235. * The @ops structure is allocated by the caller, and must not be
  236. * deallocated before calling parport_remove_port().
  237. *
  238. * If there is no memory to allocate a new parport structure,
  239. * this function will return %NULL.
  240. **/
  241. struct parport *parport_register_port(unsigned long base, int irq, int dma,
  242. struct parport_operations *ops)
  243. {
  244. struct list_head *l;
  245. struct parport *tmp;
  246. int num;
  247. int device;
  248. char *name;
  249. tmp = kmalloc(sizeof(struct parport), GFP_KERNEL);
  250. if (!tmp) {
  251. printk(KERN_WARNING "parport: memory squeeze\n");
  252. return NULL;
  253. }
  254. /* Init our structure */
  255. memset(tmp, 0, sizeof(struct parport));
  256. tmp->base = base;
  257. tmp->irq = irq;
  258. tmp->dma = dma;
  259. tmp->muxport = tmp->daisy = tmp->muxsel = -1;
  260. tmp->modes = 0;
  261. INIT_LIST_HEAD(&tmp->list);
  262. tmp->devices = tmp->cad = NULL;
  263. tmp->flags = 0;
  264. tmp->ops = ops;
  265. tmp->physport = tmp;
  266. memset (tmp->probe_info, 0, 5 * sizeof (struct parport_device_info));
  267. rwlock_init(&tmp->cad_lock);
  268. spin_lock_init(&tmp->waitlist_lock);
  269. spin_lock_init(&tmp->pardevice_lock);
  270. tmp->ieee1284.mode = IEEE1284_MODE_COMPAT;
  271. tmp->ieee1284.phase = IEEE1284_PH_FWD_IDLE;
  272. init_MUTEX_LOCKED (&tmp->ieee1284.irq); /* actually a semaphore at 0 */
  273. tmp->spintime = parport_default_spintime;
  274. atomic_set (&tmp->ref_count, 1);
  275. INIT_LIST_HEAD(&tmp->full_list);
  276. name = kmalloc(15, GFP_KERNEL);
  277. if (!name) {
  278. printk(KERN_ERR "parport: memory squeeze\n");
  279. kfree(tmp);
  280. return NULL;
  281. }
  282. /* Search for the lowest free parport number. */
  283. spin_lock(&full_list_lock);
  284. for (l = all_ports.next, num = 0; l != &all_ports; l = l->next, num++) {
  285. struct parport *p = list_entry(l, struct parport, full_list);
  286. if (p->number != num)
  287. break;
  288. }
  289. tmp->portnum = tmp->number = num;
  290. list_add_tail(&tmp->full_list, l);
  291. spin_unlock(&full_list_lock);
  292. /*
  293. * Now that the portnum is known finish doing the Init.
  294. */
  295. sprintf(name, "parport%d", tmp->portnum = tmp->number);
  296. tmp->name = name;
  297. for (device = 0; device < 5; device++)
  298. /* assume the worst */
  299. tmp->probe_info[device].class = PARPORT_CLASS_LEGACY;
  300. tmp->waithead = tmp->waittail = NULL;
  301. return tmp;
  302. }
  303. /**
  304. * parport_announce_port - tell device drivers about a parallel port
  305. * @port: parallel port to announce
  306. *
  307. * After a port driver has registered a parallel port with
  308. * parport_register_port, and performed any necessary
  309. * initialisation or adjustments, it should call
  310. * parport_announce_port() in order to notify all device drivers
  311. * that have called parport_register_driver(). Their attach()
  312. * functions will be called, with @port as the parameter.
  313. **/
void parport_announce_port (struct parport *port)
{
	int i;

#ifdef CONFIG_PARPORT_1284
	/* Analyse the IEEE1284.3 topology of the port. */
	parport_daisy_init(port);
#endif

	parport_proc_register(port);

	down(&registration_lock);
	/* Publish the port (and any mux slaves) on the global list
	 * first, so drivers' attach() callbacks can find it. */
	spin_lock_irq(&parportlist_lock);
	list_add_tail(&port->list, &portlist);
	for (i = 1; i < 3; i++) {
		struct parport *slave = port->slaves[i-1];
		if (slave)
			list_add_tail(&slave->list, &portlist);
	}
	spin_unlock_irq(&parportlist_lock);

	/* Let drivers know that new port(s) has arrived. */
	attach_driver_chain (port);
	for (i = 1; i < 3; i++) {
		struct parport *slave = port->slaves[i-1];
		if (slave)
			attach_driver_chain(slave);
	}
	up(&registration_lock);
}
  340. /**
  341. * parport_remove_port - deregister a parallel port
  342. * @port: parallel port to deregister
  343. *
  344. * When a parallel port driver is forcibly unloaded, or a
  345. * parallel port becomes inaccessible, the port driver must call
  346. * this function in order to deal with device drivers that still
  347. * want to use it.
  348. *
  349. * The parport structure associated with the port has its
  350. * operations structure replaced with one containing 'null'
  351. * operations that return errors or just don't do anything.
  352. *
  353. * Any drivers that have registered themselves using
  354. * parport_register_driver() are notified that the port is no
  355. * longer accessible by having their detach() routines called
  356. * with @port as the parameter.
  357. **/
void parport_remove_port(struct parport *port)
{
	int i;

	down(&registration_lock);

	/* Spread the word. */
	detach_driver_chain (port);

#ifdef CONFIG_PARPORT_1284
	/* Forget the IEEE1284.3 topology of the port. */
	parport_daisy_fini(port);
	/* Detach and tear down any mux slave ports as well. */
	for (i = 1; i < 3; i++) {
		struct parport *slave = port->slaves[i-1];
		if (!slave)
			continue;
		detach_driver_chain(slave);
		parport_daisy_fini(slave);
	}
#endif

	/* Replace the operations table with the dead stubs so that
	 * drivers still holding the port get harmless no-ops instead
	 * of calls into an unloaded module. */
	port->ops = &dead_ops;
	spin_lock(&parportlist_lock);
	list_del_init(&port->list);
	for (i = 1; i < 3; i++) {
		struct parport *slave = port->slaves[i-1];
		if (slave)
			list_del_init(&slave->list);
	}
	spin_unlock(&parportlist_lock);

	up(&registration_lock);

	parport_proc_unregister(port);

	/* Drop the references held on the slave ports. */
	for (i = 1; i < 3; i++) {
		struct parport *slave = port->slaves[i-1];
		if (slave)
			parport_put_port(slave);
	}
}
  392. /**
  393. * parport_register_device - register a device on a parallel port
  394. * @port: port to which the device is attached
  395. * @name: a name to refer to the device
  396. * @pf: preemption callback
  397. * @kf: kick callback (wake-up)
  398. * @irq_func: interrupt handler
  399. * @flags: registration flags
  400. * @handle: data for callback functions
  401. *
  402. * This function, called by parallel port device drivers,
  403. * declares that a device is connected to a port, and tells the
  404. * system all it needs to know.
  405. *
  406. * The @name is allocated by the caller and must not be
  407. * deallocated until the caller calls @parport_unregister_device
  408. * for that device.
  409. *
  410. * The preemption callback function, @pf, is called when this
  411. * device driver has claimed access to the port but another
  412. * device driver wants to use it. It is given @handle as its
  413. * parameter, and should return zero if it is willing for the
  414. * system to release the port to another driver on its behalf.
  415. * If it wants to keep control of the port it should return
  416. * non-zero, and no action will be taken. It is good manners for
  417. * the driver to try to release the port at the earliest
  418. * opportunity after its preemption callback rejects a preemption
  419. * attempt. Note that if a preemption callback is happy for
  420. * preemption to go ahead, there is no need to release the port;
  421. * it is done automatically. This function may not block, as it
  422. * may be called from interrupt context. If the device driver
  423. * does not support preemption, @pf can be %NULL.
  424. *
  425. * The wake-up ("kick") callback function, @kf, is called when
  426. * the port is available to be claimed for exclusive access; that
  427. * is, parport_claim() is guaranteed to succeed when called from
  428. * inside the wake-up callback function. If the driver wants to
  429. * claim the port it should do so; otherwise, it need not take
  430. * any action. This function may not block, as it may be called
  431. * from interrupt context. If the device driver does not want to
  432. * be explicitly invited to claim the port in this way, @kf can
  433. * be %NULL.
  434. *
  435. * The interrupt handler, @irq_func, is called when an interrupt
  436. * arrives from the parallel port. Note that if a device driver
  437. * wants to use interrupts it should use parport_enable_irq(),
  438. * and can also check the irq member of the parport structure
  439. * representing the port.
  440. *
  441. * The parallel port (lowlevel) driver is the one that has called
  442. * request_irq() and whose interrupt handler is called first.
  443. * This handler does whatever needs to be done to the hardware to
  444. * acknowledge the interrupt (for PC-style ports there is nothing
  445. * special to be done). It then tells the IEEE 1284 code about
  446. * the interrupt, which may involve reacting to an IEEE 1284
  447. * event depending on the current IEEE 1284 phase. After this,
  448. * it calls @irq_func. Needless to say, @irq_func will be called
  449. * from interrupt context, and may not block.
  450. *
  451. * The %PARPORT_DEV_EXCL flag is for preventing port sharing, and
  452. * so should only be used when sharing the port with other device
  453. * drivers is impossible and would lead to incorrect behaviour.
  454. * Use it sparingly! Normally, @flags will be zero.
  455. *
  456. * This function returns a pointer to a structure that represents
  457. * the device on the port, or %NULL if there is not enough memory
  458. * to allocate space for that structure.
  459. **/
struct pardevice *
parport_register_device(struct parport *port, const char *name,
			int (*pf)(void *), void (*kf)(void *),
			void (*irq_func)(int, void *, struct pt_regs *),
			int flags, void *handle)
{
	struct pardevice *tmp;

	/* A device with PARPORT_DEV_EXCL already owns this port;
	 * nothing else may register on it. */
	if (port->physport->flags & PARPORT_FLAG_EXCL) {
		/* An exclusive device is registered. */
		printk (KERN_DEBUG "%s: no more devices allowed\n",
			port->name);
		return NULL;
	}

	/* A lurking device must supply both preempt and wakeup
	 * callbacks, otherwise it could never be given the port. */
	if (flags & PARPORT_DEV_LURK) {
		if (!pf || !kf) {
			printk(KERN_INFO "%s: refused to register lurking device (%s) without callbacks\n", port->name, name);
			return NULL;
		}
	}

	/* We up our own module reference count, and that of the port
	   on which a device is to be registered, to ensure that
	   neither of us gets unloaded while we sleep in (e.g.)
	   kmalloc.
	 */
	if (!try_module_get(port->ops->owner)) {
		return NULL;
	}

	parport_get_port (port);

	tmp = kmalloc(sizeof(struct pardevice), GFP_KERNEL);
	if (tmp == NULL) {
		printk(KERN_WARNING "%s: memory squeeze, couldn't register %s.\n", port->name, name);
		goto out;
	}

	tmp->state = kmalloc(sizeof(struct parport_state), GFP_KERNEL);
	if (tmp->state == NULL) {
		printk(KERN_WARNING "%s: memory squeeze, couldn't register %s.\n", port->name, name);
		goto out_free_pardevice;
	}

	tmp->name = name;
	tmp->port = port;
	tmp->daisy = -1;
	tmp->preempt = pf;
	tmp->wakeup = kf;
	tmp->private = handle;
	tmp->flags = flags;
	tmp->irq_func = irq_func;
	tmp->waiting = 0;
	tmp->timeout = 5 * HZ;

	/* Chain this onto the list */
	tmp->prev = NULL;
	/*
	 * This function must not run from an irq handler so we don' t need
	 * to clear irq on the local CPU. -arca
	 */
	spin_lock(&port->physport->pardevice_lock);

	if (flags & PARPORT_DEV_EXCL) {
		/* Exclusive access only possible if nobody else is
		 * registered on this (physical) port yet. */
		if (port->physport->devices) {
			spin_unlock (&port->physport->pardevice_lock);
			printk (KERN_DEBUG
				"%s: cannot grant exclusive access for "
				"device %s\n", port->name, name);
			goto out_free_all;
		}
		port->flags |= PARPORT_FLAG_EXCL;
	}

	tmp->next = port->physport->devices;
	wmb(); /* Make sure that tmp->next is written before it's
                  added to the list; see comments marked 'no locking
                  required' */
	if (port->physport->devices)
		port->physport->devices->prev = tmp;
	port->physport->devices = tmp;
	spin_unlock(&port->physport->pardevice_lock);

	init_waitqueue_head(&tmp->wait_q);
	tmp->timeslice = parport_default_timeslice;
	tmp->waitnext = tmp->waitprev = NULL;

	/*
	 * This has to be run as last thing since init_state may need other
	 * pardevice fields. -arca
	 */
	port->ops->init_state(tmp, tmp->state);
	parport_device_proc_register(tmp);
	return tmp;

 out_free_all:
	kfree (tmp->state);
 out_free_pardevice:
	kfree (tmp);
 out:
	parport_put_port (port);
	module_put(port->ops->owner);

	return NULL;
}
/**
 *	parport_unregister_device - deregister a device on a parallel port
 *	@dev: pointer to structure representing device
 *
 *	This undoes the effect of parport_register_device().
 **/
void parport_unregister_device(struct pardevice *dev)
{
	struct parport *port;

#ifdef PARPORT_PARANOID
	if (dev == NULL) {
		printk(KERN_ERR "parport_unregister_device: passed NULL\n");
		return;
	}
#endif

	parport_device_proc_unregister(dev);

	port = dev->port->physport;

	/* If the driver forgot to release the port, do it on its
	 * behalf (and complain). */
	if (port->cad == dev) {
		printk(KERN_DEBUG "%s: %s forgot to release port\n",
		       port->name, dev->name);
		parport_release (dev);
	}

	/* Unlink from the port's device list. */
	spin_lock(&port->pardevice_lock);
	if (dev->next)
		dev->next->prev = dev->prev;
	if (dev->prev)
		dev->prev->next = dev->next;
	else
		port->devices = dev->next;

	if (dev->flags & PARPORT_DEV_EXCL)
		port->flags &= ~PARPORT_FLAG_EXCL;

	spin_unlock(&port->pardevice_lock);

	/* Make sure we haven't left any pointers around in the wait
	 * list. */
	spin_lock (&port->waitlist_lock);
	if (dev->waitprev || dev->waitnext || port->waithead == dev) {
		if (dev->waitprev)
			dev->waitprev->waitnext = dev->waitnext;
		else
			port->waithead = dev->waitnext;
		if (dev->waitnext)
			dev->waitnext->waitprev = dev->waitprev;
		else
			port->waittail = dev->waitprev;
	}
	spin_unlock (&port->waitlist_lock);

	kfree(dev->state);
	kfree(dev);

	/* Drop the references taken in parport_register_device(). */
	module_put(port->ops->owner);
	parport_put_port (port);
}
  603. /**
  604. * parport_find_number - find a parallel port by number
  605. * @number: parallel port number
  606. *
  607. * This returns the parallel port with the specified number, or
  608. * %NULL if there is none.
  609. *
  610. * There is an implicit parport_get_port() done already; to throw
  611. * away the reference to the port that parport_find_number()
  612. * gives you, use parport_put_port().
  613. */
  614. struct parport *parport_find_number (int number)
  615. {
  616. struct parport *port, *result = NULL;
  617. if (list_empty(&portlist))
  618. get_lowlevel_driver ();
  619. spin_lock (&parportlist_lock);
  620. list_for_each_entry(port, &portlist, list) {
  621. if (port->number == number) {
  622. result = parport_get_port (port);
  623. break;
  624. }
  625. }
  626. spin_unlock (&parportlist_lock);
  627. return result;
  628. }
  629. /**
  630. * parport_find_base - find a parallel port by base address
  631. * @base: base I/O address
  632. *
  633. * This returns the parallel port with the specified base
  634. * address, or %NULL if there is none.
  635. *
  636. * There is an implicit parport_get_port() done already; to throw
  637. * away the reference to the port that parport_find_base()
  638. * gives you, use parport_put_port().
  639. */
  640. struct parport *parport_find_base (unsigned long base)
  641. {
  642. struct parport *port, *result = NULL;
  643. if (list_empty(&portlist))
  644. get_lowlevel_driver ();
  645. spin_lock (&parportlist_lock);
  646. list_for_each_entry(port, &portlist, list) {
  647. if (port->base == base) {
  648. result = parport_get_port (port);
  649. break;
  650. }
  651. }
  652. spin_unlock (&parportlist_lock);
  653. return result;
  654. }
  655. /**
  656. * parport_claim - claim access to a parallel port device
  657. * @dev: pointer to structure representing a device on the port
  658. *
  659. * This function will not block and so can be used from interrupt
  660. * context. If parport_claim() succeeds in claiming access to
  661. * the port it returns zero and the port is available to use. It
  662. * may fail (returning non-zero) if the port is in use by another
  663. * driver and that driver is not willing to relinquish control of
  664. * the port.
  665. **/
int parport_claim(struct pardevice *dev)
{
	struct pardevice *oldcad;
	struct parport *port = dev->port->physport;
	unsigned long flags;

	if (port->cad == dev) {
		printk(KERN_INFO "%s: %s already owner\n",
		       dev->port->name,dev->name);
		return 0;
	}

	/* Preempt any current device */
	write_lock_irqsave (&port->cad_lock, flags);
	if ((oldcad = port->cad) != NULL) {
		if (oldcad->preempt) {
			/* Non-zero return means the owner refuses to
			 * give up the port. */
			if (oldcad->preempt(oldcad->private))
				goto blocked;
			port->ops->save_state(port, dev->state);
		} else
			goto blocked;

		if (port->cad != oldcad) {
			/* I think we'll actually deadlock rather than
                           get here, but just in case.. */
			printk(KERN_WARNING
			       "%s: %s released port when preempted!\n",
			       port->name, oldcad->name);
			if (port->cad)
				goto blocked;
		}
	}

	/* Can't fail from now on, so mark ourselves as no longer waiting.  */
	if (dev->waiting & 1) {
		dev->waiting = 0;

		/* Take ourselves out of the wait list again.  */
		spin_lock_irq (&port->waitlist_lock);
		if (dev->waitprev)
			dev->waitprev->waitnext = dev->waitnext;
		else
			port->waithead = dev->waitnext;
		if (dev->waitnext)
			dev->waitnext->waitprev = dev->waitprev;
		else
			port->waittail = dev->waitprev;
		spin_unlock_irq (&port->waitlist_lock);
		dev->waitprev = dev->waitnext = NULL;
	}

	/* Now we do the change of devices */
	port->cad = dev;

#ifdef CONFIG_PARPORT_1284
	/* If it's a mux port, select it. */
	if (dev->port->muxport >= 0) {
		/* FIXME */
		port->muxsel = dev->port->muxport;
	}

	/* If it's a daisy chain device, select it. */
	if (dev->daisy >= 0) {
		/* This could be lazier. */
		if (!parport_daisy_select (port, dev->daisy,
					   IEEE1284_MODE_COMPAT))
			port->daisy = dev->daisy;
	}
#endif /* IEEE1284.3 support */

	/* Restore control registers */
	port->ops->restore_state(port, dev->state);
	write_unlock_irqrestore(&port->cad_lock, flags);
	dev->time = jiffies;
	return 0;

blocked:
	/* If this is the first time we tried to claim the port, register an
	   interest.  This is only allowed for devices sleeping in
	   parport_claim_or_block(), or those with a wakeup function.  */

	/* The cad_lock is still held for writing here */
	if (dev->waiting & 2 || dev->wakeup) {
		spin_lock (&port->waitlist_lock);
		if (test_and_set_bit(0, &dev->waiting) == 0) {
			/* First add ourselves to the end of the wait list. */
			dev->waitnext = NULL;
			dev->waitprev = port->waittail;
			if (port->waittail) {
				port->waittail->waitnext = dev;
				port->waittail = dev;
			} else
				port->waithead = port->waittail = dev;
		}
		spin_unlock (&port->waitlist_lock);
	}
	write_unlock_irqrestore (&port->cad_lock, flags);
	return -EAGAIN;
}
  754. /**
  755. * parport_claim_or_block - claim access to a parallel port device
  756. * @dev: pointer to structure representing a device on the port
  757. *
  758. * This behaves like parport_claim(), but will block if necessary
  759. * to wait for the port to be free. A return value of 1
  760. * indicates that it slept; 0 means that it succeeded without
  761. * needing to sleep. A negative error code indicates failure.
  762. **/
  763. int parport_claim_or_block(struct pardevice *dev)
  764. {
  765. int r;
  766. /* Signal to parport_claim() that we can wait even without a
  767. wakeup function. */
  768. dev->waiting = 2;
  769. /* Try to claim the port. If this fails, we need to sleep. */
  770. r = parport_claim(dev);
  771. if (r == -EAGAIN) {
  772. #ifdef PARPORT_DEBUG_SHARING
  773. printk(KERN_DEBUG "%s: parport_claim() returned -EAGAIN\n", dev->name);
  774. #endif
  775. /*
  776. * FIXME!!! Use the proper locking for dev->waiting,
  777. * and make this use the "wait_event_interruptible()"
  778. * interfaces. The cli/sti that used to be here
  779. * did nothing.
  780. *
  781. * See also parport_release()
  782. */
  783. /* If dev->waiting is clear now, an interrupt
  784. gave us the port and we would deadlock if we slept. */
  785. if (dev->waiting) {
  786. interruptible_sleep_on (&dev->wait_q);
  787. if (signal_pending (current)) {
  788. return -EINTR;
  789. }
  790. r = 1;
  791. } else {
  792. r = 0;
  793. #ifdef PARPORT_DEBUG_SHARING
  794. printk(KERN_DEBUG "%s: didn't sleep in parport_claim_or_block()\n",
  795. dev->name);
  796. #endif
  797. }
  798. #ifdef PARPORT_DEBUG_SHARING
  799. if (dev->port->physport->cad != dev)
  800. printk(KERN_DEBUG "%s: exiting parport_claim_or_block "
  801. "but %s owns port!\n", dev->name,
  802. dev->port->physport->cad ?
  803. dev->port->physport->cad->name:"nobody");
  804. #endif
  805. }
  806. dev->waiting = 0;
  807. return r;
  808. }
/**
 *	parport_release - give up access to a parallel port device
 *	@dev: pointer to structure representing parallel port device
 *
 *	This function cannot fail, but it should not be called without
 *	the port claimed.  Similarly, if the port is already claimed
 *	you should not try claiming it again.
 **/
void parport_release(struct pardevice *dev)
{
	struct parport *port = dev->port->physport;
	struct pardevice *pd;
	unsigned long flags;

	/* Make sure that dev is the current device */
	write_lock_irqsave(&port->cad_lock, flags);
	if (port->cad != dev) {
		/* Releasing a port we don't own is a driver bug: warn
		   and bail out rather than corrupt port->cad. */
		write_unlock_irqrestore (&port->cad_lock, flags);
		printk(KERN_WARNING "%s: %s tried to release parport "
		       "when not owner\n", port->name, dev->name);
		return;
	}

#ifdef CONFIG_PARPORT_1284
	/* If this is on a mux port, deselect it. */
	if (dev->port->muxport >= 0) {
		/* FIXME */
		port->muxsel = -1;
	}

	/* If this is a daisy device, deselect it. */
	if (dev->daisy >= 0) {
		parport_daisy_deselect_all (port);
		port->daisy = -1;
	}
#endif

	port->cad = NULL;
	write_unlock_irqrestore(&port->cad_lock, flags);

	/* Save control registers */
	port->ops->save_state(port, dev->state);

	/* If anybody is waiting, find out who's been there longest and
	   then wake them up. (Note: no locking required) */
	/* !!! LOCKING IS NEEDED HERE */
	/* NOTE(review): this walks port->waithead without holding
	   waitlist_lock, relying on racy-but-tolerated behaviour as the
	   FIXME above says — confirm before restructuring. */
	for (pd = port->waithead; pd; pd = pd->waitnext) {
		if (pd->waiting & 2) { /* sleeping in claim_or_block */
			/* Hand the port over, then wake the sleeper in
			   parport_claim_or_block(). */
			parport_claim(pd);
			if (waitqueue_active(&pd->wait_q))
				wake_up_interruptible(&pd->wait_q);
			return;
		} else if (pd->wakeup) {
			/* Driver-supplied callback; it may (or may not)
			   claim the port itself. */
			pd->wakeup(pd->private);
			if (dev->port->cad) /* racy but no matter */
				return;
		} else {
			printk(KERN_ERR "%s: don't know how to wake %s\n", port->name, pd->name);
		}
	}

	/* Nobody was waiting, so walk the list to see if anyone is
	   interested in being woken up. (Note: no locking required) */
	/* !!! LOCKING IS NEEDED HERE */
	/* Stop as soon as some wakeup callback has claimed the port
	   (port->cad becomes non-NULL). */
	for (pd = port->devices; (port->cad == NULL) && pd; pd = pd->next) {
		if (pd->wakeup && pd != dev)
			pd->wakeup(pd->private);
	}
}
/* Exported symbols for modules. */

/* Port arbitration. */
EXPORT_SYMBOL(parport_claim);
EXPORT_SYMBOL(parport_claim_or_block);
EXPORT_SYMBOL(parport_release);

/* Port lifecycle and lookup. */
EXPORT_SYMBOL(parport_register_port);
EXPORT_SYMBOL(parport_announce_port);
EXPORT_SYMBOL(parport_remove_port);
EXPORT_SYMBOL(parport_register_driver);
EXPORT_SYMBOL(parport_unregister_driver);
EXPORT_SYMBOL(parport_register_device);
EXPORT_SYMBOL(parport_unregister_device);
EXPORT_SYMBOL(parport_put_port);
EXPORT_SYMBOL(parport_find_number);
EXPORT_SYMBOL(parport_find_base);

MODULE_LICENSE("GPL");