/* share.c */
  1. /* $Id: parport_share.c,v 1.15 1998/01/11 12:06:17 philip Exp $
  2. * Parallel-port resource manager code.
  3. *
  4. * Authors: David Campbell <campbell@tirian.che.curtin.edu.au>
  5. * Tim Waugh <tim@cyberelk.demon.co.uk>
  6. * Jose Renau <renau@acm.org>
  7. * Philip Blundell <philb@gnu.org>
  8. * Andrea Arcangeli
  9. *
  10. * based on work by Grant Guenther <grant@torque.net>
  11. * and Philip Blundell
  12. *
  13. * Any part of this program may be used in documents licensed under
  14. * the GNU Free Documentation License, Version 1.1 or any later version
  15. * published by the Free Software Foundation.
  16. */
  17. #undef PARPORT_DEBUG_SHARING /* undef for production */
  18. #include <linux/config.h>
  19. #include <linux/module.h>
  20. #include <linux/string.h>
  21. #include <linux/threads.h>
  22. #include <linux/parport.h>
  23. #include <linux/delay.h>
  24. #include <linux/errno.h>
  25. #include <linux/interrupt.h>
  26. #include <linux/ioport.h>
  27. #include <linux/kernel.h>
  28. #include <linux/slab.h>
  29. #include <linux/sched.h>
  30. #include <linux/kmod.h>
  31. #include <linux/spinlock.h>
  32. #include <linux/mutex.h>
  33. #include <asm/irq.h>
  34. #undef PARPORT_PARANOID
  35. #define PARPORT_DEFAULT_TIMESLICE (HZ/5)
  36. unsigned long parport_default_timeslice = PARPORT_DEFAULT_TIMESLICE;
  37. int parport_default_spintime = DEFAULT_SPIN_TIME;
  38. static LIST_HEAD(portlist);
  39. static DEFINE_SPINLOCK(parportlist_lock);
  40. /* list of all allocated ports, sorted by ->number */
  41. static LIST_HEAD(all_ports);
  42. static DEFINE_SPINLOCK(full_list_lock);
  43. static LIST_HEAD(drivers);
  44. static DEFINE_MUTEX(registration_lock);
/* What you can do to a port that's gone away..
 *
 * These stubs are installed (via dead_ops, below) by parport_remove_port()
 * so that drivers still holding a pointer to a removed port call harmless
 * no-ops instead of touching hardware that is no longer there.  Reads
 * return 0; writes are discarded; block transfers report 0 bytes moved. */
static void dead_write_lines (struct parport *p, unsigned char b){}
static unsigned char dead_read_lines (struct parport *p) { return 0; }
static unsigned char dead_frob_lines (struct parport *p, unsigned char b,
unsigned char c) { return 0; }
static void dead_onearg (struct parport *p){}
static void dead_initstate (struct pardevice *d, struct parport_state *s) { }
static void dead_state (struct parport *p, struct parport_state *s) { }
static size_t dead_write (struct parport *p, const void *b, size_t l, int f)
{ return 0; }
static size_t dead_read (struct parport *p, void *b, size_t l, int f)
{ return 0; }
/* Operations table full of no-ops, swapped in by parport_remove_port() in
 * place of the lowlevel driver's real operations.  Covers every member of
 * struct parport_operations so no call through port->ops can reach freed
 * driver code.  .owner is NULL: nothing to pin once the real driver is gone. */
static struct parport_operations dead_ops = {
	.write_data	= dead_write_lines,	/* data */
	.read_data	= dead_read_lines,

	.write_control	= dead_write_lines,	/* control */
	.read_control	= dead_read_lines,
	.frob_control	= dead_frob_lines,

	.read_status	= dead_read_lines,	/* status */

	.enable_irq	= dead_onearg,		/* enable_irq */
	.disable_irq	= dead_onearg,		/* disable_irq */

	.data_forward	= dead_onearg,		/* data_forward */
	.data_reverse	= dead_onearg,		/* data_reverse */

	.init_state	= dead_initstate,	/* init_state */
	.save_state	= dead_state,
	.restore_state	= dead_state,

	.epp_write_data	= dead_write,		/* epp */
	.epp_read_data	= dead_read,
	.epp_write_addr	= dead_write,
	.epp_read_addr	= dead_read,

	.ecp_write_data	= dead_write,		/* ecp */
	.ecp_read_data	= dead_read,
	.ecp_write_addr	= dead_write,

	.compat_write_data	= dead_write,	/* compat */
	.nibble_read_data	= dead_read,	/* nibble */
	.byte_read_data		= dead_read,	/* byte */

	.owner		= NULL,
};
  83. /* Call attach(port) for each registered driver. */
  84. static void attach_driver_chain(struct parport *port)
  85. {
  86. /* caller has exclusive registration_lock */
  87. struct parport_driver *drv;
  88. list_for_each_entry(drv, &drivers, list)
  89. drv->attach(port);
  90. }
  91. /* Call detach(port) for each registered driver. */
  92. static void detach_driver_chain(struct parport *port)
  93. {
  94. struct parport_driver *drv;
  95. /* caller has exclusive registration_lock */
  96. list_for_each_entry(drv, &drivers, list)
  97. drv->detach (port);
  98. }
  99. /* Ask kmod for some lowlevel drivers. */
  100. static void get_lowlevel_driver (void)
  101. {
  102. /* There is no actual module called this: you should set
  103. * up an alias for modutils. */
  104. request_module ("parport_lowlevel");
  105. }
  106. /**
  107. * parport_register_driver - register a parallel port device driver
  108. * @drv: structure describing the driver
  109. *
  110. * This can be called by a parallel port device driver in order
  111. * to receive notifications about ports being found in the
  112. * system, as well as ports no longer available.
  113. *
  114. * The @drv structure is allocated by the caller and must not be
  115. * deallocated until after calling parport_unregister_driver().
  116. *
  117. * The driver's attach() function may block. The port that
  118. * attach() is given will be valid for the duration of the
  119. * callback, but if the driver wants to take a copy of the
  120. * pointer it must call parport_get_port() to do so. Calling
  121. * parport_register_device() on that port will do this for you.
  122. *
  123. * The driver's detach() function may block. The port that
  124. * detach() is given will be valid for the duration of the
  125. * callback, but if the driver wants to take a copy of the
  126. * pointer it must call parport_get_port() to do so.
  127. *
  128. * Returns 0 on success. Currently it always succeeds.
  129. **/
  130. int parport_register_driver (struct parport_driver *drv)
  131. {
  132. struct parport *port;
  133. if (list_empty(&portlist))
  134. get_lowlevel_driver ();
  135. mutex_lock(&registration_lock);
  136. list_for_each_entry(port, &portlist, list)
  137. drv->attach(port);
  138. list_add(&drv->list, &drivers);
  139. mutex_unlock(&registration_lock);
  140. return 0;
  141. }
  142. /**
  143. * parport_unregister_driver - deregister a parallel port device driver
  144. * @drv: structure describing the driver that was given to
  145. * parport_register_driver()
  146. *
  147. * This should be called by a parallel port device driver that
  148. * has registered itself using parport_register_driver() when it
  149. * is about to be unloaded.
  150. *
  151. * When it returns, the driver's attach() routine will no longer
  152. * be called, and for each port that attach() was called for, the
  153. * detach() routine will have been called.
  154. *
  155. * All the driver's attach() and detach() calls are guaranteed to have
  156. * finished by the time this function returns.
  157. **/
  158. void parport_unregister_driver (struct parport_driver *drv)
  159. {
  160. struct parport *port;
  161. mutex_lock(&registration_lock);
  162. list_del_init(&drv->list);
  163. list_for_each_entry(port, &portlist, list)
  164. drv->detach(port);
  165. mutex_unlock(&registration_lock);
  166. }
  167. static void free_port (struct parport *port)
  168. {
  169. int d;
  170. spin_lock(&full_list_lock);
  171. list_del(&port->full_list);
  172. spin_unlock(&full_list_lock);
  173. for (d = 0; d < 5; d++) {
  174. kfree(port->probe_info[d].class_name);
  175. kfree(port->probe_info[d].mfr);
  176. kfree(port->probe_info[d].model);
  177. kfree(port->probe_info[d].cmdset);
  178. kfree(port->probe_info[d].description);
  179. }
  180. kfree(port->name);
  181. kfree(port);
  182. }
  183. /**
  184. * parport_get_port - increment a port's reference count
  185. * @port: the port
  186. *
  187. * This ensure's that a struct parport pointer remains valid
  188. * until the matching parport_put_port() call.
  189. **/
  190. struct parport *parport_get_port (struct parport *port)
  191. {
  192. atomic_inc (&port->ref_count);
  193. return port;
  194. }
  195. /**
  196. * parport_put_port - decrement a port's reference count
  197. * @port: the port
  198. *
  199. * This should be called once for each call to parport_get_port(),
  200. * once the port is no longer needed.
  201. **/
  202. void parport_put_port (struct parport *port)
  203. {
  204. if (atomic_dec_and_test (&port->ref_count))
  205. /* Can destroy it now. */
  206. free_port (port);
  207. return;
  208. }
  209. /**
  210. * parport_register_port - register a parallel port
  211. * @base: base I/O address
  212. * @irq: IRQ line
  213. * @dma: DMA channel
  214. * @ops: pointer to the port driver's port operations structure
  215. *
  216. * When a parallel port (lowlevel) driver finds a port that
  217. * should be made available to parallel port device drivers, it
  218. * should call parport_register_port(). The @base, @irq, and
  219. * @dma parameters are for the convenience of port drivers, and
  220. * for ports where they aren't meaningful needn't be set to
  221. * anything special. They can be altered afterwards by adjusting
  222. * the relevant members of the parport structure that is returned
  223. * and represents the port. They should not be tampered with
  224. * after calling parport_announce_port, however.
  225. *
  226. * If there are parallel port device drivers in the system that
  227. * have registered themselves using parport_register_driver(),
  228. * they are not told about the port at this time; that is done by
  229. * parport_announce_port().
  230. *
  231. * The @ops structure is allocated by the caller, and must not be
  232. * deallocated before calling parport_remove_port().
  233. *
  234. * If there is no memory to allocate a new parport structure,
  235. * this function will return %NULL.
  236. **/
  237. struct parport *parport_register_port(unsigned long base, int irq, int dma,
  238. struct parport_operations *ops)
  239. {
  240. struct list_head *l;
  241. struct parport *tmp;
  242. int num;
  243. int device;
  244. char *name;
  245. tmp = kmalloc(sizeof(struct parport), GFP_KERNEL);
  246. if (!tmp) {
  247. printk(KERN_WARNING "parport: memory squeeze\n");
  248. return NULL;
  249. }
  250. /* Init our structure */
  251. memset(tmp, 0, sizeof(struct parport));
  252. tmp->base = base;
  253. tmp->irq = irq;
  254. tmp->dma = dma;
  255. tmp->muxport = tmp->daisy = tmp->muxsel = -1;
  256. tmp->modes = 0;
  257. INIT_LIST_HEAD(&tmp->list);
  258. tmp->devices = tmp->cad = NULL;
  259. tmp->flags = 0;
  260. tmp->ops = ops;
  261. tmp->physport = tmp;
  262. memset (tmp->probe_info, 0, 5 * sizeof (struct parport_device_info));
  263. rwlock_init(&tmp->cad_lock);
  264. spin_lock_init(&tmp->waitlist_lock);
  265. spin_lock_init(&tmp->pardevice_lock);
  266. tmp->ieee1284.mode = IEEE1284_MODE_COMPAT;
  267. tmp->ieee1284.phase = IEEE1284_PH_FWD_IDLE;
  268. init_MUTEX_LOCKED (&tmp->ieee1284.irq); /* actually a semaphore at 0 */
  269. tmp->spintime = parport_default_spintime;
  270. atomic_set (&tmp->ref_count, 1);
  271. INIT_LIST_HEAD(&tmp->full_list);
  272. name = kmalloc(15, GFP_KERNEL);
  273. if (!name) {
  274. printk(KERN_ERR "parport: memory squeeze\n");
  275. kfree(tmp);
  276. return NULL;
  277. }
  278. /* Search for the lowest free parport number. */
  279. spin_lock(&full_list_lock);
  280. for (l = all_ports.next, num = 0; l != &all_ports; l = l->next, num++) {
  281. struct parport *p = list_entry(l, struct parport, full_list);
  282. if (p->number != num)
  283. break;
  284. }
  285. tmp->portnum = tmp->number = num;
  286. list_add_tail(&tmp->full_list, l);
  287. spin_unlock(&full_list_lock);
  288. /*
  289. * Now that the portnum is known finish doing the Init.
  290. */
  291. sprintf(name, "parport%d", tmp->portnum = tmp->number);
  292. tmp->name = name;
  293. for (device = 0; device < 5; device++)
  294. /* assume the worst */
  295. tmp->probe_info[device].class = PARPORT_CLASS_LEGACY;
  296. tmp->waithead = tmp->waittail = NULL;
  297. return tmp;
  298. }
/**
 *	parport_announce_port - tell device drivers about a parallel port
 *	@port: parallel port to announce
 *
 *	After a port driver has registered a parallel port with
 *	parport_register_port, and performed any necessary
 *	initialisation or adjustments, it should call
 *	parport_announce_port() in order to notify all device drivers
 *	that have called parport_register_driver().  Their attach()
 *	functions will be called, with @port as the parameter.
 **/
void parport_announce_port (struct parport *port)
{
	int i;

#ifdef CONFIG_PARPORT_1284
	/* Analyse the IEEE1284.3 topology of the port. */
	parport_daisy_init(port);
#endif

	parport_proc_register(port);

	/* registration_lock serialises us against driver (un)registration;
	 * parportlist_lock protects the port list itself. */
	mutex_lock(&registration_lock);
	spin_lock_irq(&parportlist_lock);
	list_add_tail(&port->list, &portlist);
	/* slaves[0] and slaves[1] (indices i-1 for i = 1..2) are any mux
	 * slave ports hanging off this one; list those too if present. */
	for (i = 1; i < 3; i++) {
		struct parport *slave = port->slaves[i-1];
		if (slave)
			list_add_tail(&slave->list, &portlist);
	}
	spin_unlock_irq(&parportlist_lock);

	/* Let drivers know that new port(s) has arrived.  Done outside
	 * the spinlock because attach() may block. */
	attach_driver_chain (port);
	for (i = 1; i < 3; i++) {
		struct parport *slave = port->slaves[i-1];
		if (slave)
			attach_driver_chain(slave);
	}
	mutex_unlock(&registration_lock);
}
/**
 *	parport_remove_port - deregister a parallel port
 *	@port: parallel port to deregister
 *
 *	When a parallel port driver is forcibly unloaded, or a
 *	parallel port becomes inaccessible, the port driver must call
 *	this function in order to deal with device drivers that still
 *	want to use it.
 *
 *	The parport structure associated with the port has its
 *	operations structure replaced with one containing 'null'
 *	operations that return errors or just don't do anything.
 *
 *	Any drivers that have registered themselves using
 *	parport_register_driver() are notified that the port is no
 *	longer accessible by having their detach() routines called
 *	with @port as the parameter.
 **/
void parport_remove_port(struct parport *port)
{
	int i;

	mutex_lock(&registration_lock);

	/* Spread the word. */
	detach_driver_chain (port);

#ifdef CONFIG_PARPORT_1284
	/* Forget the IEEE1284.3 topology of the port. */
	parport_daisy_fini(port);
	/* Detach and tear down any mux slave ports too. */
	for (i = 1; i < 3; i++) {
		struct parport *slave = port->slaves[i-1];
		if (!slave)
			continue;
		detach_driver_chain(slave);
		parport_daisy_fini(slave);
	}
#endif

	/* From here on, any driver still holding the port pointer only
	 * reaches the harmless no-op operations in dead_ops. */
	port->ops = &dead_ops;
	spin_lock(&parportlist_lock);
	list_del_init(&port->list);
	for (i = 1; i < 3; i++) {
		struct parport *slave = port->slaves[i-1];
		if (slave)
			list_del_init(&slave->list);
	}
	spin_unlock(&parportlist_lock);

	mutex_unlock(&registration_lock);

	parport_proc_unregister(port);

	/* Drop the slaves' references; NOTE(review): the port's own final
	 * reference is presumably dropped by the caller via
	 * parport_put_port() — confirm against the lowlevel drivers. */
	for (i = 1; i < 3; i++) {
		struct parport *slave = port->slaves[i-1];
		if (slave)
			parport_put_port(slave);
	}
}
  388. /**
  389. * parport_register_device - register a device on a parallel port
  390. * @port: port to which the device is attached
  391. * @name: a name to refer to the device
  392. * @pf: preemption callback
  393. * @kf: kick callback (wake-up)
  394. * @irq_func: interrupt handler
  395. * @flags: registration flags
  396. * @handle: data for callback functions
  397. *
  398. * This function, called by parallel port device drivers,
  399. * declares that a device is connected to a port, and tells the
  400. * system all it needs to know.
  401. *
  402. * The @name is allocated by the caller and must not be
  403. * deallocated until the caller calls @parport_unregister_device
  404. * for that device.
  405. *
  406. * The preemption callback function, @pf, is called when this
  407. * device driver has claimed access to the port but another
  408. * device driver wants to use it. It is given @handle as its
  409. * parameter, and should return zero if it is willing for the
  410. * system to release the port to another driver on its behalf.
  411. * If it wants to keep control of the port it should return
  412. * non-zero, and no action will be taken. It is good manners for
  413. * the driver to try to release the port at the earliest
  414. * opportunity after its preemption callback rejects a preemption
  415. * attempt. Note that if a preemption callback is happy for
  416. * preemption to go ahead, there is no need to release the port;
  417. * it is done automatically. This function may not block, as it
  418. * may be called from interrupt context. If the device driver
  419. * does not support preemption, @pf can be %NULL.
  420. *
  421. * The wake-up ("kick") callback function, @kf, is called when
  422. * the port is available to be claimed for exclusive access; that
  423. * is, parport_claim() is guaranteed to succeed when called from
  424. * inside the wake-up callback function. If the driver wants to
  425. * claim the port it should do so; otherwise, it need not take
  426. * any action. This function may not block, as it may be called
  427. * from interrupt context. If the device driver does not want to
  428. * be explicitly invited to claim the port in this way, @kf can
  429. * be %NULL.
  430. *
  431. * The interrupt handler, @irq_func, is called when an interrupt
  432. * arrives from the parallel port. Note that if a device driver
  433. * wants to use interrupts it should use parport_enable_irq(),
  434. * and can also check the irq member of the parport structure
  435. * representing the port.
  436. *
  437. * The parallel port (lowlevel) driver is the one that has called
  438. * request_irq() and whose interrupt handler is called first.
  439. * This handler does whatever needs to be done to the hardware to
  440. * acknowledge the interrupt (for PC-style ports there is nothing
  441. * special to be done). It then tells the IEEE 1284 code about
  442. * the interrupt, which may involve reacting to an IEEE 1284
  443. * event depending on the current IEEE 1284 phase. After this,
  444. * it calls @irq_func. Needless to say, @irq_func will be called
  445. * from interrupt context, and may not block.
  446. *
  447. * The %PARPORT_DEV_EXCL flag is for preventing port sharing, and
  448. * so should only be used when sharing the port with other device
  449. * drivers is impossible and would lead to incorrect behaviour.
  450. * Use it sparingly! Normally, @flags will be zero.
  451. *
  452. * This function returns a pointer to a structure that represents
  453. * the device on the port, or %NULL if there is not enough memory
  454. * to allocate space for that structure.
  455. **/
  456. struct pardevice *
  457. parport_register_device(struct parport *port, const char *name,
  458. int (*pf)(void *), void (*kf)(void *),
  459. void (*irq_func)(int, void *, struct pt_regs *),
  460. int flags, void *handle)
  461. {
  462. struct pardevice *tmp;
  463. if (port->physport->flags & PARPORT_FLAG_EXCL) {
  464. /* An exclusive device is registered. */
  465. printk (KERN_DEBUG "%s: no more devices allowed\n",
  466. port->name);
  467. return NULL;
  468. }
  469. if (flags & PARPORT_DEV_LURK) {
  470. if (!pf || !kf) {
  471. printk(KERN_INFO "%s: refused to register lurking device (%s) without callbacks\n", port->name, name);
  472. return NULL;
  473. }
  474. }
  475. /* We up our own module reference count, and that of the port
  476. on which a device is to be registered, to ensure that
  477. neither of us gets unloaded while we sleep in (e.g.)
  478. kmalloc.
  479. */
  480. if (!try_module_get(port->ops->owner)) {
  481. return NULL;
  482. }
  483. parport_get_port (port);
  484. tmp = kmalloc(sizeof(struct pardevice), GFP_KERNEL);
  485. if (tmp == NULL) {
  486. printk(KERN_WARNING "%s: memory squeeze, couldn't register %s.\n", port->name, name);
  487. goto out;
  488. }
  489. tmp->state = kmalloc(sizeof(struct parport_state), GFP_KERNEL);
  490. if (tmp->state == NULL) {
  491. printk(KERN_WARNING "%s: memory squeeze, couldn't register %s.\n", port->name, name);
  492. goto out_free_pardevice;
  493. }
  494. tmp->name = name;
  495. tmp->port = port;
  496. tmp->daisy = -1;
  497. tmp->preempt = pf;
  498. tmp->wakeup = kf;
  499. tmp->private = handle;
  500. tmp->flags = flags;
  501. tmp->irq_func = irq_func;
  502. tmp->waiting = 0;
  503. tmp->timeout = 5 * HZ;
  504. /* Chain this onto the list */
  505. tmp->prev = NULL;
  506. /*
  507. * This function must not run from an irq handler so we don' t need
  508. * to clear irq on the local CPU. -arca
  509. */
  510. spin_lock(&port->physport->pardevice_lock);
  511. if (flags & PARPORT_DEV_EXCL) {
  512. if (port->physport->devices) {
  513. spin_unlock (&port->physport->pardevice_lock);
  514. printk (KERN_DEBUG
  515. "%s: cannot grant exclusive access for "
  516. "device %s\n", port->name, name);
  517. goto out_free_all;
  518. }
  519. port->flags |= PARPORT_FLAG_EXCL;
  520. }
  521. tmp->next = port->physport->devices;
  522. wmb(); /* Make sure that tmp->next is written before it's
  523. added to the list; see comments marked 'no locking
  524. required' */
  525. if (port->physport->devices)
  526. port->physport->devices->prev = tmp;
  527. port->physport->devices = tmp;
  528. spin_unlock(&port->physport->pardevice_lock);
  529. init_waitqueue_head(&tmp->wait_q);
  530. tmp->timeslice = parport_default_timeslice;
  531. tmp->waitnext = tmp->waitprev = NULL;
  532. /*
  533. * This has to be run as last thing since init_state may need other
  534. * pardevice fields. -arca
  535. */
  536. port->ops->init_state(tmp, tmp->state);
  537. parport_device_proc_register(tmp);
  538. return tmp;
  539. out_free_all:
  540. kfree(tmp->state);
  541. out_free_pardevice:
  542. kfree(tmp);
  543. out:
  544. parport_put_port (port);
  545. module_put(port->ops->owner);
  546. return NULL;
  547. }
/**
 *	parport_unregister_device - deregister a device on a parallel port
 *	@dev: pointer to structure representing device
 *
 *	This undoes the effect of parport_register_device().
 **/
void parport_unregister_device(struct pardevice *dev)
{
	struct parport *port;

#ifdef PARPORT_PARANOID
	if (dev == NULL) {
		printk(KERN_ERR "parport_unregister_device: passed NULL\n");
		return;
	}
#endif

	parport_device_proc_unregister(dev);

	port = dev->port->physport;

	/* A well-behaved driver releases the port before unregistering;
	 * cover for one that forgot. */
	if (port->cad == dev) {
		printk(KERN_DEBUG "%s: %s forgot to release port\n",
		       port->name, dev->name);
		parport_release (dev);
	}

	/* Unlink from the port's doubly-linked device list. */
	spin_lock(&port->pardevice_lock);
	if (dev->next)
		dev->next->prev = dev->prev;
	if (dev->prev)
		dev->prev->next = dev->next;
	else
		port->devices = dev->next;

	/* An exclusive device going away re-opens the port for sharing. */
	if (dev->flags & PARPORT_DEV_EXCL)
		port->flags &= ~PARPORT_FLAG_EXCL;

	spin_unlock(&port->pardevice_lock);

	/* Make sure we haven't left any pointers around in the wait
	 * list. */
	spin_lock (&port->waitlist_lock);
	if (dev->waitprev || dev->waitnext || port->waithead == dev) {
		if (dev->waitprev)
			dev->waitprev->waitnext = dev->waitnext;
		else
			port->waithead = dev->waitnext;
		if (dev->waitnext)
			dev->waitnext->waitprev = dev->waitprev;
		else
			port->waittail = dev->waitprev;
	}
	spin_unlock (&port->waitlist_lock);

	kfree(dev->state);
	kfree(dev);

	/* Drop the references taken in parport_register_device(). */
	module_put(port->ops->owner);
	parport_put_port (port);
}
  599. /**
  600. * parport_find_number - find a parallel port by number
  601. * @number: parallel port number
  602. *
  603. * This returns the parallel port with the specified number, or
  604. * %NULL if there is none.
  605. *
  606. * There is an implicit parport_get_port() done already; to throw
  607. * away the reference to the port that parport_find_number()
  608. * gives you, use parport_put_port().
  609. */
  610. struct parport *parport_find_number (int number)
  611. {
  612. struct parport *port, *result = NULL;
  613. if (list_empty(&portlist))
  614. get_lowlevel_driver ();
  615. spin_lock (&parportlist_lock);
  616. list_for_each_entry(port, &portlist, list) {
  617. if (port->number == number) {
  618. result = parport_get_port (port);
  619. break;
  620. }
  621. }
  622. spin_unlock (&parportlist_lock);
  623. return result;
  624. }
  625. /**
  626. * parport_find_base - find a parallel port by base address
  627. * @base: base I/O address
  628. *
  629. * This returns the parallel port with the specified base
  630. * address, or %NULL if there is none.
  631. *
  632. * There is an implicit parport_get_port() done already; to throw
  633. * away the reference to the port that parport_find_base()
  634. * gives you, use parport_put_port().
  635. */
  636. struct parport *parport_find_base (unsigned long base)
  637. {
  638. struct parport *port, *result = NULL;
  639. if (list_empty(&portlist))
  640. get_lowlevel_driver ();
  641. spin_lock (&parportlist_lock);
  642. list_for_each_entry(port, &portlist, list) {
  643. if (port->base == base) {
  644. result = parport_get_port (port);
  645. break;
  646. }
  647. }
  648. spin_unlock (&parportlist_lock);
  649. return result;
  650. }
/**
 *	parport_claim - claim access to a parallel port device
 *	@dev: pointer to structure representing a device on the port
 *
 *	This function will not block and so can be used from interrupt
 *	context.  If parport_claim() succeeds in claiming access to
 *	the port it returns zero and the port is available to use.  It
 *	may fail (returning non-zero) if the port is in use by another
 *	driver and that driver is not willing to relinquish control of
 *	the port.
 **/
int parport_claim(struct pardevice *dev)
{
	struct pardevice *oldcad;
	struct parport *port = dev->port->physport;
	unsigned long flags;

	if (port->cad == dev) {
		printk(KERN_INFO "%s: %s already owner\n",
		       dev->port->name,dev->name);
		return 0;
	}

	/* Preempt any current device */
	write_lock_irqsave (&port->cad_lock, flags);
	if ((oldcad = port->cad) != NULL) {
		if (oldcad->preempt) {
			/* Non-zero from preempt() means the current owner
			 * refuses to give up the port. */
			if (oldcad->preempt(oldcad->private))
				goto blocked;
			port->ops->save_state(port, dev->state);
		} else
			goto blocked;

		if (port->cad != oldcad) {
			/* I think we'll actually deadlock rather than
                           get here, but just in case.. */
			printk(KERN_WARNING
			       "%s: %s released port when preempted!\n",
			       port->name, oldcad->name);
			if (port->cad)
				goto blocked;
		}
	}

	/* Can't fail from now on, so mark ourselves as no longer waiting. */
	/* Bit 0 of dev->waiting means "on the wait list" (set in the
	 * blocked path below); clear it and unlink ourselves. */
	if (dev->waiting & 1) {
		dev->waiting = 0;

		/* Take ourselves out of the wait list again.  */
		spin_lock_irq (&port->waitlist_lock);
		if (dev->waitprev)
			dev->waitprev->waitnext = dev->waitnext;
		else
			port->waithead = dev->waitnext;
		if (dev->waitnext)
			dev->waitnext->waitprev = dev->waitprev;
		else
			port->waittail = dev->waitprev;
		spin_unlock_irq (&port->waitlist_lock);
		dev->waitprev = dev->waitnext = NULL;
	}

	/* Now we do the change of devices */
	port->cad = dev;

#ifdef CONFIG_PARPORT_1284
	/* If it's a mux port, select it. */
	if (dev->port->muxport >= 0) {
		/* FIXME */
		port->muxsel = dev->port->muxport;
	}

	/* If it's a daisy chain device, select it. */
	if (dev->daisy >= 0) {
		/* This could be lazier. */
		if (!parport_daisy_select (port, dev->daisy,
					   IEEE1284_MODE_COMPAT))
			port->daisy = dev->daisy;
	}
#endif /* IEEE1284.3 support */

	/* Restore control registers */
	port->ops->restore_state(port, dev->state);
	write_unlock_irqrestore(&port->cad_lock, flags);
	dev->time = jiffies;
	return 0;

blocked:
	/* If this is the first time we tried to claim the port, register an
	   interest.  This is only allowed for devices sleeping in
	   parport_claim_or_block(), or those with a wakeup function.  */

	/* The cad_lock is still held for writing here */
	/* Bit 1 of dev->waiting is set by parport_claim_or_block() to
	 * say "I am allowed to wait even without a wakeup callback". */
	if (dev->waiting & 2 || dev->wakeup) {
		spin_lock (&port->waitlist_lock);
		if (test_and_set_bit(0, &dev->waiting) == 0) {
			/* First add ourselves to the end of the wait list. */
			dev->waitnext = NULL;
			dev->waitprev = port->waittail;
			if (port->waittail) {
				port->waittail->waitnext = dev;
				port->waittail = dev;
			} else
				port->waithead = port->waittail = dev;
		}
		spin_unlock (&port->waitlist_lock);
	}
	write_unlock_irqrestore (&port->cad_lock, flags);
	return -EAGAIN;
}
  750. /**
  751. * parport_claim_or_block - claim access to a parallel port device
  752. * @dev: pointer to structure representing a device on the port
  753. *
  754. * This behaves like parport_claim(), but will block if necessary
  755. * to wait for the port to be free. A return value of 1
  756. * indicates that it slept; 0 means that it succeeded without
  757. * needing to sleep. A negative error code indicates failure.
  758. **/
  759. int parport_claim_or_block(struct pardevice *dev)
  760. {
  761. int r;
  762. /* Signal to parport_claim() that we can wait even without a
  763. wakeup function. */
  764. dev->waiting = 2;
  765. /* Try to claim the port. If this fails, we need to sleep. */
  766. r = parport_claim(dev);
  767. if (r == -EAGAIN) {
  768. #ifdef PARPORT_DEBUG_SHARING
  769. printk(KERN_DEBUG "%s: parport_claim() returned -EAGAIN\n", dev->name);
  770. #endif
  771. /*
  772. * FIXME!!! Use the proper locking for dev->waiting,
  773. * and make this use the "wait_event_interruptible()"
  774. * interfaces. The cli/sti that used to be here
  775. * did nothing.
  776. *
  777. * See also parport_release()
  778. */
  779. /* If dev->waiting is clear now, an interrupt
  780. gave us the port and we would deadlock if we slept. */
  781. if (dev->waiting) {
  782. interruptible_sleep_on (&dev->wait_q);
  783. if (signal_pending (current)) {
  784. return -EINTR;
  785. }
  786. r = 1;
  787. } else {
  788. r = 0;
  789. #ifdef PARPORT_DEBUG_SHARING
  790. printk(KERN_DEBUG "%s: didn't sleep in parport_claim_or_block()\n",
  791. dev->name);
  792. #endif
  793. }
  794. #ifdef PARPORT_DEBUG_SHARING
  795. if (dev->port->physport->cad != dev)
  796. printk(KERN_DEBUG "%s: exiting parport_claim_or_block "
  797. "but %s owns port!\n", dev->name,
  798. dev->port->physport->cad ?
  799. dev->port->physport->cad->name:"nobody");
  800. #endif
  801. }
  802. dev->waiting = 0;
  803. return r;
  804. }
/**
 *	parport_release - give up access to a parallel port device
 *	@dev: pointer to structure representing parallel port device
 *
 *	This function cannot fail, but it should not be called without
 *	the port claimed.  Similarly, if the port is already claimed
 *	you should not try claiming it again.
 **/
void parport_release(struct pardevice *dev)
{
	/* Arbitration is done on the physical port; dev->port may be a
	   mux/daisy view of it. */
	struct parport *port = dev->port->physport;
	struct pardevice *pd;
	unsigned long flags;

	/* Make sure that dev is the current device: releasing a port we
	   do not own is a caller bug, so warn and bail out harmlessly. */
	write_lock_irqsave(&port->cad_lock, flags);
	if (port->cad != dev) {
		write_unlock_irqrestore (&port->cad_lock, flags);
		printk(KERN_WARNING "%s: %s tried to release parport "
		       "when not owner\n", port->name, dev->name);
		return;
	}

#ifdef CONFIG_PARPORT_1284
	/* If this is on a mux port, deselect it. */
	if (dev->port->muxport >= 0) {
		/* FIXME */
		port->muxsel = -1;
	}

	/* If this is a daisy device, deselect it. */
	if (dev->daisy >= 0) {
		parport_daisy_deselect_all (port);
		port->daisy = -1;
	}
#endif

	/* Mark the port free while still holding cad_lock for writing. */
	port->cad = NULL;
	write_unlock_irqrestore(&port->cad_lock, flags);

	/* Save control registers so they can be restored when this
	   device next claims the port. */
	port->ops->save_state(port, dev->state);

	/* If anybody is waiting, find out who's been there longest and
	   then wake them up. (Note: no locking required) */
	/* !!! LOCKING IS NEEDED HERE */
	for (pd = port->waithead; pd; pd = pd->waitnext) {
		if (pd->waiting & 2) { /* sleeping in claim_or_block */
			/* Claim on the sleeper's behalf, then wake it;
			   parport_claim_or_block() sees dev->waiting
			   cleared and knows it now owns the port. */
			parport_claim(pd);
			if (waitqueue_active(&pd->wait_q))
				wake_up_interruptible(&pd->wait_q);
			return;
		} else if (pd->wakeup) {
			/* Driver supplied a wakeup callback; it may (or
			   may not) claim the port from inside it. */
			pd->wakeup(pd->private);
			if (dev->port->cad) /* racy but no matter */
				return;
		} else {
			printk(KERN_ERR "%s: don't know how to wake %s\n", port->name, pd->name);
		}
	}

	/* Nobody was waiting, so walk the list to see if anyone is
	   interested in being woken up. (Note: no locking required) */
	/* !!! LOCKING IS NEEDED HERE */
	for (pd = port->devices; (port->cad == NULL) && pd; pd = pd->next) {
		/* Stop offering the port as soon as someone claims it
		   (port->cad becomes non-NULL); skip the releasing dev. */
		if (pd->wakeup && pd != dev)
			pd->wakeup(pd->private);
	}
}
/* Exported symbols for modules. */

/* Port arbitration (claim/release). */
EXPORT_SYMBOL(parport_claim);
EXPORT_SYMBOL(parport_claim_or_block);
EXPORT_SYMBOL(parport_release);
/* Port lifetime and discovery. */
EXPORT_SYMBOL(parport_register_port);
EXPORT_SYMBOL(parport_announce_port);
EXPORT_SYMBOL(parport_remove_port);
/* Driver registration. */
EXPORT_SYMBOL(parport_register_driver);
EXPORT_SYMBOL(parport_unregister_driver);
/* Device registration and refcounting. */
EXPORT_SYMBOL(parport_register_device);
EXPORT_SYMBOL(parport_unregister_device);
EXPORT_SYMBOL(parport_get_port);
EXPORT_SYMBOL(parport_put_port);
/* Port lookup helpers. */
EXPORT_SYMBOL(parport_find_number);
EXPORT_SYMBOL(parport_find_base);

MODULE_LICENSE("GPL");