omap_hdq.c 18 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725
  1. /*
  2. * drivers/w1/masters/omap_hdq.c
  3. *
  4. * Copyright (C) 2007 Texas Instruments, Inc.
  5. *
  6. * This file is licensed under the terms of the GNU General Public License
  7. * version 2. This program is licensed "as is" without any warranty of any
  8. * kind, whether express or implied.
  9. *
  10. */
  11. #include <linux/kernel.h>
  12. #include <linux/module.h>
  13. #include <linux/platform_device.h>
  14. #include <linux/interrupt.h>
  15. #include <linux/err.h>
  16. #include <linux/clk.h>
  17. #include <linux/io.h>
  18. #include <asm/irq.h>
  19. #include <mach/hardware.h>
  20. #include "../w1.h"
  21. #include "../w1_int.h"
  22. #define MOD_NAME "OMAP_HDQ:"
  23. #define OMAP_HDQ_REVISION 0x00
  24. #define OMAP_HDQ_TX_DATA 0x04
  25. #define OMAP_HDQ_RX_DATA 0x08
  26. #define OMAP_HDQ_CTRL_STATUS 0x0c
  27. #define OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK (1<<6)
  28. #define OMAP_HDQ_CTRL_STATUS_CLOCKENABLE (1<<5)
  29. #define OMAP_HDQ_CTRL_STATUS_GO (1<<4)
  30. #define OMAP_HDQ_CTRL_STATUS_INITIALIZATION (1<<2)
  31. #define OMAP_HDQ_CTRL_STATUS_DIR (1<<1)
  32. #define OMAP_HDQ_CTRL_STATUS_MODE (1<<0)
  33. #define OMAP_HDQ_INT_STATUS 0x10
  34. #define OMAP_HDQ_INT_STATUS_TXCOMPLETE (1<<2)
  35. #define OMAP_HDQ_INT_STATUS_RXCOMPLETE (1<<1)
  36. #define OMAP_HDQ_INT_STATUS_TIMEOUT (1<<0)
  37. #define OMAP_HDQ_SYSCONFIG 0x14
  38. #define OMAP_HDQ_SYSCONFIG_SOFTRESET (1<<1)
  39. #define OMAP_HDQ_SYSCONFIG_AUTOIDLE (1<<0)
  40. #define OMAP_HDQ_SYSSTATUS 0x18
  41. #define OMAP_HDQ_SYSSTATUS_RESETDONE (1<<0)
  42. #define OMAP_HDQ_FLAG_CLEAR 0
  43. #define OMAP_HDQ_FLAG_SET 1
  44. #define OMAP_HDQ_TIMEOUT (HZ/5)
  45. #define OMAP_HDQ_MAX_USER 4
/* Woken by the ISR once INT_STATUS has been captured */
static DECLARE_WAIT_QUEUE_HEAD(hdq_wait_queue);
/* Module parameter: id reported to the w1 core during bus search */
static int w1_id;

/* Per-controller driver state */
struct hdq_data {
	struct device *dev;
	void __iomem *hdq_base;		/* mapped HDQ register window */
	/* lock status update */
	struct mutex hdq_mutex;
	int hdq_usecount;		/* active users, capped at OMAP_HDQ_MAX_USER */
	struct clk *hdq_ick;		/* interface clock */
	struct clk *hdq_fck;		/* functional clock */
	u8 hdq_irqstatus;		/* last INT_STATUS value read by the ISR */
	/* device lock */
	spinlock_t hdq_spinlock;
	/*
	 * Used to control the call to omap_hdq_get and omap_hdq_put.
	 * HDQ Protocol: Write the CMD|REG_address first, followed by
	 * the data write or read.
	 */
	int init_trans;
};
static int __init omap_hdq_probe(struct platform_device *pdev);
static int omap_hdq_remove(struct platform_device *pdev);

/* Platform driver glue; matched against the "omap_hdq" platform device */
static struct platform_driver omap_hdq_driver = {
	.probe = omap_hdq_probe,
	.remove = omap_hdq_remove,
	.driver = {
		.name = "omap_hdq",
	},
};
static u8 omap_w1_read_byte(void *_hdq);
static void omap_w1_write_byte(void *_hdq, u8 byte);
static u8 omap_w1_reset_bus(void *_hdq);
static void omap_w1_search_bus(void *_hdq, struct w1_master *master_dev,
		u8 search_type, w1_slave_found_callback slave_found);

/* Bus-master operations registered with the w1 core */
static struct w1_bus_master omap_w1_master = {
	.read_byte = omap_w1_read_byte,
	.write_byte = omap_w1_write_byte,
	.reset_bus = omap_w1_reset_bus,
	.search = omap_w1_search_bus,
};
  86. /* HDQ register I/O routines */
  87. static inline u8 hdq_reg_in(struct hdq_data *hdq_data, u32 offset)
  88. {
  89. return __raw_readb(hdq_data->hdq_base + offset);
  90. }
  91. static inline void hdq_reg_out(struct hdq_data *hdq_data, u32 offset, u8 val)
  92. {
  93. __raw_writeb(val, hdq_data->hdq_base + offset);
  94. }
  95. static inline u8 hdq_reg_merge(struct hdq_data *hdq_data, u32 offset,
  96. u8 val, u8 mask)
  97. {
  98. u8 new_val = (__raw_readb(hdq_data->hdq_base + offset) & ~mask)
  99. | (val & mask);
  100. __raw_writeb(new_val, hdq_data->hdq_base + offset);
  101. return new_val;
  102. }
  103. /*
  104. * Wait for one or more bits in flag change.
  105. * HDQ_FLAG_SET: wait until any bit in the flag is set.
  106. * HDQ_FLAG_CLEAR: wait until all bits in the flag are cleared.
  107. * return 0 on success and -ETIMEDOUT in the case of timeout.
  108. */
  109. static int hdq_wait_for_flag(struct hdq_data *hdq_data, u32 offset,
  110. u8 flag, u8 flag_set, u8 *status)
  111. {
  112. int ret = 0;
  113. unsigned long timeout = jiffies + OMAP_HDQ_TIMEOUT;
  114. if (flag_set == OMAP_HDQ_FLAG_CLEAR) {
  115. /* wait for the flag clear */
  116. while (((*status = hdq_reg_in(hdq_data, offset)) & flag)
  117. && time_before(jiffies, timeout)) {
  118. schedule_timeout_uninterruptible(1);
  119. }
  120. if (*status & flag)
  121. ret = -ETIMEDOUT;
  122. } else if (flag_set == OMAP_HDQ_FLAG_SET) {
  123. /* wait for the flag set */
  124. while (!((*status = hdq_reg_in(hdq_data, offset)) & flag)
  125. && time_before(jiffies, timeout)) {
  126. schedule_timeout_uninterruptible(1);
  127. }
  128. if (!(*status & flag))
  129. ret = -ETIMEDOUT;
  130. } else
  131. return -EINVAL;
  132. return ret;
  133. }
  134. /* write out a byte and fill *status with HDQ_INT_STATUS */
  135. static int hdq_write_byte(struct hdq_data *hdq_data, u8 val, u8 *status)
  136. {
  137. int ret;
  138. u8 tmp_status;
  139. unsigned long irqflags;
  140. *status = 0;
  141. spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags);
  142. /* clear interrupt flags via a dummy read */
  143. hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
  144. /* ISR loads it with new INT_STATUS */
  145. hdq_data->hdq_irqstatus = 0;
  146. spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags);
  147. hdq_reg_out(hdq_data, OMAP_HDQ_TX_DATA, val);
  148. /* set the GO bit */
  149. hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS, OMAP_HDQ_CTRL_STATUS_GO,
  150. OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO);
  151. /* wait for the TXCOMPLETE bit */
  152. ret = wait_event_timeout(hdq_wait_queue,
  153. hdq_data->hdq_irqstatus, OMAP_HDQ_TIMEOUT);
  154. if (ret == 0) {
  155. dev_dbg(hdq_data->dev, "TX wait elapsed\n");
  156. goto out;
  157. }
  158. *status = hdq_data->hdq_irqstatus;
  159. /* check irqstatus */
  160. if (!(*status & OMAP_HDQ_INT_STATUS_TXCOMPLETE)) {
  161. dev_dbg(hdq_data->dev, "timeout waiting for"
  162. "TXCOMPLETE/RXCOMPLETE, %x", *status);
  163. ret = -ETIMEDOUT;
  164. goto out;
  165. }
  166. /* wait for the GO bit return to zero */
  167. ret = hdq_wait_for_flag(hdq_data, OMAP_HDQ_CTRL_STATUS,
  168. OMAP_HDQ_CTRL_STATUS_GO,
  169. OMAP_HDQ_FLAG_CLEAR, &tmp_status);
  170. if (ret) {
  171. dev_dbg(hdq_data->dev, "timeout waiting GO bit"
  172. "return to zero, %x", tmp_status);
  173. }
  174. out:
  175. return ret;
  176. }
  177. /* HDQ Interrupt service routine */
  178. static irqreturn_t hdq_isr(int irq, void *_hdq)
  179. {
  180. struct hdq_data *hdq_data = _hdq;
  181. unsigned long irqflags;
  182. spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags);
  183. hdq_data->hdq_irqstatus = hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
  184. spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags);
  185. dev_dbg(hdq_data->dev, "hdq_isr: %x", hdq_data->hdq_irqstatus);
  186. if (hdq_data->hdq_irqstatus &
  187. (OMAP_HDQ_INT_STATUS_TXCOMPLETE | OMAP_HDQ_INT_STATUS_RXCOMPLETE
  188. | OMAP_HDQ_INT_STATUS_TIMEOUT)) {
  189. /* wake up sleeping process */
  190. wake_up(&hdq_wait_queue);
  191. }
  192. return IRQ_HANDLED;
  193. }
  194. /* HDQ Mode: always return success */
  195. static u8 omap_w1_reset_bus(void *_hdq)
  196. {
  197. return 0;
  198. }
  199. /* W1 search callback function */
  200. static void omap_w1_search_bus(void *_hdq, struct w1_master *master_dev,
  201. u8 search_type, w1_slave_found_callback slave_found)
  202. {
  203. u64 module_id, rn_le, cs, id;
  204. if (w1_id)
  205. module_id = w1_id;
  206. else
  207. module_id = 0x1;
  208. rn_le = cpu_to_le64(module_id);
  209. /*
  210. * HDQ might not obey truly the 1-wire spec.
  211. * So calculate CRC based on module parameter.
  212. */
  213. cs = w1_calc_crc8((u8 *)&rn_le, 7);
  214. id = (cs << 56) | module_id;
  215. slave_found(master_dev, id);
  216. }
/*
 * Soft-reset the HDQ block and restore HDQ mode.
 * Returns 0 on success, or -ETIMEDOUT if RESETDONE never asserted.
 */
static int _omap_hdq_reset(struct hdq_data *hdq_data)
{
	int ret;
	u8 tmp_status;

	hdq_reg_out(hdq_data, OMAP_HDQ_SYSCONFIG, OMAP_HDQ_SYSCONFIG_SOFTRESET);
	/*
	 * Select HDQ mode & enable clocks.
	 * It is observed that INT flags can't be cleared via a read and GO/INIT
	 * won't return to zero if interrupt is disabled. So we always enable
	 * interrupt.
	 */
	hdq_reg_out(hdq_data, OMAP_HDQ_CTRL_STATUS,
		OMAP_HDQ_CTRL_STATUS_CLOCKENABLE |
		OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK);

	/* wait for reset to complete */
	ret = hdq_wait_for_flag(hdq_data, OMAP_HDQ_SYSSTATUS,
		OMAP_HDQ_SYSSTATUS_RESETDONE, OMAP_HDQ_FLAG_SET, &tmp_status);
	if (ret)
		dev_dbg(hdq_data->dev, "timeout waiting HDQ reset, %x",
			tmp_status);
	else {
		/* reset done: re-select HDQ mode and let the block autoidle */
		hdq_reg_out(hdq_data, OMAP_HDQ_CTRL_STATUS,
			OMAP_HDQ_CTRL_STATUS_CLOCKENABLE |
			OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK);
		hdq_reg_out(hdq_data, OMAP_HDQ_SYSCONFIG,
			OMAP_HDQ_SYSCONFIG_AUTOIDLE);
	}

	return ret;
}
  246. /* Issue break pulse to the device */
  247. static int omap_hdq_break(struct hdq_data *hdq_data)
  248. {
  249. int ret = 0;
  250. u8 tmp_status;
  251. unsigned long irqflags;
  252. ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
  253. if (ret < 0) {
  254. dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
  255. ret = -EINTR;
  256. goto rtn;
  257. }
  258. spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags);
  259. /* clear interrupt flags via a dummy read */
  260. hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
  261. /* ISR loads it with new INT_STATUS */
  262. hdq_data->hdq_irqstatus = 0;
  263. spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags);
  264. /* set the INIT and GO bit */
  265. hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS,
  266. OMAP_HDQ_CTRL_STATUS_INITIALIZATION | OMAP_HDQ_CTRL_STATUS_GO,
  267. OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_INITIALIZATION |
  268. OMAP_HDQ_CTRL_STATUS_GO);
  269. /* wait for the TIMEOUT bit */
  270. ret = wait_event_timeout(hdq_wait_queue,
  271. hdq_data->hdq_irqstatus, OMAP_HDQ_TIMEOUT);
  272. if (ret == 0) {
  273. dev_dbg(hdq_data->dev, "break wait elapsed\n");
  274. ret = -EINTR;
  275. goto out;
  276. }
  277. tmp_status = hdq_data->hdq_irqstatus;
  278. /* check irqstatus */
  279. if (!(tmp_status & OMAP_HDQ_INT_STATUS_TIMEOUT)) {
  280. dev_dbg(hdq_data->dev, "timeout waiting for TIMEOUT, %x",
  281. tmp_status);
  282. ret = -ETIMEDOUT;
  283. goto out;
  284. }
  285. /*
  286. * wait for both INIT and GO bits rerurn to zero.
  287. * zero wait time expected for interrupt mode.
  288. */
  289. ret = hdq_wait_for_flag(hdq_data, OMAP_HDQ_CTRL_STATUS,
  290. OMAP_HDQ_CTRL_STATUS_INITIALIZATION |
  291. OMAP_HDQ_CTRL_STATUS_GO, OMAP_HDQ_FLAG_CLEAR,
  292. &tmp_status);
  293. if (ret)
  294. dev_dbg(hdq_data->dev, "timeout waiting INIT&GO bits"
  295. "return to zero, %x", tmp_status);
  296. out:
  297. mutex_unlock(&hdq_data->hdq_mutex);
  298. rtn:
  299. return ret;
  300. }
  301. static int hdq_read_byte(struct hdq_data *hdq_data, u8 *val)
  302. {
  303. int ret = 0;
  304. u8 status;
  305. unsigned long timeout = jiffies + OMAP_HDQ_TIMEOUT;
  306. ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
  307. if (ret < 0) {
  308. ret = -EINTR;
  309. goto rtn;
  310. }
  311. if (!hdq_data->hdq_usecount) {
  312. ret = -EINVAL;
  313. goto out;
  314. }
  315. if (!(hdq_data->hdq_irqstatus & OMAP_HDQ_INT_STATUS_RXCOMPLETE)) {
  316. hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS,
  317. OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO,
  318. OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO);
  319. /*
  320. * The RX comes immediately after TX. It
  321. * triggers another interrupt before we
  322. * sleep. So we have to wait for RXCOMPLETE bit.
  323. */
  324. while (!(hdq_data->hdq_irqstatus
  325. & OMAP_HDQ_INT_STATUS_RXCOMPLETE)
  326. && time_before(jiffies, timeout)) {
  327. schedule_timeout_uninterruptible(1);
  328. }
  329. hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS, 0,
  330. OMAP_HDQ_CTRL_STATUS_DIR);
  331. status = hdq_data->hdq_irqstatus;
  332. /* check irqstatus */
  333. if (!(status & OMAP_HDQ_INT_STATUS_RXCOMPLETE)) {
  334. dev_dbg(hdq_data->dev, "timeout waiting for"
  335. "RXCOMPLETE, %x", status);
  336. ret = -ETIMEDOUT;
  337. goto out;
  338. }
  339. }
  340. /* the data is ready. Read it in! */
  341. *val = hdq_reg_in(hdq_data, OMAP_HDQ_RX_DATA);
  342. out:
  343. mutex_unlock(&hdq_data->hdq_mutex);
  344. rtn:
  345. return 0;
  346. }
  347. /* Enable clocks and set the controller to HDQ mode */
  348. static int omap_hdq_get(struct hdq_data *hdq_data)
  349. {
  350. int ret = 0;
  351. ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
  352. if (ret < 0) {
  353. ret = -EINTR;
  354. goto rtn;
  355. }
  356. if (OMAP_HDQ_MAX_USER == hdq_data->hdq_usecount) {
  357. dev_dbg(hdq_data->dev, "attempt to exceed the max use count");
  358. ret = -EINVAL;
  359. goto out;
  360. } else {
  361. hdq_data->hdq_usecount++;
  362. try_module_get(THIS_MODULE);
  363. if (1 == hdq_data->hdq_usecount) {
  364. if (clk_enable(hdq_data->hdq_ick)) {
  365. dev_dbg(hdq_data->dev, "Can not enable ick\n");
  366. ret = -ENODEV;
  367. goto clk_err;
  368. }
  369. if (clk_enable(hdq_data->hdq_fck)) {
  370. dev_dbg(hdq_data->dev, "Can not enable fck\n");
  371. clk_disable(hdq_data->hdq_ick);
  372. ret = -ENODEV;
  373. goto clk_err;
  374. }
  375. /* make sure HDQ is out of reset */
  376. if (!(hdq_reg_in(hdq_data, OMAP_HDQ_SYSSTATUS) &
  377. OMAP_HDQ_SYSSTATUS_RESETDONE)) {
  378. ret = _omap_hdq_reset(hdq_data);
  379. if (ret)
  380. /* back up the count */
  381. hdq_data->hdq_usecount--;
  382. } else {
  383. /* select HDQ mode & enable clocks */
  384. hdq_reg_out(hdq_data, OMAP_HDQ_CTRL_STATUS,
  385. OMAP_HDQ_CTRL_STATUS_CLOCKENABLE |
  386. OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK);
  387. hdq_reg_out(hdq_data, OMAP_HDQ_SYSCONFIG,
  388. OMAP_HDQ_SYSCONFIG_AUTOIDLE);
  389. hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
  390. }
  391. }
  392. }
  393. clk_err:
  394. clk_put(hdq_data->hdq_ick);
  395. clk_put(hdq_data->hdq_fck);
  396. out:
  397. mutex_unlock(&hdq_data->hdq_mutex);
  398. rtn:
  399. return ret;
  400. }
  401. /* Disable clocks to the module */
  402. static int omap_hdq_put(struct hdq_data *hdq_data)
  403. {
  404. int ret = 0;
  405. ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
  406. if (ret < 0)
  407. return -EINTR;
  408. if (0 == hdq_data->hdq_usecount) {
  409. dev_dbg(hdq_data->dev, "attempt to decrement use count"
  410. "when it is zero");
  411. ret = -EINVAL;
  412. } else {
  413. hdq_data->hdq_usecount--;
  414. module_put(THIS_MODULE);
  415. if (0 == hdq_data->hdq_usecount) {
  416. clk_disable(hdq_data->hdq_ick);
  417. clk_disable(hdq_data->hdq_fck);
  418. }
  419. }
  420. mutex_unlock(&hdq_data->hdq_mutex);
  421. return ret;
  422. }
  423. /* Read a byte of data from the device */
  424. static u8 omap_w1_read_byte(void *_hdq)
  425. {
  426. struct hdq_data *hdq_data = _hdq;
  427. u8 val = 0;
  428. int ret;
  429. ret = hdq_read_byte(hdq_data, &val);
  430. if (ret) {
  431. ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
  432. if (ret < 0) {
  433. dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
  434. return -EINTR;
  435. }
  436. hdq_data->init_trans = 0;
  437. mutex_unlock(&hdq_data->hdq_mutex);
  438. omap_hdq_put(hdq_data);
  439. return -1;
  440. }
  441. /* Write followed by a read, release the module */
  442. if (hdq_data->init_trans) {
  443. ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
  444. if (ret < 0) {
  445. dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
  446. return -EINTR;
  447. }
  448. hdq_data->init_trans = 0;
  449. mutex_unlock(&hdq_data->hdq_mutex);
  450. omap_hdq_put(hdq_data);
  451. }
  452. return val;
  453. }
  454. /* Write a byte of data to the device */
  455. static void omap_w1_write_byte(void *_hdq, u8 byte)
  456. {
  457. struct hdq_data *hdq_data = _hdq;
  458. int ret;
  459. u8 status;
  460. /* First write to initialize the transfer */
  461. if (hdq_data->init_trans == 0)
  462. omap_hdq_get(hdq_data);
  463. ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
  464. if (ret < 0) {
  465. dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
  466. return;
  467. }
  468. hdq_data->init_trans++;
  469. mutex_unlock(&hdq_data->hdq_mutex);
  470. ret = hdq_write_byte(hdq_data, byte, &status);
  471. if (ret == 0) {
  472. dev_dbg(hdq_data->dev, "TX failure:Ctrl status %x\n", status);
  473. return;
  474. }
  475. /* Second write, data transfered. Release the module */
  476. if (hdq_data->init_trans > 1) {
  477. omap_hdq_put(hdq_data);
  478. ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
  479. if (ret < 0) {
  480. dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
  481. return;
  482. }
  483. hdq_data->init_trans = 0;
  484. mutex_unlock(&hdq_data->hdq_mutex);
  485. }
  486. return;
  487. }
  488. static int __init omap_hdq_probe(struct platform_device *pdev)
  489. {
  490. struct hdq_data *hdq_data;
  491. struct resource *res;
  492. int ret, irq;
  493. u8 rev;
  494. hdq_data = kmalloc(sizeof(*hdq_data), GFP_KERNEL);
  495. if (!hdq_data) {
  496. dev_dbg(&pdev->dev, "unable to allocate memory\n");
  497. ret = -ENOMEM;
  498. goto err_kmalloc;
  499. }
  500. hdq_data->dev = &pdev->dev;
  501. platform_set_drvdata(pdev, hdq_data);
  502. res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  503. if (!res) {
  504. dev_dbg(&pdev->dev, "unable to get resource\n");
  505. ret = -ENXIO;
  506. goto err_resource;
  507. }
  508. hdq_data->hdq_base = ioremap(res->start, SZ_4K);
  509. if (!hdq_data->hdq_base) {
  510. dev_dbg(&pdev->dev, "ioremap failed\n");
  511. ret = -EINVAL;
  512. goto err_ioremap;
  513. }
  514. /* get interface & functional clock objects */
  515. hdq_data->hdq_ick = clk_get(&pdev->dev, "hdq_ick");
  516. hdq_data->hdq_fck = clk_get(&pdev->dev, "hdq_fck");
  517. if (IS_ERR(hdq_data->hdq_ick) || IS_ERR(hdq_data->hdq_fck)) {
  518. dev_dbg(&pdev->dev, "Can't get HDQ clock objects\n");
  519. if (IS_ERR(hdq_data->hdq_ick)) {
  520. ret = PTR_ERR(hdq_data->hdq_ick);
  521. goto err_clk;
  522. }
  523. if (IS_ERR(hdq_data->hdq_fck)) {
  524. ret = PTR_ERR(hdq_data->hdq_fck);
  525. clk_put(hdq_data->hdq_ick);
  526. goto err_clk;
  527. }
  528. }
  529. hdq_data->hdq_usecount = 0;
  530. mutex_init(&hdq_data->hdq_mutex);
  531. if (clk_enable(hdq_data->hdq_ick)) {
  532. dev_dbg(&pdev->dev, "Can not enable ick\n");
  533. ret = -ENODEV;
  534. goto err_intfclk;
  535. }
  536. if (clk_enable(hdq_data->hdq_fck)) {
  537. dev_dbg(&pdev->dev, "Can not enable fck\n");
  538. ret = -ENODEV;
  539. goto err_fnclk;
  540. }
  541. rev = hdq_reg_in(hdq_data, OMAP_HDQ_REVISION);
  542. dev_info(&pdev->dev, "OMAP HDQ Hardware Rev %c.%c. Driver in %s mode\n",
  543. (rev >> 4) + '0', (rev & 0x0f) + '0', "Interrupt");
  544. spin_lock_init(&hdq_data->hdq_spinlock);
  545. irq = platform_get_irq(pdev, 0);
  546. if (irq < 0) {
  547. ret = -ENXIO;
  548. goto err_irq;
  549. }
  550. ret = request_irq(irq, hdq_isr, IRQF_DISABLED, "omap_hdq", hdq_data);
  551. if (ret < 0) {
  552. dev_dbg(&pdev->dev, "could not request irq\n");
  553. goto err_irq;
  554. }
  555. omap_hdq_break(hdq_data);
  556. /* don't clock the HDQ until it is needed */
  557. clk_disable(hdq_data->hdq_ick);
  558. clk_disable(hdq_data->hdq_fck);
  559. omap_w1_master.data = hdq_data;
  560. ret = w1_add_master_device(&omap_w1_master);
  561. if (ret) {
  562. dev_dbg(&pdev->dev, "Failure in registering w1 master\n");
  563. goto err_w1;
  564. }
  565. return 0;
  566. err_w1:
  567. err_irq:
  568. clk_disable(hdq_data->hdq_fck);
  569. err_fnclk:
  570. clk_disable(hdq_data->hdq_ick);
  571. err_intfclk:
  572. clk_put(hdq_data->hdq_ick);
  573. clk_put(hdq_data->hdq_fck);
  574. err_clk:
  575. iounmap(hdq_data->hdq_base);
  576. err_ioremap:
  577. err_resource:
  578. platform_set_drvdata(pdev, NULL);
  579. kfree(hdq_data);
  580. err_kmalloc:
  581. return ret;
  582. }
  583. static int omap_hdq_remove(struct platform_device *pdev)
  584. {
  585. struct hdq_data *hdq_data = platform_get_drvdata(pdev);
  586. mutex_lock(&hdq_data->hdq_mutex);
  587. if (hdq_data->hdq_usecount) {
  588. dev_dbg(&pdev->dev, "removed when use count is not zero\n");
  589. return -EBUSY;
  590. }
  591. mutex_unlock(&hdq_data->hdq_mutex);
  592. /* remove module dependency */
  593. clk_put(hdq_data->hdq_ick);
  594. clk_put(hdq_data->hdq_fck);
  595. free_irq(INT_24XX_HDQ_IRQ, hdq_data);
  596. platform_set_drvdata(pdev, NULL);
  597. iounmap(hdq_data->hdq_base);
  598. kfree(hdq_data);
  599. return 0;
  600. }
  601. static int __init
  602. omap_hdq_init(void)
  603. {
  604. return platform_driver_register(&omap_hdq_driver);
  605. }
  606. module_init(omap_hdq_init);
  607. static void __exit
  608. omap_hdq_exit(void)
  609. {
  610. platform_driver_unregister(&omap_hdq_driver);
  611. }
  612. module_exit(omap_hdq_exit);
/* Module parameter and metadata */
module_param(w1_id, int, S_IRUSR);
MODULE_PARM_DESC(w1_id, "1-wire id for the slave detection");
MODULE_AUTHOR("Texas Instruments");
MODULE_DESCRIPTION("HDQ driver Library");
MODULE_LICENSE("GPL");