omap_hdq.c 18 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727
/*
 * drivers/w1/masters/omap_hdq.c
 *
 * Copyright (C) 2007 Texas Instruments, Inc.
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2. This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 *
 */
  11. #include <linux/kernel.h>
  12. #include <linux/module.h>
  13. #include <linux/platform_device.h>
  14. #include <linux/interrupt.h>
  15. #include <linux/slab.h>
  16. #include <linux/err.h>
  17. #include <linux/clk.h>
  18. #include <linux/io.h>
  19. #include <asm/irq.h>
  20. #include <mach/hardware.h>
  21. #include "../w1.h"
  22. #include "../w1_int.h"
  23. #define MOD_NAME "OMAP_HDQ:"
  24. #define OMAP_HDQ_REVISION 0x00
  25. #define OMAP_HDQ_TX_DATA 0x04
  26. #define OMAP_HDQ_RX_DATA 0x08
  27. #define OMAP_HDQ_CTRL_STATUS 0x0c
  28. #define OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK (1<<6)
  29. #define OMAP_HDQ_CTRL_STATUS_CLOCKENABLE (1<<5)
  30. #define OMAP_HDQ_CTRL_STATUS_GO (1<<4)
  31. #define OMAP_HDQ_CTRL_STATUS_INITIALIZATION (1<<2)
  32. #define OMAP_HDQ_CTRL_STATUS_DIR (1<<1)
  33. #define OMAP_HDQ_CTRL_STATUS_MODE (1<<0)
  34. #define OMAP_HDQ_INT_STATUS 0x10
  35. #define OMAP_HDQ_INT_STATUS_TXCOMPLETE (1<<2)
  36. #define OMAP_HDQ_INT_STATUS_RXCOMPLETE (1<<1)
  37. #define OMAP_HDQ_INT_STATUS_TIMEOUT (1<<0)
  38. #define OMAP_HDQ_SYSCONFIG 0x14
  39. #define OMAP_HDQ_SYSCONFIG_SOFTRESET (1<<1)
  40. #define OMAP_HDQ_SYSCONFIG_AUTOIDLE (1<<0)
  41. #define OMAP_HDQ_SYSSTATUS 0x18
  42. #define OMAP_HDQ_SYSSTATUS_RESETDONE (1<<0)
  43. #define OMAP_HDQ_FLAG_CLEAR 0
  44. #define OMAP_HDQ_FLAG_SET 1
  45. #define OMAP_HDQ_TIMEOUT (HZ/5)
  46. #define OMAP_HDQ_MAX_USER 4
  47. static DECLARE_WAIT_QUEUE_HEAD(hdq_wait_queue);
  48. static int w1_id;
  49. struct hdq_data {
  50. struct device *dev;
  51. void __iomem *hdq_base;
  52. /* lock status update */
  53. struct mutex hdq_mutex;
  54. int hdq_usecount;
  55. struct clk *hdq_ick;
  56. struct clk *hdq_fck;
  57. u8 hdq_irqstatus;
  58. /* device lock */
  59. spinlock_t hdq_spinlock;
  60. /*
  61. * Used to control the call to omap_hdq_get and omap_hdq_put.
  62. * HDQ Protocol: Write the CMD|REG_address first, followed by
  63. * the data wrire or read.
  64. */
  65. int init_trans;
  66. };
  67. static int __devinit omap_hdq_probe(struct platform_device *pdev);
  68. static int omap_hdq_remove(struct platform_device *pdev);
  69. static struct platform_driver omap_hdq_driver = {
  70. .probe = omap_hdq_probe,
  71. .remove = omap_hdq_remove,
  72. .driver = {
  73. .name = "omap_hdq",
  74. },
  75. };
  76. static u8 omap_w1_read_byte(void *_hdq);
  77. static void omap_w1_write_byte(void *_hdq, u8 byte);
  78. static u8 omap_w1_reset_bus(void *_hdq);
  79. static void omap_w1_search_bus(void *_hdq, struct w1_master *master_dev,
  80. u8 search_type, w1_slave_found_callback slave_found);
  81. static struct w1_bus_master omap_w1_master = {
  82. .read_byte = omap_w1_read_byte,
  83. .write_byte = omap_w1_write_byte,
  84. .reset_bus = omap_w1_reset_bus,
  85. .search = omap_w1_search_bus,
  86. };
  87. /* HDQ register I/O routines */
  88. static inline u8 hdq_reg_in(struct hdq_data *hdq_data, u32 offset)
  89. {
  90. return __raw_readb(hdq_data->hdq_base + offset);
  91. }
  92. static inline void hdq_reg_out(struct hdq_data *hdq_data, u32 offset, u8 val)
  93. {
  94. __raw_writeb(val, hdq_data->hdq_base + offset);
  95. }
  96. static inline u8 hdq_reg_merge(struct hdq_data *hdq_data, u32 offset,
  97. u8 val, u8 mask)
  98. {
  99. u8 new_val = (__raw_readb(hdq_data->hdq_base + offset) & ~mask)
  100. | (val & mask);
  101. __raw_writeb(new_val, hdq_data->hdq_base + offset);
  102. return new_val;
  103. }
  104. /*
  105. * Wait for one or more bits in flag change.
  106. * HDQ_FLAG_SET: wait until any bit in the flag is set.
  107. * HDQ_FLAG_CLEAR: wait until all bits in the flag are cleared.
  108. * return 0 on success and -ETIMEDOUT in the case of timeout.
  109. */
  110. static int hdq_wait_for_flag(struct hdq_data *hdq_data, u32 offset,
  111. u8 flag, u8 flag_set, u8 *status)
  112. {
  113. int ret = 0;
  114. unsigned long timeout = jiffies + OMAP_HDQ_TIMEOUT;
  115. if (flag_set == OMAP_HDQ_FLAG_CLEAR) {
  116. /* wait for the flag clear */
  117. while (((*status = hdq_reg_in(hdq_data, offset)) & flag)
  118. && time_before(jiffies, timeout)) {
  119. schedule_timeout_uninterruptible(1);
  120. }
  121. if (*status & flag)
  122. ret = -ETIMEDOUT;
  123. } else if (flag_set == OMAP_HDQ_FLAG_SET) {
  124. /* wait for the flag set */
  125. while (!((*status = hdq_reg_in(hdq_data, offset)) & flag)
  126. && time_before(jiffies, timeout)) {
  127. schedule_timeout_uninterruptible(1);
  128. }
  129. if (!(*status & flag))
  130. ret = -ETIMEDOUT;
  131. } else
  132. return -EINVAL;
  133. return ret;
  134. }
  135. /* write out a byte and fill *status with HDQ_INT_STATUS */
  136. static int hdq_write_byte(struct hdq_data *hdq_data, u8 val, u8 *status)
  137. {
  138. int ret;
  139. u8 tmp_status;
  140. unsigned long irqflags;
  141. *status = 0;
  142. spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags);
  143. /* clear interrupt flags via a dummy read */
  144. hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
  145. /* ISR loads it with new INT_STATUS */
  146. hdq_data->hdq_irqstatus = 0;
  147. spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags);
  148. hdq_reg_out(hdq_data, OMAP_HDQ_TX_DATA, val);
  149. /* set the GO bit */
  150. hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS, OMAP_HDQ_CTRL_STATUS_GO,
  151. OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO);
  152. /* wait for the TXCOMPLETE bit */
  153. ret = wait_event_timeout(hdq_wait_queue,
  154. hdq_data->hdq_irqstatus, OMAP_HDQ_TIMEOUT);
  155. if (ret == 0) {
  156. dev_dbg(hdq_data->dev, "TX wait elapsed\n");
  157. goto out;
  158. }
  159. *status = hdq_data->hdq_irqstatus;
  160. /* check irqstatus */
  161. if (!(*status & OMAP_HDQ_INT_STATUS_TXCOMPLETE)) {
  162. dev_dbg(hdq_data->dev, "timeout waiting for"
  163. "TXCOMPLETE/RXCOMPLETE, %x", *status);
  164. ret = -ETIMEDOUT;
  165. goto out;
  166. }
  167. /* wait for the GO bit return to zero */
  168. ret = hdq_wait_for_flag(hdq_data, OMAP_HDQ_CTRL_STATUS,
  169. OMAP_HDQ_CTRL_STATUS_GO,
  170. OMAP_HDQ_FLAG_CLEAR, &tmp_status);
  171. if (ret) {
  172. dev_dbg(hdq_data->dev, "timeout waiting GO bit"
  173. "return to zero, %x", tmp_status);
  174. }
  175. out:
  176. return ret;
  177. }
  178. /* HDQ Interrupt service routine */
  179. static irqreturn_t hdq_isr(int irq, void *_hdq)
  180. {
  181. struct hdq_data *hdq_data = _hdq;
  182. unsigned long irqflags;
  183. spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags);
  184. hdq_data->hdq_irqstatus = hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
  185. spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags);
  186. dev_dbg(hdq_data->dev, "hdq_isr: %x", hdq_data->hdq_irqstatus);
  187. if (hdq_data->hdq_irqstatus &
  188. (OMAP_HDQ_INT_STATUS_TXCOMPLETE | OMAP_HDQ_INT_STATUS_RXCOMPLETE
  189. | OMAP_HDQ_INT_STATUS_TIMEOUT)) {
  190. /* wake up sleeping process */
  191. wake_up(&hdq_wait_queue);
  192. }
  193. return IRQ_HANDLED;
  194. }
  195. /* HDQ Mode: always return success */
  196. static u8 omap_w1_reset_bus(void *_hdq)
  197. {
  198. return 0;
  199. }
  200. /* W1 search callback function */
  201. static void omap_w1_search_bus(void *_hdq, struct w1_master *master_dev,
  202. u8 search_type, w1_slave_found_callback slave_found)
  203. {
  204. u64 module_id, rn_le, cs, id;
  205. if (w1_id)
  206. module_id = w1_id;
  207. else
  208. module_id = 0x1;
  209. rn_le = cpu_to_le64(module_id);
  210. /*
  211. * HDQ might not obey truly the 1-wire spec.
  212. * So calculate CRC based on module parameter.
  213. */
  214. cs = w1_calc_crc8((u8 *)&rn_le, 7);
  215. id = (cs << 56) | module_id;
  216. slave_found(master_dev, id);
  217. }
  218. static int _omap_hdq_reset(struct hdq_data *hdq_data)
  219. {
  220. int ret;
  221. u8 tmp_status;
  222. hdq_reg_out(hdq_data, OMAP_HDQ_SYSCONFIG, OMAP_HDQ_SYSCONFIG_SOFTRESET);
  223. /*
  224. * Select HDQ mode & enable clocks.
  225. * It is observed that INT flags can't be cleared via a read and GO/INIT
  226. * won't return to zero if interrupt is disabled. So we always enable
  227. * interrupt.
  228. */
  229. hdq_reg_out(hdq_data, OMAP_HDQ_CTRL_STATUS,
  230. OMAP_HDQ_CTRL_STATUS_CLOCKENABLE |
  231. OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK);
  232. /* wait for reset to complete */
  233. ret = hdq_wait_for_flag(hdq_data, OMAP_HDQ_SYSSTATUS,
  234. OMAP_HDQ_SYSSTATUS_RESETDONE, OMAP_HDQ_FLAG_SET, &tmp_status);
  235. if (ret)
  236. dev_dbg(hdq_data->dev, "timeout waiting HDQ reset, %x",
  237. tmp_status);
  238. else {
  239. hdq_reg_out(hdq_data, OMAP_HDQ_CTRL_STATUS,
  240. OMAP_HDQ_CTRL_STATUS_CLOCKENABLE |
  241. OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK);
  242. hdq_reg_out(hdq_data, OMAP_HDQ_SYSCONFIG,
  243. OMAP_HDQ_SYSCONFIG_AUTOIDLE);
  244. }
  245. return ret;
  246. }
  247. /* Issue break pulse to the device */
  248. static int omap_hdq_break(struct hdq_data *hdq_data)
  249. {
  250. int ret = 0;
  251. u8 tmp_status;
  252. unsigned long irqflags;
  253. ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
  254. if (ret < 0) {
  255. dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
  256. ret = -EINTR;
  257. goto rtn;
  258. }
  259. spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags);
  260. /* clear interrupt flags via a dummy read */
  261. hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
  262. /* ISR loads it with new INT_STATUS */
  263. hdq_data->hdq_irqstatus = 0;
  264. spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags);
  265. /* set the INIT and GO bit */
  266. hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS,
  267. OMAP_HDQ_CTRL_STATUS_INITIALIZATION | OMAP_HDQ_CTRL_STATUS_GO,
  268. OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_INITIALIZATION |
  269. OMAP_HDQ_CTRL_STATUS_GO);
  270. /* wait for the TIMEOUT bit */
  271. ret = wait_event_timeout(hdq_wait_queue,
  272. hdq_data->hdq_irqstatus, OMAP_HDQ_TIMEOUT);
  273. if (ret == 0) {
  274. dev_dbg(hdq_data->dev, "break wait elapsed\n");
  275. ret = -EINTR;
  276. goto out;
  277. }
  278. tmp_status = hdq_data->hdq_irqstatus;
  279. /* check irqstatus */
  280. if (!(tmp_status & OMAP_HDQ_INT_STATUS_TIMEOUT)) {
  281. dev_dbg(hdq_data->dev, "timeout waiting for TIMEOUT, %x",
  282. tmp_status);
  283. ret = -ETIMEDOUT;
  284. goto out;
  285. }
  286. /*
  287. * wait for both INIT and GO bits rerurn to zero.
  288. * zero wait time expected for interrupt mode.
  289. */
  290. ret = hdq_wait_for_flag(hdq_data, OMAP_HDQ_CTRL_STATUS,
  291. OMAP_HDQ_CTRL_STATUS_INITIALIZATION |
  292. OMAP_HDQ_CTRL_STATUS_GO, OMAP_HDQ_FLAG_CLEAR,
  293. &tmp_status);
  294. if (ret)
  295. dev_dbg(hdq_data->dev, "timeout waiting INIT&GO bits"
  296. "return to zero, %x", tmp_status);
  297. out:
  298. mutex_unlock(&hdq_data->hdq_mutex);
  299. rtn:
  300. return ret;
  301. }
  302. static int hdq_read_byte(struct hdq_data *hdq_data, u8 *val)
  303. {
  304. int ret = 0;
  305. u8 status;
  306. unsigned long timeout = jiffies + OMAP_HDQ_TIMEOUT;
  307. ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
  308. if (ret < 0) {
  309. ret = -EINTR;
  310. goto rtn;
  311. }
  312. if (!hdq_data->hdq_usecount) {
  313. ret = -EINVAL;
  314. goto out;
  315. }
  316. if (!(hdq_data->hdq_irqstatus & OMAP_HDQ_INT_STATUS_RXCOMPLETE)) {
  317. hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS,
  318. OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO,
  319. OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO);
  320. /*
  321. * The RX comes immediately after TX. It
  322. * triggers another interrupt before we
  323. * sleep. So we have to wait for RXCOMPLETE bit.
  324. */
  325. while (!(hdq_data->hdq_irqstatus
  326. & OMAP_HDQ_INT_STATUS_RXCOMPLETE)
  327. && time_before(jiffies, timeout)) {
  328. schedule_timeout_uninterruptible(1);
  329. }
  330. hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS, 0,
  331. OMAP_HDQ_CTRL_STATUS_DIR);
  332. status = hdq_data->hdq_irqstatus;
  333. /* check irqstatus */
  334. if (!(status & OMAP_HDQ_INT_STATUS_RXCOMPLETE)) {
  335. dev_dbg(hdq_data->dev, "timeout waiting for"
  336. "RXCOMPLETE, %x", status);
  337. ret = -ETIMEDOUT;
  338. goto out;
  339. }
  340. }
  341. /* the data is ready. Read it in! */
  342. *val = hdq_reg_in(hdq_data, OMAP_HDQ_RX_DATA);
  343. out:
  344. mutex_unlock(&hdq_data->hdq_mutex);
  345. rtn:
  346. return 0;
  347. }
  348. /* Enable clocks and set the controller to HDQ mode */
  349. static int omap_hdq_get(struct hdq_data *hdq_data)
  350. {
  351. int ret = 0;
  352. ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
  353. if (ret < 0) {
  354. ret = -EINTR;
  355. goto rtn;
  356. }
  357. if (OMAP_HDQ_MAX_USER == hdq_data->hdq_usecount) {
  358. dev_dbg(hdq_data->dev, "attempt to exceed the max use count");
  359. ret = -EINVAL;
  360. goto out;
  361. } else {
  362. hdq_data->hdq_usecount++;
  363. try_module_get(THIS_MODULE);
  364. if (1 == hdq_data->hdq_usecount) {
  365. if (clk_enable(hdq_data->hdq_ick)) {
  366. dev_dbg(hdq_data->dev, "Can not enable ick\n");
  367. ret = -ENODEV;
  368. goto clk_err;
  369. }
  370. if (clk_enable(hdq_data->hdq_fck)) {
  371. dev_dbg(hdq_data->dev, "Can not enable fck\n");
  372. clk_disable(hdq_data->hdq_ick);
  373. ret = -ENODEV;
  374. goto clk_err;
  375. }
  376. /* make sure HDQ is out of reset */
  377. if (!(hdq_reg_in(hdq_data, OMAP_HDQ_SYSSTATUS) &
  378. OMAP_HDQ_SYSSTATUS_RESETDONE)) {
  379. ret = _omap_hdq_reset(hdq_data);
  380. if (ret)
  381. /* back up the count */
  382. hdq_data->hdq_usecount--;
  383. } else {
  384. /* select HDQ mode & enable clocks */
  385. hdq_reg_out(hdq_data, OMAP_HDQ_CTRL_STATUS,
  386. OMAP_HDQ_CTRL_STATUS_CLOCKENABLE |
  387. OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK);
  388. hdq_reg_out(hdq_data, OMAP_HDQ_SYSCONFIG,
  389. OMAP_HDQ_SYSCONFIG_AUTOIDLE);
  390. hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
  391. }
  392. }
  393. }
  394. clk_err:
  395. clk_put(hdq_data->hdq_ick);
  396. clk_put(hdq_data->hdq_fck);
  397. out:
  398. mutex_unlock(&hdq_data->hdq_mutex);
  399. rtn:
  400. return ret;
  401. }
  402. /* Disable clocks to the module */
  403. static int omap_hdq_put(struct hdq_data *hdq_data)
  404. {
  405. int ret = 0;
  406. ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
  407. if (ret < 0)
  408. return -EINTR;
  409. if (0 == hdq_data->hdq_usecount) {
  410. dev_dbg(hdq_data->dev, "attempt to decrement use count"
  411. "when it is zero");
  412. ret = -EINVAL;
  413. } else {
  414. hdq_data->hdq_usecount--;
  415. module_put(THIS_MODULE);
  416. if (0 == hdq_data->hdq_usecount) {
  417. clk_disable(hdq_data->hdq_ick);
  418. clk_disable(hdq_data->hdq_fck);
  419. }
  420. }
  421. mutex_unlock(&hdq_data->hdq_mutex);
  422. return ret;
  423. }
  424. /* Read a byte of data from the device */
  425. static u8 omap_w1_read_byte(void *_hdq)
  426. {
  427. struct hdq_data *hdq_data = _hdq;
  428. u8 val = 0;
  429. int ret;
  430. ret = hdq_read_byte(hdq_data, &val);
  431. if (ret) {
  432. ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
  433. if (ret < 0) {
  434. dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
  435. return -EINTR;
  436. }
  437. hdq_data->init_trans = 0;
  438. mutex_unlock(&hdq_data->hdq_mutex);
  439. omap_hdq_put(hdq_data);
  440. return -1;
  441. }
  442. /* Write followed by a read, release the module */
  443. if (hdq_data->init_trans) {
  444. ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
  445. if (ret < 0) {
  446. dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
  447. return -EINTR;
  448. }
  449. hdq_data->init_trans = 0;
  450. mutex_unlock(&hdq_data->hdq_mutex);
  451. omap_hdq_put(hdq_data);
  452. }
  453. return val;
  454. }
  455. /* Write a byte of data to the device */
  456. static void omap_w1_write_byte(void *_hdq, u8 byte)
  457. {
  458. struct hdq_data *hdq_data = _hdq;
  459. int ret;
  460. u8 status;
  461. /* First write to initialize the transfer */
  462. if (hdq_data->init_trans == 0)
  463. omap_hdq_get(hdq_data);
  464. ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
  465. if (ret < 0) {
  466. dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
  467. return;
  468. }
  469. hdq_data->init_trans++;
  470. mutex_unlock(&hdq_data->hdq_mutex);
  471. ret = hdq_write_byte(hdq_data, byte, &status);
  472. if (ret == 0) {
  473. dev_dbg(hdq_data->dev, "TX failure:Ctrl status %x\n", status);
  474. return;
  475. }
  476. /* Second write, data transfered. Release the module */
  477. if (hdq_data->init_trans > 1) {
  478. omap_hdq_put(hdq_data);
  479. ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
  480. if (ret < 0) {
  481. dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
  482. return;
  483. }
  484. hdq_data->init_trans = 0;
  485. mutex_unlock(&hdq_data->hdq_mutex);
  486. }
  487. return;
  488. }
  489. static int __devinit omap_hdq_probe(struct platform_device *pdev)
  490. {
  491. struct hdq_data *hdq_data;
  492. struct resource *res;
  493. int ret, irq;
  494. u8 rev;
  495. hdq_data = kmalloc(sizeof(*hdq_data), GFP_KERNEL);
  496. if (!hdq_data) {
  497. dev_dbg(&pdev->dev, "unable to allocate memory\n");
  498. ret = -ENOMEM;
  499. goto err_kmalloc;
  500. }
  501. hdq_data->dev = &pdev->dev;
  502. platform_set_drvdata(pdev, hdq_data);
  503. res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  504. if (!res) {
  505. dev_dbg(&pdev->dev, "unable to get resource\n");
  506. ret = -ENXIO;
  507. goto err_resource;
  508. }
  509. hdq_data->hdq_base = ioremap(res->start, SZ_4K);
  510. if (!hdq_data->hdq_base) {
  511. dev_dbg(&pdev->dev, "ioremap failed\n");
  512. ret = -EINVAL;
  513. goto err_ioremap;
  514. }
  515. /* get interface & functional clock objects */
  516. hdq_data->hdq_ick = clk_get(&pdev->dev, "ick");
  517. hdq_data->hdq_fck = clk_get(&pdev->dev, "fck");
  518. if (IS_ERR(hdq_data->hdq_ick) || IS_ERR(hdq_data->hdq_fck)) {
  519. dev_dbg(&pdev->dev, "Can't get HDQ clock objects\n");
  520. if (IS_ERR(hdq_data->hdq_ick)) {
  521. ret = PTR_ERR(hdq_data->hdq_ick);
  522. goto err_clk;
  523. }
  524. if (IS_ERR(hdq_data->hdq_fck)) {
  525. ret = PTR_ERR(hdq_data->hdq_fck);
  526. clk_put(hdq_data->hdq_ick);
  527. goto err_clk;
  528. }
  529. }
  530. hdq_data->hdq_usecount = 0;
  531. mutex_init(&hdq_data->hdq_mutex);
  532. if (clk_enable(hdq_data->hdq_ick)) {
  533. dev_dbg(&pdev->dev, "Can not enable ick\n");
  534. ret = -ENODEV;
  535. goto err_intfclk;
  536. }
  537. if (clk_enable(hdq_data->hdq_fck)) {
  538. dev_dbg(&pdev->dev, "Can not enable fck\n");
  539. ret = -ENODEV;
  540. goto err_fnclk;
  541. }
  542. rev = hdq_reg_in(hdq_data, OMAP_HDQ_REVISION);
  543. dev_info(&pdev->dev, "OMAP HDQ Hardware Rev %c.%c. Driver in %s mode\n",
  544. (rev >> 4) + '0', (rev & 0x0f) + '0', "Interrupt");
  545. spin_lock_init(&hdq_data->hdq_spinlock);
  546. irq = platform_get_irq(pdev, 0);
  547. if (irq < 0) {
  548. ret = -ENXIO;
  549. goto err_irq;
  550. }
  551. ret = request_irq(irq, hdq_isr, IRQF_DISABLED, "omap_hdq", hdq_data);
  552. if (ret < 0) {
  553. dev_dbg(&pdev->dev, "could not request irq\n");
  554. goto err_irq;
  555. }
  556. omap_hdq_break(hdq_data);
  557. /* don't clock the HDQ until it is needed */
  558. clk_disable(hdq_data->hdq_ick);
  559. clk_disable(hdq_data->hdq_fck);
  560. omap_w1_master.data = hdq_data;
  561. ret = w1_add_master_device(&omap_w1_master);
  562. if (ret) {
  563. dev_dbg(&pdev->dev, "Failure in registering w1 master\n");
  564. goto err_w1;
  565. }
  566. return 0;
  567. err_w1:
  568. err_irq:
  569. clk_disable(hdq_data->hdq_fck);
  570. err_fnclk:
  571. clk_disable(hdq_data->hdq_ick);
  572. err_intfclk:
  573. clk_put(hdq_data->hdq_ick);
  574. clk_put(hdq_data->hdq_fck);
  575. err_clk:
  576. iounmap(hdq_data->hdq_base);
  577. err_ioremap:
  578. err_resource:
  579. platform_set_drvdata(pdev, NULL);
  580. kfree(hdq_data);
  581. err_kmalloc:
  582. return ret;
  583. }
  584. static int omap_hdq_remove(struct platform_device *pdev)
  585. {
  586. struct hdq_data *hdq_data = platform_get_drvdata(pdev);
  587. mutex_lock(&hdq_data->hdq_mutex);
  588. if (hdq_data->hdq_usecount) {
  589. dev_dbg(&pdev->dev, "removed when use count is not zero\n");
  590. mutex_unlock(&hdq_data->hdq_mutex);
  591. return -EBUSY;
  592. }
  593. mutex_unlock(&hdq_data->hdq_mutex);
  594. /* remove module dependency */
  595. clk_put(hdq_data->hdq_ick);
  596. clk_put(hdq_data->hdq_fck);
  597. free_irq(INT_24XX_HDQ_IRQ, hdq_data);
  598. platform_set_drvdata(pdev, NULL);
  599. iounmap(hdq_data->hdq_base);
  600. kfree(hdq_data);
  601. return 0;
  602. }
  603. static int __init
  604. omap_hdq_init(void)
  605. {
  606. return platform_driver_register(&omap_hdq_driver);
  607. }
  608. module_init(omap_hdq_init);
  609. static void __exit
  610. omap_hdq_exit(void)
  611. {
  612. platform_driver_unregister(&omap_hdq_driver);
  613. }
  614. module_exit(omap_hdq_exit);
  615. module_param(w1_id, int, S_IRUSR);
  616. MODULE_PARM_DESC(w1_id, "1-wire id for the slave detection");
  617. MODULE_AUTHOR("Texas Instruments");
  618. MODULE_DESCRIPTION("HDQ driver Library");
  619. MODULE_LICENSE("GPL");