omap_hdq.c

/*
 * drivers/w1/masters/omap_hdq.c
 *
 * Copyright (C) 2007,2012 Texas Instruments, Inc.
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2. This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/sched.h>
#include <linux/pm_runtime.h>

#include "../w1.h"
#include "../w1_int.h"

#define MOD_NAME				"OMAP_HDQ:"

#define OMAP_HDQ_REVISION			0x00
#define OMAP_HDQ_TX_DATA			0x04
#define OMAP_HDQ_RX_DATA			0x08
#define OMAP_HDQ_CTRL_STATUS			0x0c
#define OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK	(1<<6)
#define OMAP_HDQ_CTRL_STATUS_CLOCKENABLE	(1<<5)
#define OMAP_HDQ_CTRL_STATUS_GO			(1<<4)
#define OMAP_HDQ_CTRL_STATUS_INITIALIZATION	(1<<2)
#define OMAP_HDQ_CTRL_STATUS_DIR		(1<<1)
#define OMAP_HDQ_CTRL_STATUS_MODE		(1<<0)
#define OMAP_HDQ_INT_STATUS			0x10
#define OMAP_HDQ_INT_STATUS_TXCOMPLETE		(1<<2)
#define OMAP_HDQ_INT_STATUS_RXCOMPLETE		(1<<1)
#define OMAP_HDQ_INT_STATUS_TIMEOUT		(1<<0)
#define OMAP_HDQ_SYSCONFIG			0x14
#define OMAP_HDQ_SYSCONFIG_SOFTRESET		(1<<1)
#define OMAP_HDQ_SYSCONFIG_AUTOIDLE		(1<<0)
#define OMAP_HDQ_SYSSTATUS			0x18
#define OMAP_HDQ_SYSSTATUS_RESETDONE		(1<<0)

#define OMAP_HDQ_FLAG_CLEAR			0
#define OMAP_HDQ_FLAG_SET			1
#define OMAP_HDQ_TIMEOUT			(HZ/5)

#define OMAP_HDQ_MAX_USER			4

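/* Single wait queue shared by all HDQ transfers; woken from the ISR. */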
static DECLARE_WAIT_QUEUE_HEAD(hdq_wait_queue);
static int w1_id;

struct hdq_data {
	struct device		*dev;
	void __iomem		*hdq_base;
	/* lock status update */
	struct mutex		hdq_mutex;
	int			hdq_usecount;
	u8			hdq_irqstatus;
	/* device lock */
	spinlock_t		hdq_spinlock;
	/*
	 * Used to control the call to omap_hdq_get and omap_hdq_put.
	 * HDQ Protocol: Write the CMD|REG_address first, followed by
	 * the data write or read.
	 */
	int			init_trans;
};

static int omap_hdq_probe(struct platform_device *pdev);
static int omap_hdq_remove(struct platform_device *pdev);

static struct platform_driver omap_hdq_driver = {
	.probe	= omap_hdq_probe,
	.remove	= omap_hdq_remove,
	.driver = {
		.name = "omap_hdq",
	},
};

static u8 omap_w1_read_byte(void *_hdq);
static void omap_w1_write_byte(void *_hdq, u8 byte);
static u8 omap_w1_reset_bus(void *_hdq);
static void omap_w1_search_bus(void *_hdq, struct w1_master *master_dev,
		u8 search_type, w1_slave_found_callback slave_found);

static struct w1_bus_master omap_w1_master = {
	.read_byte	= omap_w1_read_byte,
	.write_byte	= omap_w1_write_byte,
	.reset_bus	= omap_w1_reset_bus,
	.search		= omap_w1_search_bus,
};

/* HDQ register I/O routines */
static inline u8 hdq_reg_in(struct hdq_data *hdq_data, u32 offset)
{
	return __raw_readl(hdq_data->hdq_base + offset);
}

static inline void hdq_reg_out(struct hdq_data *hdq_data, u32 offset, u8 val)
{
	__raw_writel(val, hdq_data->hdq_base + offset);
}

static inline u8 hdq_reg_merge(struct hdq_data *hdq_data, u32 offset,
			u8 val, u8 mask)
{
	u8 new_val = (__raw_readl(hdq_data->hdq_base + offset) & ~mask)
			| (val & mask);
	__raw_writel(new_val, hdq_data->hdq_base + offset);

	return new_val;
}

/*
 * Wait for one or more bits in flag to change.
 * HDQ_FLAG_SET: wait until any bit in the flag is set.
 * HDQ_FLAG_CLEAR: wait until all bits in the flag are cleared.
 * Returns 0 on success and -ETIMEDOUT in the case of timeout.
 */
static int hdq_wait_for_flag(struct hdq_data *hdq_data, u32 offset,
		u8 flag, u8 flag_set, u8 *status)
{
	int ret = 0;
	unsigned long timeout = jiffies + OMAP_HDQ_TIMEOUT;

	if (flag_set == OMAP_HDQ_FLAG_CLEAR) {
		/* wait for the flag clear */
		while (((*status = hdq_reg_in(hdq_data, offset)) & flag)
			&& time_before(jiffies, timeout)) {
			schedule_timeout_uninterruptible(1);
		}
		if (*status & flag)
			ret = -ETIMEDOUT;
	} else if (flag_set == OMAP_HDQ_FLAG_SET) {
		/* wait for the flag set */
		while (!((*status = hdq_reg_in(hdq_data, offset)) & flag)
			&& time_before(jiffies, timeout)) {
			schedule_timeout_uninterruptible(1);
		}
		if (!(*status & flag))
			ret = -ETIMEDOUT;
	} else
		return -EINVAL;

	return ret;
}

/* Write out a byte and fill *status with HDQ_INT_STATUS */
static int hdq_write_byte(struct hdq_data *hdq_data, u8 val, u8 *status)
{
	int ret;
	u8 tmp_status;
	unsigned long irqflags;

	*status = 0;

	spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags);
	/* clear interrupt flags via a dummy read */
	hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
	/* ISR loads it with new INT_STATUS */
	hdq_data->hdq_irqstatus = 0;
	spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags);

	hdq_reg_out(hdq_data, OMAP_HDQ_TX_DATA, val);

	/* set the GO bit */
	hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS, OMAP_HDQ_CTRL_STATUS_GO,
		OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO);
	/* wait for the TXCOMPLETE bit */
	ret = wait_event_timeout(hdq_wait_queue,
		hdq_data->hdq_irqstatus, OMAP_HDQ_TIMEOUT);
	if (ret == 0) {
		dev_dbg(hdq_data->dev, "TX wait elapsed\n");
		ret = -ETIMEDOUT;
		goto out;
	}

	*status = hdq_data->hdq_irqstatus;
	/* check irqstatus */
	if (!(*status & OMAP_HDQ_INT_STATUS_TXCOMPLETE)) {
		dev_dbg(hdq_data->dev, "timeout waiting for"
			" TXCOMPLETE/RXCOMPLETE, %x", *status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* wait for the GO bit to return to zero */
	ret = hdq_wait_for_flag(hdq_data, OMAP_HDQ_CTRL_STATUS,
			OMAP_HDQ_CTRL_STATUS_GO,
			OMAP_HDQ_FLAG_CLEAR, &tmp_status);
	if (ret) {
		dev_dbg(hdq_data->dev, "timeout waiting GO bit"
			" return to zero, %x", tmp_status);
	}

out:
	return ret;
}

/* HDQ Interrupt service routine */
static irqreturn_t hdq_isr(int irq, void *_hdq)
{
	struct hdq_data *hdq_data = _hdq;
	unsigned long irqflags;

	spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags);
	hdq_data->hdq_irqstatus = hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
	spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags);
	dev_dbg(hdq_data->dev, "hdq_isr: %x", hdq_data->hdq_irqstatus);

	if (hdq_data->hdq_irqstatus &
		(OMAP_HDQ_INT_STATUS_TXCOMPLETE | OMAP_HDQ_INT_STATUS_RXCOMPLETE
		| OMAP_HDQ_INT_STATUS_TIMEOUT)) {
		/* wake up sleeping process */
		wake_up(&hdq_wait_queue);
	}

	return IRQ_HANDLED;
}

/* HDQ Mode: always return success */
static u8 omap_w1_reset_bus(void *_hdq)
{
	return 0;
}

/* W1 search callback function */
static void omap_w1_search_bus(void *_hdq, struct w1_master *master_dev,
		u8 search_type, w1_slave_found_callback slave_found)
{
	u64 module_id, rn_le, cs, id;

	if (w1_id)
		module_id = w1_id;
	else
		module_id = 0x1;

	rn_le = cpu_to_le64(module_id);

	/*
	 * HDQ might not fully obey the 1-Wire spec, so calculate the
	 * CRC based on the module parameter.
	 */
	cs = w1_calc_crc8((u8 *)&rn_le, 7);
	id = (cs << 56) | module_id;

	slave_found(master_dev, id);
}

static int _omap_hdq_reset(struct hdq_data *hdq_data)
{
	int ret;
	u8 tmp_status;

	hdq_reg_out(hdq_data, OMAP_HDQ_SYSCONFIG, OMAP_HDQ_SYSCONFIG_SOFTRESET);
	/*
	 * Select HDQ mode & enable clocks.
	 * It is observed that INT flags can't be cleared via a read and
	 * GO/INIT won't return to zero if the interrupt is disabled, so
	 * the interrupt is always kept enabled.
	 */
	hdq_reg_out(hdq_data, OMAP_HDQ_CTRL_STATUS,
		OMAP_HDQ_CTRL_STATUS_CLOCKENABLE |
		OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK);

	/* wait for reset to complete */
	ret = hdq_wait_for_flag(hdq_data, OMAP_HDQ_SYSSTATUS,
		OMAP_HDQ_SYSSTATUS_RESETDONE, OMAP_HDQ_FLAG_SET, &tmp_status);
	if (ret)
		dev_dbg(hdq_data->dev, "timeout waiting HDQ reset, %x",
				tmp_status);
	else {
		hdq_reg_out(hdq_data, OMAP_HDQ_CTRL_STATUS,
			OMAP_HDQ_CTRL_STATUS_CLOCKENABLE |
			OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK);
		hdq_reg_out(hdq_data, OMAP_HDQ_SYSCONFIG,
			OMAP_HDQ_SYSCONFIG_AUTOIDLE);
	}

	return ret;
}

/* Issue a break pulse to the device */
static int omap_hdq_break(struct hdq_data *hdq_data)
{
	int ret = 0;
	u8 tmp_status;
	unsigned long irqflags;

	ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
	if (ret < 0) {
		dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
		ret = -EINTR;
		goto rtn;
	}

	spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags);
	/* clear interrupt flags via a dummy read */
	hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
	/* ISR loads it with new INT_STATUS */
	hdq_data->hdq_irqstatus = 0;
	spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags);

	/* set the INIT and GO bit */
	hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS,
		OMAP_HDQ_CTRL_STATUS_INITIALIZATION | OMAP_HDQ_CTRL_STATUS_GO,
		OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_INITIALIZATION |
		OMAP_HDQ_CTRL_STATUS_GO);

	/* wait for the TIMEOUT bit */
	ret = wait_event_timeout(hdq_wait_queue,
		hdq_data->hdq_irqstatus, OMAP_HDQ_TIMEOUT);
	if (ret == 0) {
		dev_dbg(hdq_data->dev, "break wait elapsed\n");
		ret = -EINTR;
		goto out;
	}

	tmp_status = hdq_data->hdq_irqstatus;
	/* check irqstatus */
	if (!(tmp_status & OMAP_HDQ_INT_STATUS_TIMEOUT)) {
		dev_dbg(hdq_data->dev, "timeout waiting for TIMEOUT, %x",
				tmp_status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/*
	 * Wait for both INIT and GO bits to return to zero.
	 * Zero wait time expected for interrupt mode.
	 */
	ret = hdq_wait_for_flag(hdq_data, OMAP_HDQ_CTRL_STATUS,
			OMAP_HDQ_CTRL_STATUS_INITIALIZATION |
			OMAP_HDQ_CTRL_STATUS_GO, OMAP_HDQ_FLAG_CLEAR,
			&tmp_status);
	if (ret)
		dev_dbg(hdq_data->dev, "timeout waiting INIT&GO bits"
			" return to zero, %x", tmp_status);

out:
	mutex_unlock(&hdq_data->hdq_mutex);
rtn:
	return ret;
}

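/*
 * Read one byte from the bus: if the ISR has not already reported
 * RXCOMPLETE, start a read cycle (DIR + GO) and wait for it before
 * fetching the byte from the RX_DATA register.
 */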
static int hdq_read_byte(struct hdq_data *hdq_data, u8 *val)
{
	int ret = 0;
	u8 status;

	ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
	if (ret < 0) {
		ret = -EINTR;
		goto rtn;
	}

	if (!hdq_data->hdq_usecount) {
		ret = -EINVAL;
		goto out;
	}

	if (!(hdq_data->hdq_irqstatus & OMAP_HDQ_INT_STATUS_RXCOMPLETE)) {
		hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS,
			OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO,
			OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO);
		/*
		 * The RX comes immediately after TX.
		 */
		wait_event_timeout(hdq_wait_queue,
				   (hdq_data->hdq_irqstatus
				    & OMAP_HDQ_INT_STATUS_RXCOMPLETE),
				   OMAP_HDQ_TIMEOUT);

		hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS, 0,
			OMAP_HDQ_CTRL_STATUS_DIR);
		status = hdq_data->hdq_irqstatus;
		/* check irqstatus */
		if (!(status & OMAP_HDQ_INT_STATUS_RXCOMPLETE)) {
			dev_dbg(hdq_data->dev, "timeout waiting for"
				" RXCOMPLETE, %x", status);
			ret = -ETIMEDOUT;
			goto out;
		}
	}
	/* the data is ready. Read it in! */
	*val = hdq_reg_in(hdq_data, OMAP_HDQ_RX_DATA);
out:
	mutex_unlock(&hdq_data->hdq_mutex);
rtn:
	return ret;
}

/* Enable clocks and set the controller to HDQ mode */
static int omap_hdq_get(struct hdq_data *hdq_data)
{
	int ret = 0;

	ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
	if (ret < 0) {
		ret = -EINTR;
		goto rtn;
	}

	if (OMAP_HDQ_MAX_USER == hdq_data->hdq_usecount) {
		dev_dbg(hdq_data->dev, "attempt to exceed the max use count");
		ret = -EINVAL;
		goto out;
	} else {
		hdq_data->hdq_usecount++;
		try_module_get(THIS_MODULE);
		if (1 == hdq_data->hdq_usecount) {
			pm_runtime_get_sync(hdq_data->dev);

			/* make sure HDQ is out of reset */
			if (!(hdq_reg_in(hdq_data, OMAP_HDQ_SYSSTATUS) &
				OMAP_HDQ_SYSSTATUS_RESETDONE)) {
				ret = _omap_hdq_reset(hdq_data);
				if (ret)
					/* back up the count */
					hdq_data->hdq_usecount--;
			} else {
				/* select HDQ mode & enable clocks */
				hdq_reg_out(hdq_data, OMAP_HDQ_CTRL_STATUS,
					OMAP_HDQ_CTRL_STATUS_CLOCKENABLE |
					OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK);
				hdq_reg_out(hdq_data, OMAP_HDQ_SYSCONFIG,
					OMAP_HDQ_SYSCONFIG_AUTOIDLE);
				hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
			}
		}
	}

out:
	mutex_unlock(&hdq_data->hdq_mutex);
rtn:
	return ret;
}

/* Disable clocks to the module */
static int omap_hdq_put(struct hdq_data *hdq_data)
{
	int ret = 0;

	ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
	if (ret < 0)
		return -EINTR;

	if (0 == hdq_data->hdq_usecount) {
		dev_dbg(hdq_data->dev, "attempt to decrement use count"
			" when it is zero");
		ret = -EINVAL;
	} else {
		hdq_data->hdq_usecount--;
		module_put(THIS_MODULE);
		if (0 == hdq_data->hdq_usecount)
			pm_runtime_put_sync(hdq_data->dev);
	}
	mutex_unlock(&hdq_data->hdq_mutex);

	return ret;
}

/* Read a byte of data from the device */
static u8 omap_w1_read_byte(void *_hdq)
{
	struct hdq_data *hdq_data = _hdq;
	u8 val = 0;
	int ret;

	ret = hdq_read_byte(hdq_data, &val);
	if (ret) {
		ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
		if (ret < 0) {
			dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
			return -EINTR;
		}
		hdq_data->init_trans = 0;
		mutex_unlock(&hdq_data->hdq_mutex);
		omap_hdq_put(hdq_data);
		return -1;
	}

	/* Write followed by a read, release the module */
	if (hdq_data->init_trans) {
		ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
		if (ret < 0) {
			dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
			return -EINTR;
		}
		hdq_data->init_trans = 0;
		mutex_unlock(&hdq_data->hdq_mutex);
		omap_hdq_put(hdq_data);
	}

	return val;
}

/* Write a byte of data to the device */
static void omap_w1_write_byte(void *_hdq, u8 byte)
{
	struct hdq_data *hdq_data = _hdq;
	int ret;
	u8 status;

	/* First write to initialize the transfer */
	if (hdq_data->init_trans == 0)
		omap_hdq_get(hdq_data);

	ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
	if (ret < 0) {
		dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
		return;
	}
	hdq_data->init_trans++;
	mutex_unlock(&hdq_data->hdq_mutex);

	ret = hdq_write_byte(hdq_data, byte, &status);
	if (ret < 0) {
		dev_dbg(hdq_data->dev, "TX failure:Ctrl status %x\n", status);
		return;
	}

	/* Second write, data transferred. Release the module */
	if (hdq_data->init_trans > 1) {
		omap_hdq_put(hdq_data);
		ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
		if (ret < 0) {
			dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
			return;
		}
		hdq_data->init_trans = 0;
		mutex_unlock(&hdq_data->hdq_mutex);
	}
}

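/*
 * Probe: map the HDQ register space, enable runtime PM, report the
 * hardware revision, hook up the interrupt handler, issue an initial
 * break pulse and register the bus master with the w1 core.
 */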
static int omap_hdq_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct hdq_data *hdq_data;
	struct resource *res;
	int ret, irq;
	u8 rev;

	hdq_data = devm_kzalloc(dev, sizeof(*hdq_data), GFP_KERNEL);
	if (!hdq_data) {
		dev_dbg(&pdev->dev, "unable to allocate memory\n");
		return -ENOMEM;
	}

	hdq_data->dev = dev;
	platform_set_drvdata(pdev, hdq_data);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_dbg(&pdev->dev, "unable to get resource\n");
		return -ENXIO;
	}

	hdq_data->hdq_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(hdq_data->hdq_base))
		return PTR_ERR(hdq_data->hdq_base);

	hdq_data->hdq_usecount = 0;
	mutex_init(&hdq_data->hdq_mutex);

	pm_runtime_enable(&pdev->dev);
	pm_runtime_get_sync(&pdev->dev);

	rev = hdq_reg_in(hdq_data, OMAP_HDQ_REVISION);
	dev_info(&pdev->dev, "OMAP HDQ Hardware Rev %c.%c. Driver in %s mode\n",
		(rev >> 4) + '0', (rev & 0x0f) + '0', "Interrupt");

	spin_lock_init(&hdq_data->hdq_spinlock);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		ret = -ENXIO;
		goto err_irq;
	}

	ret = devm_request_irq(dev, irq, hdq_isr, IRQF_DISABLED,
			"omap_hdq", hdq_data);
	if (ret < 0) {
		dev_dbg(&pdev->dev, "could not request irq\n");
		goto err_irq;
	}

	omap_hdq_break(hdq_data);

	pm_runtime_put_sync(&pdev->dev);

	omap_w1_master.data = hdq_data;

	ret = w1_add_master_device(&omap_w1_master);
	if (ret) {
		dev_dbg(&pdev->dev, "Failure in registering w1 master\n");
		goto err_w1;
	}

	return 0;

err_irq:
	pm_runtime_put_sync(&pdev->dev);
err_w1:
	pm_runtime_disable(&pdev->dev);

	return ret;
}

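/* Refuse to unbind while the bus still has active users */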
static int omap_hdq_remove(struct platform_device *pdev)
{
	struct hdq_data *hdq_data = platform_get_drvdata(pdev);

	mutex_lock(&hdq_data->hdq_mutex);

	if (hdq_data->hdq_usecount) {
		dev_dbg(&pdev->dev, "removed when use count is not zero\n");
		mutex_unlock(&hdq_data->hdq_mutex);
		return -EBUSY;
	}

	mutex_unlock(&hdq_data->hdq_mutex);

	/* remove module dependency */
	pm_runtime_disable(&pdev->dev);

	return 0;
}

module_platform_driver(omap_hdq_driver);

module_param(w1_id, int, S_IRUSR);
MODULE_PARM_DESC(w1_id, "1-wire id for the slave detection");

MODULE_AUTHOR("Texas Instruments");
MODULE_DESCRIPTION("HDQ driver Library");
MODULE_LICENSE("GPL");