/* exynos_tmu.c */
  1. /*
  2. * exynos_tmu.c - Samsung EXYNOS TMU (Thermal Management Unit)
  3. *
  4. * Copyright (C) 2011 Samsung Electronics
  5. * Donggeun Kim <dg77.kim@samsung.com>
  6. * Amit Daniel Kachhap <amit.kachhap@linaro.org>
  7. *
  8. * This program is free software; you can redistribute it and/or modify
  9. * it under the terms of the GNU General Public License as published by
  10. * the Free Software Foundation; either version 2 of the License, or
  11. * (at your option) any later version.
  12. *
  13. * This program is distributed in the hope that it will be useful,
  14. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  15. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  16. * GNU General Public License for more details.
  17. *
  18. * You should have received a copy of the GNU General Public License
  19. * along with this program; if not, write to the Free Software
  20. * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  21. *
  22. */
  23. #include <linux/clk.h>
  24. #include <linux/io.h>
  25. #include <linux/interrupt.h>
  26. #include <linux/module.h>
  27. #include <linux/of.h>
  28. #include <linux/of_address.h>
  29. #include <linux/of_irq.h>
  30. #include <linux/platform_device.h>
  31. #include <linux/regulator/consumer.h>
  32. #include "exynos_thermal_common.h"
  33. #include "exynos_tmu.h"
  34. #include "exynos_tmu_data.h"
/**
 * struct exynos_tmu_data : A structure to hold the private data of the TMU
 *			    driver
 * @id: identifier of the one instance of the TMU controller.
 * @pdata: pointer to the tmu platform/configuration data
 * @base: base address of the single instance of the TMU controller.
 * @base_common: base address of the common registers of the TMU controller.
 * @irq: irq number of the TMU controller.
 * @soc: id of the SOC type.
 * @irq_work: pointer to the irq work structure.
 * @lock: lock to implement synchronization.
 * @clk: pointer to the clock structure.
 * @temp_error1: fused value of the first point trim.
 * @temp_error2: fused value of the second point trim.
 * @regulator: pointer to the TMU regulator structure.
 * @reg_conf: pointer to structure to register with core thermal.
 */
struct exynos_tmu_data {
	int id;
	struct exynos_tmu_platform_data *pdata;
	void __iomem *base;
	void __iomem *base_common;
	int irq;
	enum soc_type soc;
	struct work_struct irq_work;
	struct mutex lock;
	struct clk *clk;
	u8 temp_error1, temp_error2;	/* e-fuse trim codes (25 / 85 degC) */
	struct regulator *regulator;
	struct thermal_sensor_conf *reg_conf;
};
  66. /*
  67. * TMU treats temperature as a mapped temperature code.
  68. * The temperature is converted differently depending on the calibration type.
  69. */
  70. static int temp_to_code(struct exynos_tmu_data *data, u8 temp)
  71. {
  72. struct exynos_tmu_platform_data *pdata = data->pdata;
  73. int temp_code;
  74. if (pdata->cal_mode == HW_MODE)
  75. return temp;
  76. if (data->soc == SOC_ARCH_EXYNOS4210)
  77. /* temp should range between 25 and 125 */
  78. if (temp < 25 || temp > 125) {
  79. temp_code = -EINVAL;
  80. goto out;
  81. }
  82. switch (pdata->cal_type) {
  83. case TYPE_TWO_POINT_TRIMMING:
  84. temp_code = (temp - pdata->first_point_trim) *
  85. (data->temp_error2 - data->temp_error1) /
  86. (pdata->second_point_trim - pdata->first_point_trim) +
  87. data->temp_error1;
  88. break;
  89. case TYPE_ONE_POINT_TRIMMING:
  90. temp_code = temp + data->temp_error1 - pdata->first_point_trim;
  91. break;
  92. default:
  93. temp_code = temp + pdata->default_temp_offset;
  94. break;
  95. }
  96. out:
  97. return temp_code;
  98. }
  99. /*
  100. * Calculate a temperature value from a temperature code.
  101. * The unit of the temperature is degree Celsius.
  102. */
  103. static int code_to_temp(struct exynos_tmu_data *data, u8 temp_code)
  104. {
  105. struct exynos_tmu_platform_data *pdata = data->pdata;
  106. int temp;
  107. if (pdata->cal_mode == HW_MODE)
  108. return temp_code;
  109. if (data->soc == SOC_ARCH_EXYNOS4210)
  110. /* temp_code should range between 75 and 175 */
  111. if (temp_code < 75 || temp_code > 175) {
  112. temp = -ENODATA;
  113. goto out;
  114. }
  115. switch (pdata->cal_type) {
  116. case TYPE_TWO_POINT_TRIMMING:
  117. temp = (temp_code - data->temp_error1) *
  118. (pdata->second_point_trim - pdata->first_point_trim) /
  119. (data->temp_error2 - data->temp_error1) +
  120. pdata->first_point_trim;
  121. break;
  122. case TYPE_ONE_POINT_TRIMMING:
  123. temp = temp_code - data->temp_error1 + pdata->first_point_trim;
  124. break;
  125. default:
  126. temp = temp_code - pdata->default_temp_offset;
  127. break;
  128. }
  129. out:
  130. return temp;
  131. }
/*
 * One-time TMU setup: check controller readiness, read (or fall back on)
 * the e-fuse trim values used for software calibration, then program the
 * threshold / hardware-trip registers from the platform data.
 * Takes data->lock and the TMU clock for the duration of the setup.
 */
static int exynos_tmu_initialize(struct platform_device *pdev)
{
	struct exynos_tmu_data *data = platform_get_drvdata(pdev);
	struct exynos_tmu_platform_data *pdata = data->pdata;
	const struct exynos_tmu_registers *reg = pdata->registers;
	unsigned int status, trim_info = 0, con;
	unsigned int rising_threshold = 0, falling_threshold = 0;
	int ret = 0, threshold_code, i, trigger_levs = 0;

	mutex_lock(&data->lock);
	clk_enable(data->clk);

	/* A zero status byte means the TMU is not ready yet. */
	if (TMU_SUPPORTS(pdata, READY_STATUS)) {
		status = readb(data->base + reg->tmu_status);
		if (!status) {
			ret = -EBUSY;
			goto out;
		}
	}

	/* Ask the controller to reload the trim info from the e-fuses. */
	if (TMU_SUPPORTS(pdata, TRIM_RELOAD))
		__raw_writel(1, data->base + reg->triminfo_ctrl);

	/* In HW calibration mode the controller applies the trim itself. */
	if (pdata->cal_mode == HW_MODE)
		goto skip_calib_data;

	/* Save trimming info in order to perform calibration */
	if (data->soc == SOC_ARCH_EXYNOS5440) {
		/*
		 * For exynos5440 soc triminfo value is swapped between TMU0 and
		 * TMU2, so the below logic is needed.
		 */
		switch (data->id) {
		case 0:
			trim_info = readl(data->base +
			EXYNOS5440_EFUSE_SWAP_OFFSET + reg->triminfo_data);
			break;
		case 1:
			trim_info = readl(data->base + reg->triminfo_data);
			break;
		case 2:
			trim_info = readl(data->base -
			EXYNOS5440_EFUSE_SWAP_OFFSET + reg->triminfo_data);
		}
	} else {
		trim_info = readl(data->base + reg->triminfo_data);
	}
	/* Low byte: first-point trim; higher field: second-point trim. */
	data->temp_error1 = trim_info & EXYNOS_TMU_TEMP_MASK;
	data->temp_error2 = ((trim_info >> reg->triminfo_85_shift) &
				EXYNOS_TMU_TEMP_MASK);

	/* Fall back on the platform default when the fused value is bad. */
	if (!data->temp_error1 ||
		(pdata->min_efuse_value > data->temp_error1) ||
		(data->temp_error1 > pdata->max_efuse_value))
		data->temp_error1 = pdata->efuse_value & EXYNOS_TMU_TEMP_MASK;

	if (!data->temp_error2)
		data->temp_error2 =
			(pdata->efuse_value >> reg->triminfo_85_shift) &
			EXYNOS_TMU_TEMP_MASK;

skip_calib_data:
	if (pdata->max_trigger_level > MAX_THRESHOLD_LEVS) {
		dev_err(&pdev->dev, "Invalid max trigger level\n");
		/*
		 * NOTE(review): ret is still 0 on this path, so the function
		 * reports success -- confirm whether -EINVAL was intended.
		 */
		goto out;
	}

	/* Validate levels and count the non-HW-trip trigger levels. */
	for (i = 0; i < pdata->max_trigger_level; i++) {
		if (!pdata->trigger_levels[i])
			continue;

		/* A HW trip requires the last trigger level to be set. */
		if ((pdata->trigger_type[i] == HW_TRIP) &&
		(!pdata->trigger_levels[pdata->max_trigger_level - 1])) {
			dev_err(&pdev->dev, "Invalid hw trigger level\n");
			ret = -EINVAL;
			goto out;
		}

		/* Count trigger levels except the HW trip*/
		if (!(pdata->trigger_type[i] == HW_TRIP))
			trigger_levs++;
	}

	if (data->soc == SOC_ARCH_EXYNOS4210) {
		/* Write temperature code for threshold */
		threshold_code = temp_to_code(data, pdata->threshold);
		if (threshold_code < 0) {
			ret = threshold_code;
			goto out;
		}
		writeb(threshold_code,
			data->base + reg->threshold_temp);
		/* 4210 uses one byte-wide threshold register per level. */
		for (i = 0; i < trigger_levs; i++)
			writeb(pdata->trigger_levels[i], data->base +
			reg->threshold_th0 + i * sizeof(reg->threshold_th0));

		writel(reg->inten_rise_mask, data->base + reg->tmu_intclear);
	} else {
		/* Write temperature code for rising and falling threshold */
		for (i = 0;
		i < trigger_levs && i < EXYNOS_MAX_TRIGGER_PER_REG; i++) {
			threshold_code = temp_to_code(data,
						pdata->trigger_levels[i]);
			if (threshold_code < 0) {
				ret = threshold_code;
				goto out;
			}
			/* One byte per level, packed into a 32-bit word. */
			rising_threshold |= threshold_code << 8 * i;
			if (pdata->threshold_falling) {
				threshold_code = temp_to_code(data,
						pdata->trigger_levels[i] -
						pdata->threshold_falling);
				if (threshold_code > 0)
					falling_threshold |=
						threshold_code << 8 * i;
			}
		}

		writel(rising_threshold,
				data->base + reg->threshold_th0);
		writel(falling_threshold,
				data->base + reg->threshold_th1);

		writel((reg->inten_rise_mask << reg->inten_rise_shift) |
			(reg->inten_fall_mask << reg->inten_fall_shift),
				data->base + reg->tmu_intclear);

		/* if last threshold limit is also present */
		i = pdata->max_trigger_level - 1;
		if (pdata->trigger_levels[i] &&
				(pdata->trigger_type[i] == HW_TRIP)) {
			threshold_code = temp_to_code(data,
						pdata->trigger_levels[i]);
			if (threshold_code < 0) {
				ret = threshold_code;
				goto out;
			}
			if (i == EXYNOS_MAX_TRIGGER_PER_REG - 1) {
				/* 1-4 level to be assigned in th0 reg */
				rising_threshold |= threshold_code << 8 * i;
				writel(rising_threshold,
					data->base + reg->threshold_th0);
			} else if (i == EXYNOS_MAX_TRIGGER_PER_REG) {
				/* 5th level to be assigned in th2 reg */
				rising_threshold =
				threshold_code << reg->threshold_th3_l0_shift;
				writel(rising_threshold,
					data->base + reg->threshold_th2);
			}
			/* Arm the hardware thermal-trip output. */
			con = readl(data->base + reg->tmu_ctrl);
			con |= (1 << reg->therm_trip_en_shift);
			writel(con, data->base + reg->tmu_ctrl);
		}
	}
	/*Clear the PMIN in the common TMU register*/
	if (reg->tmu_pmin && !data->id)
		writel(0, data->base_common + reg->tmu_pmin);
out:
	clk_disable(data->clk);
	mutex_unlock(&data->lock);

	return ret;
}
/*
 * Program the TMU control register from the platform data and switch the
 * sensing core (and its interrupts) on or off.
 */
static void exynos_tmu_control(struct platform_device *pdev, bool on)
{
	struct exynos_tmu_data *data = platform_get_drvdata(pdev);
	struct exynos_tmu_platform_data *pdata = data->pdata;
	const struct exynos_tmu_registers *reg = pdata->registers;
	unsigned int con, interrupt_en, cal_val;

	mutex_lock(&data->lock);
	clk_enable(data->clk);

	con = readl(data->base + reg->tmu_ctrl);

	/* Internal buffer reference voltage, when provided. */
	if (pdata->reference_voltage) {
		con &= ~(reg->buf_vref_sel_mask << reg->buf_vref_sel_shift);
		con |= pdata->reference_voltage << reg->buf_vref_sel_shift;
	}

	/* Sensing amplifier gain (slope), when provided. */
	if (pdata->gain) {
		con &= ~(reg->buf_slope_sel_mask << reg->buf_slope_sel_shift);
		con |= (pdata->gain << reg->buf_slope_sel_shift);
	}

	/* Noise-cancel (thermal tripping) mode, when provided. */
	if (pdata->noise_cancel_mode) {
		con &= ~(reg->therm_trip_mode_mask <<
					reg->therm_trip_mode_shift);
		con |= (pdata->noise_cancel_mode << reg->therm_trip_mode_shift);
	}

	/*
	 * HW calibration: encode the calibration type in the control
	 * register so the controller applies the trim in hardware.
	 * Unknown types fall back to 0 (none) after logging an error.
	 */
	if (pdata->cal_mode == HW_MODE) {
		con &= ~(reg->calib_mode_mask << reg->calib_mode_shift);
		cal_val = 0;
		switch (pdata->cal_type) {
		case TYPE_TWO_POINT_TRIMMING:
			cal_val = 3;
			break;
		case TYPE_ONE_POINT_TRIMMING_85:
			cal_val = 2;
			break;
		case TYPE_ONE_POINT_TRIMMING_25:
			cal_val = 1;
			break;
		case TYPE_NONE:
			break;
		default:
			dev_err(&pdev->dev, "Invalid calibration type, using none\n");
		}
		con |= cal_val << reg->calib_mode_shift;
	}

	if (on) {
		con |= (1 << reg->core_en_shift);
		interrupt_en =
			pdata->trigger_enable[3] << reg->inten_rise3_shift |
			pdata->trigger_enable[2] << reg->inten_rise2_shift |
			pdata->trigger_enable[1] << reg->inten_rise1_shift |
			pdata->trigger_enable[0] << reg->inten_rise0_shift;
		/* Mirror the rising bits into the falling-interrupt field. */
		if (TMU_SUPPORTS(pdata, FALLING_TRIP))
			interrupt_en |=
				interrupt_en << reg->inten_fall0_shift;
	} else {
		con &= ~(1 << reg->core_en_shift);
		interrupt_en = 0; /* Disable all interrupts */
	}
	writel(interrupt_en, data->base + reg->tmu_inten);
	writel(con, data->base + reg->tmu_ctrl);

	clk_disable(data->clk);
	mutex_unlock(&data->lock);
}
  339. static int exynos_tmu_read(struct exynos_tmu_data *data)
  340. {
  341. struct exynos_tmu_platform_data *pdata = data->pdata;
  342. const struct exynos_tmu_registers *reg = pdata->registers;
  343. u8 temp_code;
  344. int temp;
  345. mutex_lock(&data->lock);
  346. clk_enable(data->clk);
  347. temp_code = readb(data->base + reg->tmu_cur_temp);
  348. temp = code_to_temp(data, temp_code);
  349. clk_disable(data->clk);
  350. mutex_unlock(&data->lock);
  351. return temp;
  352. }
#ifdef CONFIG_THERMAL_EMULATION
/*
 * Write an emulated temperature (millicelsius) into the emulation control
 * register, or disable emulation when @temp is 0.  Only supported on SoCs
 * advertising TMU_SUPPORT_EMULATION.
 */
static int exynos_tmu_set_emulation(void *drv_data, unsigned long temp)
{
	struct exynos_tmu_data *data = drv_data;
	struct exynos_tmu_platform_data *pdata = data->pdata;
	const struct exynos_tmu_registers *reg = pdata->registers;
	unsigned int val;
	int ret = -EINVAL;

	if (!TMU_SUPPORTS(pdata, EMULATION))
		goto out;

	/* Non-zero values below 1 degC (in millicelsius) are rejected. */
	if (temp && temp < MCELSIUS)
		goto out;

	mutex_lock(&data->lock);
	clk_enable(data->clk);

	val = readl(data->base + reg->emul_con);

	if (temp) {
		temp /= MCELSIUS;	/* convert to degree Celsius */

		/* Program the emulated sample time where supported. */
		if (TMU_SUPPORTS(pdata, EMUL_TIME)) {
			val &= ~(EXYNOS_EMUL_TIME_MASK << reg->emul_time_shift);
			val |= (EXYNOS_EMUL_TIME << reg->emul_time_shift);
		}
		val &= ~(EXYNOS_EMUL_DATA_MASK << reg->emul_temp_shift);
		val |= (temp_to_code(data, temp) << reg->emul_temp_shift) |
			EXYNOS_EMUL_ENABLE;
	} else {
		val &= ~EXYNOS_EMUL_ENABLE;
	}

	writel(val, data->base + reg->emul_con);

	clk_disable(data->clk);
	mutex_unlock(&data->lock);
	return 0;
out:
	return ret;
}
#else
/* Emulation support compiled out: always report failure. */
static int exynos_tmu_set_emulation(void *drv_data, unsigned long temp)
	{ return -EINVAL; }
#endif/*CONFIG_THERMAL_EMULATION*/
/*
 * Deferred half of the TMU interrupt, running in process context so the
 * mutex/clk calls are legal: reports the trigger to the core thermal layer,
 * clears the controller's interrupt status and re-enables the irq line that
 * exynos_tmu_irq() masked.
 */
static void exynos_tmu_work(struct work_struct *work)
{
	struct exynos_tmu_data *data = container_of(work,
			struct exynos_tmu_data, irq_work);
	struct exynos_tmu_platform_data *pdata = data->pdata;
	const struct exynos_tmu_registers *reg = pdata->registers;
	unsigned int val_irq, val_type;

	/* Find which sensor generated this interrupt */
	if (reg->tmu_irqstatus) {
		val_type = readl(data->base_common + reg->tmu_irqstatus);
		/* Shared irq status: bail out if our instance bit is clear. */
		if (!((val_type >> data->id) & 0x1))
			goto out;
	}

	exynos_report_trigger(data->reg_conf);
	mutex_lock(&data->lock);
	clk_enable(data->clk);

	/* TODO: take action based on particular interrupt */
	val_irq = readl(data->base + reg->tmu_intstat);
	/* clear the interrupts */
	writel(val_irq, data->base + reg->tmu_intclear);

	clk_disable(data->clk);
	mutex_unlock(&data->lock);
out:
	enable_irq(data->irq);
}
/*
 * Hard interrupt handler.  Register access requires clk/mutex operations
 * that may sleep, so mask the line and defer to exynos_tmu_work().
 */
static irqreturn_t exynos_tmu_irq(int irq, void *id)
{
	struct exynos_tmu_data *data = id;

	disable_irq_nosync(irq);
	schedule_work(&data->irq_work);

	return IRQ_HANDLED;
}
/* DT match table: each compatible maps to its SoC's TMU init data table. */
static const struct of_device_id exynos_tmu_match[] = {
	{
		.compatible = "samsung,exynos4210-tmu",
		.data = (void *)EXYNOS4210_TMU_DRV_DATA,
	},
	{
		.compatible = "samsung,exynos4412-tmu",
		.data = (void *)EXYNOS4412_TMU_DRV_DATA,
	},
	{
		.compatible = "samsung,exynos5250-tmu",
		.data = (void *)EXYNOS5250_TMU_DRV_DATA,
	},
	{
		.compatible = "samsung,exynos5440-tmu",
		.data = (void *)EXYNOS5440_TMU_DRV_DATA,
	},
	{},
};
MODULE_DEVICE_TABLE(of, exynos_tmu_match);
  443. static inline struct exynos_tmu_platform_data *exynos_get_driver_data(
  444. struct platform_device *pdev, int id)
  445. {
  446. struct exynos_tmu_init_data *data_table;
  447. struct exynos_tmu_platform_data *tmu_data;
  448. const struct of_device_id *match;
  449. match = of_match_node(exynos_tmu_match, pdev->dev.of_node);
  450. if (!match)
  451. return NULL;
  452. data_table = (struct exynos_tmu_init_data *) match->data;
  453. if (!data_table || id >= data_table->tmu_count)
  454. return NULL;
  455. tmu_data = data_table->tmu_data;
  456. return (struct exynos_tmu_platform_data *) (tmu_data + id);
  457. }
  458. static int exynos_map_dt_data(struct platform_device *pdev)
  459. {
  460. struct exynos_tmu_data *data = platform_get_drvdata(pdev);
  461. struct exynos_tmu_platform_data *pdata;
  462. struct resource res;
  463. int ret;
  464. if (!data || !pdev->dev.of_node)
  465. return -ENODEV;
  466. /*
  467. * Try enabling the regulator if found
  468. * TODO: Add regulator as an SOC feature, so that regulator enable
  469. * is a compulsory call.
  470. */
  471. data->regulator = devm_regulator_get(&pdev->dev, "vtmu");
  472. if (!IS_ERR(data->regulator)) {
  473. ret = regulator_enable(data->regulator);
  474. if (ret) {
  475. dev_err(&pdev->dev, "failed to enable vtmu\n");
  476. return ret;
  477. }
  478. } else {
  479. dev_info(&pdev->dev, "Regulator node (vtmu) not found\n");
  480. }
  481. data->id = of_alias_get_id(pdev->dev.of_node, "tmuctrl");
  482. if (data->id < 0)
  483. data->id = 0;
  484. data->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
  485. if (data->irq <= 0) {
  486. dev_err(&pdev->dev, "failed to get IRQ\n");
  487. return -ENODEV;
  488. }
  489. if (of_address_to_resource(pdev->dev.of_node, 0, &res)) {
  490. dev_err(&pdev->dev, "failed to get Resource 0\n");
  491. return -ENODEV;
  492. }
  493. data->base = devm_ioremap(&pdev->dev, res.start, resource_size(&res));
  494. if (!data->base) {
  495. dev_err(&pdev->dev, "Failed to ioremap memory\n");
  496. return -EADDRNOTAVAIL;
  497. }
  498. pdata = exynos_get_driver_data(pdev, data->id);
  499. if (!pdata) {
  500. dev_err(&pdev->dev, "No platform init data supplied.\n");
  501. return -ENODEV;
  502. }
  503. data->pdata = pdata;
  504. /*
  505. * Check if the TMU shares some registers and then try to map the
  506. * memory of common registers.
  507. */
  508. if (!TMU_SUPPORTS(pdata, SHARED_MEMORY))
  509. return 0;
  510. if (of_address_to_resource(pdev->dev.of_node, 1, &res)) {
  511. dev_err(&pdev->dev, "failed to get Resource 1\n");
  512. return -ENODEV;
  513. }
  514. data->base_common = devm_ioremap(&pdev->dev, res.start,
  515. resource_size(&res));
  516. if (!data->base_common) {
  517. dev_err(&pdev->dev, "Failed to ioremap memory\n");
  518. return -ENOMEM;
  519. }
  520. return 0;
  521. }
/*
 * Probe: allocate driver state, map DT resources, prepare the clock,
 * program the TMU thresholds, start the controller, register the sensor
 * with the exynos core thermal layer, then hook up the interrupt.
 */
static int exynos_tmu_probe(struct platform_device *pdev)
{
	struct exynos_tmu_data *data;
	struct exynos_tmu_platform_data *pdata;
	struct thermal_sensor_conf *sensor_conf;
	int ret, i;

	data = devm_kzalloc(&pdev->dev, sizeof(struct exynos_tmu_data),
					GFP_KERNEL);
	if (!data) {
		dev_err(&pdev->dev, "Failed to allocate driver structure\n");
		return -ENOMEM;
	}

	platform_set_drvdata(pdev, data);
	mutex_init(&data->lock);

	/* Regulator, id, irq, register windows and platform data from DT. */
	ret = exynos_map_dt_data(pdev);
	if (ret)
		return ret;

	pdata = data->pdata;

	INIT_WORK(&data->irq_work, exynos_tmu_work);

	data->clk = devm_clk_get(&pdev->dev, "tmu_apbif");
	if (IS_ERR(data->clk)) {
		dev_err(&pdev->dev, "Failed to get clock\n");
		return PTR_ERR(data->clk);
	}

	ret = clk_prepare(data->clk);
	if (ret)
		return ret;

	/* Only the four known SoC types are accepted. */
	if (pdata->type == SOC_ARCH_EXYNOS4210 ||
	    pdata->type == SOC_ARCH_EXYNOS4412 ||
	    pdata->type == SOC_ARCH_EXYNOS5250 ||
	    pdata->type == SOC_ARCH_EXYNOS5440)
		data->soc = pdata->type;
	else {
		ret = -EINVAL;
		dev_err(&pdev->dev, "Platform not supported\n");
		goto err_clk;
	}

	ret = exynos_tmu_initialize(pdev);
	if (ret) {
		dev_err(&pdev->dev, "Failed to initialize TMU\n");
		goto err_clk;
	}

	exynos_tmu_control(pdev, true);
	/*
	 * NOTE(review): the error paths below leave the TMU core enabled --
	 * confirm whether exynos_tmu_control(pdev, false) should be called
	 * before err_clk on failure.
	 */

	/* Allocate a structure to register with the exynos core thermal */
	sensor_conf = devm_kzalloc(&pdev->dev,
				sizeof(struct thermal_sensor_conf), GFP_KERNEL);
	if (!sensor_conf) {
		dev_err(&pdev->dev, "Failed to allocate registration struct\n");
		ret = -ENOMEM;
		goto err_clk;
	}
	/* NOTE(review): assumes sensor_conf->name can hold "therm_zoneN". */
	sprintf(sensor_conf->name, "therm_zone%d", data->id);
	sensor_conf->read_temperature = (int (*)(void *))exynos_tmu_read;
	sensor_conf->write_emul_temp =
		(int (*)(void *, unsigned long))exynos_tmu_set_emulation;
	sensor_conf->driver_data = data;
	sensor_conf->trip_data.trip_count = pdata->trigger_enable[0] +
			pdata->trigger_enable[1] + pdata->trigger_enable[2]+
			pdata->trigger_enable[3];

	/* Trip points are expressed relative to pdata->threshold. */
	for (i = 0; i < sensor_conf->trip_data.trip_count; i++) {
		sensor_conf->trip_data.trip_val[i] =
			pdata->threshold + pdata->trigger_levels[i];
		sensor_conf->trip_data.trip_type[i] =
					pdata->trigger_type[i];
	}

	sensor_conf->trip_data.trigger_falling = pdata->threshold_falling;

	/* Copy the cpufreq cooling table into the registration struct. */
	sensor_conf->cooling_data.freq_clip_count = pdata->freq_tab_count;
	for (i = 0; i < pdata->freq_tab_count; i++) {
		sensor_conf->cooling_data.freq_data[i].freq_clip_max =
					pdata->freq_tab[i].freq_clip_max;
		sensor_conf->cooling_data.freq_data[i].temp_level =
					pdata->freq_tab[i].temp_level;
	}
	sensor_conf->dev = &pdev->dev;

	/* Register the sensor with thermal management interface */
	ret = exynos_register_thermal(sensor_conf);
	if (ret) {
		dev_err(&pdev->dev, "Failed to register thermal interface\n");
		goto err_clk;
	}
	data->reg_conf = sensor_conf;

	ret = devm_request_irq(&pdev->dev, data->irq, exynos_tmu_irq,
		IRQF_TRIGGER_RISING | IRQF_SHARED, dev_name(&pdev->dev), data);
	if (ret) {
		dev_err(&pdev->dev, "Failed to request irq: %d\n", data->irq);
		goto err_clk;
	}

	return 0;

err_clk:
	clk_unprepare(data->clk);
	return ret;
}
/*
 * Remove: stop the TMU, unregister from the core thermal layer and release
 * the clock and (if it was acquired in probe) the vtmu regulator.
 */
static int exynos_tmu_remove(struct platform_device *pdev)
{
	struct exynos_tmu_data *data = platform_get_drvdata(pdev);

	exynos_tmu_control(pdev, false);

	exynos_unregister_thermal(data->reg_conf);

	clk_unprepare(data->clk);

	/* devm_regulator_get() may have left an error pointer here. */
	if (!IS_ERR(data->regulator))
		regulator_disable(data->regulator);

	return 0;
}
#ifdef CONFIG_PM_SLEEP
/* Suspend: switch the TMU core and its interrupts off. */
static int exynos_tmu_suspend(struct device *dev)
{
	exynos_tmu_control(to_platform_device(dev), false);

	return 0;
}

/*
 * Resume: re-run the full threshold/calibration setup before switching the
 * controller back on, since its register state is lost across suspend.
 */
static int exynos_tmu_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);

	exynos_tmu_initialize(pdev);
	exynos_tmu_control(pdev, true);

	return 0;
}

static SIMPLE_DEV_PM_OPS(exynos_tmu_pm,
			 exynos_tmu_suspend, exynos_tmu_resume);
#define EXYNOS_TMU_PM	(&exynos_tmu_pm)
#else
#define EXYNOS_TMU_PM	NULL
#endif
/* Platform driver glue: bound via DT (exynos_tmu_match) or platform name. */
static struct platform_driver exynos_tmu_driver = {
	.driver = {
		.name   = "exynos-tmu",
		.owner  = THIS_MODULE,
		.pm     = EXYNOS_TMU_PM,
		.of_match_table = exynos_tmu_match,
	},
	.probe = exynos_tmu_probe,
	.remove	= exynos_tmu_remove,
};

module_platform_driver(exynos_tmu_driver);

MODULE_DESCRIPTION("EXYNOS TMU Driver");
MODULE_AUTHOR("Donggeun Kim <dg77.kim@samsung.com>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:exynos-tmu");