/*
 * coretemp.c - Linux kernel module for hardware monitoring
 *
 * Copyright (C) 2007 Rudolf Marek <r.marek@assembler.cz>
 *
 * Inspired from many hwmon drivers
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301 USA.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/hwmon.h>
#include <linux/sysfs.h>
#include <linux/hwmon-sysfs.h>
#include <linux/err.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/platform_device.h>
#include <linux/cpu.h>
#include <linux/pci.h>
#include <linux/smp.h>
#include <asm/msr.h>
#include <asm/processor.h>

#define DRVNAME	"coretemp"

#define BASE_SYSFS_ATTR_NO	2	/* Sysfs Base attr no for coretemp */
#define NUM_REAL_CORES		16	/* Number of Real cores per cpu */
#define CORETEMP_NAME_LENGTH	17	/* String Length of attrs */
#define MAX_CORE_ATTRS		4	/* Maximum no of basic attrs */
#define MAX_THRESH_ATTRS	3	/* Maximum no of Threshold attrs */
#define TOTAL_ATTRS		(MAX_CORE_ATTRS + MAX_THRESH_ATTRS)
#define MAX_CORE_DATA		(NUM_REAL_CORES + BASE_SYSFS_ATTR_NO)

#ifdef CONFIG_SMP
#define TO_PHYS_ID(cpu)		cpu_data(cpu).phys_proc_id
#define TO_CORE_ID(cpu)		cpu_data(cpu).cpu_core_id
#define TO_ATTR_NO(cpu)		(TO_CORE_ID(cpu) + BASE_SYSFS_ATTR_NO)
#define for_each_sibling(i, cpu)	for_each_cpu(i, cpu_sibling_mask(cpu))
#else
#define TO_PHYS_ID(cpu)		(cpu)
#define TO_CORE_ID(cpu)		(cpu)
#define TO_ATTR_NO(cpu)		(cpu)
#define for_each_sibling(i, cpu)	for (i = 0; false; )
#endif

/*
 * Per-Core Temperature Data
 * @last_updated: The time (in jiffies) when the current temperature value
 *		was last updated.
 * @cpu_core_id: The CPU Core from which temperature values should be read.
 *		This value is passed as "id" field to rdmsr/wrmsr functions.
 * @status_reg: One of IA32_THERM_STATUS or IA32_PACKAGE_THERM_STATUS,
 *		from where the temperature values should be read.
 * @intrpt_reg: One of IA32_THERM_INTERRUPT or IA32_PACKAGE_THERM_INTERRUPT,
 *		from where the thresholds are read.
 * @attr_size:  Total number of per-core attrs displayed in sysfs.
 * @is_pkg_data: If this is 1, the temp_data holds pkgtemp data.
 *		Otherwise, temp_data holds coretemp data.
 * @valid: If this is 1, the current temperature is valid.
 */
struct temp_data {
	int temp;
	int ttarget;
	int tmin;
	int tjmax;
	unsigned long last_updated;
	unsigned int cpu;
	u32 cpu_core_id;
	u32 status_reg;
	u32 intrpt_reg;
	int attr_size;
	bool is_pkg_data;
	bool valid;
	struct sensor_device_attribute sd_attrs[TOTAL_ATTRS];
	char attr_name[TOTAL_ATTRS][CORETEMP_NAME_LENGTH];
	struct mutex update_lock;
};

/* Platform Data per Physical CPU */
struct platform_data {
	struct device *hwmon_dev;
	u16 phys_proc_id;
	struct temp_data *core_data[MAX_CORE_DATA];
	struct device_attribute name_attr;
};

struct pdev_entry {
	struct list_head list;
	struct platform_device *pdev;
	u16 phys_proc_id;
};

static LIST_HEAD(pdev_list);
static DEFINE_MUTEX(pdev_list_mutex);

static ssize_t show_name(struct device *dev,
			struct device_attribute *devattr, char *buf)
{
	return sprintf(buf, "%s\n", DRVNAME);
}

static ssize_t show_label(struct device *dev,
			struct device_attribute *devattr, char *buf)
{
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	struct platform_data *pdata = dev_get_drvdata(dev);
	struct temp_data *tdata = pdata->core_data[attr->index];

	if (tdata->is_pkg_data)
		return sprintf(buf, "Physical id %u\n", pdata->phys_proc_id);

	return sprintf(buf, "Core %u\n", tdata->cpu_core_id);
}
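
/*
 * The alarm attributes are never cached: each read fetches the thermal
 * status MSR of the target core or package.  show_crit_alarm() reports
 * the critical-temperature bit (bit 5) and show_max_alarm() reports the
 * THERM_STATUS_THRESHOLD1 bit.
 */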
static ssize_t show_crit_alarm(struct device *dev,
				struct device_attribute *devattr, char *buf)
{
	u32 eax, edx;
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	struct platform_data *pdata = dev_get_drvdata(dev);
	struct temp_data *tdata = pdata->core_data[attr->index];

	rdmsr_on_cpu(tdata->cpu, tdata->status_reg, &eax, &edx);

	return sprintf(buf, "%d\n", (eax >> 5) & 1);
}

static ssize_t show_max_alarm(struct device *dev,
				struct device_attribute *devattr, char *buf)
{
	u32 eax, edx;
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	struct platform_data *pdata = dev_get_drvdata(dev);
	struct temp_data *tdata = pdata->core_data[attr->index];

	rdmsr_on_cpu(tdata->cpu, tdata->status_reg, &eax, &edx);

	return sprintf(buf, "%d\n", !!(eax & THERM_STATUS_THRESHOLD1));
}

static ssize_t show_tjmax(struct device *dev,
			struct device_attribute *devattr, char *buf)
{
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	struct platform_data *pdata = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", pdata->core_data[attr->index]->tjmax);
}

static ssize_t show_ttarget(struct device *dev,
			struct device_attribute *devattr, char *buf)
{
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	struct platform_data *pdata = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", pdata->core_data[attr->index]->ttarget);
}

static ssize_t store_ttarget(struct device *dev,
				struct device_attribute *devattr,
				const char *buf, size_t count)
{
	struct platform_data *pdata = dev_get_drvdata(dev);
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	struct temp_data *tdata = pdata->core_data[attr->index];
	u32 eax, edx;
	unsigned long val;
	int diff;

	if (strict_strtoul(buf, 10, &val))
		return -EINVAL;

	/*
	 * THERM_MASK_THRESHOLD1 is 7 bits wide. Values are entered in terms
	 * of milli degree celsius. Hence don't accept val > (127 * 1000)
	 */
	if (val > tdata->tjmax || val > 127000)
		return -EINVAL;

	diff = (tdata->tjmax - val) / 1000;

	mutex_lock(&tdata->update_lock);
	rdmsr_on_cpu(tdata->cpu, tdata->intrpt_reg, &eax, &edx);
	eax = (eax & ~THERM_MASK_THRESHOLD1) |
				(diff << THERM_SHIFT_THRESHOLD1);
	wrmsr_on_cpu(tdata->cpu, tdata->intrpt_reg, eax, edx);
	tdata->ttarget = val;
	mutex_unlock(&tdata->update_lock);

	return count;
}

static ssize_t show_tmin(struct device *dev,
			struct device_attribute *devattr, char *buf)
{
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	struct platform_data *pdata = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", pdata->core_data[attr->index]->tmin);
}

static ssize_t store_tmin(struct device *dev,
				struct device_attribute *devattr,
				const char *buf, size_t count)
{
	struct platform_data *pdata = dev_get_drvdata(dev);
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	struct temp_data *tdata = pdata->core_data[attr->index];
	u32 eax, edx;
	unsigned long val;
	int diff;

	if (strict_strtoul(buf, 10, &val))
		return -EINVAL;

	/*
	 * THERM_MASK_THRESHOLD0 is 7 bits wide. Values are entered in terms
	 * of milli degree celsius. Hence don't accept val > (127 * 1000)
	 */
	if (val > tdata->tjmax || val > 127000)
		return -EINVAL;

	diff = (tdata->tjmax - val) / 1000;

	mutex_lock(&tdata->update_lock);
	rdmsr_on_cpu(tdata->cpu, tdata->intrpt_reg, &eax, &edx);
	eax = (eax & ~THERM_MASK_THRESHOLD0) |
				(diff << THERM_SHIFT_THRESHOLD0);
	wrmsr_on_cpu(tdata->cpu, tdata->intrpt_reg, eax, edx);
	tdata->tmin = val;
	mutex_unlock(&tdata->update_lock);

	return count;
}
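
/*
 * show_temp() caches the last reading for one second (HZ jiffies).  The
 * digital readout in bits 22:16 of the status MSR is the offset below
 * TjMax, so the reported value is tjmax - readout * 1000 in millidegrees
 * Celsius, and only when the "reading valid" bit (bit 31) is set.
 */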
static ssize_t show_temp(struct device *dev,
			struct device_attribute *devattr, char *buf)
{
	u32 eax, edx;
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	struct platform_data *pdata = dev_get_drvdata(dev);
	struct temp_data *tdata = pdata->core_data[attr->index];

	mutex_lock(&tdata->update_lock);

	/* Check whether the time interval has elapsed */
	if (!tdata->valid || time_after(jiffies, tdata->last_updated + HZ)) {
		rdmsr_on_cpu(tdata->cpu, tdata->status_reg, &eax, &edx);
		tdata->valid = 0;
		/* Check whether the data is valid */
		if (eax & 0x80000000) {
			tdata->temp = tdata->tjmax -
					((eax >> 16) & 0x7f) * 1000;
			tdata->valid = 1;
		}
		tdata->last_updated = jiffies;
	}

	mutex_unlock(&tdata->update_lock);
	return tdata->valid ? sprintf(buf, "%d\n", tdata->temp) : -EAGAIN;
}
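
/*
 * adjust_tjmax() estimates TjMax for CPUs that do not report it via
 * IA32_TEMPERATURE_TARGET: it starts from a 100 degree C default and
 * adjusts it based on the CPU model, the host bridge PCI ID (for Atom),
 * and the mobile/EE bits in MSRs 0x17 and 0xEE.  The result is a best
 * guess and may be off for some parts.
 */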
static int adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
{
	/* The 100C is default for both mobile and non mobile CPUs */

	int tjmax = 100000;
	int tjmax_ee = 85000;
	int usemsr_ee = 1;
	int err;
	u32 eax, edx;
	struct pci_dev *host_bridge;

	/* Early chips have no MSR for TjMax */

	if (c->x86_model == 0xf && c->x86_mask < 4)
		usemsr_ee = 0;

	/* Atom CPUs */

	if (c->x86_model == 0x1c) {
		usemsr_ee = 0;

		host_bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));

		if (host_bridge && host_bridge->vendor == PCI_VENDOR_ID_INTEL
		    && (host_bridge->device == 0xa000	/* NM10 based nettop */
		    || host_bridge->device == 0xa010))	/* NM10 based netbook */
			tjmax = 100000;
		else
			tjmax = 90000;

		pci_dev_put(host_bridge);
	}

	if (c->x86_model > 0xe && usemsr_ee) {
		u8 platform_id;

		/*
		 * Now we can detect the mobile CPU using Intel provided table
		 * http://softwarecommunity.intel.com/Wiki/Mobility/720.htm
		 * For Core2 cores, check MSR 0x17, bit 28 1 = Mobile CPU
		 */
		err = rdmsr_safe_on_cpu(id, 0x17, &eax, &edx);
		if (err) {
			dev_warn(dev,
				 "Unable to access MSR 0x17, assuming desktop"
				 " CPU\n");
			usemsr_ee = 0;
		} else if (c->x86_model < 0x17 && !(eax & 0x10000000)) {
			/*
			 * Trust bit 28 up to Penryn, I could not find any
			 * documentation on that; if you happen to know
			 * someone at Intel please ask
			 */
			usemsr_ee = 0;
		} else {
			/* Platform ID bits 52:50 (EDX starts at bit 32) */
			platform_id = (edx >> 18) & 0x7;

			/*
			 * Mobile Penryn CPU seems to be platform ID 7 or 5
			 * (guesswork)
			 */
			if (c->x86_model == 0x17 &&
			    (platform_id == 5 || platform_id == 7)) {
				/*
				 * If MSR EE bit is set, set it to 90 degrees C,
				 * otherwise 105 degrees C
				 */
				tjmax_ee = 90000;
				tjmax = 105000;
			}
		}
	}

	if (usemsr_ee) {
		err = rdmsr_safe_on_cpu(id, 0xee, &eax, &edx);
		if (err) {
			dev_warn(dev,
				 "Unable to access MSR 0xEE, Tjmax left"
				 " at default\n");
		} else if (eax & 0x40000000) {
			tjmax = tjmax_ee;
		}
	} else if (tjmax == 100000) {
		/*
		 * If we don't use MSR EE it means we are a desktop CPU
		 * (with the exception of Atom)
		 */
		dev_warn(dev, "Using relative temperature scale!\n");
	}

	return tjmax;
}

static int get_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
{
	/* The 100C is default for both mobile and non mobile CPUs */
	int err;
	u32 eax, edx;
	u32 val;

	/*
	 * A new feature of current Intel(R) processors, the
	 * IA32_TEMPERATURE_TARGET contains the TjMax value
	 */
	err = rdmsr_safe_on_cpu(id, MSR_IA32_TEMPERATURE_TARGET, &eax, &edx);
	if (err) {
		dev_warn(dev, "Unable to read TjMax from CPU.\n");
	} else {
		val = (eax >> 16) & 0xff;
		/*
		 * If the TjMax is not plausible, an assumption
		 * will be used
		 */
		if (val) {
			dev_info(dev, "TjMax is %d C.\n", val);
			return val * 1000;
		}
	}

	/*
	 * An assumption is made for early CPUs and unreadable MSR.
	 * NOTE: the calculated value may not be correct.
	 */
	return adjust_tjmax(c, id, dev);
}

static void __devinit get_ucode_rev_on_cpu(void *edx)
{
	u32 eax;

	wrmsr(MSR_IA32_UCODE_REV, 0, 0);
	sync_core();
	rdmsr(MSR_IA32_UCODE_REV, eax, *(u32 *)edx);
}
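
/*
 * Package TjMax comes from the same IA32_TEMPERATURE_TARGET MSR; if the
 * MSR cannot be read (or reports 0), fall back to 100 degrees C.
 */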
static int get_pkg_tjmax(unsigned int cpu, struct device *dev)
{
	int err;
	u32 eax, edx, val;

	err = rdmsr_safe_on_cpu(cpu, MSR_IA32_TEMPERATURE_TARGET, &eax, &edx);
	if (!err) {
		val = (eax >> 16) & 0xff;
		if (val)
			return val * 1000;
	}

	dev_warn(dev, "Unable to read Pkg-TjMax from CPU:%u\n", cpu);
	return 100000; /* Default TjMax: 100 degree celsius */
}

static int create_name_attr(struct platform_data *pdata, struct device *dev)
{
	sysfs_attr_init(&pdata->name_attr.attr);
	pdata->name_attr.attr.name = "name";
	pdata->name_attr.attr.mode = S_IRUGO;
	pdata->name_attr.show = show_name;
	return device_create_file(dev, &pdata->name_attr);
}
static int create_core_attrs(struct temp_data *tdata, struct device *dev,
				int attr_no)
{
	int err, i;
	static ssize_t (*rd_ptr[TOTAL_ATTRS]) (struct device *dev,
			struct device_attribute *devattr, char *buf) = {
			show_label, show_crit_alarm, show_temp, show_tjmax,
			show_max_alarm, show_ttarget, show_tmin };
	static ssize_t (*rw_ptr[TOTAL_ATTRS]) (struct device *dev,
			struct device_attribute *devattr, const char *buf,
			size_t count) = { NULL, NULL, NULL, NULL, NULL,
					store_ttarget, store_tmin };
	static const char *names[TOTAL_ATTRS] = {
					"temp%d_label", "temp%d_crit_alarm",
					"temp%d_input", "temp%d_crit",
					"temp%d_max_alarm", "temp%d_max",
					"temp%d_max_hyst" };

	for (i = 0; i < tdata->attr_size; i++) {
		snprintf(tdata->attr_name[i], CORETEMP_NAME_LENGTH, names[i],
			attr_no);
		sysfs_attr_init(&tdata->sd_attrs[i].dev_attr.attr);
		tdata->sd_attrs[i].dev_attr.attr.name = tdata->attr_name[i];
		tdata->sd_attrs[i].dev_attr.attr.mode = S_IRUGO;
		if (rw_ptr[i]) {
			tdata->sd_attrs[i].dev_attr.attr.mode |= S_IWUSR;
			tdata->sd_attrs[i].dev_attr.store = rw_ptr[i];
		}
		tdata->sd_attrs[i].dev_attr.show = rd_ptr[i];
		tdata->sd_attrs[i].index = attr_no;
		err = device_create_file(dev, &tdata->sd_attrs[i].dev_attr);
		if (err)
			goto exit_free;
	}
	return 0;

exit_free:
	while (--i >= 0)
		device_remove_file(dev, &tdata->sd_attrs[i].dev_attr);
	return err;
}

static int __devinit chk_ucode_version(struct platform_device *pdev)
{
	struct cpuinfo_x86 *c = &cpu_data(pdev->id);
	int err;
	u32 edx;

	/*
	 * Check if we have problem with errata AE18 of Core processors:
	 * readings might stop updating when the processor visits too deep
	 * a sleep state; fixed in stepping D0 (6EC).
	 */
	if (c->x86_model == 0xe && c->x86_mask < 0xc) {
		/* check for microcode update */
		err = smp_call_function_single(pdev->id, get_ucode_rev_on_cpu,
					       &edx, 1);
		if (err) {
			dev_err(&pdev->dev,
				"Cannot determine microcode revision of "
				"CPU#%u (%d)!\n", pdev->id, err);
			return -ENODEV;
		} else if (edx < 0x39) {
			dev_err(&pdev->dev,
				"Errata AE18 not fixed, update BIOS or "
				"microcode of the CPU!\n");
			return -ENODEV;
		}
	}
	return 0;
}
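
/*
 * Return the platform device that represents the physical package
 * containing @cpu, or NULL if no such device has been registered yet.
 */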
static struct platform_device *coretemp_get_pdev(unsigned int cpu)
{
	u16 phys_proc_id = TO_PHYS_ID(cpu);
	struct pdev_entry *p;

	mutex_lock(&pdev_list_mutex);

	list_for_each_entry(p, &pdev_list, list)
		if (p->phys_proc_id == phys_proc_id) {
			mutex_unlock(&pdev_list_mutex);
			return p->pdev;
		}

	mutex_unlock(&pdev_list_mutex);
	return NULL;
}
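
/*
 * Allocate and initialize one temp_data entry.  pkg_flag selects the
 * package-level thermal MSRs instead of the per-core ones; tjmax,
 * ttarget and the sysfs attributes are filled in later by
 * create_core_data().
 */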
static struct temp_data *init_temp_data(unsigned int cpu, int pkg_flag)
{
	struct temp_data *tdata;

	tdata = kzalloc(sizeof(struct temp_data), GFP_KERNEL);
	if (!tdata)
		return NULL;

	tdata->status_reg = pkg_flag ? MSR_IA32_PACKAGE_THERM_STATUS :
							MSR_IA32_THERM_STATUS;
	tdata->intrpt_reg = pkg_flag ? MSR_IA32_PACKAGE_THERM_INTERRUPT :
						MSR_IA32_THERM_INTERRUPT;
	tdata->is_pkg_data = pkg_flag;
	tdata->cpu = cpu;
	tdata->cpu_core_id = TO_CORE_ID(cpu);
	tdata->attr_size = MAX_CORE_ATTRS;
	mutex_init(&tdata->update_lock);
	return tdata;
}

static int create_core_data(struct platform_data *pdata,
				struct platform_device *pdev,
				unsigned int cpu, int pkg_flag)
{
	struct temp_data *tdata;
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	u32 eax, edx;
	int err, attr_no;

	/*
	 * Find attr number for sysfs:
	 * We map the attr number to core id of the CPU
	 * The attr number is always core id + 2
	 * The Pkgtemp will always show up as temp1_*, if available
	 */
	attr_no = pkg_flag ? 1 : TO_ATTR_NO(cpu);

	if (attr_no > MAX_CORE_DATA - 1)
		return -ERANGE;

	/*
	 * Provide a single set of attributes for all HT siblings of a core
	 * to avoid duplicate sensors (the processor ID and core ID of all
	 * HT siblings of a core are the same).
	 * Skip if a HT sibling of this core is already registered.
	 * This is not an error.
	 */
	if (pdata->core_data[attr_no] != NULL)
		return 0;

	tdata = init_temp_data(cpu, pkg_flag);
	if (!tdata)
		return -ENOMEM;

	/* Test if we can access the status register */
	err = rdmsr_safe_on_cpu(cpu, tdata->status_reg, &eax, &edx);
	if (err)
		goto exit_free;

	/* We can access status register. Get Critical Temperature */
	if (pkg_flag)
		tdata->tjmax = get_pkg_tjmax(pdev->id, &pdev->dev);
	else
		tdata->tjmax = get_tjmax(c, cpu, &pdev->dev);

	/*
	 * Test if we can access the intrpt register. If so, increase the
	 * 'size' enough to have ttarget/tmin/max_alarm interfaces.
	 * Initialize ttarget with bits 16:22 of MSR_IA32_THERM_INTERRUPT
	 */
	err = rdmsr_safe_on_cpu(cpu, tdata->intrpt_reg, &eax, &edx);
	if (!err) {
		tdata->attr_size += MAX_THRESH_ATTRS;
		tdata->ttarget = tdata->tjmax - ((eax >> 16) & 0x7f) * 1000;
	}

	pdata->core_data[attr_no] = tdata;

	/* Create sysfs interfaces */
	err = create_core_attrs(tdata, &pdev->dev, attr_no);
	if (err)
		goto exit_free;

	return 0;
exit_free:
	kfree(tdata);
	return err;
}
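
/*
 * Add the sensor interfaces for one core (or for the whole package when
 * pkg_flag is set) to the platform device of its physical package.
 */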
static void coretemp_add_core(unsigned int cpu, int pkg_flag)
{
	struct platform_data *pdata;
	struct platform_device *pdev = coretemp_get_pdev(cpu);
	int err;

	if (!pdev)
		return;

	pdata = platform_get_drvdata(pdev);

	err = create_core_data(pdata, pdev, cpu, pkg_flag);
	if (err)
		dev_err(&pdev->dev, "Adding Core %u failed\n", cpu);
}

static void coretemp_remove_core(struct platform_data *pdata,
				struct device *dev, int indx)
{
	int i;
	struct temp_data *tdata = pdata->core_data[indx];

	/* Remove the sysfs attributes */
	for (i = 0; i < tdata->attr_size; i++)
		device_remove_file(dev, &tdata->sd_attrs[i].dev_attr);

	kfree(pdata->core_data[indx]);
	pdata->core_data[indx] = NULL;
}

static int __devinit coretemp_probe(struct platform_device *pdev)
{
	struct platform_data *pdata;
	int err;

	/* Check the microcode version of the CPU */
	err = chk_ucode_version(pdev);
	if (err)
		return err;

	/* Initialize the per-package data structures */
	pdata = kzalloc(sizeof(struct platform_data), GFP_KERNEL);
	if (!pdata)
		return -ENOMEM;

	err = create_name_attr(pdata, &pdev->dev);
	if (err)
		goto exit_free;

	pdata->phys_proc_id = TO_PHYS_ID(pdev->id);
	platform_set_drvdata(pdev, pdata);

	pdata->hwmon_dev = hwmon_device_register(&pdev->dev);
	if (IS_ERR(pdata->hwmon_dev)) {
		err = PTR_ERR(pdata->hwmon_dev);
		dev_err(&pdev->dev, "Class registration failed (%d)\n", err);
		goto exit_name;
	}
	return 0;

exit_name:
	device_remove_file(&pdev->dev, &pdata->name_attr);
	platform_set_drvdata(pdev, NULL);
exit_free:
	kfree(pdata);
	return err;
}

static int __devexit coretemp_remove(struct platform_device *pdev)
{
	struct platform_data *pdata = platform_get_drvdata(pdev);
	int i;

	for (i = MAX_CORE_DATA - 1; i >= 0; --i)
		if (pdata->core_data[i])
			coretemp_remove_core(pdata, &pdev->dev, i);

	device_remove_file(&pdev->dev, &pdata->name_attr);
	hwmon_device_unregister(pdata->hwmon_dev);
	platform_set_drvdata(pdev, NULL);
	kfree(pdata);
	return 0;
}

static struct platform_driver coretemp_driver = {
	.driver = {
		.owner = THIS_MODULE,
		.name = DRVNAME,
	},
	.probe = coretemp_probe,
	.remove = __devexit_p(coretemp_remove),
};
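
/*
 * Register one platform device per physical package.  The device id is
 * the number of the CPU that triggered the registration, and the entry
 * is tracked on pdev_list so later hotplug events can find it by
 * physical package id.
 */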
static int __cpuinit coretemp_device_add(unsigned int cpu)
{
	int err;
	struct platform_device *pdev;
	struct pdev_entry *pdev_entry;

	mutex_lock(&pdev_list_mutex);

	pdev = platform_device_alloc(DRVNAME, cpu);
	if (!pdev) {
		err = -ENOMEM;
		pr_err("Device allocation failed\n");
		goto exit;
	}

	pdev_entry = kzalloc(sizeof(struct pdev_entry), GFP_KERNEL);
	if (!pdev_entry) {
		err = -ENOMEM;
		goto exit_device_put;
	}

	err = platform_device_add(pdev);
	if (err) {
		pr_err("Device addition failed (%d)\n", err);
		goto exit_device_free;
	}

	pdev_entry->pdev = pdev;
	pdev_entry->phys_proc_id = TO_PHYS_ID(cpu);

	list_add_tail(&pdev_entry->list, &pdev_list);
	mutex_unlock(&pdev_list_mutex);

	return 0;

exit_device_free:
	kfree(pdev_entry);
exit_device_put:
	platform_device_put(pdev);
exit:
	mutex_unlock(&pdev_list_mutex);
	return err;
}

static void coretemp_device_remove(unsigned int cpu)
{
	struct pdev_entry *p, *n;
	u16 phys_proc_id = TO_PHYS_ID(cpu);

	mutex_lock(&pdev_list_mutex);
	list_for_each_entry_safe(p, n, &pdev_list, list) {
		if (p->phys_proc_id != phys_proc_id)
			continue;
		platform_device_unregister(p->pdev);
		list_del(&p->list);
		kfree(p);
	}
	mutex_unlock(&pdev_list_mutex);
}

static bool is_any_core_online(struct platform_data *pdata)
{
	int i;

	/* Find online cores, except pkgtemp data */
	for (i = MAX_CORE_DATA - 1; i >= 0; --i) {
		if (pdata->core_data[i] &&
			!pdata->core_data[i]->is_pkg_data) {
			return true;
		}
	}
	return false;
}

static void __cpuinit get_core_online(unsigned int cpu)
{
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	struct platform_device *pdev = coretemp_get_pdev(cpu);
	int err;

	/*
	 * CPUID.06H.EAX[0] indicates whether the CPU has thermal
	 * sensors. We check this bit only, all the early CPUs
	 * without thermal sensors will be filtered out.
	 */
	if (!cpu_has(c, X86_FEATURE_DTS))
		return;

	if (!pdev) {
		/*
		 * Alright, we have DTS support.
		 * We are bringing the _first_ core in this pkg
		 * online. So, initialize per-pkg data structures and
		 * then bring this core online.
		 */
		err = coretemp_device_add(cpu);
		if (err)
			return;
		/*
		 * Check whether pkgtemp support is available.
		 * If so, add interfaces for pkgtemp.
		 */
		if (cpu_has(c, X86_FEATURE_PTS))
			coretemp_add_core(cpu, 1);
	}
	/*
	 * The physical CPU device exists by now (it may have just been
	 * created above), so add the interfaces for this core.
	 */
	coretemp_add_core(cpu, 0);
}

static void __cpuinit put_core_offline(unsigned int cpu)
{
	int i, indx;
	struct platform_data *pdata;
	struct platform_device *pdev = coretemp_get_pdev(cpu);

	/* If the physical CPU device does not exist, just return */
	if (!pdev)
		return;

	pdata = platform_get_drvdata(pdev);

	indx = TO_ATTR_NO(cpu);

	if (pdata->core_data[indx] && pdata->core_data[indx]->cpu == cpu)
		coretemp_remove_core(pdata, &pdev->dev, indx);

	/*
	 * If a HT sibling of a core is taken offline, but another HT sibling
	 * of the same core is still online, register the alternate sibling.
	 * This ensures that exactly one set of attributes is provided as long
	 * as at least one HT sibling of a core is online.
	 */
	for_each_sibling(i, cpu) {
		if (i != cpu) {
			get_core_online(i);
			/*
			 * Display temperature sensor data for one HT sibling
			 * per core only, so abort the loop after one such
			 * sibling has been found.
			 */
			break;
		}
	}
	/*
	 * If all cores in this pkg are offline, remove the device.
	 * coretemp_device_remove() calls platform_device_unregister(),
	 * which in turn calls coretemp_remove(). This removes the
	 * pkgtemp entry and does other clean ups.
	 */
	if (!is_any_core_online(pdata))
		coretemp_device_remove(cpu);
}
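
/*
 * CPU hotplug callback.  Sensor interfaces are added when a CPU comes
 * online (or when taking it offline fails and it stays online) and are
 * torn down in CPU_DOWN_PREPARE, before the CPU actually goes offline.
 */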
static int __cpuinit coretemp_cpu_callback(struct notifier_block *nfb,
				 unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long) hcpu;

	switch (action) {
	case CPU_ONLINE:
	case CPU_DOWN_FAILED:
		get_core_online(cpu);
		break;
	case CPU_DOWN_PREPARE:
		put_core_offline(cpu);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block coretemp_cpu_notifier __refdata = {
	.notifier_call = coretemp_cpu_callback,
};
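
/*
 * Module init: bail out on non-Intel CPUs, register the platform driver,
 * create devices and interfaces for every CPU that is already online, and
 * then register the hotplug notifier for CPUs that appear later.
 */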
static int __init coretemp_init(void)
{
	int i, err = -ENODEV;

	/* quick check if we run Intel */
	if (cpu_data(0).x86_vendor != X86_VENDOR_INTEL)
		goto exit;

	err = platform_driver_register(&coretemp_driver);
	if (err)
		goto exit;

	for_each_online_cpu(i)
		get_core_online(i);

#ifndef CONFIG_HOTPLUG_CPU
	if (list_empty(&pdev_list)) {
		err = -ENODEV;
		goto exit_driver_unreg;
	}
#endif

	register_hotcpu_notifier(&coretemp_cpu_notifier);
	return 0;

#ifndef CONFIG_HOTPLUG_CPU
exit_driver_unreg:
	platform_driver_unregister(&coretemp_driver);
#endif
exit:
	return err;
}

static void __exit coretemp_exit(void)
{
	struct pdev_entry *p, *n;

	unregister_hotcpu_notifier(&coretemp_cpu_notifier);
	mutex_lock(&pdev_list_mutex);
	list_for_each_entry_safe(p, n, &pdev_list, list) {
		platform_device_unregister(p->pdev);
		list_del(&p->list);
		kfree(p);
	}
	mutex_unlock(&pdev_list_mutex);
	platform_driver_unregister(&coretemp_driver);
}

MODULE_AUTHOR("Rudolf Marek <r.marek@assembler.cz>");
MODULE_DESCRIPTION("Intel Core temperature monitor");
MODULE_LICENSE("GPL");

module_init(coretemp_init)
module_exit(coretemp_exit)