intel_ips.c 44 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345134613471348134913501351135213531354135513561357135813591360136113621363136413651366136713681369137013711372137313741375137613771378137913801381138213831384138513861387138813891390139113921393139413951396139713981399140014011402140314041405140614071408140914101411141214131414141514161417141814191420142114221423142414251426142714281429143014311432143314341435143614371438143914401441144214431444144514461447144814491450145114521453145414551456145714581459146014611462146314641465146614671468146914701471147214731474147514761477147814791480148114821483148414851486148714881489149014911492149314941495149614971498149915001501150215031504150515061507150815091510151115121513151415151516151715181519152015211522152315241525152615271528152915301531153215331534153515361537153815391540154115421543154415451546154715481549155015511552155315541555155615571558155915601561156215631564156515661567156815691570157115721573157415751576157715781579158015811582158315841585158615871588158915901591159215931594159515961597159815991600160116021603160416051606160716081609161016111612161316141615161616171618161916201621162216231624162516261627162816291630163116321633163416351636163716381639164016411642164316441645164616471648164916501651165216531654165516561657165816591660166116621663166416651666166716681669167016711672167316741675167616771678167916801681168216831684168516861687168816891690169116921693169416951696169716981699170017011702170317041705170617071708170917101711171217131714171517161717171817191720172117221723172417251726172717281729173017311732173317341735173617371738173917401741174217431744
  1. /*
  2. * Copyright (c) 2009-2010 Intel Corporation
  3. *
  4. * This program is free software; you can redistribute it and/or modify it
  5. * under the terms and conditions of the GNU General Public License,
  6. * version 2, as published by the Free Software Foundation.
  7. *
  8. * This program is distributed in the hope it will be useful, but WITHOUT
  9. * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  10. * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  11. * more details.
  12. *
  13. * You should have received a copy of the GNU General Public License along with
  14. * this program; if not, write to the Free Software Foundation, Inc.,
  15. * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
  16. *
  17. * The full GNU General Public License is included in this distribution in
  18. * the file called "COPYING".
  19. *
  20. * Authors:
  21. * Jesse Barnes <jbarnes@virtuousgeek.org>
  22. */
  23. /*
  24. * Some Intel Ibex Peak based platforms support so-called "intelligent
  25. * power sharing", which allows the CPU and GPU to cooperate to maximize
  26. * performance within a given TDP (thermal design point). This driver
  27. * performs the coordination between the CPU and GPU, monitors thermal and
  28. * power statistics in the platform, and initializes power monitoring
  29. * hardware. It also provides a few tunables to control behavior. Its
  30. * primary purpose is to safely allow CPU and GPU turbo modes to be enabled
  31. * by tracking power and thermal budget; secondarily it can boost turbo
  32. * performance by allocating more power or thermal budget to the CPU or GPU
  33. * based on available headroom and activity.
  34. *
  35. * The basic algorithm is driven by a 5s moving average of temperature. If
  36. * thermal headroom is available, the CPU and/or GPU power clamps may be
  37. * adjusted upwards. If we hit the thermal ceiling or a thermal trigger,
  38. * we scale back the clamp. Aside from trigger events (when we're critically
  39. * close or over our TDP) we don't adjust the clamps more than once every
  40. * five seconds.
  41. *
  42. * The thermal device (device 31, function 6) has a set of registers that
  43. * are updated by the ME firmware. The ME should also take the clamp values
  44. * written to those registers and write them to the CPU, but we currently
  45. * bypass that functionality and write the CPU MSR directly.
  46. *
  47. * UNSUPPORTED:
  48. * - dual MCP configs
  49. *
  50. * TODO:
  51. * - handle CPU hotplug
  52. * - provide turbo enable/disable api
  53. *
  54. * Related documents:
  55. * - CDI 403777, 403778 - Auburndale EDS vol 1 & 2
  56. * - CDI 401376 - Ibex Peak EDS
  57. * - ref 26037, 26641 - IPS BIOS spec
  58. * - ref 26489 - Nehalem BIOS writer's guide
  59. * - ref 26921 - Ibex Peak BIOS Specification
  60. */
  61. #include <linux/debugfs.h>
  62. #include <linux/delay.h>
  63. #include <linux/interrupt.h>
  64. #include <linux/kernel.h>
  65. #include <linux/kthread.h>
  66. #include <linux/module.h>
  67. #include <linux/pci.h>
  68. #include <linux/sched.h>
  69. #include <linux/seq_file.h>
  70. #include <linux/string.h>
  71. #include <linux/tick.h>
  72. #include <linux/timer.h>
  73. #include <drm/i915_drm.h>
  74. #include <asm/msr.h>
  75. #include <asm/processor.h>
  76. #include "intel_ips.h"
  77. #include <asm-generic/io-64-nonatomic-lo-hi.h>
/* PCI ID of the Ibex Peak thermal sensor function (device 31, function 6) */
#define PCI_DEVICE_ID_INTEL_THERMAL_SENSOR 0x3b32
/*
 * Package level MSRs for monitor/control
 */
#define PLATFORM_INFO 0xce
#define PLATFORM_TDP (1<<29)
#define PLATFORM_RATIO (1<<28)
#define IA32_MISC_ENABLE 0x1a0
#define IA32_MISC_TURBO_EN (1ULL<<38)
/* Turbo power/current clamp MSR; TDC/TDP fields gated by the OVR_EN bits */
#define TURBO_POWER_CURRENT_LIMIT 0x1ac
#define TURBO_TDC_OVR_EN (1UL<<31)
#define TURBO_TDC_MASK (0x000000007fff0000UL)
#define TURBO_TDC_SHIFT (16)
#define TURBO_TDP_OVR_EN (1UL<<15)
#define TURBO_TDP_MASK (0x0000000000003fffUL)
/*
 * Core/thread MSRs for monitoring
 */
#define IA32_PERF_CTL 0x199
#define IA32_PERF_TURBO_DIS (1ULL<<32)
/*
 * Thermal PCI device regs
 */
/* BAR for the memory-mapped THM register space (low/high config dwords) */
#define THM_CFG_TBAR 0x10
#define THM_CFG_TBAR_HI 0x14
#define THM_TSIU 0x00
#define THM_TSE 0x01
#define TSE_EN 0xb8
#define THM_TSS 0x02
#define THM_TSTR 0x03
#define THM_TSTTP 0x04
#define THM_TSCO 0x08
#define THM_TSES 0x0c
#define THM_TSGPEN 0x0d
#define TSGPEN_HOT_LOHI (1<<1)
#define TSGPEN_CRIT_LOHI (1<<2)
#define THM_TSPC 0x0e
#define THM_PPEC 0x10
#define THM_CTA 0x12
/* Processor temp adjust: slope in high byte, offset in low byte */
#define THM_PTA 0x14
#define PTA_SLOPE_MASK (0xff00)
#define PTA_SLOPE_SHIFT 8
#define PTA_OFFSET_MASK (0x00ff)
/* MCH/GPU temp adjust: same slope/offset layout as PTA */
#define THM_MGTA 0x16
#define MGTA_SLOPE_MASK (0xff00)
#define MGTA_SLOPE_SHIFT 8
#define MGTA_OFFSET_MASK (0x00ff)
#define THM_TRC 0x1a
#define TRC_CORE2_EN (1<<15)
#define TRC_THM_EN (1<<12)
#define TRC_C6_WAR (1<<8)
#define TRC_CORE1_EN (1<<7)
#define TRC_CORE_PWR (1<<6)
#define TRC_PCH_EN (1<<5)
#define TRC_MCH_EN (1<<4)
#define TRC_DIMM4 (1<<3)
#define TRC_DIMM3 (1<<2)
#define TRC_DIMM2 (1<<1)
#define TRC_DIMM1 (1<<0)
#define THM_TES 0x20
#define THM_TEN 0x21
#define TEN_UPDATE_EN 1
#define THM_PSC 0x24
#define PSC_NTG (1<<0) /* No GFX turbo support */
#define PSC_NTPC (1<<1) /* No CPU turbo support */
#define PSC_PP_DEF (0<<2) /* Perf policy up to driver */
#define PSP_PP_PC (1<<2) /* BIOS prefers CPU perf */
#define PSP_PP_BAL (2<<2) /* BIOS wants balanced perf */
#define PSP_PP_GFX (3<<2) /* BIOS prefers GFX perf */
#define PSP_PBRT (1<<4) /* BIOS run time support */
/* CPU core temp values (one register per core); temp error flag in bit 15 */
#define THM_CTV1 0x30
#define CTV_TEMP_ERROR (1<<15)
#define CTV_TEMP_MASK 0x3f
/* XXX: truncated leftover define below — expands to nothing and appears
 * unused; kept as-is since the rest of the file is not visible here. */
#define CTV_
#define THM_CTV2 0x32
#define THM_CEC 0x34 /* undocumented power accumulator in joules */
#define THM_AE 0x3f
#define THM_HTS 0x50 /* 32 bits */
#define HTS_PCPL_MASK (0x7fe00000)
#define HTS_PCPL_SHIFT 21
#define HTS_GPL_MASK (0x001ff000)
#define HTS_GPL_SHIFT 12
#define HTS_PP_MASK (0x00000c00)
#define HTS_PP_SHIFT 10
#define HTS_PP_DEF 0
#define HTS_PP_PROC 1
#define HTS_PP_BAL 2
#define HTS_PP_GFX 3
#define HTS_PCTD_DIS (1<<9)
#define HTS_GTD_DIS (1<<8)
#define HTS_PTL_MASK (0x000000fe)
#define HTS_PTL_SHIFT 1
#define HTS_NVV (1<<0)
#define THM_HTSHI 0x54 /* 16 bits */
#define HTS2_PPL_MASK (0x03ff)
#define HTS2_PRST_MASK (0x3c00)
#define HTS2_PRST_SHIFT 10
#define HTS2_PRST_UNLOADED 0
#define HTS2_PRST_RUNNING 1
#define HTS2_PRST_TDISOP 2 /* turbo disabled due to power */
#define HTS2_PRST_TDISHT 3 /* turbo disabled due to high temp */
#define HTS2_PRST_TDISUSR 4 /* user disabled turbo */
#define HTS2_PRST_TDISPLAT 5 /* platform disabled turbo */
#define HTS2_PRST_TDISPM 6 /* power management disabled turbo */
#define HTS2_PRST_TDISERR 7 /* some kind of error disabled turbo */
#define THM_PTL 0x56
#define THM_MGTV 0x58
#define TV_MASK 0x000000000000ff00
#define TV_SHIFT 8
#define THM_PTV 0x60
#define PTV_MASK 0x00ff
#define THM_MMGPC 0x64
#define THM_MPPC 0x66
#define THM_MPCPC 0x68
#define THM_TSPIEN 0x82
#define TSPIEN_AUX_LOHI (1<<0)
#define TSPIEN_HOT_LOHI (1<<1)
#define TSPIEN_CRIT_LOHI (1<<2)
#define TSPIEN_AUX2_LOHI (1<<3)
#define THM_TSLOCK 0x83
#define THM_ATR 0x84
#define THM_TOF 0x87
#define THM_STS 0x98
#define STS_PCPL_MASK (0x7fe00000)
#define STS_PCPL_SHIFT 21
#define STS_GPL_MASK (0x001ff000)
#define STS_GPL_SHIFT 12
#define STS_PP_MASK (0x00000c00)
#define STS_PP_SHIFT 10
#define STS_PP_DEF 0
#define STS_PP_PROC 1
#define STS_PP_BAL 2
#define STS_PP_GFX 3
#define STS_PCTD_DIS (1<<9)
#define STS_GTD_DIS (1<<8)
#define STS_PTL_MASK (0x000000fe)
#define STS_PTL_SHIFT 1
#define STS_NVV (1<<0)
#define THM_SEC 0x9c
#define SEC_ACK (1<<0)
#define THM_TC3 0xa4
#define THM_TC1 0xa8
#define STS_PPL_MASK (0x0003ff00)
#define STS_PPL_SHIFT 16
#define THM_TC2 0xac
#define THM_DTV 0xb0
#define THM_ITV 0xd8
#define ITV_ME_SEQNO_MASK 0x00ff0000 /* ME should update every ~200ms */
#define ITV_ME_SEQNO_SHIFT (16)
#define ITV_MCH_TEMP_MASK 0x0000ff00
#define ITV_MCH_TEMP_SHIFT (8)
#define ITV_PCH_TEMP_MASK 0x000000ff
/*
 * Accessors for the memory-mapped THM register space. These are macros,
 * not functions, because they implicitly use a local 'ips' pointer
 * (struct ips_driver *) that must be in scope at the call site.
 */
#define thm_readb(off) readb(ips->regmap + (off))
#define thm_readw(off) readw(ips->regmap + (off))
#define thm_readl(off) readl(ips->regmap + (off))
#define thm_readq(off) readq(ips->regmap + (off))
#define thm_writeb(off, val) writeb((val), ips->regmap + (off))
#define thm_writew(off, val) writew((val), ips->regmap + (off))
#define thm_writel(off, val) writel((val), ips->regmap + (off))
/* How often the adjust thread may move the clamps (see ips_adjust()) */
static const int IPS_ADJUST_PERIOD = 5000; /* ms */
/* Set if i915 registered its callbacks after this driver loaded */
static bool late_i915_load = false;
/* For initial average collection */
static const int IPS_SAMPLE_PERIOD = 200; /* ms */
static const int IPS_SAMPLE_WINDOW = 5000; /* 5s moving window of samples */
#define IPS_SAMPLE_COUNT (IPS_SAMPLE_WINDOW / IPS_SAMPLE_PERIOD)
/* Per-SKU limits */
/*
 * Power and thermal budget for one CPU/GPU package SKU. Instances below
 * (SV/LV/ULV) are selected at probe time and pointed to by ips->limits.
 */
struct ips_mcp_limits {
	int cpu_family;
	int cpu_model; /* includes extended model... */
	int mcp_power_limit; /* mW units */
	int core_power_limit;
	int mch_power_limit;
	int core_temp_limit; /* degrees C */
	int mch_temp_limit;
};
/* Max temps are -10 degrees C to avoid PROCHOT# */

/* Standard-voltage SKU limits */
struct ips_mcp_limits ips_sv_limits = {
	.mcp_power_limit = 35000,
	.core_power_limit = 29000,
	.mch_power_limit = 20000,
	.core_temp_limit = 95,
	.mch_temp_limit = 90
};

/* Low-voltage SKU limits */
struct ips_mcp_limits ips_lv_limits = {
	.mcp_power_limit = 25000,
	.core_power_limit = 21000,
	.mch_power_limit = 13000,
	.core_temp_limit = 95,
	.mch_temp_limit = 90
};

/* Ultra-low-voltage SKU limits */
struct ips_mcp_limits ips_ulv_limits = {
	.mcp_power_limit = 18000,
	.core_power_limit = 14000,
	.mch_power_limit = 11000,
	.core_temp_limit = 95,
	.mch_temp_limit = 90
};
/*
 * Driver state: one instance per thermal PCI device. Shared between the
 * monitor and adjust kthreads; the limit/pref fields are protected by
 * turbo_status_lock as noted below.
 */
struct ips_driver {
	struct pci_dev *dev;
	void *regmap; /* MMIO mapping of THM regs, used by thm_read*/thm_write* */
	struct task_struct *monitor; /* sampling kthread */
	struct task_struct *adjust; /* clamp-adjust kthread (ips_adjust) */
	struct dentry *debug_root;
	/* Average CPU core temps (all averages in .01 degrees C for precision) */
	u16 ctv1_avg_temp;
	u16 ctv2_avg_temp;
	/* GMCH average */
	u16 mch_avg_temp;
	/* Average for the CPU (both cores?) */
	u16 mcp_avg_temp;
	/* Average power consumption (in mW) */
	u32 cpu_avg_power;
	u32 mch_avg_power;
	/* Offset values */
	u16 cta_val;
	u16 pta_val;
	u16 mgta_val;
	/* Maximums & prefs, protected by turbo status lock */
	spinlock_t turbo_status_lock;
	u16 mcp_temp_limit;
	u16 mcp_power_limit;
	u16 core_power_limit;
	u16 mch_power_limit;
	bool cpu_turbo_enabled;
	bool __cpu_turbo_on; /* actual current CPU turbo state */
	bool gpu_turbo_enabled;
	bool __gpu_turbo_on; /* actual current GPU turbo state */
	bool gpu_preferred;
	bool poll_turbo_status;
	bool second_cpu;
	bool turbo_toggle_allowed;
	struct ips_mcp_limits *limits;
	/* Optional MCH interfaces for if i915 is in use */
	unsigned long (*read_mch_val)(void);
	bool (*gpu_raise)(void);
	bool (*gpu_lower)(void);
	bool (*gpu_busy)(void);
	bool (*gpu_turbo_disable)(void);
	/* For restoration at unload */
	u64 orig_turbo_limit;
	u64 orig_turbo_ratios;
};

/* Forward declaration: defined later, used by the GPU helpers below. */
static bool
ips_gpu_turbo_enabled(struct ips_driver *ips);
  322. /**
  323. * ips_cpu_busy - is CPU busy?
  324. * @ips: IPS driver struct
  325. *
  326. * Check CPU for load to see whether we should increase its thermal budget.
  327. *
  328. * RETURNS:
  329. * True if the CPU could use more power, false otherwise.
  330. */
  331. static bool ips_cpu_busy(struct ips_driver *ips)
  332. {
  333. if ((avenrun[0] >> FSHIFT) > 1)
  334. return true;
  335. return false;
  336. }
/**
 * ips_cpu_raise - raise CPU power clamp
 * @ips: IPS driver struct
 *
 * Raise the CPU power clamp by %IPS_CPU_STEP, in accordance with TDP for
 * this platform.
 *
 * We do this by adjusting the TURBO_POWER_CURRENT_LIMIT MSR upwards (as
 * long as we haven't hit the TDP limit for the SKU).
 */
static void ips_cpu_raise(struct ips_driver *ips)
{
	u64 turbo_override;
	u16 cur_tdp_limit, new_tdp_limit;

	if (!ips->cpu_turbo_enabled)
		return;

	rdmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_override);

	cur_tdp_limit = turbo_override & TURBO_TDP_MASK;
	new_tdp_limit = cur_tdp_limit + 8; /* 1W increase */

	/* Clamp to SKU TDP limit */
	/* NOTE(review): (x * 10) / 8 converts 1/8 W MSR units (8 == 1 W, per
	 * the step above) into 1/10 W units — presumably the units THM_MPCPC
	 * and core_power_limit use; confirm against the EDS. */
	if (((new_tdp_limit * 10) / 8) > ips->core_power_limit)
		new_tdp_limit = cur_tdp_limit;

	/* Mirror the new clamp into the thermal device for the ME firmware */
	thm_writew(THM_MPCPC, (new_tdp_limit * 10) / 8);

	/* Two-step MSR update: first enable the override bits, then program
	 * the new TDP field. Keep this ordering. */
	turbo_override |= TURBO_TDC_OVR_EN | TURBO_TDP_OVR_EN;
	wrmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_override);

	turbo_override &= ~TURBO_TDP_MASK;
	turbo_override |= new_tdp_limit;
	wrmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_override);
}
  366. /**
  367. * ips_cpu_lower - lower CPU power clamp
  368. * @ips: IPS driver struct
  369. *
  370. * Lower CPU power clamp b %IPS_CPU_STEP if possible.
  371. *
  372. * We do this by adjusting the TURBO_POWER_CURRENT_LIMIT MSR down, going
  373. * as low as the platform limits will allow (though we could go lower there
  374. * wouldn't be much point).
  375. */
  376. static void ips_cpu_lower(struct ips_driver *ips)
  377. {
  378. u64 turbo_override;
  379. u16 cur_limit, new_limit;
  380. rdmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_override);
  381. cur_limit = turbo_override & TURBO_TDP_MASK;
  382. new_limit = cur_limit - 8; /* 1W decrease */
  383. /* Clamp to SKU TDP limit */
  384. if (new_limit < (ips->orig_turbo_limit & TURBO_TDP_MASK))
  385. new_limit = ips->orig_turbo_limit & TURBO_TDP_MASK;
  386. thm_writew(THM_MPCPC, (new_limit * 10) / 8);
  387. turbo_override |= TURBO_TDC_OVR_EN | TURBO_TDP_OVR_EN;
  388. wrmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_override);
  389. turbo_override &= ~TURBO_TDP_MASK;
  390. turbo_override |= new_limit;
  391. wrmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_override);
  392. }
  393. /**
  394. * do_enable_cpu_turbo - internal turbo enable function
  395. * @data: unused
  396. *
  397. * Internal function for actually updating MSRs. When we enable/disable
  398. * turbo, we need to do it on each CPU; this function is the one called
  399. * by on_each_cpu() when needed.
  400. */
  401. static void do_enable_cpu_turbo(void *data)
  402. {
  403. u64 perf_ctl;
  404. rdmsrl(IA32_PERF_CTL, perf_ctl);
  405. if (perf_ctl & IA32_PERF_TURBO_DIS) {
  406. perf_ctl &= ~IA32_PERF_TURBO_DIS;
  407. wrmsrl(IA32_PERF_CTL, perf_ctl);
  408. }
  409. }
  410. /**
  411. * ips_enable_cpu_turbo - enable turbo mode on all CPUs
  412. * @ips: IPS driver struct
  413. *
  414. * Enable turbo mode by clearing the disable bit in IA32_PERF_CTL on
  415. * all logical threads.
  416. */
  417. static void ips_enable_cpu_turbo(struct ips_driver *ips)
  418. {
  419. /* Already on, no need to mess with MSRs */
  420. if (ips->__cpu_turbo_on)
  421. return;
  422. if (ips->turbo_toggle_allowed)
  423. on_each_cpu(do_enable_cpu_turbo, ips, 1);
  424. ips->__cpu_turbo_on = true;
  425. }
  426. /**
  427. * do_disable_cpu_turbo - internal turbo disable function
  428. * @data: unused
  429. *
  430. * Internal function for actually updating MSRs. When we enable/disable
  431. * turbo, we need to do it on each CPU; this function is the one called
  432. * by on_each_cpu() when needed.
  433. */
  434. static void do_disable_cpu_turbo(void *data)
  435. {
  436. u64 perf_ctl;
  437. rdmsrl(IA32_PERF_CTL, perf_ctl);
  438. if (!(perf_ctl & IA32_PERF_TURBO_DIS)) {
  439. perf_ctl |= IA32_PERF_TURBO_DIS;
  440. wrmsrl(IA32_PERF_CTL, perf_ctl);
  441. }
  442. }
  443. /**
  444. * ips_disable_cpu_turbo - disable turbo mode on all CPUs
  445. * @ips: IPS driver struct
  446. *
  447. * Disable turbo mode by setting the disable bit in IA32_PERF_CTL on
  448. * all logical threads.
  449. */
  450. static void ips_disable_cpu_turbo(struct ips_driver *ips)
  451. {
  452. /* Already off, leave it */
  453. if (!ips->__cpu_turbo_on)
  454. return;
  455. if (ips->turbo_toggle_allowed)
  456. on_each_cpu(do_disable_cpu_turbo, ips, 1);
  457. ips->__cpu_turbo_on = false;
  458. }
  459. /**
  460. * ips_gpu_busy - is GPU busy?
  461. * @ips: IPS driver struct
  462. *
  463. * Check GPU for load to see whether we should increase its thermal budget.
  464. * We need to call into the i915 driver in this case.
  465. *
  466. * RETURNS:
  467. * True if the GPU could use more power, false otherwise.
  468. */
  469. static bool ips_gpu_busy(struct ips_driver *ips)
  470. {
  471. if (!ips_gpu_turbo_enabled(ips))
  472. return false;
  473. return ips->gpu_busy();
  474. }
  475. /**
  476. * ips_gpu_raise - raise GPU power clamp
  477. * @ips: IPS driver struct
  478. *
  479. * Raise the GPU frequency/power if possible. We need to call into the
  480. * i915 driver in this case.
  481. */
  482. static void ips_gpu_raise(struct ips_driver *ips)
  483. {
  484. if (!ips_gpu_turbo_enabled(ips))
  485. return;
  486. if (!ips->gpu_raise())
  487. ips->gpu_turbo_enabled = false;
  488. return;
  489. }
  490. /**
  491. * ips_gpu_lower - lower GPU power clamp
  492. * @ips: IPS driver struct
  493. *
  494. * Lower GPU frequency/power if possible. Need to call i915.
  495. */
  496. static void ips_gpu_lower(struct ips_driver *ips)
  497. {
  498. if (!ips_gpu_turbo_enabled(ips))
  499. return;
  500. if (!ips->gpu_lower())
  501. ips->gpu_turbo_enabled = false;
  502. return;
  503. }
  504. /**
  505. * ips_enable_gpu_turbo - notify the gfx driver turbo is available
  506. * @ips: IPS driver struct
  507. *
  508. * Call into the graphics driver indicating that it can safely use
  509. * turbo mode.
  510. */
  511. static void ips_enable_gpu_turbo(struct ips_driver *ips)
  512. {
  513. if (ips->__gpu_turbo_on)
  514. return;
  515. ips->__gpu_turbo_on = true;
  516. }
  517. /**
  518. * ips_disable_gpu_turbo - notify the gfx driver to disable turbo mode
  519. * @ips: IPS driver struct
  520. *
  521. * Request that the graphics driver disable turbo mode.
  522. */
  523. static void ips_disable_gpu_turbo(struct ips_driver *ips)
  524. {
  525. /* Avoid calling i915 if turbo is already disabled */
  526. if (!ips->__gpu_turbo_on)
  527. return;
  528. if (!ips->gpu_turbo_disable())
  529. dev_err(&ips->dev->dev, "failed to disable graphis turbo\n");
  530. else
  531. ips->__gpu_turbo_on = false;
  532. }
  533. /**
  534. * mcp_exceeded - check whether we're outside our thermal & power limits
  535. * @ips: IPS driver struct
  536. *
  537. * Check whether the MCP is over its thermal or power budget.
  538. */
  539. static bool mcp_exceeded(struct ips_driver *ips)
  540. {
  541. unsigned long flags;
  542. bool ret = false;
  543. u32 temp_limit;
  544. u32 avg_power;
  545. spin_lock_irqsave(&ips->turbo_status_lock, flags);
  546. temp_limit = ips->mcp_temp_limit * 100;
  547. if (ips->mcp_avg_temp > temp_limit)
  548. ret = true;
  549. avg_power = ips->cpu_avg_power + ips->mch_avg_power;
  550. if (avg_power > ips->mcp_power_limit)
  551. ret = true;
  552. spin_unlock_irqrestore(&ips->turbo_status_lock, flags);
  553. return ret;
  554. }
  555. /**
  556. * cpu_exceeded - check whether a CPU core is outside its limits
  557. * @ips: IPS driver struct
  558. * @cpu: CPU number to check
  559. *
  560. * Check a given CPU's average temp or power is over its limit.
  561. */
  562. static bool cpu_exceeded(struct ips_driver *ips, int cpu)
  563. {
  564. unsigned long flags;
  565. int avg;
  566. bool ret = false;
  567. spin_lock_irqsave(&ips->turbo_status_lock, flags);
  568. avg = cpu ? ips->ctv2_avg_temp : ips->ctv1_avg_temp;
  569. if (avg > (ips->limits->core_temp_limit * 100))
  570. ret = true;
  571. if (ips->cpu_avg_power > ips->core_power_limit * 100)
  572. ret = true;
  573. spin_unlock_irqrestore(&ips->turbo_status_lock, flags);
  574. if (ret)
  575. dev_info(&ips->dev->dev,
  576. "CPU power or thermal limit exceeded\n");
  577. return ret;
  578. }
  579. /**
  580. * mch_exceeded - check whether the GPU is over budget
  581. * @ips: IPS driver struct
  582. *
  583. * Check the MCH temp & power against their maximums.
  584. */
  585. static bool mch_exceeded(struct ips_driver *ips)
  586. {
  587. unsigned long flags;
  588. bool ret = false;
  589. spin_lock_irqsave(&ips->turbo_status_lock, flags);
  590. if (ips->mch_avg_temp > (ips->limits->mch_temp_limit * 100))
  591. ret = true;
  592. if (ips->mch_avg_power > ips->mch_power_limit)
  593. ret = true;
  594. spin_unlock_irqrestore(&ips->turbo_status_lock, flags);
  595. return ret;
  596. }
  597. /**
  598. * verify_limits - verify BIOS provided limits
  599. * @ips: IPS structure
  600. *
  601. * BIOS can optionally provide non-default limits for power and temp. Check
  602. * them here and use the defaults if the BIOS values are not provided or
  603. * are otherwise unusable.
  604. */
  605. static void verify_limits(struct ips_driver *ips)
  606. {
  607. if (ips->mcp_power_limit < ips->limits->mcp_power_limit ||
  608. ips->mcp_power_limit > 35000)
  609. ips->mcp_power_limit = ips->limits->mcp_power_limit;
  610. if (ips->mcp_temp_limit < ips->limits->core_temp_limit ||
  611. ips->mcp_temp_limit < ips->limits->mch_temp_limit ||
  612. ips->mcp_temp_limit > 150)
  613. ips->mcp_temp_limit = min(ips->limits->core_temp_limit,
  614. ips->limits->mch_temp_limit);
  615. }
/**
 * update_turbo_limits - get various limits & settings from regs
 * @ips: IPS driver struct
 *
 * Update the IPS power & temp limits, along with turbo enable flags,
 * based on latest register contents.
 *
 * Used at init time and for runtime BIOS support, which requires polling
 * the regs for updates (as a result of AC->DC transition for example).
 *
 * LOCKING:
 * Caller must hold turbo_status_lock (outside of init)
 */
static void update_turbo_limits(struct ips_driver *ips)
{
	u32 hts = thm_readl(THM_HTS);

	ips->cpu_turbo_enabled = !(hts & HTS_PCTD_DIS);
	/*
	 * Disable turbo for now, until we can figure out why the power figures
	 * are wrong
	 */
	/* NOTE: this override is deliberate — it forces CPU turbo off
	 * regardless of the HTS bit read above. Do not "clean up". */
	ips->cpu_turbo_enabled = false;

	/* gpu_busy doubles as "i915 callbacks are registered" here */
	if (ips->gpu_busy)
		ips->gpu_turbo_enabled = !(hts & HTS_GTD_DIS);

	/* Pull the current clamps/limits from the thermal device */
	ips->core_power_limit = thm_readw(THM_MPCPC);
	ips->mch_power_limit = thm_readw(THM_MMGPC);
	ips->mcp_temp_limit = thm_readw(THM_PTL);
	ips->mcp_power_limit = thm_readw(THM_MPPC);

	verify_limits(ips);
	/* Ignore BIOS CPU vs GPU pref */
}
/**
 * ips_adjust - adjust power clamp based on thermal state
 * @data: ips driver structure
 *
 * Wake up every 5s or so and check whether we should adjust the power clamp.
 * Check CPU and GPU load to determine which needs adjustment. There are
 * several things to consider here:
 *   - do we need to adjust up or down?
 *   - is CPU busy?
 *   - is GPU busy?
 *   - is CPU in turbo?
 *   - is GPU in turbo?
 *   - is CPU or GPU preferred? (CPU is default)
 *
 * So, given the above, we do the following:
 *   - up (TDP available)
 *     - CPU not busy, GPU not busy - nothing
 *     - CPU busy, GPU not busy - adjust CPU up
 *     - CPU not busy, GPU busy - adjust GPU up
 *     - CPU busy, GPU busy - adjust preferred unit up, taking headroom from
 *       non-preferred unit if necessary
 *   - down (at TDP limit)
 *     - adjust both CPU and GPU down if possible
 *
 *		cpu+ gpu+	cpu+gpu-	cpu-gpu+	cpu-gpu-
 * cpu < gpu <	cpu+gpu+	cpu+		gpu+		nothing
 * cpu < gpu >=	cpu+gpu-(mcp<)	cpu+gpu-(mcp<)	gpu-		gpu-
 * cpu >= gpu <	cpu-gpu+(mcp<)	cpu-		cpu-gpu+(mcp<)	cpu-
 * cpu >= gpu >=	cpu-gpu-	cpu-gpu-	cpu-gpu-	cpu-gpu-
 *
 */
static int ips_adjust(void *data)
{
	struct ips_driver *ips = data;
	unsigned long flags;

	dev_dbg(&ips->dev->dev, "starting ips-adjust thread\n");

	/*
	 * Adjust CPU and GPU clamps every 5s if needed. Doing it more
	 * often isn't recommended due to ME interaction.
	 */
	do {
		/* Sample busy state before taking the lock */
		bool cpu_busy = ips_cpu_busy(ips);
		bool gpu_busy = ips_gpu_busy(ips);

		/* Refresh limits from regs when runtime BIOS polling is on */
		spin_lock_irqsave(&ips->turbo_status_lock, flags);
		if (ips->poll_turbo_status)
			update_turbo_limits(ips);
		spin_unlock_irqrestore(&ips->turbo_status_lock, flags);

		/* Update turbo status if necessary */
		if (ips->cpu_turbo_enabled)
			ips_enable_cpu_turbo(ips);
		else
			ips_disable_cpu_turbo(ips);

		if (ips->gpu_turbo_enabled)
			ips_enable_gpu_turbo(ips);
		else
			ips_disable_gpu_turbo(ips);

		/* We're outside our comfort zone, crank them down */
		if (mcp_exceeded(ips)) {
			ips_cpu_lower(ips);
			ips_gpu_lower(ips);
			goto sleep;
		}

		/* Within package budget: raise the busy, in-budget unit;
		 * lower anything idle or over its own limit */
		if (!cpu_exceeded(ips, 0) && cpu_busy)
			ips_cpu_raise(ips);
		else
			ips_cpu_lower(ips);

		if (!mch_exceeded(ips) && gpu_busy)
			ips_gpu_raise(ips);
		else
			ips_gpu_lower(ips);

sleep:
		schedule_timeout_interruptible(msecs_to_jiffies(IPS_ADJUST_PERIOD));
	} while (!kthread_should_stop());

	dev_dbg(&ips->dev->dev, "ips-adjust thread stopped\n");

	return 0;
}
  723. /*
  724. * Helpers for reading out temp/power values and calculating their
  725. * averages for the decision making and monitoring functions.
  726. */
  727. static u16 calc_avg_temp(struct ips_driver *ips, u16 *array)
  728. {
  729. u64 total = 0;
  730. int i;
  731. u16 avg;
  732. for (i = 0; i < IPS_SAMPLE_COUNT; i++)
  733. total += (u64)(array[i] * 100);
  734. do_div(total, IPS_SAMPLE_COUNT);
  735. avg = (u16)total;
  736. return avg;
  737. }
/*
 * read_mgtv - read and adjust the MCH graphics temperature
 * @ips: ips driver structure
 *
 * Reads the raw MGTV value and applies the slope/offset from MGTA, but the
 * adjusted result is deliberately discarded (see comment on the return).
 */
static u16 read_mgtv(struct ips_driver *ips)
{
	u16 ret;
	u64 slope, offset;
	u64 val;

	val = thm_readq(THM_MGTV);
	val = (val & TV_MASK) >> TV_SHIFT;

	/* Slope/offset calibration values share the MGTA register */
	slope = offset = thm_readw(THM_MGTA);
	slope = (slope & MGTA_SLOPE_MASK) >> MGTA_SLOPE_SHIFT;
	offset = offset & MGTA_OFFSET_MASK;

	ret = ((val * slope + 0x40) >> 7) + offset;

	/* 'ret' is intentionally unused: hardware reporting is unreliable */
	return 0; /* MCH temp reporting buggy */
}
/*
 * read_ptv - read the package temperature (PTV)
 * @ips: ips driver structure
 *
 * NOTE(review): slope/offset are derived from pta_val but never applied;
 * the raw masked PTV reading is returned — confirm this is intended.
 */
static u16 read_ptv(struct ips_driver *ips)
{
	u16 val, slope, offset;

	slope = (ips->pta_val & PTA_SLOPE_MASK) >> PTA_SLOPE_SHIFT;
	offset = ips->pta_val & PTA_OFFSET_MASK;

	val = thm_readw(THM_PTV) & PTV_MASK;

	return val;
}
  759. static u16 read_ctv(struct ips_driver *ips, int cpu)
  760. {
  761. int reg = cpu ? THM_CTV2 : THM_CTV1;
  762. u16 val;
  763. val = thm_readw(reg);
  764. if (!(val & CTV_TEMP_ERROR))
  765. val = (val) >> 6; /* discard fractional component */
  766. else
  767. val = 0;
  768. return val;
  769. }
/*
 * get_cpu_power - sample the CPU energy counter and derive average power
 * @ips: ips driver structure
 * @last: previous CEC reading; updated to the current reading
 * @period: elapsed time since @last was taken, in ms
 *
 * NOTE(review): the computed mW value in 'ret' is discarded and 0 is
 * returned unconditionally — same pattern as read_mgtv() above, presumably
 * because the reported figures are unreliable on this hardware.  Confirm
 * before "fixing" this to return ret.
 */
static u32 get_cpu_power(struct ips_driver *ips, u32 *last, int period)
{
	u32 val;
	u32 ret;

	/*
	 * CEC is in joules/65535.  Take difference over time to
	 * get watts.
	 */
	val = thm_readl(THM_CEC);

	/* period is in ms and we want mW */
	ret = (((val - *last) * 1000) / period);
	ret = (ret * 1000) / 65535;

	*last = val;

	return 0;
}
  785. static const u16 temp_decay_factor = 2;
  786. static u16 update_average_temp(u16 avg, u16 val)
  787. {
  788. u16 ret;
  789. /* Multiply by 100 for extra precision */
  790. ret = (val * 100 / temp_decay_factor) +
  791. (((temp_decay_factor - 1) * avg) / temp_decay_factor);
  792. return ret;
  793. }
  794. static const u16 power_decay_factor = 2;
  795. static u16 update_average_power(u32 avg, u32 val)
  796. {
  797. u32 ret;
  798. ret = (val / power_decay_factor) +
  799. (((power_decay_factor - 1) * avg) / power_decay_factor);
  800. return ret;
  801. }
  802. static u32 calc_avg_power(struct ips_driver *ips, u32 *array)
  803. {
  804. u64 total = 0;
  805. u32 avg;
  806. int i;
  807. for (i = 0; i < IPS_SAMPLE_COUNT; i++)
  808. total += array[i];
  809. do_div(total, IPS_SAMPLE_COUNT);
  810. avg = (u32)total;
  811. return avg;
  812. }
/*
 * monitor_timeout - sample timer callback
 * @arg: the monitor thread's task_struct, passed via the legacy timer arg
 *
 * Wakes the ips-monitor thread when its deferrable sample timer fires.
 */
static void monitor_timeout(unsigned long arg)
{
	wake_up_process((struct task_struct *)arg);
}
/**
 * ips_monitor - temp/power monitoring thread
 * @data: ips driver structure
 *
 * This is the main function for the IPS driver.  It monitors power and
 * temperature in the MCP and adjusts CPU and GPU power clamps accordingly.
 *
 * We keep a 5s moving average of power consumption and temperature.  Using
 * that data, along with CPU vs GPU preference, we adjust the power clamps
 * up or down.
 */
static int ips_monitor(void *data)
{
	struct ips_driver *ips = data;
	struct timer_list timer;
	unsigned long seqno_timestamp, expire, last_msecs, last_sample_period;
	int i;
	u32 *cpu_samples, *mchp_samples, old_cpu_power;
	u16 *mcp_samples, *ctv1_samples, *ctv2_samples, *mch_samples;
	u8 cur_seqno, last_seqno;

	/* One sample buffer per sensor, used only for the initial average */
	mcp_samples = kzalloc(sizeof(u16) * IPS_SAMPLE_COUNT, GFP_KERNEL);
	ctv1_samples = kzalloc(sizeof(u16) * IPS_SAMPLE_COUNT, GFP_KERNEL);
	ctv2_samples = kzalloc(sizeof(u16) * IPS_SAMPLE_COUNT, GFP_KERNEL);
	mch_samples = kzalloc(sizeof(u16) * IPS_SAMPLE_COUNT, GFP_KERNEL);
	cpu_samples = kzalloc(sizeof(u32) * IPS_SAMPLE_COUNT, GFP_KERNEL);
	mchp_samples = kzalloc(sizeof(u32) * IPS_SAMPLE_COUNT, GFP_KERNEL);
	if (!mcp_samples || !ctv1_samples || !ctv2_samples || !mch_samples ||
	    !cpu_samples || !mchp_samples) {
		dev_err(&ips->dev->dev,
			"failed to allocate sample array, ips disabled\n");
		/* kfree(NULL) is a no-op, so free all unconditionally */
		kfree(mcp_samples);
		kfree(ctv1_samples);
		kfree(ctv2_samples);
		kfree(mch_samples);
		kfree(cpu_samples);
		kfree(mchp_samples);
		return -ENOMEM;
	}

	/* Baseline for ME liveness tracking and CPU energy deltas */
	last_seqno = (thm_readl(THM_ITV) & ITV_ME_SEQNO_MASK) >>
		ITV_ME_SEQNO_SHIFT;
	seqno_timestamp = get_jiffies_64();

	old_cpu_power = thm_readl(THM_CEC);
	schedule_timeout_interruptible(msecs_to_jiffies(IPS_SAMPLE_PERIOD));

	/* Collect an initial average */
	for (i = 0; i < IPS_SAMPLE_COUNT; i++) {
		u32 mchp, cpu_power;
		u16 val;

		mcp_samples[i] = read_ptv(ips);

		val = read_ctv(ips, 0);
		ctv1_samples[i] = val;

		val = read_ctv(ips, 1);
		ctv2_samples[i] = val;

		val = read_mgtv(ips);
		mch_samples[i] = val;

		cpu_power = get_cpu_power(ips, &old_cpu_power,
					  IPS_SAMPLE_PERIOD);
		cpu_samples[i] = cpu_power;

		/* GPU power only when the i915 hook is available */
		if (ips->read_mch_val) {
			mchp = ips->read_mch_val();
			mchp_samples[i] = mchp;
		}

		schedule_timeout_interruptible(msecs_to_jiffies(IPS_SAMPLE_PERIOD));
		if (kthread_should_stop())
			break;
	}

	ips->mcp_avg_temp = calc_avg_temp(ips, mcp_samples);
	ips->ctv1_avg_temp = calc_avg_temp(ips, ctv1_samples);
	ips->ctv2_avg_temp = calc_avg_temp(ips, ctv2_samples);
	ips->mch_avg_temp = calc_avg_temp(ips, mch_samples);
	ips->cpu_avg_power = calc_avg_power(ips, cpu_samples);
	ips->mch_avg_power = calc_avg_power(ips, mchp_samples);
	kfree(mcp_samples);
	kfree(ctv1_samples);
	kfree(ctv2_samples);
	kfree(mch_samples);
	kfree(cpu_samples);
	kfree(mchp_samples);

	/* Start the adjustment thread now that we have data */
	wake_up_process(ips->adjust);

	/*
	 * Ok, now we have an initial avg.  From here on out, we track the
	 * running avg using a decaying average calculation.  This allows
	 * us to reduce the sample frequency if the CPU and GPU are idle.
	 */
	old_cpu_power = thm_readl(THM_CEC);
	schedule_timeout_interruptible(msecs_to_jiffies(IPS_SAMPLE_PERIOD));
	last_sample_period = IPS_SAMPLE_PERIOD;

	/* Deferrable timer lets an idle CPU sleep past the sample deadline */
	setup_deferrable_timer_on_stack(&timer, monitor_timeout,
					(unsigned long)current);
	do {
		u32 cpu_val, mch_val;
		u16 val;

		/* MCP itself */
		val = read_ptv(ips);
		ips->mcp_avg_temp = update_average_temp(ips->mcp_avg_temp, val);

		/* Processor 0 */
		val = read_ctv(ips, 0);
		ips->ctv1_avg_temp =
			update_average_temp(ips->ctv1_avg_temp, val);
		/* Power */
		cpu_val = get_cpu_power(ips, &old_cpu_power,
					last_sample_period);
		ips->cpu_avg_power =
			update_average_power(ips->cpu_avg_power, cpu_val);

		if (ips->second_cpu) {
			/* Processor 1 */
			val = read_ctv(ips, 1);
			ips->ctv2_avg_temp =
				update_average_temp(ips->ctv2_avg_temp, val);
		}

		/* MCH */
		val = read_mgtv(ips);
		ips->mch_avg_temp = update_average_temp(ips->mch_avg_temp, val);
		/* Power */
		if (ips->read_mch_val) {
			mch_val = ips->read_mch_val();
			ips->mch_avg_power =
				update_average_power(ips->mch_avg_power,
						     mch_val);
		}

		/*
		 * Make sure ME is updating thermal regs.
		 * Note:
		 * If it's been more than a second since the last update,
		 * the ME is probably hung.
		 */
		cur_seqno = (thm_readl(THM_ITV) & ITV_ME_SEQNO_MASK) >>
			ITV_ME_SEQNO_SHIFT;
		if (cur_seqno == last_seqno &&
		    time_after(jiffies, seqno_timestamp + HZ)) {
			dev_warn(&ips->dev->dev, "ME failed to update for more than 1s, likely hung\n");
		} else {
			seqno_timestamp = get_jiffies_64();
			last_seqno = cur_seqno;
		}

		last_msecs = jiffies_to_msecs(jiffies);
		expire = jiffies + msecs_to_jiffies(IPS_SAMPLE_PERIOD);

		__set_current_state(TASK_INTERRUPTIBLE);
		mod_timer(&timer, expire);
		schedule();

		/* Calculate actual sample period for power averaging */
		last_sample_period = jiffies_to_msecs(jiffies) - last_msecs;
		if (!last_sample_period)
			last_sample_period = 1;
	} while (!kthread_should_stop());

	del_timer_sync(&timer);
	destroy_timer_on_stack(&timer);

	dev_dbg(&ips->dev->dev, "ips-monitor thread stopped\n");

	return 0;
}
#if 0
/* Debug-only register dump helpers (compiled out) */
#define THM_DUMPW(reg) \
{ \
	u16 val = thm_readw(reg); \
	dev_dbg(&ips->dev->dev, #reg ": 0x%04x\n", val); \
}
#define THM_DUMPL(reg) \
{ \
	u32 val = thm_readl(reg); \
	dev_dbg(&ips->dev->dev, #reg ": 0x%08x\n", val); \
}
/* NOTE(review): %016x does not match a u64 argument; if this code is ever
 * re-enabled it should use 0x%016llx (or cast val to unsigned long long) */
#define THM_DUMPQ(reg) \
{ \
	u64 val = thm_readq(reg); \
	dev_dbg(&ips->dev->dev, #reg ": 0x%016x\n", val); \
}

/* Dump the interesting thermal registers for debugging */
static void dump_thermal_info(struct ips_driver *ips)
{
	u16 ptl;

	ptl = thm_readw(THM_PTL);
	dev_dbg(&ips->dev->dev, "Processor temp limit: %d\n", ptl);

	THM_DUMPW(THM_CTA);
	THM_DUMPW(THM_TRC);
	THM_DUMPW(THM_CTV1);
	THM_DUMPL(THM_STS);
	THM_DUMPW(THM_PTV);
	THM_DUMPQ(THM_MGTV);
}
#endif
  996. /**
  997. * ips_irq_handler - handle temperature triggers and other IPS events
  998. * @irq: irq number
  999. * @arg: unused
  1000. *
  1001. * Handle temperature limit trigger events, generally by lowering the clamps.
  1002. * If we're at a critical limit, we clamp back to the lowest possible value
  1003. * to prevent emergency shutdown.
  1004. */
static irqreturn_t ips_irq_handler(int irq, void *arg)
{
	struct ips_driver *ips = arg;
	u8 tses = thm_readb(THM_TSES);
	u8 tes = thm_readb(THM_TES);

	/* Shared IRQ line: no event bits set means it wasn't for us */
	if (!tses && !tes)
		return IRQ_NONE;

	dev_info(&ips->dev->dev, "TSES: 0x%02x\n", tses);
	dev_info(&ips->dev->dev, "TES: 0x%02x\n", tes);

	/* STS update from EC? */
	if (tes & 1) {
		u32 sts, tc1;

		sts = thm_readl(THM_STS);
		tc1 = thm_readl(THM_TC1);

		/* Only consume the update when the new-value-valid bit is set */
		if (sts & STS_NVV) {
			spin_lock(&ips->turbo_status_lock);
			ips->core_power_limit = (sts & STS_PCPL_MASK) >>
				STS_PCPL_SHIFT;
			ips->mch_power_limit = (sts & STS_GPL_MASK) >>
				STS_GPL_SHIFT;
			/* ignore EC CPU vs GPU pref */
			ips->cpu_turbo_enabled = !(sts & STS_PCTD_DIS);
			/*
			 * Disable turbo for now, until we can figure
			 * out why the power figures are wrong
			 */
			ips->cpu_turbo_enabled = false;
			if (ips->gpu_busy)
				ips->gpu_turbo_enabled = !(sts & STS_GTD_DIS);
			ips->mcp_temp_limit = (sts & STS_PTL_MASK) >>
				STS_PTL_SHIFT;
			ips->mcp_power_limit = (tc1 & STS_PPL_MASK) >>
				STS_PPL_SHIFT;
			verify_limits(ips);
			spin_unlock(&ips->turbo_status_lock);

			/* Acknowledge receipt of the new limits */
			thm_writeb(THM_SEC, SEC_ACK);
		}
		/* Echo the event bits back to clear them */
		thm_writeb(THM_TES, tes);
	}

	/* Thermal trip */
	if (tses) {
		dev_warn(&ips->dev->dev,
			 "thermal trip occurred, tses: 0x%04x\n", tses);
		thm_writeb(THM_TSES, tses);
	}

	return IRQ_HANDLED;
}
#ifndef CONFIG_DEBUG_FS
/* debugfs disabled in this config: init/cleanup collapse to no-ops */
static void ips_debugfs_init(struct ips_driver *ips) { return; }
static void ips_debugfs_cleanup(struct ips_driver *ips) { return; }
  1055. #else
  1056. /* Expose current state and limits in debugfs if possible */
/* Ties one debugfs file name to its show routine and the owning driver */
struct ips_debugfs_node {
	struct ips_driver *ips;	/* filled in by ips_debugfs_init() */
	char *name;		/* debugfs file name */
	int (*show)(struct seq_file *m, void *data);	/* seq_file show hook */
};
  1062. static int show_cpu_temp(struct seq_file *m, void *data)
  1063. {
  1064. struct ips_driver *ips = m->private;
  1065. seq_printf(m, "%d.%02d\n", ips->ctv1_avg_temp / 100,
  1066. ips->ctv1_avg_temp % 100);
  1067. return 0;
  1068. }
  1069. static int show_cpu_power(struct seq_file *m, void *data)
  1070. {
  1071. struct ips_driver *ips = m->private;
  1072. seq_printf(m, "%dmW\n", ips->cpu_avg_power);
  1073. return 0;
  1074. }
  1075. static int show_cpu_clamp(struct seq_file *m, void *data)
  1076. {
  1077. u64 turbo_override;
  1078. int tdp, tdc;
  1079. rdmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_override);
  1080. tdp = (int)(turbo_override & TURBO_TDP_MASK);
  1081. tdc = (int)((turbo_override & TURBO_TDC_MASK) >> TURBO_TDC_SHIFT);
  1082. /* Convert to .1W/A units */
  1083. tdp = tdp * 10 / 8;
  1084. tdc = tdc * 10 / 8;
  1085. /* Watts Amperes */
  1086. seq_printf(m, "%d.%dW %d.%dA\n", tdp / 10, tdp % 10,
  1087. tdc / 10, tdc % 10);
  1088. return 0;
  1089. }
  1090. static int show_mch_temp(struct seq_file *m, void *data)
  1091. {
  1092. struct ips_driver *ips = m->private;
  1093. seq_printf(m, "%d.%02d\n", ips->mch_avg_temp / 100,
  1094. ips->mch_avg_temp % 100);
  1095. return 0;
  1096. }
  1097. static int show_mch_power(struct seq_file *m, void *data)
  1098. {
  1099. struct ips_driver *ips = m->private;
  1100. seq_printf(m, "%dmW\n", ips->mch_avg_power);
  1101. return 0;
  1102. }
/* Table of debugfs entries; the .ips field is patched in at init time */
static struct ips_debugfs_node ips_debug_files[] = {
	{ NULL, "cpu_temp", show_cpu_temp },
	{ NULL, "cpu_power", show_cpu_power },
	{ NULL, "cpu_clamp", show_cpu_clamp },
	{ NULL, "mch_temp", show_mch_temp },
	{ NULL, "mch_power", show_mch_power },
};
/* Bind the node's show() routine and driver instance to this seq_file */
static int ips_debugfs_open(struct inode *inode, struct file *file)
{
	struct ips_debugfs_node *node = inode->i_private;

	return single_open(file, node->show, node->ips);
}
/* All IPS debugfs files share these read-only single_open() based ops */
static const struct file_operations ips_debugfs_ops = {
	.owner = THIS_MODULE,
	.open = ips_debugfs_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
  1122. static void ips_debugfs_cleanup(struct ips_driver *ips)
  1123. {
  1124. if (ips->debug_root)
  1125. debugfs_remove_recursive(ips->debug_root);
  1126. return;
  1127. }
/* Create the "ips" debugfs directory and its read-only status files */
static void ips_debugfs_init(struct ips_driver *ips)
{
	int i;

	ips->debug_root = debugfs_create_dir("ips", NULL);
	if (!ips->debug_root) {
		/* NOTE(review): PTR_ERR() of NULL prints 0, not a useful
		 * errno — the check and the message disagree about the
		 * failure representation; confirm against the debugfs API */
		dev_err(&ips->dev->dev,
			"failed to create debugfs entries: %ld\n",
			PTR_ERR(ips->debug_root));
		return;
	}

	for (i = 0; i < ARRAY_SIZE(ips_debug_files); i++) {
		struct dentry *ent;
		struct ips_debugfs_node *node = &ips_debug_files[i];

		/* Give each node a back-pointer to the driver instance */
		node->ips = ips;
		ent = debugfs_create_file(node->name, S_IFREG | S_IRUGO,
					  ips->debug_root, node,
					  &ips_debugfs_ops);
		if (!ent) {
			/* NOTE(review): same PTR_ERR-on-NULL issue as above */
			dev_err(&ips->dev->dev,
				"failed to create debug file: %ld\n",
				PTR_ERR(ent));
			goto err_cleanup;
		}
	}

	return;

err_cleanup:
	ips_debugfs_cleanup(ips);
	return;
}
  1157. #endif /* CONFIG_DEBUG_FS */
  1158. /**
  1159. * ips_detect_cpu - detect whether CPU supports IPS
  1160. *
  1161. * Walk our list and see if we're on a supported CPU. If we find one,
  1162. * return the limits for it.
  1163. */
static struct ips_mcp_limits *ips_detect_cpu(struct ips_driver *ips)
{
	u64 turbo_power, misc_en;
	struct ips_mcp_limits *limits = NULL;
	u16 tdp;

	/* Only family 6 model 37 parts are supported */
	if (!(boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model == 37)) {
		dev_info(&ips->dev->dev, "Non-IPS CPU detected.\n");
		goto out;
	}

	rdmsrl(IA32_MISC_ENABLE, misc_en);
	/*
	 * If the turbo enable bit isn't set, we shouldn't try to enable/disable
	 * turbo manually or we'll get an illegal MSR access, even though
	 * turbo will still be available.
	 */
	if (misc_en & IA32_MISC_TURBO_EN)
		ips->turbo_toggle_allowed = true;
	else
		ips->turbo_toggle_allowed = false;

	/*
	 * Pick the limit set for the CPU segment from the brand string.
	 * NOTE(review): brand strings typically pad the model with several
	 * spaces ("CPU       M 520"); confirm these single-space patterns
	 * weren't mangled in transit.
	 */
	if (strstr(boot_cpu_data.x86_model_id, "CPU M"))
		limits = &ips_sv_limits;
	else if (strstr(boot_cpu_data.x86_model_id, "CPU L"))
		limits = &ips_lv_limits;
	else if (strstr(boot_cpu_data.x86_model_id, "CPU U"))
		limits = &ips_ulv_limits;
	else {
		dev_info(&ips->dev->dev, "No CPUID match found.\n");
		goto out;
	}

	rdmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_power);
	tdp = turbo_power & TURBO_TDP_MASK;

	/* Sanity check TDP against CPU; note this mutates the shared table */
	if (limits->core_power_limit != (tdp / 8) * 1000) {
		dev_info(&ips->dev->dev, "CPU TDP doesn't match expected value (found %d, expected %d)\n",
			 tdp / 8, limits->core_power_limit / 1000);
		limits->core_power_limit = (tdp / 8) * 1000;
	}

out:
	return limits;
}
  1204. /**
  1205. * ips_get_i915_syms - try to get GPU control methods from i915 driver
  1206. * @ips: IPS driver
  1207. *
  1208. * The i915 driver exports several interfaces to allow the IPS driver to
  1209. * monitor and control graphics turbo mode. If we can find them, we can
  1210. * enable graphics turbo, otherwise we must disable it to avoid exceeding
  1211. * thermal and power limits in the MCP.
  1212. */
  1213. static bool ips_get_i915_syms(struct ips_driver *ips)
  1214. {
  1215. ips->read_mch_val = symbol_get(i915_read_mch_val);
  1216. if (!ips->read_mch_val)
  1217. goto out_err;
  1218. ips->gpu_raise = symbol_get(i915_gpu_raise);
  1219. if (!ips->gpu_raise)
  1220. goto out_put_mch;
  1221. ips->gpu_lower = symbol_get(i915_gpu_lower);
  1222. if (!ips->gpu_lower)
  1223. goto out_put_raise;
  1224. ips->gpu_busy = symbol_get(i915_gpu_busy);
  1225. if (!ips->gpu_busy)
  1226. goto out_put_lower;
  1227. ips->gpu_turbo_disable = symbol_get(i915_gpu_turbo_disable);
  1228. if (!ips->gpu_turbo_disable)
  1229. goto out_put_busy;
  1230. return true;
  1231. out_put_busy:
  1232. symbol_put(i915_gpu_busy);
  1233. out_put_lower:
  1234. symbol_put(i915_gpu_lower);
  1235. out_put_raise:
  1236. symbol_put(i915_gpu_raise);
  1237. out_put_mch:
  1238. symbol_put(i915_read_mch_val);
  1239. out_err:
  1240. return false;
  1241. }
  1242. static bool
  1243. ips_gpu_turbo_enabled(struct ips_driver *ips)
  1244. {
  1245. if (!ips->gpu_busy && late_i915_load) {
  1246. if (ips_get_i915_syms(ips)) {
  1247. dev_info(&ips->dev->dev,
  1248. "i915 driver attached, reenabling gpu turbo\n");
  1249. ips->gpu_turbo_enabled = !(thm_readl(THM_HTS) & HTS_GTD_DIS);
  1250. }
  1251. }
  1252. return ips->gpu_turbo_enabled;
  1253. }
/**
 * ips_link_to_i915_driver - notify IPS that the i915 driver has loaded
 *
 * We can't cleanly get at the various ips_driver structs from
 * this caller (the i915 driver), so just set a flag saying
 * that it's time to try getting the symbols again.
 */
void
ips_link_to_i915_driver(void)
{
	late_i915_load = true;
}
EXPORT_SYMBOL_GPL(ips_link_to_i915_driver);
  1264. static DEFINE_PCI_DEVICE_TABLE(ips_id_table) = {
  1265. { PCI_DEVICE(PCI_VENDOR_ID_INTEL,
  1266. PCI_DEVICE_ID_INTEL_THERMAL_SENSOR), },
  1267. { 0, }
  1268. };
  1269. MODULE_DEVICE_TABLE(pci, ips_id_table);
static int ips_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
	u64 platform_info;
	struct ips_driver *ips;
	u32 hts;
	int ret = 0;
	u16 htshi, trc, trc_required_mask;
	u8 tse;

	ips = kzalloc(sizeof(struct ips_driver), GFP_KERNEL);
	if (!ips)
		return -ENOMEM;

	pci_set_drvdata(dev, ips);
	ips->dev = dev;

	ips->limits = ips_detect_cpu(ips);
	if (!ips->limits) {
		dev_info(&dev->dev, "IPS not supported on this CPU\n");
		ret = -ENXIO;
		goto error_free;
	}

	spin_lock_init(&ips->turbo_status_lock);

	ret = pci_enable_device(dev);
	if (ret) {
		dev_err(&dev->dev, "can't enable PCI device, aborting\n");
		goto error_free;
	}

	/* NOTE(review): failure paths below never call pci_disable_device()
	 * after a successful enable — confirm whether that's acceptable */
	if (!pci_resource_start(dev, 0)) {
		dev_err(&dev->dev, "TBAR not assigned, aborting\n");
		ret = -ENXIO;
		goto error_free;
	}

	ret = pci_request_regions(dev, "ips thermal sensor");
	if (ret) {
		dev_err(&dev->dev, "thermal resource busy, aborting\n");
		goto error_free;
	}

	ips->regmap = ioremap(pci_resource_start(dev, 0),
			      pci_resource_len(dev, 0));
	if (!ips->regmap) {
		dev_err(&dev->dev, "failed to map thermal regs, aborting\n");
		ret = -EBUSY;
		goto error_release;
	}

	/* The sensor must already have been enabled by firmware */
	tse = thm_readb(THM_TSE);
	if (tse != TSE_EN) {
		dev_err(&dev->dev, "thermal device not enabled (0x%02x), aborting\n", tse);
		ret = -ENXIO;
		goto error_unmap;
	}

	/* Core 1, core power and MCH reporting must all be enabled */
	trc = thm_readw(THM_TRC);
	trc_required_mask = TRC_CORE1_EN | TRC_CORE_PWR | TRC_MCH_EN;
	if ((trc & trc_required_mask) != trc_required_mask) {
		dev_err(&dev->dev, "thermal reporting for required devices not enabled, aborting\n");
		ret = -ENXIO;
		goto error_unmap;
	}

	if (trc & TRC_CORE2_EN)
		ips->second_cpu = true;

	update_turbo_limits(ips);
	dev_dbg(&dev->dev, "max cpu power clamp: %dW\n",
		ips->mcp_power_limit / 10);
	dev_dbg(&dev->dev, "max core power clamp: %dW\n",
		ips->core_power_limit / 10);
	/* BIOS may update limits at runtime */
	if (thm_readl(THM_PSC) & PSP_PBRT)
		ips->poll_turbo_status = true;

	if (!ips_get_i915_syms(ips)) {
		dev_err(&dev->dev, "failed to get i915 symbols, graphics turbo disabled\n");
		ips->gpu_turbo_enabled = false;
	} else {
		dev_dbg(&dev->dev, "graphics turbo enabled\n");
		ips->gpu_turbo_enabled = true;
	}

	/*
	 * Check PLATFORM_INFO MSR to make sure this chip is
	 * turbo capable.
	 */
	rdmsrl(PLATFORM_INFO, platform_info);
	if (!(platform_info & PLATFORM_TDP)) {
		dev_err(&dev->dev, "platform indicates TDP override unavailable, aborting\n");
		ret = -ENODEV;
		goto error_unmap;
	}

	/*
	 * IRQ handler for ME interaction
	 * Note: don't use MSI here as the PCH has bugs.
	 */
	pci_disable_msi(dev);
	ret = request_irq(dev->irq, ips_irq_handler, IRQF_SHARED, "ips",
			  ips);
	if (ret) {
		dev_err(&dev->dev, "request irq failed, aborting\n");
		goto error_unmap;
	}

	/* Enable aux, hot & critical interrupts */
	thm_writeb(THM_TSPIEN, TSPIEN_AUX2_LOHI | TSPIEN_CRIT_LOHI |
		   TSPIEN_HOT_LOHI | TSPIEN_AUX_LOHI);
	thm_writeb(THM_TEN, TEN_UPDATE_EN);

	/* Collect adjustment values */
	ips->cta_val = thm_readw(THM_CTA);
	ips->pta_val = thm_readw(THM_PTA);
	ips->mgta_val = thm_readw(THM_MGTA);

	/* Save turbo limits & ratios */
	rdmsrl(TURBO_POWER_CURRENT_LIMIT, ips->orig_turbo_limit);

	ips_disable_cpu_turbo(ips);
	ips->cpu_turbo_enabled = false;

	/* Create thermal adjust thread */
	ips->adjust = kthread_create(ips_adjust, ips, "ips-adjust");
	if (IS_ERR(ips->adjust)) {
		dev_err(&dev->dev,
			"failed to create thermal adjust thread, aborting\n");
		/* NOTE(review): PTR_ERR(ips->adjust) would be more precise */
		ret = -ENOMEM;
		goto error_free_irq;
	}

	/*
	 * Set up the work queue and monitor thread. The monitor thread
	 * will wake up ips_adjust thread.
	 */
	ips->monitor = kthread_run(ips_monitor, ips, "ips-monitor");
	if (IS_ERR(ips->monitor)) {
		dev_err(&dev->dev,
			"failed to create thermal monitor thread, aborting\n");
		ret = -ENOMEM;
		goto error_thread_cleanup;
	}

	/* Publish our limits and running state via the HTS registers */
	hts = (ips->core_power_limit << HTS_PCPL_SHIFT) |
		(ips->mcp_temp_limit << HTS_PTL_SHIFT) | HTS_NVV;
	htshi = HTS2_PRST_RUNNING << HTS2_PRST_SHIFT;

	thm_writew(THM_HTSHI, htshi);
	thm_writel(THM_HTS, hts);

	ips_debugfs_init(ips);

	dev_info(&dev->dev, "IPS driver initialized, MCP temp limit %d\n",
		 ips->mcp_temp_limit);
	return ret;

error_thread_cleanup:
	kthread_stop(ips->adjust);
error_free_irq:
	free_irq(ips->dev->irq, ips);
error_unmap:
	iounmap(ips->regmap);
error_release:
	pci_release_regions(dev);
error_free:
	kfree(ips);
	return ret;
}
static void ips_remove(struct pci_dev *dev)
{
	struct ips_driver *ips = pci_get_drvdata(dev);
	u64 turbo_override;

	if (!ips)
		return;

	ips_debugfs_cleanup(ips);

	/* Release i915 driver */
	if (ips->read_mch_val)
		symbol_put(i915_read_mch_val);
	if (ips->gpu_raise)
		symbol_put(i915_gpu_raise);
	if (ips->gpu_lower)
		symbol_put(i915_gpu_lower);
	if (ips->gpu_busy)
		symbol_put(i915_gpu_busy);
	if (ips->gpu_turbo_disable)
		symbol_put(i915_gpu_turbo_disable);

	/* Clear our override-enable bits, then restore the saved MSR value */
	rdmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_override);
	turbo_override &= ~(TURBO_TDC_OVR_EN | TURBO_TDP_OVR_EN);
	wrmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_override);
	wrmsrl(TURBO_POWER_CURRENT_LIMIT, ips->orig_turbo_limit);

	free_irq(ips->dev->irq, ips);
	if (ips->adjust)
		kthread_stop(ips->adjust);
	if (ips->monitor)
		kthread_stop(ips->monitor);
	iounmap(ips->regmap);
	pci_release_regions(dev);
	kfree(ips);
	dev_dbg(&dev->dev, "IPS driver removed\n");
}
#ifdef CONFIG_PM
/* Legacy PCI PM hooks — intentionally empty; no state is saved or restored */
static int ips_suspend(struct pci_dev *dev, pm_message_t state)
{
	return 0;
}

static int ips_resume(struct pci_dev *dev)
{
	return 0;
}
#else
#define ips_suspend NULL
#define ips_resume NULL
#endif /* CONFIG_PM */
/* Nothing to quiesce at shutdown; MSR restore happens in ips_remove() */
static void ips_shutdown(struct pci_dev *dev)
{
}
/* PCI driver glue: binds ips_probe/ips_remove to the thermal sensor device */
static struct pci_driver ips_pci_driver = {
	.name = "intel ips",
	.id_table = ips_id_table,
	.probe = ips_probe,
	.remove = ips_remove,
	.suspend = ips_suspend,
	.resume = ips_resume,
	.shutdown = ips_shutdown,
};
/* Module load: register the PCI driver; probing happens on ID match */
static int __init ips_init(void)
{
	return pci_register_driver(&ips_pci_driver);
}
module_init(ips_init);
  1477. static void ips_exit(void)
  1478. {
  1479. pci_unregister_driver(&ips_pci_driver);
  1480. return;
  1481. }
  1482. module_exit(ips_exit);
  1483. MODULE_LICENSE("GPL");
  1484. MODULE_AUTHOR("Jesse Barnes <jbarnes@virtuousgeek.org>");
  1485. MODULE_DESCRIPTION("Intelligent Power Sharing Driver");