ds.c

/*
 * Debug Store support
 *
 * This provides a low-level interface to the hardware's Debug Store
 * feature that is used for branch trace store (BTS) and
 * precise-event based sampling (PEBS).
 *
 * It manages:
 * - per-thread and per-cpu allocation of BTS and PEBS
 * - buffer memory allocation (optional)
 * - buffer overflow handling
 * - buffer access
 *
 * It assumes:
 * - get_task_struct on all parameter tasks
 * - current is allowed to trace parameter tasks
 *
 *
 * Copyright (C) 2007-2008 Intel Corporation.
 * Markus Metzger <markus.t.metzger@intel.com>, 2007-2008
 */

#include <asm/ds.h>

#include <linux/errno.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/kernel.h>


/*
 * The configuration for a particular DS hardware implementation.
 */
struct ds_configuration {
	/* the size of the DS structure in bytes */
	unsigned char sizeof_ds;
	/* the size of one pointer-typed field in the DS structure in bytes;
	   this covers the first 8 fields related to buffer management. */
	unsigned char sizeof_field;
	/* the size of a BTS/PEBS record in bytes */
	unsigned char sizeof_rec[2];
};
static struct ds_configuration ds_cfg;


/*
 * A BTS or PEBS tracer.
 *
 * This holds the configuration of the tracer and serves as a handle
 * to identify tracers.
 */
struct ds_tracer {
	/* the DS context (partially) owned by this tracer */
	struct ds_context *context;
	/* the buffer provided on ds_request() and its size in bytes */
	void *buffer;
	size_t size;
	/* the number of allocated pages for on-request allocated buffers */
	unsigned int pages;
};

struct bts_tracer {
	/* the common DS part */
	struct ds_tracer ds;
	/* buffer overflow notification function */
	bts_ovfl_callback_t ovfl;
};

struct pebs_tracer {
	/* the common DS part */
	struct ds_tracer ds;
	/* buffer overflow notification function */
	pebs_ovfl_callback_t ovfl;
};
/*
 * Debug Store (DS) save area configuration (see Intel 64 and IA-32
 * Architectures Software Developer's Manual, section 18.5)
 *
 * The DS configuration consists of the following fields; different
 * architectures vary in the size of those fields.
 * - double-word aligned base linear address of the BTS buffer
 * - write pointer into the BTS buffer
 * - end linear address of the BTS buffer (one byte beyond the end of
 *   the buffer)
 * - interrupt pointer into BTS buffer
 *   (interrupt occurs when write pointer passes interrupt pointer)
 * - double-word aligned base linear address of the PEBS buffer
 * - write pointer into the PEBS buffer
 * - end linear address of the PEBS buffer (one byte beyond the end of
 *   the buffer)
 * - interrupt pointer into PEBS buffer
 *   (interrupt occurs when write pointer passes interrupt pointer)
 * - value to which counter is reset following counter overflow
 *
 * Later architectures use 64bit pointers throughout, whereas earlier
 * architectures use 32bit pointers in 32bit mode.
 *
 *
 * We compute the base address for the first 8 fields based on:
 * - the field size stored in the DS configuration
 * - the relative field position
 * - an offset giving the start of the respective region
 *
 * This offset is further used to index various arrays holding
 * information for BTS and PEBS at the respective index.
 *
 * On later 32bit processors, we only access the lower 32bit of the
 * 64bit pointer fields. The upper halves will be zeroed out.
 */
enum ds_field {
	ds_buffer_base = 0,
	ds_index,
	ds_absolute_maximum,
	ds_interrupt_threshold,
};

enum ds_qualifier {
	ds_bts = 0,
	ds_pebs
};

static inline unsigned long ds_get(const unsigned char *base,
				   enum ds_qualifier qual, enum ds_field field)
{
	base += (ds_cfg.sizeof_field * (field + (4 * qual)));
	return *(unsigned long *)base;
}

static inline void ds_set(unsigned char *base, enum ds_qualifier qual,
			  enum ds_field field, unsigned long value)
{
	base += (ds_cfg.sizeof_field * (field + (4 * qual)));
	(*(unsigned long *)base) = value;
}
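
/*
 * Worked example (illustrative only, not part of the original code):
 * with the 64bit layout (sizeof_field == 8), the PEBS write pointer is
 * read from offset 8 * (ds_index + 4 * ds_pebs) == 8 * 5 == 40 bytes
 * into the DS save area, e.g.:
 *
 *	unsigned long pebs_index = ds_get(context->ds, ds_pebs, ds_index);
 */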
#define DS_ALIGNMENT (1 << 3)	/* BTS and PEBS buffer alignment */

/*
 * Locking is done only for allocating BTS or PEBS resources and for
 * guarding context and buffer memory allocation.
 */
static spinlock_t ds_lock = __SPIN_LOCK_UNLOCKED(ds_lock);

/*
 * We either support (system-wide) per-cpu or per-thread allocation.
 * We distinguish the two based on the task_struct pointer, where a
 * NULL pointer indicates per-cpu allocation for the current cpu.
 *
 * Allocations are use-counted. As soon as resources are allocated,
 * further allocations must be of the same type (per-cpu or
 * per-thread). We model this by counting allocations (i.e. the number
 * of tracers of a certain type) for one type negatively:
 *   =0  no tracers
 *   >0  number of per-thread tracers
 *   <0  number of per-cpu tracers
 *
 * The below functions to get and put tracers and to check the
 * allocation type require the ds_lock to be held by the caller.
 *
 * Tracers essentially give the number of ds contexts for a certain
 * type of allocation.
 */
static long tracers;

static inline void get_tracer(struct task_struct *task)
{
	tracers += (task ? 1 : -1);
}

static inline void put_tracer(struct task_struct *task)
{
	tracers -= (task ? 1 : -1);
}

static inline int check_tracer(struct task_struct *task)
{
	return (task ? (tracers >= 0) : (tracers <= 0));
}
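
/*
 * Illustration of the sign convention (not part of the original code):
 * after two successful per-thread requests, tracers == 2; a per-cpu
 * request then fails check_tracer(NULL) since 2 > 0, and is rejected
 * until both per-thread tracers have been put again.
 */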
/*
 * The DS context is either attached to a thread or to a cpu:
 * - in the former case, the thread_struct contains a pointer to the
 *   attached context.
 * - in the latter case, we use a static array of per-cpu context
 *   pointers.
 *
 * Contexts are use-counted. They are allocated on first access and
 * deallocated when the last user puts the context.
 */
static DEFINE_PER_CPU(struct ds_context *, system_context);

#define this_system_context per_cpu(system_context, smp_processor_id())

static inline struct ds_context *ds_get_context(struct task_struct *task)
{
	struct ds_context **p_context =
		(task ? &task->thread.ds_ctx : &this_system_context);
	struct ds_context *context = *p_context;
	unsigned long irq;

	if (!context) {
		context = kzalloc(sizeof(*context), GFP_KERNEL);
		if (!context)
			return NULL;

		spin_lock_irqsave(&ds_lock, irq);

		if (*p_context) {
			kfree(context);

			context = *p_context;
		} else {
			*p_context = context;

			context->this = p_context;
			context->task = task;

			if (task)
				set_tsk_thread_flag(task, TIF_DS_AREA_MSR);

			if (!task || (task == current))
				wrmsrl(MSR_IA32_DS_AREA,
				       (unsigned long)context->ds);
		}

		spin_unlock_irqrestore(&ds_lock, irq);
	}

	context->count++;

	return context;
}

static inline void ds_put_context(struct ds_context *context)
{
	unsigned long irq;

	if (!context)
		return;

	spin_lock_irqsave(&ds_lock, irq);

	if (--context->count)
		goto out;

	*(context->this) = NULL;

	if (context->task)
		clear_tsk_thread_flag(context->task, TIF_DS_AREA_MSR);

	if (!context->task || (context->task == current))
		wrmsrl(MSR_IA32_DS_AREA, 0);

	kfree(context);
 out:
	spin_unlock_irqrestore(&ds_lock, irq);
}
/*
 * Handle a buffer overflow
 *
 * context: the ds context
 * qual: the buffer type
 */
static void ds_overflow(struct ds_context *context, enum ds_qualifier qual)
{
	switch (qual) {
	case ds_bts: {
		struct bts_tracer *tracer =
			container_of(context->owner[qual],
				     struct bts_tracer, ds);
		if (tracer->ovfl)
			tracer->ovfl(tracer);
	}
		break;
	case ds_pebs: {
		struct pebs_tracer *tracer =
			container_of(context->owner[qual],
				     struct pebs_tracer, ds);
		if (tracer->ovfl)
			tracer->ovfl(tracer);
	}
		break;
	}
}


/*
 * Allocate a non-pageable buffer of the parameter size.
 * Checks the memory and the locked memory rlimit.
 *
 * Returns the buffer, if successful;
 *         NULL, if out of memory or rlimit exceeded.
 *
 * size: the requested buffer size in bytes
 * pages (out): if not NULL, contains the number of pages reserved
 */
static inline void *ds_allocate_buffer(size_t size, unsigned int *pages)
{
	unsigned long rlim, vm, pgsz;
	void *buffer = NULL;

	pgsz = PAGE_ALIGN(size) >> PAGE_SHIFT;

	down_write(&current->mm->mmap_sem);

	rlim = current->signal->rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT;
	vm   = current->mm->total_vm + pgsz;
	if (rlim < vm)
		goto out;

	rlim = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT;
	vm   = current->mm->locked_vm + pgsz;
	if (rlim < vm)
		goto out;

	buffer = kzalloc(size, GFP_KERNEL);
	if (!buffer)
		goto out;

	current->mm->total_vm  += pgsz;
	current->mm->locked_vm += pgsz;

	if (pages)
		*pages = pgsz;

 out:
	up_write(&current->mm->mmap_sem);
	return buffer;
}
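
/*
 * Example (illustrative only, assuming 4 KB pages): a request for
 * size == 3000 bytes reserves pgsz == 1 page against both RLIMIT_AS and
 * RLIMIT_MEMLOCK, even though the buffer itself comes from kzalloc()
 * rather than a mapping; ds_release() below undoes the same accounting.
 */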
static void ds_install_ds_config(struct ds_context *context,
				 enum ds_qualifier qual,
				 void *base, size_t size, size_t ith)
{
	unsigned long buffer, adj;

	/* adjust the buffer address and size to meet alignment
	 * constraints:
	 * - buffer is double-word aligned
	 * - size is multiple of record size
	 *
	 * We checked the size at the very beginning; we have enough
	 * space to do the adjustment.
	 */
	buffer = (unsigned long)base;

	adj = ALIGN(buffer, DS_ALIGNMENT) - buffer;
	buffer += adj;
	size   -= adj;

	size /= ds_cfg.sizeof_rec[qual];
	size *= ds_cfg.sizeof_rec[qual];

	ds_set(context->ds, qual, ds_buffer_base, buffer);
	ds_set(context->ds, qual, ds_index, buffer);
	ds_set(context->ds, qual, ds_absolute_maximum, buffer + size);

	/* The value for 'no threshold' is -1, which will set the
	 * threshold outside of the buffer, just like we want it.
	 */
	ds_set(context->ds, qual,
	       ds_interrupt_threshold, buffer + size - ith);
}
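
/*
 * Worked example (illustrative only): for a BTS request with the 64bit
 * record size of 24 bytes, base == 0x1004 and size == 100, the buffer is
 * aligned up to 0x1008 (adj == 4), the remaining 96 bytes are truncated
 * to 96 / 24 * 24 == 96 bytes, i.e. room for exactly 4 records.
 */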
static int ds_request(struct ds_tracer *tracer, enum ds_qualifier qual,
		      struct task_struct *task,
		      void *base, size_t size, size_t th)
{
	struct ds_context *context;
	unsigned long irq;
	int error;

	error = -EOPNOTSUPP;
	if (!ds_cfg.sizeof_ds)
		goto out;

	/* we require some space to do alignment adjustments below */
	error = -EINVAL;
	if (size < (DS_ALIGNMENT + ds_cfg.sizeof_rec[qual]))
		goto out;

	if (th != (size_t)-1) {
		th *= ds_cfg.sizeof_rec[qual];

		error = -EINVAL;
		if (size <= th)
			goto out;
	}

	error = -ENOMEM;
	if (!base) {
		base = ds_allocate_buffer(size, &tracer->pages);
		if (!base)
			goto out;
	}

	tracer->buffer = base;
	tracer->size = size;

	error = -ENOMEM;
	context = ds_get_context(task);
	if (!context)
		goto out;
	tracer->context = context;

	spin_lock_irqsave(&ds_lock, irq);

	error = -EPERM;
	if (!check_tracer(task))
		goto out_unlock;
	get_tracer(task);

	error = -EPERM;
	if (context->owner[qual])
		goto out_put_tracer;
	context->owner[qual] = tracer;

	spin_unlock_irqrestore(&ds_lock, irq);


	ds_install_ds_config(context, qual, base, size, th);

	return 0;

 out_put_tracer:
	put_tracer(task);
 out_unlock:
	spin_unlock_irqrestore(&ds_lock, irq);
	ds_put_context(context);
	tracer->context = NULL;
 out:
	return error;
}
struct bts_tracer *ds_request_bts(struct task_struct *task,
				  void *base, size_t size,
				  bts_ovfl_callback_t ovfl, size_t th)
{
	struct bts_tracer *tracer;
	int error;

	/* buffer overflow notification is not yet implemented */
	error = -EOPNOTSUPP;
	if (ovfl)
		goto out;

	error = -ENOMEM;
	tracer = kzalloc(sizeof(*tracer), GFP_KERNEL);
	if (!tracer)
		goto out;
	tracer->ovfl = ovfl;

	error = ds_request(&tracer->ds, ds_bts, task, base, size, th);
	if (error < 0)
		goto out_tracer;

	return tracer;

 out_tracer:
	(void)ds_release_bts(tracer);
 out:
	return ERR_PTR(error);
}
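
/*
 * Typical call sequence (a sketch, not part of the original code):
 * 'task' stands for whatever thread the caller is allowed to trace
 * (or NULL for the current cpu), the 4096 byte size is made up, and
 * consume_record() is a hypothetical placeholder for the caller's
 * record handling.
 *
 *	struct bts_tracer *tracer;
 *	const void *rec;
 *	size_t end, i;
 *
 *	tracer = ds_request_bts(task, NULL, 4096, NULL, (size_t)-1);
 *	if (IS_ERR(tracer))
 *		return PTR_ERR(tracer);
 *	ds_get_bts_end(tracer, &end);
 *	for (i = 0; i < end; i++)
 *		if (ds_access_bts(tracer, i, &rec) > 0)
 *			consume_record(rec);
 *	ds_release_bts(tracer);
 */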
struct pebs_tracer *ds_request_pebs(struct task_struct *task,
				    void *base, size_t size,
				    pebs_ovfl_callback_t ovfl, size_t th)
{
	struct pebs_tracer *tracer;
	int error;

	/* buffer overflow notification is not yet implemented */
	error = -EOPNOTSUPP;
	if (ovfl)
		goto out;

	error = -ENOMEM;
	tracer = kzalloc(sizeof(*tracer), GFP_KERNEL);
	if (!tracer)
		goto out;
	tracer->ovfl = ovfl;

	error = ds_request(&tracer->ds, ds_pebs, task, base, size, th);
	if (error < 0)
		goto out_tracer;

	return tracer;

 out_tracer:
	(void)ds_release_pebs(tracer);
 out:
	return ERR_PTR(error);
}

static void ds_release(struct ds_tracer *tracer, enum ds_qualifier qual)
{
	if (tracer->context) {
		BUG_ON(tracer->context->owner[qual] != tracer);
		tracer->context->owner[qual] = NULL;

		put_tracer(tracer->context->task);
		ds_put_context(tracer->context);
	}

	if (tracer->pages) {
		kfree(tracer->buffer);

		down_write(&current->mm->mmap_sem);

		current->mm->total_vm  -= tracer->pages;
		current->mm->locked_vm -= tracer->pages;

		up_write(&current->mm->mmap_sem);
	}
}

int ds_release_bts(struct bts_tracer *tracer)
{
	if (!tracer)
		return -EINVAL;

	ds_release(&tracer->ds, ds_bts);
	kfree(tracer);

	return 0;
}

int ds_release_pebs(struct pebs_tracer *tracer)
{
	if (!tracer)
		return -EINVAL;

	ds_release(&tracer->ds, ds_pebs);
	kfree(tracer);

	return 0;
}
static size_t ds_get_index(struct ds_context *context, enum ds_qualifier qual)
{
	unsigned long base, index;

	base  = ds_get(context->ds, qual, ds_buffer_base);
	index = ds_get(context->ds, qual, ds_index);

	return (index - base) / ds_cfg.sizeof_rec[qual];
}

int ds_get_bts_index(struct bts_tracer *tracer, size_t *pos)
{
	if (!tracer)
		return -EINVAL;

	if (!pos)
		return -EINVAL;

	*pos = ds_get_index(tracer->ds.context, ds_bts);

	return 0;
}

int ds_get_pebs_index(struct pebs_tracer *tracer, size_t *pos)
{
	if (!tracer)
		return -EINVAL;

	if (!pos)
		return -EINVAL;

	*pos = ds_get_index(tracer->ds.context, ds_pebs);

	return 0;
}

static size_t ds_get_end(struct ds_context *context, enum ds_qualifier qual)
{
	unsigned long base, max;

	base = ds_get(context->ds, qual, ds_buffer_base);
	max  = ds_get(context->ds, qual, ds_absolute_maximum);

	return (max - base) / ds_cfg.sizeof_rec[qual];
}

int ds_get_bts_end(struct bts_tracer *tracer, size_t *pos)
{
	if (!tracer)
		return -EINVAL;

	if (!pos)
		return -EINVAL;

	*pos = ds_get_end(tracer->ds.context, ds_bts);

	return 0;
}

int ds_get_pebs_end(struct pebs_tracer *tracer, size_t *pos)
{
	if (!tracer)
		return -EINVAL;

	if (!pos)
		return -EINVAL;

	*pos = ds_get_end(tracer->ds.context, ds_pebs);

	return 0;
}

static int ds_access(struct ds_context *context, enum ds_qualifier qual,
		     size_t index, const void **record)
{
	unsigned long base, idx;

	if (!record)
		return -EINVAL;

	base = ds_get(context->ds, qual, ds_buffer_base);
	idx = base + (index * ds_cfg.sizeof_rec[qual]);

	if (idx > ds_get(context->ds, qual, ds_absolute_maximum))
		return -EINVAL;

	*record = (const void *)idx;

	return ds_cfg.sizeof_rec[qual];
}

int ds_access_bts(struct bts_tracer *tracer, size_t index,
		  const void **record)
{
	if (!tracer)
		return -EINVAL;

	return ds_access(tracer->ds.context, ds_bts, index, record);
}

int ds_access_pebs(struct pebs_tracer *tracer, size_t index,
		   const void **record)
{
	if (!tracer)
		return -EINVAL;

	return ds_access(tracer->ds.context, ds_pebs, index, record);
}
static int ds_write(struct ds_context *context, enum ds_qualifier qual,
		    const void *record, size_t size)
{
	int bytes_written = 0;

	if (!record)
		return -EINVAL;

	while (size) {
		unsigned long base, index, end, write_end, int_th;
		unsigned long write_size, adj_write_size;

		/*
		 * write as much as possible without producing an
		 * overflow interrupt.
		 *
		 * interrupt_threshold must either be
		 * - bigger than absolute_maximum or
		 * - point to a record between buffer_base and absolute_maximum
		 *
		 * index points to a valid record.
		 */
		base   = ds_get(context->ds, qual, ds_buffer_base);
		index  = ds_get(context->ds, qual, ds_index);
		end    = ds_get(context->ds, qual, ds_absolute_maximum);
		int_th = ds_get(context->ds, qual, ds_interrupt_threshold);

		write_end = min(end, int_th);

		/* if we are already beyond the interrupt threshold,
		 * we fill the entire buffer */
		if (write_end <= index)
			write_end = end;

		if (write_end <= index)
			break;

		write_size = min((unsigned long) size, write_end - index);
		memcpy((void *)index, record, write_size);

		record = (const char *)record + write_size;
		size -= write_size;
		bytes_written += write_size;

		adj_write_size = write_size / ds_cfg.sizeof_rec[qual];
		adj_write_size *= ds_cfg.sizeof_rec[qual];

		/* zero out trailing bytes */
		memset((char *)index + write_size, 0,
		       adj_write_size - write_size);
		index += adj_write_size;

		if (index >= end)
			index = base;
		ds_set(context->ds, qual, ds_index, index);

		if (index >= int_th)
			ds_overflow(context, qual);
	}

	return bytes_written;
}
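
/*
 * Illustration (not part of the original code, assuming no interrupt
 * threshold was set): with 24 byte BTS records and ds_index one record
 * short of absolute_maximum, a 48 byte ds_write_bts() copies one record,
 * wraps ds_index back to buffer_base, stores the second record at the
 * start of the buffer on the next loop iteration, and returns 48.
 */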
int ds_write_bts(struct bts_tracer *tracer, const void *record, size_t size)
{
	if (!tracer)
		return -EINVAL;

	return ds_write(tracer->ds.context, ds_bts, record, size);
}

int ds_write_pebs(struct pebs_tracer *tracer, const void *record, size_t size)
{
	if (!tracer)
		return -EINVAL;

	return ds_write(tracer->ds.context, ds_pebs, record, size);
}

static void ds_reset_or_clear(struct ds_context *context,
			      enum ds_qualifier qual, int clear)
{
	unsigned long base, end;

	base = ds_get(context->ds, qual, ds_buffer_base);
	end  = ds_get(context->ds, qual, ds_absolute_maximum);

	if (clear)
		memset((void *)base, 0, end - base);

	ds_set(context->ds, qual, ds_index, base);
}

int ds_reset_bts(struct bts_tracer *tracer)
{
	if (!tracer)
		return -EINVAL;

	ds_reset_or_clear(tracer->ds.context, ds_bts, /* clear = */ 0);

	return 0;
}

int ds_reset_pebs(struct pebs_tracer *tracer)
{
	if (!tracer)
		return -EINVAL;

	ds_reset_or_clear(tracer->ds.context, ds_pebs, /* clear = */ 0);

	return 0;
}

int ds_clear_bts(struct bts_tracer *tracer)
{
	if (!tracer)
		return -EINVAL;

	ds_reset_or_clear(tracer->ds.context, ds_bts, /* clear = */ 1);

	return 0;
}

int ds_clear_pebs(struct pebs_tracer *tracer)
{
	if (!tracer)
		return -EINVAL;

	ds_reset_or_clear(tracer->ds.context, ds_pebs, /* clear = */ 1);

	return 0;
}

int ds_get_pebs_reset(struct pebs_tracer *tracer, u64 *value)
{
	if (!tracer)
		return -EINVAL;

	if (!value)
		return -EINVAL;

	*value = *(u64 *)(tracer->ds.context->ds + (ds_cfg.sizeof_field * 8));

	return 0;
}

int ds_set_pebs_reset(struct pebs_tracer *tracer, u64 value)
{
	if (!tracer)
		return -EINVAL;

	*(u64 *)(tracer->ds.context->ds + (ds_cfg.sizeof_field * 8)) = value;

	return 0;
}
static const struct ds_configuration ds_cfg_var = {
	.sizeof_ds    = sizeof(long) * 12,
	.sizeof_field = sizeof(long),
	.sizeof_rec[ds_bts]  = sizeof(long) * 3,
#ifdef __i386__
	.sizeof_rec[ds_pebs] = sizeof(long) * 10
#else
	.sizeof_rec[ds_pebs] = sizeof(long) * 18
#endif
};
static const struct ds_configuration ds_cfg_64 = {
	.sizeof_ds    = 8 * 12,
	.sizeof_field = 8,
	.sizeof_rec[ds_bts]  = 8 * 3,
#ifdef __i386__
	.sizeof_rec[ds_pebs] = 8 * 10
#else
	.sizeof_rec[ds_pebs] = 8 * 18
#endif
};
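
/*
 * For reference (illustrative, not part of the original code): with the
 * ds_cfg_64 layout the DS save area occupies 12 fields * 8 bytes == 96
 * bytes, a BTS record is 24 bytes, and a 64bit PEBS record is 144 bytes;
 * ds_configure() below checks the DS area size against MAX_SIZEOF_DS.
 */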
static inline void
ds_configure(const struct ds_configuration *cfg)
{
	ds_cfg = *cfg;

	printk(KERN_INFO "DS available\n");

	BUG_ON(MAX_SIZEOF_DS < ds_cfg.sizeof_ds);
}

void __cpuinit ds_init_intel(struct cpuinfo_x86 *c)
{
	switch (c->x86) {
	case 0x6:
		switch (c->x86_model) {
		case 0 ... 0xC:
			/* sorry, don't know about them */
			break;
		case 0xD:
		case 0xE: /* Pentium M */
			ds_configure(&ds_cfg_var);
			break;
		default: /* Core2, Atom, ... */
			ds_configure(&ds_cfg_64);
			break;
		}
		break;
	case 0xF:
		switch (c->x86_model) {
		case 0x0:
		case 0x1:
		case 0x2: /* Netburst */
			ds_configure(&ds_cfg_var);
			break;
		default:
			/* sorry, don't know about them */
			break;
		}
		break;
	default:
		/* sorry, don't know about them */
		break;
	}
}

void ds_free(struct ds_context *context)
{
	/* This is called when the task owning the parameter context
	 * is dying. There should not be any user of that context left
	 * to disturb us, anymore. */
	unsigned long leftovers = context->count;
	while (leftovers--) {
		put_tracer(context->task);
		ds_put_context(context);
	}
}