@@ -10,6 +10,192 @@ static cpumask_t uncore_cpu_mask;
 static struct event_constraint constraint_fixed =
	EVENT_CONSTRAINT(~0ULL, 1 << UNCORE_PMC_IDX_FIXED, ~0ULL);
 
+DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
+DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
+DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
+DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
+DEFINE_UNCORE_FORMAT_ATTR(cmask5, cmask, "config:24-28");
+DEFINE_UNCORE_FORMAT_ATTR(cmask8, cmask, "config:24-31");
+
+/* Sandy Bridge uncore support */
+static void snb_uncore_msr_enable_event(struct intel_uncore_box *box,
+					struct perf_event *event)
+{
+	struct hw_perf_event *hwc = &event->hw;
+
+	if (hwc->idx < UNCORE_PMC_IDX_FIXED)
+		wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
+	else
+		wrmsrl(hwc->config_base, SNB_UNC_CTL_EN);
+}
+
+static void snb_uncore_msr_disable_event(struct intel_uncore_box *box,
+					struct perf_event *event)
+{
+	wrmsrl(event->hw.config_base, 0);
+}
+
+static u64 snb_uncore_msr_read_counter(struct intel_uncore_box *box,
+					struct perf_event *event)
+{
+	u64 count;
+	rdmsrl(event->hw.event_base, count);
+	return count;
+}
+
+static void snb_uncore_msr_init_box(struct intel_uncore_box *box)
+{
+	if (box->pmu->pmu_idx == 0) {
+		wrmsrl(SNB_UNC_PERF_GLOBAL_CTL,
+			SNB_UNC_GLOBAL_CTL_EN | SNB_UNC_GLOBAL_CTL_CORE_ALL);
+	}
+}
+
+static struct attribute *snb_uncore_formats_attr[] = {
+	&format_attr_event.attr,
+	&format_attr_umask.attr,
+	&format_attr_edge.attr,
+	&format_attr_inv.attr,
+	&format_attr_cmask5.attr,
+	NULL,
+};
+
+static struct attribute_group snb_uncore_format_group = {
+	.name = "format",
+	.attrs = snb_uncore_formats_attr,
+};
+
+static struct intel_uncore_ops snb_uncore_msr_ops = {
+	.init_box	= snb_uncore_msr_init_box,
+	.disable_event	= snb_uncore_msr_disable_event,
+	.enable_event	= snb_uncore_msr_enable_event,
+	.read_counter	= snb_uncore_msr_read_counter,
+};
+
+static struct event_constraint snb_uncore_cbox_constraints[] = {
+	UNCORE_EVENT_CONSTRAINT(0x80, 0x1),
+	UNCORE_EVENT_CONSTRAINT(0x83, 0x1),
+	EVENT_CONSTRAINT_END
+};
+
+static struct intel_uncore_type snb_uncore_cbox = {
+	.name		= "cbox",
+	.num_counters   = 2,
+	.num_boxes	= 4,
+	.perf_ctr_bits	= 44,
+	.fixed_ctr_bits	= 48,
+	.perf_ctr	= SNB_UNC_CBO_0_PER_CTR0,
+	.event_ctl	= SNB_UNC_CBO_0_PERFEVTSEL0,
+	.fixed_ctr	= SNB_UNC_FIXED_CTR,
+	.fixed_ctl	= SNB_UNC_FIXED_CTR_CTRL,
+	.single_fixed	= 1,
+	.event_mask	= SNB_UNC_RAW_EVENT_MASK,
+	.msr_offset	= SNB_UNC_CBO_MSR_OFFSET,
+	.constraints	= snb_uncore_cbox_constraints,
+	.ops		= &snb_uncore_msr_ops,
+	.format_group	= &snb_uncore_format_group,
+};
+
+static struct intel_uncore_type *snb_msr_uncores[] = {
+	&snb_uncore_cbox,
+	NULL,
+};
+/* end of Sandy Bridge uncore support */
+
+/* Nehalem uncore support */
+static void nhm_uncore_msr_disable_box(struct intel_uncore_box *box)
+{
+	wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, 0);
+}
+
+static void nhm_uncore_msr_enable_box(struct intel_uncore_box *box)
+{
+	wrmsrl(NHM_UNC_PERF_GLOBAL_CTL,
+		NHM_UNC_GLOBAL_CTL_EN_PC_ALL | NHM_UNC_GLOBAL_CTL_EN_FC);
+}
+
+static void nhm_uncore_msr_enable_event(struct intel_uncore_box *box,
+					struct perf_event *event)
+{
+	struct hw_perf_event *hwc = &event->hw;
+
+	if (hwc->idx < UNCORE_PMC_IDX_FIXED)
+		wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
+	else
+		wrmsrl(hwc->config_base, NHM_UNC_FIXED_CTR_CTL_EN);
+}
+
+static struct attribute *nhm_uncore_formats_attr[] = {
+	&format_attr_event.attr,
+	&format_attr_umask.attr,
+	&format_attr_edge.attr,
+	&format_attr_inv.attr,
+	&format_attr_cmask8.attr,
+	NULL,
+};
+
+static struct attribute_group nhm_uncore_format_group = {
+	.name = "format",
+	.attrs = nhm_uncore_formats_attr,
+};
+
+static struct uncore_event_desc nhm_uncore_events[] = {
+	INTEL_UNCORE_EVENT_DESC(CLOCKTICKS, "config=0xffff"),
+	/* full cache line writes to DRAM */
+	INTEL_UNCORE_EVENT_DESC(QMC_WRITES_FULL_ANY, "event=0x2f,umask=0xf"),
+	/* Quickpath Memory Controller normal priority read requests */
+	INTEL_UNCORE_EVENT_DESC(QMC_NORMAL_READS_ANY, "event=0x2c,umask=0xf"),
+	/* Quickpath Home Logic read requests from the IOH */
+	INTEL_UNCORE_EVENT_DESC(QHL_REQUEST_IOH_READS,
+				"event=0x20,umask=0x1"),
+	/* Quickpath Home Logic write requests from the IOH */
+	INTEL_UNCORE_EVENT_DESC(QHL_REQUEST_IOH_WRITES,
+				"event=0x20,umask=0x2"),
+	/* Quickpath Home Logic read requests from a remote socket */
+	INTEL_UNCORE_EVENT_DESC(QHL_REQUEST_REMOTE_READS,
+				"event=0x20,umask=0x4"),
+	/* Quickpath Home Logic write requests from a remote socket */
+	INTEL_UNCORE_EVENT_DESC(QHL_REQUEST_REMOTE_WRITES,
+				"event=0x20,umask=0x8"),
+	/* Quickpath Home Logic read requests from the local socket */
+	INTEL_UNCORE_EVENT_DESC(QHL_REQUEST_LOCAL_READS,
+				"event=0x20,umask=0x10"),
+	/* Quickpath Home Logic write requests from the local socket */
+	INTEL_UNCORE_EVENT_DESC(QHL_REQUEST_LOCAL_WRITES,
+				"event=0x20,umask=0x20"),
+	{ /* end: all zeroes */ },
+};
+
+static struct intel_uncore_ops nhm_uncore_msr_ops = {
+	.disable_box	= nhm_uncore_msr_disable_box,
+	.enable_box	= nhm_uncore_msr_enable_box,
+	.disable_event	= snb_uncore_msr_disable_event,
+	.enable_event	= nhm_uncore_msr_enable_event,
+	.read_counter	= snb_uncore_msr_read_counter,
+};
+
+static struct intel_uncore_type nhm_uncore = {
+	.name		= "",
+	.num_counters   = 8,
+	.num_boxes	= 1,
+	.perf_ctr_bits	= 48,
+	.fixed_ctr_bits	= 48,
+	.event_ctl	= NHM_UNC_PERFEVTSEL0,
+	.perf_ctr	= NHM_UNC_UNCORE_PMC0,
+	.fixed_ctr	= NHM_UNC_FIXED_CTR,
+	.fixed_ctl	= NHM_UNC_FIXED_CTR_CTRL,
+	.event_mask	= NHM_UNC_RAW_EVENT_MASK,
+	.event_descs	= nhm_uncore_events,
+	.ops		= &nhm_uncore_msr_ops,
+	.format_group	= &nhm_uncore_format_group,
+};
+
+static struct intel_uncore_type *nhm_msr_uncores[] = {
+	&nhm_uncore,
+	NULL,
+};
+/* end of Nehalem uncore support */
+
 static void uncore_assign_hw_event(struct intel_uncore_box *box,
				struct perf_event *event, int idx)
 {
@@ -808,6 +994,15 @@ static int __init uncore_cpu_init(void)
	int ret, cpu;
 
	switch (boot_cpu_data.x86_model) {
+	case 26: /* Nehalem */
+	case 30:
+	case 37: /* Westmere */
+	case 44:
+		msr_uncores = nhm_msr_uncores;
+		break;
+	case 42: /* Sandy Bridge */
+		msr_uncores = snb_msr_uncores;
+		break;
	default:
		return 0;
	}