Diffstat (limited to 'arch/x86/events/intel')
-rw-r--r--  arch/x86/events/intel/core.c          16
-rw-r--r--  arch/x86/events/intel/cstate.c         2
-rw-r--r--  arch/x86/events/intel/ds.c            56
-rw-r--r--  arch/x86/events/intel/uncore_snbep.c  12
4 files changed, 60 insertions, 26 deletions
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index a3fb996a86a1..070cc4ef2672 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -5470,6 +5470,15 @@ pebs_is_visible(struct kobject *kobj, struct attribute *attr, int i)
 }
 
 static umode_t
+mem_is_visible(struct kobject *kobj, struct attribute *attr, int i)
+{
+	if (attr == &event_attr_mem_ld_aux.attr.attr)
+		return x86_pmu.flags & PMU_FL_MEM_LOADS_AUX ? attr->mode : 0;
+
+	return pebs_is_visible(kobj, attr, i);
+}
+
+static umode_t
 lbr_is_visible(struct kobject *kobj, struct attribute *attr, int i)
 {
 	return x86_pmu.lbr_nr ? attr->mode : 0;
@@ -5496,7 +5505,7 @@ static struct attribute_group group_events_td  = {
 
 static struct attribute_group group_events_mem = {
 	.name       = "events",
-	.is_visible = pebs_is_visible,
+	.is_visible = mem_is_visible,
 };
 
 static struct attribute_group group_events_tsx = {
@@ -6486,6 +6495,10 @@ __init int intel_pmu_init(void)
 
 	case INTEL_FAM6_SAPPHIRERAPIDS_X:
 	case INTEL_FAM6_EMERALDRAPIDS_X:
+		x86_pmu.flags |= PMU_FL_MEM_LOADS_AUX;
+		fallthrough;
+	case INTEL_FAM6_GRANITERAPIDS_X:
+	case INTEL_FAM6_GRANITERAPIDS_D:
 		pmem = true;
 		x86_pmu.late_ack = true;
 		memcpy(hw_cache_event_ids, spr_hw_cache_event_ids, sizeof(hw_cache_event_ids));
@@ -6502,7 +6515,6 @@ __init int intel_pmu_init(void)
 		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
 		x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
 		x86_pmu.flags |= PMU_FL_INSTR_LATENCY;
-		x86_pmu.flags |= PMU_FL_MEM_LOADS_AUX;
 
 		x86_pmu.hw_config = hsw_hw_config;
 		x86_pmu.get_event_constraints = spr_get_event_constraints;
diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c
index 551741e79e03..835862c548cc 100644
--- a/arch/x86/events/intel/cstate.c
+++ b/arch/x86/events/intel/cstate.c
@@ -678,6 +678,8 @@ static const struct x86_cpu_id intel_cstates_match[] __initconst = {
 	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D,		&icx_cstates),
 	X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X,	&icx_cstates),
 	X86_MATCH_INTEL_FAM6_MODEL(EMERALDRAPIDS_X,	&icx_cstates),
+	X86_MATCH_INTEL_FAM6_MODEL(GRANITERAPIDS_X,	&icx_cstates),
+	X86_MATCH_INTEL_FAM6_MODEL(GRANITERAPIDS_D,	&icx_cstates),
 
 	X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L,		&icl_cstates),
 	X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE,		&icl_cstates),
diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
index a2e566e53076..df88576d6b2a 100644
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -1229,12 +1229,14 @@ pebs_update_state(bool needed_cb, struct cpu_hw_events *cpuc,
 		  struct perf_event *event, bool add)
 {
 	struct pmu *pmu = event->pmu;
+
 	/*
 	 * Make sure we get updated with the first PEBS
 	 * event. It will trigger also during removal, but
 	 * that does not hurt:
 	 */
-	bool update = cpuc->n_pebs == 1;
+	if (cpuc->n_pebs == 1)
+		cpuc->pebs_data_cfg = PEBS_UPDATE_DS_SW;
 
 	if (needed_cb != pebs_needs_sched_cb(cpuc)) {
 		if (!needed_cb)
@@ -1242,7 +1244,7 @@ pebs_update_state(bool needed_cb, struct cpu_hw_events *cpuc,
 		else
 			perf_sched_cb_dec(pmu);
 
-		update = true;
+		cpuc->pebs_data_cfg |= PEBS_UPDATE_DS_SW;
 	}
 
 	/*
@@ -1252,24 +1254,13 @@ pebs_update_state(bool needed_cb, struct cpu_hw_events *cpuc,
 	if (x86_pmu.intel_cap.pebs_baseline && add) {
 		u64 pebs_data_cfg;
 
-		/* Clear pebs_data_cfg and pebs_record_size for first PEBS. */
-		if (cpuc->n_pebs == 1) {
-			cpuc->pebs_data_cfg = 0;
-			cpuc->pebs_record_size = sizeof(struct pebs_basic);
-		}
-
 		pebs_data_cfg = pebs_update_adaptive_cfg(event);
-
-		/* Update pebs_record_size if new event requires more data. */
-		if (pebs_data_cfg & ~cpuc->pebs_data_cfg) {
-			cpuc->pebs_data_cfg |= pebs_data_cfg;
-			adaptive_pebs_record_size_update();
-			update = true;
-		}
+		/*
+		 * Be sure to update the thresholds when we change the record.
+		 */
+		if (pebs_data_cfg & ~cpuc->pebs_data_cfg)
+			cpuc->pebs_data_cfg |= pebs_data_cfg | PEBS_UPDATE_DS_SW;
 	}
-
-	if (update)
-		pebs_update_threshold(cpuc);
 }
 
 void intel_pmu_pebs_add(struct perf_event *event)
@@ -1326,9 +1317,17 @@ static void intel_pmu_pebs_via_pt_enable(struct perf_event *event)
 	wrmsrl(base + idx, value);
 }
 
+static inline void intel_pmu_drain_large_pebs(struct cpu_hw_events *cpuc)
+{
+	if (cpuc->n_pebs == cpuc->n_large_pebs &&
+	    cpuc->n_pebs != cpuc->n_pebs_via_pt)
+		intel_pmu_drain_pebs_buffer();
+}
+
 void intel_pmu_pebs_enable(struct perf_event *event)
 {
 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+	u64 pebs_data_cfg = cpuc->pebs_data_cfg & ~PEBS_UPDATE_DS_SW;
 	struct hw_perf_event *hwc = &event->hw;
 	struct debug_store *ds = cpuc->ds;
 	unsigned int idx = hwc->idx;
@@ -1344,11 +1343,22 @@ void intel_pmu_pebs_enable(struct perf_event *event)
 
 	if (x86_pmu.intel_cap.pebs_baseline) {
 		hwc->config |= ICL_EVENTSEL_ADAPTIVE;
-		if (cpuc->pebs_data_cfg != cpuc->active_pebs_data_cfg) {
-			wrmsrl(MSR_PEBS_DATA_CFG, cpuc->pebs_data_cfg);
-			cpuc->active_pebs_data_cfg = cpuc->pebs_data_cfg;
+		if (pebs_data_cfg != cpuc->active_pebs_data_cfg) {
+			/*
+			 * drain_pebs() assumes uniform record size;
+			 * hence we need to drain when changing said
+			 * size.
+			 */
+			intel_pmu_drain_large_pebs(cpuc);
+			adaptive_pebs_record_size_update();
+			wrmsrl(MSR_PEBS_DATA_CFG, pebs_data_cfg);
+			cpuc->active_pebs_data_cfg = pebs_data_cfg;
 		}
 	}
+	if (cpuc->pebs_data_cfg & PEBS_UPDATE_DS_SW) {
+		cpuc->pebs_data_cfg = pebs_data_cfg;
+		pebs_update_threshold(cpuc);
+	}
 
 	if (idx >= INTEL_PMC_IDX_FIXED) {
 		if (x86_pmu.intel_cap.pebs_format < 5)
@@ -1391,9 +1401,7 @@ void intel_pmu_pebs_disable(struct perf_event *event)
 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 	struct hw_perf_event *hwc = &event->hw;
 
-	if (cpuc->n_pebs == cpuc->n_large_pebs &&
-	    cpuc->n_pebs != cpuc->n_pebs_via_pt)
-		intel_pmu_drain_pebs_buffer();
+	intel_pmu_drain_large_pebs(cpuc);
 
 	cpuc->pebs_enabled &= ~(1ULL << hwc->idx);
 
diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c
index 7d1199554fe3..fa9b209a11fa 100644
--- a/arch/x86/events/intel/uncore_snbep.c
+++ b/arch/x86/events/intel/uncore_snbep.c
@@ -6068,6 +6068,17 @@ static struct intel_uncore_ops spr_uncore_mmio_ops = {
 	.read_counter		= uncore_mmio_read_counter,
 };
 
+static struct uncore_event_desc spr_uncore_imc_events[] = {
+	INTEL_UNCORE_EVENT_DESC(clockticks,      "event=0x01,umask=0x00"),
+	INTEL_UNCORE_EVENT_DESC(cas_count_read,  "event=0x05,umask=0xcf"),
+	INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
+	INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
+	INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x05,umask=0xf0"),
+	INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
+	INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
+	{ /* end: all zeroes */ },
+};
+
 static struct intel_uncore_type spr_uncore_imc = {
 	SPR_UNCORE_COMMON_FORMAT(),
 	.name			= "imc",
@@ -6075,6 +6086,7 @@ static struct intel_uncore_type spr_uncore_imc = {
 	.fixed_ctr		= SNR_IMC_MMIO_PMON_FIXED_CTR,
 	.fixed_ctl		= SNR_IMC_MMIO_PMON_FIXED_CTL,
 	.ops			= &spr_uncore_mmio_ops,
+	.event_descs		= spr_uncore_imc_events,
 };
 
 static void spr_uncore_pci_enable_event(struct intel_uncore_box *box,
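The ds.c hunks above all implement one idea: pebs_update_state() no longer calls pebs_update_threshold() directly, but records the request as PEBS_UPDATE_DS_SW, a software-only flag carried in cpuc->pebs_data_cfg. intel_pmu_pebs_enable() strips that flag before the value reaches MSR_PEBS_DATA_CFG, drains the large-PEBS buffer when the record size is about to change (drain_pebs() assumes a uniform record size), and only then recomputes the DS thresholds. Below is a minimal standalone sketch of that flag-folding pattern — demo names throughout, and the bit position is an assumption for illustration, not the kernel's actual value:

#include <stdint.h>
#include <stdio.h>

/* Software-only request bit parked in a config word whose other bits
 * belong to hardware; bit 63 is an assumed position for this demo. */
#define DEMO_UPDATE_DS_SW	(1ULL << 63)

static uint64_t active_cfg;	/* last value "written to the MSR" */

/* Stand-in for wrmsrl(MSR_PEBS_DATA_CFG, ...). */
static void demo_wrmsr(uint64_t cfg)
{
	printf("MSR write: %#018llx\n", (unsigned long long)cfg);
}

/* Mirrors the shape of intel_pmu_pebs_enable(): strip the software
 * bit, reprogram hardware only on a real change, then honour the
 * deferred threshold-update request. */
static void demo_enable(uint64_t *cfg)
{
	uint64_t hw_cfg = *cfg & ~DEMO_UPDATE_DS_SW;

	if (hw_cfg != active_cfg) {
		demo_wrmsr(hw_cfg);	/* hardware never sees the flag */
		active_cfg = hw_cfg;
	}

	if (*cfg & DEMO_UPDATE_DS_SW) {
		printf("recomputing DS thresholds\n");
		*cfg = hw_cfg;		/* clear the request */
	}
}

int main(void)
{
	/* First event: a hardware bit plus an update request. */
	uint64_t cfg = 0x1 | DEMO_UPDATE_DS_SW;

	demo_enable(&cfg);	/* writes the MSR, updates thresholds */
	demo_enable(&cfg);	/* no-op: nothing changed, no request */
	return 0;
}

The ordering is the point of the fix: the buffer is flushed while the old record layout still applies, instead of after MSR_PEBS_DATA_CFG has already switched it.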

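A note on the uncore hunk: the 6.103515625e-5 scale attached to cas_count_read and cas_count_write follows the convention of earlier server uncore drivers, where each CAS command moves one 64-byte cache line; 64 / 2^20 is exactly that constant, so a raw CAS count scales straight to MiB. A quick standalone check of the arithmetic (plain C, not kernel code):

#include <stdio.h>

int main(void)
{
	const double bytes_per_cas = 64.0;	/* one cache line per CAS */
	const double mib = 1024.0 * 1024.0;	/* bytes per MiB */

	printf("%.9e\n", bytes_per_cas / mib);	/* prints 6.103515625e-05 */
	return 0;
}

Once the event_descs are wired up, perf reads the scale and unit from sysfs, so a command along the lines of perf stat -e uncore_imc/cas_count_read/ should report MiB directly on kernels that carry this change.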