PAPI  5.7.0.0
perf_event_uncore.c File Reference
Include dependency graph for perf_event_uncore.c:

Go to the source code of this file.

Macros

#define PERF_EVENTS_OPENED   0x01
 
#define PERF_EVENTS_RUNNING   0x02
 
#define READ_BUFFER_SIZE   (3 + (2 * PERF_EVENT_MAX_MPX_COUNTERS))
 

Functions

static int _peu_set_domain (hwd_control_state_t *ctl, int domain)
 
static int _peu_shutdown_component (void)
 
static unsigned int get_read_format (unsigned int multiplex, unsigned int inherit, int format_group)
 
static long sys_perf_event_open (struct perf_event_attr *hw_event, pid_t pid, int cpu, int group_fd, unsigned long flags)
 
static int map_perf_event_errors_to_papi (int perf_event_error)
 
static int check_scheduability (pe_context_t *ctx, pe_control_t *ctl)
 
static int open_pe_events (pe_context_t *ctx, pe_control_t *ctl)
 
static int close_pe_events (pe_context_t *ctx, pe_control_t *ctl)
 
static int _peu_init_thread (hwd_context_t *hwd_ctx)
 
static int _peu_init_control_state (hwd_control_state_t *ctl)
 
static int _peu_init_component (int cidx)
 
int _peu_update_control_state (hwd_control_state_t *ctl, NativeInfo_t *native, int count, hwd_context_t *ctx)
 
static int _peu_shutdown_thread (hwd_context_t *ctx)
 
static int _peu_reset (hwd_context_t *ctx, hwd_control_state_t *ctl)
 
static int _peu_write (hwd_context_t *ctx, hwd_control_state_t *ctl, long long *from)
 
static int _peu_read (hwd_context_t *ctx, hwd_control_state_t *ctl, long long **events, int flags)
 
static int _peu_start (hwd_context_t *ctx, hwd_control_state_t *ctl)
 
static int _peu_stop (hwd_context_t *ctx, hwd_control_state_t *ctl)
 
static int _peu_ctl (hwd_context_t *ctx, int code, _papi_int_option_t *option)
 
static int _peu_ntv_enum_events (unsigned int *PapiEventCode, int modifier)
 
static int _peu_ntv_name_to_code (const char *name, unsigned int *event_code)
 
static int _peu_ntv_code_to_name (unsigned int EventCode, char *ntv_name, int len)
 
static int _peu_ntv_code_to_descr (unsigned int EventCode, char *ntv_descr, int len)
 
static int _peu_ntv_code_to_info (unsigned int EventCode, PAPI_event_info_t *info)
 

Variables

papi_vector_t _perf_event_uncore_vector
 
struct native_event_table_t uncore_native_event_table
 
static int our_cidx
 

Macro Definition Documentation

◆ PERF_EVENTS_OPENED

#define PERF_EVENTS_OPENED   0x01

Definition at line 60 of file perf_event_uncore.c.

◆ PERF_EVENTS_RUNNING

#define PERF_EVENTS_RUNNING   0x02

Definition at line 61 of file perf_event_uncore.c.

◆ READ_BUFFER_SIZE

#define READ_BUFFER_SIZE   (3 + (2 * PERF_EVENT_MAX_MPX_COUNTERS))

Definition at line 217 of file perf_event_uncore.c.

Function Documentation

◆ _peu_ctl()

static int _peu_ctl ( hwd_context_t ctx,
int  code,
_papi_int_option_t option 
)
static

Definition at line 1127 of file perf_event_uncore.c.

1128 {
1129  int ret;
1130  pe_context_t *pe_ctx = ( pe_context_t *) ctx;
1131  pe_control_t *pe_ctl = NULL;
1132 
1133  switch ( code ) {
1134  case PAPI_MULTIPLEX:
1135  pe_ctl = ( pe_control_t * ) ( option->multiplex.ESI->ctl_state );
1136 
1137  pe_ctl->multiplexed = 1;
1138  ret = _peu_update_control_state( pe_ctl, NULL,
1139  pe_ctl->num_events, pe_ctx );
1140  if (ret != PAPI_OK) {
1141  pe_ctl->multiplexed = 0;
1142  }
1143  return ret;
1144 
1145  case PAPI_ATTACH:
1146  pe_ctl = ( pe_control_t * ) ( option->attach.ESI->ctl_state );
1147 
1148  pe_ctl->tid = option->attach.tid;
1149 
1150  /* If events have already been added, something may */
1151  /* have been done to the kernel, so update */
1152  ret =_peu_update_control_state( pe_ctl, NULL,
1153  pe_ctl->num_events, pe_ctx);
1154 
1155  return ret;
1156 
1157  case PAPI_DETACH:
1158  pe_ctl = ( pe_control_t *) ( option->attach.ESI->ctl_state );
1159 
1160  pe_ctl->tid = 0;
1161  return PAPI_OK;
1162 
1163  case PAPI_CPU_ATTACH:
1164  pe_ctl = ( pe_control_t *) ( option->cpu.ESI->ctl_state );
1165 
1166  /* this tells the kernel not to count for a thread */
1167  /* should we warn if we try to set both? perf_event */
1168  /* will reject it. */
1169  pe_ctl->tid = -1;
1170 
1171  pe_ctl->cpu = option->cpu.cpu_num;
1172 
1173  return PAPI_OK;
1174 
1175  case PAPI_DOMAIN:
1176  pe_ctl = ( pe_control_t *) ( option->domain.ESI->ctl_state );
1177 
1178  /* looks like we are allowed, so set event set level counting domains */
1179  pe_ctl->domain = option->domain.domain;
1180  return PAPI_OK;
1181 
1182  case PAPI_GRANUL:
1183  pe_ctl = (pe_control_t *) ( option->granularity.ESI->ctl_state );
1184 
1185  /* FIXME: we really don't support this yet */
1186 
1187  switch ( option->granularity.granularity ) {
1188  case PAPI_GRN_PROCG:
1189  case PAPI_GRN_SYS_CPU:
1190  case PAPI_GRN_PROC:
1191  return PAPI_ECMP;
1192 
1193  /* Currently we only support thread and CPU granularity */
1194  case PAPI_GRN_SYS:
1195  pe_ctl->granularity=PAPI_GRN_SYS;
1196  break;
1197 
1198  case PAPI_GRN_THR:
1199  pe_ctl->granularity=PAPI_GRN_THR;
1200  break;
1201 
1202 
1203  default:
1204  return PAPI_EINVAL;
1205  }
1206  return PAPI_OK;
1207 
1208  case PAPI_INHERIT:
1209  pe_ctl = (pe_control_t *) ( option->inherit.ESI->ctl_state );
1210 
1211  if (option->inherit.inherit) {
1212  /* children will inherit counters */
1213  pe_ctl->inherit = 1;
1214  } else {
1215  /* children won't inherit counters */
1216  pe_ctl->inherit = 0;
1217  }
1218  return PAPI_OK;
1219 
1220  case PAPI_DATA_ADDRESS:
1221  return PAPI_ENOSUPP;
1222 
1223  case PAPI_INSTR_ADDRESS:
1224  return PAPI_ENOSUPP;
1225 
1226  case PAPI_DEF_ITIMER:
1227  return PAPI_ENOSUPP;
1228 
1229  case PAPI_DEF_MPX_NS:
1230  return PAPI_ENOSUPP;
1231 
1232  case PAPI_DEF_ITIMER_NS:
1233  return PAPI_ENOSUPP;
1234 
1235  default:
1236  return PAPI_ENOSUPP;
1237  }
1238 }
#define PAPI_OK
Definition: fpapi.h:105
_papi_int_inherit_t inherit
#define PAPI_GRANUL
Definition: fpapi.h:52
#define PAPI_CPU_ATTACH
Definition: papi.h:458
#define PAPI_EINVAL
Definition: fpapi.h:106
EventSetInfo_t * ESI
#define PAPI_GRN_THR
Definition: fpapi.h:67
unsigned int granularity
#define PAPI_DEF_ITIMER_NS
Definition: papi.h:456
EventSetInfo_t * ESI
#define PAPI_ENOSUPP
Definition: fpapi.h:123
#define PAPI_INSTR_ADDRESS
Definition: papi.h:454
#define PAPI_DATA_ADDRESS
Definition: papi.h:453
EventSetInfo_t * ESI
#define PAPI_MULTIPLEX
Definition: fpapi.h:48
#define PAPI_GRN_SYS
Definition: fpapi.h:71
#define PAPI_INHERIT
Definition: papi.h:459
unsigned int domain
#define PAPI_ECMP
Definition: fpapi.h:109
_papi_int_attach_t attach
unsigned long tid
#define PAPI_DEF_MPX_NS
Definition: fpapi.h:53
_papi_int_cpu_t cpu
_papi_int_granularity_t granularity
EventSetInfo_t * ESI
unsigned int multiplexed
long long ret
Definition: iozone.c:1346
EventSetInfo_t * ESI
_papi_int_multiplex_t multiplex
unsigned int cpu_num
#define PAPI_DEF_ITIMER
Definition: papi.h:455
#define PAPI_DETACH
Definition: fpapi.h:66
#define PAPI_ATTACH
Definition: fpapi.h:62
unsigned int inherit
#define PAPI_DOMAIN
Definition: fpapi.h:50
#define PAPI_GRN_SYS_CPU
Definition: fpapi.h:72
_papi_int_domain_t domain
EventSetInfo_t * ESI
hwd_control_state_t * ctl_state
int _peu_update_control_state(hwd_control_state_t *ctl, NativeInfo_t *native, int count, hwd_context_t *ctx)
#define PAPI_GRN_PROCG
Definition: fpapi.h:70
#define PAPI_GRN_PROC
Definition: fpapi.h:69
Here is the call graph for this function:

◆ _peu_init_component()

static int _peu_init_component ( int  cidx)
static

Definition at line 588 of file perf_event_uncore.c.

589 {
590 
591  int retval;
592  int paranoid_level;
593 
594  FILE *fff;
595 
596  our_cidx=cidx;
597 
598  /* This is the official way to detect if perf_event support exists */
599  /* The file is called perf_counter_paranoid on 2.6.31 */
600  /* currently we are lazy and do not support 2.6.31 kernels */
601 
602  fff=fopen("/proc/sys/kernel/perf_event_paranoid","r");
603  if (fff==NULL) {
604  strncpy(_papi_hwd[cidx]->cmp_info.disabled_reason,
605  "perf_event support not detected",PAPI_MAX_STR_LEN);
606  return PAPI_ENOCMP;
607  }
608  retval=fscanf(fff,"%d",&paranoid_level);
609  if (retval!=1) fprintf(stderr,"Error reading paranoid level\n");
610  fclose(fff);
611 
612 
613  /* Run the libpfm4-specific setup */
614 
616  if (retval) {
617  strncpy(_papi_hwd[cidx]->cmp_info.disabled_reason,
618  "Error initializing libpfm4",PAPI_MAX_STR_LEN);
620  return PAPI_ENOCMP;
621  }
622 
623 
624  /* Run the uncore specific libpfm4 setup */
625 
629  if (retval) {
630  strncpy(_papi_hwd[cidx]->cmp_info.disabled_reason,
631  "Error setting up libpfm4",PAPI_MAX_STR_LEN);
633  return PAPI_ENOCMP;
634  }
635 
636  /* Check if no uncore events found */
637 
638  if (_papi_hwd[cidx]->cmp_info.num_native_events==0) {
639  strncpy(_papi_hwd[cidx]->cmp_info.disabled_reason,
640  "No uncore PMUs or events found",PAPI_MAX_STR_LEN);
642  return PAPI_ENOCMP;
643  }
644 
645  /* Check if we have enough permissions for uncore */
646 
647  /* 2 means no kernel measurements allowed */
648  /* 1 means normal counter access */
649  /* 0 means you can access CPU-specific data */
650  /* -1 means no restrictions */
651 
652  if ((paranoid_level>0) && (getuid()!=0)) {
653  strncpy(_papi_hwd[cidx]->cmp_info.disabled_reason,
654  "Insufficient permissions for uncore access. Set /proc/sys/kernel/perf_event_paranoid to 0 or run as root.",
657  return PAPI_ENOCMP;
658  }
659 
660  return PAPI_OK;
661 
662 }
#define PAPI_OK
Definition: fpapi.h:105
static int our_cidx
int _papi_libpfm4_init(papi_vector_t *my_vector)
static int _peu_shutdown_component(void)
int retval
Definition: zero_fork.c:53
FILE * fff[MAX_EVENTS]
#define PAPI_ENOCMP
Definition: fpapi.h:122
static int cidx
struct native_event_table_t uncore_native_event_table
int _peu_libpfm4_init(papi_vector_t *my_vector, int cidx, struct native_event_table_t *event_table, int pmu_type)
#define PMU_TYPE_UNCORE
struct papi_vectors * _papi_hwd[]
#define PAPI_MAX_STR_LEN
Definition: fpapi.h:43
Here is the call graph for this function:

◆ _peu_init_control_state()

static int _peu_init_control_state ( hwd_control_state_t ctl)
static

Definition at line 563 of file perf_event_uncore.c.

564 {
565  pe_control_t *pe_ctl = ( pe_control_t *) ctl;
566 
567  /* clear the contents */
568  memset( pe_ctl, 0, sizeof ( pe_control_t ) );
569 
570  /* Set the default domain */
572 
573  /* Set the default granularity */
575 
576  pe_ctl->cidx=our_cidx;
577 
578  /* Set cpu number in the control block to show events */
579  /* are not tied to specific cpu */
580  pe_ctl->cpu = -1;
581  return PAPI_OK;
582 }
#define PAPI_OK
Definition: fpapi.h:105
static int our_cidx
unsigned int granularity
int default_granularity
Definition: papi.h:646
papi_vector_t _perf_event_uncore_vector
PAPI_component_info_t cmp_info
Definition: papi_vector.h:20
static int _peu_set_domain(hwd_control_state_t *ctl, int domain)
Here is the call graph for this function:

◆ _peu_init_thread()

static int _peu_init_thread ( hwd_context_t hwd_ctx)
static

Definition at line 546 of file perf_event_uncore.c.

547 {
548 
549  pe_context_t *pe_ctx = ( pe_context_t *) hwd_ctx;
550 
551  /* clear the context structure and mark as initialized */
552  memset( pe_ctx, 0, sizeof ( pe_context_t ) );
553  pe_ctx->initialized=1;
554 
556  pe_ctx->cidx=our_cidx;
557 
558  return PAPI_OK;
559 }
#define PAPI_OK
Definition: fpapi.h:105
static int our_cidx
struct native_event_table_t uncore_native_event_table
struct native_event_table_t * event_table

◆ _peu_ntv_code_to_descr()

static int _peu_ntv_code_to_descr ( unsigned int  EventCode,
char *  ntv_descr,
int  len 
)
static

Definition at line 1273 of file perf_event_uncore.c.

1274  {
1275 
1277 
1278  return _pe_libpfm4_ntv_code_to_descr(EventCode,ntv_descr,len,
1280 }
papi_vector_t _perf_event_uncore_vector
PAPI_component_info_t cmp_info
Definition: papi_vector.h:20
struct native_event_table_t uncore_native_event_table
int _pe_libpfm4_ntv_code_to_descr(unsigned int EventCode, char *ntv_descr, int len, struct native_event_table_t *event_table)
#define PAPI_ENOEVNT
Definition: fpapi.h:112
Here is the call graph for this function:

◆ _peu_ntv_code_to_info()

static int _peu_ntv_code_to_info ( unsigned int  EventCode,
PAPI_event_info_t info 
)
static

Definition at line 1283 of file perf_event_uncore.c.

1284  {
1285 
1287 
1288  return _pe_libpfm4_ntv_code_to_info(EventCode, info,
1290 }
papi_vector_t _perf_event_uncore_vector
PAPI_component_info_t cmp_info
Definition: papi_vector.h:20
struct native_event_table_t uncore_native_event_table
int _pe_libpfm4_ntv_code_to_info(unsigned int EventCode, PAPI_event_info_t *info, struct native_event_table_t *event_table)
#define PAPI_ENOEVNT
Definition: fpapi.h:112
Here is the call graph for this function:

◆ _peu_ntv_code_to_name()

static int _peu_ntv_code_to_name ( unsigned int  EventCode,
char *  ntv_name,
int  len 
)
static

Definition at line 1262 of file perf_event_uncore.c.

1263  {
1264 
1266 
1267  return _pe_libpfm4_ntv_code_to_name(EventCode,
1268  ntv_name, len,
1270 }
papi_vector_t _perf_event_uncore_vector
PAPI_component_info_t cmp_info
Definition: papi_vector.h:20
int _pe_libpfm4_ntv_code_to_name(unsigned int EventCode, char *ntv_name, int len, struct native_event_table_t *event_table)
struct native_event_table_t uncore_native_event_table
#define PAPI_ENOEVNT
Definition: fpapi.h:112
Here is the call graph for this function:

◆ _peu_ntv_enum_events()

static int _peu_ntv_enum_events ( unsigned int *  PapiEventCode,
int  modifier 
)
static

Definition at line 1242 of file perf_event_uncore.c.

1243 {
1244 
1246 
1247 
1248  return _pe_libpfm4_ntv_enum_events(PapiEventCode, modifier, our_cidx,
1250 }
static int our_cidx
papi_vector_t _perf_event_uncore_vector
int _pe_libpfm4_ntv_enum_events(unsigned int *PapiEventCode, int modifier, int cidx, struct native_event_table_t *event_table)
PAPI_component_info_t cmp_info
Definition: papi_vector.h:20
struct native_event_table_t uncore_native_event_table
#define PAPI_ENOEVNT
Definition: fpapi.h:112
Here is the call graph for this function:

◆ _peu_ntv_name_to_code()

static int _peu_ntv_name_to_code ( const char *  name,
unsigned int *  event_code 
)
static

Definition at line 1253 of file perf_event_uncore.c.

1253  {
1254 
1256 
1257  return _pe_libpfm4_ntv_name_to_code(name,event_code, our_cidx,
1259 }
static int our_cidx
static const char * name
Definition: fork_overflow.c:31
papi_vector_t _perf_event_uncore_vector
int _pe_libpfm4_ntv_name_to_code(const char *name, unsigned int *event_code, int cidx, struct native_event_table_t *event_table)
PAPI_component_info_t cmp_info
Definition: papi_vector.h:20
struct native_event_table_t uncore_native_event_table
#define PAPI_ENOEVNT
Definition: fpapi.h:112
Here is the call graph for this function:

◆ _peu_read()

static int _peu_read ( hwd_context_t ctx,
hwd_control_state_t ctl,
long long **  events,
int  flags 
)
static

Definition at line 891 of file perf_event_uncore.c.

893 {
894  SUBDBG("ENTER: ctx: %p, ctl: %p, events: %p, flags: %#x\n", ctx, ctl, events, flags);
895 
896  ( void ) flags; /*unused */
897  int i, ret = -1;
898  /* pe_context_t *pe_ctx = ( pe_context_t *) ctx; */
899  (void) ctx; /*unused*/
900  pe_control_t *pe_ctl = ( pe_control_t *) ctl;
901  long long papi_pe_buffer[READ_BUFFER_SIZE];
902  long long tot_time_running, tot_time_enabled, scale;
903 
904  /* Handle case where we are multiplexing */
905  if (pe_ctl->multiplexed) {
906 
907  /* currently we handle multiplexing by having individual events */
908  /* so we read from each in turn. */
909 
910  for ( i = 0; i < pe_ctl->num_events; i++ ) {
911 
912  ret = read( pe_ctl->events[i].event_fd, papi_pe_buffer,
913  sizeof ( papi_pe_buffer ) );
914  if ( ret == -1 ) {
915  PAPIERROR("read returned an error: ", strerror( errno ));
916  SUBDBG("EXIT: PAPI_ESYS\n");
917  return PAPI_ESYS;
918  }
919 
920  /* We should read 3 64-bit values from the counter */
921  if (ret<(signed)(3*sizeof(long long))) {
922  PAPIERROR("Error! short read!\n");
923  SUBDBG("EXIT: PAPI_ESYS\n");
924  return PAPI_ESYS;
925  }
926 
927  SUBDBG("read: fd: %2d, tid: %ld, cpu: %d, ret: %d\n",
928  pe_ctl->events[i].event_fd,
929  (long)pe_ctl->tid, pe_ctl->events[i].cpu, ret);
930  SUBDBG("read: %lld %lld %lld\n",papi_pe_buffer[0],
931  papi_pe_buffer[1],papi_pe_buffer[2]);
932 
933  tot_time_enabled = papi_pe_buffer[1];
934  tot_time_running = papi_pe_buffer[2];
935 
936  SUBDBG("count[%d] = (papi_pe_buffer[%d] %lld * "
937  "tot_time_enabled %lld) / tot_time_running %lld\n",
938  i, 0,papi_pe_buffer[0],
939  tot_time_enabled,tot_time_running);
940 
941  if (tot_time_running == tot_time_enabled) {
942  /* No scaling needed */
943  pe_ctl->counts[i] = papi_pe_buffer[0];
944  } else if (tot_time_running && tot_time_enabled) {
945  /* Scale factor of 100 to avoid overflows when computing */
946  /*enabled/running */
947 
948  scale = (tot_time_enabled * 100LL) / tot_time_running;
949  scale = scale * papi_pe_buffer[0];
950  scale = scale / 100LL;
951  pe_ctl->counts[i] = scale;
952  } else {
953  /* This should not happen, but Phil reports it sometimes does. */
954  SUBDBG("perf_event kernel bug(?) count, enabled, "
955  "running: %lld, %lld, %lld\n",
956  papi_pe_buffer[0],tot_time_enabled,
957  tot_time_running);
958 
959  pe_ctl->counts[i] = papi_pe_buffer[0];
960  }
961  }
962  }
963 
964  /* Handle cases where we cannot use FORMAT GROUP */
965  else if (pe_ctl->inherit) {
966 
967  /* we must read each counter individually */
968  for ( i = 0; i < pe_ctl->num_events; i++ ) {
969 
970  ret = read( pe_ctl->events[i].event_fd, papi_pe_buffer,
971  sizeof ( papi_pe_buffer ) );
972  if ( ret == -1 ) {
973  PAPIERROR("read returned an error: ", strerror( errno ));
974  SUBDBG("EXIT: PAPI_ESYS\n");
975  return PAPI_ESYS;
976  }
977 
978  /* we should read one 64-bit value from each counter */
979  if (ret!=sizeof(long long)) {
980  PAPIERROR("Error! short read!\n");
981  PAPIERROR("read: fd: %2d, tid: %ld, cpu: %d, ret: %d\n",
982  pe_ctl->events[i].event_fd,
983  (long)pe_ctl->tid, pe_ctl->events[i].cpu, ret);
984  SUBDBG("EXIT: PAPI_ESYS\n");
985  return PAPI_ESYS;
986  }
987 
988  SUBDBG("read: fd: %2d, tid: %ld, cpu: %d, ret: %d\n",
989  pe_ctl->events[i].event_fd, (long)pe_ctl->tid,
990  pe_ctl->events[i].cpu, ret);
991  SUBDBG("read: %lld\n",papi_pe_buffer[0]);
992 
993  pe_ctl->counts[i] = papi_pe_buffer[0];
994  }
995  }
996 
997 
998  /* Handle cases where we are using FORMAT_GROUP */
999  /* We assume only one group leader, in position 0 */
1000 
1001  else {
1002  if (pe_ctl->events[0].group_leader_fd!=-1) {
1003  PAPIERROR("Was expecting group leader!\n");
1004  }
1005 
1006  ret = read( pe_ctl->events[0].event_fd, papi_pe_buffer,
1007  sizeof ( papi_pe_buffer ) );
1008 
1009  if ( ret == -1 ) {
1010  PAPIERROR("read returned an error: ", strerror( errno ));
1011  SUBDBG("EXIT: PAPI_ESYS\n");
1012  return PAPI_ESYS;
1013  }
1014 
1015  /* we read 1 64-bit value (number of events) then */
1016  /* num_events more 64-bit values that hold the counts */
1017  if (ret<(signed)((1+pe_ctl->num_events)*sizeof(long long))) {
1018  PAPIERROR("Error! short read!\n");
1019  SUBDBG("EXIT: PAPI_ESYS\n");
1020  return PAPI_ESYS;
1021  }
1022 
1023  SUBDBG("read: fd: %2d, tid: %ld, cpu: %d, ret: %d\n",
1024  pe_ctl->events[0].event_fd,
1025  (long)pe_ctl->tid, pe_ctl->events[0].cpu, ret);
1026  {
1027  int j;
1028  for(j=0;j<ret/8;j++) {
1029  SUBDBG("read %d: %lld\n",j,papi_pe_buffer[j]);
1030  }
1031  }
1032 
1033  /* Make sure the kernel agrees with how many events we have */
1034  if (papi_pe_buffer[0]!=pe_ctl->num_events) {
1035  PAPIERROR("Error! Wrong number of events!\n");
1036  SUBDBG("EXIT: PAPI_ESYS\n");
1037  return PAPI_ESYS;
1038  }
1039 
1040  /* put the count values in their proper location */
1041  for(i=0;i<pe_ctl->num_events;i++) {
1042  pe_ctl->counts[i] = papi_pe_buffer[1+i];
1043  }
1044  }
1045 
1046  /* point PAPI to the values we read */
1047  *events = pe_ctl->counts;
1048 
1049  SUBDBG("EXIT: PAPI_OK\n");
1050  return PAPI_OK;
1051 }
#define PAPI_OK
Definition: fpapi.h:105
ssize_t read(int fd, void *buf, size_t count)
Definition: appio.c:225
long long counts[PERF_EVENT_MAX_MPX_COUNTERS]
int errno
pe_event_info_t events[PERF_EVENT_MAX_MPX_COUNTERS]
char events[MAX_EVENTS][BUFSIZ]
#define PAPI_ESYS
Definition: fpapi.h:108
#define SUBDBG(format, args...)
Definition: papi_debug.h:63
void PAPIERROR(char *format,...)
unsigned int multiplexed
long long ret
Definition: iozone.c:1346
unsigned int inherit
#define READ_BUFFER_SIZE
int i
Definition: fileop.c:140
Here is the call graph for this function:

◆ _peu_reset()

static int _peu_reset ( hwd_context_t ctx,
hwd_control_state_t ctl 
)
static

Definition at line 836 of file perf_event_uncore.c.

837 {
838  int i, ret;
839  pe_control_t *pe_ctl = ( pe_control_t *) ctl;
840 
841  ( void ) ctx; /*unused */
842 
843  /* We need to reset all of the events, not just the group leaders */
844  for( i = 0; i < pe_ctl->num_events; i++ ) {
845  ret = ioctl( pe_ctl->events[i].event_fd, PERF_EVENT_IOC_RESET, NULL );
846  if ( ret == -1 ) {
847  PAPIERROR("ioctl(%d, PERF_EVENT_IOC_RESET, NULL) "
848  "returned error, Linux says: %s",
849  pe_ctl->events[i].event_fd, strerror( errno ) );
850  return PAPI_ESYS;
851  }
852  }
853 
854  return PAPI_OK;
855 }
#define PAPI_OK
Definition: fpapi.h:105
int errno
pe_event_info_t events[PERF_EVENT_MAX_MPX_COUNTERS]
#define PAPI_ESYS
Definition: fpapi.h:108
void PAPIERROR(char *format,...)
long long ret
Definition: iozone.c:1346
int i
Definition: fileop.c:140
Here is the call graph for this function:
Here is the caller graph for this function:

◆ _peu_set_domain()

static int _peu_set_domain ( hwd_control_state_t ctl,
int  domain 
)
static

Definition at line 809 of file perf_event_uncore.c.

810 {
811  pe_control_t *pe_ctl = ( pe_control_t *) ctl;
812 
813  SUBDBG("old control domain %d, new domain %d\n",
814  pe_ctl->domain,domain);
815 
816  pe_ctl->domain = domain;
817  return PAPI_OK;
818 }
#define PAPI_OK
Definition: fpapi.h:105
unsigned int domain
#define SUBDBG(format, args...)
Definition: papi_debug.h:63
Here is the caller graph for this function:

◆ _peu_shutdown_component()

static int _peu_shutdown_component ( void  )
static

Definition at line 666 of file perf_event_uncore.c.

666  {
667 
668  /* deallocate our event table */
671 
672  /* Shutdown libpfm4 */
674 
675  return PAPI_OK;
676 }
#define PAPI_OK
Definition: fpapi.h:105
papi_vector_t _perf_event_uncore_vector
struct native_event_table_t uncore_native_event_table
int _papi_libpfm4_shutdown(papi_vector_t *my_vector)
int _pe_libpfm4_shutdown(papi_vector_t *my_vector, struct native_event_table_t *event_table)
Here is the call graph for this function:
Here is the caller graph for this function:

◆ _peu_shutdown_thread()

static int _peu_shutdown_thread ( hwd_context_t ctx)
static

Definition at line 822 of file perf_event_uncore.c.

823 {
824  pe_context_t *pe_ctx = ( pe_context_t *) ctx;
825 
826  pe_ctx->initialized=0;
827 
828  return PAPI_OK;
829 }
#define PAPI_OK
Definition: fpapi.h:105

◆ _peu_start()

static int _peu_start ( hwd_context_t ctx,
hwd_control_state_t ctl 
)
static

Definition at line 1055 of file perf_event_uncore.c.

1056 {
1057  int ret;
1058  int i;
1059  int did_something = 0;
1060  pe_context_t *pe_ctx = ( pe_context_t *) ctx;
1061  pe_control_t *pe_ctl = ( pe_control_t *) ctl;
1062 
1063  /* Reset the counters first. Is this necessary? */
1064  ret = _peu_reset( pe_ctx, pe_ctl );
1065  if ( ret ) {
1066  return ret;
1067  }
1068 
1069  /* Enable all of the group leaders */
1070  /* All group leaders have a group_leader_fd of -1 */
1071  for( i = 0; i < pe_ctl->num_events; i++ ) {
1072  if (pe_ctl->events[i].group_leader_fd == -1) {
1073  SUBDBG("ioctl(enable): fd: %d\n", pe_ctl->events[i].event_fd);
1074  ret=ioctl( pe_ctl->events[i].event_fd, PERF_EVENT_IOC_ENABLE, NULL) ;
1075 
1076  /* ioctls always return -1 on failure */
1077  if (ret == -1) {
1078  PAPIERROR("ioctl(PERF_EVENT_IOC_ENABLE) failed.\n");
1079  return PAPI_ESYS;
1080  }
1081 
1082  did_something++;
1083  }
1084  }
1085 
1086  if (!did_something) {
1087  PAPIERROR("Did not enable any counters.\n");
1088  return PAPI_EBUG;
1089  }
1090 
1091  pe_ctx->state |= PERF_EVENTS_RUNNING;
1092 
1093  return PAPI_OK;
1094 
1095 }
#define PAPI_OK
Definition: fpapi.h:105
pe_event_info_t events[PERF_EVENT_MAX_MPX_COUNTERS]
#define PAPI_EBUG
Definition: fpapi.h:111
#define PAPI_ESYS
Definition: fpapi.h:108
#define SUBDBG(format, args...)
Definition: papi_debug.h:63
void PAPIERROR(char *format,...)
long long ret
Definition: iozone.c:1346
#define PERF_EVENTS_RUNNING
static int _peu_reset(hwd_context_t *ctx, hwd_control_state_t *ctl)
int i
Definition: fileop.c:140
Here is the call graph for this function:

◆ _peu_stop()

static int _peu_stop ( hwd_context_t ctx,
hwd_control_state_t ctl 
)
static

Definition at line 1099 of file perf_event_uncore.c.

1100 {
1101 
1102  int ret;
1103  int i;
1104  pe_context_t *pe_ctx = ( pe_context_t *) ctx;
1105  pe_control_t *pe_ctl = ( pe_control_t *) ctl;
1106 
1107  /* Just disable the group leaders */
1108  for ( i = 0; i < pe_ctl->num_events; i++ ) {
1109  if ( pe_ctl->events[i].group_leader_fd == -1 ) {
1110  ret=ioctl( pe_ctl->events[i].event_fd, PERF_EVENT_IOC_DISABLE, NULL);
1111  if ( ret == -1 ) {
1112  PAPIERROR( "ioctl(%d, PERF_EVENT_IOC_DISABLE, NULL) "
1113  "returned error, Linux says: %s",
1114  pe_ctl->events[i].event_fd, strerror( errno ) );
1115  return PAPI_EBUG;
1116  }
1117  }
1118  }
1119 
1120  pe_ctx->state &= ~PERF_EVENTS_RUNNING;
1121 
1122  return PAPI_OK;
1123 }
#define PAPI_OK
Definition: fpapi.h:105
int errno
pe_event_info_t events[PERF_EVENT_MAX_MPX_COUNTERS]
#define PAPI_EBUG
Definition: fpapi.h:111
void PAPIERROR(char *format,...)
long long ret
Definition: iozone.c:1346
#define PERF_EVENTS_RUNNING
int i
Definition: fileop.c:140
Here is the call graph for this function:

◆ _peu_update_control_state()

int _peu_update_control_state ( hwd_control_state_t ctl,
NativeInfo_t native,
int  count,
hwd_context_t ctx 
)

Definition at line 683 of file perf_event_uncore.c.

686 {
687  int i;
688  int j;
689  int ret;
690  int skipped_events=0;
691  struct native_event_t *ntv_evt;
692  pe_context_t *pe_ctx = ( pe_context_t *) ctx;
693  pe_control_t *pe_ctl = ( pe_control_t *) ctl;
694 
695  /* close all of the existing fds and start over again */
696  /* In theory we could have finer-grained control and know if */
697  /* things were changed, but it's easier to tear things down and rebuild. */
698  close_pe_events( pe_ctx, pe_ctl );
699 
700  /* Calling with count==0 should be OK, it's how things are deallocated */
701  /* when an eventset is destroyed. */
702  if ( count == 0 ) {
703  SUBDBG( "Called with count == 0\n" );
704  return PAPI_OK;
705  }
706 
707  /* set up all the events */
708  for( i = 0; i < count; i++ ) {
709  if ( native ) {
710  // get the native event pointer used for this papi event
711  int ntv_idx = _papi_hwi_get_ntv_idx((unsigned)(native[i].ni_papi_code));
712  if (ntv_idx < -1) {
713  SUBDBG("papi_event_code: %#x known by papi but not by the component\n", native[i].ni_papi_code);
714  continue;
715  }
716  // if native index is -1, then we have an event without a mask and need to find the right native index to use
717  if (ntv_idx == -1) {
718  // find the native event index we want by matching for the right papi event code
719  for (j=0 ; j<pe_ctx->event_table->num_native_events ; j++) {
720  if (pe_ctx->event_table->native_events[j].papi_event_code == native[i].ni_papi_code) {
721  ntv_idx = j;
722  }
723  }
724  }
725 
726  // if native index is still negative, we did not find event we wanted so just return error
727  if (ntv_idx < 0) {
728  SUBDBG("papi_event_code: %#x not found in native event tables\n", native[i].ni_papi_code);
729  continue;
730  }
731 
732  // this native index is positive so there was a mask with the event, the ntv_idx identifies which native event to use
733  ntv_evt = (struct native_event_t *)(&(pe_ctx->event_table->native_events[ntv_idx]));
734 
735  SUBDBG("ntv_evt: %p\n", ntv_evt);
736 
737  SUBDBG("i: %d, pe_ctx->event_table->num_native_events: %d\n", i, pe_ctx->event_table->num_native_events);
738 
739  // Move this event's hardware config values and other attributes to the perf_events attribute structure
740  memcpy (&pe_ctl->events[i].attr, &ntv_evt->attr, sizeof(perf_event_attr_t));
741 
742  // may need to update the attribute structure with information from event set level domain settings (values set by PAPI_set_domain)
743  // only done if the event mask which controls each counting domain was not provided
744 
745  // get pointer to allocated name, will be NULL when adding preset events to event set
746  char *aName = ntv_evt->allocated_name;
747  if ((aName == NULL) || (strstr(aName, ":u=") == NULL)) {
748  SUBDBG("set exclude_user attribute from eventset level domain flags, encode: %d, eventset: %d\n", pe_ctl->events[i].attr.exclude_user, !(pe_ctl->domain & PAPI_DOM_USER));
749  pe_ctl->events[i].attr.exclude_user = !(pe_ctl->domain & PAPI_DOM_USER);
750  }
751  if ((aName == NULL) || (strstr(aName, ":k=") == NULL)) {
752  SUBDBG("set exclude_kernel attribute from eventset level domain flags, encode: %d, eventset: %d\n", pe_ctl->events[i].attr.exclude_kernel, !(pe_ctl->domain & PAPI_DOM_KERNEL));
753  pe_ctl->events[i].attr.exclude_kernel = !(pe_ctl->domain & PAPI_DOM_KERNEL);
754  }
755 
756  // set the cpu number provided with an event mask if there was one (will be -1 if mask not provided)
757  pe_ctl->events[i].cpu = ntv_evt->cpu;
758  // if cpu event mask not provided, then set the cpu to use to what may have been set on call to PAPI_set_opt (will still be -1 if not called)
759  if (pe_ctl->events[i].cpu == -1) {
760  pe_ctl->events[i].cpu = pe_ctl->cpu;
761  }
762  } else {
763  // This case happens when called from _pe_set_overflow and _pe_ctl
764  // Those callers put things directly into the pe_ctl structure so it is already set for the open call
765  }
766 
767  // Copy the inherit flag into the attribute block that will be passed to the kernel
768  pe_ctl->events[i].attr.inherit = pe_ctl->inherit;
769 
770  /* Set the position in the native structure */
771  /* We just set up events linearly */
772  if ( native ) {
773  native[i].ni_position = i;
774  SUBDBG( "&native[%d]: %p, ni_papi_code: %#x, ni_event: %#x, ni_position: %d, ni_owners: %d\n",
775  i, &(native[i]), native[i].ni_papi_code, native[i].ni_event, native[i].ni_position, native[i].ni_owners);
776  }
777  }
778 
779  if (count <= skipped_events) {
780  SUBDBG("EXIT: No events to count, they all contained invalid umasks\n");
781  return PAPI_ENOEVNT;
782  }
783 
784  pe_ctl->num_events = count - skipped_events;
785 
786  /* actually open the events */
787  /* (why is this a separate function?) */
788  ret = open_pe_events( pe_ctx, pe_ctl );
789  if ( ret != PAPI_OK ) {
790  SUBDBG("open_pe_events failed\n");
791  /* Restore values ? */
792  return ret;
793  }
794 
795  SUBDBG( "EXIT: PAPI_OK\n" );
796  return PAPI_OK;
797 }
#define PAPI_OK
Definition: fpapi.h:105
static int open_pe_events(pe_context_t *ctx, pe_control_t *ctl)
int _papi_hwi_get_ntv_idx(unsigned int papi_evt_code)
#define PAPI_DOM_KERNEL
Definition: fpapi.h:22
struct native_event_t * native_events
pe_event_info_t events[PERF_EVENT_MAX_MPX_COUNTERS]
static int close_pe_events(pe_context_t *ctx, pe_control_t *ctl)
unsigned int domain
#define PAPI_DOM_USER
Definition: fpapi.h:21
static int native
#define SUBDBG(format, args...)
Definition: papi_debug.h:63
struct native_event_table_t * event_table
long long ret
Definition: iozone.c:1346
perf_event_attr_t attr
struct perf_event_attr attr
#define PAPI_ENOEVNT
Definition: fpapi.h:112
unsigned int inherit
static long count
int i
Definition: fileop.c:140
Here is the call graph for this function:
Here is the caller graph for this function:

◆ _peu_write()

static int _peu_write ( hwd_context_t ctx,
hwd_control_state_t ctl,
long long *  from 
)
static

Definition at line 861 of file perf_event_uncore.c.

863 {
864  ( void ) ctx; /*unused */
865  ( void ) ctl; /*unused */
866  ( void ) from; /*unused */
867  /*
868  * Counters cannot be written. Do we need to virtualize the
869  * counters so that they can be written, or perhaps modify code so that
870  * they can be written? FIXME ?
871  */
872 
873  return PAPI_ENOSUPP;
874 }
#define PAPI_ENOSUPP
Definition: fpapi.h:123

◆ check_scheduability()

static int check_scheduability ( pe_context_t ctx,
pe_control_t ctl 
)
static

Definition at line 227 of file perf_event_uncore.c.

228 {
229  SUBDBG("ENTER: ctx: %p, ctl: %p\n", ctx, ctl);
230  int retval = 0, cnt = -1;
231  ( void ) ctx; /*unused */
232  long long papi_pe_buffer[READ_BUFFER_SIZE];
233  int i;
234 
235  /* If the kernel isn't tracking scheduability right */
236  /* Then we need to start/stop/read to force the event */
237  /* to be scheduled and see if an error condition happens. */
238 
239  /* start all events */
240  for( i = 0; i < ctl->num_events; i++) {
241  retval = ioctl( ctl->events[i].event_fd, PERF_EVENT_IOC_ENABLE, NULL );
242  if (retval == -1) {
243  SUBDBG("EXIT: Enable failed event index: %d, num_events: %d, return PAPI_ESYS\n", i, ctl->num_events);
244  return PAPI_ESYS;
245  }
246  }
247 
248  /* stop all events */
249  for( i = 0; i < ctl->num_events; i++) {
250  retval = ioctl(ctl->events[i].event_fd, PERF_EVENT_IOC_DISABLE, NULL );
251  if (retval == -1) {
252  SUBDBG("EXIT: Disable failed: event index: %d, num_events: %d, return PAPI_ESYS\n", i, ctl->num_events);
253  return PAPI_ESYS;
254  }
255  }
256 
257  /* See if a read of each event returns results */
258  for( i = 0; i < ctl->num_events; i++) {
259  cnt = read( ctl->events[i].event_fd, papi_pe_buffer, sizeof(papi_pe_buffer));
260  if ( cnt == -1 ) {
261  SUBDBG( "EXIT: read failed: event index: %d, num_events: %d, return PAPI_ESYS. Should never happen.\n", i, ctl->num_events);
262  return PAPI_ESYS;
263  }
264 
265  if ( cnt == 0 ) {
266  /* We read 0 bytes if we could not schedule the event */
267  /* The kernel should have detected this at open */
268  /* but various bugs (including NMI watchdog) */
269  /* result in this behavior */
270 
271  SUBDBG( "EXIT: read returned 0: event index: %d, num_events: %d, return PAPI_ECNFLCT.\n", i, ctl->num_events);
272  return PAPI_ECNFLCT;
273  }
274  }
275 
276  /* Reset all of the counters (opened so far) back to zero */
277  /* from the above brief enable/disable call pair. */
278 
279  /* We have to reset all events because reset of group leader */
280  /* does not reset all. */
281  /* we assume that the events are being added one by one and that */
282  /* we do not need to reset higher events (doing so may reset ones */
283  /* that have not been initialized yet. */
284 
285  /* Note... PERF_EVENT_IOC_RESET does not reset time running */
286  /* info if multiplexing, so we should avoid coming here if */
287  /* we are multiplexing the event. */
288  for( i = 0; i < ctl->num_events; i++) {
289  retval=ioctl( ctl->events[i].event_fd, PERF_EVENT_IOC_RESET, NULL );
290  if (retval == -1) {
291  SUBDBG("EXIT: Reset failed: event index: %d, num_events: %d, return PAPI_ESYS\n", i, ctl->num_events);
292  return PAPI_ESYS;
293  }
294  }
295  SUBDBG("EXIT: return PAPI_OK\n");
296  return PAPI_OK;
297 }
#define PAPI_OK
Definition: fpapi.h:105
ssize_t read(int fd, void *buf, size_t count)
Definition: appio.c:225
pe_event_info_t events[PERF_EVENT_MAX_MPX_COUNTERS]
int retval
Definition: zero_fork.c:53
#define PAPI_ESYS
Definition: fpapi.h:108
#define SUBDBG(format, args...)
Definition: papi_debug.h:63
int cnt[ctr_pcp_ntv_code_to_info+1]
Definition: linux-pcp.c:215
#define PAPI_ECNFLCT
Definition: fpapi.h:113
#define READ_BUFFER_SIZE
int i
Definition: fileop.c:140
Here is the call graph for this function:
Here is the caller graph for this function:

◆ close_pe_events()

static int close_pe_events ( pe_context_t ctx,
pe_control_t ctl 
)
static

Definition at line 449 of file perf_event_uncore.c.

450 {
451  int i;
452  int num_closed=0;
453  int events_not_opened=0;
454 
455  /* should this be a more serious error? */
456  if ( ctx->state & PERF_EVENTS_RUNNING ) {
457  SUBDBG("Closing without stopping first\n");
458  }
459 
460  /* Close child events first */
461  for( i=0; i<ctl->num_events; i++ ) {
462 
463  if (ctl->events[i].event_opened) {
464 
465  if (ctl->events[i].group_leader_fd!=-1) {
466  if ( ctl->events[i].mmap_buf ) {
467  if ( munmap ( ctl->events[i].mmap_buf,
468  ctl->events[i].nr_mmap_pages * getpagesize() ) ) {
469  PAPIERROR( "munmap of fd = %d returned error: %s",
470  ctl->events[i].event_fd, strerror( errno ) );
471  return PAPI_ESYS;
472  }
473  }
474 
475  if ( close( ctl->events[i].event_fd ) ) {
476  PAPIERROR( "close of fd = %d returned error: %s",
477  ctl->events[i].event_fd, strerror( errno ) );
478  return PAPI_ESYS;
479  } else {
480  num_closed++;
481  }
482  ctl->events[i].event_opened=0;
483  }
484  }
485  else {
486  events_not_opened++;
487  }
488  }
489 
490  /* Close the group leaders last */
491  for( i=0; i<ctl->num_events; i++ ) {
492 
493  if (ctl->events[i].event_opened) {
494 
495  if (ctl->events[i].group_leader_fd==-1) {
496  if ( ctl->events[i].mmap_buf ) {
497  if ( munmap ( ctl->events[i].mmap_buf,
498  ctl->events[i].nr_mmap_pages * getpagesize() ) ) {
499  PAPIERROR( "munmap of fd = %d returned error: %s",
500  ctl->events[i].event_fd, strerror( errno ) );
501  return PAPI_ESYS;
502  }
503  }
504 
505 
506  if ( close( ctl->events[i].event_fd ) ) {
507  PAPIERROR( "close of fd = %d returned error: %s",
508  ctl->events[i].event_fd, strerror( errno ) );
509  return PAPI_ESYS;
510  } else {
511  num_closed++;
512  }
513  ctl->events[i].event_opened=0;
514  }
515  }
516  }
517 
518 
519  if (ctl->num_events!=num_closed) {
520  if (ctl->num_events!=(num_closed+events_not_opened)) {
521  PAPIERROR("Didn't close all events: "
522  "Closed %d Not Opened: %d Expected %d\n",
523  num_closed,events_not_opened,ctl->num_events);
524  return PAPI_EBUG;
525  }
526  }
527 
528  ctl->num_events=0;
529 
530  ctx->state &= ~PERF_EVENTS_OPENED;
531 
532  return PAPI_OK;
533 }
#define PAPI_OK
Definition: fpapi.h:105
int errno
int close(int fd)
Definition: appio.c:175
#define PERF_EVENTS_OPENED
pe_event_info_t events[PERF_EVENT_MAX_MPX_COUNTERS]
#define PAPI_EBUG
Definition: fpapi.h:111
uint32_t nr_mmap_pages
#define PAPI_ESYS
Definition: fpapi.h:108
#define SUBDBG(format, args...)
Definition: papi_debug.h:63
void PAPIERROR(char *format,...)
#define PERF_EVENTS_RUNNING
int i
Definition: fileop.c:140
Here is the call graph for this function:
Here is the caller graph for this function:

◆ get_read_format()

static unsigned int get_read_format ( unsigned int  multiplex,
unsigned int  inherit,
int  format_group 
)
static

Definition at line 76 of file perf_event_uncore.c.

/* Compute the perf_event read_format bitmask for an event.
 *
 * Multiplexed events need the time-enabled/time-running fields so the
 * counts can be scaled; group reads are only requested when inherit is
 * off, because PERF_FORMAT_GROUP and inherit are incompatible.
 */
static unsigned int
get_read_format( unsigned int multiplex,
		 unsigned int inherit,
		 int format_group )
{
	unsigned int format = 0;

	/* Multiplexing needs the timing fields for count scaling */
	if ( multiplex ) {
		format |= PERF_FORMAT_TOTAL_TIME_ENABLED |
			  PERF_FORMAT_TOTAL_TIME_RUNNING;
	}

	/* Group reads only make sense when inherit is not in use */
	if ( format_group && !inherit ) {
		format |= PERF_FORMAT_GROUP;
	}

	SUBDBG("multiplex: %d, inherit: %d, group_leader: %d, format: %#x\n",
		multiplex, inherit, format_group, format);

	return format;
}
i inherit inherit
int multiplex(void)
Definition: multiplex.c:35
#define SUBDBG(format, args...)
Definition: papi_debug.h:63
Here is the call graph for this function:
Here is the caller graph for this function:

◆ map_perf_event_errors_to_papi()

static int map_perf_event_errors_to_papi ( int  perf_event_error)
static

Definition at line 176 of file perf_event_uncore.c.

176  {
177 
178  int ret;
179 
180  /* These mappings are approximate.
181  EINVAL in particular can mean lots of different things */
182  switch(perf_event_error) {
183  case EPERM:
184  case EACCES:
185  ret = PAPI_EPERM;
186  break;
187  case ENODEV:
188  case EOPNOTSUPP:
189  ret = PAPI_ENOSUPP;
190  break;
191  case ENOENT:
192  ret = PAPI_ENOEVNT;
193  break;
194  case ENOSYS:
195  case EAGAIN:
196  case EBUSY:
197  case E2BIG:
198  ret = PAPI_ESYS;
199  break;
200  case ENOMEM:
201  ret = PAPI_ENOMEM;
202  break;
203  case EINVAL:
204  default:
205  ret = PAPI_EINVAL;
206  break;
207  }
208  return ret;
209 }
#define PAPI_ENOMEM
Definition: fpapi.h:107
#define PAPI_EINVAL
Definition: fpapi.h:106
#define PAPI_ENOSUPP
Definition: fpapi.h:123
#define PAPI_EPERM
Definition: fpapi.h:120
#define PAPI_ESYS
Definition: fpapi.h:108
long long ret
Definition: iozone.c:1346
#define PAPI_ENOEVNT
Definition: fpapi.h:112
Here is the caller graph for this function:

◆ open_pe_events()

static int open_pe_events ( pe_context_t ctx,
pe_control_t ctl 
)
static

Definition at line 302 of file perf_event_uncore.c.

303 {
304 
305  int i, ret = PAPI_OK;
306  long pid;
307 
308  if (ctl->granularity==PAPI_GRN_SYS) {
309  pid = -1;
310  }
311  else {
312  pid = ctl->tid;
313  }
314 
315  for( i = 0; i < ctl->num_events; i++ ) {
316 
317  ctl->events[i].event_opened=0;
318 
319  /* set up the attr structure. We don't set up all fields here */
320  /* as some have already been set up previously. */
321 
322 /*
323  * The following code controls how the uncore component interfaces with the
324  * kernel for uncore events. The code inside the ifdef will use grouping of
325  * uncore events which can make the cost of reading the results more efficient.
326  * The problem with it is that the uncore component supports 20 different uncore
327  * PMU's. The kernel requires that all events in a group must be for the same PMU.
328  * This means that with grouping enabled papi applications can count events on only
329  * one of the 20 PMU's during a run.
330  *
331  * The code inside the else clause treats each event in the event set as
332  * independent. When running in this mode the kernel allows the papi multiple
333  * uncore PMU's at the same time.
334  *
335  * Example:
336  * An application wants to measure all the L3 cache write requests.
337  * The event to do this is part of a cbox pmu (there are 8 cbox pmu's).
338  * When built with the code in the ifdef, the application would have to be
339  * run 8 times and count write requests from one pmu at a time.
340  * When built with the code in the else, the write requests in all 8 cbox
341  * pmu's could be counted in the same run.
342  *
343  */
344 // #define GROUPIT 1 // remove the comment on this line to force event grouping
345 #ifdef GROUPIT
346  /* group leader (event 0) is special */
347  /* If we're multiplexed, everyone is a group leader */
348  if (( i == 0 ) || (ctl->multiplexed)) {
349  ctl->events[i].attr.pinned = !ctl->multiplexed;
350  ctl->events[i].attr.disabled = 1;
351  ctl->events[i].group_leader_fd=-1;
352  ctl->events[i].attr.read_format = get_read_format(ctl->multiplexed,
353  ctl->inherit,
354  !ctl->multiplexed );
355  } else {
356  ctl->events[i].attr.pinned=0;
357  ctl->events[i].attr.disabled = 0;
358  ctl->events[i].group_leader_fd=ctl->events[0].event_fd,
359  ctl->events[i].attr.read_format = get_read_format(ctl->multiplexed,
360  ctl->inherit,
361  0 );
362  }
363 #else
364  ctl->events[i].attr.pinned = !ctl->multiplexed;
365  ctl->events[i].attr.disabled = 1;
366  ctl->inherit = 1;
367  ctl->events[i].group_leader_fd=-1;
368  ctl->events[i].attr.read_format = get_read_format(ctl->multiplexed, ctl->inherit, 0 );
369 #endif
370 
371 
372  /* try to open */
373  ctl->events[i].event_fd = sys_perf_event_open( &ctl->events[i].attr,
374  pid,
375  ctl->events[i].cpu,
376  ctl->events[i].group_leader_fd,
377  0 /* flags */
378  );
379 
380  /* Try to match Linux errors to PAPI errors */
381  if ( ctl->events[i].event_fd == -1 ) {
382  SUBDBG("sys_perf_event_open returned error on event #%d."
383  " Error: %s\n",
384  i, strerror( errno ) );
386 
387  goto open_peu_cleanup;
388  }
389 
390  SUBDBG ("sys_perf_event_open: tid: %ld, cpu_num: %d,"
391  " group_leader/fd: %d, event_fd: %d,"
392  " read_format: %"PRIu64"\n",
393  pid, ctl->events[i].cpu, ctl->events[i].group_leader_fd,
394  ctl->events[i].event_fd, ctl->events[i].attr.read_format);
395 
396  ctl->events[i].event_opened=1;
397  }
398 
399 
400  /* in many situations the kernel will indicate we opened fine */
401  /* yet things will fail later. So we need to double check */
402  /* we actually can use the events we've set up. */
403 
404  /* This is not necessary if we are multiplexing, and in fact */
405  /* we cannot do this properly if multiplexed because */
406  /* PERF_EVENT_IOC_RESET does not reset the time running info */
407  if (!ctl->multiplexed) {
408  ret = check_scheduability( ctx, ctl);
409 
410  if ( ret != PAPI_OK ) {
411  /* the last event did open, so we need to bump the counter */
412  /* before doing the cleanup */
413  i++;
414  goto open_peu_cleanup;
415  }
416  }
417 
418  /* Now that we've successfully opened all of the events, do whatever */
419  /* "tune-up" is needed to attach the mmap'd buffers, signal handlers, */
420  /* and so on. */
421  for ( i = 0; i < ctl->num_events; i++ ) {
422 
423  /* No sampling if uncore */
424  ctl->events[i].mmap_buf = NULL;
425  }
426 
427  /* Set num_evts only if completely successful */
428  ctx->state |= PERF_EVENTS_OPENED;
429 
430  return PAPI_OK;
431 
432 open_peu_cleanup:
433  /* We encountered an error, close up the fds we successfully opened. */
434  /* We go backward in an attempt to close group leaders last, although */
435  /* That's probably not strictly necessary. */
436  while ( i > 0 ) {
437  i--;
438  if (ctl->events[i].event_fd>=0) {
439  close( ctl->events[i].event_fd );
440  ctl->events[i].event_opened=0;
441  }
442  }
443 
444  return ret;
445 }
#define PAPI_OK
Definition: fpapi.h:105
int errno
int close(int fd)
Definition: appio.c:175
unsigned int granularity
static int map_perf_event_errors_to_papi(int perf_event_error)
#define PAPI_GRN_SYS
Definition: fpapi.h:71
#define PERF_EVENTS_OPENED
pe_event_info_t events[PERF_EVENT_MAX_MPX_COUNTERS]
static int check_scheduability(pe_context_t *ctx, pe_control_t *ctl)
static int pid
#define SUBDBG(format, args...)
Definition: papi_debug.h:63
unsigned int multiplexed
long long ret
Definition: iozone.c:1346
struct perf_event_attr attr
static unsigned int get_read_format(unsigned int multiplex, unsigned int inherit, int format_group)
unsigned int inherit
static long sys_perf_event_open(struct perf_event_attr *hw_event, pid_t pid, int cpu, int group_fd, unsigned long flags)
int i
Definition: fileop.c:140
Here is the call graph for this function:
Here is the caller graph for this function:

◆ sys_perf_event_open()

static long sys_perf_event_open ( struct perf_event_attr hw_event,
pid_t  pid,
int  cpu,
int  group_fd,
unsigned long  flags 
)
static

Definition at line 125 of file perf_event_uncore.c.

127 {
128  int ret;
129 
130  SUBDBG("sys_perf_event_open(hw_event: %p, pid: %d, cpu: %d, group_fd: %d, flags: %lx\n",hw_event,pid,cpu,group_fd,flags);
131  SUBDBG(" type: %d\n",hw_event->type);
132  SUBDBG(" size: %d\n",hw_event->size);
133  SUBDBG(" config: %#"PRIx64" (%"PRIu64")\n",hw_event->config,
134  hw_event->config);
135  SUBDBG(" sample_period: %"PRIu64"\n",hw_event->sample_period);
136  SUBDBG(" sample_type: %"PRIu64"\n",hw_event->sample_type);
137  SUBDBG(" read_format: %"PRIu64"\n",hw_event->read_format);
138  SUBDBG(" disabled: %d\n",hw_event->disabled);
139  SUBDBG(" inherit: %d\n",hw_event->inherit);
140  SUBDBG(" pinned: %d\n",hw_event->pinned);
141  SUBDBG(" exclusive: %d\n",hw_event->exclusive);
142  SUBDBG(" exclude_user: %d\n",hw_event->exclude_user);
143  SUBDBG(" exclude_kernel: %d\n",hw_event->exclude_kernel);
144  SUBDBG(" exclude_hv: %d\n",hw_event->exclude_hv);
145  SUBDBG(" exclude_idle: %d\n",hw_event->exclude_idle);
146  SUBDBG(" mmap: %d\n",hw_event->mmap);
147  SUBDBG(" comm: %d\n",hw_event->comm);
148  SUBDBG(" freq: %d\n",hw_event->freq);
149  SUBDBG(" inherit_stat: %d\n",hw_event->inherit_stat);
150  SUBDBG(" enable_on_exec: %d\n",hw_event->enable_on_exec);
151  SUBDBG(" task: %d\n",hw_event->task);
152  SUBDBG(" watermark: %d\n",hw_event->watermark);
153  SUBDBG(" precise_ip: %d\n",hw_event->precise_ip);
154  SUBDBG(" mmap_data: %d\n",hw_event->mmap_data);
155  SUBDBG(" sample_id_all: %d\n",hw_event->sample_id_all);
156  SUBDBG(" exclude_host: %d\n",hw_event->exclude_host);
157  SUBDBG(" exclude_guest: %d\n",hw_event->exclude_guest);
158  SUBDBG(" exclude_callchain_kernel: %d\n",hw_event->exclude_callchain_kernel);
159  SUBDBG(" exclude_callchain_user: %d\n",hw_event->exclude_callchain_user);
160  SUBDBG(" wakeup_watermark: %d\n",hw_event->wakeup_watermark);
161  SUBDBG(" bp_type: %d\n",hw_event->bp_type);
162  SUBDBG(" config1: %#lx (%lu)\n",hw_event->config1,hw_event->config1);
163  SUBDBG(" config2: %#lx (%lu)\n",hw_event->config2,hw_event->config2);
164  SUBDBG(" branch_sample_type: %lu\n",hw_event->branch_sample_type);
165  SUBDBG(" sample_regs_user: %lu\n",hw_event->sample_regs_user);
166  SUBDBG(" sample_stack_user: %d\n",hw_event->sample_stack_user);
167 
168  ret = syscall( __NR_perf_event_open, hw_event, pid, cpu, group_fd, flags );
169  SUBDBG("Returned %d %d %s\n",ret,
170  ret<0?errno:0,
171  ret<0?strerror(errno):" ");
172  return ret;
173 }
int errno
static int pid
#define SUBDBG(format, args...)
Definition: papi_debug.h:63
long long ret
Definition: iozone.c:1346
Here is the caller graph for this function:

Variable Documentation

◆ _perf_event_uncore_vector

papi_vector_t _perf_event_uncore_vector

Definition at line 49 of file perf_event_uncore.c.

◆ our_cidx

int our_cidx
static

Definition at line 53 of file perf_event_uncore.c.

◆ uncore_native_event_table

struct native_event_table_t uncore_native_event_table

Definition at line 52 of file perf_event_uncore.c.