DPDK  17.11.0
rte_eventdev.h
Go to the documentation of this file.
1 /*
2  * BSD LICENSE
3  *
4  * Copyright 2016 Cavium, Inc.
5  * Copyright 2016 Intel Corporation.
6  * Copyright 2016 NXP.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  *
12  * * Redistributions of source code must retain the above copyright
13  * notice, this list of conditions and the following disclaimer.
14  * * Redistributions in binary form must reproduce the above copyright
15  * notice, this list of conditions and the following disclaimer in
16  * the documentation and/or other materials provided with the
17  * distribution.
18  * * Neither the name of Cavium, Inc nor the names of its
19  * contributors may be used to endorse or promote products derived
20  * from this software without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
25  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
26  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
27  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
28  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
29  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
30  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
32  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33  */
34 
35 #ifndef _RTE_EVENTDEV_H_
36 #define _RTE_EVENTDEV_H_
37 
237 #ifdef __cplusplus
238 extern "C" {
239 #endif
240 
241 #include <rte_common.h>
242 #include <rte_memory.h>
243 #include <rte_errno.h>
244 
245 struct rte_mbuf; /* we just use mbuf pointers; no need to include rte_mbuf.h */
246 
247 /* Event device capability bitmap flags */
248 #define RTE_EVENT_DEV_CAP_QUEUE_QOS (1ULL << 0)
249 
254 #define RTE_EVENT_DEV_CAP_EVENT_QOS (1ULL << 1)
255 
261 #define RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED (1ULL << 2)
262 
270 #define RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES (1ULL << 3)
271 
277 #define RTE_EVENT_DEV_CAP_BURST_MODE (1ULL << 4)
278 
286 /* Event device priority levels */
287 #define RTE_EVENT_DEV_PRIORITY_HIGHEST 0
288 
292 #define RTE_EVENT_DEV_PRIORITY_NORMAL 128
293 
297 #define RTE_EVENT_DEV_PRIORITY_LOWEST 255
298 
310 uint8_t
311 rte_event_dev_count(void);
312 
323 int
324 rte_event_dev_get_dev_id(const char *name);
325 
336 int
337 rte_event_dev_socket_id(uint8_t dev_id);
338 
343  const char *driver_name;
344  struct rte_device *dev;
375  int32_t max_num_events;
380  uint32_t event_dev_cap;
382 };
383 
399 int
400 rte_event_dev_info_get(uint8_t dev_id, struct rte_event_dev_info *dev_info);
401 
405 #define RTE_EVENT_DEV_ATTR_PORT_COUNT 0
406 
409 #define RTE_EVENT_DEV_ATTR_QUEUE_COUNT 1
410 
413 #define RTE_EVENT_DEV_ATTR_STARTED 2
414 
427 int
428 rte_event_dev_attr_get(uint8_t dev_id, uint32_t attr_id,
429  uint32_t *attr_value);
430 
431 
432 /* Event device configuration bitmap flags */
433 #define RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT (1ULL << 0)
434 
461  uint8_t nb_event_ports;
487  uint32_t event_dev_cfg;
489 };
490 
510 int
511 rte_event_dev_configure(uint8_t dev_id,
512  const struct rte_event_dev_config *dev_conf);
513 
514 
515 /* Event queue specific APIs */
516 
517 /* Event queue configuration bitmap flags */
518 #define RTE_EVENT_QUEUE_CFG_ALL_TYPES (1ULL << 0)
519 
524 #define RTE_EVENT_QUEUE_CFG_SINGLE_LINK (1ULL << 1)
525 
532  uint32_t nb_atomic_flows;
554  uint32_t event_queue_cfg;
556  uint8_t schedule_type;
561  uint8_t priority;
569 };
570 
593 int
594 rte_event_queue_default_conf_get(uint8_t dev_id, uint8_t queue_id,
595  struct rte_event_queue_conf *queue_conf);
596 
615 int
616 rte_event_queue_setup(uint8_t dev_id, uint8_t queue_id,
617  const struct rte_event_queue_conf *queue_conf);
618 
622 #define RTE_EVENT_QUEUE_ATTR_PRIORITY 0
623 
626 #define RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_FLOWS 1
627 
630 #define RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_ORDER_SEQUENCES 2
631 
634 #define RTE_EVENT_QUEUE_ATTR_EVENT_QUEUE_CFG 3
635 
638 #define RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE 4
639 
656 int
657 rte_event_queue_attr_get(uint8_t dev_id, uint8_t queue_id, uint32_t attr_id,
658  uint32_t *attr_value);
659 
660 /* Event port specific APIs */
661 
677  uint16_t dequeue_depth;
683  uint16_t enqueue_depth;
689 };
690 
713 int
714 rte_event_port_default_conf_get(uint8_t dev_id, uint8_t port_id,
715  struct rte_event_port_conf *port_conf);
716 
737 int
738 rte_event_port_setup(uint8_t dev_id, uint8_t port_id,
739  const struct rte_event_port_conf *port_conf);
740 
744 #define RTE_EVENT_PORT_ATTR_ENQ_DEPTH 0
745 
748 #define RTE_EVENT_PORT_ATTR_DEQ_DEPTH 1
749 
752 #define RTE_EVENT_PORT_ATTR_NEW_EVENT_THRESHOLD 2
753 
766 int
767 rte_event_port_attr_get(uint8_t dev_id, uint8_t port_id, uint32_t attr_id,
768  uint32_t *attr_value);
769 
786 int
787 rte_event_dev_start(uint8_t dev_id);
788 
796 void
797 rte_event_dev_stop(uint8_t dev_id);
798 
810 int
811 rte_event_dev_close(uint8_t dev_id);
812 
813 /* Scheduler type definitions */
814 #define RTE_SCHED_TYPE_ORDERED 0
815 
841 #define RTE_SCHED_TYPE_ATOMIC 1
842 
860 #define RTE_SCHED_TYPE_PARALLEL 2
861 
873 /* Event types to classify the event source */
874 #define RTE_EVENT_TYPE_ETHDEV 0x0
875 
876 #define RTE_EVENT_TYPE_CRYPTODEV 0x1
877 
878 #define RTE_EVENT_TYPE_TIMERDEV 0x2
879 
880 #define RTE_EVENT_TYPE_CPU 0x3
881 
884 #define RTE_EVENT_TYPE_ETH_RX_ADAPTER 0x4
885 
886 #define RTE_EVENT_TYPE_MAX 0x10
887 
889 /* Event enqueue operations */
890 #define RTE_EVENT_OP_NEW 0
891 
894 #define RTE_EVENT_OP_FORWARD 1
895 
902 #define RTE_EVENT_OP_RELEASE 2
903 
/** The generic rte_event structure.
 *
 * 16 bytes: WORD0 packs the event metadata, WORD1 carries the payload.
 * Field order and bit widths are ABI — do not reorder.
 */
struct rte_event {
	/* WORD0 — event attributes */
	union {
		uint64_t event;
		/**< Opaque 64-bit value for whole-word access. */
		struct {
			uint32_t flow_id:20;
			/**< Flow identifier; presumably targets atomic-flow
			 * ordering — see RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_FLOWS.
			 */
			uint32_t sub_event_type:8;
			/**< Sub-type refining event_type. */
			uint32_t event_type:4;
			/**< Event source class (RTE_EVENT_TYPE_*). */
			uint8_t op:2;
			/**< Enqueue operation (RTE_EVENT_OP_*). */
			uint8_t rsvd:4;
			/**< Reserved; keep zero for forward compatibility. */
			uint8_t sched_type:2;
			/**< Scheduling type (RTE_SCHED_TYPE_*). */
			uint8_t queue_id;
			/**< Target event queue identifier. */
			uint8_t priority;
			/**< Event priority (RTE_EVENT_DEV_PRIORITY_*). */
			uint8_t impl_opaque;
			/**< Implementation-private; apps must preserve it
			 * between dequeue and the matching enqueue.
			 */
		};
	};
	/* WORD1 — event payload */
	union {
		uint64_t u64;
		/**< Payload as a plain 64-bit integer. */
		void *event_ptr;
		/**< Payload as an opaque pointer. */
		struct rte_mbuf *mbuf;
		/**< Payload as an mbuf pointer (rte_mbuf is only forward
		 * declared in this header).
		 */
	};
};
1010 
1011 /* Ethdev Rx adapter capability bitmap flags */
1012 #define RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT 0x1
1013 
1016 #define RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ 0x2
1017 
1020 #define RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID 0x4
1021 
1047 int
1048 rte_event_eth_rx_adapter_caps_get(uint8_t dev_id, uint8_t eth_port_id,
1049  uint32_t *caps);
1050 
1051 struct rte_eventdev_driver;
1052 struct rte_eventdev_ops;
1053 struct rte_eventdev;
1054 
1055 typedef uint16_t (*event_enqueue_t)(void *port, const struct rte_event *ev);
1058 typedef uint16_t (*event_enqueue_burst_t)(void *port,
1059  const struct rte_event ev[], uint16_t nb_events);
1062 typedef uint16_t (*event_dequeue_t)(void *port, struct rte_event *ev,
1063  uint64_t timeout_ticks);
1066 typedef uint16_t (*event_dequeue_burst_t)(void *port, struct rte_event ev[],
1067  uint16_t nb_events, uint64_t timeout_ticks);
1070 #define RTE_EVENTDEV_NAME_MAX_LEN (64)
1071 
1080 struct rte_eventdev_data {
1081  int socket_id;
1083  uint8_t dev_id;
1085  uint8_t nb_queues;
1087  uint8_t nb_ports;
1089  void **ports;
1091  struct rte_event_port_conf *ports_cfg;
1093  struct rte_event_queue_conf *queues_cfg;
1095  uint16_t *links_map;
1097  void *dev_private;
1099  uint32_t event_dev_cap;
1101  struct rte_event_dev_config dev_conf;
1103  uint8_t service_inited;
1104  /* Service initialization state */
1105  uint32_t service_id;
1106  /* Service ID*/
1107 
1108  RTE_STD_C11
1109  uint8_t dev_started : 1;
1112  char name[RTE_EVENTDEV_NAME_MAX_LEN];
1115 
1117 struct rte_eventdev {
1118  event_enqueue_t enqueue;
1120  event_enqueue_burst_t enqueue_burst;
1122  event_enqueue_burst_t enqueue_new_burst;
1124  event_enqueue_burst_t enqueue_forward_burst;
1126  event_dequeue_t dequeue;
1128  event_dequeue_burst_t dequeue_burst;
1131  struct rte_eventdev_data *data;
1133  const struct rte_eventdev_ops *dev_ops;
1135  struct rte_device *dev;
1138  RTE_STD_C11
1139  uint8_t attached : 1;
1142 
1143 extern struct rte_eventdev *rte_eventdevs;
1146 static __rte_always_inline uint16_t
1147 __rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
1148  const struct rte_event ev[], uint16_t nb_events,
1149  const event_enqueue_burst_t fn)
1150 {
1151  const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1152 
1153 #ifdef RTE_LIBRTE_EVENTDEV_DEBUG
1154  if (dev_id >= RTE_EVENT_MAX_DEVS || !rte_eventdevs[dev_id].attached) {
1155  rte_errno = -EINVAL;
1156  return 0;
1157  }
1158 
1159  if (port_id >= dev->data->nb_ports) {
1160  rte_errno = -EINVAL;
1161  return 0;
1162  }
1163 #endif
1164  /*
1165  * Allow zero cost non burst mode routine invocation if application
1166  * requests nb_events as const one
1167  */
1168  if (nb_events == 1)
1169  return (*dev->enqueue)(dev->data->ports[port_id], ev);
1170  else
1171  return fn(dev->data->ports[port_id], ev, nb_events);
1172 }
1173 
1216 static inline uint16_t
1217 rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
1218  const struct rte_event ev[], uint16_t nb_events)
1219 {
1220  const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1221 
1222  return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
1223  dev->enqueue_burst);
1224 }
1225 
1265 static inline uint16_t
1266 rte_event_enqueue_new_burst(uint8_t dev_id, uint8_t port_id,
1267  const struct rte_event ev[], uint16_t nb_events)
1268 {
1269  const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1270 
1271  return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
1272  dev->enqueue_new_burst);
1273 }
1274 
1314 static inline uint16_t
1315 rte_event_enqueue_forward_burst(uint8_t dev_id, uint8_t port_id,
1316  const struct rte_event ev[], uint16_t nb_events)
1317 {
1318  const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1319 
1320  return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
1321  dev->enqueue_forward_burst);
1322 }
1323 
1349 int
1350 rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns,
1351  uint64_t *timeout_ticks);
1352 
1419 static inline uint16_t
1420 rte_event_dequeue_burst(uint8_t dev_id, uint8_t port_id, struct rte_event ev[],
1421  uint16_t nb_events, uint64_t timeout_ticks)
1422 {
1423  struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1424 
1425 #ifdef RTE_LIBRTE_EVENTDEV_DEBUG
1426  if (dev_id >= RTE_EVENT_MAX_DEVS || !rte_eventdevs[dev_id].attached) {
1427  rte_errno = -EINVAL;
1428  return 0;
1429  }
1430 
1431  if (port_id >= dev->data->nb_ports) {
1432  rte_errno = -EINVAL;
1433  return 0;
1434  }
1435 #endif
1436 
1437  /*
1438  * Allow zero cost non burst mode routine invocation if application
1439  * requests nb_events as const one
1440  */
1441  if (nb_events == 1)
1442  return (*dev->dequeue)(
1443  dev->data->ports[port_id], ev, timeout_ticks);
1444  else
1445  return (*dev->dequeue_burst)(
1446  dev->data->ports[port_id], ev, nb_events,
1447  timeout_ticks);
1448 }
1449 
1510 int
1511 rte_event_port_link(uint8_t dev_id, uint8_t port_id,
1512  const uint8_t queues[], const uint8_t priorities[],
1513  uint16_t nb_links);
1514 
1554 int
1555 rte_event_port_unlink(uint8_t dev_id, uint8_t port_id,
1556  uint8_t queues[], uint16_t nb_unlinks);
1557 
1585 int
1586 rte_event_port_links_get(uint8_t dev_id, uint8_t port_id,
1587  uint8_t queues[], uint8_t priorities[]);
1588 
1604 int
1605 rte_event_dev_service_id_get(uint8_t dev_id, uint32_t *service_id);
1606 
1620 int
1621 rte_event_dev_dump(uint8_t dev_id, FILE *f);
1622 
1624 #define RTE_EVENT_DEV_XSTATS_NAME_SIZE 64
1625 
1630  RTE_EVENT_DEV_XSTATS_DEVICE,
1631  RTE_EVENT_DEV_XSTATS_PORT,
1632  RTE_EVENT_DEV_XSTATS_QUEUE,
1633 };
1634 
1642  char name[RTE_EVENT_DEV_XSTATS_NAME_SIZE];
1643 };
1644 
1677 int
1678 rte_event_dev_xstats_names_get(uint8_t dev_id,
1679  enum rte_event_dev_xstats_mode mode,
1680  uint8_t queue_port_id,
1681  struct rte_event_dev_xstats_name *xstats_names,
1682  unsigned int *ids,
1683  unsigned int size);
1684 
1711 int
1712 rte_event_dev_xstats_get(uint8_t dev_id,
1713  enum rte_event_dev_xstats_mode mode,
1714  uint8_t queue_port_id,
1715  const unsigned int ids[],
1716  uint64_t values[], unsigned int n);
1717 
1734 uint64_t
1735 rte_event_dev_xstats_by_name_get(uint8_t dev_id, const char *name,
1736  unsigned int *id);
1737 
1758 int
1759 rte_event_dev_xstats_reset(uint8_t dev_id,
1760  enum rte_event_dev_xstats_mode mode,
1761  int16_t queue_port_id,
1762  const uint32_t ids[],
1763  uint32_t nb_ids);
1764 
1765 #ifdef __cplusplus
1766 }
1767 #endif
1768 
1769 #endif /* _RTE_EVENTDEV_H_ */
void rte_event_dev_stop(uint8_t dev_id)
uint32_t min_dequeue_timeout_ns
Definition: rte_eventdev.h:345
int rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns, uint64_t *timeout_ticks)
static uint16_t rte_event_enqueue_new_burst(uint8_t dev_id, uint8_t port_id, const struct rte_event ev[], uint16_t nb_events)
#define __rte_always_inline
Definition: rte_common.h:137
#define RTE_EVENT_DEV_XSTATS_NAME_SIZE
int rte_event_eth_rx_adapter_caps_get(uint8_t dev_id, uint8_t eth_port_id, uint32_t *caps)
struct rte_eventdev * rte_eventdevs
uint64_t u64
int rte_event_dev_attr_get(uint8_t dev_id, uint32_t attr_id, uint32_t *attr_value)
uint8_t priority
Definition: rte_eventdev.h:981
struct rte_device * dev
Definition: rte_eventdev.h:344
int rte_event_queue_setup(uint8_t dev_id, uint8_t queue_id, const struct rte_event_queue_conf *queue_conf)
int rte_event_queue_default_conf_get(uint8_t dev_id, uint8_t queue_id, struct rte_event_queue_conf *queue_conf)
int rte_event_port_default_conf_get(uint8_t dev_id, uint8_t port_id, struct rte_event_port_conf *port_conf)
uint32_t dequeue_timeout_ns
Definition: rte_eventdev.h:349
#define rte_errno
Definition: rte_errno.h:58
uint32_t event_dev_cap
Definition: rte_eventdev.h:380
int rte_event_dev_socket_id(uint8_t dev_id)
uint32_t max_event_port_enqueue_depth
Definition: rte_eventdev.h:370
struct rte_mbuf __rte_cache_aligned
uint64_t rte_event_dev_xstats_by_name_get(uint8_t dev_id, const char *name, unsigned int *id)
int rte_event_dev_info_get(uint8_t dev_id, struct rte_event_dev_info *dev_info)
uint32_t nb_atomic_order_sequences
Definition: rte_eventdev.h:540
uint32_t nb_event_port_dequeue_depth
Definition: rte_eventdev.h:471
void * event_ptr
int rte_event_port_link(uint8_t dev_id, uint8_t port_id, const uint8_t queues[], const uint8_t priorities[], uint16_t nb_links)
int rte_event_queue_attr_get(uint8_t dev_id, uint8_t queue_id, uint32_t attr_id, uint32_t *attr_value)
rte_event_dev_xstats_mode
int rte_event_port_links_get(uint8_t dev_id, uint8_t port_id, uint8_t queues[], uint8_t priorities[])
static uint16_t rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id, const struct rte_event ev[], uint16_t nb_events)
int rte_event_dev_start(uint8_t dev_id)
int rte_event_port_setup(uint8_t dev_id, uint8_t port_id, const struct rte_event_port_conf *port_conf)
uint8_t max_event_port_dequeue_depth
Definition: rte_eventdev.h:365
int rte_event_port_attr_get(uint8_t dev_id, uint8_t port_id, uint32_t attr_id, uint32_t *attr_value)
uint32_t nb_event_port_enqueue_depth
Definition: rte_eventdev.h:479
const char * driver_name
Definition: rte_eventdev.h:343
uint8_t impl_opaque
Definition: rte_eventdev.h:991
uint8_t queue_id
Definition: rte_eventdev.h:974
static uint16_t rte_event_dequeue_burst(uint8_t dev_id, uint8_t port_id, struct rte_event ev[], uint16_t nb_events, uint64_t timeout_ticks)
int rte_event_dev_get_dev_id(const char *name)
uint8_t rte_event_dev_count(void)
int rte_event_dev_close(uint8_t dev_id)
int rte_event_dev_xstats_get(uint8_t dev_id, enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id, const unsigned int ids[], uint64_t values[], unsigned int n)
int rte_event_dev_xstats_reset(uint8_t dev_id, enum rte_event_dev_xstats_mode mode, int16_t queue_port_id, const uint32_t ids[], uint32_t nb_ids)
#define RTE_STD_C11
Definition: rte_common.h:64
uint32_t dequeue_timeout_ns
Definition: rte_eventdev.h:440
int rte_event_dev_service_id_get(uint8_t dev_id, uint32_t *service_id)
static uint16_t rte_event_enqueue_forward_burst(uint8_t dev_id, uint8_t port_id, const struct rte_event ev[], uint16_t nb_events)
int rte_event_dev_dump(uint8_t dev_id, FILE *f)
int32_t new_event_threshold
Definition: rte_eventdev.h:664
uint8_t max_event_priority_levels
Definition: rte_eventdev.h:359
struct rte_mbuf * mbuf
uint32_t max_dequeue_timeout_ns
Definition: rte_eventdev.h:347
uint32_t max_event_queue_flows
Definition: rte_eventdev.h:353
int rte_event_dev_configure(uint8_t dev_id, const struct rte_event_dev_config *dev_conf)
uint8_t max_event_queues
Definition: rte_eventdev.h:351
uint8_t max_event_queue_priority_levels
Definition: rte_eventdev.h:355
int rte_event_port_unlink(uint8_t dev_id, uint8_t port_id, uint8_t queues[], uint16_t nb_unlinks)
int rte_event_dev_xstats_names_get(uint8_t dev_id, enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id, struct rte_event_dev_xstats_name *xstats_names, unsigned int *ids, unsigned int size)
uint32_t nb_event_queue_flows
Definition: rte_eventdev.h:466