#ifndef MTHCA_ABI_USER_H
#define MTHCA_ABI_USER_H

#include <linux/types.h>

/*
 * Increment this value if any changes that break userspace ABI
 * compatibility are made.
 */
#define MTHCA_UVERBS_ABI_VERSION	1

/*
 * Make sure that all structs defined in this file remain laid out so
 * that they pack the same way on 32-bit and 64-bit architectures (to
 * avoid incompatibility between 32-bit userspace and 64-bit kernels).
 * In particular do not use pointer types -- pass pointers in __u64
 * instead.
 */
struct mthca_alloc_ucontext_resp {
	__u32 qp_tab_size;
	__u32 uarc_size;
};

struct mthca_alloc_pd_resp {
	__u32 pdn;
	__u32 reserved;
};

/*
 * Mark the memory region with a DMA attribute that causes
 * in-flight DMA to be flushed when the region is written to:
 */
#define MTHCA_MR_DMASYNC	0x1

struct mthca_reg_mr {
	__u32 mr_attrs;
	__u32 reserved;
};

struct mthca_create_cq {
	__u32 lkey;
	__u32 pdn;
	__aligned_u64 arm_db_page;
	__aligned_u64 set_db_page;
	__u32 arm_db_index;
	__u32 set_db_index;
};

struct mthca_create_cq_resp {
	__u32 cqn;
	__u32 reserved;
};

struct mthca_resize_cq {
	__u32 lkey;
	__u32 reserved;
};

struct mthca_create_srq {
	__u32 lkey;
	__u32 db_index;
	__aligned_u64 db_page;
};

struct mthca_create_srq_resp {
	__u32 srqn;
	__u32 reserved;
};

struct mthca_create_qp {
	__u32 lkey;
	__u32 reserved;
	__aligned_u64 sq_db_page;
	__aligned_u64 rq_db_page;
	__u32 sq_db_index;
	__u32 rq_db_index;
};

#endif /* MTHCA_ABI_USER_H */

#ifndef __QEDR_USER_H__
#define __QEDR_USER_H__

#include <linux/types.h>

#define QEDR_ABI_VERSION		(8)

/* user kernel communication data structures. */
struct qedr_alloc_ucontext_resp {
	__aligned_u64 db_pa;
	__u32 db_size;
	__u32 max_send_wr;
	__u32 max_recv_wr;
	__u32 max_srq_wr;
	__u32 sges_per_send_wr;
	__u32 sges_per_recv_wr;
	__u32 sges_per_srq_wr;
	__u32 max_cqes;
	__u8 dpm_enabled;
	__u8 wids_enabled;
	__u16 wid_count;
	__u32 reserved;
};

struct qedr_alloc_pd_ureq {
	__aligned_u64 rsvd1;
};

struct qedr_alloc_pd_uresp {
	__u32 pd_id;
	__u32 reserved;
};

struct qedr_create_cq_ureq {
	__aligned_u64 addr;
	__aligned_u64 len;
};

struct qedr_create_cq_uresp {
	__u32 db_offset;
	__u16 icid;
	__u16 reserved;
};

struct qedr_create_qp_ureq {
	__u32 qp_handle_hi;
	__u32 qp_handle_lo;

	/* SQ */
	/* user space virtual address of SQ buffer */
	__aligned_u64 sq_addr;
	/* length of SQ buffer */
	__aligned_u64 sq_len;

	/* RQ */
	/* user space virtual address of RQ buffer */
	__aligned_u64 rq_addr;
	/* length of RQ buffer */
	__aligned_u64 rq_len;
};

struct qedr_create_qp_uresp {
	__u32 qp_id;
	__u32 atomic_supported;

	/* SQ */
	__u32 sq_db_offset;
	__u16 sq_icid;

	/* RQ */
	__u32 rq_db_offset;
	__u16 rq_icid;

	__u32 rq_db2_offset;
	__u32 reserved;
};

struct qedr_create_srq_ureq {
	/* user space virtual address of producer pair */
	__aligned_u64 prod_pair_addr;
	/* user space virtual address of SRQ buffer */
	__aligned_u64 srq_addr;
	/* length of SRQ buffer */
	__aligned_u64 srq_len;
};

struct qedr_create_srq_uresp {
	__u16 srq_id;
	__u16 reserved0;
	__u32 reserved1;
};

#endif /* __QEDR_USER_H__ */

#ifndef _RDMA_NETLINK_H
#define _RDMA_NETLINK_H

#include <linux/types.h>

enum {
	RDMA_NL_RDMA_CM = 1,
	RDMA_NL_IWCM,
	RDMA_NL_RSVD,
	RDMA_NL_LS,	/* RDMA Local Services */
	RDMA_NL_NLDEV,	/* RDMA device interface */
	RDMA_NL_NUM_CLIENTS
};

enum {
	RDMA_NL_GROUP_CM = 1,
	RDMA_NL_GROUP_IWPM,
	RDMA_NL_GROUP_LS,
	RDMA_NL_NUM_GROUPS
};

#define RDMA_NL_GET_CLIENT(type) ((type & (((1 << 6) - 1) << 10)) >> 10)
#define RDMA_NL_GET_OP(type) (type & ((1 << 10) - 1))
#define RDMA_NL_GET_TYPE(client, op) ((client << 10) + op)
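/*
 * Illustrative sketch (not part of the header): composing and
 * decomposing an RDMA netlink message type with the macros above.
 * The client index occupies bits 10..15 and the op bits 0..9.  The
 * guard macro and function name are made up for the example.
 */
#ifdef RDMA_NL_TYPE_EXAMPLE	/* hypothetical guard, never defined */
#include <assert.h>

static inline void rdma_nl_type_demo(void)
{
	/* Pack client RDMA_NL_NLDEV and op 5 (the nldev PORT_GET op). */
	__u16 type = RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, 5);

	/* Round-trip through the accessor macros. */
	assert(RDMA_NL_GET_CLIENT(type) == RDMA_NL_NLDEV);
	assert(RDMA_NL_GET_OP(type) == 5);
}
#endif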
enum {
	RDMA_NL_RDMA_CM_ID_STATS = 0,
	RDMA_NL_RDMA_CM_NUM_OPS
};

enum {
	RDMA_NL_RDMA_CM_ATTR_SRC_ADDR = 1,
	RDMA_NL_RDMA_CM_ATTR_DST_ADDR,
	RDMA_NL_RDMA_CM_NUM_ATTR,
};

/* iwarp port mapper op-codes */
enum {
	RDMA_NL_IWPM_REG_PID = 0,
	RDMA_NL_IWPM_ADD_MAPPING,
	RDMA_NL_IWPM_QUERY_MAPPING,
	RDMA_NL_IWPM_REMOVE_MAPPING,
	RDMA_NL_IWPM_REMOTE_INFO,
	RDMA_NL_IWPM_HANDLE_ERR,
	RDMA_NL_IWPM_MAPINFO,
	RDMA_NL_IWPM_MAPINFO_NUM,
	RDMA_NL_IWPM_NUM_OPS
};

struct rdma_cm_id_stats {
	__u32 qp_num;
	__u32 bound_dev_if;
	__u32 port_space;
	__s32 pid;
	__u8 cm_state;
	__u8 node_type;
	__u8 port_num;
	__u8 qp_type;
};

enum {
	IWPM_NLA_REG_PID_UNSPEC = 0,
	IWPM_NLA_REG_PID_SEQ,
	IWPM_NLA_REG_IF_NAME,
	IWPM_NLA_REG_IBDEV_NAME,
	IWPM_NLA_REG_ULIB_NAME,
	IWPM_NLA_REG_PID_MAX
};

enum {
	IWPM_NLA_RREG_PID_UNSPEC = 0,
	IWPM_NLA_RREG_PID_SEQ,
	IWPM_NLA_RREG_IBDEV_NAME,
	IWPM_NLA_RREG_ULIB_NAME,
	IWPM_NLA_RREG_ULIB_VER,
	IWPM_NLA_RREG_PID_ERR,
	IWPM_NLA_RREG_PID_MAX
};

enum {
	IWPM_NLA_MANAGE_MAPPING_UNSPEC = 0,
	IWPM_NLA_MANAGE_MAPPING_SEQ,
	IWPM_NLA_MANAGE_ADDR,
	IWPM_NLA_MANAGE_MAPPED_LOC_ADDR,
	IWPM_NLA_RMANAGE_MAPPING_ERR,
	IWPM_NLA_RMANAGE_MAPPING_MAX
};

#define IWPM_NLA_MANAGE_MAPPING_MAX 3
#define IWPM_NLA_QUERY_MAPPING_MAX  4
#define IWPM_NLA_MAPINFO_SEND_MAX   3

enum {
	IWPM_NLA_QUERY_MAPPING_UNSPEC = 0,
	IWPM_NLA_QUERY_MAPPING_SEQ,
	IWPM_NLA_QUERY_LOCAL_ADDR,
	IWPM_NLA_QUERY_REMOTE_ADDR,
	IWPM_NLA_RQUERY_MAPPED_LOC_ADDR,
	IWPM_NLA_RQUERY_MAPPED_REM_ADDR,
	IWPM_NLA_RQUERY_MAPPING_ERR,
	IWPM_NLA_RQUERY_MAPPING_MAX
};

enum {
	IWPM_NLA_MAPINFO_REQ_UNSPEC = 0,
	IWPM_NLA_MAPINFO_ULIB_NAME,
	IWPM_NLA_MAPINFO_ULIB_VER,
	IWPM_NLA_MAPINFO_REQ_MAX
};

enum {
	IWPM_NLA_MAPINFO_UNSPEC = 0,
	IWPM_NLA_MAPINFO_LOCAL_ADDR,
	IWPM_NLA_MAPINFO_MAPPED_ADDR,
	IWPM_NLA_MAPINFO_MAX
};

enum {
	IWPM_NLA_MAPINFO_NUM_UNSPEC = 0,
	IWPM_NLA_MAPINFO_SEQ,
	IWPM_NLA_MAPINFO_SEND_NUM,
	IWPM_NLA_MAPINFO_ACK_NUM,
	IWPM_NLA_MAPINFO_NUM_MAX
};

enum {
	IWPM_NLA_ERR_UNSPEC = 0,
	IWPM_NLA_ERR_SEQ,
	IWPM_NLA_ERR_CODE,
	IWPM_NLA_ERR_MAX
};

/*
 * Local service operations:
 *   RESOLVE - The client requests the local service to resolve a path.
 *   SET_TIMEOUT - The local service requests the client to set the timeout.
 *   IP_RESOLVE - The client requests the local service to resolve an IP to GID.
 */
enum {
	RDMA_NL_LS_OP_RESOLVE = 0,
	RDMA_NL_LS_OP_SET_TIMEOUT,
	RDMA_NL_LS_OP_IP_RESOLVE,
	RDMA_NL_LS_NUM_OPS
};

/* Local service netlink message flags */
#define RDMA_NL_LS_F_ERR	0x0100	/* Failed response */

/*
 * Local service resolve operation family header.
 * The layout for the resolve operation:
 *    nlmsg header
 *    family header
 *    attributes
 */
/*
 * Local service path use:
 * Specify how the path(s) will be used.
 *   ALL - For connected CM operation (6 pathrecords)
 *   UNIDIRECTIONAL - For unidirectional UD (1 pathrecord)
 *   GMP - For miscellaneous GMP like operation (at least 1 reversible
 *         pathrecord)
 */
enum {
	LS_RESOLVE_PATH_USE_ALL = 0,
	LS_RESOLVE_PATH_USE_UNIDIRECTIONAL,
	LS_RESOLVE_PATH_USE_GMP,
	LS_RESOLVE_PATH_USE_MAX
};

#define LS_DEVICE_NAME_MAX 64

struct rdma_ls_resolve_header {
	__u8 device_name[LS_DEVICE_NAME_MAX];
	__u8 port_num;
	__u8 path_use;
};

struct rdma_ls_ip_resolve_header {
	__u32 ifindex;
};

/* Local service attribute type */
#define RDMA_NLA_F_MANDATORY	(1 << 13)
#define RDMA_NLA_TYPE_MASK	(~(NLA_F_NESTED | NLA_F_NET_BYTEORDER | \
				  RDMA_NLA_F_MANDATORY))

/*
 * Local service attributes:
 *   Attr Name       Size                       Byte order
 *   -----------------------------------------------------
 *   PATH_RECORD     struct ib_path_rec_data
 *   TIMEOUT         u32                        cpu
 *   SERVICE_ID      u64                        cpu
 *   DGID            u8[16]                     BE
 *   SGID            u8[16]                     BE
 *   TCLASS          u8
 *   PKEY            u16                        cpu
 *   QOS_CLASS       u16                        cpu
 *   IPV4            u32                        BE
 *   IPV6            u8[16]                     BE
 */
enum {
	LS_NLA_TYPE_UNSPEC = 0,
	LS_NLA_TYPE_PATH_RECORD,
	LS_NLA_TYPE_TIMEOUT,
	LS_NLA_TYPE_SERVICE_ID,
	LS_NLA_TYPE_DGID,
	LS_NLA_TYPE_SGID,
	LS_NLA_TYPE_TCLASS,
	LS_NLA_TYPE_PKEY,
	LS_NLA_TYPE_QOS_CLASS,
	LS_NLA_TYPE_IPV4,
	LS_NLA_TYPE_IPV6,
	LS_NLA_TYPE_MAX
};

/* Local service DGID/SGID attribute: big endian */
struct rdma_nla_ls_gid {
	__u8 gid[16];
};
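/*
 * Illustrative sketch (not part of the header): recovering the
 * LS_NLA_TYPE_* value and the mandatory flag from a received nlattr.
 * Assumes <linux/netlink.h> for struct nlattr and the NLA_F_* bits;
 * the helper name and guard macro are made up.
 */
#ifdef RDMA_LS_ATTR_EXAMPLE	/* hypothetical guard, never defined */
#include <linux/netlink.h>
#include <stdbool.h>

static inline bool ls_attr_is_mandatory(const struct nlattr *nla, int *type)
{
	/* Strip the flag bits to recover the LS_NLA_TYPE_* value... */
	*type = nla->nla_type & RDMA_NLA_TYPE_MASK;
	/* ...and report whether the sender marked it mandatory. */
	return nla->nla_type & RDMA_NLA_F_MANDATORY;
}
#endif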
enum rdma_nldev_command {
	RDMA_NLDEV_CMD_UNSPEC,
	RDMA_NLDEV_CMD_GET, /* can dump */
	/* 2 - 4 are free to use */
	RDMA_NLDEV_CMD_PORT_GET = 5, /* can dump */
	/* 6 - 8 are free to use */
	RDMA_NLDEV_CMD_RES_GET = 9, /* can dump */
	RDMA_NLDEV_CMD_RES_QP_GET, /* can dump */
	RDMA_NLDEV_CMD_RES_CM_ID_GET, /* can dump */
	RDMA_NLDEV_CMD_RES_CQ_GET, /* can dump */
	RDMA_NLDEV_CMD_RES_MR_GET, /* can dump */
	RDMA_NLDEV_CMD_RES_PD_GET, /* can dump */
	RDMA_NLDEV_NUM_OPS
};

enum {
	RDMA_NLDEV_ATTR_ENTRY_STRLEN = 16,
};

enum rdma_nldev_print_type {
	RDMA_NLDEV_PRINT_TYPE_UNSPEC,
	RDMA_NLDEV_PRINT_TYPE_HEX,
};

enum rdma_nldev_attr {
	/* don't change the order or add anything between, this is ABI! */
	RDMA_NLDEV_ATTR_UNSPEC,

	/* Pad attribute for 64b alignment */
	RDMA_NLDEV_ATTR_PAD = RDMA_NLDEV_ATTR_UNSPEC,

	/* Identifier for ib_device */
	RDMA_NLDEV_ATTR_DEV_INDEX,		/* u32 */

	RDMA_NLDEV_ATTR_DEV_NAME,		/* string */
	/*
	 * Device index together with port index are identifiers
	 * for port/link properties.
	 *
	 * For RDMA_NLDEV_CMD_GET command, port index will return number
	 * of available ports in ib_device, while for port specific operations,
	 * it will be real port index as it appears in sysfs. Port index follows
	 * sysfs notation and starts from 1 for the first port.
	 */
	RDMA_NLDEV_ATTR_PORT_INDEX,		/* u32 */

	/*
	 * Device and port capabilities
	 */
	RDMA_NLDEV_ATTR_CAP_FLAGS,		/* u64 */

	/*
	 * FW version
	 */
	RDMA_NLDEV_ATTR_FW_VERSION,		/* string */

	/*
	 * Node GUID (in host byte order) associated with the RDMA device.
	 */
	RDMA_NLDEV_ATTR_NODE_GUID,		/* u64 */

	/*
	 * System image GUID (in host byte order) associated with
	 * this RDMA device and other devices which are part of a
	 * single system.
	 */
	RDMA_NLDEV_ATTR_SYS_IMAGE_GUID,		/* u64 */

	/*
	 * Subnet prefix (in host byte order)
	 */
	RDMA_NLDEV_ATTR_SUBNET_PREFIX,		/* u64 */

	/*
	 * Local Identifier (LID),
	 * According to the IB specification, it is a 16-bit address assigned
	 * by the Subnet Manager. Extended to be 32-bit for OmniPath users.
	 */
	RDMA_NLDEV_ATTR_LID,			/* u32 */
	RDMA_NLDEV_ATTR_SM_LID,			/* u32 */

	/*
	 * LID mask control (LMC)
	 */
	RDMA_NLDEV_ATTR_LMC,			/* u8 */

	RDMA_NLDEV_ATTR_PORT_STATE,		/* u8 */
	RDMA_NLDEV_ATTR_PORT_PHYS_STATE,	/* u8 */
	RDMA_NLDEV_ATTR_DEV_NODE_TYPE,		/* u8 */

	RDMA_NLDEV_ATTR_RES_SUMMARY,		/* nested table */
	RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY,	/* nested table */
	RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_NAME,	/* string */
	RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_CURR,	/* u64 */

	RDMA_NLDEV_ATTR_RES_QP,			/* nested table */
	RDMA_NLDEV_ATTR_RES_QP_ENTRY,		/* nested table */
	/*
	 * Local QPN
	 */
	RDMA_NLDEV_ATTR_RES_LQPN,		/* u32 */
	/*
	 * Remote QPN,
	 * Applicable for RC and UC only IBTA 11.2.5.3 QUERY QUEUE PAIR
	 */
	RDMA_NLDEV_ATTR_RES_RQPN,		/* u32 */
	/*
	 * Receive Queue PSN,
	 * Applicable for RC and UC only 11.2.5.3 QUERY QUEUE PAIR
	 */
	RDMA_NLDEV_ATTR_RES_RQ_PSN,		/* u32 */
	/*
	 * Send Queue PSN
	 */
	RDMA_NLDEV_ATTR_RES_SQ_PSN,		/* u32 */
	RDMA_NLDEV_ATTR_RES_PATH_MIG_STATE,	/* u8 */
	/*
	 * QP types as visible to RDMA/core, the reserved QPT
	 * are not exported through this interface.
	 */
	RDMA_NLDEV_ATTR_RES_TYPE,		/* u8 */
	RDMA_NLDEV_ATTR_RES_STATE,		/* u8 */
	/*
	 * Process ID which created object,
	 * in case of kernel origin, PID won't exist.
	 */
	RDMA_NLDEV_ATTR_RES_PID,		/* u32 */
	/*
	 * The name of the task that created the resource.
	 * It will exist only for kernel objects.
	 * For user created objects, the user is supposed
	 * to read the /proc/PID/comm file instead.
	 */
	RDMA_NLDEV_ATTR_RES_KERN_NAME,		/* string */

	RDMA_NLDEV_ATTR_RES_CM_ID,		/* nested table */
	RDMA_NLDEV_ATTR_RES_CM_ID_ENTRY,	/* nested table */
	/*
	 * rdma_cm_id port space.
	 */
	RDMA_NLDEV_ATTR_RES_PS,			/* u32 */
	/*
	 * Source and destination socket addresses
	 */
	RDMA_NLDEV_ATTR_RES_SRC_ADDR,		/* __kernel_sockaddr_storage */
	RDMA_NLDEV_ATTR_RES_DST_ADDR,		/* __kernel_sockaddr_storage */

	RDMA_NLDEV_ATTR_RES_CQ,			/* nested table */
	RDMA_NLDEV_ATTR_RES_CQ_ENTRY,		/* nested table */
	RDMA_NLDEV_ATTR_RES_CQE,		/* u32 */
	RDMA_NLDEV_ATTR_RES_USECNT,		/* u64 */
	RDMA_NLDEV_ATTR_RES_POLL_CTX,		/* u8 */

	RDMA_NLDEV_ATTR_RES_MR,			/* nested table */
	RDMA_NLDEV_ATTR_RES_MR_ENTRY,		/* nested table */
	RDMA_NLDEV_ATTR_RES_RKEY,		/* u32 */
	RDMA_NLDEV_ATTR_RES_LKEY,		/* u32 */
	RDMA_NLDEV_ATTR_RES_IOVA,		/* u64 */
	RDMA_NLDEV_ATTR_RES_MRLEN,		/* u64 */

	RDMA_NLDEV_ATTR_RES_PD,			/* nested table */
	RDMA_NLDEV_ATTR_RES_PD_ENTRY,		/* nested table */
	RDMA_NLDEV_ATTR_RES_LOCAL_DMA_LKEY,	/* u32 */
	RDMA_NLDEV_ATTR_RES_UNSAFE_GLOBAL_RKEY,	/* u32 */

	/*
	 * Provides logical name and index of netdevice which is
	 * connected to physical port. This information is relevant
	 * for RoCE and iWARP.
	 *
	 * The netdevices which are associated with containers are
	 * supposed to be exported together with GID table once it
	 * will be exposed through the netlink. Because the
	 * associated netdevices are properties of GIDs.
	 */
	RDMA_NLDEV_ATTR_NDEV_INDEX,		/* u32 */
	RDMA_NLDEV_ATTR_NDEV_NAME,		/* string */

	/*
	 * driver-specific attributes.
	 */
	RDMA_NLDEV_ATTR_DRIVER,			/* nested table */
	RDMA_NLDEV_ATTR_DRIVER_ENTRY,		/* nested table */
	RDMA_NLDEV_ATTR_DRIVER_STRING,		/* string */
	/*
	 * u8 values from enum rdma_nldev_print_type
	 */
	RDMA_NLDEV_ATTR_DRIVER_PRINT_TYPE,	/* u8 */
	RDMA_NLDEV_ATTR_DRIVER_S32,		/* s32 */
	RDMA_NLDEV_ATTR_DRIVER_U32,		/* u32 */
	RDMA_NLDEV_ATTR_DRIVER_S64,		/* s64 */
	RDMA_NLDEV_ATTR_DRIVER_U64,		/* u64 */

	/*
	 * Always the end
	 */
	RDMA_NLDEV_ATTR_MAX
};
#endif /* _RDMA_NETLINK_H */
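/*
 * Illustrative sketch (not part of the header): preparing the netlink
 * message header for a device dump on the RDMA_NL_NLDEV client.  Each
 * response message then carries RDMA_NLDEV_ATTR_DEV_INDEX and
 * RDMA_NLDEV_ATTR_DEV_NAME.  Socket setup and attribute parsing are
 * omitted; the guard macro and function name are made up.
 */
#ifdef RDMA_NLDEV_EXAMPLE	/* hypothetical guard, never defined */
#include <linux/netlink.h>
#include <string.h>

static inline void nldev_fill_get_hdr(struct nlmsghdr *nlh)
{
	memset(nlh, 0, sizeof(*nlh));
	nlh->nlmsg_len = NLMSG_LENGTH(0);	/* no payload in the request */
	nlh->nlmsg_type = RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET);
	nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
}
#endif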
/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR Linux-OpenIB) */
/*
 * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef MLX4_ABI_USER_H
#define MLX4_ABI_USER_H

#include <linux/types.h>

/*
 * Increment this value if any changes that break userspace ABI
 * compatibility are made.
 */
#define MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION	3
#define MLX4_IB_UVERBS_ABI_VERSION		4

/*
 * Make sure that all structs defined in this file remain laid out so
 * that they pack the same way on 32-bit and 64-bit architectures (to
 * avoid incompatibility between 32-bit userspace and 64-bit kernels).
 * In particular do not use pointer types -- pass pointers in __u64
 * instead.
 */

struct mlx4_ib_alloc_ucontext_resp_v3 {
	__u32 qp_tab_size;
	__u16 bf_reg_size;
	__u16 bf_regs_per_page;
};

enum {
	MLX4_USER_DEV_CAP_LARGE_CQE = 1L << 0,
};

struct mlx4_ib_alloc_ucontext_resp {
	__u32 dev_caps;
	__u32 qp_tab_size;
	__u16 bf_reg_size;
	__u16 bf_regs_per_page;
	__u32 cqe_size;
};

struct mlx4_ib_alloc_pd_resp {
	__u32 pdn;
	__u32 reserved;
};

struct mlx4_ib_create_cq {
	__aligned_u64 buf_addr;
	__aligned_u64 db_addr;
};

struct mlx4_ib_create_cq_resp {
	__u32 cqn;
	__u32 reserved;
};

struct mlx4_ib_resize_cq {
	__aligned_u64 buf_addr;
};

struct mlx4_ib_create_srq {
	__aligned_u64 buf_addr;
	__aligned_u64 db_addr;
};

struct mlx4_ib_create_srq_resp {
	__u32 srqn;
	__u32 reserved;
};

struct mlx4_ib_create_qp_rss {
	__aligned_u64 rx_hash_fields_mask; /* Use enum mlx4_ib_rx_hash_fields */
	__u8 rx_hash_function; /* Use enum mlx4_ib_rx_hash_function_flags */
	__u8 reserved[7];
	__u8 rx_hash_key[40];
	__u32 comp_mask;
	__u32 reserved1;
};

struct mlx4_ib_create_qp {
	__aligned_u64 buf_addr;
	__aligned_u64 db_addr;
	__u8 log_sq_bb_count;
	__u8 log_sq_stride;
	__u8 sq_no_prefetch;
	__u8 reserved;
	__u32 inl_recv_sz;
};

struct mlx4_ib_create_wq {
	__aligned_u64 buf_addr;
	__aligned_u64 db_addr;
	__u8 log_range_size;
	__u8 reserved[3];
	__u32 comp_mask;
};

struct mlx4_ib_modify_wq {
	__u32 comp_mask;
	__u32 reserved;
};

struct mlx4_ib_create_rwq_ind_tbl_resp {
	__u32 response_length;
	__u32 reserved;
};

/* RX Hash function flags */
enum mlx4_ib_rx_hash_function_flags {
	MLX4_IB_RX_HASH_FUNC_TOEPLITZ	= 1 << 0,
};

/*
 * RX Hash flags, these flags allow selecting which fields of the
 * incoming packet participate in the RX hash.  Each flag represents a
 * packet field; when the flag is set, that field is included in the
 * RX hash calculation.
 */
enum mlx4_ib_rx_hash_fields {
	MLX4_IB_RX_HASH_SRC_IPV4	= 1 << 0,
	MLX4_IB_RX_HASH_DST_IPV4	= 1 << 1,
	MLX4_IB_RX_HASH_SRC_IPV6	= 1 << 2,
	MLX4_IB_RX_HASH_DST_IPV6	= 1 << 3,
	MLX4_IB_RX_HASH_SRC_PORT_TCP	= 1 << 4,
	MLX4_IB_RX_HASH_DST_PORT_TCP	= 1 << 5,
	MLX4_IB_RX_HASH_SRC_PORT_UDP	= 1 << 6,
	MLX4_IB_RX_HASH_DST_PORT_UDP	= 1 << 7,
	MLX4_IB_RX_HASH_INNER		= 1ULL << 31,
};
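/*
 * Illustrative sketch (not part of the header): filling
 * struct mlx4_ib_create_qp_rss for a Toeplitz hash over the IPv4/TCP
 * 4-tuple.  The caller supplies its own 40-byte key; the helper name
 * and guard macro are made up.
 */
#ifdef MLX4_RSS_EXAMPLE	/* hypothetical guard, never defined */
#include <string.h>

static inline void mlx4_fill_rss_cmd(struct mlx4_ib_create_qp_rss *cmd,
				     const __u8 key[40])
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->rx_hash_fields_mask = MLX4_IB_RX_HASH_SRC_IPV4 |
				   MLX4_IB_RX_HASH_DST_IPV4 |
				   MLX4_IB_RX_HASH_SRC_PORT_TCP |
				   MLX4_IB_RX_HASH_DST_PORT_TCP;
	cmd->rx_hash_function = MLX4_IB_RX_HASH_FUNC_TOEPLITZ;
	memcpy(cmd->rx_hash_key, key, sizeof(cmd->rx_hash_key));
}
#endif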
struct mlx4_ib_rss_caps {
	__aligned_u64 rx_hash_fields_mask; /* enum mlx4_ib_rx_hash_fields */
	__u8 rx_hash_function; /* enum mlx4_ib_rx_hash_function_flags */
	__u8 reserved[7];
};

enum query_device_resp_mask {
	MLX4_IB_QUERY_DEV_RESP_MASK_CORE_CLOCK_OFFSET = 1UL << 0,
};

struct mlx4_ib_tso_caps {
	__u32 max_tso; /* Maximum tso payload size in bytes */
	/* Corresponding bit will be set if qp type from
	 * 'enum ib_qp_type' is supported.
	 */
	__u32 supported_qpts;
};

struct mlx4_uverbs_ex_query_device_resp {
	__u32 comp_mask;
	__u32 response_length;
	__aligned_u64 hca_core_clock_offset;
	__u32 max_inl_recv_sz;
	__u32 reserved;
	struct mlx4_ib_rss_caps rss_caps;
	struct mlx4_ib_tso_caps tso_caps;
};

#endif /* MLX4_ABI_USER_H */

#ifndef CXGB4_ABI_USER_H
#define CXGB4_ABI_USER_H

#include <linux/types.h>

#define C4IW_UVERBS_ABI_VERSION	3

/*
 * Make sure that all structs defined in this file remain laid out so
 * that they pack the same way on 32-bit and 64-bit architectures (to
 * avoid incompatibility between 32-bit userspace and 64-bit kernels).
 * In particular do not use pointer types -- pass pointers in
 * __aligned_u64 instead.
 */

enum {
	C4IW_64B_CQE = (1 << 0)
};

struct c4iw_create_cq {
	__u32 flags;
	__u32 reserved;
};

struct c4iw_create_cq_resp {
	__aligned_u64 key;
	__aligned_u64 gts_key;
	__aligned_u64 memsize;
	__u32 cqid;
	__u32 size;
	__u32 qid_mask;
	__u32 flags;
};

enum {
	C4IW_QPF_ONCHIP = (1 << 0),
	C4IW_QPF_WRITE_W_IMM = (1 << 1)
};

struct c4iw_create_qp_resp {
	__aligned_u64 ma_sync_key;
	__aligned_u64 sq_key;
	__aligned_u64 rq_key;
	__aligned_u64 sq_db_gts_key;
	__aligned_u64 rq_db_gts_key;
	__aligned_u64 sq_memsize;
	__aligned_u64 rq_memsize;
	__u32 sqid;
	__u32 rqid;
	__u32 sq_size;
	__u32 rq_size;
	__u32 qid_mask;
	__u32 flags;
};

struct c4iw_create_srq_resp {
	__aligned_u64 srq_key;
	__aligned_u64 srq_db_gts_key;
	__aligned_u64 srq_memsize;
	__u32 srqid;
	__u32 srq_size;
	__u32 rqt_abs_idx;
	__u32 qid_mask;
	__u32 flags;
	__u32 reserved; /* explicit padding */
};

enum {
	/* HW supports SRQ_LIMIT_REACHED event */
	T4_SRQ_LIMIT_SUPPORT = 1 << 0,
};

struct c4iw_alloc_ucontext_resp {
	__aligned_u64 status_page_key;
	__u32 status_page_size;
	__u32 reserved; /* explicit padding (optional for i386) */
};

struct c4iw_alloc_pd_resp {
	__u32 pdid;
};

#endif /* CXGB4_ABI_USER_H */
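/*
 * Illustrative sketch (not part of the header): a userspace provider
 * typically treats the *_key fields in these responses as opaque
 * mmap(2) offsets into the uverbs device file, with memsize giving the
 * mapping length.  That usage is an assumption here, based on the
 * common uverbs provider pattern; the helper name is made up.
 */
#ifdef C4IW_MMAP_EXAMPLE	/* hypothetical guard, never defined */
#include <sys/mman.h>

static inline void *c4iw_map_cq(int uverbs_fd,
				const struct c4iw_create_cq_resp *resp)
{
	/* resp->key is used as the offset of the shared queue mapping. */
	return mmap(NULL, resp->memsize, PROT_READ | PROT_WRITE,
		    MAP_SHARED, uverbs_fd, resp->key);
}
#endif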
#include <linux/types.h>

#define I40IW_ABI_VER 5

struct i40iw_alloc_ucontext_req {
	__u32 reserved32;
	__u8 userspace_ver;
	__u8 reserved8[3];
};

struct i40iw_alloc_ucontext_resp {
	__u32 max_pds; /* maximum pds allowed for this user process */
	__u32 max_qps; /* maximum qps allowed for this user process */
	__u32 wq_size; /* size of the WQs (sq+rq) allocated to the mmaped area */
	__u8 kernel_ver;
	__u8 reserved[3];
};

struct i40iw_alloc_pd_resp {
	__u32 pd_id;
	__u8 reserved[4];
};

struct i40iw_create_cq_req {
	__aligned_u64 user_cq_buffer;
	__aligned_u64 user_shadow_area;
};

struct i40iw_create_qp_req {
	__aligned_u64 user_wqe_buffers;
	__aligned_u64 user_compl_ctx;

	/* UDA QP PHB */
	__aligned_u64 user_sq_phb; /* place for VA of the sq phb buff */
	__aligned_u64 user_rq_phb; /* place for VA of the rq phb buff */
};

enum i40iw_memreg_type {
	IW_MEMREG_TYPE_MEM = 0x0000,
	IW_MEMREG_TYPE_QP = 0x0001,
	IW_MEMREG_TYPE_CQ = 0x0002,
};

struct i40iw_mem_reg_req {
	__u16 reg_type;	/* Memory, QP or CQ */
	__u16 cq_pages;
	__u16 rq_pages;
	__u16 sq_pages;
};

struct i40iw_create_cq_resp {
	__u32 cq_id;
	__u32 cq_size;
	__u32 mmap_db_index;
	__u32 reserved;
};

struct i40iw_create_qp_resp {
	__u32 qp_id;
	__u32 actual_sq_size;
	__u32 actual_rq_size;
	__u32 i40iw_drv_opt;
	__u16 push_idx;
	__u8 lsmm;
	__u8 rsvd2;
};

#endif

#ifndef IB_USER_CM_H
#define IB_USER_CM_H

#include <rdma/ib_user_sa.h>

#define IB_USER_CM_ABI_VERSION 5

enum {
	IB_USER_CM_CMD_CREATE_ID,
	IB_USER_CM_CMD_DESTROY_ID,
	IB_USER_CM_CMD_ATTR_ID,

	IB_USER_CM_CMD_LISTEN,
	IB_USER_CM_CMD_NOTIFY,

	IB_USER_CM_CMD_SEND_REQ,
	IB_USER_CM_CMD_SEND_REP,
	IB_USER_CM_CMD_SEND_RTU,
	IB_USER_CM_CMD_SEND_DREQ,
	IB_USER_CM_CMD_SEND_DREP,
	IB_USER_CM_CMD_SEND_REJ,
	IB_USER_CM_CMD_SEND_MRA,
	IB_USER_CM_CMD_SEND_LAP,
	IB_USER_CM_CMD_SEND_APR,
	IB_USER_CM_CMD_SEND_SIDR_REQ,
	IB_USER_CM_CMD_SEND_SIDR_REP,

	IB_USER_CM_CMD_EVENT,
	IB_USER_CM_CMD_INIT_QP_ATTR,
};

/*
 * command ABI structures.
 */
struct ib_ucm_cmd_hdr {
	__u32 cmd;
	__u16 in;
	__u16 out;
};

struct ib_ucm_create_id {
	__aligned_u64 uid;
	__aligned_u64 response;
};

struct ib_ucm_create_id_resp {
	__u32 id;
};
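/*
 * Illustrative sketch (not part of the header): issuing CREATE_ID
 * through the write() based ucm ABI.  Every request is an
 * ib_ucm_cmd_hdr followed by the command payload, with the response
 * buffer passed by pointer in a __u64 field.  Error handling is
 * simplified; the guard macro and function name are made up.
 */
#ifdef IB_UCM_EXAMPLE	/* hypothetical guard, never defined */
#include <unistd.h>
#include <string.h>

static inline int ucm_create_id(int ucm_fd, __u64 uid,
				struct ib_ucm_create_id_resp *resp)
{
	struct {
		struct ib_ucm_cmd_hdr hdr;
		struct ib_ucm_create_id cmd;
	} req;

	memset(&req, 0, sizeof(req));
	req.hdr.cmd = IB_USER_CM_CMD_CREATE_ID;
	req.hdr.in = sizeof(req.cmd);	/* command payload length */
	req.hdr.out = sizeof(*resp);	/* response buffer length */
	req.cmd.uid = uid;
	req.cmd.response = (__u64)(unsigned long)resp;

	return write(ucm_fd, &req, sizeof(req)) == sizeof(req) ? 0 : -1;
}
#endif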
struct ib_ucm_destroy_id {
	__aligned_u64 response;
	__u32 id;
	__u32 reserved;
};

struct ib_ucm_destroy_id_resp {
	__u32 events_reported;
};

struct ib_ucm_attr_id {
	__aligned_u64 response;
	__u32 id;
	__u32 reserved;
};

struct ib_ucm_attr_id_resp {
	__be64 service_id;
	__be64 service_mask;
	__be32 local_id;
	__be32 remote_id;
};

struct ib_ucm_init_qp_attr {
	__aligned_u64 response;
	__u32 id;
	__u32 qp_state;
};

struct ib_ucm_listen {
	__be64 service_id;
	__be64 service_mask;
	__u32 id;
	__u32 reserved;
};

struct ib_ucm_notify {
	__u32 id;
	__u32 event;
};

struct ib_ucm_private_data {
	__aligned_u64 data;
	__u32 id;
	__u8  len;
	__u8  reserved[3];
};

struct ib_ucm_req {
	__u32 id;
	__u32 qpn;
	__u32 qp_type;
	__u32 psn;
	__be64 sid;
	__aligned_u64 data;
	__aligned_u64 primary_path;
	__aligned_u64 alternate_path;
	__u8  len;
	__u8  peer_to_peer;
	__u8  responder_resources;
	__u8  initiator_depth;
	__u8  remote_cm_response_timeout;
	__u8  flow_control;
	__u8  local_cm_response_timeout;
	__u8  retry_count;
	__u8  rnr_retry_count;
	__u8  max_cm_retries;
	__u8  srq;
	__u8  reserved[5];
};

struct ib_ucm_rep {
	__aligned_u64 uid;
	__aligned_u64 data;
	__u32 id;
	__u32 qpn;
	__u32 psn;
	__u8  len;
	__u8  responder_resources;
	__u8  initiator_depth;
	__u8  target_ack_delay;
	__u8  failover_accepted;
	__u8  flow_control;
	__u8  rnr_retry_count;
	__u8  srq;
	__u8  reserved[4];
};

struct ib_ucm_info {
	__u32 id;
	__u32 status;
	__aligned_u64 info;
	__aligned_u64 data;
	__u8  info_len;
	__u8  data_len;
	__u8  reserved[6];
};

struct ib_ucm_mra {
	__aligned_u64 data;
	__u32 id;
	__u8  len;
	__u8  timeout;
	__u8  reserved[2];
};

struct ib_ucm_lap {
	__aligned_u64 path;
	__aligned_u64 data;
	__u32 id;
	__u8  len;
	__u8  reserved[3];
};

struct ib_ucm_sidr_req {
	__u32 id;
	__u32 timeout;
	__be64 sid;
	__aligned_u64 data;
	__aligned_u64 path;
	__u16 reserved_pkey;
	__u8  len;
	__u8  max_cm_retries;
	__u8  reserved[4];
};

struct ib_ucm_sidr_rep {
	__u32 id;
	__u32 qpn;
	__u32 qkey;
	__u32 status;
	__aligned_u64 info;
	__aligned_u64 data;
	__u8  info_len;
	__u8  data_len;
	__u8  reserved[6];
};

/*
 * event notification ABI structures.
 */
struct ib_ucm_event_get {
	__aligned_u64 response;
	__aligned_u64 data;
	__aligned_u64 info;
	__u8  data_len;
	__u8  info_len;
	__u8  reserved[6];
};

struct ib_ucm_req_event_resp {
	struct ib_user_path_rec primary_path;
	struct ib_user_path_rec alternate_path;
	__be64 remote_ca_guid;
	__u32 remote_qkey;
	__u32 remote_qpn;
	__u32 qp_type;
	__u32 starting_psn;
	__u8  responder_resources;
	__u8  initiator_depth;
	__u8  local_cm_response_timeout;
	__u8  flow_control;
	__u8  remote_cm_response_timeout;
	__u8  retry_count;
	__u8  rnr_retry_count;
	__u8  srq;
	__u8  port;
	__u8  reserved[7];
};

struct ib_ucm_rep_event_resp {
	__be64 remote_ca_guid;
	__u32 remote_qkey;
	__u32 remote_qpn;
	__u32 starting_psn;
	__u8  responder_resources;
	__u8  initiator_depth;
	__u8  target_ack_delay;
	__u8  failover_accepted;
	__u8  flow_control;
	__u8  rnr_retry_count;
	__u8  srq;
	__u8  reserved[5];
};

struct ib_ucm_rej_event_resp {
	__u32 reason;
	/* ari in ib_ucm_event_get info field. */
};

struct ib_ucm_mra_event_resp {
	__u8  timeout;
	__u8  reserved[3];
};

struct ib_ucm_lap_event_resp {
	struct ib_user_path_rec path;
};

struct ib_ucm_apr_event_resp {
	__u32 status;
	/* apr info in ib_ucm_event_get info field. */
};

struct ib_ucm_sidr_req_event_resp {
	__u16 pkey;
	__u8  port;
	__u8  reserved;
};

struct ib_ucm_sidr_rep_event_resp {
	__u32 status;
	__u32 qkey;
	__u32 qpn;
	/* info in ib_ucm_event_get info field. */
};

#define IB_UCM_PRES_DATA      0x01
#define IB_UCM_PRES_INFO      0x02
#define IB_UCM_PRES_PRIMARY   0x04
#define IB_UCM_PRES_ALTERNATE 0x08

struct ib_ucm_event_resp {
	__aligned_u64 uid;
	__u32 id;
	__u32 event;
	__u32 present;
	__u32 reserved;
	union {
		struct ib_ucm_req_event_resp req_resp;
		struct ib_ucm_rep_event_resp rep_resp;
		struct ib_ucm_rej_event_resp rej_resp;
		struct ib_ucm_mra_event_resp mra_resp;
		struct ib_ucm_lap_event_resp lap_resp;
		struct ib_ucm_apr_event_resp apr_resp;

		struct ib_ucm_sidr_req_event_resp sidr_req_resp;
		struct ib_ucm_sidr_rep_event_resp sidr_rep_resp;

		__u32 send_status;
	} u;
};

#endif /* IB_USER_CM_H */

#ifndef RDMA_USER_RXE_H
#define RDMA_USER_RXE_H

#include <linux/types.h>
#include <linux/in.h>
#include <linux/in6.h>

union rxe_gid {
	__u8 raw[16];
	struct {
		__be64 subnet_prefix;
		__be64 interface_id;
	} global;
};

struct rxe_global_route {
	union rxe_gid dgid;
	__u32 flow_label;
	__u8 sgid_index;
	__u8 hop_limit;
	__u8 traffic_class;
};

struct rxe_av {
	__u8 port_num;
	__u8 network_type;
	__u16 reserved1;
	__u32 reserved2;
	struct rxe_global_route grh;
	union {
		struct sockaddr_in _sockaddr_in;
		struct sockaddr_in6 _sockaddr_in6;
	} sgid_addr, dgid_addr;
};

struct rxe_send_wr {
	__aligned_u64 wr_id;
	__u32 num_sge;
	__u32 opcode;
	__u32 send_flags;
	union {
		__be32 imm_data;
		__u32 invalidate_rkey;
	} ex;
	union {
		struct {
			__aligned_u64 remote_addr;
			__u32 rkey;
			__u32 reserved;
		} rdma;
		struct {
			__aligned_u64 remote_addr;
			__aligned_u64 compare_add;
			__aligned_u64 swap;
			__u32 rkey;
			__u32 reserved;
		} atomic;
		struct {
			__u32 remote_qpn;
			__u32 remote_qkey;
			__u16 pkey_index;
		} ud;
		/* reg is only used by the kernel and is not part of the uapi */
		struct {
			union {
				struct ib_mr *mr;
				__aligned_u64 reserved;
			};
			__u32 key;
			__u32 access;
		} reg;
	} wr;
};

struct rxe_sge {
	__aligned_u64 addr;
	__u32 length;
	__u32 lkey;
};

struct mminfo {
	__aligned_u64 offset;
	__u32 size;
	__u32 pad;
};

struct rxe_dma_info {
	__u32 length;
	__u32 resid;
	__u32 cur_sge;
	__u32 num_sge;
	__u32 sge_offset;
	__u32 reserved;
	union {
		__u8 inline_data[0];
		struct rxe_sge sge[0];
	};
};

struct rxe_send_wqe {
	struct rxe_send_wr wr;
	struct rxe_av av;
	__u32 status;
	__u32 state;
	__aligned_u64 iova;
	__u32 mask;
	__u32 first_psn;
	__u32 last_psn;
	__u32 ack_length;
	__u32 ssn;
	__u32 has_rd_atomic;
	struct rxe_dma_info dma;
};
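/*
 * Illustrative sketch (not part of the header): preparing a
 * rxe_send_wr for a one-sided RDMA WRITE.  The opcode value follows
 * the uverbs numbering (IB_UVERBS_WR_RDMA_WRITE == 0, defined later in
 * this collection); the SGE list travels separately in the WQE's dma
 * member.  The helper name and guard macro are made up.
 */
#ifdef RXE_WR_EXAMPLE	/* hypothetical guard, never defined */
#include <string.h>

static inline void rxe_fill_rdma_write(struct rxe_send_wr *wr, __u64 wr_id,
				       __u64 remote_addr, __u32 rkey)
{
	memset(wr, 0, sizeof(*wr));
	wr->wr_id = wr_id;
	wr->num_sge = 1;	/* one local segment */
	wr->opcode = 0;		/* IB_UVERBS_WR_RDMA_WRITE */
	wr->wr.rdma.remote_addr = remote_addr;
	wr->wr.rdma.rkey = rkey;
}
#endif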
struct rxe_recv_wqe {
	__aligned_u64 wr_id;
	__u32 num_sge;
	__u32 padding;
	struct rxe_dma_info dma;
};

struct rxe_create_cq_resp {
	struct mminfo mi;
};

struct rxe_resize_cq_resp {
	struct mminfo mi;
};

struct rxe_create_qp_resp {
	struct mminfo rq_mi;
	struct mminfo sq_mi;
};

struct rxe_create_srq_resp {
	struct mminfo mi;
	__u32 srq_num;
	__u32 reserved;
};

struct rxe_modify_srq_cmd {
	__aligned_u64 mmap_info_addr;
};

#endif /* RDMA_USER_RXE_H */

#ifndef OCRDMA_ABI_USER_H
#define OCRDMA_ABI_USER_H

#include <linux/types.h>

#define OCRDMA_ABI_VERSION 2
#define OCRDMA_BE_ROCE_ABI_VERSION 1
/* user kernel communication data structures. */

struct ocrdma_alloc_ucontext_resp {
	__u32 dev_id;
	__u32 wqe_size;
	__u32 max_inline_data;
	__u32 dpp_wqe_size;
	__aligned_u64 ah_tbl_page;
	__u32 ah_tbl_len;
	__u32 rqe_size;
	__u8 fw_ver[32];
	/* for future use/new features in progress */
	__aligned_u64 rsvd1;
	__aligned_u64 rsvd2;
};

struct ocrdma_alloc_pd_ureq {
	__u32 rsvd[2];
};

struct ocrdma_alloc_pd_uresp {
	__u32 id;
	__u32 dpp_enabled;
	__u32 dpp_page_addr_hi;
	__u32 dpp_page_addr_lo;
	__u32 rsvd[2];
};

struct ocrdma_create_cq_ureq {
	__u32 dpp_cq;
	__u32 rsvd; /* pad */
};

#define MAX_CQ_PAGES 8
struct ocrdma_create_cq_uresp {
	__u32 cq_id;
	__u32 page_size;
	__u32 num_pages;
	__u32 max_hw_cqe;
	__aligned_u64 page_addr[MAX_CQ_PAGES];
	__aligned_u64 db_page_addr;
	__u32 db_page_size;
	__u32 phase_change;
	/* for future use/new features in progress */
	__aligned_u64 rsvd1;
	__aligned_u64 rsvd2;
};

#define MAX_QP_PAGES 8
#define MAX_UD_AV_PAGES 8

struct ocrdma_create_qp_ureq {
	__u8 enable_dpp_cq;
	__u8 rsvd;
	__u16 dpp_cq_id;
	__u32 rsvd1; /* pad */
};

struct ocrdma_create_qp_uresp {
	__u16 qp_id;
	__u16 sq_dbid;
	__u16 rq_dbid;
	__u16 resv0; /* pad */
	__u32 sq_page_size;
	__u32 rq_page_size;
	__u32 num_sq_pages;
	__u32 num_rq_pages;
	__aligned_u64 sq_page_addr[MAX_QP_PAGES];
	__aligned_u64 rq_page_addr[MAX_QP_PAGES];
	__aligned_u64 db_page_addr;
	__u32 db_page_size;
	__u32 dpp_credit;
	__u32 dpp_offset;
	__u32 num_wqe_allocated;
	__u32 num_rqe_allocated;
	__u32 db_sq_offset;
	__u32 db_rq_offset;
	__u32 db_shift;
	__aligned_u64 rsvd[11];
};

struct ocrdma_create_srq_uresp {
	__u16 rq_dbid;
	__u16 resv0; /* pad */
	__u32 resv1;
	__u32 rq_page_size;
	__u32 num_rq_pages;
	__aligned_u64 rq_page_addr[MAX_QP_PAGES];
	__aligned_u64 db_page_addr;
	__u32 db_page_size;
	__u32 num_rqe_allocated;
	__u32 db_rq_offset;
	__u32 db_shift;
	__aligned_u64 rsvd2;
	__aligned_u64 rsvd3;
};

#endif /* OCRDMA_ABI_USER_H */
#ifndef __BNXT_RE_UVERBS_ABI_H__
#define __BNXT_RE_UVERBS_ABI_H__

#include <linux/types.h>

#define BNXT_RE_ABI_VERSION 1

struct bnxt_re_uctx_resp {
	__u32 dev_id;
	__u32 max_qp;
	__u32 pg_size;
	__u32 cqe_sz;
	__u32 max_cqd;
	__u32 rsvd;
};

/*
 * This struct is placed after the ib_uverbs_alloc_pd_resp struct, which is
 * not 8 byte aligned.  To avoid undesired padding in various cases we have
 * to set this struct to packed.
 */
struct bnxt_re_pd_resp {
	__u32 pdid;
	__u32 dpi;
	__u64 dbr;
} __attribute__((packed, aligned(4)));

struct bnxt_re_cq_req {
	__aligned_u64 cq_va;
	__aligned_u64 cq_handle;
};

struct bnxt_re_cq_resp {
	__u32 cqid;
	__u32 tail;
	__u32 phase;
	__u32 rsvd;
};

struct bnxt_re_qp_req {
	__aligned_u64 qpsva;
	__aligned_u64 qprva;
	__aligned_u64 qp_handle;
};

struct bnxt_re_qp_resp {
	__u32 qpid;
	__u32 rsvd;
};

struct bnxt_re_srq_req {
	__aligned_u64 srqva;
	__aligned_u64 srq_handle;
};

struct bnxt_re_srq_resp {
	__u32 srqid;
};

enum bnxt_re_shpg_offt {
	BNXT_RE_BEG_RESV_OFFT	= 0x00,
	BNXT_RE_AVID_OFFT	= 0x10,
	BNXT_RE_AVID_SIZE	= 0x04,
	BNXT_RE_END_RESV_OFFT	= 0xFF0
};

#endif /* __BNXT_RE_UVERBS_ABI_H__ */
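/*
 * Illustrative sketch (not part of the header): because
 * bnxt_re_pd_resp is packed and 4-byte aligned, its layout is
 * identical on 32-bit and 64-bit builds.  A C11 compile-time check
 * like this makes the intent explicit; the guard macro is made up.
 */
#ifdef BNXT_RE_LAYOUT_CHECK	/* hypothetical guard, never defined */
#include <stddef.h>

_Static_assert(sizeof(struct bnxt_re_pd_resp) == 16,
	       "pdid + dpi + dbr with no tail padding");
_Static_assert(offsetof(struct bnxt_re_pd_resp, dbr) == 8,
	       "dbr must start right after the two 32-bit words");
#endif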
#ifndef HNS_ABI_USER_H
#define HNS_ABI_USER_H

#include <linux/types.h>

struct hns_roce_ib_create_cq {
	__aligned_u64 buf_addr;
	__aligned_u64 db_addr;
};

struct hns_roce_ib_create_cq_resp {
	__aligned_u64 cqn; /* Only 32 bits used, 64 for compat */
	__aligned_u64 cap_flags;
};

struct hns_roce_ib_create_qp {
	__aligned_u64 buf_addr;
	__aligned_u64 db_addr;
	__u8 log_sq_bb_count;
	__u8 log_sq_stride;
	__u8 sq_no_prefetch;
	__u8 reserved[5];
	__aligned_u64 sdb_addr;
};

struct hns_roce_ib_create_qp_resp {
	__aligned_u64 cap_flags;
};

struct hns_roce_ib_alloc_ucontext_resp {
	__u32 qp_tab_size;
	__u32 reserved;
};

struct hns_roce_ib_alloc_pd_resp {
	__u32 pdn;
};

#endif /* HNS_ABI_USER_H */

#ifndef IB_USER_MAD_H
#define IB_USER_MAD_H

#include <linux/types.h>

/*
 * Increment this value if any changes that break userspace ABI
 * compatibility are made.
 */
#define IB_USER_MAD_ABI_VERSION	5

/*
 * Make sure that all structs defined in this file remain laid out so
 * that they pack the same way on 32-bit and 64-bit architectures (to
 * avoid incompatibility between 32-bit userspace and 64-bit kernels).
 */

/**
 * ib_user_mad_hdr_old - Old version of MAD packet header without pkey_index
 * @id - ID of agent MAD received with/to be sent with
 * @status - 0 on successful receive, ETIMEDOUT if no response
 *   received (transaction ID in data[] will be set to TID of original
 *   request) (ignored on send)
 * @timeout_ms - Milliseconds to wait for response (unset on receive)
 * @retries - Number of automatic retries to attempt
 * @qpn - Remote QP number received from/to be sent to
 * @qkey - Remote Q_Key to be sent with (unset on receive)
 * @lid - Remote lid received from/to be sent to
 * @sl - Service level received with/to be sent with
 * @path_bits - Local path bits received with/to be sent with
 * @grh_present - If set, GRH was received/should be sent
 * @gid_index - Local GID index to send with (unset on receive)
 * @hop_limit - Hop limit in GRH
 * @traffic_class - Traffic class in GRH
 * @gid - Remote GID in GRH
 * @flow_label - Flow label in GRH
 */
struct ib_user_mad_hdr_old {
	__u32	id;
	__u32	status;
	__u32	timeout_ms;
	__u32	retries;
	__u32	length;
	__be32	qpn;
	__be32	qkey;
	__be16	lid;
	__u8	sl;
	__u8	path_bits;
	__u8	grh_present;
	__u8	gid_index;
	__u8	hop_limit;
	__u8	traffic_class;
	__u8	gid[16];
	__be32	flow_label;
};

/**
 * ib_user_mad_hdr - MAD packet header
 *   This layout allows specifying/receiving the P_Key index.  To use
 *   this capability, an application must call the
 *   IB_USER_MAD_ENABLE_PKEY ioctl on the user MAD file handle before
 *   any other actions with the file handle.
 * @id - ID of agent MAD received with/to be sent with
 * @status - 0 on successful receive, ETIMEDOUT if no response
 *   received (transaction ID in data[] will be set to TID of original
 *   request) (ignored on send)
 * @timeout_ms - Milliseconds to wait for response (unset on receive)
 * @retries - Number of automatic retries to attempt
 * @qpn - Remote QP number received from/to be sent to
 * @qkey - Remote Q_Key to be sent with (unset on receive)
 * @lid - Remote lid received from/to be sent to
 * @sl - Service level received with/to be sent with
 * @path_bits - Local path bits received with/to be sent with
 * @grh_present - If set, GRH was received/should be sent
 * @gid_index - Local GID index to send with (unset on receive)
 * @hop_limit - Hop limit in GRH
 * @traffic_class - Traffic class in GRH
 * @gid - Remote GID in GRH
 * @flow_label - Flow label in GRH
 * @pkey_index - P_Key index
 */
struct ib_user_mad_hdr {
	__u32	id;
	__u32	status;
	__u32	timeout_ms;
	__u32	retries;
	__u32	length;
	__be32	qpn;
	__be32	qkey;
	__be16	lid;
	__u8	sl;
	__u8	path_bits;
	__u8	grh_present;
	__u8	gid_index;
	__u8	hop_limit;
	__u8	traffic_class;
	__u8	gid[16];
	__be32	flow_label;
	__u16	pkey_index;
	__u8	reserved[6];
};

/**
 * ib_user_mad - MAD packet
 * @hdr - MAD packet header
 * @data - Contents of MAD
 */
struct ib_user_mad {
	struct ib_user_mad_hdr hdr;
	__aligned_u64	data[0];
};

/*
 * Earlier versions of this interface definition declared the
 * method_mask[] member as an array of __u32 but treated it as a
 * bitmap made up of longs in the kernel.  This ambiguity meant that
 * 32-bit big-endian applications that can run on both 32-bit and
 * 64-bit kernels had no consistent ABI to rely on, and 64-bit
 * big-endian applications that treated method_mask as being made up
 * of 32-bit words would have their bitmap misinterpreted.
 *
 * To clear up this confusion, we change the declaration of
 * method_mask[] to use unsigned long and handle the conversion from
 * 32-bit userspace to 64-bit kernel for big-endian systems in the
 * compat_ioctl method.  Unfortunately, to keep the structure layout
 * the same, we need the method_mask[] array to be aligned only to 4
 * bytes even when long is 64 bits, which forces us into this ugly
 * typedef.
 */
typedef unsigned long __attribute__((aligned(4))) packed_ulong;
#define IB_USER_MAD_LONGS_PER_METHOD_MASK (128 / (8 * sizeof (long)))

/**
 * ib_user_mad_reg_req - MAD registration request
 * @id - Set by the kernel; used to identify agent in future requests.
 * @qpn - Queue pair number; must be 0 or 1.
 * @method_mask - The caller will receive unsolicited MADs for any method
 *   where @method_mask = 1.
 * @mgmt_class - Indicates which management class of MADs should be
 *   received by the caller.  This field is only required if the user
 *   wishes to receive unsolicited MADs, otherwise it should be 0.
 * @mgmt_class_version - Indicates which version of MADs for the given
 *   management class to receive.
 * @oui: Indicates IEEE OUI when mgmt_class is a vendor class
 *   in the range from 0x30 to 0x4f. Otherwise not used.
 * @rmpp_version: If set, indicates the RMPP version used.
 */
struct ib_user_mad_reg_req {
	__u32	id;
	packed_ulong method_mask[IB_USER_MAD_LONGS_PER_METHOD_MASK];
	__u8	qpn;
	__u8	mgmt_class;
	__u8	mgmt_class_version;
	__u8	oui[3];
	__u8	rmpp_version;
};
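/*
 * Illustrative sketch (not part of the header): setting one method bit
 * in reg_req's method_mask, treating it as the long-based bitmap
 * described above.  The helper name and guard macro are made up.
 */
#ifdef IB_UMAD_MASK_EXAMPLE	/* hypothetical guard, never defined */
static inline void umad_method_mask_set(struct ib_user_mad_reg_req *req,
					unsigned int method)
{
	const unsigned int bits = 8 * sizeof(unsigned long);

	/* 128 method bits total, packed into unsigned longs. */
	req->method_mask[method / bits] |= 1UL << (method % bits);
}
#endif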
/**
 * ib_user_mad_reg_req2 - MAD registration request
 *
 * @id - Set by the _kernel_; used by userspace to identify the
 *   registered agent in future requests.
 * @qpn - Queue pair number; must be 0 or 1.
 * @mgmt_class - Indicates which management class of MADs should be
 *   received by the caller.  This field is only required if
 *   the user wishes to receive unsolicited MADs, otherwise
 *   it should be 0.
 * @mgmt_class_version - Indicates which version of MADs for the given
 *   management class to receive.
 * @res - Ignored.
 * @flags - additional registration flags; Must be in the set of
 *   flags defined in IB_USER_MAD_REG_FLAGS_CAP
 * @method_mask - The caller wishes to receive unsolicited MADs for the
 *   methods whose bit(s) is(are) set.
 * @oui - Indicates IEEE OUI to use when mgmt_class is a vendor
 *   class in the range from 0x30 to 0x4f. Otherwise not used.
 * @rmpp_version - If set, indicates the RMPP version to use.
 */
enum {
	IB_USER_MAD_USER_RMPP = (1 << 0),
};
#define IB_USER_MAD_REG_FLAGS_CAP (IB_USER_MAD_USER_RMPP)
struct ib_user_mad_reg_req2 {
	__u32	id;
	__u32	qpn;
	__u8	mgmt_class;
	__u8	mgmt_class_version;
	__u16	res;
	__u32	flags;
	__aligned_u64 method_mask[2];
	__u32	oui;
	__u8	rmpp_version;
	__u8	reserved[3];
};

#endif /* IB_USER_MAD_H */
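/*
 * Illustrative sketch (not part of the header): filling
 * ib_user_mad_reg_req2 to receive unsolicited GET MADs of a vendor
 * class.  The class, OUI, and method values are placeholders, not
 * recommendations; the guard macro and helper name are made up.
 */
#ifdef IB_UMAD_REG2_EXAMPLE	/* hypothetical guard, never defined */
#include <string.h>

static inline void umad_fill_reg_req2(struct ib_user_mad_reg_req2 *req)
{
	memset(req, 0, sizeof(*req));
	req->qpn = 1;			/* GSI */
	req->mgmt_class = 0x30;		/* vendor class, so oui is used */
	req->mgmt_class_version = 1;
	req->oui = 0x123456;		/* placeholder IEEE OUI */
	req->method_mask[0] |= 1ULL << 0x01;	/* e.g. a GET method bit */
}
#endif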
#ifndef CXGB3_ABI_USER_H
#define CXGB3_ABI_USER_H

#include <linux/types.h>

#define IWCH_UVERBS_ABI_VERSION	1

/*
 * Make sure that all structs defined in this file remain laid out so
 * that they pack the same way on 32-bit and 64-bit architectures (to
 * avoid incompatibility between 32-bit userspace and 64-bit kernels).
 * In particular do not use pointer types -- pass pointers in
 * __aligned_u64 instead.
 */
struct iwch_create_cq_req {
	__aligned_u64 user_rptr_addr;
};

struct iwch_create_cq_resp_v0 {
	__aligned_u64 key;
	__u32 cqid;
	__u32 size_log2;
};

struct iwch_create_cq_resp {
	__aligned_u64 key;
	__u32 cqid;
	__u32 size_log2;
	__u32 memsize;
	__u32 reserved;
};

struct iwch_create_qp_resp {
	__aligned_u64 key;
	__aligned_u64 db_key;
	__u32 qpid;
	__u32 size_log2;
	__u32 sq_size_log2;
	__u32 rq_size_log2;
};

struct iwch_reg_user_mr_resp {
	__u32 pbl_addr;
};

struct iwch_alloc_pd_resp {
	__u32 pdid;
};

#endif /* CXGB3_ABI_USER_H */

#include <linux/types.h>

#ifndef RDMA_UAPI_PTR
#define RDMA_UAPI_PTR(_type, _name)	__aligned_u64 _name
#endif

enum ib_uverbs_access_flags {
	IB_UVERBS_ACCESS_LOCAL_WRITE = 1 << 0,
	IB_UVERBS_ACCESS_REMOTE_WRITE = 1 << 1,
	IB_UVERBS_ACCESS_REMOTE_READ = 1 << 2,
	IB_UVERBS_ACCESS_REMOTE_ATOMIC = 1 << 3,
	IB_UVERBS_ACCESS_MW_BIND = 1 << 4,
	IB_UVERBS_ACCESS_ZERO_BASED = 1 << 5,
	IB_UVERBS_ACCESS_ON_DEMAND = 1 << 6,
	IB_UVERBS_ACCESS_HUGETLB = 1 << 7,
};

enum ib_uverbs_query_port_cap_flags {
	IB_UVERBS_PCF_SM = 1 << 1,
	IB_UVERBS_PCF_NOTICE_SUP = 1 << 2,
	IB_UVERBS_PCF_TRAP_SUP = 1 << 3,
	IB_UVERBS_PCF_OPT_IPD_SUP = 1 << 4,
	IB_UVERBS_PCF_AUTO_MIGR_SUP = 1 << 5,
	IB_UVERBS_PCF_SL_MAP_SUP = 1 << 6,
	IB_UVERBS_PCF_MKEY_NVRAM = 1 << 7,
	IB_UVERBS_PCF_PKEY_NVRAM = 1 << 8,
	IB_UVERBS_PCF_LED_INFO_SUP = 1 << 9,
	IB_UVERBS_PCF_SM_DISABLED = 1 << 10,
	IB_UVERBS_PCF_SYS_IMAGE_GUID_SUP = 1 << 11,
	IB_UVERBS_PCF_PKEY_SW_EXT_PORT_TRAP_SUP = 1 << 12,
	IB_UVERBS_PCF_EXTENDED_SPEEDS_SUP = 1 << 14,
	IB_UVERBS_PCF_CM_SUP = 1 << 16,
	IB_UVERBS_PCF_SNMP_TUNNEL_SUP = 1 << 17,
	IB_UVERBS_PCF_REINIT_SUP = 1 << 18,
	IB_UVERBS_PCF_DEVICE_MGMT_SUP = 1 << 19,
	IB_UVERBS_PCF_VENDOR_CLASS_SUP = 1 << 20,
	IB_UVERBS_PCF_DR_NOTICE_SUP = 1 << 21,
	IB_UVERBS_PCF_CAP_MASK_NOTICE_SUP = 1 << 22,
	IB_UVERBS_PCF_BOOT_MGMT_SUP = 1 << 23,
	IB_UVERBS_PCF_LINK_LATENCY_SUP = 1 << 24,
	IB_UVERBS_PCF_CLIENT_REG_SUP = 1 << 25,
	/*
	 * IsOtherLocalChangesNoticeSupported is aliased by IP_BASED_GIDS and
	 * is inaccessible
	 */
	IB_UVERBS_PCF_LINK_SPEED_WIDTH_TABLE_SUP = 1 << 27,
	IB_UVERBS_PCF_VENDOR_SPECIFIC_MADS_TABLE_SUP = 1 << 28,
	IB_UVERBS_PCF_MCAST_PKEY_TRAP_SUPPRESSION_SUP = 1 << 29,
	IB_UVERBS_PCF_MCAST_FDB_TOP_SUP = 1 << 30,
	IB_UVERBS_PCF_HIERARCHY_INFO_SUP = 1ULL << 31,

	/* NOTE this is an internal flag, not an IBA flag */
	IB_UVERBS_PCF_IP_BASED_GIDS = 1 << 26,
};

enum ib_uverbs_query_port_flags {
	IB_UVERBS_QPF_GRH_REQUIRED = 1 << 0,
};

enum ib_uverbs_flow_action_esp_keymat {
	IB_UVERBS_FLOW_ACTION_ESP_KEYMAT_AES_GCM,
};

enum ib_uverbs_flow_action_esp_keymat_aes_gcm_iv_algo {
	IB_UVERBS_FLOW_ACTION_IV_ALGO_SEQ,
};

struct ib_uverbs_flow_action_esp_keymat_aes_gcm {
	__aligned_u64	iv;
	__u32		iv_algo; /* Use enum ib_uverbs_flow_action_esp_keymat_aes_gcm_iv_algo */
	__u32		salt;
	__u32		icv_len;
	__u32		key_len;
	__u32		aes_key[256 / 32];
};

enum ib_uverbs_flow_action_esp_replay {
	IB_UVERBS_FLOW_ACTION_ESP_REPLAY_NONE,
	IB_UVERBS_FLOW_ACTION_ESP_REPLAY_BMP,
};

struct ib_uverbs_flow_action_esp_replay_bmp {
	__u32	size;
};

enum ib_uverbs_flow_action_esp_flags {
	IB_UVERBS_FLOW_ACTION_ESP_FLAGS_INLINE_CRYPTO	= 0UL << 0,	/* Default */
	IB_UVERBS_FLOW_ACTION_ESP_FLAGS_FULL_OFFLOAD	= 1UL << 0,

	IB_UVERBS_FLOW_ACTION_ESP_FLAGS_TUNNEL		= 0UL << 1,	/* Default */
	IB_UVERBS_FLOW_ACTION_ESP_FLAGS_TRANSPORT	= 1UL << 1,

	IB_UVERBS_FLOW_ACTION_ESP_FLAGS_DECRYPT		= 0UL << 2,	/* Default */
	IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ENCRYPT		= 1UL << 2,

	IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW	= 1UL << 3,
};

struct ib_uverbs_flow_action_esp_encap {
	/* This struct represents a list of pointers to flow_xxxx_filter that
	 * encapsulates the payload in ESP tunnel mode.
	 */
	RDMA_UAPI_PTR(void *, val_ptr); /* pointer to a flow_xxxx_filter */
	RDMA_UAPI_PTR(struct ib_uverbs_flow_action_esp_encap *, next_ptr);
	__u16	len;	/* Len of the filter struct val_ptr points to */
	__u16	type;	/* Use flow_spec_type enum */
};

struct ib_uverbs_flow_action_esp {
	__u32	spi;
	__u32	seq;
	__u32	tfc_pad;
	__u32	flags;
	__aligned_u64	hard_limit_pkts;
};

enum ib_uverbs_read_counters_flags {
	/* prefer read values from driver cache */
	IB_UVERBS_READ_COUNTERS_PREFER_CACHED = 1 << 0,
};

#endif
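/*
 * Illustrative sketch (not part of the header): RDMA_UAPI_PTR stores a
 * user pointer in a fixed 64-bit slot, so chaining esp_encap nodes
 * requires an explicit cast.  The helper name and guard macro are made
 * up; the filter type value is whatever flow_spec_type the caller uses.
 */
#ifdef RDMA_UAPI_PTR_EXAMPLE	/* hypothetical guard, never defined */
#include <stdint.h>
#include <string.h>

static inline void esp_encap_link(struct ib_uverbs_flow_action_esp_encap *node,
				  void *filter, __u16 filter_len, __u16 type,
				  struct ib_uverbs_flow_action_esp_encap *next)
{
	memset(node, 0, sizeof(*node));
	node->val_ptr = (uintptr_t)filter;	/* pointer travels as u64 */
	node->next_ptr = (uintptr_t)next;	/* NULL terminates the list */
	node->len = filter_len;
	node->type = type;
}
#endif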
#ifndef NES_ABI_USER_H
#define NES_ABI_USER_H

#include <linux/types.h>

#define NES_ABI_USERSPACE_VER 2
#define NES_ABI_KERNEL_VER    2

/*
 * Make sure that all structs defined in this file remain laid out so
 * that they pack the same way on 32-bit and 64-bit architectures (to
 * avoid incompatibility between 32-bit userspace and 64-bit kernels).
 * In particular do not use pointer types -- pass pointers in __u64
 * instead.
 */
struct nes_alloc_ucontext_req {
	__u32 reserved32;
	__u8  userspace_ver;
	__u8  reserved8[3];
};

struct nes_alloc_ucontext_resp {
	__u32 max_pds; /* maximum pds allowed for this user process */
	__u32 max_qps; /* maximum qps allowed for this user process */
	__u32 wq_size; /* size of the WQs (sq+rq) allocated to the mmaped area */
	__u8  virtwq;  /* flag to indicate if virtual WQ are to be used or not */
	__u8  kernel_ver;
	__u8  reserved[2];
};

struct nes_alloc_pd_resp {
	__u32 pd_id;
	__u32 mmap_db_index;
};

struct nes_create_cq_req {
	__aligned_u64 user_cq_buffer;
	__u32 mcrqf;
	__u8 reserved[4];
};

struct nes_create_qp_req {
	__aligned_u64 user_wqe_buffers;
	__aligned_u64 user_qp_buffer;
};

enum iwnes_memreg_type {
	IWNES_MEMREG_TYPE_MEM = 0x0000,
	IWNES_MEMREG_TYPE_QP = 0x0001,
	IWNES_MEMREG_TYPE_CQ = 0x0002,
	IWNES_MEMREG_TYPE_MW = 0x0003,
	IWNES_MEMREG_TYPE_FMR = 0x0004,
	IWNES_MEMREG_TYPE_FMEM = 0x0005,
};

struct nes_mem_reg_req {
	__u32 reg_type;	/* indicates if id is memory, QP or CQ */
	__u32 reserved;
};

struct nes_create_cq_resp {
	__u32 cq_id;
	__u32 cq_size;
	__u32 mmap_db_index;
	__u32 reserved;
};

struct nes_create_qp_resp {
	__u32 qp_id;
	__u32 actual_sq_size;
	__u32 actual_rq_size;
	__u32 mmap_sq_db_index;
	__u32 mmap_rq_db_index;
	__u32 nes_drv_opt;
};

#endif /* NES_ABI_USER_H */

#include <linux/types.h>

/*
 * Increment this value if any changes that break userspace ABI
 * compatibility are made.
 */
#define IB_USER_VERBS_ABI_VERSION	6
#define IB_USER_VERBS_CMD_THRESHOLD    50

enum {
	IB_USER_VERBS_CMD_GET_CONTEXT,
	IB_USER_VERBS_CMD_QUERY_DEVICE,
	IB_USER_VERBS_CMD_QUERY_PORT,
	IB_USER_VERBS_CMD_ALLOC_PD,
	IB_USER_VERBS_CMD_DEALLOC_PD,
	IB_USER_VERBS_CMD_CREATE_AH,
	IB_USER_VERBS_CMD_MODIFY_AH,
	IB_USER_VERBS_CMD_QUERY_AH,
	IB_USER_VERBS_CMD_DESTROY_AH,
	IB_USER_VERBS_CMD_REG_MR,
	IB_USER_VERBS_CMD_REG_SMR,
	IB_USER_VERBS_CMD_REREG_MR,
	IB_USER_VERBS_CMD_QUERY_MR,
	IB_USER_VERBS_CMD_DEREG_MR,
	IB_USER_VERBS_CMD_ALLOC_MW,
	IB_USER_VERBS_CMD_BIND_MW,
	IB_USER_VERBS_CMD_DEALLOC_MW,
	IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL,
	IB_USER_VERBS_CMD_CREATE_CQ,
	IB_USER_VERBS_CMD_RESIZE_CQ,
	IB_USER_VERBS_CMD_DESTROY_CQ,
	IB_USER_VERBS_CMD_POLL_CQ,
	IB_USER_VERBS_CMD_PEEK_CQ,
	IB_USER_VERBS_CMD_REQ_NOTIFY_CQ,
	IB_USER_VERBS_CMD_CREATE_QP,
	IB_USER_VERBS_CMD_QUERY_QP,
	IB_USER_VERBS_CMD_MODIFY_QP,
	IB_USER_VERBS_CMD_DESTROY_QP,
	IB_USER_VERBS_CMD_POST_SEND,
	IB_USER_VERBS_CMD_POST_RECV,
	IB_USER_VERBS_CMD_ATTACH_MCAST,
	IB_USER_VERBS_CMD_DETACH_MCAST,
	IB_USER_VERBS_CMD_CREATE_SRQ,
	IB_USER_VERBS_CMD_MODIFY_SRQ,
	IB_USER_VERBS_CMD_QUERY_SRQ,
	IB_USER_VERBS_CMD_DESTROY_SRQ,
	IB_USER_VERBS_CMD_POST_SRQ_RECV,
	IB_USER_VERBS_CMD_OPEN_XRCD,
	IB_USER_VERBS_CMD_CLOSE_XRCD,
	IB_USER_VERBS_CMD_CREATE_XSRQ,
	IB_USER_VERBS_CMD_OPEN_QP,
};

enum {
	IB_USER_VERBS_EX_CMD_QUERY_DEVICE = IB_USER_VERBS_CMD_QUERY_DEVICE,
	IB_USER_VERBS_EX_CMD_CREATE_CQ = IB_USER_VERBS_CMD_CREATE_CQ,
	IB_USER_VERBS_EX_CMD_CREATE_QP = IB_USER_VERBS_CMD_CREATE_QP,
	IB_USER_VERBS_EX_CMD_MODIFY_QP = IB_USER_VERBS_CMD_MODIFY_QP,
	IB_USER_VERBS_EX_CMD_CREATE_FLOW = IB_USER_VERBS_CMD_THRESHOLD,
	IB_USER_VERBS_EX_CMD_DESTROY_FLOW,
	IB_USER_VERBS_EX_CMD_CREATE_WQ,
	IB_USER_VERBS_EX_CMD_MODIFY_WQ,
	IB_USER_VERBS_EX_CMD_DESTROY_WQ,
	IB_USER_VERBS_EX_CMD_CREATE_RWQ_IND_TBL,
	IB_USER_VERBS_EX_CMD_DESTROY_RWQ_IND_TBL,
	IB_USER_VERBS_EX_CMD_MODIFY_CQ
};
/*
 * Make sure that all structs defined in this file remain laid out so
 * that they pack the same way on 32-bit and 64-bit architectures (to
 * avoid incompatibility between 32-bit userspace and 64-bit kernels).
 * Specifically:
 *  - Do not use pointer types -- pass pointers in __u64 instead.
 *  - Make sure that any structure larger than 4 bytes is padded to a
 *    multiple of 8 bytes.  Otherwise the structure size will be
 *    different between 32-bit and 64-bit architectures.
 */

struct ib_uverbs_async_event_desc {
	__aligned_u64 element;
	__u32 event_type;	/* enum ib_event_type */
	__u32 reserved;
};

struct ib_uverbs_comp_event_desc {
	__aligned_u64 cq_handle;
};

struct ib_uverbs_cq_moderation_caps {
	__u16 max_cq_moderation_count;
	__u16 max_cq_moderation_period;
	__u32 reserved;
};

/*
 * All commands from userspace should start with a __u32 command field
 * followed by __u16 in_words and out_words fields (which give the
 * length of the command block and response buffer if any in 32-bit
 * words).  The kernel driver will read these fields first and read
 * the rest of the command struct based on these values.
 */

#define IB_USER_VERBS_CMD_COMMAND_MASK		0xff
#define IB_USER_VERBS_CMD_FLAG_EXTENDED		0x80000000u

struct ib_uverbs_cmd_hdr {
	__u32 command;
	__u16 in_words;
	__u16 out_words;
};

struct ib_uverbs_ex_cmd_hdr {
	__aligned_u64 response;
	__u16 provider_in_words;
	__u16 provider_out_words;
	__u32 cmd_hdr_reserved;
};

struct ib_uverbs_get_context {
	__aligned_u64 response;
	__aligned_u64 driver_data[0];
};

struct ib_uverbs_get_context_resp {
	__u32 async_fd;
	__u32 num_comp_vectors;
};
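/*
 * Illustrative sketch (not part of the header): issuing GET_CONTEXT
 * over the write() ABI just described.  in_words/out_words count
 * 32-bit words, with in_words covering the whole written block
 * including the header; the fd would come from opening a uverbs
 * device node.  The guard macro and function name are made up.
 */
#ifdef IB_UVERBS_CMD_EXAMPLE	/* hypothetical guard, never defined */
#include <unistd.h>
#include <string.h>

static inline int uverbs_get_context(int fd,
				     struct ib_uverbs_get_context_resp *resp)
{
	struct {
		struct ib_uverbs_cmd_hdr hdr;
		struct ib_uverbs_get_context cmd;
	} req;

	memset(&req, 0, sizeof(req));
	req.hdr.command = IB_USER_VERBS_CMD_GET_CONTEXT;
	req.hdr.in_words = sizeof(req) / 4;	/* header + command */
	req.hdr.out_words = sizeof(*resp) / 4;
	req.cmd.response = (__u64)(unsigned long)resp;

	return write(fd, &req, sizeof(req)) == sizeof(req) ? 0 : -1;
}
#endif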
struct ib_uverbs_query_device {
	__aligned_u64 response;
	__aligned_u64 driver_data[0];
};

struct ib_uverbs_query_device_resp {
	__aligned_u64 fw_ver;
	__be64 node_guid;
	__be64 sys_image_guid;
	__aligned_u64 max_mr_size;
	__aligned_u64 page_size_cap;
	__u32 vendor_id;
	__u32 vendor_part_id;
	__u32 hw_ver;
	__u32 max_qp;
	__u32 max_qp_wr;
	__u32 device_cap_flags;
	__u32 max_sge;
	__u32 max_sge_rd;
	__u32 max_cq;
	__u32 max_cqe;
	__u32 max_mr;
	__u32 max_pd;
	__u32 max_qp_rd_atom;
	__u32 max_ee_rd_atom;
	__u32 max_res_rd_atom;
	__u32 max_qp_init_rd_atom;
	__u32 max_ee_init_rd_atom;
	__u32 atomic_cap;
	__u32 max_ee;
	__u32 max_rdd;
	__u32 max_mw;
	__u32 max_raw_ipv6_qp;
	__u32 max_raw_ethy_qp;
	__u32 max_mcast_grp;
	__u32 max_mcast_qp_attach;
	__u32 max_total_mcast_qp_attach;
	__u32 max_ah;
	__u32 max_fmr;
	__u32 max_map_per_fmr;
	__u32 max_srq;
	__u32 max_srq_wr;
	__u32 max_srq_sge;
	__u16 max_pkeys;
	__u8  local_ca_ack_delay;
	__u8  phys_port_cnt;
	__u8  reserved[4];
};

struct ib_uverbs_ex_query_device {
	__u32 comp_mask;
	__u32 reserved;
};

struct ib_uverbs_odp_caps {
	__aligned_u64 general_caps;
	struct {
		__u32 rc_odp_caps;
		__u32 uc_odp_caps;
		__u32 ud_odp_caps;
	} per_transport_caps;
	__u32 reserved;
};

struct ib_uverbs_rss_caps {
	/* Corresponding bit will be set if qp type from
	 * 'enum ib_qp_type' is supported, e.g.
	 * supported_qpts |= 1 << IB_QPT_UD
	 */
	__u32 supported_qpts;
	__u32 max_rwq_indirection_tables;
	__u32 max_rwq_indirection_table_size;
	__u32 reserved;
};

struct ib_uverbs_tm_caps {
	/* Max size of rendezvous request message */
	__u32 max_rndv_hdr_size;
	/* Max number of entries in tag matching list */
	__u32 max_num_tags;
	/* TM flags */
	__u32 flags;
	/* Max number of outstanding list operations */
	__u32 max_ops;
	/* Max number of SGE in tag matching entry */
	__u32 max_sge;
	__u32 reserved;
};

struct ib_uverbs_ex_query_device_resp {
	struct ib_uverbs_query_device_resp base;
	__u32 comp_mask;
	__u32 response_length;
	struct ib_uverbs_odp_caps odp_caps;
	__aligned_u64 timestamp_mask;
	__aligned_u64 hca_core_clock; /* in KHZ */
	__aligned_u64 device_cap_flags_ex;
	struct ib_uverbs_rss_caps rss_caps;
	__u32 max_wq_type_rq;
	__u32 raw_packet_caps;
	struct ib_uverbs_tm_caps tm_caps;
	struct ib_uverbs_cq_moderation_caps cq_moderation_caps;
	__aligned_u64 max_dm_size;
};

struct ib_uverbs_query_port {
	__aligned_u64 response;
	__u8 port_num;
	__u8 reserved[7];
	__aligned_u64 driver_data[0];
};

struct ib_uverbs_query_port_resp {
	__u32 port_cap_flags;	/* see ib_uverbs_query_port_cap_flags */
	__u32 max_msg_sz;
	__u32 bad_pkey_cntr;
	__u32 qkey_viol_cntr;
	__u32 gid_tbl_len;
	__u16 pkey_tbl_len;
	__u16 lid;
	__u16 sm_lid;
	__u8 state;
	__u8 max_mtu;
	__u8 active_mtu;
	__u8 lmc;
	__u8 max_vl_num;
	__u8 sm_sl;
	__u8 subnet_timeout;
	__u8 init_type_reply;
	__u8 active_width;
	__u8 active_speed;
	__u8 phys_state;
	__u8 link_layer;
	__u8 flags;		/* see ib_uverbs_query_port_flags */
	__u8 reserved;
};

struct ib_uverbs_alloc_pd {
	__aligned_u64 response;
	__aligned_u64 driver_data[0];
};

struct ib_uverbs_alloc_pd_resp {
	__u32 pd_handle;
};

struct ib_uverbs_dealloc_pd {
	__u32 pd_handle;
};

struct ib_uverbs_open_xrcd {
	__aligned_u64 response;
	__u32 fd;
	__u32 oflags;
	__aligned_u64 driver_data[0];
};

struct ib_uverbs_open_xrcd_resp {
	__u32 xrcd_handle;
};

struct ib_uverbs_close_xrcd {
	__u32 xrcd_handle;
};

struct ib_uverbs_reg_mr {
	__aligned_u64 response;
	__aligned_u64 start;
	__aligned_u64 length;
	__aligned_u64 hca_va;
	__u32 pd_handle;
	__u32 access_flags;
	__aligned_u64 driver_data[0];
};

struct ib_uverbs_reg_mr_resp {
	__u32 mr_handle;
	__u32 lkey;
	__u32 rkey;
};

struct ib_uverbs_rereg_mr {
	__aligned_u64 response;
	__u32 mr_handle;
	__u32 flags;
	__aligned_u64 start;
	__aligned_u64 length;
	__aligned_u64 hca_va;
	__u32 pd_handle;
	__u32 access_flags;
};

struct ib_uverbs_rereg_mr_resp {
	__u32 lkey;
	__u32 rkey;
};

struct ib_uverbs_dereg_mr {
	__u32 mr_handle;
};

struct ib_uverbs_alloc_mw {
	__aligned_u64 response;
	__u32 pd_handle;
	__u8  mw_type;
	__u8  reserved[3];
};

struct ib_uverbs_alloc_mw_resp {
	__u32 mw_handle;
	__u32 rkey;
};

struct ib_uverbs_dealloc_mw {
	__u32 mw_handle;
};

struct ib_uverbs_create_comp_channel {
	__aligned_u64 response;
};

struct ib_uverbs_create_comp_channel_resp {
	__u32 fd;
};

struct ib_uverbs_create_cq {
	__aligned_u64 response;
	__aligned_u64 user_handle;
	__u32 cqe;
	__u32 comp_vector;
	__s32 comp_channel;
	__u32 reserved;
	__aligned_u64 driver_data[0];
};

enum ib_uverbs_ex_create_cq_flags {
	IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION = 1 << 0,
	IB_UVERBS_CQ_FLAGS_IGNORE_OVERRUN = 1 << 1,
};

struct ib_uverbs_ex_create_cq {
	__aligned_u64 user_handle;
	__u32 cqe;
	__u32 comp_vector;
	__s32 comp_channel;
	__u32 comp_mask;
	__u32 flags;	/* bitmask of ib_uverbs_ex_create_cq_flags */
	__u32 reserved;
};

struct ib_uverbs_create_cq_resp {
	__u32 cq_handle;
	__u32 cqe;
};

struct ib_uverbs_ex_create_cq_resp {
	struct ib_uverbs_create_cq_resp base;
	__u32 comp_mask;
	__u32 response_length;
};
struct ib_uverbs_resize_cq {
	__aligned_u64 response;
	__u32 cq_handle;
	__u32 cqe;
	__aligned_u64 driver_data[0];
};

struct ib_uverbs_resize_cq_resp {
	__u32 cqe;
	__u32 reserved;
	__aligned_u64 driver_data[0];
};

struct ib_uverbs_poll_cq {
	__aligned_u64 response;
	__u32 cq_handle;
	__u32 ne;
};

struct ib_uverbs_wc {
	__aligned_u64 wr_id;
	__u32 status;
	__u32 opcode;
	__u32 vendor_err;
	__u32 byte_len;
	union {
		__be32 imm_data;
		__u32 invalidate_rkey;
	} ex;
	__u32 qp_num;
	__u32 src_qp;
	__u32 wc_flags;
	__u16 pkey_index;
	__u16 slid;
	__u8 sl;
	__u8 dlid_path_bits;
	__u8 port_num;
	__u8 reserved;
};

struct ib_uverbs_poll_cq_resp {
	__u32 count;
	__u32 reserved;
	struct ib_uverbs_wc wc[0];
};

struct ib_uverbs_req_notify_cq {
	__u32 cq_handle;
	__u32 solicited_only;
};

struct ib_uverbs_destroy_cq {
	__aligned_u64 response;
	__u32 cq_handle;
	__u32 reserved;
};

struct ib_uverbs_destroy_cq_resp {
	__u32 comp_events_reported;
	__u32 async_events_reported;
};

struct ib_uverbs_global_route {
	__u8  dgid[16];
	__u32 flow_label;
	__u8  sgid_index;
	__u8  hop_limit;
	__u8  traffic_class;
	__u8  reserved;
};

struct ib_uverbs_ah_attr {
	struct ib_uverbs_global_route grh;
	__u16 dlid;
	__u8  sl;
	__u8  src_path_bits;
	__u8  static_rate;
	__u8  is_global;
	__u8  port_num;
	__u8  reserved;
};

struct ib_uverbs_qp_attr {
	__u32	qp_attr_mask;
	__u32	qp_state;
	__u32	cur_qp_state;
	__u32	path_mtu;
	__u32	path_mig_state;
	__u32	qkey;
	__u32	rq_psn;
	__u32	sq_psn;
	__u32	dest_qp_num;
	__u32	qp_access_flags;

	struct ib_uverbs_ah_attr ah_attr;
	struct ib_uverbs_ah_attr alt_ah_attr;

	/* ib_qp_cap */
	__u32	max_send_wr;
	__u32	max_recv_wr;
	__u32	max_send_sge;
	__u32	max_recv_sge;
	__u32	max_inline_data;

	__u16	pkey_index;
	__u16	alt_pkey_index;
	__u8	en_sqd_async_notify;
	__u8	sq_draining;
	__u8	max_rd_atomic;
	__u8	max_dest_rd_atomic;
	__u8	min_rnr_timer;
	__u8	port_num;
	__u8	timeout;
	__u8	retry_cnt;
	__u8	rnr_retry;
	__u8	alt_port_num;
	__u8	alt_timeout;
	__u8	reserved[5];
};

struct ib_uverbs_create_qp {
	__aligned_u64 response;
	__aligned_u64 user_handle;
	__u32 pd_handle;
	__u32 send_cq_handle;
	__u32 recv_cq_handle;
	__u32 srq_handle;
	__u32 max_send_wr;
	__u32 max_recv_wr;
	__u32 max_send_sge;
	__u32 max_recv_sge;
	__u32 max_inline_data;
	__u8  sq_sig_all;
	__u8  qp_type;
	__u8  is_srq;
	__u8  reserved;
	__aligned_u64 driver_data[0];
};

enum ib_uverbs_create_qp_mask {
	IB_UVERBS_CREATE_QP_MASK_IND_TABLE = 1UL << 0,
};

enum {
	IB_UVERBS_CREATE_QP_SUP_COMP_MASK = IB_UVERBS_CREATE_QP_MASK_IND_TABLE,
};

enum {
	/*
	 * This value is equal to IB_QP_DEST_QPN.
	 */
	IB_USER_LEGACY_LAST_QP_ATTR_MASK = 1ULL << 20,
};

enum {
	/*
	 * This value is equal to IB_QP_RATE_LIMIT.
	 */
	IB_USER_LAST_QP_ATTR_MASK = 1ULL << 25,
};
struct ib_uverbs_ex_create_qp {
	__aligned_u64 user_handle;
	__u32 pd_handle;
	__u32 send_cq_handle;
	__u32 recv_cq_handle;
	__u32 srq_handle;
	__u32 max_send_wr;
	__u32 max_recv_wr;
	__u32 max_send_sge;
	__u32 max_recv_sge;
	__u32 max_inline_data;
	__u8  sq_sig_all;
	__u8  qp_type;
	__u8  is_srq;
	__u8  reserved;
	__u32 comp_mask;
	__u32 create_flags;
	__u32 rwq_ind_tbl_handle;
	__u32 source_qpn;
};

struct ib_uverbs_open_qp {
	__aligned_u64 response;
	__aligned_u64 user_handle;
	__u32 pd_handle;
	__u32 qpn;
	__u8  qp_type;
	__u8  reserved[7];
	__aligned_u64 driver_data[0];
};

/* also used for open response */
struct ib_uverbs_create_qp_resp {
	__u32 qp_handle;
	__u32 qpn;
	__u32 max_send_wr;
	__u32 max_recv_wr;
	__u32 max_send_sge;
	__u32 max_recv_sge;
	__u32 max_inline_data;
	__u32 reserved;
};

struct ib_uverbs_ex_create_qp_resp {
	struct ib_uverbs_create_qp_resp base;
	__u32 comp_mask;
	__u32 response_length;
};

/*
 * This struct needs to remain a multiple of 8 bytes to keep the
 * alignment of the modify QP parameters.
 */
struct ib_uverbs_qp_dest {
	__u8  dgid[16];
	__u32 flow_label;
	__u16 dlid;
	__u16 reserved;
	__u8  sgid_index;
	__u8  hop_limit;
	__u8  traffic_class;
	__u8  sl;
	__u8  src_path_bits;
	__u8  static_rate;
	__u8  is_global;
	__u8  port_num;
};

struct ib_uverbs_query_qp {
	__aligned_u64 response;
	__u32 qp_handle;
	__u32 attr_mask;
	__aligned_u64 driver_data[0];
};

struct ib_uverbs_query_qp_resp {
	struct ib_uverbs_qp_dest dest;
	struct ib_uverbs_qp_dest alt_dest;
	__u32 max_send_wr;
	__u32 max_recv_wr;
	__u32 max_send_sge;
	__u32 max_recv_sge;
	__u32 max_inline_data;
	__u32 qkey;
	__u32 rq_psn;
	__u32 sq_psn;
	__u32 dest_qp_num;
	__u32 qp_access_flags;
	__u16 pkey_index;
	__u16 alt_pkey_index;
	__u8  qp_state;
	__u8  cur_qp_state;
	__u8  path_mtu;
	__u8  path_mig_state;
	__u8  sq_draining;
	__u8  max_rd_atomic;
	__u8  max_dest_rd_atomic;
	__u8  min_rnr_timer;
	__u8  port_num;
	__u8  timeout;
	__u8  retry_cnt;
	__u8  rnr_retry;
	__u8  alt_port_num;
	__u8  alt_timeout;
	__u8  sq_sig_all;
	__u8  reserved[5];
	__aligned_u64 driver_data[0];
};

struct ib_uverbs_modify_qp {
	struct ib_uverbs_qp_dest dest;
	struct ib_uverbs_qp_dest alt_dest;
	__u32 qp_handle;
	__u32 attr_mask;
	__u32 qkey;
	__u32 rq_psn;
	__u32 sq_psn;
	__u32 dest_qp_num;
	__u32 qp_access_flags;
	__u16 pkey_index;
	__u16 alt_pkey_index;
	__u8  qp_state;
	__u8  cur_qp_state;
	__u8  path_mtu;
	__u8  path_mig_state;
	__u8  en_sqd_async_notify;
	__u8  max_rd_atomic;
	__u8  max_dest_rd_atomic;
	__u8  min_rnr_timer;
	__u8  port_num;
	__u8  timeout;
	__u8  retry_cnt;
	__u8  rnr_retry;
	__u8  alt_port_num;
	__u8  alt_timeout;
	__u8  reserved[2];
	__aligned_u64 driver_data[0];
};

struct ib_uverbs_ex_modify_qp {
	struct ib_uverbs_modify_qp base;
	__u32	rate_limit;
	__u32	reserved;
};

struct ib_uverbs_modify_qp_resp {
};

struct ib_uverbs_ex_modify_qp_resp {
	__u32  comp_mask;
	__u32  response_length;
};

struct ib_uverbs_destroy_qp {
	__aligned_u64 response;
	__u32 qp_handle;
	__u32 reserved;
};

struct ib_uverbs_destroy_qp_resp {
	__u32 events_reported;
};

/*
 * The ib_uverbs_sge structure isn't used anywhere, since we assume
 * the ib_sge structure is packed the same way on 32-bit and 64-bit
 * architectures in both kernel and user space.  It's just here to
 * document the ABI.
 */
*/ struct ib_uverbs_sge { __aligned_u64 addr; __u32 length; __u32 lkey; }; enum ib_uverbs_wr_opcode { IB_UVERBS_WR_RDMA_WRITE = 0, IB_UVERBS_WR_RDMA_WRITE_WITH_IMM = 1, IB_UVERBS_WR_SEND = 2, IB_UVERBS_WR_SEND_WITH_IMM = 3, IB_UVERBS_WR_RDMA_READ = 4, IB_UVERBS_WR_ATOMIC_CMP_AND_SWP = 5, IB_UVERBS_WR_ATOMIC_FETCH_AND_ADD = 6, IB_UVERBS_WR_LOCAL_INV = 7, IB_UVERBS_WR_BIND_MW = 8, IB_UVERBS_WR_SEND_WITH_INV = 9, IB_UVERBS_WR_TSO = 10, IB_UVERBS_WR_RDMA_READ_WITH_INV = 11, IB_UVERBS_WR_MASKED_ATOMIC_CMP_AND_SWP = 12, IB_UVERBS_WR_MASKED_ATOMIC_FETCH_AND_ADD = 13, /* Review enum ib_wr_opcode before modifying this */ }; struct ib_uverbs_send_wr { __aligned_u64 wr_id; __u32 num_sge; __u32 opcode; /* see enum ib_uverbs_wr_opcode */ __u32 send_flags; union { __be32 imm_data; __u32 invalidate_rkey; } ex; union { struct { __aligned_u64 remote_addr; __u32 rkey; __u32 reserved; } rdma; struct { __aligned_u64 remote_addr; __aligned_u64 compare_add; __aligned_u64 swap; __u32 rkey; __u32 reserved; } atomic; struct { __u32 ah; __u32 remote_qpn; __u32 remote_qkey; __u32 reserved; } ud; } wr; }; struct ib_uverbs_post_send { __aligned_u64 response; __u32 qp_handle; __u32 wr_count; __u32 sge_count; __u32 wqe_size; struct ib_uverbs_send_wr send_wr[0]; }; struct ib_uverbs_post_send_resp { __u32 bad_wr; }; struct ib_uverbs_recv_wr { __aligned_u64 wr_id; __u32 num_sge; __u32 reserved; }; struct ib_uverbs_post_recv { __aligned_u64 response; __u32 qp_handle; __u32 wr_count; __u32 sge_count; __u32 wqe_size; struct ib_uverbs_recv_wr recv_wr[0]; }; struct ib_uverbs_post_recv_resp { __u32 bad_wr; }; struct ib_uverbs_post_srq_recv { __aligned_u64 response; __u32 srq_handle; __u32 wr_count; __u32 sge_count; __u32 wqe_size; struct ib_uverbs_recv_wr recv[0]; }; struct ib_uverbs_post_srq_recv_resp { __u32 bad_wr; }; struct ib_uverbs_create_ah { __aligned_u64 response; __aligned_u64 user_handle; __u32 pd_handle; __u32 reserved; struct ib_uverbs_ah_attr attr; }; struct ib_uverbs_create_ah_resp { __u32 ah_handle; }; struct ib_uverbs_destroy_ah { __u32 ah_handle; }; struct ib_uverbs_attach_mcast { __u8 gid[16]; __u32 qp_handle; __u16 mlid; __u16 reserved; __aligned_u64 driver_data[0]; }; struct ib_uverbs_detach_mcast { __u8 gid[16]; __u32 qp_handle; __u16 mlid; __u16 reserved; __aligned_u64 driver_data[0]; }; struct ib_uverbs_flow_spec_hdr { __u32 type; __u16 size; __u16 reserved; /* followed by flow_spec */ __aligned_u64 flow_spec_data[0]; }; struct ib_uverbs_flow_eth_filter { __u8 dst_mac[6]; __u8 src_mac[6]; __be16 ether_type; __be16 vlan_tag; }; struct ib_uverbs_flow_spec_eth { union { struct ib_uverbs_flow_spec_hdr hdr; struct { __u32 type; __u16 size; __u16 reserved; }; }; struct ib_uverbs_flow_eth_filter val; struct ib_uverbs_flow_eth_filter mask; }; struct ib_uverbs_flow_ipv4_filter { __be32 src_ip; __be32 dst_ip; __u8 proto; __u8 tos; __u8 ttl; __u8 flags; }; struct ib_uverbs_flow_spec_ipv4 { union { struct ib_uverbs_flow_spec_hdr hdr; struct { __u32 type; __u16 size; __u16 reserved; }; }; struct ib_uverbs_flow_ipv4_filter val; struct ib_uverbs_flow_ipv4_filter mask; }; struct ib_uverbs_flow_tcp_udp_filter { __be16 dst_port; __be16 src_port; }; struct ib_uverbs_flow_spec_tcp_udp { union { struct ib_uverbs_flow_spec_hdr hdr; struct { __u32 type; __u16 size; __u16 reserved; }; }; struct ib_uverbs_flow_tcp_udp_filter val; struct ib_uverbs_flow_tcp_udp_filter mask; }; struct ib_uverbs_flow_ipv6_filter { __u8 src_ip[16]; __u8 dst_ip[16]; __be32 flow_label; __u8 next_hdr; __u8 traffic_class; __u8 hop_limit; __u8 reserved; }; 
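/* * Editorial sketch (not part of the ABI): every ib_uverbs_flow_spec_* structure above and below opens with the same anonymous union, so its leading bytes can be viewed either through 'hdr' or through the inline type/size fields; 'size' must hold the byte size of the whole spec so the kernel can walk a chain of specs laid out back to back. A minimal way user space might fill an Ethernet spec is shown below; the type value 0x20 is an assumption based on the kernel's flow-spec numbering, and fill_eth_spec() is a hypothetical helper, not part of this header. */
#include <string.h>
static void fill_eth_spec(struct ib_uverbs_flow_spec_eth *spec, const __u8 dst_mac[6])
{
	memset(spec, 0, sizeof(*spec));
	spec->type = 0x20;			/* assumed IB_FLOW_SPEC_ETH value */
	spec->size = sizeof(*spec);		/* lets the kernel step to the next spec */
	memcpy(spec->val.dst_mac, dst_mac, 6);
	memset(spec->mask.dst_mac, 0xff, 6);	/* match every bit of dst_mac */
}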
struct ib_uverbs_flow_spec_ipv6 { union { struct ib_uverbs_flow_spec_hdr hdr; struct { __u32 type; __u16 size; __u16 reserved; }; }; struct ib_uverbs_flow_ipv6_filter val; struct ib_uverbs_flow_ipv6_filter mask; }; struct ib_uverbs_flow_spec_action_tag { union { struct ib_uverbs_flow_spec_hdr hdr; struct { __u32 type; __u16 size; __u16 reserved; }; }; __u32 tag_id; __u32 reserved1; }; struct ib_uverbs_flow_spec_action_drop { union { struct ib_uverbs_flow_spec_hdr hdr; struct { __u32 type; __u16 size; __u16 reserved; }; }; }; struct ib_uverbs_flow_spec_action_handle { union { struct ib_uverbs_flow_spec_hdr hdr; struct { __u32 type; __u16 size; __u16 reserved; }; }; __u32 handle; __u32 reserved1; }; struct ib_uverbs_flow_spec_action_count { union { struct ib_uverbs_flow_spec_hdr hdr; struct { __u32 type; __u16 size; __u16 reserved; }; }; __u32 handle; __u32 reserved1; }; struct ib_uverbs_flow_tunnel_filter { __be32 tunnel_id; }; struct ib_uverbs_flow_spec_tunnel { union { struct ib_uverbs_flow_spec_hdr hdr; struct { __u32 type; __u16 size; __u16 reserved; }; }; struct ib_uverbs_flow_tunnel_filter val; struct ib_uverbs_flow_tunnel_filter mask; }; struct ib_uverbs_flow_spec_esp_filter { __u32 spi; __u32 seq; }; struct ib_uverbs_flow_spec_esp { union { struct ib_uverbs_flow_spec_hdr hdr; struct { __u32 type; __u16 size; __u16 reserved; }; }; struct ib_uverbs_flow_spec_esp_filter val; struct ib_uverbs_flow_spec_esp_filter mask; }; struct ib_uverbs_flow_gre_filter { /* c_ks_res0_ver field is bits 0-15 in offset 0 of a standard GRE header: * bit 0 - C - checksum bit. * bit 1 - reserved. set to 0. * bit 2 - key bit. * bit 3 - sequence number bit. * bits 4:12 - reserved. set to 0. * bits 13:15 - GRE version. */ __be16 c_ks_res0_ver; __be16 protocol; __be32 key; }; struct ib_uverbs_flow_spec_gre { union { struct ib_uverbs_flow_spec_hdr hdr; struct { __u32 type; __u16 size; __u16 reserved; }; }; struct ib_uverbs_flow_gre_filter val; struct ib_uverbs_flow_gre_filter mask; }; struct ib_uverbs_flow_mpls_filter { /* The field includes the entire MPLS label: * bits 0:19 - label field. * bits 20:22 - traffic class field. * bit 23 - bottom of stack bit. * bits 24:31 - ttl field.
*/ __be32 label; }; struct ib_uverbs_flow_spec_mpls { union { struct ib_uverbs_flow_spec_hdr hdr; struct { __u32 type; __u16 size; __u16 reserved; }; }; struct ib_uverbs_flow_mpls_filter val; struct ib_uverbs_flow_mpls_filter mask; }; struct ib_uverbs_flow_attr { __u32 type; __u16 size; __u16 priority; __u8 num_of_specs; __u8 reserved[2]; __u8 port; __u32 flags; /* Following are the optional layers according to user request * struct ib_flow_spec_xxx * struct ib_flow_spec_yyy */ struct ib_uverbs_flow_spec_hdr flow_specs[0]; }; struct ib_uverbs_create_flow { __u32 comp_mask; __u32 qp_handle; struct ib_uverbs_flow_attr flow_attr; }; struct ib_uverbs_create_flow_resp { __u32 comp_mask; __u32 flow_handle; }; struct ib_uverbs_destroy_flow { __u32 comp_mask; __u32 flow_handle; }; struct ib_uverbs_create_srq { __aligned_u64 response; __aligned_u64 user_handle; __u32 pd_handle; __u32 max_wr; __u32 max_sge; __u32 srq_limit; __aligned_u64 driver_data[0]; }; struct ib_uverbs_create_xsrq { __aligned_u64 response; __aligned_u64 user_handle; __u32 srq_type; __u32 pd_handle; __u32 max_wr; __u32 max_sge; __u32 srq_limit; __u32 max_num_tags; __u32 xrcd_handle; __u32 cq_handle; __aligned_u64 driver_data[0]; }; struct ib_uverbs_create_srq_resp { __u32 srq_handle; __u32 max_wr; __u32 max_sge; __u32 srqn; }; struct ib_uverbs_modify_srq { __u32 srq_handle; __u32 attr_mask; __u32 max_wr; __u32 srq_limit; __aligned_u64 driver_data[0]; }; struct ib_uverbs_query_srq { __aligned_u64 response; __u32 srq_handle; __u32 reserved; __aligned_u64 driver_data[0]; }; struct ib_uverbs_query_srq_resp { __u32 max_wr; __u32 max_sge; __u32 srq_limit; __u32 reserved; }; struct ib_uverbs_destroy_srq { __aligned_u64 response; __u32 srq_handle; __u32 reserved; }; struct ib_uverbs_destroy_srq_resp { __u32 events_reported; }; struct ib_uverbs_ex_create_wq { __u32 comp_mask; __u32 wq_type; __aligned_u64 user_handle; __u32 pd_handle; __u32 cq_handle; __u32 max_wr; __u32 max_sge; __u32 create_flags; /* Use enum ib_wq_flags */ __u32 reserved; }; struct ib_uverbs_ex_create_wq_resp { __u32 comp_mask; __u32 response_length; __u32 wq_handle; __u32 max_wr; __u32 max_sge; __u32 wqn; }; struct ib_uverbs_ex_destroy_wq { __u32 comp_mask; __u32 wq_handle; }; struct ib_uverbs_ex_destroy_wq_resp { __u32 comp_mask; __u32 response_length; __u32 events_reported; __u32 reserved; }; struct ib_uverbs_ex_modify_wq { __u32 attr_mask; __u32 wq_handle; __u32 wq_state; __u32 curr_wq_state; __u32 flags; /* Use enum ib_wq_flags */ __u32 flags_mask; /* Use enum ib_wq_flags */ }; /* Size cap chosen to prevent excessive memory allocation, rather than the max expected table size */ #define IB_USER_VERBS_MAX_LOG_IND_TBL_SIZE 0x0d struct ib_uverbs_ex_create_rwq_ind_table { __u32 comp_mask; __u32 log_ind_tbl_size; /* Following are the wq handles according to log_ind_tbl_size * wq_handle1 * wq_handle2 */ __u32 wq_handles[0]; }; struct ib_uverbs_ex_create_rwq_ind_table_resp { __u32 comp_mask; __u32 response_length; __u32 ind_tbl_handle; __u32 ind_tbl_num; }; struct ib_uverbs_ex_destroy_rwq_ind_table { __u32 comp_mask; __u32 ind_tbl_handle; }; struct ib_uverbs_cq_moderation { __u16 cq_count; __u16 cq_period; }; struct ib_uverbs_ex_modify_cq { __u32 cq_handle; __u32 attr_mask; struct ib_uverbs_cq_moderation attr; __u32 reserved; }; #define IB_DEVICE_NAME_MAX 64 #endif /* IB_USER_VERBS_H */ #define PVRDMA_UVERBS_ABI_VERSION 3 /* ABI Version. */ #define PVRDMA_UAR_HANDLE_MASK 0x00FFFFFF /* Bottom 24 bits. */ #define PVRDMA_UAR_QP_OFFSET 0 /* QP doorbell. */ #define PVRDMA_UAR_QP_SEND (1 << 30) /* Send bit.
*/ #define PVRDMA_UAR_QP_RECV (1 << 31) /* Recv bit. */ #define PVRDMA_UAR_CQ_OFFSET 4 /* CQ doorbell. */ #define PVRDMA_UAR_CQ_ARM_SOL (1 << 29) /* Arm solicited bit. */ #define PVRDMA_UAR_CQ_ARM (1 << 30) /* Arm bit. */ #define PVRDMA_UAR_CQ_POLL (1 << 31) /* Poll bit. */ #define PVRDMA_UAR_SRQ_OFFSET 8 /* SRQ doorbell. */ #define PVRDMA_UAR_SRQ_RECV (1 << 30) /* Recv bit. */ enum pvrdma_wr_opcode { PVRDMA_WR_RDMA_WRITE, PVRDMA_WR_RDMA_WRITE_WITH_IMM, PVRDMA_WR_SEND, PVRDMA_WR_SEND_WITH_IMM, PVRDMA_WR_RDMA_READ, PVRDMA_WR_ATOMIC_CMP_AND_SWP, PVRDMA_WR_ATOMIC_FETCH_AND_ADD, PVRDMA_WR_LSO, PVRDMA_WR_SEND_WITH_INV, PVRDMA_WR_RDMA_READ_WITH_INV, PVRDMA_WR_LOCAL_INV, PVRDMA_WR_FAST_REG_MR, PVRDMA_WR_MASKED_ATOMIC_CMP_AND_SWP, PVRDMA_WR_MASKED_ATOMIC_FETCH_AND_ADD, PVRDMA_WR_BIND_MW, PVRDMA_WR_REG_SIG_MR, PVRDMA_WR_ERROR, }; enum pvrdma_wc_status { PVRDMA_WC_SUCCESS, PVRDMA_WC_LOC_LEN_ERR, PVRDMA_WC_LOC_QP_OP_ERR, PVRDMA_WC_LOC_EEC_OP_ERR, PVRDMA_WC_LOC_PROT_ERR, PVRDMA_WC_WR_FLUSH_ERR, PVRDMA_WC_MW_BIND_ERR, PVRDMA_WC_BAD_RESP_ERR, PVRDMA_WC_LOC_ACCESS_ERR, PVRDMA_WC_REM_INV_REQ_ERR, PVRDMA_WC_REM_ACCESS_ERR, PVRDMA_WC_REM_OP_ERR, PVRDMA_WC_RETRY_EXC_ERR, PVRDMA_WC_RNR_RETRY_EXC_ERR, PVRDMA_WC_LOC_RDD_VIOL_ERR, PVRDMA_WC_REM_INV_RD_REQ_ERR, PVRDMA_WC_REM_ABORT_ERR, PVRDMA_WC_INV_EECN_ERR, PVRDMA_WC_INV_EEC_STATE_ERR, PVRDMA_WC_FATAL_ERR, PVRDMA_WC_RESP_TIMEOUT_ERR, PVRDMA_WC_GENERAL_ERR, }; enum pvrdma_wc_opcode { PVRDMA_WC_SEND, PVRDMA_WC_RDMA_WRITE, PVRDMA_WC_RDMA_READ, PVRDMA_WC_COMP_SWAP, PVRDMA_WC_FETCH_ADD, PVRDMA_WC_BIND_MW, PVRDMA_WC_LSO, PVRDMA_WC_LOCAL_INV, PVRDMA_WC_FAST_REG_MR, PVRDMA_WC_MASKED_COMP_SWAP, PVRDMA_WC_MASKED_FETCH_ADD, PVRDMA_WC_RECV = 1 << 7, PVRDMA_WC_RECV_RDMA_WITH_IMM, }; enum pvrdma_wc_flags { PVRDMA_WC_GRH = 1 << 0, PVRDMA_WC_WITH_IMM = 1 << 1, PVRDMA_WC_WITH_INVALIDATE = 1 << 2, PVRDMA_WC_IP_CSUM_OK = 1 << 3, PVRDMA_WC_WITH_SMAC = 1 << 4, PVRDMA_WC_WITH_VLAN = 1 << 5, PVRDMA_WC_WITH_NETWORK_HDR_TYPE = 1 << 6, PVRDMA_WC_FLAGS_MAX = PVRDMA_WC_WITH_NETWORK_HDR_TYPE, }; struct pvrdma_alloc_ucontext_resp { __u32 qp_tab_size; __u32 reserved; }; struct pvrdma_alloc_pd_resp { __u32 pdn; __u32 reserved; }; struct pvrdma_create_cq { __aligned_u64 buf_addr; __u32 buf_size; __u32 reserved; }; struct pvrdma_create_cq_resp { __u32 cqn; __u32 reserved; }; struct pvrdma_resize_cq { __aligned_u64 buf_addr; __u32 buf_size; __u32 reserved; }; struct pvrdma_create_srq { __aligned_u64 buf_addr; __u32 buf_size; __u32 reserved; }; struct pvrdma_create_srq_resp { __u32 srqn; __u32 reserved; }; struct pvrdma_create_qp { __aligned_u64 rbuf_addr; __aligned_u64 sbuf_addr; __u32 rbuf_size; __u32 sbuf_size; __aligned_u64 qp_addr; }; /* PVRDMA masked atomic compare and swap */ struct pvrdma_ex_cmp_swap { __aligned_u64 swap_val; __aligned_u64 compare_val; __aligned_u64 swap_mask; __aligned_u64 compare_mask; }; /* PVRDMA masked atomic fetch and add */ struct pvrdma_ex_fetch_add { __aligned_u64 add_val; __aligned_u64 field_boundary; }; /* PVRDMA address vector. */ struct pvrdma_av { __u32 port_pd; __u32 sl_tclass_flowlabel; __u8 dgid[16]; __u8 src_path_bits; __u8 gid_index; __u8 stat_rate; __u8 hop_limit; __u8 dmac[6]; __u8 reserved[6]; }; /* PVRDMA scatter/gather entry */ struct pvrdma_sge { __aligned_u64 addr; __u32 length; __u32 lkey; }; /* PVRDMA receive queue work request */ struct pvrdma_rq_wqe_hdr { __aligned_u64 wr_id; /* wr id */ __u32 num_sge; /* size of s/g array */ __u32 total_len; /* reserved */ }; /* Use pvrdma_sge (ib_sge) for receive queue s/g array elements. 
*/ /* PVRDMA send queue work request */ struct pvrdma_sq_wqe_hdr { __aligned_u64 wr_id; /* wr id */ __u32 num_sge; /* size of s/g array */ __u32 total_len; /* reserved */ __u32 opcode; /* operation type */ __u32 send_flags; /* wr flags */ union { __be32 imm_data; __u32 invalidate_rkey; } ex; __u32 reserved; union { struct { __aligned_u64 remote_addr; __u32 rkey; __u8 reserved[4]; } rdma; struct { __aligned_u64 remote_addr; __aligned_u64 compare_add; __aligned_u64 swap; __u32 rkey; __u32 reserved; } atomic; struct { __aligned_u64 remote_addr; __u32 log_arg_sz; __u32 rkey; union { struct pvrdma_ex_cmp_swap cmp_swap; struct pvrdma_ex_fetch_add fetch_add; } wr_data; } masked_atomics; struct { __aligned_u64 iova_start; __aligned_u64 pl_pdir_dma; __u32 page_shift; __u32 page_list_len; __u32 length; __u32 access_flags; __u32 rkey; __u32 reserved; } fast_reg; struct { __u32 remote_qpn; __u32 remote_qkey; struct pvrdma_av av; } ud; } wr; }; /* Use pvrdma_sge (ib_sge) for send queue s/g array elements. */ /* Completion queue element. */ struct pvrdma_cqe { __aligned_u64 wr_id; __aligned_u64 qp; __u32 opcode; __u32 status; __u32 byte_len; __be32 imm_data; __u32 src_qp; __u32 wc_flags; __u32 vendor_err; __u16 pkey_index; __u16 slid; __u8 sl; __u8 dlid_path_bits; __u8 port_num; __u8 smac[6]; __u8 network_hdr_type; __u8 reserved2[6]; /* Pad to next power of 2 (64). */ }; #endif /* __VMW_PVRDMA_ABI_H__ */ #include <rdma/rdma_user_ioctl_cmds.h> #include <rdma/ib_user_mad.h> /* Legacy name, for user space applications which already use it */ #define IB_IOCTL_MAGIC RDMA_IOCTL_MAGIC /* * General blocks assignments * It is closed on purpose; do not expose it to user space * #define MAD_CMD_BASE 0x00 * #define HFI1_CMD_BAS 0xE0 */ /* MAD specific section */ #define IB_USER_MAD_REGISTER_AGENT _IOWR(RDMA_IOCTL_MAGIC, 0x01, struct ib_user_mad_reg_req) #define IB_USER_MAD_UNREGISTER_AGENT _IOW(RDMA_IOCTL_MAGIC, 0x02, __u32) #define IB_USER_MAD_ENABLE_PKEY _IO(RDMA_IOCTL_MAGIC, 0x03) #define IB_USER_MAD_REGISTER_AGENT2 _IOWR(RDMA_IOCTL_MAGIC, 0x04, struct ib_user_mad_reg_req2) /* HFI specific section */ /* allocate HFI and context */ #define HFI1_IOCTL_ASSIGN_CTXT _IOWR(RDMA_IOCTL_MAGIC, 0xE1, struct hfi1_user_info) /* find out what resources we got */ #define HFI1_IOCTL_CTXT_INFO _IOW(RDMA_IOCTL_MAGIC, 0xE2, struct hfi1_ctxt_info) /* set up userspace */ #define HFI1_IOCTL_USER_INFO _IOW(RDMA_IOCTL_MAGIC, 0xE3, struct hfi1_base_info) /* update expected TID entries */ #define HFI1_IOCTL_TID_UPDATE _IOWR(RDMA_IOCTL_MAGIC, 0xE4, struct hfi1_tid_info) /* free expected TID entries */ #define HFI1_IOCTL_TID_FREE _IOWR(RDMA_IOCTL_MAGIC, 0xE5, struct hfi1_tid_info) /* force an update of PIO credit */ #define HFI1_IOCTL_CREDIT_UPD _IO(RDMA_IOCTL_MAGIC, 0xE6) /* control receipt of packets */ #define HFI1_IOCTL_RECV_CTRL _IOW(RDMA_IOCTL_MAGIC, 0xE8, int) /* set the kind of polling we want */ #define HFI1_IOCTL_POLL_TYPE _IOW(RDMA_IOCTL_MAGIC, 0xE9, int) /* ack & clear user status bits */ #define HFI1_IOCTL_ACK_EVENT _IOW(RDMA_IOCTL_MAGIC, 0xEA, unsigned long) /* set context's pkey */ #define HFI1_IOCTL_SET_PKEY _IOW(RDMA_IOCTL_MAGIC, 0xEB, __u16) /* reset context's HW send context */ #define HFI1_IOCTL_CTXT_RESET _IO(RDMA_IOCTL_MAGIC, 0xEC) /* read TID cache invalidations */ #define HFI1_IOCTL_TID_INVAL_READ _IOWR(RDMA_IOCTL_MAGIC, 0xED, struct hfi1_tid_info) /* get the version of the user cdev */ #define HFI1_IOCTL_GET_VERS _IOR(RDMA_IOCTL_MAGIC, 0xEE, int) #endif /* RDMA_USER_IOCTL_H */ enum mlx5_ib_uapi_flow_action_flags {
MLX5_IB_UAPI_FLOW_ACTION_FLAGS_REQUIRE_METADATA = 1 << 0, }; #endif #include <linux/if_ether.h> /* For ETH_ALEN. */ #include <rdma/ib_user_ioctl_verbs.h> enum { MLX5_QP_FLAG_SIGNATURE = 1 << 0, MLX5_QP_FLAG_SCATTER_CQE = 1 << 1, MLX5_QP_FLAG_TUNNEL_OFFLOADS = 1 << 2, MLX5_QP_FLAG_BFREG_INDEX = 1 << 3, MLX5_QP_FLAG_TYPE_DCT = 1 << 4, MLX5_QP_FLAG_TYPE_DCI = 1 << 5, }; enum { MLX5_SRQ_FLAG_SIGNATURE = 1 << 0, }; enum { MLX5_WQ_FLAG_SIGNATURE = 1 << 0, }; /* Increment this value if any changes that break userspace ABI * compatibility are made. */ #define MLX5_IB_UVERBS_ABI_VERSION 1 /* Make sure that all structs defined in this file remain laid out so * that they pack the same way on 32-bit and 64-bit architectures (to * avoid incompatibility between 32-bit userspace and 64-bit kernels). * In particular do not use pointer types -- pass pointers in __u64 * instead. */ struct mlx5_ib_alloc_ucontext_req { __u32 total_num_bfregs; __u32 num_low_latency_bfregs; }; enum mlx5_lib_caps { MLX5_LIB_CAP_4K_UAR = (__u64)1 << 0, }; enum mlx5_ib_alloc_uctx_v2_flags { MLX5_IB_ALLOC_UCTX_DEVX = 1 << 0, }; struct mlx5_ib_alloc_ucontext_req_v2 { __u32 total_num_bfregs; __u32 num_low_latency_bfregs; __u32 flags; __u32 comp_mask; __u8 max_cqe_version; __u8 reserved0; __u16 reserved1; __u32 reserved2; __aligned_u64 lib_caps; }; enum mlx5_ib_alloc_ucontext_resp_mask { MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_CORE_CLOCK_OFFSET = 1UL << 0, MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_DUMP_FILL_MKEY = 1UL << 1, }; enum mlx5_user_cmds_supp_uhw { MLX5_USER_CMDS_SUPP_UHW_QUERY_DEVICE = 1 << 0, MLX5_USER_CMDS_SUPP_UHW_CREATE_AH = 1 << 1, }; /* The eth_min_inline response value is set to off-by-one vs the FW * returned value to allow user-space to deal with older kernels. */ enum mlx5_user_inline_mode { MLX5_USER_INLINE_MODE_NA, MLX5_USER_INLINE_MODE_NONE, MLX5_USER_INLINE_MODE_L2, MLX5_USER_INLINE_MODE_IP, MLX5_USER_INLINE_MODE_TCP_UDP, }; enum { MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM = 1 << 0, MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_REQ_METADATA = 1 << 1, MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_SPI_STEERING = 1 << 2, MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_FULL_OFFLOAD = 1 << 3, MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_TX_IV_IS_ESN = 1 << 4, }; struct mlx5_ib_alloc_ucontext_resp { __u32 qp_tab_size; __u32 bf_reg_size; __u32 tot_bfregs; __u32 cache_line_size; __u16 max_sq_desc_sz; __u16 max_rq_desc_sz; __u32 max_send_wqebb; __u32 max_recv_wr; __u32 max_srq_recv_wr; __u16 num_ports; __u16 flow_action_flags; __u32 comp_mask; __u32 response_length; __u8 cqe_version; __u8 cmds_supp_uhw; __u8 eth_min_inline; __u8 clock_info_versions; __aligned_u64 hca_core_clock_offset; __u32 log_uar_size; __u32 num_uars_per_page; __u32 num_dyn_bfregs; __u32 dump_fill_mkey; }; struct mlx5_ib_alloc_pd_resp { __u32 pdn; }; struct mlx5_ib_tso_caps { __u32 max_tso; /* Maximum tso payload size in bytes */ /* Corresponding bit will be set if qp type from * 'enum ib_qp_type' is supported, e.g.
* supported_qpts |= 1 << IB_QPT_UD */ __u32 supported_qpts; }; struct mlx5_ib_rss_caps { __aligned_u64 rx_hash_fields_mask; /* enum mlx5_rx_hash_fields */ __u8 rx_hash_function; /* enum mlx5_rx_hash_function_flags */ __u8 reserved[7]; }; enum mlx5_ib_cqe_comp_res_format { MLX5_IB_CQE_RES_FORMAT_HASH = 1 << 0, MLX5_IB_CQE_RES_FORMAT_CSUM = 1 << 1, MLX5_IB_CQE_RES_FORMAT_CSUM_STRIDX = 1 << 2, }; struct mlx5_ib_cqe_comp_caps { __u32 max_num; __u32 supported_format; /* enum mlx5_ib_cqe_comp_res_format */ }; enum mlx5_ib_packet_pacing_cap_flags { MLX5_IB_PP_SUPPORT_BURST = 1 << 0, }; struct mlx5_packet_pacing_caps { __u32 qp_rate_limit_min; __u32 qp_rate_limit_max; /* In kbps */ /* Corresponding bit will be set if qp type from * 'enum ib_qp_type' is supported, e.g. * supported_qpts |= 1 << IB_QPT_RAW_PACKET */ __u32 supported_qpts; __u8 cap_flags; /* enum mlx5_ib_packet_pacing_cap_flags */ __u8 reserved[3]; }; enum mlx5_ib_mpw_caps { MPW_RESERVED = 1 << 0, MLX5_IB_ALLOW_MPW = 1 << 1, MLX5_IB_SUPPORT_EMPW = 1 << 2, }; enum mlx5_ib_sw_parsing_offloads { MLX5_IB_SW_PARSING = 1 << 0, MLX5_IB_SW_PARSING_CSUM = 1 << 1, MLX5_IB_SW_PARSING_LSO = 1 << 2, }; struct mlx5_ib_sw_parsing_caps { __u32 sw_parsing_offloads; /* enum mlx5_ib_sw_parsing_offloads */ /* Corresponding bit will be set if qp type from * 'enum ib_qp_type' is supported, e.g. * supported_qpts |= 1 << IB_QPT_RAW_PACKET */ __u32 supported_qpts; }; struct mlx5_ib_striding_rq_caps { __u32 min_single_stride_log_num_of_bytes; __u32 max_single_stride_log_num_of_bytes; __u32 min_single_wqe_log_num_of_strides; __u32 max_single_wqe_log_num_of_strides; /* Corresponding bit will be set if qp type from * 'enum ib_qp_type' is supported, e.g. * supported_qpts |= 1 << IB_QPT_RAW_PACKET */ __u32 supported_qpts; __u32 reserved; }; enum mlx5_ib_query_dev_resp_flags { /* Support 128B CQE compression */ MLX5_IB_QUERY_DEV_RESP_FLAGS_CQE_128B_COMP = 1 << 0, MLX5_IB_QUERY_DEV_RESP_FLAGS_CQE_128B_PAD = 1 << 1, }; enum mlx5_ib_tunnel_offloads { MLX5_IB_TUNNELED_OFFLOADS_VXLAN = 1 << 0, MLX5_IB_TUNNELED_OFFLOADS_GRE = 1 << 1, MLX5_IB_TUNNELED_OFFLOADS_GENEVE = 1 << 2, MLX5_IB_TUNNELED_OFFLOADS_MPLS_GRE = 1 << 3, MLX5_IB_TUNNELED_OFFLOADS_MPLS_UDP = 1 << 4, }; struct mlx5_ib_query_device_resp { __u32 comp_mask; __u32 response_length; struct mlx5_ib_tso_caps tso_caps; struct mlx5_ib_rss_caps rss_caps; struct mlx5_ib_cqe_comp_caps cqe_comp_caps; struct mlx5_packet_pacing_caps packet_pacing_caps; __u32 mlx5_ib_support_multi_pkt_send_wqes; __u32 flags; /* Use enum mlx5_ib_query_dev_resp_flags */ struct mlx5_ib_sw_parsing_caps sw_parsing_caps; struct mlx5_ib_striding_rq_caps striding_rq_caps; __u32 tunnel_offloads_caps; /* enum mlx5_ib_tunnel_offloads */ __u32 reserved; }; enum mlx5_ib_create_cq_flags { MLX5_IB_CREATE_CQ_FLAGS_CQE_128B_PAD = 1 << 0, }; struct mlx5_ib_create_cq { __aligned_u64 buf_addr; __aligned_u64 db_addr; __u32 cqe_size; __u8 cqe_comp_en; __u8 cqe_comp_res_format; __u16 flags; }; struct mlx5_ib_create_cq_resp { __u32 cqn; __u32 reserved; }; struct mlx5_ib_resize_cq { __aligned_u64 buf_addr; __u16 cqe_size; __u16 reserved0; __u32 reserved1; }; struct mlx5_ib_create_srq { __aligned_u64 buf_addr; __aligned_u64 db_addr; __u32 flags; __u32 reserved0; /* explicit padding (optional on i386) */ __u32 uidx; __u32 reserved1; }; struct mlx5_ib_create_srq_resp { __u32 srqn; __u32 reserved; }; struct mlx5_ib_create_qp { __aligned_u64 buf_addr; __aligned_u64 db_addr; __u32 sq_wqe_count; __u32 rq_wqe_count; __u32 rq_wqe_shift; __u32 flags; __u32 uidx; __u32
bfreg_index; union { __aligned_u64 sq_buf_addr; __aligned_u64 access_key; }; }; /* RX Hash function flags */ enum mlx5_rx_hash_function_flags { MLX5_RX_HASH_FUNC_TOEPLITZ = 1 << 0, }; /* * RX Hash flags: these flags select which fields of the incoming packet * participate in the RX hash. Each flag represents a certain packet field; * when the flag is set, the field it represents is included in the RX hash * calculation. * Note: *IPV4 and *IPV6 flags can't be enabled together on the same QP, * and *TCP and *UDP flags can't be enabled together on the same QP. */ enum mlx5_rx_hash_fields { MLX5_RX_HASH_SRC_IPV4 = 1 << 0, MLX5_RX_HASH_DST_IPV4 = 1 << 1, MLX5_RX_HASH_SRC_IPV6 = 1 << 2, MLX5_RX_HASH_DST_IPV6 = 1 << 3, MLX5_RX_HASH_SRC_PORT_TCP = 1 << 4, MLX5_RX_HASH_DST_PORT_TCP = 1 << 5, MLX5_RX_HASH_SRC_PORT_UDP = 1 << 6, MLX5_RX_HASH_DST_PORT_UDP = 1 << 7, MLX5_RX_HASH_IPSEC_SPI = 1 << 8, /* Save bits for future fields */ MLX5_RX_HASH_INNER = (1UL << 31), }; struct mlx5_ib_create_qp_rss { __aligned_u64 rx_hash_fields_mask; /* enum mlx5_rx_hash_fields */ __u8 rx_hash_function; /* enum mlx5_rx_hash_function_flags */ __u8 rx_key_len; /* valid only for Toeplitz */ __u8 reserved[6]; __u8 rx_hash_key[128]; /* valid only for Toeplitz */ __u32 comp_mask; __u32 flags; }; struct mlx5_ib_create_qp_resp { __u32 bfreg_index; __u32 reserved; }; struct mlx5_ib_alloc_mw { __u32 comp_mask; __u8 num_klms; __u8 reserved1; __u16 reserved2; }; enum mlx5_ib_create_wq_mask { MLX5_IB_CREATE_WQ_STRIDING_RQ = (1 << 0), }; struct mlx5_ib_create_wq { __aligned_u64 buf_addr; __aligned_u64 db_addr; __u32 rq_wqe_count; __u32 rq_wqe_shift; __u32 user_index; __u32 flags; __u32 comp_mask; __u32 single_stride_log_num_of_bytes; __u32 single_wqe_log_num_of_strides; __u32 two_byte_shift_en; }; struct mlx5_ib_create_ah_resp { __u32 response_length; __u8 dmac[ETH_ALEN]; __u8 reserved[6]; }; struct mlx5_ib_burst_info { __u32 max_burst_sz; __u16 typical_pkt_sz; __u16 reserved; }; struct mlx5_ib_modify_qp { __u32 comp_mask; struct mlx5_ib_burst_info burst_info; __u32 reserved; }; struct mlx5_ib_modify_qp_resp { __u32 response_length; __u32 dctn; }; struct mlx5_ib_create_wq_resp { __u32 response_length; __u32 reserved; }; struct mlx5_ib_create_rwq_ind_tbl_resp { __u32 response_length; __u32 reserved; }; struct mlx5_ib_modify_wq { __u32 comp_mask; __u32 reserved; }; struct mlx5_ib_clock_info { __u32 sign; __u32 resv; __aligned_u64 nsec; __aligned_u64 cycles; __aligned_u64 frac; __u32 mult; __u32 shift; __aligned_u64 mask; __aligned_u64 overflow_period; }; enum mlx5_ib_mmap_cmd { MLX5_IB_MMAP_REGULAR_PAGE = 0, MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES = 1, MLX5_IB_MMAP_WC_PAGE = 2, MLX5_IB_MMAP_NC_PAGE = 3, /* 5 is chosen in order to be compatible with old versions of libmlx5 */ MLX5_IB_MMAP_CORE_CLOCK = 5, MLX5_IB_MMAP_ALLOC_WC = 6, MLX5_IB_MMAP_CLOCK_INFO = 7, MLX5_IB_MMAP_DEVICE_MEM = 8, }; enum { MLX5_IB_CLOCK_INFO_KERNEL_UPDATING = 1, }; /* Bit indexes for the mlx5_alloc_ucontext_resp.clock_info_versions bitmap */ enum { MLX5_IB_CLOCK_INFO_V1 = 0, }; struct mlx5_ib_flow_counters_desc { __u32 description; __u32 index; }; struct mlx5_ib_flow_counters_data { RDMA_UAPI_PTR(struct mlx5_ib_flow_counters_desc *, counters_data); __u32 ncounters; __u32 reserved; }; struct mlx5_ib_create_flow { __u32 ncounters_data; __u32 reserved; /* * Following are counters data based on ncounters_data; each * entry in the data[] should match a corresponding counter object * that was pointed to by a counters spec upon the flow creation */ struct
mlx5_ib_flow_counters_data data[]; }; #endif /* MLX5_ABI_USER_H */ #include <linux/ioctl.h> /* Documentation/ioctl/ioctl-number.txt */ #define RDMA_IOCTL_MAGIC 0x1b #define RDMA_VERBS_IOCTL \ _IOWR(RDMA_IOCTL_MAGIC, 1, struct ib_uverbs_ioctl_hdr) enum { /* User input */ UVERBS_ATTR_F_MANDATORY = 1U << 0, /* * Valid output bit should be ignored and considered set in * mandatory fields. This bit is kernel output. */ UVERBS_ATTR_F_VALID_OUTPUT = 1U << 1, }; struct ib_uverbs_attr { __u16 attr_id; /* command specific type attribute */ __u16 len; /* only for pointers */ __u16 flags; /* combination of UVERBS_ATTR_F_XXXX */ union { struct { __u8 elem_id; __u8 reserved; } enum_data; __u16 reserved; } attr_data; union { /* Used by PTR_IN/OUT, ENUM_IN and IDR */ __aligned_u64 data; /* Used by FD_IN and FD_OUT */ __s64 data_s64; }; }; struct ib_uverbs_ioctl_hdr { __u16 length; __u16 object_id; __u16 method_id; __u16 num_attrs; __aligned_u64 reserved1; __u32 driver_id; __u32 reserved2; struct ib_uverbs_attr attrs[0]; }; enum rdma_driver_id { RDMA_DRIVER_UNKNOWN, RDMA_DRIVER_MLX5, RDMA_DRIVER_MLX4, RDMA_DRIVER_CXGB3, RDMA_DRIVER_CXGB4, RDMA_DRIVER_MTHCA, RDMA_DRIVER_BNXT_RE, RDMA_DRIVER_OCRDMA, RDMA_DRIVER_NES, RDMA_DRIVER_I40IW, RDMA_DRIVER_VMW_PVRDMA, RDMA_DRIVER_QEDR, RDMA_DRIVER_HNS, RDMA_DRIVER_USNIC, RDMA_DRIVER_RXE, RDMA_DRIVER_HFI1, RDMA_DRIVER_QIB, }; #endif #include <rdma/ib_user_ioctl_cmds.h> enum mlx5_ib_create_flow_action_attrs { /* This attribute belongs to the driver namespace */ MLX5_IB_ATTR_CREATE_FLOW_ACTION_FLAGS = (1U << UVERBS_ID_NS_SHIFT), }; enum mlx5_ib_alloc_dm_attrs { MLX5_IB_ATTR_ALLOC_DM_RESP_START_OFFSET = (1U << UVERBS_ID_NS_SHIFT), MLX5_IB_ATTR_ALLOC_DM_RESP_PAGE_INDEX, }; enum mlx5_ib_devx_methods { MLX5_IB_METHOD_DEVX_OTHER = (1U << UVERBS_ID_NS_SHIFT), MLX5_IB_METHOD_DEVX_QUERY_UAR, MLX5_IB_METHOD_DEVX_QUERY_EQN, }; enum mlx5_ib_devx_other_attrs { MLX5_IB_ATTR_DEVX_OTHER_CMD_IN = (1U << UVERBS_ID_NS_SHIFT), MLX5_IB_ATTR_DEVX_OTHER_CMD_OUT, }; enum mlx5_ib_devx_obj_create_attrs { MLX5_IB_ATTR_DEVX_OBJ_CREATE_HANDLE = (1U << UVERBS_ID_NS_SHIFT), MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_IN, MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT, }; enum mlx5_ib_devx_query_uar_attrs { MLX5_IB_ATTR_DEVX_QUERY_UAR_USER_IDX = (1U << UVERBS_ID_NS_SHIFT), MLX5_IB_ATTR_DEVX_QUERY_UAR_DEV_IDX, }; enum mlx5_ib_devx_obj_destroy_attrs { MLX5_IB_ATTR_DEVX_OBJ_DESTROY_HANDLE = (1U << UVERBS_ID_NS_SHIFT), }; enum mlx5_ib_devx_obj_modify_attrs { MLX5_IB_ATTR_DEVX_OBJ_MODIFY_HANDLE = (1U << UVERBS_ID_NS_SHIFT), MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN, MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_OUT, }; enum mlx5_ib_devx_obj_query_attrs { MLX5_IB_ATTR_DEVX_OBJ_QUERY_HANDLE = (1U << UVERBS_ID_NS_SHIFT), MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_OUT, }; enum mlx5_ib_devx_query_eqn_attrs { MLX5_IB_ATTR_DEVX_QUERY_EQN_USER_VEC = (1U << UVERBS_ID_NS_SHIFT), MLX5_IB_ATTR_DEVX_QUERY_EQN_DEV_EQN, }; enum mlx5_ib_devx_obj_methods { MLX5_IB_METHOD_DEVX_OBJ_CREATE = (1U << UVERBS_ID_NS_SHIFT), MLX5_IB_METHOD_DEVX_OBJ_DESTROY, MLX5_IB_METHOD_DEVX_OBJ_MODIFY, MLX5_IB_METHOD_DEVX_OBJ_QUERY, }; enum mlx5_ib_devx_umem_reg_attrs { MLX5_IB_ATTR_DEVX_UMEM_REG_HANDLE = (1U << UVERBS_ID_NS_SHIFT), MLX5_IB_ATTR_DEVX_UMEM_REG_ADDR, MLX5_IB_ATTR_DEVX_UMEM_REG_LEN, MLX5_IB_ATTR_DEVX_UMEM_REG_ACCESS, MLX5_IB_ATTR_DEVX_UMEM_REG_OUT_ID, }; enum mlx5_ib_devx_umem_dereg_attrs { MLX5_IB_ATTR_DEVX_UMEM_DEREG_HANDLE = (1U << UVERBS_ID_NS_SHIFT), }; enum mlx5_ib_devx_umem_methods { MLX5_IB_METHOD_DEVX_UMEM_REG = (1U << UVERBS_ID_NS_SHIFT), MLX5_IB_METHOD_DEVX_UMEM_DEREG, };
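/* * Editorial sketch (not part of the ABI): how user space might assemble an * RDMA_VERBS_IOCTL request that carries a DEVX general command * (MLX5_IB_METHOD_DEVX_OTHER) with its two mandatory in/out buffers. The * header and attribute layout follow struct ib_uverbs_ioctl_hdr and struct * ib_uverbs_attr above; devx_cmd() is a hypothetical helper and error * handling is kept minimal for brevity. */
#include <stdint.h>
#include <stdlib.h>
#include <sys/ioctl.h>
static int devx_cmd(int cmd_fd, void *in, __u16 in_len, void *out, __u16 out_len)
{
	struct ib_uverbs_ioctl_hdr *hdr;
	struct ib_uverbs_attr *attr;
	int ret;

	hdr = calloc(1, sizeof(*hdr) + 2 * sizeof(*attr));
	if (!hdr)
		return -1;
	hdr->length = sizeof(*hdr) + 2 * sizeof(*attr);	/* total bytes, header included */
	hdr->object_id = MLX5_IB_OBJECT_DEVX;		/* defined below */
	hdr->method_id = MLX5_IB_METHOD_DEVX_OTHER;
	hdr->num_attrs = 2;
	hdr->driver_id = RDMA_DRIVER_MLX5;
	attr = hdr->attrs;
	attr[0].attr_id = MLX5_IB_ATTR_DEVX_OTHER_CMD_IN;
	attr[0].flags = UVERBS_ATTR_F_MANDATORY;
	attr[0].len = in_len;
	attr[0].data = (__u64)(uintptr_t)in;		/* pointer passed as __u64 */
	attr[1].attr_id = MLX5_IB_ATTR_DEVX_OTHER_CMD_OUT;
	attr[1].flags = UVERBS_ATTR_F_MANDATORY;
	attr[1].len = out_len;
	attr[1].data = (__u64)(uintptr_t)out;
	ret = ioctl(cmd_fd, RDMA_VERBS_IOCTL, hdr);
	free(hdr);
	return ret;
}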
enum mlx5_ib_objects { MLX5_IB_OBJECT_DEVX = (1U << UVERBS_ID_NS_SHIFT), MLX5_IB_OBJECT_DEVX_OBJ, MLX5_IB_OBJECT_DEVX_UMEM, MLX5_IB_OBJECT_FLOW_MATCHER, }; enum mlx5_ib_flow_matcher_create_attrs { MLX5_IB_ATTR_FLOW_MATCHER_CREATE_HANDLE = (1U << UVERBS_ID_NS_SHIFT), MLX5_IB_ATTR_FLOW_MATCHER_MATCH_MASK, MLX5_IB_ATTR_FLOW_MATCHER_FLOW_TYPE, MLX5_IB_ATTR_FLOW_MATCHER_MATCH_CRITERIA, }; enum mlx5_ib_flow_matcher_destroy_attrs { MLX5_IB_ATTR_FLOW_MATCHER_DESTROY_HANDLE = (1U << UVERBS_ID_NS_SHIFT), }; enum mlx5_ib_flow_matcher_methods { MLX5_IB_METHOD_FLOW_MATCHER_CREATE = (1U << UVERBS_ID_NS_SHIFT), MLX5_IB_METHOD_FLOW_MATCHER_DESTROY, }; #define MLX5_IB_DW_MATCH_PARAM 0x80 struct mlx5_ib_match_params { __u32 match_params[MLX5_IB_DW_MATCH_PARAM]; }; enum mlx5_ib_flow_type { MLX5_IB_FLOW_TYPE_NORMAL, MLX5_IB_FLOW_TYPE_SNIFFER, MLX5_IB_FLOW_TYPE_ALL_DEFAULT, MLX5_IB_FLOW_TYPE_MC_DEFAULT, }; enum mlx5_ib_create_flow_attrs { MLX5_IB_ATTR_CREATE_FLOW_HANDLE = (1U << UVERBS_ID_NS_SHIFT), MLX5_IB_ATTR_CREATE_FLOW_MATCH_VALUE, MLX5_IB_ATTR_CREATE_FLOW_DEST_QP, MLX5_IB_ATTR_CREATE_FLOW_DEST_DEVX, MLX5_IB_ATTR_CREATE_FLOW_MATCHER, }; enum mlx5_ib_destoy_flow_attrs { MLX5_IB_ATTR_DESTROY_FLOW_HANDLE = (1U << UVERBS_ID_NS_SHIFT), }; enum mlx5_ib_flow_methods { MLX5_IB_METHOD_CREATE_FLOW = (1U << UVERBS_ID_NS_SHIFT), MLX5_IB_METHOD_DESTROY_FLOW, }; #endif enum { IB_PATH_GMP = 1, IB_PATH_PRIMARY = (1<<1), IB_PATH_ALTERNATE = (1<<2), IB_PATH_OUTBOUND = (1<<3), IB_PATH_INBOUND = (1<<4), IB_PATH_INBOUND_REVERSE = (1<<5), IB_PATH_BIDIRECTIONAL = IB_PATH_OUTBOUND | IB_PATH_INBOUND_REVERSE }; struct ib_path_rec_data { __u32 flags; __u32 reserved; __u32 path_rec[16]; }; struct ib_user_path_rec { __u8 dgid[16]; __u8 sgid[16]; __be16 dlid; __be16 slid; __u32 raw_traffic; __be32 flow_label; __u32 reversible; __u32 mtu; __be16 pkey; __u8 hop_limit; __u8 traffic_class; __u8 numb_path; __u8 sl; __u8 mtu_selector; __u8 rate_selector; __u8 rate; __u8 packet_life_time_selector; __u8 packet_life_time; __u8 preference; }; #endif /* IB_USER_SA_H */ /* * This structure is passed to the driver to tell it where * user code buffers are, sizes, etc. The offsets and sizes of the * fields must remain unchanged, for binary compatibility. It can * be extended if userversion is changed, so user code can tell, if needed */ struct hfi1_user_info { /* * version of user software, to detect compatibility issues. * Should be set to HFI1_USER_SWVERSION. */ __u32 userversion; __u32 pad; /* * If two or more processes wish to share a context, each process * must set the subctxt_cnt and subctxt_id to the same * values. The only restriction on the subctxt_id is that * it be unique for a given node. */ __u16 subctxt_cnt; __u16 subctxt_id; /* 128bit UUID passed in by PSM.
*/ __u8 uuid[16]; }; struct hfi1_ctxt_info { __aligned_u64 runtime_flags; /* chip/drv runtime flags (HFI1_CAP_*) */ __u32 rcvegr_size; /* size of each eager buffer */ __u16 num_active; /* number of active units */ __u16 unit; /* unit (chip) assigned to caller */ __u16 ctxt; /* ctxt on unit assigned to caller */ __u16 subctxt; /* subctxt on unit assigned to caller */ __u16 rcvtids; /* number of Rcv TIDs for this context */ __u16 credits; /* number of PIO credits for this context */ __u16 numa_node; /* NUMA node of the assigned device */ __u16 rec_cpu; /* cpu # for affinity (0xffff if none) */ __u16 send_ctxt; /* send context in use by this user context */ __u16 egrtids; /* number of RcvArray entries for Eager Rcvs */ __u16 rcvhdrq_cnt; /* number of RcvHdrQ entries */ __u16 rcvhdrq_entsize; /* size (in bytes) for each RcvHdrQ entry */ __u16 sdma_ring_size; /* number of entries in SDMA request ring */ }; struct hfi1_tid_info { /* virtual address of first page in transfer */ __aligned_u64 vaddr; /* pointer to tid array. this array is big enough */ __aligned_u64 tidlist; /* number of tids programmed by this request */ __u32 tidcnt; /* length of transfer buffer programmed by this request */ __u32 length; }; /* * This structure is returned by the driver immediately after * open to get implementation-specific info, and info specific to this * instance. * * This struct must have explicit pad fields where type sizes * may result in different alignments between 32 and 64 bit * programs, since the 64 bit * kernel requires the user code * to have matching offsets */ struct hfi1_base_info { /* version of hardware, for feature checking. */ __u32 hw_version; /* version of software, for feature checking. */ __u32 sw_version; /* Job key */ __u16 jkey; __u16 padding1; /* * The special QP (queue pair) value that identifies PSM * protocol packet from standard IB packets. */ __u32 bthqp; /* PIO credit return address, */ __aligned_u64 sc_credits_addr; /* * Base address of write-only pio buffers for this process. * Each buffer has sendpio_credits*64 bytes. */ __aligned_u64 pio_bufbase_sop; /* * Base address of write-only pio buffers for this process. * Each buffer has sendpio_credits*64 bytes. */ __aligned_u64 pio_bufbase; /* address where receive buffer queue is mapped into */ __aligned_u64 rcvhdr_bufbase; /* base address of Eager receive buffers. */ __aligned_u64 rcvegr_bufbase; /* base address of SDMA completion ring */ __aligned_u64 sdma_comp_bufbase; /* * User register base for init code, not to be used directly by * protocol or applications. Always maps real chip register space. * the register addresses are: * ur_rcvhdrhead, ur_rcvhdrtail, ur_rcvegrhead, ur_rcvegrtail, * ur_rcvtidflow */ __aligned_u64 user_regbase; /* notification events */ __aligned_u64 events_bufbase; /* status page */ __aligned_u64 status_bufbase; /* rcvhdrtail update */ __aligned_u64 rcvhdrtail_base; /* * shared memory pages for subctxts if ctxt is shared; these cover * all the processes in the group sharing a single context. * all have enough space for the num_subcontexts value on this job. */ __aligned_u64 subctxt_uregbase; __aligned_u64 subctxt_rcvegrbuf; __aligned_u64 subctxt_rcvhdrbuf; }; #endif /* _LINIUX__HFI1_IOCTL_H */ #include <linux/types.h> /* * This version number is given to the driver by the user code during * initialization in the userversion field of hfi1_user_info, so * the driver can check for compatibility with user code. * * The major version changes when data structures change in an incompatible * way.
The driver must be the same for initialization to succeed. */ #define HFI1_USER_SWMAJOR 6 /* * Minor version differences are always compatible * within a major version; however, if the user software is newer * than the driver software, some new features and/or structure fields * may not be implemented; the user code must deal with this if it * cares, or it must abort after initialization reports the difference. */ #define HFI1_USER_SWMINOR 3 /* * We will encode the major/minor inside a single 32bit version number. */ #define HFI1_SWMAJOR_SHIFT 16 /* * Set of HW and driver capability/feature bits. * These bit values are used to configure enabled/disabled HW and * driver features. The same set of bits are communicated to user * space. */ #define HFI1_CAP_DMA_RTAIL (1UL << 0) /* Use DMA'ed RTail value */ #define HFI1_CAP_SDMA (1UL << 1) /* Enable SDMA support */ #define HFI1_CAP_SDMA_AHG (1UL << 2) /* Enable SDMA AHG support */ #define HFI1_CAP_EXTENDED_PSN (1UL << 3) /* Enable Extended PSN support */ #define HFI1_CAP_HDRSUPP (1UL << 4) /* Enable Header Suppression */ /* 1UL << 5 unused */ #define HFI1_CAP_USE_SDMA_HEAD (1UL << 6) /* DMA Hdr Q tail vs. use CSR */ #define HFI1_CAP_MULTI_PKT_EGR (1UL << 7) /* Enable multi-packet Egr buffs*/ #define HFI1_CAP_NODROP_RHQ_FULL (1UL << 8) /* Don't drop on Hdr Q full */ #define HFI1_CAP_NODROP_EGR_FULL (1UL << 9) /* Don't drop on EGR buffs full */ #define HFI1_CAP_TID_UNMAP (1UL << 10) /* Disable Expected TID caching */ #define HFI1_CAP_PRINT_UNIMPL (1UL << 11) /* Show for unimplemented feats */ #define HFI1_CAP_ALLOW_PERM_JKEY (1UL << 12) /* Allow use of permissive JKEY */ #define HFI1_CAP_NO_INTEGRITY (1UL << 13) /* Enable ctxt integrity checks */ #define HFI1_CAP_PKEY_CHECK (1UL << 14) /* Enable ctxt PKey checking */ #define HFI1_CAP_STATIC_RATE_CTRL (1UL << 15) /* Allow PBC.StaticRateControl */ /* 1UL << 16 unused */ #define HFI1_CAP_SDMA_HEAD_CHECK (1UL << 17) /* SDMA head checking */ #define HFI1_CAP_EARLY_CREDIT_RETURN (1UL << 18) /* early credit return */ #define HFI1_RCVHDR_ENTSIZE_2 (1UL << 0) #define HFI1_RCVHDR_ENTSIZE_16 (1UL << 1) #define HFI1_RCVDHR_ENTSIZE_32 (1UL << 2) #define _HFI1_EVENT_FROZEN_BIT 0 #define _HFI1_EVENT_LINKDOWN_BIT 1 #define _HFI1_EVENT_LID_CHANGE_BIT 2 #define _HFI1_EVENT_LMC_CHANGE_BIT 3 #define _HFI1_EVENT_SL2VL_CHANGE_BIT 4 #define _HFI1_EVENT_TID_MMU_NOTIFY_BIT 5 #define _HFI1_MAX_EVENT_BIT _HFI1_EVENT_TID_MMU_NOTIFY_BIT #define HFI1_EVENT_FROZEN (1UL << _HFI1_EVENT_FROZEN_BIT) #define HFI1_EVENT_LINKDOWN (1UL << _HFI1_EVENT_LINKDOWN_BIT) #define HFI1_EVENT_LID_CHANGE (1UL << _HFI1_EVENT_LID_CHANGE_BIT) #define HFI1_EVENT_LMC_CHANGE (1UL << _HFI1_EVENT_LMC_CHANGE_BIT) #define HFI1_EVENT_SL2VL_CHANGE (1UL << _HFI1_EVENT_SL2VL_CHANGE_BIT) #define HFI1_EVENT_TID_MMU_NOTIFY (1UL << _HFI1_EVENT_TID_MMU_NOTIFY_BIT) /* * These are the status bits readable (in ASCII form, 64bit value) * from the "status" sysfs file. For binary compatibility, values * must remain as is; removed states can be reused for different * purposes. */ #define HFI1_STATUS_INITTED 0x1 /* basic initialization done */ /* Chip has been found and initialized */ #define HFI1_STATUS_CHIP_PRESENT 0x20 /* IB link is at ACTIVE, usable for data traffic */ #define HFI1_STATUS_IB_READY 0x40 /* link is configured, LID, MTU, etc. have been set */ #define HFI1_STATUS_IB_CONF 0x80 /* A fatal hardware error has occurred. */ #define HFI1_STATUS_HWERROR 0x200 /* * Number of supported shared contexts.
* This is the maximum number of software contexts that can share * a hardware send/receive context. */ #define HFI1_MAX_SHARED_CTXTS 8 /* * Poll types */ #define HFI1_POLL_TYPE_ANYRCV 0x0 #define HFI1_POLL_TYPE_URGENT 0x1 enum hfi1_sdma_comp_state { FREE = 0, QUEUED, COMPLETE, ERROR }; /* * SDMA completion ring entry */ struct hfi1_sdma_comp_entry { __u32 status; __u32 errcode; }; /* * Device status and notifications from driver to user-space. */ struct hfi1_status { __aligned_u64 dev; /* device/hw status bits */ __aligned_u64 port; /* port state and status bits */ char freezemsg[0]; }; enum sdma_req_opcode { EXPECTED = 0, EAGER }; #define HFI1_SDMA_REQ_VERSION_MASK 0xF #define HFI1_SDMA_REQ_VERSION_SHIFT 0x0 #define HFI1_SDMA_REQ_OPCODE_MASK 0xF #define HFI1_SDMA_REQ_OPCODE_SHIFT 0x4 #define HFI1_SDMA_REQ_IOVCNT_MASK 0xFF #define HFI1_SDMA_REQ_IOVCNT_SHIFT 0x8 struct sdma_req_info { /* * bits 0-3 - version (currently unused) * bits 4-7 - opcode (enum sdma_req_opcode) * bits 8-15 - io vector count */ __u16 ctrl; /* * Number of fragments contained in this request. * User-space has already computed how many * fragment-sized packets the user buffer will be * split into. */ __u16 npkts; /* * Size of each fragment the user buffer will be * split into. */ __u16 fragsize; /* * Index of the slot in the SDMA completion ring * this request should be using. User-space is * in charge of managing its own ring. */ __u16 comp_idx; } __attribute__((__packed__)); /* * SW KDETH header. * swdata is the SW defined portion. */ struct hfi1_kdeth_header { __le32 ver_tid_offset; __le16 jkey; __le16 hcrc; __le32 swdata[7]; } __attribute__((__packed__)); /* * Structure describing the headers that User space uses. The * structure above is a subset of this one. */ struct hfi1_pkt_header { __le16 pbc[4]; __be16 lrh[4]; __be32 bth[3]; struct hfi1_kdeth_header kdeth; } __attribute__((__packed__)); /* * The list of usermode accessible registers. */ enum hfi1_ureg { /* (RO) DMA RcvHdr to be used next. */ ur_rcvhdrtail = 0, /* (RW) RcvHdr entry to be processed next by host. */ ur_rcvhdrhead = 1, /* (RO) Index of next Eager index to use. */ ur_rcvegrindextail = 2, /* (RW) Eager TID to be processed next */ ur_rcvegrindexhead = 3, /* (RO) Receive Eager Offset Tail */ ur_rcvegroffsettail = 4, /* For internal use only; max register number. */ ur_maxreg, /* (RW) Receive TID flow table */ ur_rcvtidflowtable = 256 }; #endif /* _LINIUX__HFI1_USER_H */ #include <linux/socket.h> #include <linux/in6.h> #include <rdma/ib_user_verbs.h> #include <rdma/ib_user_sa.h> #define RDMA_USER_CM_ABI_VERSION 4 #define RDMA_MAX_PRIVATE_DATA 256 enum { RDMA_USER_CM_CMD_CREATE_ID, RDMA_USER_CM_CMD_DESTROY_ID, RDMA_USER_CM_CMD_BIND_IP, RDMA_USER_CM_CMD_RESOLVE_IP, RDMA_USER_CM_CMD_RESOLVE_ROUTE, RDMA_USER_CM_CMD_QUERY_ROUTE, RDMA_USER_CM_CMD_CONNECT, RDMA_USER_CM_CMD_LISTEN, RDMA_USER_CM_CMD_ACCEPT, RDMA_USER_CM_CMD_REJECT, RDMA_USER_CM_CMD_DISCONNECT, RDMA_USER_CM_CMD_INIT_QP_ATTR, RDMA_USER_CM_CMD_GET_EVENT, RDMA_USER_CM_CMD_GET_OPTION, RDMA_USER_CM_CMD_SET_OPTION, RDMA_USER_CM_CMD_NOTIFY, RDMA_USER_CM_CMD_JOIN_IP_MCAST, RDMA_USER_CM_CMD_LEAVE_MCAST, RDMA_USER_CM_CMD_MIGRATE_ID, RDMA_USER_CM_CMD_QUERY, RDMA_USER_CM_CMD_BIND, RDMA_USER_CM_CMD_RESOLVE_ADDR, RDMA_USER_CM_CMD_JOIN_MCAST }; /* See IBTA Annex A11, service ID bytes 4 & 5 */ enum rdma_ucm_port_space { RDMA_PS_IPOIB = 0x0002, RDMA_PS_IB = 0x013F, RDMA_PS_TCP = 0x0106, RDMA_PS_UDP = 0x0111, }; /* * command ABI structures.
*/ struct rdma_ucm_cmd_hdr { __u32 cmd; __u16 in; __u16 out; }; struct rdma_ucm_create_id { __aligned_u64 uid; __aligned_u64 response; __u16 ps; /* use enum rdma_ucm_port_space */ __u8 qp_type; __u8 reserved[5]; }; struct rdma_ucm_create_id_resp { __u32 id; }; struct rdma_ucm_destroy_id { __aligned_u64 response; __u32 id; __u32 reserved; }; struct rdma_ucm_destroy_id_resp { __u32 events_reported; }; struct rdma_ucm_bind_ip { __aligned_u64 response; struct sockaddr_in6 addr; __u32 id; }; struct rdma_ucm_bind { __u32 id; __u16 addr_size; __u16 reserved; struct __kernel_sockaddr_storage addr; }; struct rdma_ucm_resolve_ip { struct sockaddr_in6 src_addr; struct sockaddr_in6 dst_addr; __u32 id; __u32 timeout_ms; }; struct rdma_ucm_resolve_addr { __u32 id; __u32 timeout_ms; __u16 src_size; __u16 dst_size; __u32 reserved; struct __kernel_sockaddr_storage src_addr; struct __kernel_sockaddr_storage dst_addr; }; struct rdma_ucm_resolve_route { __u32 id; __u32 timeout_ms; }; enum { RDMA_USER_CM_QUERY_ADDR, RDMA_USER_CM_QUERY_PATH, RDMA_USER_CM_QUERY_GID }; struct rdma_ucm_query { __aligned_u64 response; __u32 id; __u32 option; }; struct rdma_ucm_query_route_resp { __aligned_u64 node_guid; struct ib_user_path_rec ib_route[2]; struct sockaddr_in6 src_addr; struct sockaddr_in6 dst_addr; __u32 num_paths; __u8 port_num; __u8 reserved[3]; }; struct rdma_ucm_query_addr_resp { __aligned_u64 node_guid; __u8 port_num; __u8 reserved; __u16 pkey; __u16 src_size; __u16 dst_size; struct __kernel_sockaddr_storage src_addr; struct __kernel_sockaddr_storage dst_addr; }; struct rdma_ucm_query_path_resp { __u32 num_paths; __u32 reserved; struct ib_path_rec_data path_data[0]; }; struct rdma_ucm_conn_param { __u32 qp_num; __u32 qkey; __u8 private_data[RDMA_MAX_PRIVATE_DATA]; __u8 private_data_len; __u8 srq; __u8 responder_resources; __u8 initiator_depth; __u8 flow_control; __u8 retry_count; __u8 rnr_retry_count; __u8 valid; }; struct rdma_ucm_ud_param { __u32 qp_num; __u32 qkey; struct ib_uverbs_ah_attr ah_attr; __u8 private_data[RDMA_MAX_PRIVATE_DATA]; __u8 private_data_len; __u8 reserved[7]; }; struct rdma_ucm_connect { struct rdma_ucm_conn_param conn_param; __u32 id; __u32 reserved; }; struct rdma_ucm_listen { __u32 id; __u32 backlog; }; struct rdma_ucm_accept { __aligned_u64 uid; struct rdma_ucm_conn_param conn_param; __u32 id; __u32 reserved; }; struct rdma_ucm_reject { __u32 id; __u8 private_data_len; __u8 reserved[3]; __u8 private_data[RDMA_MAX_PRIVATE_DATA]; }; struct rdma_ucm_disconnect { __u32 id; }; struct rdma_ucm_init_qp_attr { __aligned_u64 response; __u32 id; __u32 qp_state; }; struct rdma_ucm_notify { __u32 id; __u32 event; }; struct rdma_ucm_join_ip_mcast { __aligned_u64 response; /* rdma_ucm_create_id_resp */ __aligned_u64 uid; struct sockaddr_in6 addr; __u32 id; }; /* Multicast join flags */ enum { RDMA_MC_JOIN_FLAG_FULLMEMBER, RDMA_MC_JOIN_FLAG_SENDONLY_FULLMEMBER, RDMA_MC_JOIN_FLAG_RESERVED, }; struct rdma_ucm_join_mcast { __aligned_u64 response; /* rdma_ucm_create_id_resp */ __aligned_u64 uid; __u32 id; __u16 addr_size; __u16 join_flags; struct __kernel_sockaddr_storage addr; }; struct rdma_ucm_get_event { __aligned_u64 response; }; struct rdma_ucm_event_resp { __aligned_u64 uid; __u32 id; __u32 event; __u32 status; /* * NOTE: This union is not aligned to 8 bytes so none of the union * members may contain a u64 or anything with higher alignment than 4.
*/ union { struct rdma_ucm_conn_param conn; struct rdma_ucm_ud_param ud; } param; __u32 reserved; }; /* Option levels */ enum { RDMA_OPTION_ID = 0, RDMA_OPTION_IB = 1 }; /* Option details */ enum { RDMA_OPTION_ID_TOS = 0, RDMA_OPTION_ID_REUSEADDR = 1, RDMA_OPTION_ID_AFONLY = 2, RDMA_OPTION_IB_PATH = 1 }; struct rdma_ucm_set_option { __aligned_u64 optval; __u32 id; __u32 level; __u32 optname; __u32 optlen; }; struct rdma_ucm_migrate_id { __aligned_u64 response; __u32 id; __u32 fd; }; struct rdma_ucm_migrate_resp { __u32 events_reported; }; #endif /* RDMA_USER_CM_H */
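/* * Editorial sketch (not part of the ABI): issuing RDMA_USER_CM_CMD_CREATE_ID * through the write()-based rdma_cm user interface. A command is a struct * rdma_ucm_cmd_hdr immediately followed by the command payload; hdr.in is the * payload size and hdr.out is the size of the response buffer that * cmd.response points at. create_cm_id() is a hypothetical helper, cm_fd an * already-open rdma_cm device fd, and the IB_QPT_RC value of 2 is an * assumption taken from the kernel's qp-type numbering. */
#include <stdint.h>
#include <string.h>
#include <unistd.h>
static int create_cm_id(int cm_fd, __u32 *id_out)
{
	struct {
		struct rdma_ucm_cmd_hdr hdr;
		struct rdma_ucm_create_id cmd;
	} msg;
	struct rdma_ucm_create_id_resp resp;

	memset(&msg, 0, sizeof(msg));
	msg.hdr.cmd = RDMA_USER_CM_CMD_CREATE_ID;
	msg.hdr.in = sizeof(msg.cmd);			/* payload bytes after the header */
	msg.hdr.out = sizeof(resp);			/* response buffer size */
	msg.cmd.uid = 0;				/* caller's private cookie */
	msg.cmd.response = (__u64)(uintptr_t)&resp;	/* pointer passed as __u64 */
	msg.cmd.ps = RDMA_PS_TCP;
	msg.cmd.qp_type = 2;				/* assumed IB_QPT_RC value */
	if (write(cm_fd, &msg, sizeof(msg)) != (ssize_t)sizeof(msg))
		return -1;
	*id_out = resp.id;
	return 0;
}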