blob: 0770dcc74add45e4b3efd1386477de892329d0bf [file] [log] [blame]
Yishai Hadasa8b92ca2018-06-17 12:59:57 +03001// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2/*
3 * Copyright (c) 2018, Mellanox Technologies inc. All rights reserved.
4 */
5
6#include <rdma/ib_user_verbs.h>
7#include <rdma/ib_verbs.h>
8#include <rdma/uverbs_types.h>
9#include <rdma/uverbs_ioctl.h>
10#include <rdma/mlx5_user_ioctl_cmds.h>
Yishai Hadasa124edb2019-01-22 08:29:57 +020011#include <rdma/mlx5_user_ioctl_verbs.h>
Yishai Hadasa8b92ca2018-06-17 12:59:57 +030012#include <rdma/ib_umem.h>
Yishai Hadas34613eb2018-11-26 08:28:35 +020013#include <rdma/uverbs_std_types.h>
Yishai Hadasa8b92ca2018-06-17 12:59:57 +030014#include <linux/mlx5/driver.h>
15#include <linux/mlx5/fs.h>
16#include "mlx5_ib.h"
17
Yishai Hadas8aa8c952018-06-17 13:00:00 +030018#define UVERBS_MODULE_NAME mlx5_ib
19#include <rdma/uverbs_named_ioctl.h>
20
/* Per-devx-object flags noting extra teardown work needed at destroy time. */
enum devx_obj_flags {
	DEVX_OBJ_FLAGS_INDIRECT_MKEY = 1 << 0,
	DEVX_OBJ_FLAGS_DCT = 1 << 1,
};
25
/*
 * Completion data for an asynchronous devx command; queued on the async
 * FD object's list until userspace reads it.
 */
struct devx_async_data {
	struct mlx5_ib_dev *mdev;
	struct list_head list;
	struct ib_uobject *fd_uobj;	/* async event FD this reply belongs to */
	struct mlx5_async_work cb_work;
	u16 cmd_out_len;		/* length of the command output payload */
	/* must be last field in this structure */
	struct mlx5_ib_uapi_devx_async_cmd_hdr hdr;
};
35
/* delete_fte is the largest destroy command any devx object may need. */
#define MLX5_MAX_DESTROY_INBOX_SIZE_DW MLX5_ST_SZ_DW(delete_fte_in)
/*
 * A firmware object created through devx. The destroy command mailbox is
 * pre-built at creation time so teardown cannot fail on allocation.
 */
struct devx_obj {
	struct mlx5_core_dev *mdev;
	u64 obj_id;	/* creator opcode encoded in upper 32 bits, see get_enc_obj_id() */
	u32 dinlen; /* destroy inbox length */
	u32 dinbox[MLX5_MAX_DESTROY_INBOX_SIZE_DW];
	u32 flags;	/* enum devx_obj_flags */
	union {
		struct mlx5_ib_devx_mr devx_mr;
		struct mlx5_core_dct core_dct;
	};
};
48
/* A user memory region registered with firmware via the devx UMEM object. */
struct devx_umem {
	struct mlx5_core_dev *mdev;
	struct ib_umem *umem;
	u32 page_offset;
	int page_shift;
	int ncont;	/* number of compound pages covering the umem */
	u32 dinlen;	/* destroy inbox length */
	u32 dinbox[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)];
};
58
/* Scratch state for building and executing a UMEM registration command. */
struct devx_umem_reg_cmd {
	void *in;	/* variable-sized create_umem inbox (kvzalloc'd) */
	u32 inlen;
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
};
64
/*
 * Resolve the mlx5 ucontext for the calling attrs bundle.
 * May return an ERR_PTR propagated from ib_uverbs_get_ucontext().
 */
static struct mlx5_ib_ucontext *
devx_ufile2uctx(const struct uverbs_attr_bundle *attrs)
{
	return to_mucontext(ib_uverbs_get_ucontext(attrs));
}
70
/*
 * Allocate a firmware user context (UCTX) for devx usage.
 * For user contexts with CAP_NET_RAW, grants the raw-TX capability when
 * the device supports it.
 * Returns the firmware-assigned uid (> 0) on success or a negative errno;
 * -EINVAL when the device does not support user contexts at all.
 */
int mlx5_ib_devx_create(struct mlx5_ib_dev *dev, bool is_user)
{
	u32 in[MLX5_ST_SZ_DW(create_uctx_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
	void *uctx;
	int err;
	u16 uid;
	u32 cap = 0;

	/* 0 means not supported */
	if (!MLX5_CAP_GEN(dev->mdev, log_max_uctx))
		return -EINVAL;

	uctx = MLX5_ADDR_OF(create_uctx_in, in, uctx);
	if (is_user && capable(CAP_NET_RAW) &&
	    (MLX5_CAP_GEN(dev->mdev, uctx_cap) & MLX5_UCTX_CAP_RAW_TX))
		cap |= MLX5_UCTX_CAP_RAW_TX;

	MLX5_SET(create_uctx_in, in, opcode, MLX5_CMD_OP_CREATE_UCTX);
	MLX5_SET(uctx, uctx, cap, cap);

	err = mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
	if (err)
		return err;

	/* The uid is returned in the generic obj_id field of the out header. */
	uid = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
	return uid;
}
99
/*
 * Release the firmware user context identified by @uid.
 * Destroy failures are intentionally ignored: there is no recovery path
 * on context teardown.
 */
void mlx5_ib_devx_destroy(struct mlx5_ib_dev *dev, u16 uid)
{
	u32 in[MLX5_ST_SZ_DW(destroy_uctx_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};

	MLX5_SET(destroy_uctx_in, in, opcode, MLX5_CMD_OP_DESTROY_UCTX);
	MLX5_SET(destroy_uctx_in, in, uid, uid);

	mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
}
Yishai Hadas8aa8c952018-06-17 13:00:00 +0300110
/*
 * Check whether a devx object can serve as a flow steering destination.
 * The object type is recovered from the opcode stored in its pre-built
 * destroy mailbox. On success, *dest_id and *dest_type are filled in.
 */
bool mlx5_ib_devx_is_flow_dest(void *obj, int *dest_id, int *dest_type)
{
	struct devx_obj *devx_obj = obj;
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, devx_obj->dinbox, opcode);

	switch (opcode) {
	case MLX5_CMD_OP_DESTROY_TIR:
		*dest_type = MLX5_FLOW_DESTINATION_TYPE_TIR;
		*dest_id = MLX5_GET(general_obj_in_cmd_hdr, devx_obj->dinbox,
				    obj_id);
		return true;

	case MLX5_CMD_OP_DESTROY_FLOW_TABLE:
		*dest_type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		*dest_id = MLX5_GET(destroy_flow_table_in, devx_obj->dinbox,
				    table_id);
		return true;
	default:
		return false;
	}
}
132
/*
 * Check whether a devx object is a flow counter (identified by its
 * pre-built destroy opcode); if so, return its id via *counter_id.
 */
bool mlx5_ib_devx_is_flow_counter(void *obj, u32 *counter_id)
{
	struct devx_obj *devx_obj = obj;
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, devx_obj->dinbox, opcode);

	if (opcode == MLX5_CMD_OP_DEALLOC_FLOW_COUNTER) {
		*counter_id = MLX5_GET(dealloc_flow_counter_in,
				       devx_obj->dinbox,
				       flow_counter_id);
		return true;
	}

	return false;
}
147
Yishai Hadas2351776e2018-10-07 12:06:34 +0300148/*
149 * As the obj_id in the firmware is not globally unique the object type
150 * must be considered upon checking for a valid object id.
151 * For that the opcode of the creator command is encoded as part of the obj_id.
152 */
153static u64 get_enc_obj_id(u16 opcode, u32 obj_id)
154{
155 return ((u64)opcode << 32) | obj_id;
156}
157
/*
 * Derive the encoded object id that a modify/query/arm command refers to.
 * Each supported opcode maps to its creator opcode plus the id field
 * pulled from the command's own inbox layout.
 * Returns 0 when the opcode is not recognized (0 is never a valid
 * encoded id since the creator opcode occupies the upper 32 bits).
 */
static u64 devx_get_obj_id(const void *in)
{
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
	u64 obj_id;

	switch (opcode) {
	case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT:
	case MLX5_CMD_OP_QUERY_GENERAL_OBJECT:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_GENERAL_OBJECT,
					MLX5_GET(general_obj_in_cmd_hdr, in,
						 obj_id));
		break;
	case MLX5_CMD_OP_QUERY_MKEY:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_MKEY,
					MLX5_GET(query_mkey_in, in,
						 mkey_index));
		break;
	case MLX5_CMD_OP_QUERY_CQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_CQ,
					MLX5_GET(query_cq_in, in, cqn));
		break;
	case MLX5_CMD_OP_MODIFY_CQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_CQ,
					MLX5_GET(modify_cq_in, in, cqn));
		break;
	case MLX5_CMD_OP_QUERY_SQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SQ,
					MLX5_GET(query_sq_in, in, sqn));
		break;
	case MLX5_CMD_OP_MODIFY_SQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SQ,
					MLX5_GET(modify_sq_in, in, sqn));
		break;
	case MLX5_CMD_OP_QUERY_RQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
					MLX5_GET(query_rq_in, in, rqn));
		break;
	case MLX5_CMD_OP_MODIFY_RQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
					MLX5_GET(modify_rq_in, in, rqn));
		break;
	case MLX5_CMD_OP_QUERY_RMP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RMP,
					MLX5_GET(query_rmp_in, in, rmpn));
		break;
	case MLX5_CMD_OP_MODIFY_RMP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RMP,
					MLX5_GET(modify_rmp_in, in, rmpn));
		break;
	case MLX5_CMD_OP_QUERY_RQT:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQT,
					MLX5_GET(query_rqt_in, in, rqtn));
		break;
	case MLX5_CMD_OP_MODIFY_RQT:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQT,
					MLX5_GET(modify_rqt_in, in, rqtn));
		break;
	case MLX5_CMD_OP_QUERY_TIR:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_TIR,
					MLX5_GET(query_tir_in, in, tirn));
		break;
	case MLX5_CMD_OP_MODIFY_TIR:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_TIR,
					MLX5_GET(modify_tir_in, in, tirn));
		break;
	case MLX5_CMD_OP_QUERY_TIS:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_TIS,
					MLX5_GET(query_tis_in, in, tisn));
		break;
	case MLX5_CMD_OP_MODIFY_TIS:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_TIS,
					MLX5_GET(modify_tis_in, in, tisn));
		break;
	case MLX5_CMD_OP_QUERY_FLOW_TABLE:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_FLOW_TABLE,
					MLX5_GET(query_flow_table_in, in,
						 table_id));
		break;
	case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_FLOW_TABLE,
					MLX5_GET(modify_flow_table_in, in,
						 table_id));
		break;
	case MLX5_CMD_OP_QUERY_FLOW_GROUP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_FLOW_GROUP,
					MLX5_GET(query_flow_group_in, in,
						 group_id));
		break;
	/* FTEs have no create command; SET_FLOW_TABLE_ENTRY is the creator. */
	case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY,
					MLX5_GET(query_fte_in, in,
						 flow_index));
		break;
	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY,
					MLX5_GET(set_fte_in, in, flow_index));
		break;
	case MLX5_CMD_OP_QUERY_Q_COUNTER:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_ALLOC_Q_COUNTER,
					MLX5_GET(query_q_counter_in, in,
						 counter_set_id));
		break;
	case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_ALLOC_FLOW_COUNTER,
					MLX5_GET(query_flow_counter_in, in,
						 flow_counter_id));
		break;
	case MLX5_CMD_OP_QUERY_MODIFY_HEADER_CONTEXT:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT,
					MLX5_GET(general_obj_in_cmd_hdr, in,
						 obj_id));
		break;
	case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT,
					MLX5_GET(query_scheduling_element_in,
						 in, scheduling_element_id));
		break;
	case MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT,
					MLX5_GET(modify_scheduling_element_in,
						 in, scheduling_element_id));
		break;
	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT,
					MLX5_GET(add_vxlan_udp_dport_in, in,
						 vxlan_udp_port));
		break;
	case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_SET_L2_TABLE_ENTRY,
					MLX5_GET(query_l2_table_entry_in, in,
						 table_index));
		break;
	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_SET_L2_TABLE_ENTRY,
					MLX5_GET(set_l2_table_entry_in, in,
						 table_index));
		break;
	case MLX5_CMD_OP_QUERY_QP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
					MLX5_GET(query_qp_in, in, qpn));
		break;
	case MLX5_CMD_OP_RST2INIT_QP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
					MLX5_GET(rst2init_qp_in, in, qpn));
		break;
	case MLX5_CMD_OP_INIT2RTR_QP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
					MLX5_GET(init2rtr_qp_in, in, qpn));
		break;
	case MLX5_CMD_OP_RTR2RTS_QP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
					MLX5_GET(rtr2rts_qp_in, in, qpn));
		break;
	case MLX5_CMD_OP_RTS2RTS_QP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
					MLX5_GET(rts2rts_qp_in, in, qpn));
		break;
	case MLX5_CMD_OP_SQERR2RTS_QP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
					MLX5_GET(sqerr2rts_qp_in, in, qpn));
		break;
	case MLX5_CMD_OP_2ERR_QP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
					MLX5_GET(qp_2err_in, in, qpn));
		break;
	case MLX5_CMD_OP_2RST_QP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
					MLX5_GET(qp_2rst_in, in, qpn));
		break;
	case MLX5_CMD_OP_QUERY_DCT:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_DCT,
					MLX5_GET(query_dct_in, in, dctn));
		break;
	case MLX5_CMD_OP_QUERY_XRQ:
	case MLX5_CMD_OP_QUERY_XRQ_DC_PARAMS_ENTRY:
	case MLX5_CMD_OP_QUERY_XRQ_ERROR_PARAMS:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRQ,
					MLX5_GET(query_xrq_in, in, xrqn));
		break;
	case MLX5_CMD_OP_QUERY_XRC_SRQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRC_SRQ,
					MLX5_GET(query_xrc_srq_in, in,
						 xrc_srqn));
		break;
	case MLX5_CMD_OP_ARM_XRC_SRQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRC_SRQ,
					MLX5_GET(arm_xrc_srq_in, in, xrc_srqn));
		break;
	case MLX5_CMD_OP_QUERY_SRQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SRQ,
					MLX5_GET(query_srq_in, in, srqn));
		break;
	case MLX5_CMD_OP_ARM_RQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
					MLX5_GET(arm_rq_in, in, srq_number));
		break;
	case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_DCT,
					MLX5_GET(drain_dct_in, in, dctn));
		break;
	case MLX5_CMD_OP_ARM_XRQ:
	case MLX5_CMD_OP_SET_XRQ_DC_PARAMS_ENTRY:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRQ,
					MLX5_GET(arm_xrq_in, in, xrqn));
		break;
	case MLX5_CMD_OP_QUERY_PACKET_REFORMAT_CONTEXT:
		obj_id = get_enc_obj_id
			(MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT,
			 MLX5_GET(query_packet_reformat_context_in,
				  in, packet_reformat_id));
		break;
	default:
		obj_id = 0;
	}

	return obj_id;
}
Yishai Hadase662e142018-06-17 13:00:02 +0300375
/*
 * Validate that the command inbox @in targets the firmware object that
 * backs @uobj, preventing a user from operating on another object's id.
 * The uobject's own encoded id is reconstructed per uverbs object type
 * and compared against the id extracted from the command.
 */
static bool devx_is_valid_obj_id(struct ib_uobject *uobj, const void *in)
{
	u64 obj_id = devx_get_obj_id(in);

	if (!obj_id)
		return false;

	switch (uobj_get_object_id(uobj)) {
	case UVERBS_OBJECT_CQ:
		return get_enc_obj_id(MLX5_CMD_OP_CREATE_CQ,
				      to_mcq(uobj->object)->mcq.cqn) ==
				      obj_id;

	case UVERBS_OBJECT_SRQ:
	{
		struct mlx5_core_srq *srq = &(to_msrq(uobj->object)->msrq);
		struct mlx5_ib_dev *dev = to_mdev(uobj->context->device);
		u16 opcode;

		/* The creator opcode depends on which SRQ flavor backs it. */
		switch (srq->common.res) {
		case MLX5_RES_XSRQ:
			opcode = MLX5_CMD_OP_CREATE_XRC_SRQ;
			break;
		case MLX5_RES_XRQ:
			opcode = MLX5_CMD_OP_CREATE_XRQ;
			break;
		default:
			/* Plain SRQs are emulated over RMPs on ISSI > 0. */
			if (!dev->mdev->issi)
				opcode = MLX5_CMD_OP_CREATE_SRQ;
			else
				opcode = MLX5_CMD_OP_CREATE_RMP;
		}

		return get_enc_obj_id(opcode,
				      to_msrq(uobj->object)->msrq.srqn) ==
				      obj_id;
	}

	case UVERBS_OBJECT_QP:
	{
		struct mlx5_ib_qp *qp = to_mqp(uobj->object);
		enum ib_qp_type qp_type = qp->ibqp.qp_type;

		/*
		 * Raw packet / underlay QPs are built from RQ+SQ+TIR+TIS
		 * firmware objects; a command aimed at any of them is valid.
		 */
		if (qp_type == IB_QPT_RAW_PACKET ||
		    (qp->flags & MLX5_IB_QP_UNDERLAY)) {
			struct mlx5_ib_raw_packet_qp *raw_packet_qp =
				&qp->raw_packet_qp;
			struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
			struct mlx5_ib_sq *sq = &raw_packet_qp->sq;

			return (get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
					       rq->base.mqp.qpn) == obj_id ||
				get_enc_obj_id(MLX5_CMD_OP_CREATE_SQ,
					       sq->base.mqp.qpn) == obj_id ||
				get_enc_obj_id(MLX5_CMD_OP_CREATE_TIR,
					       rq->tirn) == obj_id ||
				get_enc_obj_id(MLX5_CMD_OP_CREATE_TIS,
					       sq->tisn) == obj_id);
		}

		if (qp_type == MLX5_IB_QPT_DCT)
			return get_enc_obj_id(MLX5_CMD_OP_CREATE_DCT,
					      qp->dct.mdct.mqp.qpn) == obj_id;

		return get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
				      qp->ibqp.qp_num) == obj_id;
	}

	case UVERBS_OBJECT_WQ:
		return get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
				      to_mrwq(uobj->object)->core_qp.qpn) ==
				      obj_id;

	case UVERBS_OBJECT_RWQ_IND_TBL:
		return get_enc_obj_id(MLX5_CMD_OP_CREATE_RQT,
				      to_mrwq_ind_table(uobj->object)->rqtn) ==
				      obj_id;

	case MLX5_IB_OBJECT_DEVX_OBJ:
		/* devx objects already cache their encoded id. */
		return ((struct devx_obj *)uobj->object)->obj_id == obj_id;

	default:
		return false;
	}
}
461
/*
 * Mark the umem/dbr fields of a create/modify command as valid so firmware
 * accepts umem-based buffers for the object. Commands that do not carry
 * umem references are left untouched.
 */
static void devx_set_umem_valid(const void *in)
{
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);

	switch (opcode) {
	case MLX5_CMD_OP_CREATE_MKEY:
		MLX5_SET(create_mkey_in, in, mkey_umem_valid, 1);
		break;
	case MLX5_CMD_OP_CREATE_CQ:
	{
		void *cqc;

		MLX5_SET(create_cq_in, in, cq_umem_valid, 1);
		cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
		MLX5_SET(cqc, cqc, dbr_umem_valid, 1);
		break;
	}
	case MLX5_CMD_OP_CREATE_QP:
	{
		void *qpc;

		qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
		MLX5_SET(qpc, qpc, dbr_umem_valid, 1);
		MLX5_SET(create_qp_in, in, wq_umem_valid, 1);
		break;
	}

	case MLX5_CMD_OP_CREATE_RQ:
	{
		void *rqc, *wq;

		rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
		wq = MLX5_ADDR_OF(rqc, rqc, wq);
		MLX5_SET(wq, wq, dbr_umem_valid, 1);
		MLX5_SET(wq, wq, wq_umem_valid, 1);
		break;
	}

	case MLX5_CMD_OP_CREATE_SQ:
	{
		void *sqc, *wq;

		sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
		wq = MLX5_ADDR_OF(sqc, sqc, wq);
		MLX5_SET(wq, wq, dbr_umem_valid, 1);
		MLX5_SET(wq, wq, wq_umem_valid, 1);
		break;
	}

	case MLX5_CMD_OP_MODIFY_CQ:
		MLX5_SET(modify_cq_in, in, cq_umem_valid, 1);
		break;

	case MLX5_CMD_OP_CREATE_RMP:
	{
		void *rmpc, *wq;

		rmpc = MLX5_ADDR_OF(create_rmp_in, in, ctx);
		wq = MLX5_ADDR_OF(rmpc, rmpc, wq);
		MLX5_SET(wq, wq, dbr_umem_valid, 1);
		MLX5_SET(wq, wq, wq_umem_valid, 1);
		break;
	}

	case MLX5_CMD_OP_CREATE_XRQ:
	{
		void *xrqc, *wq;

		xrqc = MLX5_ADDR_OF(create_xrq_in, in, xrq_context);
		wq = MLX5_ADDR_OF(xrqc, xrqc, wq);
		MLX5_SET(wq, wq, dbr_umem_valid, 1);
		MLX5_SET(wq, wq, wq_umem_valid, 1);
		break;
	}

	case MLX5_CMD_OP_CREATE_XRC_SRQ:
	{
		void *xrc_srqc;

		MLX5_SET(create_xrc_srq_in, in, xrc_srq_umem_valid, 1);
		xrc_srqc = MLX5_ADDR_OF(create_xrc_srq_in, in,
					xrc_srq_context_entry);
		MLX5_SET(xrc_srqc, xrc_srqc, dbr_umem_valid, 1);
		break;
	}

	default:
		return;
	}
}
552
/*
 * Return true when the command in @in creates a firmware object, and
 * report the decoded opcode through *opcode (always written).
 * SET_FLOW_TABLE_ENTRY counts as a create only with op_mod == 0
 * (op_mod == 1 modifies an existing entry).
 */
static bool devx_is_obj_create_cmd(const void *in, u16 *opcode)
{
	*opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);

	switch (*opcode) {
	case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
	case MLX5_CMD_OP_CREATE_MKEY:
	case MLX5_CMD_OP_CREATE_CQ:
	case MLX5_CMD_OP_ALLOC_PD:
	case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
	case MLX5_CMD_OP_CREATE_RMP:
	case MLX5_CMD_OP_CREATE_SQ:
	case MLX5_CMD_OP_CREATE_RQ:
	case MLX5_CMD_OP_CREATE_RQT:
	case MLX5_CMD_OP_CREATE_TIR:
	case MLX5_CMD_OP_CREATE_TIS:
	case MLX5_CMD_OP_ALLOC_Q_COUNTER:
	case MLX5_CMD_OP_CREATE_FLOW_TABLE:
	case MLX5_CMD_OP_CREATE_FLOW_GROUP:
	case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
	case MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT:
	case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT:
	case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_CREATE_QP:
	case MLX5_CMD_OP_CREATE_SRQ:
	case MLX5_CMD_OP_CREATE_XRC_SRQ:
	case MLX5_CMD_OP_CREATE_DCT:
	case MLX5_CMD_OP_CREATE_XRQ:
	case MLX5_CMD_OP_ATTACH_TO_MCG:
	case MLX5_CMD_OP_ALLOC_XRCD:
		return true;
	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
	{
		u16 op_mod = MLX5_GET(set_fte_in, in, op_mod);
		if (op_mod == 0)
			return true;
		return false;
	}
	default:
		return false;
	}
}
597
/*
 * Return true when the command in @in modifies an existing firmware
 * object. SET_FLOW_TABLE_ENTRY counts as a modify only with op_mod == 1
 * (op_mod == 0 creates the entry and is handled by the create path).
 */
static bool devx_is_obj_modify_cmd(const void *in)
{
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);

	switch (opcode) {
	case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT:
	case MLX5_CMD_OP_MODIFY_CQ:
	case MLX5_CMD_OP_MODIFY_RMP:
	case MLX5_CMD_OP_MODIFY_SQ:
	case MLX5_CMD_OP_MODIFY_RQ:
	case MLX5_CMD_OP_MODIFY_RQT:
	case MLX5_CMD_OP_MODIFY_TIR:
	case MLX5_CMD_OP_MODIFY_TIS:
	case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
	case MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT:
	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_RST2INIT_QP:
	case MLX5_CMD_OP_INIT2RTR_QP:
	case MLX5_CMD_OP_RTR2RTS_QP:
	case MLX5_CMD_OP_RTS2RTS_QP:
	case MLX5_CMD_OP_SQERR2RTS_QP:
	case MLX5_CMD_OP_2ERR_QP:
	case MLX5_CMD_OP_2RST_QP:
	case MLX5_CMD_OP_ARM_XRC_SRQ:
	case MLX5_CMD_OP_ARM_RQ:
	case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
	case MLX5_CMD_OP_ARM_XRQ:
	case MLX5_CMD_OP_SET_XRQ_DC_PARAMS_ENTRY:
		return true;
	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
	{
		u16 op_mod = MLX5_GET(set_fte_in, in, op_mod);

		if (op_mod == 1)
			return true;
		return false;
	}
	default:
		return false;
	}
}
640
/* Return true when the command in @in queries an existing firmware object. */
static bool devx_is_obj_query_cmd(const void *in)
{
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);

	switch (opcode) {
	case MLX5_CMD_OP_QUERY_GENERAL_OBJECT:
	case MLX5_CMD_OP_QUERY_MKEY:
	case MLX5_CMD_OP_QUERY_CQ:
	case MLX5_CMD_OP_QUERY_RMP:
	case MLX5_CMD_OP_QUERY_SQ:
	case MLX5_CMD_OP_QUERY_RQ:
	case MLX5_CMD_OP_QUERY_RQT:
	case MLX5_CMD_OP_QUERY_TIR:
	case MLX5_CMD_OP_QUERY_TIS:
	case MLX5_CMD_OP_QUERY_Q_COUNTER:
	case MLX5_CMD_OP_QUERY_FLOW_TABLE:
	case MLX5_CMD_OP_QUERY_FLOW_GROUP:
	case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
	case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
	case MLX5_CMD_OP_QUERY_MODIFY_HEADER_CONTEXT:
	case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
	case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_QUERY_QP:
	case MLX5_CMD_OP_QUERY_SRQ:
	case MLX5_CMD_OP_QUERY_XRC_SRQ:
	case MLX5_CMD_OP_QUERY_DCT:
	case MLX5_CMD_OP_QUERY_XRQ:
	case MLX5_CMD_OP_QUERY_XRQ_DC_PARAMS_ENTRY:
	case MLX5_CMD_OP_QUERY_XRQ_ERROR_PARAMS:
	case MLX5_CMD_OP_QUERY_PACKET_REFORMAT_CONTEXT:
		return true;
	default:
		return false;
	}
}
676
/*
 * Commands allowed even without a per-context devx uid; these may run
 * under the device-wide whitelist uid (see devx_get_uid()).
 */
static bool devx_is_whitelist_cmd(void *in)
{
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);

	switch (opcode) {
	case MLX5_CMD_OP_QUERY_HCA_CAP:
	case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
		return true;
	default:
		return false;
	}
}
689
/*
 * Pick the uid to stamp on @cmd_in for context @c.
 * Whitelisted commands fall back to the device-wide whitelist uid when
 * the context has none; other commands require the context's own uid.
 * Returns a positive uid, -EOPNOTSUPP when no uid is usable for a
 * whitelisted command, or -EINVAL when the context lacks a devx uid.
 */
static int devx_get_uid(struct mlx5_ib_ucontext *c, void *cmd_in)
{
	if (devx_is_whitelist_cmd(cmd_in)) {
		struct mlx5_ib_dev *dev;

		if (c->devx_uid)
			return c->devx_uid;

		dev = to_mdev(c->ibucontext.device);
		if (dev->devx_whitelist_uid)
			return dev->devx_whitelist_uid;

		return -EOPNOTSUPP;
	}

	if (!c->devx_uid)
		return -EINVAL;

	return c->devx_uid;
}
/*
 * Return true when @in is a general (non-object) HCA command that the
 * DEVX_OTHER method may execute: anything in the general opcode range
 * plus an explicit allow-list of query/status commands.
 */
static bool devx_is_general_cmd(void *in)
{
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);

	if (opcode >= MLX5_CMD_OP_GENERAL_START &&
	    opcode < MLX5_CMD_OP_GENERAL_END)
		return true;

	switch (opcode) {
	case MLX5_CMD_OP_QUERY_HCA_CAP:
	case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_VPORT_STATE:
	case MLX5_CMD_OP_QUERY_ADAPTER:
	case MLX5_CMD_OP_QUERY_ISSI:
	case MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_ROCE_ADDRESS:
	case MLX5_CMD_OP_QUERY_VNIC_ENV:
	case MLX5_CMD_OP_QUERY_VPORT_COUNTER:
	case MLX5_CMD_OP_GET_DROPPED_PACKET_LOG:
	case MLX5_CMD_OP_NOP:
	case MLX5_CMD_OP_QUERY_CONG_STATUS:
	case MLX5_CMD_OP_QUERY_CONG_PARAMS:
	case MLX5_CMD_OP_QUERY_CONG_STATISTICS:
		return true;
	default:
		return false;
	}
}
738
/*
 * Method handler: translate a user-visible completion vector number into
 * the device EQ number userspace must program into its CQs.
 */
static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_QUERY_EQN)(
	struct uverbs_attr_bundle *attrs)
{
	struct mlx5_ib_ucontext *c;
	struct mlx5_ib_dev *dev;
	int user_vector;
	int dev_eqn;
	unsigned int irqn;	/* required by mlx5_vector2eqn(), value unused */
	int err;

	if (uverbs_copy_from(&user_vector, attrs,
			     MLX5_IB_ATTR_DEVX_QUERY_EQN_USER_VEC))
		return -EFAULT;

	c = devx_ufile2uctx(attrs);
	if (IS_ERR(c))
		return PTR_ERR(c);
	dev = to_mdev(c->ibucontext.device);

	err = mlx5_vector2eqn(dev->mdev, user_vector, &dev_eqn, &irqn);
	if (err < 0)
		return err;

	if (uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_QUERY_EQN_DEV_EQN,
			   &dev_eqn, sizeof(dev_eqn)))
		return -EFAULT;

	return 0;
}
768
/*
 * Security note:
 * The hardware protection mechanism works like this: Each device object that
 * is subject to UAR doorbells (QP/SQ/CQ) gets a UAR ID (called uar_page in
 * the device specification manual) upon its creation. Then upon doorbell,
 * hardware fetches the object context for which the doorbell was rung, and
 * validates that the UAR through which the DB was rung matches the UAR ID
 * of the object.
 * If there is no match the doorbell is silently ignored by the hardware. Of
 * course, the user cannot ring a doorbell on a UAR that was not mapped to it.
 * Now in devx, as the devx kernel does not manipulate the QP/SQ/CQ command
 * mailboxes (except tagging them with UID), we expose to the user its UAR
 * ID, so it can embed it in these objects in the expected specification
 * format. So the only thing the user can do is hurt itself by creating a
 * QP/SQ/CQ with a UAR ID other than its own, and then in this case other
 * users may ring a doorbell on its objects.
 * The consequence of that will be that another user can schedule a QP/SQ
 * of the buggy user for execution (just insert it to the hardware schedule
 * queue or arm its CQ for event generation), no further harm is expected.
 */
/*
 * Method handler: translate a user bfreg index into the device UAR index
 * userspace should embed in its QP/SQ/CQ contexts. See the security note
 * above for why exposing the UAR ID is safe.
 */
static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_QUERY_UAR)(
	struct uverbs_attr_bundle *attrs)
{
	struct mlx5_ib_ucontext *c;
	struct mlx5_ib_dev *dev;
	u32 user_idx;
	s32 dev_idx;

	c = devx_ufile2uctx(attrs);
	if (IS_ERR(c))
		return PTR_ERR(c);
	dev = to_mdev(c->ibucontext.device);

	if (uverbs_copy_from(&user_idx, attrs,
			     MLX5_IB_ATTR_DEVX_QUERY_UAR_USER_IDX))
		return -EFAULT;

	dev_idx = bfregn_to_uar_index(dev, &c->bfregi, user_idx, true);
	if (dev_idx < 0)
		return dev_idx;	/* negative value is the errno from lookup */

	if (uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_QUERY_UAR_DEV_IDX,
			   &dev_idx, sizeof(dev_idx)))
		return -EFAULT;

	return 0;
}
816
Jason Gunthorpee83f0ec2018-07-25 21:40:18 -0600817static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OTHER)(
Jason Gunthorpe15a1b4b2018-11-25 20:51:15 +0200818 struct uverbs_attr_bundle *attrs)
Yishai Hadas8aa8c952018-06-17 13:00:00 +0300819{
Jason Gunthorpe22fa27f2018-07-10 13:43:06 -0600820 struct mlx5_ib_ucontext *c;
821 struct mlx5_ib_dev *dev;
Yishai Hadas7efce362018-06-17 13:00:01 +0300822 void *cmd_in = uverbs_attr_get_alloced_ptr(
823 attrs, MLX5_IB_ATTR_DEVX_OTHER_CMD_IN);
Yishai Hadas8aa8c952018-06-17 13:00:00 +0300824 int cmd_out_len = uverbs_attr_get_len(attrs,
825 MLX5_IB_ATTR_DEVX_OTHER_CMD_OUT);
826 void *cmd_out;
827 int err;
Yishai Hadas7e1335a2018-09-20 21:45:20 +0300828 int uid;
Yishai Hadas8aa8c952018-06-17 13:00:00 +0300829
Jason Gunthorpe15a1b4b2018-11-25 20:51:15 +0200830 c = devx_ufile2uctx(attrs);
Jason Gunthorpe22fa27f2018-07-10 13:43:06 -0600831 if (IS_ERR(c))
832 return PTR_ERR(c);
833 dev = to_mdev(c->ibucontext.device);
834
Yishai Hadas7e1335a2018-09-20 21:45:20 +0300835 uid = devx_get_uid(c, cmd_in);
836 if (uid < 0)
837 return uid;
Yishai Hadas8aa8c952018-06-17 13:00:00 +0300838
839 /* Only white list of some general HCA commands are allowed for this method. */
840 if (!devx_is_general_cmd(cmd_in))
841 return -EINVAL;
842
Jason Gunthorpeb61815e2018-08-09 20:14:41 -0600843 cmd_out = uverbs_zalloc(attrs, cmd_out_len);
844 if (IS_ERR(cmd_out))
845 return PTR_ERR(cmd_out);
Yishai Hadas8aa8c952018-06-17 13:00:00 +0300846
Yishai Hadas7e1335a2018-09-20 21:45:20 +0300847 MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
Yishai Hadas8aa8c952018-06-17 13:00:00 +0300848 err = mlx5_cmd_exec(dev->mdev, cmd_in,
849 uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OTHER_CMD_IN),
850 cmd_out, cmd_out_len);
851 if (err)
Jason Gunthorpeb61815e2018-08-09 20:14:41 -0600852 return err;
Yishai Hadas8aa8c952018-06-17 13:00:00 +0300853
Jason Gunthorpeb61815e2018-08-09 20:14:41 -0600854 return uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OTHER_CMD_OUT, cmd_out,
855 cmd_out_len);
Yishai Hadas8aa8c952018-06-17 13:00:00 +0300856}
857
/*
 * Build the matching destroy/dealloc command mailbox ("din") for a
 * just-executed create/alloc command, so the object can later be torn down
 * without re-parsing user input.
 *
 * @in:     the original create command mailbox (device format)
 * @out:    the firmware output of that create command
 * @din:    caller buffer receiving the destroy command
 * @dinlen: set to the byte length of the destroy command in @din
 * @obj_id: set to the id identifying the created object
 */
static void devx_obj_build_destroy_cmd(void *in, void *out, void *din,
				       u32 *dinlen,
				       u32 *obj_id)
{
	u16 obj_type = MLX5_GET(general_obj_in_cmd_hdr, in, obj_type);
	u16 uid = MLX5_GET(general_obj_in_cmd_hdr, in, uid);

	/* Defaults: most destroy commands are a plain general-object header
	 * carrying the firmware-returned obj_id and the issuing uid; specific
	 * cases below override the id, the length or both.
	 */
	*obj_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
	*dinlen = MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr);

	MLX5_SET(general_obj_in_cmd_hdr, din, obj_id, *obj_id);
	MLX5_SET(general_obj_in_cmd_hdr, din, uid, uid);

	switch (MLX5_GET(general_obj_in_cmd_hdr, in, opcode)) {
	case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
		MLX5_SET(general_obj_in_cmd_hdr, din, obj_type, obj_type);
		break;

	case MLX5_CMD_OP_CREATE_UMEM:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DESTROY_UMEM);
		break;
	case MLX5_CMD_OP_CREATE_MKEY:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_MKEY);
		break;
	case MLX5_CMD_OP_CREATE_CQ:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_CQ);
		break;
	case MLX5_CMD_OP_ALLOC_PD:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DEALLOC_PD);
		break;
	case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN);
		break;
	case MLX5_CMD_OP_CREATE_RMP:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_RMP);
		break;
	case MLX5_CMD_OP_CREATE_SQ:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_SQ);
		break;
	case MLX5_CMD_OP_CREATE_RQ:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_RQ);
		break;
	case MLX5_CMD_OP_CREATE_RQT:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_RQT);
		break;
	case MLX5_CMD_OP_CREATE_TIR:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_TIR);
		break;
	case MLX5_CMD_OP_CREATE_TIS:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_TIS);
		break;
	case MLX5_CMD_OP_ALLOC_Q_COUNTER:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DEALLOC_Q_COUNTER);
		break;
	/* Flow-table family destroy commands need the full addressing tuple
	 * (vport/table_type/...) copied from the create input, not just an id.
	 */
	case MLX5_CMD_OP_CREATE_FLOW_TABLE:
		*dinlen = MLX5_ST_SZ_BYTES(destroy_flow_table_in);
		*obj_id = MLX5_GET(create_flow_table_out, out, table_id);
		MLX5_SET(destroy_flow_table_in, din, other_vport,
			 MLX5_GET(create_flow_table_in,  in, other_vport));
		MLX5_SET(destroy_flow_table_in, din, vport_number,
			 MLX5_GET(create_flow_table_in,  in, vport_number));
		MLX5_SET(destroy_flow_table_in, din, table_type,
			 MLX5_GET(create_flow_table_in,  in, table_type));
		MLX5_SET(destroy_flow_table_in, din, table_id, *obj_id);
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DESTROY_FLOW_TABLE);
		break;
	case MLX5_CMD_OP_CREATE_FLOW_GROUP:
		*dinlen = MLX5_ST_SZ_BYTES(destroy_flow_group_in);
		*obj_id = MLX5_GET(create_flow_group_out, out, group_id);
		MLX5_SET(destroy_flow_group_in, din, other_vport,
			 MLX5_GET(create_flow_group_in, in, other_vport));
		MLX5_SET(destroy_flow_group_in, din, vport_number,
			 MLX5_GET(create_flow_group_in, in, vport_number));
		MLX5_SET(destroy_flow_group_in, din, table_type,
			 MLX5_GET(create_flow_group_in, in, table_type));
		MLX5_SET(destroy_flow_group_in, din, table_id,
			 MLX5_GET(create_flow_group_in, in, table_id));
		MLX5_SET(destroy_flow_group_in, din, group_id, *obj_id);
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DESTROY_FLOW_GROUP);
		break;
	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
		*dinlen = MLX5_ST_SZ_BYTES(delete_fte_in);
		/* FTEs have no firmware-assigned id; the flow_index chosen by
		 * the caller identifies the entry.
		 */
		*obj_id = MLX5_GET(set_fte_in, in, flow_index);
		MLX5_SET(delete_fte_in, din, other_vport,
			 MLX5_GET(set_fte_in,  in, other_vport));
		MLX5_SET(delete_fte_in, din, vport_number,
			 MLX5_GET(set_fte_in, in, vport_number));
		MLX5_SET(delete_fte_in, din, table_type,
			 MLX5_GET(set_fte_in, in, table_type));
		MLX5_SET(delete_fte_in, din, table_id,
			 MLX5_GET(set_fte_in, in, table_id));
		MLX5_SET(delete_fte_in, din, flow_index, *obj_id);
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
		break;
	case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DEALLOC_FLOW_COUNTER);
		break;
	case MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT);
		break;
	case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT);
		break;
	case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
		*dinlen = MLX5_ST_SZ_BYTES(destroy_scheduling_element_in);
		*obj_id = MLX5_GET(create_scheduling_element_out, out,
				   scheduling_element_id);
		MLX5_SET(destroy_scheduling_element_in, din,
			 scheduling_hierarchy,
			 MLX5_GET(create_scheduling_element_in, in,
				  scheduling_hierarchy));
		MLX5_SET(destroy_scheduling_element_in, din,
			 scheduling_element_id, *obj_id);
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT);
		break;
	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
		*dinlen = MLX5_ST_SZ_BYTES(delete_vxlan_udp_dport_in);
		/* Identified by the UDP port number chosen in the input. */
		*obj_id = MLX5_GET(add_vxlan_udp_dport_in, in, vxlan_udp_port);
		MLX5_SET(delete_vxlan_udp_dport_in, din, vxlan_udp_port, *obj_id);
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT);
		break;
	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
		*dinlen = MLX5_ST_SZ_BYTES(delete_l2_table_entry_in);
		*obj_id = MLX5_GET(set_l2_table_entry_in, in, table_index);
		MLX5_SET(delete_l2_table_entry_in, din, table_index, *obj_id);
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY);
		break;
	case MLX5_CMD_OP_CREATE_QP:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_QP);
		break;
	case MLX5_CMD_OP_CREATE_SRQ:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_SRQ);
		break;
	case MLX5_CMD_OP_CREATE_XRC_SRQ:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DESTROY_XRC_SRQ);
		break;
	case MLX5_CMD_OP_CREATE_DCT:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_DCT);
		break;
	case MLX5_CMD_OP_CREATE_XRQ:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_XRQ);
		break;
	case MLX5_CMD_OP_ATTACH_TO_MCG:
		*dinlen = MLX5_ST_SZ_BYTES(detach_from_mcg_in);
		MLX5_SET(detach_from_mcg_in, din, qpn,
			 MLX5_GET(attach_to_mcg_in, in, qpn));
		memcpy(MLX5_ADDR_OF(detach_from_mcg_in, din, multicast_gid),
		       MLX5_ADDR_OF(attach_to_mcg_in, in, multicast_gid),
		       MLX5_FLD_SZ_BYTES(attach_to_mcg_in, multicast_gid));
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DETACH_FROM_MCG);
		break;
	case MLX5_CMD_OP_ALLOC_XRCD:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DEALLOC_XRCD);
		break;
	default:
		/* The entry must match to one of the devx_is_obj_create_cmd */
		WARN_ON(true);
		break;
	}
}
1032
/*
 * Record a freshly created indirect (KLM/KSM) mkey in the core mkey radix
 * tree so it can later be looked up by key.
 * Fills obj->devx_mr from the create_mkey input/output mailboxes.
 * Returns 0 on success or the radix-tree insertion error.
 */
static int devx_handle_mkey_indirect(struct devx_obj *obj,
				     struct mlx5_ib_dev *dev,
				     void *in, void *out)
{
	struct mlx5_mkey_table *table = &dev->mdev->priv.mkey_table;
	struct mlx5_ib_devx_mr *devx_mr = &obj->devx_mr;
	unsigned long flags;
	struct mlx5_core_mkey *mkey;
	void *mkc;
	u8 key;
	int err;

	mkey = &devx_mr->mmkey;
	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	key = MLX5_GET(mkc, mkc, mkey_7_0);
	/* Full key = firmware-assigned index | user-chosen low byte */
	mkey->key = mlx5_idx_to_mkey(
			MLX5_GET(create_mkey_out, out, mkey_index)) | key;
	mkey->type = MLX5_MKEY_INDIRECT_DEVX;
	mkey->iova = MLX5_GET64(mkc, mkc, start_addr);
	mkey->size = MLX5_GET64(mkc, mkc, len);
	mkey->pd = MLX5_GET(mkc, mkc, pd);
	devx_mr->ndescs = MLX5_GET(mkc, mkc, translations_octword_size);

	/* irqsave: the mkey table lock is also taken from other contexts;
	 * insert keyed by the base mkey (variant byte stripped).
	 */
	write_lock_irqsave(&table->lock, flags);
	err = radix_tree_insert(&table->tree, mlx5_base_mkey(mkey->key),
				mkey);
	write_unlock_irqrestore(&table->lock, flags);
	return err;
}
1062
Yishai Hadasfa31f142019-01-13 16:01:16 +02001063static int devx_handle_mkey_create(struct mlx5_ib_dev *dev,
1064 struct devx_obj *obj,
1065 void *in, int in_len)
1066{
1067 int min_len = MLX5_BYTE_OFF(create_mkey_in, memory_key_mkey_entry) +
1068 MLX5_FLD_SZ_BYTES(create_mkey_in,
1069 memory_key_mkey_entry);
1070 void *mkc;
1071 u8 access_mode;
1072
1073 if (in_len < min_len)
1074 return -EINVAL;
1075
1076 mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
1077
1078 access_mode = MLX5_GET(mkc, mkc, access_mode_1_0);
1079 access_mode |= MLX5_GET(mkc, mkc, access_mode_4_2) << 2;
1080
1081 if (access_mode == MLX5_MKC_ACCESS_MODE_KLMS ||
Yishai Hadas534fd7a2019-01-13 16:01:17 +02001082 access_mode == MLX5_MKC_ACCESS_MODE_KSM) {
1083 if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING))
1084 obj->flags |= DEVX_OBJ_FLAGS_INDIRECT_MKEY;
Yishai Hadasfa31f142019-01-13 16:01:16 +02001085 return 0;
Yishai Hadas534fd7a2019-01-13 16:01:17 +02001086 }
Yishai Hadasfa31f142019-01-13 16:01:16 +02001087
1088 MLX5_SET(create_mkey_in, in, mkey_umem_valid, 1);
1089 return 0;
1090}
1091
Yishai Hadas534fd7a2019-01-13 16:01:17 +02001092static void devx_free_indirect_mkey(struct rcu_head *rcu)
1093{
1094 kfree(container_of(rcu, struct devx_obj, devx_mr.rcu));
1095}
1096
/* This function, which deletes from the radix tree, needs to be called
 * before destroying the underlying mkey. Otherwise a race might occur:
 * another thread could get the same mkey before this one is deleted, and
 * would then fail when inserting its own data into the tree.
 *
 * Note:
 * An error in the destroy is not expected unless there is some other
 * indirect mkey which points to this one. In a kernel cleanup flow it will
 * just be destroyed in the iterative destruction call. In a user flow, if
 * the application didn't close in the expected order, that is its own
 * problem — the mkey won't be part of the tree; in both cases the kernel
 * is safe.
 */
/* Remove an indirect devx mkey from the core radix tree (see the ordering
 * note above: must run before the mkey is destroyed in hardware).
 */
static void devx_cleanup_mkey(struct devx_obj *obj)
{
	struct mlx5_mkey_table *table = &obj->mdev->priv.mkey_table;
	unsigned long flags;

	write_lock_irqsave(&table->lock, flags);
	radix_tree_delete(&table->tree, mlx5_base_mkey(obj->devx_mr.mmkey.key));
	write_unlock_irqrestore(&table->lock, flags);
}
1118
/*
 * uobject cleanup callback for a DEVX object: destroy the firmware object
 * using the destroy mailbox prepared at create time (or the DCT core API)
 * and release the kernel-side state.
 */
static int devx_obj_cleanup(struct ib_uobject *uobject,
			    enum rdma_remove_reason why,
			    struct uverbs_attr_bundle *attrs)
{
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
	struct devx_obj *obj = uobject->object;
	int ret;

	/* Must drop the mkey from the radix tree BEFORE destroying it in
	 * hardware — see the comment above devx_cleanup_mkey().
	 */
	if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY)
		devx_cleanup_mkey(obj);

	/* DCTs are torn down through the core DCT API; everything else uses
	 * the pre-built destroy command in obj->dinbox.
	 */
	if (obj->flags & DEVX_OBJ_FLAGS_DCT)
		ret = mlx5_core_destroy_dct(obj->mdev, &obj->core_dct);
	else
		ret = mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, out,
				    sizeof(out));
	/* A retryable failure leaves the uobject alive for another attempt. */
	if (ib_is_destroy_retryable(ret, why, uobject))
		return ret;

	if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY) {
		struct mlx5_ib_dev *dev = to_mdev(uobject->context->device);

		/* Defer the kfree until an SRCU grace period elapses, since
		 * readers under dev->mr_srcu may still reference the mkey.
		 */
		call_srcu(&dev->mr_srcu, &obj->devx_mr.rcu,
			  devx_free_indirect_mkey);
		return ret;
	}

	kfree(obj);
	return ret;
}
1149
/*
 * Create a DEVX-managed firmware object from a raw user command mailbox.
 * On success the uobject owns a devx_obj holding a ready-to-run destroy
 * command (built by devx_obj_build_destroy_cmd()) and the encoded obj_id
 * used later to validate modify/query commands against this handle.
 */
static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)(
	struct uverbs_attr_bundle *attrs)
{
	void *cmd_in = uverbs_attr_get_alloced_ptr(attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_IN);
	int cmd_out_len = uverbs_attr_get_len(attrs,
					MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT);
	int cmd_in_len = uverbs_attr_get_len(attrs,
					MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_IN);
	void *cmd_out;
	struct ib_uobject *uobj = uverbs_attr_get_uobject(
		attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_HANDLE);
	struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
		&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
	struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
	struct devx_obj *obj;
	int err;
	int uid;
	u32 obj_id;
	u16 opcode;

	uid = devx_get_uid(c, cmd_in);
	if (uid < 0)
		return uid;

	/* Reject anything that is not a known create/alloc command. */
	if (!devx_is_obj_create_cmd(cmd_in, &opcode))
		return -EINVAL;

	/* cmd_out is bundle-allocated and freed with attrs, not here. */
	cmd_out = uverbs_zalloc(attrs, cmd_out_len);
	if (IS_ERR(cmd_out))
		return PTR_ERR(cmd_out);

	obj = kzalloc(sizeof(struct devx_obj), GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	/* Tag the command with the caller's uid for firmware-side checks. */
	MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
	if (opcode == MLX5_CMD_OP_CREATE_MKEY) {
		err = devx_handle_mkey_create(dev, obj, cmd_in, cmd_in_len);
		if (err)
			goto obj_free;
	} else {
		devx_set_umem_valid(cmd_in);
	}

	/* DCTs go through the core resource tracker; everything else is
	 * executed as a raw command.
	 */
	if (opcode == MLX5_CMD_OP_CREATE_DCT) {
		obj->flags |= DEVX_OBJ_FLAGS_DCT;
		err = mlx5_core_create_dct(dev->mdev, &obj->core_dct,
					   cmd_in, cmd_in_len,
					   cmd_out, cmd_out_len);
	} else {
		err = mlx5_cmd_exec(dev->mdev, cmd_in,
				    cmd_in_len,
				    cmd_out, cmd_out_len);
	}

	if (err)
		goto obj_free;

	uobj->object = obj;
	obj->mdev = dev->mdev;
	/* Prepare the destroy mailbox now so cleanup never needs to parse
	 * the user's input again.
	 */
	devx_obj_build_destroy_cmd(cmd_in, cmd_out, obj->dinbox, &obj->dinlen,
				   &obj_id);
	WARN_ON(obj->dinlen > MLX5_MAX_DESTROY_INBOX_SIZE_DW * sizeof(u32));

	if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY) {
		err = devx_handle_mkey_indirect(obj, dev, cmd_in, cmd_out);
		if (err)
			goto obj_destroy;
	}

	err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT, cmd_out, cmd_out_len);
	if (err)
		goto err_copy;

	obj->obj_id = get_enc_obj_id(opcode, obj_id);
	return 0;

	/* Error unwind mirrors the construction order above. */
err_copy:
	if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY)
		devx_cleanup_mkey(obj);
obj_destroy:
	if (obj->flags & DEVX_OBJ_FLAGS_DCT)
		mlx5_core_destroy_dct(obj->mdev, &obj->core_dct);
	else
		mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, out,
			      sizeof(out));
obj_free:
	kfree(obj);
	return err;
}
1241
Jason Gunthorpee83f0ec2018-07-25 21:40:18 -06001242static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_MODIFY)(
Jason Gunthorpe15a1b4b2018-11-25 20:51:15 +02001243 struct uverbs_attr_bundle *attrs)
Yishai Hadase662e142018-06-17 13:00:02 +03001244{
Yishai Hadase662e142018-06-17 13:00:02 +03001245 void *cmd_in = uverbs_attr_get_alloced_ptr(attrs, MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN);
1246 int cmd_out_len = uverbs_attr_get_len(attrs,
1247 MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_OUT);
1248 struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs,
1249 MLX5_IB_ATTR_DEVX_OBJ_MODIFY_HANDLE);
Shamir Rabinovitch89944452019-02-07 18:44:49 +02001250 struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
1251 &attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
1252 struct mlx5_ib_dev *mdev = to_mdev(c->ibucontext.device);
Yishai Hadase662e142018-06-17 13:00:02 +03001253 void *cmd_out;
1254 int err;
Yishai Hadas7e1335a2018-09-20 21:45:20 +03001255 int uid;
Yishai Hadase662e142018-06-17 13:00:02 +03001256
Yishai Hadas7e1335a2018-09-20 21:45:20 +03001257 uid = devx_get_uid(c, cmd_in);
1258 if (uid < 0)
1259 return uid;
Yishai Hadase662e142018-06-17 13:00:02 +03001260
1261 if (!devx_is_obj_modify_cmd(cmd_in))
1262 return -EINVAL;
1263
Yishai Hadas34613eb2018-11-26 08:28:35 +02001264 if (!devx_is_valid_obj_id(uobj, cmd_in))
Yishai Hadase662e142018-06-17 13:00:02 +03001265 return -EINVAL;
1266
Jason Gunthorpeb61815e2018-08-09 20:14:41 -06001267 cmd_out = uverbs_zalloc(attrs, cmd_out_len);
1268 if (IS_ERR(cmd_out))
1269 return PTR_ERR(cmd_out);
Yishai Hadase662e142018-06-17 13:00:02 +03001270
Yishai Hadas7e1335a2018-09-20 21:45:20 +03001271 MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
Yishai Hadasba1a0572018-09-20 21:39:33 +03001272 devx_set_umem_valid(cmd_in);
1273
Yishai Hadas34613eb2018-11-26 08:28:35 +02001274 err = mlx5_cmd_exec(mdev->mdev, cmd_in,
Yishai Hadase662e142018-06-17 13:00:02 +03001275 uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN),
1276 cmd_out, cmd_out_len);
1277 if (err)
Jason Gunthorpeb61815e2018-08-09 20:14:41 -06001278 return err;
Yishai Hadase662e142018-06-17 13:00:02 +03001279
Jason Gunthorpeb61815e2018-08-09 20:14:41 -06001280 return uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_OUT,
1281 cmd_out, cmd_out_len);
Yishai Hadase662e142018-06-17 13:00:02 +03001282}
1283
Jason Gunthorpee83f0ec2018-07-25 21:40:18 -06001284static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_QUERY)(
Jason Gunthorpe15a1b4b2018-11-25 20:51:15 +02001285 struct uverbs_attr_bundle *attrs)
Yishai Hadase662e142018-06-17 13:00:02 +03001286{
Yishai Hadase662e142018-06-17 13:00:02 +03001287 void *cmd_in = uverbs_attr_get_alloced_ptr(attrs, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN);
1288 int cmd_out_len = uverbs_attr_get_len(attrs,
1289 MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_OUT);
1290 struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs,
1291 MLX5_IB_ATTR_DEVX_OBJ_QUERY_HANDLE);
Shamir Rabinovitch89944452019-02-07 18:44:49 +02001292 struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
1293 &attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
Yishai Hadase662e142018-06-17 13:00:02 +03001294 void *cmd_out;
1295 int err;
Yishai Hadas7e1335a2018-09-20 21:45:20 +03001296 int uid;
Shamir Rabinovitch89944452019-02-07 18:44:49 +02001297 struct mlx5_ib_dev *mdev = to_mdev(c->ibucontext.device);
Yishai Hadase662e142018-06-17 13:00:02 +03001298
Yishai Hadas7e1335a2018-09-20 21:45:20 +03001299 uid = devx_get_uid(c, cmd_in);
1300 if (uid < 0)
1301 return uid;
Yishai Hadase662e142018-06-17 13:00:02 +03001302
1303 if (!devx_is_obj_query_cmd(cmd_in))
1304 return -EINVAL;
1305
Yishai Hadas34613eb2018-11-26 08:28:35 +02001306 if (!devx_is_valid_obj_id(uobj, cmd_in))
Yishai Hadase662e142018-06-17 13:00:02 +03001307 return -EINVAL;
1308
Jason Gunthorpeb61815e2018-08-09 20:14:41 -06001309 cmd_out = uverbs_zalloc(attrs, cmd_out_len);
1310 if (IS_ERR(cmd_out))
1311 return PTR_ERR(cmd_out);
Yishai Hadase662e142018-06-17 13:00:02 +03001312
Yishai Hadas7e1335a2018-09-20 21:45:20 +03001313 MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
Yishai Hadas34613eb2018-11-26 08:28:35 +02001314 err = mlx5_cmd_exec(mdev->mdev, cmd_in,
Yishai Hadase662e142018-06-17 13:00:02 +03001315 uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN),
1316 cmd_out, cmd_out_len);
1317 if (err)
Jason Gunthorpeb61815e2018-08-09 20:14:41 -06001318 return err;
Yishai Hadase662e142018-06-17 13:00:02 +03001319
Jason Gunthorpeb61815e2018-08-09 20:14:41 -06001320 return uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_OUT,
1321 cmd_out, cmd_out_len);
Yishai Hadase662e142018-06-17 13:00:02 +03001322}
1323
/* Queue of completed asynchronous command results for one event FD. */
struct devx_async_event_queue {
	spinlock_t lock;		/* protects event_list */
	wait_queue_head_t poll_wait;	/* woken when a result is queued */
	struct list_head event_list;	/* of struct devx_async_data.list */
	atomic_t bytes_in_use;		/* outstanding result bytes, capped
					 * at MAX_ASYNC_BYTES_IN_USE
					 */
	u8 is_destroyed:1;		/* teardown flag — set outside this
					 * chunk, presumably on uobject
					 * removal
					 */
};

/* File object backing a DEVX async command completion FD. */
struct devx_async_cmd_event_file {
	struct ib_uobject uobj;
	struct devx_async_event_queue ev_queue;
	struct mlx5_async_ctx async_ctx;	/* mlx5 async cmd context */
};
1337
1338static void devx_init_event_queue(struct devx_async_event_queue *ev_queue)
1339{
1340 spin_lock_init(&ev_queue->lock);
1341 INIT_LIST_HEAD(&ev_queue->event_list);
1342 init_waitqueue_head(&ev_queue->poll_wait);
Yishai Hadasa124edb2019-01-22 08:29:57 +02001343 atomic_set(&ev_queue->bytes_in_use, 0);
Yishai Hadaseaebaf72019-01-22 08:29:59 +02001344 ev_queue->is_destroyed = 0;
Yishai Hadas6bf8f222019-01-22 08:29:56 +02001345}
1346
1347static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_ASYNC_CMD_FD_ALLOC)(
1348 struct uverbs_attr_bundle *attrs)
1349{
1350 struct devx_async_cmd_event_file *ev_file;
1351
1352 struct ib_uobject *uobj = uverbs_attr_get_uobject(
1353 attrs, MLX5_IB_ATTR_DEVX_ASYNC_CMD_FD_ALLOC_HANDLE);
Yishai Hadasa124edb2019-01-22 08:29:57 +02001354 struct mlx5_ib_dev *mdev = to_mdev(uobj->context->device);
Yishai Hadas6bf8f222019-01-22 08:29:56 +02001355
1356 ev_file = container_of(uobj, struct devx_async_cmd_event_file,
1357 uobj);
1358 devx_init_event_queue(&ev_file->ev_queue);
Yishai Hadasa124edb2019-01-22 08:29:57 +02001359 mlx5_cmd_init_async_ctx(mdev->mdev, &ev_file->async_ctx);
Yishai Hadas6bf8f222019-01-22 08:29:56 +02001360 return 0;
1361}
1362
/*
 * Completion callback for an asynchronous DEVX query (invoked by the mlx5
 * async command context). Queues the finished result on the event file's
 * list, wakes pollers, and drops the file reference taken at submit time.
 */
static void devx_query_callback(int status, struct mlx5_async_work *context)
{
	struct devx_async_data *async_data =
		container_of(context, struct devx_async_data, cb_work);
	struct ib_uobject *fd_uobj = async_data->fd_uobj;
	struct devx_async_cmd_event_file *ev_file;
	struct devx_async_event_queue *ev_queue;
	unsigned long flags;

	ev_file = container_of(fd_uobj, struct devx_async_cmd_event_file,
			       uobj);
	ev_queue = &ev_file->ev_queue;

	/* NOTE(review): irqsave locking suggests this callback may run in
	 * IRQ context — confirm against the mlx5 async cmd implementation.
	 */
	spin_lock_irqsave(&ev_queue->lock, flags);
	list_add_tail(&async_data->list, &ev_queue->event_list);
	spin_unlock_irqrestore(&ev_queue->lock, flags);

	wake_up_interruptible(&ev_queue->poll_wait);
	fput(fd_uobj->object);	/* pairs with get_file() at submission */
}
1383
1384#define MAX_ASYNC_BYTES_IN_USE (1024 * 1024) /* 1MB */
1385
/*
 * Submit an asynchronous query on a DEVX object. The result is delivered
 * later through the async command event FD (see devx_query_callback()).
 * Per-FD outstanding result bytes are capped at MAX_ASYNC_BYTES_IN_USE;
 * -EAGAIN is returned when the quota is exceeded.
 */
static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_ASYNC_QUERY)(
	struct uverbs_attr_bundle *attrs)
{
	void *cmd_in = uverbs_attr_get_alloced_ptr(attrs,
				MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_CMD_IN);
	struct ib_uobject *uobj = uverbs_attr_get_uobject(
				attrs,
				MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_HANDLE);
	u16 cmd_out_len;
	struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
		&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
	struct ib_uobject *fd_uobj;
	int err;
	int uid;
	struct mlx5_ib_dev *mdev = to_mdev(c->ibucontext.device);
	struct devx_async_cmd_event_file *ev_file;
	struct devx_async_data *async_data;

	uid = devx_get_uid(c, cmd_in);
	if (uid < 0)
		return uid;

	if (!devx_is_obj_query_cmd(cmd_in))
		return -EINVAL;

	err = uverbs_get_const(&cmd_out_len, attrs,
			       MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_OUT_LEN);
	if (err)
		return err;

	/* The command's object id must match the handle's object. */
	if (!devx_is_valid_obj_id(uobj, cmd_in))
		return -EINVAL;

	fd_uobj = uverbs_attr_get_uobject(attrs,
				MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_FD);
	if (IS_ERR(fd_uobj))
		return PTR_ERR(fd_uobj);

	ev_file = container_of(fd_uobj, struct devx_async_cmd_event_file,
			       uobj);

	/* Reserve the output bytes against the per-FD quota; roll the
	 * reservation back on any later failure.
	 */
	if (atomic_add_return(cmd_out_len, &ev_file->ev_queue.bytes_in_use) >
			MAX_ASYNC_BYTES_IN_USE) {
		atomic_sub(cmd_out_len, &ev_file->ev_queue.bytes_in_use);
		return -EAGAIN;
	}

	/* hdr.out_data is a flexible array sized by the requested out len. */
	async_data = kvzalloc(struct_size(async_data, hdr.out_data,
					  cmd_out_len), GFP_KERNEL);
	if (!async_data) {
		err = -ENOMEM;
		goto sub_bytes;
	}

	err = uverbs_copy_from(&async_data->hdr.wr_id, attrs,
			       MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_WR_ID);
	if (err)
		goto free_async;

	async_data->cmd_out_len = cmd_out_len;
	async_data->mdev = mdev;
	async_data->fd_uobj = fd_uobj;

	/* Hold the FD file so it outlives the in-flight command; released
	 * by devx_query_callback() (or below on submit failure).
	 */
	get_file(fd_uobj->object);
	MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
	err = mlx5_cmd_exec_cb(&ev_file->async_ctx, cmd_in,
		    uverbs_attr_get_len(attrs,
				MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_CMD_IN),
		    async_data->hdr.out_data,
		    async_data->cmd_out_len,
		    devx_query_callback, &async_data->cb_work);

	if (err)
		goto cb_err;

	return 0;

cb_err:
	fput(fd_uobj->object);
free_async:
	kvfree(async_data);
sub_bytes:
	atomic_sub(cmd_out_len, &ev_file->ev_queue.bytes_in_use);
	return err;
}
1471
/*
 * Validate the user-supplied address/length/access attributes and pin the
 * described user memory for a DEVX umem registration.
 * On success obj->umem, obj->page_shift, obj->ncont and obj->page_offset
 * are filled in. Returns 0 or a negative errno.
 */
static int devx_umem_get(struct mlx5_ib_dev *dev, struct ib_ucontext *ucontext,
			 struct uverbs_attr_bundle *attrs,
			 struct devx_umem *obj)
{
	u64 addr;
	size_t size;
	u32 access;
	int npages;
	int err;
	u32 page_mask;

	if (uverbs_copy_from(&addr, attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_ADDR) ||
	    uverbs_copy_from(&size, attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_LEN))
		return -EFAULT;

	/* Only these access flags are accepted for a devx umem. */
	err = uverbs_get_flags32(&access, attrs,
				 MLX5_IB_ATTR_DEVX_UMEM_REG_ACCESS,
				 IB_ACCESS_LOCAL_WRITE |
				 IB_ACCESS_REMOTE_WRITE |
				 IB_ACCESS_REMOTE_READ);
	if (err)
		return err;

	err = ib_check_mr_access(access);
	if (err)
		return err;

	obj->umem = ib_umem_get(&attrs->driver_udata, addr, size, access, 0);
	if (IS_ERR(obj->umem))
		return PTR_ERR(obj->umem);

	/* Work out the page size/count usable for the pinned region. */
	mlx5_ib_cont_pages(obj->umem, obj->umem->address,
			   MLX5_MKEY_PAGE_SHIFT_MASK, &npages,
			   &obj->page_shift, &obj->ncont, NULL);

	if (!npages) {
		ib_umem_release(obj->umem);
		return -EINVAL;
	}

	/* Offset of the start address within its page. */
	page_mask = (1 << obj->page_shift) - 1;
	obj->page_offset = obj->umem->address & page_mask;

	return 0;
}
1517
Jason Gunthorpeb61815e2018-08-09 20:14:41 -06001518static int devx_umem_reg_cmd_alloc(struct uverbs_attr_bundle *attrs,
1519 struct devx_umem *obj,
Yishai Hadasaeae9452018-06-17 13:00:04 +03001520 struct devx_umem_reg_cmd *cmd)
1521{
1522 cmd->inlen = MLX5_ST_SZ_BYTES(create_umem_in) +
1523 (MLX5_ST_SZ_BYTES(mtt) * obj->ncont);
Jason Gunthorpeb61815e2018-08-09 20:14:41 -06001524 cmd->in = uverbs_zalloc(attrs, cmd->inlen);
1525 return PTR_ERR_OR_ZERO(cmd->in);
Yishai Hadasaeae9452018-06-17 13:00:04 +03001526}
1527
/*
 * Fill the CREATE_UMEM command mailbox from the pinned umem: opcode,
 * MTT count, page size/offset, and the per-page MTT translation entries.
 */
static void devx_umem_reg_cmd_build(struct mlx5_ib_dev *dev,
				    struct devx_umem *obj,
				    struct devx_umem_reg_cmd *cmd)
{
	void *umem;
	__be64 *mtt;

	umem = MLX5_ADDR_OF(create_umem_in, cmd->in, umem);
	mtt = (__be64 *)MLX5_ADDR_OF(umem, umem, mtt);

	MLX5_SET(create_umem_in, cmd->in, opcode, MLX5_CMD_OP_CREATE_UMEM);
	MLX5_SET64(umem, umem, num_of_mtt, obj->ncont);
	/* log_page_size is expressed relative to the adapter's 4K page. */
	MLX5_SET(umem, umem, log_page_size, obj->page_shift -
					    MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET(umem, umem, page_offset, obj->page_offset);
	/* Grant device write access only if the umem itself is writable. */
	mlx5_ib_populate_pas(dev, obj->umem, obj->page_shift, mtt,
			     (obj->umem->writable ? MLX5_IB_MTT_WRITE : 0) |
			      MLX5_IB_MTT_READ);
}
1547
Jason Gunthorpee83f0ec2018-07-25 21:40:18 -06001548static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_UMEM_REG)(
Jason Gunthorpe15a1b4b2018-11-25 20:51:15 +02001549 struct uverbs_attr_bundle *attrs)
Yishai Hadasaeae9452018-06-17 13:00:04 +03001550{
Yishai Hadasaeae9452018-06-17 13:00:04 +03001551 struct devx_umem_reg_cmd cmd;
1552 struct devx_umem *obj;
Jason Gunthorpec36ee462018-07-10 20:55:22 -06001553 struct ib_uobject *uobj = uverbs_attr_get_uobject(
1554 attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_HANDLE);
Yishai Hadasaeae9452018-06-17 13:00:04 +03001555 u32 obj_id;
Shamir Rabinovitch89944452019-02-07 18:44:49 +02001556 struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
1557 &attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
Jason Gunthorpec36ee462018-07-10 20:55:22 -06001558 struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
Yishai Hadasaeae9452018-06-17 13:00:04 +03001559 int err;
1560
1561 if (!c->devx_uid)
Yishai Hadas7e1335a2018-09-20 21:45:20 +03001562 return -EINVAL;
1563
Yishai Hadasaeae9452018-06-17 13:00:04 +03001564 obj = kzalloc(sizeof(struct devx_umem), GFP_KERNEL);
1565 if (!obj)
1566 return -ENOMEM;
1567
1568 err = devx_umem_get(dev, &c->ibucontext, attrs, obj);
1569 if (err)
1570 goto err_obj_free;
1571
Jason Gunthorpeb61815e2018-08-09 20:14:41 -06001572 err = devx_umem_reg_cmd_alloc(attrs, obj, &cmd);
Yishai Hadasaeae9452018-06-17 13:00:04 +03001573 if (err)
1574 goto err_umem_release;
1575
1576 devx_umem_reg_cmd_build(dev, obj, &cmd);
1577
Yishai Hadas6e3722b2018-12-19 16:28:15 +02001578 MLX5_SET(create_umem_in, cmd.in, uid, c->devx_uid);
Yishai Hadasaeae9452018-06-17 13:00:04 +03001579 err = mlx5_cmd_exec(dev->mdev, cmd.in, cmd.inlen, cmd.out,
1580 sizeof(cmd.out));
1581 if (err)
Jason Gunthorpeb61815e2018-08-09 20:14:41 -06001582 goto err_umem_release;
Yishai Hadasaeae9452018-06-17 13:00:04 +03001583
1584 obj->mdev = dev->mdev;
1585 uobj->object = obj;
1586 devx_obj_build_destroy_cmd(cmd.in, cmd.out, obj->dinbox, &obj->dinlen, &obj_id);
1587 err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_OUT_ID, &obj_id, sizeof(obj_id));
1588 if (err)
1589 goto err_umem_destroy;
1590
Yishai Hadasaeae9452018-06-17 13:00:04 +03001591 return 0;
1592
1593err_umem_destroy:
1594 mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, cmd.out, sizeof(cmd.out));
Yishai Hadasaeae9452018-06-17 13:00:04 +03001595err_umem_release:
1596 ib_umem_release(obj->umem);
1597err_obj_free:
1598 kfree(obj);
1599 return err;
1600}
1601
Yishai Hadasaeae9452018-06-17 13:00:04 +03001602static int devx_umem_cleanup(struct ib_uobject *uobject,
Shamir Rabinovitcha6a37972019-03-31 19:10:04 +03001603 enum rdma_remove_reason why,
1604 struct uverbs_attr_bundle *attrs)
Yishai Hadasaeae9452018-06-17 13:00:04 +03001605{
1606 struct devx_umem *obj = uobject->object;
1607 u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
1608 int err;
1609
1610 err = mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, out, sizeof(out));
Yishai Hadas1c774832018-06-20 17:11:39 +03001611 if (ib_is_destroy_retryable(err, why, uobject))
Yishai Hadasaeae9452018-06-17 13:00:04 +03001612 return err;
1613
1614 ib_umem_release(obj->umem);
1615 kfree(obj);
1616 return 0;
1617}
1618
Yishai Hadas6bf8f222019-01-22 08:29:56 +02001619static ssize_t devx_async_cmd_event_read(struct file *filp, char __user *buf,
1620 size_t count, loff_t *pos)
1621{
Yishai Hadas4accbb32019-01-22 08:29:58 +02001622 struct devx_async_cmd_event_file *comp_ev_file = filp->private_data;
1623 struct devx_async_event_queue *ev_queue = &comp_ev_file->ev_queue;
1624 struct devx_async_data *event;
1625 int ret = 0;
1626 size_t eventsz;
1627
1628 spin_lock_irq(&ev_queue->lock);
1629
1630 while (list_empty(&ev_queue->event_list)) {
1631 spin_unlock_irq(&ev_queue->lock);
1632
1633 if (filp->f_flags & O_NONBLOCK)
1634 return -EAGAIN;
1635
1636 if (wait_event_interruptible(
1637 ev_queue->poll_wait,
Yishai Hadaseaebaf72019-01-22 08:29:59 +02001638 (!list_empty(&ev_queue->event_list) ||
1639 ev_queue->is_destroyed))) {
Yishai Hadas4accbb32019-01-22 08:29:58 +02001640 return -ERESTARTSYS;
1641 }
Yishai Hadaseaebaf72019-01-22 08:29:59 +02001642
1643 if (list_empty(&ev_queue->event_list) &&
1644 ev_queue->is_destroyed)
1645 return -EIO;
1646
Yishai Hadas4accbb32019-01-22 08:29:58 +02001647 spin_lock_irq(&ev_queue->lock);
1648 }
1649
1650 event = list_entry(ev_queue->event_list.next,
1651 struct devx_async_data, list);
1652 eventsz = event->cmd_out_len +
1653 sizeof(struct mlx5_ib_uapi_devx_async_cmd_hdr);
1654
1655 if (eventsz > count) {
1656 spin_unlock_irq(&ev_queue->lock);
1657 return -ENOSPC;
1658 }
1659
1660 list_del(ev_queue->event_list.next);
1661 spin_unlock_irq(&ev_queue->lock);
1662
1663 if (copy_to_user(buf, &event->hdr, eventsz))
1664 ret = -EFAULT;
1665 else
1666 ret = eventsz;
1667
1668 atomic_sub(event->cmd_out_len, &ev_queue->bytes_in_use);
1669 kvfree(event);
1670 return ret;
Yishai Hadas6bf8f222019-01-22 08:29:56 +02001671}
1672
1673static int devx_async_cmd_event_close(struct inode *inode, struct file *filp)
1674{
Yishai Hadasa124edb2019-01-22 08:29:57 +02001675 struct ib_uobject *uobj = filp->private_data;
1676 struct devx_async_cmd_event_file *comp_ev_file = container_of(
1677 uobj, struct devx_async_cmd_event_file, uobj);
1678 struct devx_async_data *entry, *tmp;
1679
1680 spin_lock_irq(&comp_ev_file->ev_queue.lock);
1681 list_for_each_entry_safe(entry, tmp,
1682 &comp_ev_file->ev_queue.event_list, list)
1683 kvfree(entry);
1684 spin_unlock_irq(&comp_ev_file->ev_queue.lock);
1685
Yishai Hadas6bf8f222019-01-22 08:29:56 +02001686 uverbs_close_fd(filp);
1687 return 0;
1688}
1689
1690static __poll_t devx_async_cmd_event_poll(struct file *filp,
1691 struct poll_table_struct *wait)
1692{
Yishai Hadas4accbb32019-01-22 08:29:58 +02001693 struct devx_async_cmd_event_file *comp_ev_file = filp->private_data;
1694 struct devx_async_event_queue *ev_queue = &comp_ev_file->ev_queue;
1695 __poll_t pollflags = 0;
1696
1697 poll_wait(filp, &ev_queue->poll_wait, wait);
1698
1699 spin_lock_irq(&ev_queue->lock);
Yishai Hadaseaebaf72019-01-22 08:29:59 +02001700 if (ev_queue->is_destroyed)
1701 pollflags = EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
1702 else if (!list_empty(&ev_queue->event_list))
Yishai Hadas4accbb32019-01-22 08:29:58 +02001703 pollflags = EPOLLIN | EPOLLRDNORM;
1704 spin_unlock_irq(&ev_queue->lock);
1705
1706 return pollflags;
Yishai Hadas6bf8f222019-01-22 08:29:56 +02001707}
1708
Bart Van Assche1f687ed2019-03-27 16:50:48 -07001709static const struct file_operations devx_async_cmd_event_fops = {
Yishai Hadas6bf8f222019-01-22 08:29:56 +02001710 .owner = THIS_MODULE,
1711 .read = devx_async_cmd_event_read,
1712 .poll = devx_async_cmd_event_poll,
1713 .release = devx_async_cmd_event_close,
1714 .llseek = no_llseek,
1715};
1716
1717static int devx_hot_unplug_async_cmd_event_file(struct ib_uobject *uobj,
1718 enum rdma_remove_reason why)
1719{
Yishai Hadasa124edb2019-01-22 08:29:57 +02001720 struct devx_async_cmd_event_file *comp_ev_file =
1721 container_of(uobj, struct devx_async_cmd_event_file,
1722 uobj);
Yishai Hadaseaebaf72019-01-22 08:29:59 +02001723 struct devx_async_event_queue *ev_queue = &comp_ev_file->ev_queue;
1724
1725 spin_lock_irq(&ev_queue->lock);
1726 ev_queue->is_destroyed = 1;
1727 spin_unlock_irq(&ev_queue->lock);
1728
1729 if (why == RDMA_REMOVE_DRIVER_REMOVE)
1730 wake_up_interruptible(&ev_queue->poll_wait);
Yishai Hadasa124edb2019-01-22 08:29:57 +02001731
1732 mlx5_cmd_cleanup_async_ctx(&comp_ev_file->async_ctx);
Yishai Hadas6bf8f222019-01-22 08:29:56 +02001733 return 0;
1734};
1735
Jason Gunthorpe9a119cd2018-07-04 08:50:28 +03001736DECLARE_UVERBS_NAMED_METHOD(
1737 MLX5_IB_METHOD_DEVX_UMEM_REG,
1738 UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_UMEM_REG_HANDLE,
1739 MLX5_IB_OBJECT_DEVX_UMEM,
1740 UVERBS_ACCESS_NEW,
Jason Gunthorpe83bb4442018-07-04 08:50:29 +03001741 UA_MANDATORY),
Jason Gunthorpe9a119cd2018-07-04 08:50:28 +03001742 UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_ADDR,
1743 UVERBS_ATTR_TYPE(u64),
Jason Gunthorpe83bb4442018-07-04 08:50:29 +03001744 UA_MANDATORY),
Jason Gunthorpe9a119cd2018-07-04 08:50:28 +03001745 UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_LEN,
1746 UVERBS_ATTR_TYPE(u64),
Jason Gunthorpe83bb4442018-07-04 08:50:29 +03001747 UA_MANDATORY),
Jason Gunthorpebccd0622018-07-26 16:37:14 -06001748 UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_ACCESS,
1749 enum ib_access_flags),
Jason Gunthorpe9a119cd2018-07-04 08:50:28 +03001750 UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DEVX_UMEM_REG_OUT_ID,
1751 UVERBS_ATTR_TYPE(u32),
Jason Gunthorpe83bb4442018-07-04 08:50:29 +03001752 UA_MANDATORY));
Yishai Hadasaeae9452018-06-17 13:00:04 +03001753
Yishai Hadas528922a2018-07-08 13:24:39 +03001754DECLARE_UVERBS_NAMED_METHOD_DESTROY(
Jason Gunthorpe9a119cd2018-07-04 08:50:28 +03001755 MLX5_IB_METHOD_DEVX_UMEM_DEREG,
1756 UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_UMEM_DEREG_HANDLE,
1757 MLX5_IB_OBJECT_DEVX_UMEM,
1758 UVERBS_ACCESS_DESTROY,
Jason Gunthorpe83bb4442018-07-04 08:50:29 +03001759 UA_MANDATORY));
Yishai Hadasaeae9452018-06-17 13:00:04 +03001760
Jason Gunthorpe9a119cd2018-07-04 08:50:28 +03001761DECLARE_UVERBS_NAMED_METHOD(
1762 MLX5_IB_METHOD_DEVX_QUERY_EQN,
1763 UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_QUERY_EQN_USER_VEC,
1764 UVERBS_ATTR_TYPE(u32),
Jason Gunthorpe83bb4442018-07-04 08:50:29 +03001765 UA_MANDATORY),
Jason Gunthorpe9a119cd2018-07-04 08:50:28 +03001766 UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DEVX_QUERY_EQN_DEV_EQN,
1767 UVERBS_ATTR_TYPE(u32),
Jason Gunthorpe83bb4442018-07-04 08:50:29 +03001768 UA_MANDATORY));
Yishai Hadasf6fe01b2018-06-17 13:00:05 +03001769
Jason Gunthorpe9a119cd2018-07-04 08:50:28 +03001770DECLARE_UVERBS_NAMED_METHOD(
1771 MLX5_IB_METHOD_DEVX_QUERY_UAR,
1772 UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_QUERY_UAR_USER_IDX,
1773 UVERBS_ATTR_TYPE(u32),
Jason Gunthorpe83bb4442018-07-04 08:50:29 +03001774 UA_MANDATORY),
Jason Gunthorpe9a119cd2018-07-04 08:50:28 +03001775 UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DEVX_QUERY_UAR_DEV_IDX,
1776 UVERBS_ATTR_TYPE(u32),
Jason Gunthorpe83bb4442018-07-04 08:50:29 +03001777 UA_MANDATORY));
Yishai Hadas7c043e92018-06-17 13:00:03 +03001778
Jason Gunthorpe9a119cd2018-07-04 08:50:28 +03001779DECLARE_UVERBS_NAMED_METHOD(
1780 MLX5_IB_METHOD_DEVX_OTHER,
1781 UVERBS_ATTR_PTR_IN(
1782 MLX5_IB_ATTR_DEVX_OTHER_CMD_IN,
1783 UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
Jason Gunthorpe83bb4442018-07-04 08:50:29 +03001784 UA_MANDATORY,
Jason Gunthorpe83bb4442018-07-04 08:50:29 +03001785 UA_ALLOC_AND_COPY),
Jason Gunthorpe9a119cd2018-07-04 08:50:28 +03001786 UVERBS_ATTR_PTR_OUT(
1787 MLX5_IB_ATTR_DEVX_OTHER_CMD_OUT,
1788 UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
Jason Gunthorpe540cd692018-07-04 08:50:30 +03001789 UA_MANDATORY));
Yishai Hadas8aa8c952018-06-17 13:00:00 +03001790
Jason Gunthorpe9a119cd2018-07-04 08:50:28 +03001791DECLARE_UVERBS_NAMED_METHOD(
1792 MLX5_IB_METHOD_DEVX_OBJ_CREATE,
1793 UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_CREATE_HANDLE,
1794 MLX5_IB_OBJECT_DEVX_OBJ,
1795 UVERBS_ACCESS_NEW,
Jason Gunthorpe83bb4442018-07-04 08:50:29 +03001796 UA_MANDATORY),
Jason Gunthorpe9a119cd2018-07-04 08:50:28 +03001797 UVERBS_ATTR_PTR_IN(
1798 MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_IN,
1799 UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
Jason Gunthorpe83bb4442018-07-04 08:50:29 +03001800 UA_MANDATORY,
Jason Gunthorpe83bb4442018-07-04 08:50:29 +03001801 UA_ALLOC_AND_COPY),
Jason Gunthorpe9a119cd2018-07-04 08:50:28 +03001802 UVERBS_ATTR_PTR_OUT(
1803 MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT,
1804 UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
Jason Gunthorpe540cd692018-07-04 08:50:30 +03001805 UA_MANDATORY));
Yishai Hadas7efce362018-06-17 13:00:01 +03001806
Yishai Hadas528922a2018-07-08 13:24:39 +03001807DECLARE_UVERBS_NAMED_METHOD_DESTROY(
Jason Gunthorpe9a119cd2018-07-04 08:50:28 +03001808 MLX5_IB_METHOD_DEVX_OBJ_DESTROY,
1809 UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_DESTROY_HANDLE,
1810 MLX5_IB_OBJECT_DEVX_OBJ,
1811 UVERBS_ACCESS_DESTROY,
Jason Gunthorpe83bb4442018-07-04 08:50:29 +03001812 UA_MANDATORY));
Yishai Hadas7efce362018-06-17 13:00:01 +03001813
Jason Gunthorpe9a119cd2018-07-04 08:50:28 +03001814DECLARE_UVERBS_NAMED_METHOD(
1815 MLX5_IB_METHOD_DEVX_OBJ_MODIFY,
1816 UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_MODIFY_HANDLE,
Yishai Hadas34613eb2018-11-26 08:28:35 +02001817 UVERBS_IDR_ANY_OBJECT,
Jason Gunthorpe9a119cd2018-07-04 08:50:28 +03001818 UVERBS_ACCESS_WRITE,
Jason Gunthorpe83bb4442018-07-04 08:50:29 +03001819 UA_MANDATORY),
Jason Gunthorpe9a119cd2018-07-04 08:50:28 +03001820 UVERBS_ATTR_PTR_IN(
1821 MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN,
1822 UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
Jason Gunthorpe83bb4442018-07-04 08:50:29 +03001823 UA_MANDATORY,
Jason Gunthorpe83bb4442018-07-04 08:50:29 +03001824 UA_ALLOC_AND_COPY),
Jason Gunthorpe9a119cd2018-07-04 08:50:28 +03001825 UVERBS_ATTR_PTR_OUT(
1826 MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_OUT,
1827 UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
Jason Gunthorpe540cd692018-07-04 08:50:30 +03001828 UA_MANDATORY));
Yishai Hadase662e142018-06-17 13:00:02 +03001829
Jason Gunthorpe9a119cd2018-07-04 08:50:28 +03001830DECLARE_UVERBS_NAMED_METHOD(
1831 MLX5_IB_METHOD_DEVX_OBJ_QUERY,
1832 UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_QUERY_HANDLE,
Yishai Hadas34613eb2018-11-26 08:28:35 +02001833 UVERBS_IDR_ANY_OBJECT,
Jason Gunthorpe9a119cd2018-07-04 08:50:28 +03001834 UVERBS_ACCESS_READ,
Jason Gunthorpe83bb4442018-07-04 08:50:29 +03001835 UA_MANDATORY),
Jason Gunthorpe9a119cd2018-07-04 08:50:28 +03001836 UVERBS_ATTR_PTR_IN(
1837 MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN,
1838 UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
Jason Gunthorpe83bb4442018-07-04 08:50:29 +03001839 UA_MANDATORY,
Jason Gunthorpe83bb4442018-07-04 08:50:29 +03001840 UA_ALLOC_AND_COPY),
Jason Gunthorpe9a119cd2018-07-04 08:50:28 +03001841 UVERBS_ATTR_PTR_OUT(
1842 MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_OUT,
1843 UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
Jason Gunthorpe540cd692018-07-04 08:50:30 +03001844 UA_MANDATORY));
Yishai Hadase662e142018-06-17 13:00:02 +03001845
Yishai Hadasa124edb2019-01-22 08:29:57 +02001846DECLARE_UVERBS_NAMED_METHOD(
1847 MLX5_IB_METHOD_DEVX_OBJ_ASYNC_QUERY,
1848 UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_QUERY_HANDLE,
1849 UVERBS_IDR_ANY_OBJECT,
1850 UVERBS_ACCESS_READ,
1851 UA_MANDATORY),
1852 UVERBS_ATTR_PTR_IN(
1853 MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN,
1854 UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
1855 UA_MANDATORY,
1856 UA_ALLOC_AND_COPY),
1857 UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_OUT_LEN,
1858 u16, UA_MANDATORY),
1859 UVERBS_ATTR_FD(MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_FD,
1860 MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD,
1861 UVERBS_ACCESS_READ,
1862 UA_MANDATORY),
1863 UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_WR_ID,
1864 UVERBS_ATTR_TYPE(u64),
1865 UA_MANDATORY));
1866
Jason Gunthorpe6c61d2a2018-07-04 08:50:27 +03001867DECLARE_UVERBS_GLOBAL_METHODS(MLX5_IB_OBJECT_DEVX,
Jason Gunthorpe9a119cd2018-07-04 08:50:28 +03001868 &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OTHER),
1869 &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_QUERY_UAR),
1870 &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_QUERY_EQN));
Yishai Hadas8aa8c952018-06-17 13:00:00 +03001871
Jason Gunthorpe6c61d2a2018-07-04 08:50:27 +03001872DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_DEVX_OBJ,
Jason Gunthorpe9a119cd2018-07-04 08:50:28 +03001873 UVERBS_TYPE_ALLOC_IDR(devx_obj_cleanup),
1874 &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_CREATE),
1875 &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_DESTROY),
1876 &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_MODIFY),
Yishai Hadasa124edb2019-01-22 08:29:57 +02001877 &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_QUERY),
1878 &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_ASYNC_QUERY));
Yishai Hadas7efce362018-06-17 13:00:01 +03001879
Jason Gunthorpe6c61d2a2018-07-04 08:50:27 +03001880DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_DEVX_UMEM,
Jason Gunthorpe9a119cd2018-07-04 08:50:28 +03001881 UVERBS_TYPE_ALLOC_IDR(devx_umem_cleanup),
1882 &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_UMEM_REG),
1883 &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_UMEM_DEREG));
Yishai Hadasaeae9452018-06-17 13:00:04 +03001884
Yishai Hadas6bf8f222019-01-22 08:29:56 +02001885
/* ASYNC_CMD_FD_ALLOC: create a new async command completion FD. */
DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_ASYNC_CMD_FD_ALLOC,
	UVERBS_ATTR_FD(MLX5_IB_ATTR_DEVX_ASYNC_CMD_FD_ALLOC_HANDLE,
			MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD,
			UVERBS_ACCESS_NEW,
			UA_MANDATORY));
1892
/*
 * Async command FD type: FD-backed uobject carrying a
 * devx_async_cmd_event_file, read-only, served by
 * devx_async_cmd_event_fops and torn down on hot-unplug by
 * devx_hot_unplug_async_cmd_event_file().
 */
DECLARE_UVERBS_NAMED_OBJECT(
	MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD,
	UVERBS_TYPE_ALLOC_FD(sizeof(struct devx_async_cmd_event_file),
			     devx_hot_unplug_async_cmd_event_file,
			     &devx_async_cmd_event_fops, "[devx_async_cmd]",
			     O_RDONLY),
	&UVERBS_METHOD(MLX5_IB_METHOD_DEVX_ASYNC_CMD_FD_ALLOC));
1900
Jason Gunthorpe36e235c2018-11-12 22:59:53 +02001901static bool devx_is_supported(struct ib_device *device)
Yishai Hadasc59450c2018-06-17 13:00:06 +03001902{
Jason Gunthorpe36e235c2018-11-12 22:59:53 +02001903 struct mlx5_ib_dev *dev = to_mdev(device);
1904
Yishai Hadas6e3722b2018-12-19 16:28:15 +02001905 return !dev->rep && MLX5_CAP_GEN(dev->mdev, log_max_uctx);
Yishai Hadasc59450c2018-06-17 13:00:06 +03001906}
Jason Gunthorpe36e235c2018-11-12 22:59:53 +02001907
Jason Gunthorpe0cbf4322018-11-12 22:59:50 +02001908const struct uapi_definition mlx5_ib_devx_defs[] = {
Jason Gunthorpe36e235c2018-11-12 22:59:53 +02001909 UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
1910 MLX5_IB_OBJECT_DEVX,
1911 UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
1912 UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
1913 MLX5_IB_OBJECT_DEVX_OBJ,
1914 UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
1915 UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
1916 MLX5_IB_OBJECT_DEVX_UMEM,
1917 UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
Yishai Hadas6bf8f222019-01-22 08:29:56 +02001918 UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
1919 MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD,
1920 UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
Jason Gunthorpe0cbf4322018-11-12 22:59:50 +02001921 {},
1922};