// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2018, Mellanox Technologies inc. All rights reserved.
 */

#include <rdma/ib_user_verbs.h>
#include <rdma/ib_verbs.h>
#include <rdma/uverbs_types.h>
#include <rdma/uverbs_ioctl.h>
#include <rdma/mlx5_user_ioctl_cmds.h>
#include <rdma/mlx5_user_ioctl_verbs.h>
#include <rdma/ib_umem.h>
#include <rdma/uverbs_std_types.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/fs.h>
#include "mlx5_ib.h"

#define UVERBS_MODULE_NAME mlx5_ib
#include <rdma/uverbs_named_ioctl.h>

enum devx_obj_flags {
	DEVX_OBJ_FLAGS_INDIRECT_MKEY = 1 << 0,
};

struct devx_async_data {
	struct mlx5_ib_dev *mdev;
	struct list_head list;
	struct ib_uobject *fd_uobj;
	struct mlx5_async_work cb_work;
	u16 cmd_out_len;
	/* must be last field in this structure */
	struct mlx5_ib_uapi_devx_async_cmd_hdr hdr;
};

#define MLX5_MAX_DESTROY_INBOX_SIZE_DW MLX5_ST_SZ_DW(delete_fte_in)
struct devx_obj {
	struct mlx5_core_dev *mdev;
	u64 obj_id;
	u32 dinlen; /* destroy inbox length */
	u32 dinbox[MLX5_MAX_DESTROY_INBOX_SIZE_DW];
	u32 flags;
	struct mlx5_ib_devx_mr devx_mr;
};

struct devx_umem {
	struct mlx5_core_dev *mdev;
	struct ib_umem *umem;
	u32 page_offset;
	int page_shift;
	int ncont;
	u32 dinlen;
	u32 dinbox[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)];
};

struct devx_umem_reg_cmd {
	void *in;
	u32 inlen;
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
};

static struct mlx5_ib_ucontext *
devx_ufile2uctx(const struct uverbs_attr_bundle *attrs)
{
	return to_mucontext(ib_uverbs_get_ucontext(attrs));
}

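/*
 * Allocate a firmware user context (UCTX) and return its UID, or a negative
 * errno. The UID is later stamped into the header of every DEVX command
 * issued on behalf of this context so the device can attribute and isolate
 * the caller's objects. Raw-TX capability appears to be granted only when
 * the caller holds CAP_NET_RAW and the device reports MLX5_UCTX_CAP_RAW_TX.
 */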
int mlx5_ib_devx_create(struct mlx5_ib_dev *dev, bool is_user)
{
	u32 in[MLX5_ST_SZ_DW(create_uctx_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
	void *uctx;
	int err;
	u16 uid;
	u32 cap = 0;

	/* 0 means not supported */
	if (!MLX5_CAP_GEN(dev->mdev, log_max_uctx))
		return -EINVAL;

	uctx = MLX5_ADDR_OF(create_uctx_in, in, uctx);
	if (is_user && capable(CAP_NET_RAW) &&
	    (MLX5_CAP_GEN(dev->mdev, uctx_cap) & MLX5_UCTX_CAP_RAW_TX))
		cap |= MLX5_UCTX_CAP_RAW_TX;

	MLX5_SET(create_uctx_in, in, opcode, MLX5_CMD_OP_CREATE_UCTX);
	MLX5_SET(uctx, uctx, cap, cap);

	err = mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
	if (err)
		return err;

	uid = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
	return uid;
}

void mlx5_ib_devx_destroy(struct mlx5_ib_dev *dev, u16 uid)
{
	u32 in[MLX5_ST_SZ_DW(destroy_uctx_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};

	MLX5_SET(destroy_uctx_in, in, opcode, MLX5_CMD_OP_DESTROY_UCTX);
	MLX5_SET(destroy_uctx_in, in, uid, uid);

	mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
}

bool mlx5_ib_devx_is_flow_dest(void *obj, int *dest_id, int *dest_type)
{
	struct devx_obj *devx_obj = obj;
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, devx_obj->dinbox, opcode);

	switch (opcode) {
	case MLX5_CMD_OP_DESTROY_TIR:
		*dest_type = MLX5_FLOW_DESTINATION_TYPE_TIR;
		*dest_id = MLX5_GET(general_obj_in_cmd_hdr, devx_obj->dinbox,
				    obj_id);
		return true;

	case MLX5_CMD_OP_DESTROY_FLOW_TABLE:
		*dest_type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		*dest_id = MLX5_GET(destroy_flow_table_in, devx_obj->dinbox,
				    table_id);
		return true;
	default:
		return false;
	}
}

bool mlx5_ib_devx_is_flow_counter(void *obj, u32 *counter_id)
{
	struct devx_obj *devx_obj = obj;
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, devx_obj->dinbox, opcode);

	if (opcode == MLX5_CMD_OP_DEALLOC_FLOW_COUNTER) {
		*counter_id = MLX5_GET(dealloc_flow_counter_in,
				       devx_obj->dinbox,
				       flow_counter_id);
		return true;
	}

	return false;
}

/*
 * As the obj_id in the firmware is not globally unique the object type
 * must be considered upon checking for a valid object id.
 * For that the opcode of the creator command is encoded as part of the obj_id.
 */
static u64 get_enc_obj_id(u16 opcode, u32 obj_id)
{
	return ((u64)opcode << 32) | obj_id;
}

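/*
 * Illustrative example (opcode value assumed from the mlx5 interface
 * definitions, where MLX5_CMD_OP_CREATE_CQ is 0x400): a CQ with cqn 0x12 is
 * tracked as get_enc_obj_id(0x400, 0x12) == 0x0000040000000012, so a later
 * MODIFY_CQ or QUERY_CQ on cqn 0x12 resolves to the same 64-bit id, while an
 * unrelated object type that happens to reuse the same 32-bit number does not.
 */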
static u64 devx_get_obj_id(const void *in)
{
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
	u64 obj_id;

	switch (opcode) {
	case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT:
	case MLX5_CMD_OP_QUERY_GENERAL_OBJECT:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_GENERAL_OBJECT,
					MLX5_GET(general_obj_in_cmd_hdr, in,
						 obj_id));
		break;
	case MLX5_CMD_OP_QUERY_MKEY:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_MKEY,
					MLX5_GET(query_mkey_in, in,
						 mkey_index));
		break;
	case MLX5_CMD_OP_QUERY_CQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_CQ,
					MLX5_GET(query_cq_in, in, cqn));
		break;
	case MLX5_CMD_OP_MODIFY_CQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_CQ,
					MLX5_GET(modify_cq_in, in, cqn));
		break;
	case MLX5_CMD_OP_QUERY_SQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SQ,
					MLX5_GET(query_sq_in, in, sqn));
		break;
	case MLX5_CMD_OP_MODIFY_SQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SQ,
					MLX5_GET(modify_sq_in, in, sqn));
		break;
	case MLX5_CMD_OP_QUERY_RQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
					MLX5_GET(query_rq_in, in, rqn));
		break;
	case MLX5_CMD_OP_MODIFY_RQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
					MLX5_GET(modify_rq_in, in, rqn));
		break;
	case MLX5_CMD_OP_QUERY_RMP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RMP,
					MLX5_GET(query_rmp_in, in, rmpn));
		break;
	case MLX5_CMD_OP_MODIFY_RMP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RMP,
					MLX5_GET(modify_rmp_in, in, rmpn));
		break;
	case MLX5_CMD_OP_QUERY_RQT:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQT,
					MLX5_GET(query_rqt_in, in, rqtn));
		break;
	case MLX5_CMD_OP_MODIFY_RQT:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQT,
					MLX5_GET(modify_rqt_in, in, rqtn));
		break;
	case MLX5_CMD_OP_QUERY_TIR:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_TIR,
					MLX5_GET(query_tir_in, in, tirn));
		break;
	case MLX5_CMD_OP_MODIFY_TIR:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_TIR,
					MLX5_GET(modify_tir_in, in, tirn));
		break;
	case MLX5_CMD_OP_QUERY_TIS:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_TIS,
					MLX5_GET(query_tis_in, in, tisn));
		break;
	case MLX5_CMD_OP_MODIFY_TIS:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_TIS,
					MLX5_GET(modify_tis_in, in, tisn));
		break;
	case MLX5_CMD_OP_QUERY_FLOW_TABLE:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_FLOW_TABLE,
					MLX5_GET(query_flow_table_in, in,
						 table_id));
		break;
	case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_FLOW_TABLE,
					MLX5_GET(modify_flow_table_in, in,
						 table_id));
		break;
	case MLX5_CMD_OP_QUERY_FLOW_GROUP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_FLOW_GROUP,
					MLX5_GET(query_flow_group_in, in,
						 group_id));
		break;
	case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY,
					MLX5_GET(query_fte_in, in,
						 flow_index));
		break;
	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY,
					MLX5_GET(set_fte_in, in, flow_index));
		break;
	case MLX5_CMD_OP_QUERY_Q_COUNTER:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_ALLOC_Q_COUNTER,
					MLX5_GET(query_q_counter_in, in,
						 counter_set_id));
		break;
	case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_ALLOC_FLOW_COUNTER,
					MLX5_GET(query_flow_counter_in, in,
						 flow_counter_id));
		break;
	case MLX5_CMD_OP_QUERY_MODIFY_HEADER_CONTEXT:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT,
					MLX5_GET(general_obj_in_cmd_hdr, in,
						 obj_id));
		break;
	case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT,
					MLX5_GET(query_scheduling_element_in,
						 in, scheduling_element_id));
		break;
	case MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT,
					MLX5_GET(modify_scheduling_element_in,
						 in, scheduling_element_id));
		break;
	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT,
					MLX5_GET(add_vxlan_udp_dport_in, in,
						 vxlan_udp_port));
		break;
	case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_SET_L2_TABLE_ENTRY,
					MLX5_GET(query_l2_table_entry_in, in,
						 table_index));
		break;
	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_SET_L2_TABLE_ENTRY,
					MLX5_GET(set_l2_table_entry_in, in,
						 table_index));
		break;
	case MLX5_CMD_OP_QUERY_QP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
					MLX5_GET(query_qp_in, in, qpn));
		break;
	case MLX5_CMD_OP_RST2INIT_QP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
					MLX5_GET(rst2init_qp_in, in, qpn));
		break;
	case MLX5_CMD_OP_INIT2RTR_QP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
					MLX5_GET(init2rtr_qp_in, in, qpn));
		break;
	case MLX5_CMD_OP_RTR2RTS_QP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
					MLX5_GET(rtr2rts_qp_in, in, qpn));
		break;
	case MLX5_CMD_OP_RTS2RTS_QP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
					MLX5_GET(rts2rts_qp_in, in, qpn));
		break;
	case MLX5_CMD_OP_SQERR2RTS_QP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
					MLX5_GET(sqerr2rts_qp_in, in, qpn));
		break;
	case MLX5_CMD_OP_2ERR_QP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
					MLX5_GET(qp_2err_in, in, qpn));
		break;
	case MLX5_CMD_OP_2RST_QP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
					MLX5_GET(qp_2rst_in, in, qpn));
		break;
	case MLX5_CMD_OP_QUERY_DCT:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_DCT,
					MLX5_GET(query_dct_in, in, dctn));
		break;
	case MLX5_CMD_OP_QUERY_XRQ:
	case MLX5_CMD_OP_QUERY_XRQ_DC_PARAMS_ENTRY:
	case MLX5_CMD_OP_QUERY_XRQ_ERROR_PARAMS:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRQ,
					MLX5_GET(query_xrq_in, in, xrqn));
		break;
	case MLX5_CMD_OP_QUERY_XRC_SRQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRC_SRQ,
					MLX5_GET(query_xrc_srq_in, in,
						 xrc_srqn));
		break;
	case MLX5_CMD_OP_ARM_XRC_SRQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRC_SRQ,
					MLX5_GET(arm_xrc_srq_in, in, xrc_srqn));
		break;
	case MLX5_CMD_OP_QUERY_SRQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SRQ,
					MLX5_GET(query_srq_in, in, srqn));
		break;
	case MLX5_CMD_OP_ARM_RQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
					MLX5_GET(arm_rq_in, in, srq_number));
		break;
	case MLX5_CMD_OP_DRAIN_DCT:
	case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_DCT,
					MLX5_GET(drain_dct_in, in, dctn));
		break;
	case MLX5_CMD_OP_ARM_XRQ:
	case MLX5_CMD_OP_SET_XRQ_DC_PARAMS_ENTRY:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRQ,
					MLX5_GET(arm_xrq_in, in, xrqn));
		break;
	case MLX5_CMD_OP_QUERY_PACKET_REFORMAT_CONTEXT:
		obj_id = get_enc_obj_id
			(MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT,
			 MLX5_GET(query_packet_reformat_context_in,
				  in, packet_reformat_id));
		break;
	default:
		obj_id = 0;
	}

	return obj_id;
}

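/*
 * Check that the object id carried inside the user supplied command mailbox
 * really belongs to the uobject the method was invoked on, by re-encoding the
 * uobject's firmware id with its creator opcode and comparing the two. This
 * appears to be what prevents a caller from modifying or querying somebody
 * else's firmware object through an unrelated handle.
 */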
static bool devx_is_valid_obj_id(struct ib_uobject *uobj, const void *in)
{
	u64 obj_id = devx_get_obj_id(in);

	if (!obj_id)
		return false;

	switch (uobj_get_object_id(uobj)) {
	case UVERBS_OBJECT_CQ:
		return get_enc_obj_id(MLX5_CMD_OP_CREATE_CQ,
				      to_mcq(uobj->object)->mcq.cqn) ==
				      obj_id;

	case UVERBS_OBJECT_SRQ:
	{
		struct mlx5_core_srq *srq = &(to_msrq(uobj->object)->msrq);
		struct mlx5_ib_dev *dev = to_mdev(uobj->context->device);
		u16 opcode;

		switch (srq->common.res) {
		case MLX5_RES_XSRQ:
			opcode = MLX5_CMD_OP_CREATE_XRC_SRQ;
			break;
		case MLX5_RES_XRQ:
			opcode = MLX5_CMD_OP_CREATE_XRQ;
			break;
		default:
			if (!dev->mdev->issi)
				opcode = MLX5_CMD_OP_CREATE_SRQ;
			else
				opcode = MLX5_CMD_OP_CREATE_RMP;
		}

		return get_enc_obj_id(opcode,
				      to_msrq(uobj->object)->msrq.srqn) ==
				      obj_id;
	}

	case UVERBS_OBJECT_QP:
	{
		struct mlx5_ib_qp *qp = to_mqp(uobj->object);
		enum ib_qp_type qp_type = qp->ibqp.qp_type;

		if (qp_type == IB_QPT_RAW_PACKET ||
		    (qp->flags & MLX5_IB_QP_UNDERLAY)) {
			struct mlx5_ib_raw_packet_qp *raw_packet_qp =
							 &qp->raw_packet_qp;
			struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
			struct mlx5_ib_sq *sq = &raw_packet_qp->sq;

			return (get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
					       rq->base.mqp.qpn) == obj_id ||
				get_enc_obj_id(MLX5_CMD_OP_CREATE_SQ,
					       sq->base.mqp.qpn) == obj_id ||
				get_enc_obj_id(MLX5_CMD_OP_CREATE_TIR,
					       rq->tirn) == obj_id ||
				get_enc_obj_id(MLX5_CMD_OP_CREATE_TIS,
					       sq->tisn) == obj_id);
		}

		if (qp_type == MLX5_IB_QPT_DCT)
			return get_enc_obj_id(MLX5_CMD_OP_CREATE_DCT,
					      qp->dct.mdct.mqp.qpn) == obj_id;

		return get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
				      qp->ibqp.qp_num) == obj_id;
	}

	case UVERBS_OBJECT_WQ:
		return get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
				      to_mrwq(uobj->object)->core_qp.qpn) ==
				      obj_id;

	case UVERBS_OBJECT_RWQ_IND_TBL:
		return get_enc_obj_id(MLX5_CMD_OP_CREATE_RQT,
				      to_mrwq_ind_table(uobj->object)->rqtn) ==
				      obj_id;

	case MLX5_IB_OBJECT_DEVX_OBJ:
		return ((struct devx_obj *)uobj->object)->obj_id == obj_id;

	default:
		return false;
	}
}

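/*
 * For create/modify commands that may reference user memory, mark the
 * relevant umem/dbr handles as valid in the command mailbox so the firmware
 * accepts umem-based buffers supplied by the caller. Commands not listed
 * here are left untouched.
 */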
static void devx_set_umem_valid(const void *in)
{
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);

	switch (opcode) {
	case MLX5_CMD_OP_CREATE_MKEY:
		MLX5_SET(create_mkey_in, in, mkey_umem_valid, 1);
		break;
	case MLX5_CMD_OP_CREATE_CQ:
	{
		void *cqc;

		MLX5_SET(create_cq_in, in, cq_umem_valid, 1);
		cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
		MLX5_SET(cqc, cqc, dbr_umem_valid, 1);
		break;
	}
	case MLX5_CMD_OP_CREATE_QP:
	{
		void *qpc;

		qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
		MLX5_SET(qpc, qpc, dbr_umem_valid, 1);
		MLX5_SET(create_qp_in, in, wq_umem_valid, 1);
		break;
	}

	case MLX5_CMD_OP_CREATE_RQ:
	{
		void *rqc, *wq;

		rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
		wq = MLX5_ADDR_OF(rqc, rqc, wq);
		MLX5_SET(wq, wq, dbr_umem_valid, 1);
		MLX5_SET(wq, wq, wq_umem_valid, 1);
		break;
	}

	case MLX5_CMD_OP_CREATE_SQ:
	{
		void *sqc, *wq;

		sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
		wq = MLX5_ADDR_OF(sqc, sqc, wq);
		MLX5_SET(wq, wq, dbr_umem_valid, 1);
		MLX5_SET(wq, wq, wq_umem_valid, 1);
		break;
	}

	case MLX5_CMD_OP_MODIFY_CQ:
		MLX5_SET(modify_cq_in, in, cq_umem_valid, 1);
		break;

	case MLX5_CMD_OP_CREATE_RMP:
	{
		void *rmpc, *wq;

		rmpc = MLX5_ADDR_OF(create_rmp_in, in, ctx);
		wq = MLX5_ADDR_OF(rmpc, rmpc, wq);
		MLX5_SET(wq, wq, dbr_umem_valid, 1);
		MLX5_SET(wq, wq, wq_umem_valid, 1);
		break;
	}

	case MLX5_CMD_OP_CREATE_XRQ:
	{
		void *xrqc, *wq;

		xrqc = MLX5_ADDR_OF(create_xrq_in, in, xrq_context);
		wq = MLX5_ADDR_OF(xrqc, xrqc, wq);
		MLX5_SET(wq, wq, dbr_umem_valid, 1);
		MLX5_SET(wq, wq, wq_umem_valid, 1);
		break;
	}

	case MLX5_CMD_OP_CREATE_XRC_SRQ:
	{
		void *xrc_srqc;

		MLX5_SET(create_xrc_srq_in, in, xrc_srq_umem_valid, 1);
		xrc_srqc = MLX5_ADDR_OF(create_xrc_srq_in, in,
					xrc_srq_context_entry);
		MLX5_SET(xrc_srqc, xrc_srqc, dbr_umem_valid, 1);
		break;
	}

	default:
		return;
	}
}

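/*
 * The helpers below classify a user supplied command by its opcode into
 * create / modify / query buckets. The bucket decides which uverbs method may
 * carry the command and, for create, that a destroy mailbox has to be
 * pre-built for it.
 */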
static bool devx_is_obj_create_cmd(const void *in, u16 *opcode)
{
	*opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);

	switch (*opcode) {
	case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
	case MLX5_CMD_OP_CREATE_MKEY:
	case MLX5_CMD_OP_CREATE_CQ:
	case MLX5_CMD_OP_ALLOC_PD:
	case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
	case MLX5_CMD_OP_CREATE_RMP:
	case MLX5_CMD_OP_CREATE_SQ:
	case MLX5_CMD_OP_CREATE_RQ:
	case MLX5_CMD_OP_CREATE_RQT:
	case MLX5_CMD_OP_CREATE_TIR:
	case MLX5_CMD_OP_CREATE_TIS:
	case MLX5_CMD_OP_ALLOC_Q_COUNTER:
	case MLX5_CMD_OP_CREATE_FLOW_TABLE:
	case MLX5_CMD_OP_CREATE_FLOW_GROUP:
	case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
	case MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT:
	case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT:
	case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_CREATE_QP:
	case MLX5_CMD_OP_CREATE_SRQ:
	case MLX5_CMD_OP_CREATE_XRC_SRQ:
	case MLX5_CMD_OP_CREATE_DCT:
	case MLX5_CMD_OP_CREATE_XRQ:
	case MLX5_CMD_OP_ATTACH_TO_MCG:
	case MLX5_CMD_OP_ALLOC_XRCD:
		return true;
	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
	{
		u16 op_mod = MLX5_GET(set_fte_in, in, op_mod);

		if (op_mod == 0)
			return true;
		return false;
	}
	default:
		return false;
	}
}

static bool devx_is_obj_modify_cmd(const void *in)
{
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);

	switch (opcode) {
	case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT:
	case MLX5_CMD_OP_MODIFY_CQ:
	case MLX5_CMD_OP_MODIFY_RMP:
	case MLX5_CMD_OP_MODIFY_SQ:
	case MLX5_CMD_OP_MODIFY_RQ:
	case MLX5_CMD_OP_MODIFY_RQT:
	case MLX5_CMD_OP_MODIFY_TIR:
	case MLX5_CMD_OP_MODIFY_TIS:
	case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
	case MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT:
	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_RST2INIT_QP:
	case MLX5_CMD_OP_INIT2RTR_QP:
	case MLX5_CMD_OP_RTR2RTS_QP:
	case MLX5_CMD_OP_RTS2RTS_QP:
	case MLX5_CMD_OP_SQERR2RTS_QP:
	case MLX5_CMD_OP_2ERR_QP:
	case MLX5_CMD_OP_2RST_QP:
	case MLX5_CMD_OP_ARM_XRC_SRQ:
	case MLX5_CMD_OP_ARM_RQ:
	case MLX5_CMD_OP_DRAIN_DCT:
	case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
	case MLX5_CMD_OP_ARM_XRQ:
	case MLX5_CMD_OP_SET_XRQ_DC_PARAMS_ENTRY:
		return true;
	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
	{
		u16 op_mod = MLX5_GET(set_fte_in, in, op_mod);

		if (op_mod == 1)
			return true;
		return false;
	}
	default:
		return false;
	}
}

static bool devx_is_obj_query_cmd(const void *in)
{
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);

	switch (opcode) {
	case MLX5_CMD_OP_QUERY_GENERAL_OBJECT:
	case MLX5_CMD_OP_QUERY_MKEY:
	case MLX5_CMD_OP_QUERY_CQ:
	case MLX5_CMD_OP_QUERY_RMP:
	case MLX5_CMD_OP_QUERY_SQ:
	case MLX5_CMD_OP_QUERY_RQ:
	case MLX5_CMD_OP_QUERY_RQT:
	case MLX5_CMD_OP_QUERY_TIR:
	case MLX5_CMD_OP_QUERY_TIS:
	case MLX5_CMD_OP_QUERY_Q_COUNTER:
	case MLX5_CMD_OP_QUERY_FLOW_TABLE:
	case MLX5_CMD_OP_QUERY_FLOW_GROUP:
	case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
	case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
	case MLX5_CMD_OP_QUERY_MODIFY_HEADER_CONTEXT:
	case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
	case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_QUERY_QP:
	case MLX5_CMD_OP_QUERY_SRQ:
	case MLX5_CMD_OP_QUERY_XRC_SRQ:
	case MLX5_CMD_OP_QUERY_DCT:
	case MLX5_CMD_OP_QUERY_XRQ:
	case MLX5_CMD_OP_QUERY_XRQ_DC_PARAMS_ENTRY:
	case MLX5_CMD_OP_QUERY_XRQ_ERROR_PARAMS:
	case MLX5_CMD_OP_QUERY_PACKET_REFORMAT_CONTEXT:
		return true;
	default:
		return false;
	}
}

static bool devx_is_whitelist_cmd(void *in)
{
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);

	switch (opcode) {
	case MLX5_CMD_OP_QUERY_HCA_CAP:
	case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
		return true;
	default:
		return false;
	}
}

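/*
 * Resolve the UID to stamp into the command. Whitelisted query commands may
 * fall back to the device-wide whitelist UID when the context has no DEVX
 * UID of its own; every other command requires the caller's own DEVX UID.
 */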
static int devx_get_uid(struct mlx5_ib_ucontext *c, void *cmd_in)
{
	if (devx_is_whitelist_cmd(cmd_in)) {
		struct mlx5_ib_dev *dev;

		if (c->devx_uid)
			return c->devx_uid;

		dev = to_mdev(c->ibucontext.device);
		if (dev->devx_whitelist_uid)
			return dev->devx_whitelist_uid;

		return -EOPNOTSUPP;
	}

	if (!c->devx_uid)
		return -EINVAL;

	return c->devx_uid;
}
static bool devx_is_general_cmd(void *in)
{
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);

	if (opcode >= MLX5_CMD_OP_GENERAL_START &&
	    opcode < MLX5_CMD_OP_GENERAL_END)
		return true;

	switch (opcode) {
	case MLX5_CMD_OP_QUERY_HCA_CAP:
	case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_VPORT_STATE:
	case MLX5_CMD_OP_QUERY_ADAPTER:
	case MLX5_CMD_OP_QUERY_ISSI:
	case MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_ROCE_ADDRESS:
	case MLX5_CMD_OP_QUERY_VNIC_ENV:
	case MLX5_CMD_OP_QUERY_VPORT_COUNTER:
	case MLX5_CMD_OP_GET_DROPPED_PACKET_LOG:
	case MLX5_CMD_OP_NOP:
	case MLX5_CMD_OP_QUERY_CONG_STATUS:
	case MLX5_CMD_OP_QUERY_CONG_PARAMS:
	case MLX5_CMD_OP_QUERY_CONG_STATISTICS:
		return true;
	default:
		return false;
	}
}

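/*
 * Translate a user supplied completion vector number into the device EQ
 * number, presumably so userspace can point DEVX-created CQs at the right
 * event queue.
 */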
static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_QUERY_EQN)(
	struct uverbs_attr_bundle *attrs)
{
	struct mlx5_ib_ucontext *c;
	struct mlx5_ib_dev *dev;
	int user_vector;
	int dev_eqn;
	unsigned int irqn;
	int err;

	if (uverbs_copy_from(&user_vector, attrs,
			     MLX5_IB_ATTR_DEVX_QUERY_EQN_USER_VEC))
		return -EFAULT;

	c = devx_ufile2uctx(attrs);
	if (IS_ERR(c))
		return PTR_ERR(c);
	dev = to_mdev(c->ibucontext.device);

	err = mlx5_vector2eqn(dev->mdev, user_vector, &dev_eqn, &irqn);
	if (err < 0)
		return err;

	if (uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_QUERY_EQN_DEV_EQN,
			   &dev_eqn, sizeof(dev_eqn)))
		return -EFAULT;

	return 0;
}

/*
 * Security note:
 * The hardware protection mechanism works like this: each device object that
 * is subject to UAR doorbells (QP/SQ/CQ) gets a UAR ID (called uar_page in
 * the device specification manual) upon its creation. Then upon doorbell,
 * hardware fetches the object context for which the doorbell was rung, and
 * validates that the UAR through which the DB was rung matches the UAR ID
 * of the object.
 * If they do not match, the doorbell is silently ignored by the hardware. Of
 * course, the user cannot ring a doorbell on a UAR that was not mapped to it.
 * Now in devx, as the devx kernel does not manipulate the QP/SQ/CQ command
 * mailboxes (except tagging them with UID), we expose to the user its UAR
 * ID, so it can embed it in these objects in the expected specification
 * format. So the only thing the user can do is hurt itself by creating a
 * QP/SQ/CQ with a UAR ID other than its own, in which case other users
 * may ring a doorbell on its objects.
 * The consequence of that will be that another user can schedule a QP/SQ
 * of the buggy user for execution (just insert it into the hardware schedule
 * queue or arm its CQ for event generation); no further harm is expected.
 */
static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_QUERY_UAR)(
	struct uverbs_attr_bundle *attrs)
{
	struct mlx5_ib_ucontext *c;
	struct mlx5_ib_dev *dev;
	u32 user_idx;
	s32 dev_idx;

	c = devx_ufile2uctx(attrs);
	if (IS_ERR(c))
		return PTR_ERR(c);
	dev = to_mdev(c->ibucontext.device);

	if (uverbs_copy_from(&user_idx, attrs,
			     MLX5_IB_ATTR_DEVX_QUERY_UAR_USER_IDX))
		return -EFAULT;

	dev_idx = bfregn_to_uar_index(dev, &c->bfregi, user_idx, true);
	if (dev_idx < 0)
		return dev_idx;

	if (uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_QUERY_UAR_DEV_IDX,
			   &dev_idx, sizeof(dev_idx)))
		return -EFAULT;

	return 0;
}

static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OTHER)(
	struct uverbs_attr_bundle *attrs)
{
	struct mlx5_ib_ucontext *c;
	struct mlx5_ib_dev *dev;
	void *cmd_in = uverbs_attr_get_alloced_ptr(
		attrs, MLX5_IB_ATTR_DEVX_OTHER_CMD_IN);
	int cmd_out_len = uverbs_attr_get_len(attrs,
					      MLX5_IB_ATTR_DEVX_OTHER_CMD_OUT);
	void *cmd_out;
	int err;
	int uid;

	c = devx_ufile2uctx(attrs);
	if (IS_ERR(c))
		return PTR_ERR(c);
	dev = to_mdev(c->ibucontext.device);

	uid = devx_get_uid(c, cmd_in);
	if (uid < 0)
		return uid;

	/* Only a white list of some general HCA commands is allowed for this method. */
	if (!devx_is_general_cmd(cmd_in))
		return -EINVAL;

	cmd_out = uverbs_zalloc(attrs, cmd_out_len);
	if (IS_ERR(cmd_out))
		return PTR_ERR(cmd_out);

	MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
	err = mlx5_cmd_exec(dev->mdev, cmd_in,
			    uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OTHER_CMD_IN),
			    cmd_out, cmd_out_len);
	if (err)
		return err;

	return uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OTHER_CMD_OUT, cmd_out,
			      cmd_out_len);
}

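/*
 * Pre-build the matching destroy/dealloc mailbox for a successfully created
 * object, using the creation input and output. The destroy box is kept in
 * the devx_obj so teardown does not have to re-parse or trust user input at
 * cleanup time.
 */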
static void devx_obj_build_destroy_cmd(void *in, void *out, void *din,
				       u32 *dinlen,
				       u32 *obj_id)
{
	u16 obj_type = MLX5_GET(general_obj_in_cmd_hdr, in, obj_type);
	u16 uid = MLX5_GET(general_obj_in_cmd_hdr, in, uid);

	*obj_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
	*dinlen = MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr);

	MLX5_SET(general_obj_in_cmd_hdr, din, obj_id, *obj_id);
	MLX5_SET(general_obj_in_cmd_hdr, din, uid, uid);

	switch (MLX5_GET(general_obj_in_cmd_hdr, in, opcode)) {
	case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
		MLX5_SET(general_obj_in_cmd_hdr, din, obj_type, obj_type);
		break;

	case MLX5_CMD_OP_CREATE_UMEM:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DESTROY_UMEM);
		break;
	case MLX5_CMD_OP_CREATE_MKEY:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_MKEY);
		break;
	case MLX5_CMD_OP_CREATE_CQ:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_CQ);
		break;
	case MLX5_CMD_OP_ALLOC_PD:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DEALLOC_PD);
		break;
	case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN);
		break;
	case MLX5_CMD_OP_CREATE_RMP:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_RMP);
		break;
	case MLX5_CMD_OP_CREATE_SQ:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_SQ);
		break;
	case MLX5_CMD_OP_CREATE_RQ:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_RQ);
		break;
	case MLX5_CMD_OP_CREATE_RQT:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_RQT);
		break;
	case MLX5_CMD_OP_CREATE_TIR:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_TIR);
		break;
	case MLX5_CMD_OP_CREATE_TIS:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_TIS);
		break;
	case MLX5_CMD_OP_ALLOC_Q_COUNTER:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DEALLOC_Q_COUNTER);
		break;
	case MLX5_CMD_OP_CREATE_FLOW_TABLE:
		*dinlen = MLX5_ST_SZ_BYTES(destroy_flow_table_in);
		*obj_id = MLX5_GET(create_flow_table_out, out, table_id);
		MLX5_SET(destroy_flow_table_in, din, other_vport,
			 MLX5_GET(create_flow_table_in, in, other_vport));
		MLX5_SET(destroy_flow_table_in, din, vport_number,
			 MLX5_GET(create_flow_table_in, in, vport_number));
		MLX5_SET(destroy_flow_table_in, din, table_type,
			 MLX5_GET(create_flow_table_in, in, table_type));
		MLX5_SET(destroy_flow_table_in, din, table_id, *obj_id);
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DESTROY_FLOW_TABLE);
		break;
	case MLX5_CMD_OP_CREATE_FLOW_GROUP:
		*dinlen = MLX5_ST_SZ_BYTES(destroy_flow_group_in);
		*obj_id = MLX5_GET(create_flow_group_out, out, group_id);
		MLX5_SET(destroy_flow_group_in, din, other_vport,
			 MLX5_GET(create_flow_group_in, in, other_vport));
		MLX5_SET(destroy_flow_group_in, din, vport_number,
			 MLX5_GET(create_flow_group_in, in, vport_number));
		MLX5_SET(destroy_flow_group_in, din, table_type,
			 MLX5_GET(create_flow_group_in, in, table_type));
		MLX5_SET(destroy_flow_group_in, din, table_id,
			 MLX5_GET(create_flow_group_in, in, table_id));
		MLX5_SET(destroy_flow_group_in, din, group_id, *obj_id);
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DESTROY_FLOW_GROUP);
		break;
	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
		*dinlen = MLX5_ST_SZ_BYTES(delete_fte_in);
		*obj_id = MLX5_GET(set_fte_in, in, flow_index);
		MLX5_SET(delete_fte_in, din, other_vport,
			 MLX5_GET(set_fte_in, in, other_vport));
		MLX5_SET(delete_fte_in, din, vport_number,
			 MLX5_GET(set_fte_in, in, vport_number));
		MLX5_SET(delete_fte_in, din, table_type,
			 MLX5_GET(set_fte_in, in, table_type));
		MLX5_SET(delete_fte_in, din, table_id,
			 MLX5_GET(set_fte_in, in, table_id));
		MLX5_SET(delete_fte_in, din, flow_index, *obj_id);
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
		break;
	case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DEALLOC_FLOW_COUNTER);
		break;
	case MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT);
		break;
	case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT);
		break;
	case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
		*dinlen = MLX5_ST_SZ_BYTES(destroy_scheduling_element_in);
		*obj_id = MLX5_GET(create_scheduling_element_out, out,
				   scheduling_element_id);
		MLX5_SET(destroy_scheduling_element_in, din,
			 scheduling_hierarchy,
			 MLX5_GET(create_scheduling_element_in, in,
				  scheduling_hierarchy));
		MLX5_SET(destroy_scheduling_element_in, din,
			 scheduling_element_id, *obj_id);
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT);
		break;
	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
		*dinlen = MLX5_ST_SZ_BYTES(delete_vxlan_udp_dport_in);
		*obj_id = MLX5_GET(add_vxlan_udp_dport_in, in, vxlan_udp_port);
		MLX5_SET(delete_vxlan_udp_dport_in, din, vxlan_udp_port, *obj_id);
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT);
		break;
	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
		*dinlen = MLX5_ST_SZ_BYTES(delete_l2_table_entry_in);
		*obj_id = MLX5_GET(set_l2_table_entry_in, in, table_index);
		MLX5_SET(delete_l2_table_entry_in, din, table_index, *obj_id);
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY);
		break;
	case MLX5_CMD_OP_CREATE_QP:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_QP);
		break;
	case MLX5_CMD_OP_CREATE_SRQ:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_SRQ);
		break;
	case MLX5_CMD_OP_CREATE_XRC_SRQ:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DESTROY_XRC_SRQ);
		break;
	case MLX5_CMD_OP_CREATE_DCT:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_DCT);
		break;
	case MLX5_CMD_OP_CREATE_XRQ:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_XRQ);
		break;
	case MLX5_CMD_OP_ATTACH_TO_MCG:
		*dinlen = MLX5_ST_SZ_BYTES(detach_from_mcg_in);
		MLX5_SET(detach_from_mcg_in, din, qpn,
			 MLX5_GET(attach_to_mcg_in, in, qpn));
		memcpy(MLX5_ADDR_OF(detach_from_mcg_in, din, multicast_gid),
		       MLX5_ADDR_OF(attach_to_mcg_in, in, multicast_gid),
		       MLX5_FLD_SZ_BYTES(attach_to_mcg_in, multicast_gid));
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DETACH_FROM_MCG);
		break;
	case MLX5_CMD_OP_ALLOC_XRCD:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DEALLOC_XRCD);
		break;
	default:
		/* The entry must match to one of the devx_is_obj_create_cmd */
		WARN_ON(true);
		break;
	}
}

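/*
 * For an indirect (KLM/KSM) mkey created through DEVX, mirror the firmware
 * mkey into the driver's mkey radix tree so other parts of the driver (the
 * ODP fault path, per the IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)
 * check in devx_handle_mkey_create) can look it up by key. The entry is
 * removed again in devx_cleanup_mkey() before the object is destroyed.
 */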
static int devx_handle_mkey_indirect(struct devx_obj *obj,
				     struct mlx5_ib_dev *dev,
				     void *in, void *out)
{
	struct mlx5_mkey_table *table = &dev->mdev->priv.mkey_table;
	struct mlx5_ib_devx_mr *devx_mr = &obj->devx_mr;
	unsigned long flags;
	struct mlx5_core_mkey *mkey;
	void *mkc;
	u8 key;
	int err;

	mkey = &devx_mr->mmkey;
	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	key = MLX5_GET(mkc, mkc, mkey_7_0);
	mkey->key = mlx5_idx_to_mkey(
			MLX5_GET(create_mkey_out, out, mkey_index)) | key;
	mkey->type = MLX5_MKEY_INDIRECT_DEVX;
	mkey->iova = MLX5_GET64(mkc, mkc, start_addr);
	mkey->size = MLX5_GET64(mkc, mkc, len);
	mkey->pd = MLX5_GET(mkc, mkc, pd);
	devx_mr->ndescs = MLX5_GET(mkc, mkc, translations_octword_size);

	write_lock_irqsave(&table->lock, flags);
	err = radix_tree_insert(&table->tree, mlx5_base_mkey(mkey->key),
				mkey);
	write_unlock_irqrestore(&table->lock, flags);
	return err;
}

static int devx_handle_mkey_create(struct mlx5_ib_dev *dev,
				   struct devx_obj *obj,
				   void *in, int in_len)
{
	int min_len = MLX5_BYTE_OFF(create_mkey_in, memory_key_mkey_entry) +
			MLX5_FLD_SZ_BYTES(create_mkey_in,
					  memory_key_mkey_entry);
	void *mkc;
	u8 access_mode;

	if (in_len < min_len)
		return -EINVAL;

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

	access_mode = MLX5_GET(mkc, mkc, access_mode_1_0);
	access_mode |= MLX5_GET(mkc, mkc, access_mode_4_2) << 2;

	if (access_mode == MLX5_MKC_ACCESS_MODE_KLMS ||
	    access_mode == MLX5_MKC_ACCESS_MODE_KSM) {
		if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING))
			obj->flags |= DEVX_OBJ_FLAGS_INDIRECT_MKEY;
		return 0;
	}

	MLX5_SET(create_mkey_in, in, mkey_umem_valid, 1);
	return 0;
}

static void devx_free_indirect_mkey(struct rcu_head *rcu)
{
	kfree(container_of(rcu, struct devx_obj, devx_mr.rcu));
}

/* Deleting the mkey from the radix tree must happen before the underlying
 * mkey is destroyed. Otherwise another thread could be handed the same mkey
 * value before this one is removed, and its insertion into the tree would
 * then fail.
 *
 * Note:
 * An error on the destroy is not expected unless some other indirect mkey
 * still points to this one. In a kernel cleanup flow that mkey is simply
 * destroyed later in the iterative destruction pass. In a user flow, if the
 * application did not tear down in the expected order that is its own
 * problem; the mkey is no longer part of the tree, so in both cases the
 * kernel is safe.
 */
static void devx_cleanup_mkey(struct devx_obj *obj)
{
	struct mlx5_mkey_table *table = &obj->mdev->priv.mkey_table;
	unsigned long flags;

	write_lock_irqsave(&table->lock, flags);
	radix_tree_delete(&table->tree, mlx5_base_mkey(obj->devx_mr.mmkey.key));
	write_unlock_irqrestore(&table->lock, flags);
}

static int devx_obj_cleanup(struct ib_uobject *uobject,
			    enum rdma_remove_reason why)
{
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
	struct devx_obj *obj = uobject->object;
	int ret;

	if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY)
		devx_cleanup_mkey(obj);

	ret = mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, out, sizeof(out));
	if (ib_is_destroy_retryable(ret, why, uobject))
		return ret;

	if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY) {
		struct mlx5_ib_dev *dev = to_mdev(uobject->context->device);

		call_srcu(&dev->mr_srcu, &obj->devx_mr.rcu,
			  devx_free_indirect_mkey);
		return ret;
	}

	kfree(obj);
	return ret;
}

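/*
 * DEVX object creation: stamp the caller's UID into the command, let the
 * firmware execute it, then record everything needed for automatic teardown
 * (the pre-built destroy mailbox and the encoded obj_id) in the uobject.
 * Indirect mkeys are additionally registered in the mkey tree; on any later
 * failure the object is destroyed using the same pre-built mailbox.
 */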
static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)(
	struct uverbs_attr_bundle *attrs)
{
	void *cmd_in = uverbs_attr_get_alloced_ptr(attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_IN);
	int cmd_out_len = uverbs_attr_get_len(attrs,
					      MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT);
	int cmd_in_len = uverbs_attr_get_len(attrs,
					     MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_IN);
	void *cmd_out;
	struct ib_uobject *uobj = uverbs_attr_get_uobject(
		attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_HANDLE);
	struct mlx5_ib_ucontext *c = to_mucontext(uobj->context);
	struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
	struct devx_obj *obj;
	int err;
	int uid;
	u32 obj_id;
	u16 opcode;

	uid = devx_get_uid(c, cmd_in);
	if (uid < 0)
		return uid;

	if (!devx_is_obj_create_cmd(cmd_in, &opcode))
		return -EINVAL;

	cmd_out = uverbs_zalloc(attrs, cmd_out_len);
	if (IS_ERR(cmd_out))
		return PTR_ERR(cmd_out);

	obj = kzalloc(sizeof(struct devx_obj), GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
	if (opcode == MLX5_CMD_OP_CREATE_MKEY) {
		err = devx_handle_mkey_create(dev, obj, cmd_in, cmd_in_len);
		if (err)
			goto obj_free;
	} else {
		devx_set_umem_valid(cmd_in);
	}

	err = mlx5_cmd_exec(dev->mdev, cmd_in,
			    cmd_in_len,
			    cmd_out, cmd_out_len);
	if (err)
		goto obj_free;

	uobj->object = obj;
	obj->mdev = dev->mdev;
	devx_obj_build_destroy_cmd(cmd_in, cmd_out, obj->dinbox, &obj->dinlen,
				   &obj_id);
	WARN_ON(obj->dinlen > MLX5_MAX_DESTROY_INBOX_SIZE_DW * sizeof(u32));

	if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY) {
		err = devx_handle_mkey_indirect(obj, dev, cmd_in, cmd_out);
		if (err)
			goto obj_destroy;
	}

	err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT, cmd_out, cmd_out_len);
	if (err)
		goto obj_destroy;

	obj->obj_id = get_enc_obj_id(opcode, obj_id);
	return 0;

obj_destroy:
	if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY)
		devx_cleanup_mkey(obj);
	mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, out, sizeof(out));
obj_free:
	kfree(obj);
	return err;
}

static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_MODIFY)(
	struct uverbs_attr_bundle *attrs)
{
	void *cmd_in = uverbs_attr_get_alloced_ptr(attrs, MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN);
	int cmd_out_len = uverbs_attr_get_len(attrs,
					      MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_OUT);
	struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs,
							  MLX5_IB_ATTR_DEVX_OBJ_MODIFY_HANDLE);
	struct mlx5_ib_ucontext *c = to_mucontext(uobj->context);
	struct mlx5_ib_dev *mdev = to_mdev(uobj->context->device);
	void *cmd_out;
	int err;
	int uid;

	uid = devx_get_uid(c, cmd_in);
	if (uid < 0)
		return uid;

	if (!devx_is_obj_modify_cmd(cmd_in))
		return -EINVAL;

	if (!devx_is_valid_obj_id(uobj, cmd_in))
		return -EINVAL;

	cmd_out = uverbs_zalloc(attrs, cmd_out_len);
	if (IS_ERR(cmd_out))
		return PTR_ERR(cmd_out);

	MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
	devx_set_umem_valid(cmd_in);

	err = mlx5_cmd_exec(mdev->mdev, cmd_in,
			    uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN),
			    cmd_out, cmd_out_len);
	if (err)
		return err;

	return uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_OUT,
			      cmd_out, cmd_out_len);
}

static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_QUERY)(
	struct uverbs_attr_bundle *attrs)
{
	void *cmd_in = uverbs_attr_get_alloced_ptr(attrs, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN);
	int cmd_out_len = uverbs_attr_get_len(attrs,
					      MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_OUT);
	struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs,
							  MLX5_IB_ATTR_DEVX_OBJ_QUERY_HANDLE);
	struct mlx5_ib_ucontext *c = to_mucontext(uobj->context);
	void *cmd_out;
	int err;
	int uid;
	struct mlx5_ib_dev *mdev = to_mdev(uobj->context->device);

	uid = devx_get_uid(c, cmd_in);
	if (uid < 0)
		return uid;

	if (!devx_is_obj_query_cmd(cmd_in))
		return -EINVAL;

	if (!devx_is_valid_obj_id(uobj, cmd_in))
		return -EINVAL;

	cmd_out = uverbs_zalloc(attrs, cmd_out_len);
	if (IS_ERR(cmd_out))
		return PTR_ERR(cmd_out);

	MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
	err = mlx5_cmd_exec(mdev->mdev, cmd_in,
			    uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN),
			    cmd_out, cmd_out_len);
	if (err)
		return err;

	return uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_OUT,
			      cmd_out, cmd_out_len);
}

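/*
 * Asynchronous command completion support: completions of async object
 * queries are queued on a per-FD event queue and are meant to be read back
 * by userspace through the async command FD object allocated below.
 */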
Yishai Hadas6bf8f222019-01-22 08:29:56 +02001301struct devx_async_event_queue {
1302 spinlock_t lock;
1303 wait_queue_head_t poll_wait;
1304 struct list_head event_list;
Yishai Hadasa124edb2019-01-22 08:29:57 +02001305 atomic_t bytes_in_use;
Yishai Hadaseaebaf72019-01-22 08:29:59 +02001306 u8 is_destroyed:1;
Yishai Hadas6bf8f222019-01-22 08:29:56 +02001307};
1308
1309struct devx_async_cmd_event_file {
1310 struct ib_uobject uobj;
1311 struct devx_async_event_queue ev_queue;
Yishai Hadasa124edb2019-01-22 08:29:57 +02001312 struct mlx5_async_ctx async_ctx;
Yishai Hadas6bf8f222019-01-22 08:29:56 +02001313};
1314
1315static void devx_init_event_queue(struct devx_async_event_queue *ev_queue)
1316{
1317 spin_lock_init(&ev_queue->lock);
1318 INIT_LIST_HEAD(&ev_queue->event_list);
1319 init_waitqueue_head(&ev_queue->poll_wait);
Yishai Hadasa124edb2019-01-22 08:29:57 +02001320 atomic_set(&ev_queue->bytes_in_use, 0);
Yishai Hadaseaebaf72019-01-22 08:29:59 +02001321 ev_queue->is_destroyed = 0;
Yishai Hadas6bf8f222019-01-22 08:29:56 +02001322}
1323
1324static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_ASYNC_CMD_FD_ALLOC)(
1325 struct uverbs_attr_bundle *attrs)
1326{
1327 struct devx_async_cmd_event_file *ev_file;
1328
1329 struct ib_uobject *uobj = uverbs_attr_get_uobject(
1330 attrs, MLX5_IB_ATTR_DEVX_ASYNC_CMD_FD_ALLOC_HANDLE);
Yishai Hadasa124edb2019-01-22 08:29:57 +02001331 struct mlx5_ib_dev *mdev = to_mdev(uobj->context->device);
Yishai Hadas6bf8f222019-01-22 08:29:56 +02001332
1333 ev_file = container_of(uobj, struct devx_async_cmd_event_file,
1334 uobj);
1335 devx_init_event_queue(&ev_file->ev_queue);
Yishai Hadasa124edb2019-01-22 08:29:57 +02001336 mlx5_cmd_init_async_ctx(mdev->mdev, &ev_file->async_ctx);
Yishai Hadas6bf8f222019-01-22 08:29:56 +02001337 return 0;
1338}
1339
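/*
 * Runs from the mlx5 async command machinery when a queried command
 * completes: move the prepared devx_async_data onto the FD's event list,
 * wake up any reader, and drop the file reference taken when the query
 * was submitted.
 */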
Yishai Hadasa124edb2019-01-22 08:29:57 +02001340static void devx_query_callback(int status, struct mlx5_async_work *context)
1341{
1342 struct devx_async_data *async_data =
1343 container_of(context, struct devx_async_data, cb_work);
1344 struct ib_uobject *fd_uobj = async_data->fd_uobj;
1345 struct devx_async_cmd_event_file *ev_file;
1346 struct devx_async_event_queue *ev_queue;
1347 unsigned long flags;
1348
1349 ev_file = container_of(fd_uobj, struct devx_async_cmd_event_file,
1350 uobj);
1351 ev_queue = &ev_file->ev_queue;
1352
1353 spin_lock_irqsave(&ev_queue->lock, flags);
1354 list_add_tail(&async_data->list, &ev_queue->event_list);
1355 spin_unlock_irqrestore(&ev_queue->lock, flags);
1356
1357 wake_up_interruptible(&ev_queue->poll_wait);
1358 fput(fd_uobj->object);
1359}
1360
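/* Per-FD cap on async command output that is in flight or not yet read. */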
1361#define MAX_ASYNC_BYTES_IN_USE (1024 * 1024) /* 1MB */
1362
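/*
 * Asynchronous flavour of MLX5_IB_METHOD_DEVX_OBJ_QUERY: after the same
 * uid/opcode/object-id validation, reserve cmd_out_len against the FD's
 * bytes_in_use budget, allocate a devx_async_data large enough for the
 * response header plus out_data, take a reference on the FD's struct
 * file and issue the command via mlx5_cmd_exec_cb().  The result is
 * delivered through devx_query_callback() above.
 */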
1363static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_ASYNC_QUERY)(
1364 struct uverbs_attr_bundle *attrs)
1365{
1366 void *cmd_in = uverbs_attr_get_alloced_ptr(attrs,
1367 MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_CMD_IN);
1368 struct ib_uobject *uobj = uverbs_attr_get_uobject(
1369 attrs,
1370 MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_HANDLE);
1371 u16 cmd_out_len;
1372 struct mlx5_ib_ucontext *c = to_mucontext(uobj->context);
1373 struct ib_uobject *fd_uobj;
1374 int err;
1375 int uid;
1376 struct mlx5_ib_dev *mdev = to_mdev(uobj->context->device);
1377 struct devx_async_cmd_event_file *ev_file;
1378 struct devx_async_data *async_data;
1379
1380 uid = devx_get_uid(c, cmd_in);
1381 if (uid < 0)
1382 return uid;
1383
1384 if (!devx_is_obj_query_cmd(cmd_in))
1385 return -EINVAL;
1386
1387 err = uverbs_get_const(&cmd_out_len, attrs,
1388 MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_OUT_LEN);
1389 if (err)
1390 return err;
1391
1392 if (!devx_is_valid_obj_id(uobj, cmd_in))
1393 return -EINVAL;
1394
1395 fd_uobj = uverbs_attr_get_uobject(attrs,
1396 MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_FD);
1397 if (IS_ERR(fd_uobj))
1398 return PTR_ERR(fd_uobj);
1399
1400 ev_file = container_of(fd_uobj, struct devx_async_cmd_event_file,
1401 uobj);
1402
1403 if (atomic_add_return(cmd_out_len, &ev_file->ev_queue.bytes_in_use) >
1404 MAX_ASYNC_BYTES_IN_USE) {
1405 atomic_sub(cmd_out_len, &ev_file->ev_queue.bytes_in_use);
1406 return -EAGAIN;
1407 }
1408
1409 async_data = kvzalloc(struct_size(async_data, hdr.out_data,
1410 cmd_out_len), GFP_KERNEL);
1411 if (!async_data) {
1412 err = -ENOMEM;
1413 goto sub_bytes;
1414 }
1415
1416 err = uverbs_copy_from(&async_data->hdr.wr_id, attrs,
1417 MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_WR_ID);
1418 if (err)
1419 goto free_async;
1420
1421 async_data->cmd_out_len = cmd_out_len;
1422 async_data->mdev = mdev;
1423 async_data->fd_uobj = fd_uobj;
1424
1425 get_file(fd_uobj->object);
1426 MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
1427 err = mlx5_cmd_exec_cb(&ev_file->async_ctx, cmd_in,
1428 uverbs_attr_get_len(attrs,
1429 MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_CMD_IN),
1430 async_data->hdr.out_data,
1431 async_data->cmd_out_len,
1432 devx_query_callback, &async_data->cb_work);
1433
1434 if (err)
1435 goto cb_err;
1436
1437 return 0;
1438
1439cb_err:
1440 fput(fd_uobj->object);
1441free_async:
1442 kvfree(async_data);
1443sub_bytes:
1444 atomic_sub(cmd_out_len, &ev_file->ev_queue.bytes_in_use);
1445 return err;
1446}
1447
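/*
 * Pull the address/length/access attributes for UMEM registration,
 * validate the access flags, pin the user memory with ib_umem_get() and
 * derive the page shift, compound page count (ncont) and offset into the
 * first page that the CREATE_UMEM command will need.
 */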
Yishai Hadasaeae9452018-06-17 13:00:04 +03001448static int devx_umem_get(struct mlx5_ib_dev *dev, struct ib_ucontext *ucontext,
1449 struct uverbs_attr_bundle *attrs,
1450 struct devx_umem *obj)
1451{
1452 u64 addr;
1453 size_t size;
Jason Gunthorpebccd0622018-07-26 16:37:14 -06001454 u32 access;
Yishai Hadasaeae9452018-06-17 13:00:04 +03001455 int npages;
1456 int err;
1457 u32 page_mask;
1458
1459 if (uverbs_copy_from(&addr, attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_ADDR) ||
Jason Gunthorpebccd0622018-07-26 16:37:14 -06001460 uverbs_copy_from(&size, attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_LEN))
Yishai Hadasaeae9452018-06-17 13:00:04 +03001461 return -EFAULT;
1462
Jason Gunthorpebccd0622018-07-26 16:37:14 -06001463 err = uverbs_get_flags32(&access, attrs,
1464 MLX5_IB_ATTR_DEVX_UMEM_REG_ACCESS,
Yishai Hadas47f07f02018-12-05 15:50:21 +02001465 IB_ACCESS_LOCAL_WRITE |
1466 IB_ACCESS_REMOTE_WRITE |
1467 IB_ACCESS_REMOTE_READ);
Jason Gunthorpebccd0622018-07-26 16:37:14 -06001468 if (err)
1469 return err;
1470
Yishai Hadasaeae9452018-06-17 13:00:04 +03001471 err = ib_check_mr_access(access);
1472 if (err)
1473 return err;
1474
Jason Gunthorpeb0ea0fa2019-01-09 11:15:16 +02001475 obj->umem = ib_umem_get(&attrs->driver_udata, addr, size, access, 0);
Yishai Hadasaeae9452018-06-17 13:00:04 +03001476 if (IS_ERR(obj->umem))
1477 return PTR_ERR(obj->umem);
1478
1479 mlx5_ib_cont_pages(obj->umem, obj->umem->address,
1480 MLX5_MKEY_PAGE_SHIFT_MASK, &npages,
1481 &obj->page_shift, &obj->ncont, NULL);
1482
1483 if (!npages) {
1484 ib_umem_release(obj->umem);
1485 return -EINVAL;
1486 }
1487
1488 page_mask = (1 << obj->page_shift) - 1;
1489 obj->page_offset = obj->umem->address & page_mask;
1490
1491 return 0;
1492}
1493
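/*
 * The CREATE_UMEM inbox is the fixed command layout followed by one MTT
 * entry per compound page; allocate it from the attrs bundle so it is
 * released automatically when the method completes.
 */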
Jason Gunthorpeb61815e2018-08-09 20:14:41 -06001494static int devx_umem_reg_cmd_alloc(struct uverbs_attr_bundle *attrs,
1495 struct devx_umem *obj,
Yishai Hadasaeae9452018-06-17 13:00:04 +03001496 struct devx_umem_reg_cmd *cmd)
1497{
1498 cmd->inlen = MLX5_ST_SZ_BYTES(create_umem_in) +
1499 (MLX5_ST_SZ_BYTES(mtt) * obj->ncont);
Jason Gunthorpeb61815e2018-08-09 20:14:41 -06001500 cmd->in = uverbs_zalloc(attrs, cmd->inlen);
1501 return PTR_ERR_OR_ZERO(cmd->in);
Yishai Hadasaeae9452018-06-17 13:00:04 +03001502}
1503
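/*
 * Fill the CREATE_UMEM command: opcode, number of MTT entries, log page
 * size relative to the adapter page, page offset, and the MTT array
 * itself via mlx5_ib_populate_pas() with read/write permissions taken
 * from the umem.
 */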
1504static void devx_umem_reg_cmd_build(struct mlx5_ib_dev *dev,
1505 struct devx_umem *obj,
1506 struct devx_umem_reg_cmd *cmd)
1507{
1508 void *umem;
1509 __be64 *mtt;
1510
1511 umem = MLX5_ADDR_OF(create_umem_in, cmd->in, umem);
1512 mtt = (__be64 *)MLX5_ADDR_OF(umem, umem, mtt);
1513
Yishai Hadas6e3722b2018-12-19 16:28:15 +02001514 MLX5_SET(create_umem_in, cmd->in, opcode, MLX5_CMD_OP_CREATE_UMEM);
Yishai Hadasaeae9452018-06-17 13:00:04 +03001515 MLX5_SET64(umem, umem, num_of_mtt, obj->ncont);
1516 MLX5_SET(umem, umem, log_page_size, obj->page_shift -
1517 MLX5_ADAPTER_PAGE_SHIFT);
1518 MLX5_SET(umem, umem, page_offset, obj->page_offset);
1519 mlx5_ib_populate_pas(dev, obj->umem, obj->page_shift, mtt,
1520 (obj->umem->writable ? MLX5_IB_MTT_WRITE : 0) |
1521 MLX5_IB_MTT_READ);
1522}
1523
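/*
 * MLX5_IB_METHOD_DEVX_UMEM_REG: only contexts that own a DEVX uid may
 * register a umem.  On success the matching destroy command is cached in
 * the object (dinbox/dinlen) and the firmware object id is copied back
 * to userspace.
 */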
Jason Gunthorpee83f0ec2018-07-25 21:40:18 -06001524static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_UMEM_REG)(
Jason Gunthorpe15a1b4b2018-11-25 20:51:15 +02001525 struct uverbs_attr_bundle *attrs)
Yishai Hadasaeae9452018-06-17 13:00:04 +03001526{
Yishai Hadasaeae9452018-06-17 13:00:04 +03001527 struct devx_umem_reg_cmd cmd;
1528 struct devx_umem *obj;
Jason Gunthorpec36ee462018-07-10 20:55:22 -06001529 struct ib_uobject *uobj = uverbs_attr_get_uobject(
1530 attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_HANDLE);
Yishai Hadasaeae9452018-06-17 13:00:04 +03001531 u32 obj_id;
Jason Gunthorpec36ee462018-07-10 20:55:22 -06001532 struct mlx5_ib_ucontext *c = to_mucontext(uobj->context);
1533 struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
Yishai Hadasaeae9452018-06-17 13:00:04 +03001534 int err;
1535
1536 if (!c->devx_uid)
Yishai Hadas7e1335a2018-09-20 21:45:20 +03001537 return -EINVAL;
1538
Yishai Hadasaeae9452018-06-17 13:00:04 +03001539 obj = kzalloc(sizeof(struct devx_umem), GFP_KERNEL);
1540 if (!obj)
1541 return -ENOMEM;
1542
1543 err = devx_umem_get(dev, &c->ibucontext, attrs, obj);
1544 if (err)
1545 goto err_obj_free;
1546
Jason Gunthorpeb61815e2018-08-09 20:14:41 -06001547 err = devx_umem_reg_cmd_alloc(attrs, obj, &cmd);
Yishai Hadasaeae9452018-06-17 13:00:04 +03001548 if (err)
1549 goto err_umem_release;
1550
1551 devx_umem_reg_cmd_build(dev, obj, &cmd);
1552
Yishai Hadas6e3722b2018-12-19 16:28:15 +02001553 MLX5_SET(create_umem_in, cmd.in, uid, c->devx_uid);
Yishai Hadasaeae9452018-06-17 13:00:04 +03001554 err = mlx5_cmd_exec(dev->mdev, cmd.in, cmd.inlen, cmd.out,
1555 sizeof(cmd.out));
1556 if (err)
Jason Gunthorpeb61815e2018-08-09 20:14:41 -06001557 goto err_umem_release;
Yishai Hadasaeae9452018-06-17 13:00:04 +03001558
1559 obj->mdev = dev->mdev;
1560 uobj->object = obj;
1561 devx_obj_build_destroy_cmd(cmd.in, cmd.out, obj->dinbox, &obj->dinlen, &obj_id);
1562 err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_OUT_ID, &obj_id, sizeof(obj_id));
1563 if (err)
1564 goto err_umem_destroy;
1565
Yishai Hadasaeae9452018-06-17 13:00:04 +03001566 return 0;
1567
1568err_umem_destroy:
1569 mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, cmd.out, sizeof(cmd.out));
Yishai Hadasaeae9452018-06-17 13:00:04 +03001570err_umem_release:
1571 ib_umem_release(obj->umem);
1572err_obj_free:
1573 kfree(obj);
1574 return err;
1575}
1576
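/*
 * Destroy path for a DEVX umem: replay the cached destroy command and,
 * unless the failure is retryable, release the pinned memory and free
 * the object.
 */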
Yishai Hadasaeae9452018-06-17 13:00:04 +03001577static int devx_umem_cleanup(struct ib_uobject *uobject,
1578 enum rdma_remove_reason why)
1579{
1580 struct devx_umem *obj = uobject->object;
1581 u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
1582 int err;
1583
1584 err = mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, out, sizeof(out));
Yishai Hadas1c774832018-06-20 17:11:39 +03001585 if (ib_is_destroy_retryable(err, why, uobject))
Yishai Hadasaeae9452018-06-17 13:00:04 +03001586 return err;
1587
1588 ib_umem_release(obj->umem);
1589 kfree(obj);
1590 return 0;
1591}
1592
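/*
 * read() on the async command FD: block (unless O_NONBLOCK) until a
 * completion is queued or the queue is torn down (-EIO once destroyed
 * and drained), require the caller's buffer to hold the whole
 * header-plus-data event (-ENOSPC otherwise), copy it out and return the
 * cmd_out_len credit to bytes_in_use.
 */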
Yishai Hadas6bf8f222019-01-22 08:29:56 +02001593static ssize_t devx_async_cmd_event_read(struct file *filp, char __user *buf,
1594 size_t count, loff_t *pos)
1595{
Yishai Hadas4accbb32019-01-22 08:29:58 +02001596 struct devx_async_cmd_event_file *comp_ev_file = filp->private_data;
1597 struct devx_async_event_queue *ev_queue = &comp_ev_file->ev_queue;
1598 struct devx_async_data *event;
1599 int ret = 0;
1600 size_t eventsz;
1601
1602 spin_lock_irq(&ev_queue->lock);
1603
1604 while (list_empty(&ev_queue->event_list)) {
1605 spin_unlock_irq(&ev_queue->lock);
1606
1607 if (filp->f_flags & O_NONBLOCK)
1608 return -EAGAIN;
1609
1610 if (wait_event_interruptible(
1611 ev_queue->poll_wait,
Yishai Hadaseaebaf72019-01-22 08:29:59 +02001612 (!list_empty(&ev_queue->event_list) ||
1613 ev_queue->is_destroyed))) {
Yishai Hadas4accbb32019-01-22 08:29:58 +02001614 return -ERESTARTSYS;
1615 }
Yishai Hadaseaebaf72019-01-22 08:29:59 +02001616
1617 if (list_empty(&ev_queue->event_list) &&
1618 ev_queue->is_destroyed)
1619 return -EIO;
1620
Yishai Hadas4accbb32019-01-22 08:29:58 +02001621 spin_lock_irq(&ev_queue->lock);
1622 }
1623
1624 event = list_entry(ev_queue->event_list.next,
1625 struct devx_async_data, list);
1626 eventsz = event->cmd_out_len +
1627 sizeof(struct mlx5_ib_uapi_devx_async_cmd_hdr);
1628
1629 if (eventsz > count) {
1630 spin_unlock_irq(&ev_queue->lock);
1631 return -ENOSPC;
1632 }
1633
1634 list_del(ev_queue->event_list.next);
1635 spin_unlock_irq(&ev_queue->lock);
1636
1637 if (copy_to_user(buf, &event->hdr, eventsz))
1638 ret = -EFAULT;
1639 else
1640 ret = eventsz;
1641
1642 atomic_sub(event->cmd_out_len, &ev_queue->bytes_in_use);
1643 kvfree(event);
1644 return ret;
Yishai Hadas6bf8f222019-01-22 08:29:56 +02001645}
1646
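/*
 * release() for the async command FD: free any completions that were
 * never read, then let uverbs_close_fd() drop the uobject.
 */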
1647static int devx_async_cmd_event_close(struct inode *inode, struct file *filp)
1648{
Yishai Hadasa124edb2019-01-22 08:29:57 +02001649 struct ib_uobject *uobj = filp->private_data;
1650 struct devx_async_cmd_event_file *comp_ev_file = container_of(
1651 uobj, struct devx_async_cmd_event_file, uobj);
1652 struct devx_async_data *entry, *tmp;
1653
1654 spin_lock_irq(&comp_ev_file->ev_queue.lock);
1655 list_for_each_entry_safe(entry, tmp,
1656 &comp_ev_file->ev_queue.event_list, list)
1657 kvfree(entry);
1658 spin_unlock_irq(&comp_ev_file->ev_queue.lock);
1659
Yishai Hadas6bf8f222019-01-22 08:29:56 +02001660 uverbs_close_fd(filp);
1661 return 0;
1662}
1663
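/*
 * poll() reports readable when completions are pending; once the queue
 * has been marked destroyed it also raises EPOLLRDHUP so pollers notice
 * the teardown.
 */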
1664static __poll_t devx_async_cmd_event_poll(struct file *filp,
1665 struct poll_table_struct *wait)
1666{
Yishai Hadas4accbb32019-01-22 08:29:58 +02001667 struct devx_async_cmd_event_file *comp_ev_file = filp->private_data;
1668 struct devx_async_event_queue *ev_queue = &comp_ev_file->ev_queue;
1669 __poll_t pollflags = 0;
1670
1671 poll_wait(filp, &ev_queue->poll_wait, wait);
1672
1673 spin_lock_irq(&ev_queue->lock);
Yishai Hadaseaebaf72019-01-22 08:29:59 +02001674 if (ev_queue->is_destroyed)
1675 pollflags = EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
1676 else if (!list_empty(&ev_queue->event_list))
Yishai Hadas4accbb32019-01-22 08:29:58 +02001677 pollflags = EPOLLIN | EPOLLRDNORM;
1678 spin_unlock_irq(&ev_queue->lock);
1679
1680 return pollflags;
Yishai Hadas6bf8f222019-01-22 08:29:56 +02001681}
1682
1683const struct file_operations devx_async_cmd_event_fops = {
1684 .owner = THIS_MODULE,
1685 .read = devx_async_cmd_event_read,
1686 .poll = devx_async_cmd_event_poll,
1687 .release = devx_async_cmd_event_close,
1688 .llseek = no_llseek,
1689};
1690
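/*
 * Called when the FD uobject is being removed: mark the queue destroyed
 * so readers and pollers bail out, wake blocked readers on driver
 * removal, and drain the async command context so no completion callback
 * runs after teardown.
 */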
1691static int devx_hot_unplug_async_cmd_event_file(struct ib_uobject *uobj,
1692 enum rdma_remove_reason why)
1693{
Yishai Hadasa124edb2019-01-22 08:29:57 +02001694 struct devx_async_cmd_event_file *comp_ev_file =
1695 container_of(uobj, struct devx_async_cmd_event_file,
1696 uobj);
Yishai Hadaseaebaf72019-01-22 08:29:59 +02001697 struct devx_async_event_queue *ev_queue = &comp_ev_file->ev_queue;
1698
1699 spin_lock_irq(&ev_queue->lock);
1700 ev_queue->is_destroyed = 1;
1701 spin_unlock_irq(&ev_queue->lock);
1702
1703 if (why == RDMA_REMOVE_DRIVER_REMOVE)
1704 wake_up_interruptible(&ev_queue->poll_wait);
Yishai Hadasa124edb2019-01-22 08:29:57 +02001705
1706 mlx5_cmd_cleanup_async_ctx(&comp_ev_file->async_ctx);
Yishai Hadas6bf8f222019-01-22 08:29:56 +02001707 return 0;
1708};
1709
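/*
 * ioctl() interface description: the DECLARE_UVERBS_* blocks below
 * define the attribute layout of each DEVX method and attach the methods
 * to their uverbs objects.
 */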
Jason Gunthorpe9a119cd2018-07-04 08:50:28 +03001710DECLARE_UVERBS_NAMED_METHOD(
1711 MLX5_IB_METHOD_DEVX_UMEM_REG,
1712 UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_UMEM_REG_HANDLE,
1713 MLX5_IB_OBJECT_DEVX_UMEM,
1714 UVERBS_ACCESS_NEW,
Jason Gunthorpe83bb4442018-07-04 08:50:29 +03001715 UA_MANDATORY),
Jason Gunthorpe9a119cd2018-07-04 08:50:28 +03001716 UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_ADDR,
1717 UVERBS_ATTR_TYPE(u64),
Jason Gunthorpe83bb4442018-07-04 08:50:29 +03001718 UA_MANDATORY),
Jason Gunthorpe9a119cd2018-07-04 08:50:28 +03001719 UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_LEN,
1720 UVERBS_ATTR_TYPE(u64),
Jason Gunthorpe83bb4442018-07-04 08:50:29 +03001721 UA_MANDATORY),
Jason Gunthorpebccd0622018-07-26 16:37:14 -06001722 UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_ACCESS,
1723 enum ib_access_flags),
Jason Gunthorpe9a119cd2018-07-04 08:50:28 +03001724 UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DEVX_UMEM_REG_OUT_ID,
1725 UVERBS_ATTR_TYPE(u32),
Jason Gunthorpe83bb4442018-07-04 08:50:29 +03001726 UA_MANDATORY));
Yishai Hadasaeae9452018-06-17 13:00:04 +03001727
Yishai Hadas528922a2018-07-08 13:24:39 +03001728DECLARE_UVERBS_NAMED_METHOD_DESTROY(
Jason Gunthorpe9a119cd2018-07-04 08:50:28 +03001729 MLX5_IB_METHOD_DEVX_UMEM_DEREG,
1730 UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_UMEM_DEREG_HANDLE,
1731 MLX5_IB_OBJECT_DEVX_UMEM,
1732 UVERBS_ACCESS_DESTROY,
Jason Gunthorpe83bb4442018-07-04 08:50:29 +03001733 UA_MANDATORY));
Yishai Hadasaeae9452018-06-17 13:00:04 +03001734
Jason Gunthorpe9a119cd2018-07-04 08:50:28 +03001735DECLARE_UVERBS_NAMED_METHOD(
1736 MLX5_IB_METHOD_DEVX_QUERY_EQN,
1737 UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_QUERY_EQN_USER_VEC,
1738 UVERBS_ATTR_TYPE(u32),
Jason Gunthorpe83bb4442018-07-04 08:50:29 +03001739 UA_MANDATORY),
Jason Gunthorpe9a119cd2018-07-04 08:50:28 +03001740 UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DEVX_QUERY_EQN_DEV_EQN,
1741 UVERBS_ATTR_TYPE(u32),
Jason Gunthorpe83bb4442018-07-04 08:50:29 +03001742 UA_MANDATORY));
Yishai Hadasf6fe01b2018-06-17 13:00:05 +03001743
Jason Gunthorpe9a119cd2018-07-04 08:50:28 +03001744DECLARE_UVERBS_NAMED_METHOD(
1745 MLX5_IB_METHOD_DEVX_QUERY_UAR,
1746 UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_QUERY_UAR_USER_IDX,
1747 UVERBS_ATTR_TYPE(u32),
Jason Gunthorpe83bb4442018-07-04 08:50:29 +03001748 UA_MANDATORY),
Jason Gunthorpe9a119cd2018-07-04 08:50:28 +03001749 UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DEVX_QUERY_UAR_DEV_IDX,
1750 UVERBS_ATTR_TYPE(u32),
Jason Gunthorpe83bb4442018-07-04 08:50:29 +03001751 UA_MANDATORY));
Yishai Hadas7c043e92018-06-17 13:00:03 +03001752
Jason Gunthorpe9a119cd2018-07-04 08:50:28 +03001753DECLARE_UVERBS_NAMED_METHOD(
1754 MLX5_IB_METHOD_DEVX_OTHER,
1755 UVERBS_ATTR_PTR_IN(
1756 MLX5_IB_ATTR_DEVX_OTHER_CMD_IN,
1757 UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
Jason Gunthorpe83bb4442018-07-04 08:50:29 +03001758 UA_MANDATORY,
Jason Gunthorpe83bb4442018-07-04 08:50:29 +03001759 UA_ALLOC_AND_COPY),
Jason Gunthorpe9a119cd2018-07-04 08:50:28 +03001760 UVERBS_ATTR_PTR_OUT(
1761 MLX5_IB_ATTR_DEVX_OTHER_CMD_OUT,
1762 UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
Jason Gunthorpe540cd692018-07-04 08:50:30 +03001763 UA_MANDATORY));
Yishai Hadas8aa8c952018-06-17 13:00:00 +03001764
Jason Gunthorpe9a119cd2018-07-04 08:50:28 +03001765DECLARE_UVERBS_NAMED_METHOD(
1766 MLX5_IB_METHOD_DEVX_OBJ_CREATE,
1767 UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_CREATE_HANDLE,
1768 MLX5_IB_OBJECT_DEVX_OBJ,
1769 UVERBS_ACCESS_NEW,
Jason Gunthorpe83bb4442018-07-04 08:50:29 +03001770 UA_MANDATORY),
Jason Gunthorpe9a119cd2018-07-04 08:50:28 +03001771 UVERBS_ATTR_PTR_IN(
1772 MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_IN,
1773 UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
Jason Gunthorpe83bb4442018-07-04 08:50:29 +03001774 UA_MANDATORY,
Jason Gunthorpe83bb4442018-07-04 08:50:29 +03001775 UA_ALLOC_AND_COPY),
Jason Gunthorpe9a119cd2018-07-04 08:50:28 +03001776 UVERBS_ATTR_PTR_OUT(
1777 MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT,
1778 UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
Jason Gunthorpe540cd692018-07-04 08:50:30 +03001779 UA_MANDATORY));
Yishai Hadas7efce362018-06-17 13:00:01 +03001780
Yishai Hadas528922a2018-07-08 13:24:39 +03001781DECLARE_UVERBS_NAMED_METHOD_DESTROY(
Jason Gunthorpe9a119cd2018-07-04 08:50:28 +03001782 MLX5_IB_METHOD_DEVX_OBJ_DESTROY,
1783 UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_DESTROY_HANDLE,
1784 MLX5_IB_OBJECT_DEVX_OBJ,
1785 UVERBS_ACCESS_DESTROY,
Jason Gunthorpe83bb4442018-07-04 08:50:29 +03001786 UA_MANDATORY));
Yishai Hadas7efce362018-06-17 13:00:01 +03001787
Jason Gunthorpe9a119cd2018-07-04 08:50:28 +03001788DECLARE_UVERBS_NAMED_METHOD(
1789 MLX5_IB_METHOD_DEVX_OBJ_MODIFY,
1790 UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_MODIFY_HANDLE,
Yishai Hadas34613eb2018-11-26 08:28:35 +02001791 UVERBS_IDR_ANY_OBJECT,
Jason Gunthorpe9a119cd2018-07-04 08:50:28 +03001792 UVERBS_ACCESS_WRITE,
Jason Gunthorpe83bb4442018-07-04 08:50:29 +03001793 UA_MANDATORY),
Jason Gunthorpe9a119cd2018-07-04 08:50:28 +03001794 UVERBS_ATTR_PTR_IN(
1795 MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN,
1796 UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
Jason Gunthorpe83bb4442018-07-04 08:50:29 +03001797 UA_MANDATORY,
Jason Gunthorpe83bb4442018-07-04 08:50:29 +03001798 UA_ALLOC_AND_COPY),
Jason Gunthorpe9a119cd2018-07-04 08:50:28 +03001799 UVERBS_ATTR_PTR_OUT(
1800 MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_OUT,
1801 UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
Jason Gunthorpe540cd692018-07-04 08:50:30 +03001802 UA_MANDATORY));
Yishai Hadase662e142018-06-17 13:00:02 +03001803
Jason Gunthorpe9a119cd2018-07-04 08:50:28 +03001804DECLARE_UVERBS_NAMED_METHOD(
1805 MLX5_IB_METHOD_DEVX_OBJ_QUERY,
1806 UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_QUERY_HANDLE,
Yishai Hadas34613eb2018-11-26 08:28:35 +02001807 UVERBS_IDR_ANY_OBJECT,
Jason Gunthorpe9a119cd2018-07-04 08:50:28 +03001808 UVERBS_ACCESS_READ,
Jason Gunthorpe83bb4442018-07-04 08:50:29 +03001809 UA_MANDATORY),
Jason Gunthorpe9a119cd2018-07-04 08:50:28 +03001810 UVERBS_ATTR_PTR_IN(
1811 MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN,
1812 UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
Jason Gunthorpe83bb4442018-07-04 08:50:29 +03001813 UA_MANDATORY,
Jason Gunthorpe83bb4442018-07-04 08:50:29 +03001814 UA_ALLOC_AND_COPY),
Jason Gunthorpe9a119cd2018-07-04 08:50:28 +03001815 UVERBS_ATTR_PTR_OUT(
1816 MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_OUT,
1817 UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
Jason Gunthorpe540cd692018-07-04 08:50:30 +03001818 UA_MANDATORY));
Yishai Hadase662e142018-06-17 13:00:02 +03001819
Yishai Hadasa124edb2019-01-22 08:29:57 +02001820DECLARE_UVERBS_NAMED_METHOD(
1821 MLX5_IB_METHOD_DEVX_OBJ_ASYNC_QUERY,
1822 UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_QUERY_HANDLE,
1823 UVERBS_IDR_ANY_OBJECT,
1824 UVERBS_ACCESS_READ,
1825 UA_MANDATORY),
1826 UVERBS_ATTR_PTR_IN(
1827 MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN,
1828 UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
1829 UA_MANDATORY,
1830 UA_ALLOC_AND_COPY),
1831 UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_OUT_LEN,
1832 u16, UA_MANDATORY),
1833 UVERBS_ATTR_FD(MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_FD,
1834 MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD,
1835 UVERBS_ACCESS_READ,
1836 UA_MANDATORY),
1837 UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_WR_ID,
1838 UVERBS_ATTR_TYPE(u64),
1839 UA_MANDATORY));
1840
Jason Gunthorpe6c61d2a2018-07-04 08:50:27 +03001841DECLARE_UVERBS_GLOBAL_METHODS(MLX5_IB_OBJECT_DEVX,
Jason Gunthorpe9a119cd2018-07-04 08:50:28 +03001842 &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OTHER),
1843 &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_QUERY_UAR),
1844 &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_QUERY_EQN));
Yishai Hadas8aa8c952018-06-17 13:00:00 +03001845
Jason Gunthorpe6c61d2a2018-07-04 08:50:27 +03001846DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_DEVX_OBJ,
Jason Gunthorpe9a119cd2018-07-04 08:50:28 +03001847 UVERBS_TYPE_ALLOC_IDR(devx_obj_cleanup),
1848 &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_CREATE),
1849 &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_DESTROY),
1850 &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_MODIFY),
Yishai Hadasa124edb2019-01-22 08:29:57 +02001851 &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_QUERY),
1852 &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_ASYNC_QUERY));
Yishai Hadas7efce362018-06-17 13:00:01 +03001853
Jason Gunthorpe6c61d2a2018-07-04 08:50:27 +03001854DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_DEVX_UMEM,
Jason Gunthorpe9a119cd2018-07-04 08:50:28 +03001855 UVERBS_TYPE_ALLOC_IDR(devx_umem_cleanup),
1856 &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_UMEM_REG),
1857 &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_UMEM_DEREG));
Yishai Hadasaeae9452018-06-17 13:00:04 +03001858
1860DECLARE_UVERBS_NAMED_METHOD(
1861 MLX5_IB_METHOD_DEVX_ASYNC_CMD_FD_ALLOC,
1862 UVERBS_ATTR_FD(MLX5_IB_ATTR_DEVX_ASYNC_CMD_FD_ALLOC_HANDLE,
1863 MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD,
1864 UVERBS_ACCESS_NEW,
1865 UA_MANDATORY));
1866
1867DECLARE_UVERBS_NAMED_OBJECT(
1868 MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD,
1869 UVERBS_TYPE_ALLOC_FD(sizeof(struct devx_async_cmd_event_file),
1870 devx_hot_unplug_async_cmd_event_file,
1871 &devx_async_cmd_event_fops, "[devx_async_cmd]",
1872 O_RDONLY),
1873 &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_ASYNC_CMD_FD_ALLOC));
1874
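/*
 * DEVX is exposed only on non-representor devices whose firmware reports
 * UCTX support (log_max_uctx != 0).
 */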
Jason Gunthorpe36e235c2018-11-12 22:59:53 +02001875static bool devx_is_supported(struct ib_device *device)
Yishai Hadasc59450c2018-06-17 13:00:06 +03001876{
Jason Gunthorpe36e235c2018-11-12 22:59:53 +02001877 struct mlx5_ib_dev *dev = to_mdev(device);
1878
Yishai Hadas6e3722b2018-12-19 16:28:15 +02001879 return !dev->rep && MLX5_CAP_GEN(dev->mdev, log_max_uctx);
Yishai Hadasc59450c2018-06-17 13:00:06 +03001880}
Jason Gunthorpe36e235c2018-11-12 22:59:53 +02001881
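/*
 * Chained into the mlx5_ib uapi definition list; each DEVX object tree
 * is advertised only when devx_is_supported() returns true for the
 * device.
 */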
Jason Gunthorpe0cbf4322018-11-12 22:59:50 +02001882const struct uapi_definition mlx5_ib_devx_defs[] = {
Jason Gunthorpe36e235c2018-11-12 22:59:53 +02001883 UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
1884 MLX5_IB_OBJECT_DEVX,
1885 UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
1886 UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
1887 MLX5_IB_OBJECT_DEVX_OBJ,
1888 UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
1889 UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
1890 MLX5_IB_OBJECT_DEVX_UMEM,
1891 UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
Yishai Hadas6bf8f222019-01-22 08:29:56 +02001892 UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
1893 MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD,
1894 UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
Jason Gunthorpe0cbf4322018-11-12 22:59:50 +02001895 {},
1896};