// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2018, Mellanox Technologies inc. All rights reserved.
 */

#include <rdma/ib_user_verbs.h>
#include <rdma/ib_verbs.h>
#include <rdma/uverbs_types.h>
#include <rdma/uverbs_ioctl.h>
#include <rdma/mlx5_user_ioctl_cmds.h>
#include <rdma/mlx5_user_ioctl_verbs.h>
#include <rdma/ib_umem.h>
#include <rdma/uverbs_std_types.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/fs.h>
#include "mlx5_ib.h"

#define UVERBS_MODULE_NAME mlx5_ib
#include <rdma/uverbs_named_ioctl.h>

enum devx_obj_flags {
	DEVX_OBJ_FLAGS_INDIRECT_MKEY = 1 << 0,
};

struct devx_async_data {
	struct mlx5_ib_dev *mdev;
	struct list_head list;
	struct ib_uobject *fd_uobj;
	struct mlx5_async_work cb_work;
	u16 cmd_out_len;
	/* must be last field in this structure */
	struct mlx5_ib_uapi_devx_async_cmd_hdr hdr;
};

#define MLX5_MAX_DESTROY_INBOX_SIZE_DW MLX5_ST_SZ_DW(delete_fte_in)
struct devx_obj {
	struct mlx5_core_dev *mdev;
	u64 obj_id;
	u32 dinlen; /* destroy inbox length */
	u32 dinbox[MLX5_MAX_DESTROY_INBOX_SIZE_DW];
	u32 flags;
	struct mlx5_ib_devx_mr devx_mr;
};

struct devx_umem {
	struct mlx5_core_dev *mdev;
	struct ib_umem *umem;
	u32 page_offset;
	int page_shift;
	int ncont;
	u32 dinlen;
	u32 dinbox[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)];
};

struct devx_umem_reg_cmd {
	void *in;
	u32 inlen;
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
};

static struct mlx5_ib_ucontext *
devx_ufile2uctx(const struct uverbs_attr_bundle *attrs)
{
	return to_mucontext(ib_uverbs_get_ucontext(attrs));
}

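/*
 * Allocate a firmware user context (UCTX) for this device and return the
 * firmware-assigned uid that is stamped into subsequent DEVX commands.
 * A negative value is returned on failure.
 */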
int mlx5_ib_devx_create(struct mlx5_ib_dev *dev, bool is_user)
{
	u32 in[MLX5_ST_SZ_DW(create_uctx_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
	void *uctx;
	int err;
	u16 uid;
	u32 cap = 0;

	/* 0 means not supported */
	if (!MLX5_CAP_GEN(dev->mdev, log_max_uctx))
		return -EINVAL;

	uctx = MLX5_ADDR_OF(create_uctx_in, in, uctx);
	if (is_user && capable(CAP_NET_RAW) &&
	    (MLX5_CAP_GEN(dev->mdev, uctx_cap) & MLX5_UCTX_CAP_RAW_TX))
		cap |= MLX5_UCTX_CAP_RAW_TX;

	MLX5_SET(create_uctx_in, in, opcode, MLX5_CMD_OP_CREATE_UCTX);
	MLX5_SET(uctx, uctx, cap, cap);

	err = mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
	if (err)
		return err;

	uid = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
	return uid;
}

void mlx5_ib_devx_destroy(struct mlx5_ib_dev *dev, u16 uid)
{
	u32 in[MLX5_ST_SZ_DW(destroy_uctx_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};

	MLX5_SET(destroy_uctx_in, in, opcode, MLX5_CMD_OP_DESTROY_UCTX);
	MLX5_SET(destroy_uctx_in, in, uid, uid);

	mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
}

bool mlx5_ib_devx_is_flow_dest(void *obj, int *dest_id, int *dest_type)
{
	struct devx_obj *devx_obj = obj;
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, devx_obj->dinbox, opcode);

	switch (opcode) {
	case MLX5_CMD_OP_DESTROY_TIR:
		*dest_type = MLX5_FLOW_DESTINATION_TYPE_TIR;
		*dest_id = MLX5_GET(general_obj_in_cmd_hdr, devx_obj->dinbox,
				    obj_id);
		return true;

	case MLX5_CMD_OP_DESTROY_FLOW_TABLE:
		*dest_type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		*dest_id = MLX5_GET(destroy_flow_table_in, devx_obj->dinbox,
				    table_id);
		return true;
	default:
		return false;
	}
}

bool mlx5_ib_devx_is_flow_counter(void *obj, u32 *counter_id)
{
	struct devx_obj *devx_obj = obj;
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, devx_obj->dinbox, opcode);

	if (opcode == MLX5_CMD_OP_DEALLOC_FLOW_COUNTER) {
		*counter_id = MLX5_GET(dealloc_flow_counter_in,
				       devx_obj->dinbox,
				       flow_counter_id);
		return true;
	}

	return false;
}

/*
 * As the obj_id in the firmware is not globally unique, the object type
 * must be considered when checking for a valid object id.
 * For that, the opcode of the creator command is encoded as part of the obj_id.
 */
static u64 get_enc_obj_id(u16 opcode, u32 obj_id)
{
	return ((u64)opcode << 32) | obj_id;
}

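/*
 * Translate a query/modify command input into the encoded obj_id of the
 * object it targets (creator opcode in the high 32 bits, firmware object
 * id in the low 32 bits). Returns 0 when the opcode is not recognized.
 */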
static u64 devx_get_obj_id(const void *in)
{
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
	u64 obj_id;

	switch (opcode) {
	case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT:
	case MLX5_CMD_OP_QUERY_GENERAL_OBJECT:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_GENERAL_OBJECT,
					MLX5_GET(general_obj_in_cmd_hdr, in,
						 obj_id));
		break;
	case MLX5_CMD_OP_QUERY_MKEY:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_MKEY,
					MLX5_GET(query_mkey_in, in,
						 mkey_index));
		break;
	case MLX5_CMD_OP_QUERY_CQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_CQ,
					MLX5_GET(query_cq_in, in, cqn));
		break;
	case MLX5_CMD_OP_MODIFY_CQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_CQ,
					MLX5_GET(modify_cq_in, in, cqn));
		break;
	case MLX5_CMD_OP_QUERY_SQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SQ,
					MLX5_GET(query_sq_in, in, sqn));
		break;
	case MLX5_CMD_OP_MODIFY_SQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SQ,
					MLX5_GET(modify_sq_in, in, sqn));
		break;
	case MLX5_CMD_OP_QUERY_RQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
					MLX5_GET(query_rq_in, in, rqn));
		break;
	case MLX5_CMD_OP_MODIFY_RQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
					MLX5_GET(modify_rq_in, in, rqn));
		break;
	case MLX5_CMD_OP_QUERY_RMP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RMP,
					MLX5_GET(query_rmp_in, in, rmpn));
		break;
	case MLX5_CMD_OP_MODIFY_RMP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RMP,
					MLX5_GET(modify_rmp_in, in, rmpn));
		break;
	case MLX5_CMD_OP_QUERY_RQT:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQT,
					MLX5_GET(query_rqt_in, in, rqtn));
		break;
	case MLX5_CMD_OP_MODIFY_RQT:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQT,
					MLX5_GET(modify_rqt_in, in, rqtn));
		break;
	case MLX5_CMD_OP_QUERY_TIR:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_TIR,
					MLX5_GET(query_tir_in, in, tirn));
		break;
	case MLX5_CMD_OP_MODIFY_TIR:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_TIR,
					MLX5_GET(modify_tir_in, in, tirn));
		break;
	case MLX5_CMD_OP_QUERY_TIS:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_TIS,
					MLX5_GET(query_tis_in, in, tisn));
		break;
	case MLX5_CMD_OP_MODIFY_TIS:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_TIS,
					MLX5_GET(modify_tis_in, in, tisn));
		break;
	case MLX5_CMD_OP_QUERY_FLOW_TABLE:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_FLOW_TABLE,
					MLX5_GET(query_flow_table_in, in,
						 table_id));
		break;
	case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_FLOW_TABLE,
					MLX5_GET(modify_flow_table_in, in,
						 table_id));
		break;
	case MLX5_CMD_OP_QUERY_FLOW_GROUP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_FLOW_GROUP,
					MLX5_GET(query_flow_group_in, in,
						 group_id));
		break;
	case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY,
					MLX5_GET(query_fte_in, in,
						 flow_index));
		break;
	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY,
					MLX5_GET(set_fte_in, in, flow_index));
		break;
	case MLX5_CMD_OP_QUERY_Q_COUNTER:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_ALLOC_Q_COUNTER,
					MLX5_GET(query_q_counter_in, in,
						 counter_set_id));
		break;
	case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_ALLOC_FLOW_COUNTER,
					MLX5_GET(query_flow_counter_in, in,
						 flow_counter_id));
		break;
	case MLX5_CMD_OP_QUERY_MODIFY_HEADER_CONTEXT:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT,
					MLX5_GET(general_obj_in_cmd_hdr, in,
						 obj_id));
		break;
	case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT,
					MLX5_GET(query_scheduling_element_in,
						 in, scheduling_element_id));
		break;
	case MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT,
					MLX5_GET(modify_scheduling_element_in,
						 in, scheduling_element_id));
		break;
	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT,
					MLX5_GET(add_vxlan_udp_dport_in, in,
						 vxlan_udp_port));
		break;
	case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_SET_L2_TABLE_ENTRY,
					MLX5_GET(query_l2_table_entry_in, in,
						 table_index));
		break;
	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_SET_L2_TABLE_ENTRY,
					MLX5_GET(set_l2_table_entry_in, in,
						 table_index));
		break;
	case MLX5_CMD_OP_QUERY_QP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
					MLX5_GET(query_qp_in, in, qpn));
		break;
	case MLX5_CMD_OP_RST2INIT_QP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
					MLX5_GET(rst2init_qp_in, in, qpn));
		break;
	case MLX5_CMD_OP_INIT2RTR_QP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
					MLX5_GET(init2rtr_qp_in, in, qpn));
		break;
	case MLX5_CMD_OP_RTR2RTS_QP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
					MLX5_GET(rtr2rts_qp_in, in, qpn));
		break;
	case MLX5_CMD_OP_RTS2RTS_QP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
					MLX5_GET(rts2rts_qp_in, in, qpn));
		break;
	case MLX5_CMD_OP_SQERR2RTS_QP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
					MLX5_GET(sqerr2rts_qp_in, in, qpn));
		break;
	case MLX5_CMD_OP_2ERR_QP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
					MLX5_GET(qp_2err_in, in, qpn));
		break;
	case MLX5_CMD_OP_2RST_QP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
					MLX5_GET(qp_2rst_in, in, qpn));
		break;
	case MLX5_CMD_OP_QUERY_DCT:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_DCT,
					MLX5_GET(query_dct_in, in, dctn));
		break;
	case MLX5_CMD_OP_QUERY_XRQ:
	case MLX5_CMD_OP_QUERY_XRQ_DC_PARAMS_ENTRY:
	case MLX5_CMD_OP_QUERY_XRQ_ERROR_PARAMS:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRQ,
					MLX5_GET(query_xrq_in, in, xrqn));
		break;
	case MLX5_CMD_OP_QUERY_XRC_SRQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRC_SRQ,
					MLX5_GET(query_xrc_srq_in, in,
						 xrc_srqn));
		break;
	case MLX5_CMD_OP_ARM_XRC_SRQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRC_SRQ,
					MLX5_GET(arm_xrc_srq_in, in, xrc_srqn));
		break;
	case MLX5_CMD_OP_QUERY_SRQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SRQ,
					MLX5_GET(query_srq_in, in, srqn));
		break;
	case MLX5_CMD_OP_ARM_RQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
					MLX5_GET(arm_rq_in, in, srq_number));
		break;
	case MLX5_CMD_OP_DRAIN_DCT:
	case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_DCT,
					MLX5_GET(drain_dct_in, in, dctn));
		break;
	case MLX5_CMD_OP_ARM_XRQ:
	case MLX5_CMD_OP_SET_XRQ_DC_PARAMS_ENTRY:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRQ,
					MLX5_GET(arm_xrq_in, in, xrqn));
		break;
	case MLX5_CMD_OP_QUERY_PACKET_REFORMAT_CONTEXT:
		obj_id = get_enc_obj_id
				(MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT,
				 MLX5_GET(query_packet_reformat_context_in,
					  in, packet_reformat_id));
		break;
	default:
		obj_id = 0;
	}

	return obj_id;
}

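/*
 * Check that the object id referenced by the command input matches the
 * uobject the command was issued against.
 */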
static bool devx_is_valid_obj_id(struct ib_uobject *uobj, const void *in)
{
	u64 obj_id = devx_get_obj_id(in);

	if (!obj_id)
		return false;

	switch (uobj_get_object_id(uobj)) {
	case UVERBS_OBJECT_CQ:
		return get_enc_obj_id(MLX5_CMD_OP_CREATE_CQ,
				      to_mcq(uobj->object)->mcq.cqn) ==
				      obj_id;

	case UVERBS_OBJECT_SRQ:
	{
		struct mlx5_core_srq *srq = &(to_msrq(uobj->object)->msrq);
		struct mlx5_ib_dev *dev = to_mdev(uobj->context->device);
		u16 opcode;

		switch (srq->common.res) {
		case MLX5_RES_XSRQ:
			opcode = MLX5_CMD_OP_CREATE_XRC_SRQ;
			break;
		case MLX5_RES_XRQ:
			opcode = MLX5_CMD_OP_CREATE_XRQ;
			break;
		default:
			if (!dev->mdev->issi)
				opcode = MLX5_CMD_OP_CREATE_SRQ;
			else
				opcode = MLX5_CMD_OP_CREATE_RMP;
		}

		return get_enc_obj_id(opcode,
				      to_msrq(uobj->object)->msrq.srqn) ==
				      obj_id;
	}

	case UVERBS_OBJECT_QP:
	{
		struct mlx5_ib_qp *qp = to_mqp(uobj->object);
		enum ib_qp_type qp_type = qp->ibqp.qp_type;

		if (qp_type == IB_QPT_RAW_PACKET ||
		    (qp->flags & MLX5_IB_QP_UNDERLAY)) {
			struct mlx5_ib_raw_packet_qp *raw_packet_qp =
							 &qp->raw_packet_qp;
			struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
			struct mlx5_ib_sq *sq = &raw_packet_qp->sq;

			return (get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
					       rq->base.mqp.qpn) == obj_id ||
				get_enc_obj_id(MLX5_CMD_OP_CREATE_SQ,
					       sq->base.mqp.qpn) == obj_id ||
				get_enc_obj_id(MLX5_CMD_OP_CREATE_TIR,
					       rq->tirn) == obj_id ||
				get_enc_obj_id(MLX5_CMD_OP_CREATE_TIS,
					       sq->tisn) == obj_id);
		}

		if (qp_type == MLX5_IB_QPT_DCT)
			return get_enc_obj_id(MLX5_CMD_OP_CREATE_DCT,
					      qp->dct.mdct.mqp.qpn) == obj_id;

		return get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
				      qp->ibqp.qp_num) == obj_id;
	}

	case UVERBS_OBJECT_WQ:
		return get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
				      to_mrwq(uobj->object)->core_qp.qpn) ==
				      obj_id;

	case UVERBS_OBJECT_RWQ_IND_TBL:
		return get_enc_obj_id(MLX5_CMD_OP_CREATE_RQT,
				      to_mrwq_ind_table(uobj->object)->rqtn) ==
				      obj_id;

	case MLX5_IB_OBJECT_DEVX_OBJ:
		return ((struct devx_obj *)uobj->object)->obj_id == obj_id;

	default:
		return false;
	}
}

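/*
 * Mark the umem and doorbell-record fields of a create/modify mailbox as
 * valid so the firmware will honor the umem handles supplied by the caller.
 */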
static void devx_set_umem_valid(const void *in)
{
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);

	switch (opcode) {
	case MLX5_CMD_OP_CREATE_MKEY:
		MLX5_SET(create_mkey_in, in, mkey_umem_valid, 1);
		break;
	case MLX5_CMD_OP_CREATE_CQ:
	{
		void *cqc;

		MLX5_SET(create_cq_in, in, cq_umem_valid, 1);
		cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
		MLX5_SET(cqc, cqc, dbr_umem_valid, 1);
		break;
	}
	case MLX5_CMD_OP_CREATE_QP:
	{
		void *qpc;

		qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
		MLX5_SET(qpc, qpc, dbr_umem_valid, 1);
		MLX5_SET(create_qp_in, in, wq_umem_valid, 1);
		break;
	}

	case MLX5_CMD_OP_CREATE_RQ:
	{
		void *rqc, *wq;

		rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
		wq = MLX5_ADDR_OF(rqc, rqc, wq);
		MLX5_SET(wq, wq, dbr_umem_valid, 1);
		MLX5_SET(wq, wq, wq_umem_valid, 1);
		break;
	}

	case MLX5_CMD_OP_CREATE_SQ:
	{
		void *sqc, *wq;

		sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
		wq = MLX5_ADDR_OF(sqc, sqc, wq);
		MLX5_SET(wq, wq, dbr_umem_valid, 1);
		MLX5_SET(wq, wq, wq_umem_valid, 1);
		break;
	}

	case MLX5_CMD_OP_MODIFY_CQ:
		MLX5_SET(modify_cq_in, in, cq_umem_valid, 1);
		break;

	case MLX5_CMD_OP_CREATE_RMP:
	{
		void *rmpc, *wq;

		rmpc = MLX5_ADDR_OF(create_rmp_in, in, ctx);
		wq = MLX5_ADDR_OF(rmpc, rmpc, wq);
		MLX5_SET(wq, wq, dbr_umem_valid, 1);
		MLX5_SET(wq, wq, wq_umem_valid, 1);
		break;
	}

	case MLX5_CMD_OP_CREATE_XRQ:
	{
		void *xrqc, *wq;

		xrqc = MLX5_ADDR_OF(create_xrq_in, in, xrq_context);
		wq = MLX5_ADDR_OF(xrqc, xrqc, wq);
		MLX5_SET(wq, wq, dbr_umem_valid, 1);
		MLX5_SET(wq, wq, wq_umem_valid, 1);
		break;
	}

	case MLX5_CMD_OP_CREATE_XRC_SRQ:
	{
		void *xrc_srqc;

		MLX5_SET(create_xrc_srq_in, in, xrc_srq_umem_valid, 1);
		xrc_srqc = MLX5_ADDR_OF(create_xrc_srq_in, in,
					xrc_srq_context_entry);
		MLX5_SET(xrc_srqc, xrc_srqc, dbr_umem_valid, 1);
		break;
	}

	default:
		return;
	}
}

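/*
 * Command classifiers: decide whether a DEVX mailbox creates, modifies or
 * queries an object. Create commands also report their opcode so the
 * matching destroy command can be prepared.
 */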
static bool devx_is_obj_create_cmd(const void *in, u16 *opcode)
{
	*opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);

	switch (*opcode) {
	case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
	case MLX5_CMD_OP_CREATE_MKEY:
	case MLX5_CMD_OP_CREATE_CQ:
	case MLX5_CMD_OP_ALLOC_PD:
	case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
	case MLX5_CMD_OP_CREATE_RMP:
	case MLX5_CMD_OP_CREATE_SQ:
	case MLX5_CMD_OP_CREATE_RQ:
	case MLX5_CMD_OP_CREATE_RQT:
	case MLX5_CMD_OP_CREATE_TIR:
	case MLX5_CMD_OP_CREATE_TIS:
	case MLX5_CMD_OP_ALLOC_Q_COUNTER:
	case MLX5_CMD_OP_CREATE_FLOW_TABLE:
	case MLX5_CMD_OP_CREATE_FLOW_GROUP:
	case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
	case MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT:
	case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT:
	case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_CREATE_QP:
	case MLX5_CMD_OP_CREATE_SRQ:
	case MLX5_CMD_OP_CREATE_XRC_SRQ:
	case MLX5_CMD_OP_CREATE_DCT:
	case MLX5_CMD_OP_CREATE_XRQ:
	case MLX5_CMD_OP_ATTACH_TO_MCG:
	case MLX5_CMD_OP_ALLOC_XRCD:
		return true;
	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
	{
		u16 op_mod = MLX5_GET(set_fte_in, in, op_mod);

		if (op_mod == 0)
			return true;
		return false;
	}
	default:
		return false;
	}
}

static bool devx_is_obj_modify_cmd(const void *in)
{
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);

	switch (opcode) {
	case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT:
	case MLX5_CMD_OP_MODIFY_CQ:
	case MLX5_CMD_OP_MODIFY_RMP:
	case MLX5_CMD_OP_MODIFY_SQ:
	case MLX5_CMD_OP_MODIFY_RQ:
	case MLX5_CMD_OP_MODIFY_RQT:
	case MLX5_CMD_OP_MODIFY_TIR:
	case MLX5_CMD_OP_MODIFY_TIS:
	case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
	case MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT:
	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_RST2INIT_QP:
	case MLX5_CMD_OP_INIT2RTR_QP:
	case MLX5_CMD_OP_RTR2RTS_QP:
	case MLX5_CMD_OP_RTS2RTS_QP:
	case MLX5_CMD_OP_SQERR2RTS_QP:
	case MLX5_CMD_OP_2ERR_QP:
	case MLX5_CMD_OP_2RST_QP:
	case MLX5_CMD_OP_ARM_XRC_SRQ:
	case MLX5_CMD_OP_ARM_RQ:
	case MLX5_CMD_OP_DRAIN_DCT:
	case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
	case MLX5_CMD_OP_ARM_XRQ:
	case MLX5_CMD_OP_SET_XRQ_DC_PARAMS_ENTRY:
		return true;
	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
	{
		u16 op_mod = MLX5_GET(set_fte_in, in, op_mod);

		if (op_mod == 1)
			return true;
		return false;
	}
	default:
		return false;
	}
}

static bool devx_is_obj_query_cmd(const void *in)
{
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);

	switch (opcode) {
	case MLX5_CMD_OP_QUERY_GENERAL_OBJECT:
	case MLX5_CMD_OP_QUERY_MKEY:
	case MLX5_CMD_OP_QUERY_CQ:
	case MLX5_CMD_OP_QUERY_RMP:
	case MLX5_CMD_OP_QUERY_SQ:
	case MLX5_CMD_OP_QUERY_RQ:
	case MLX5_CMD_OP_QUERY_RQT:
	case MLX5_CMD_OP_QUERY_TIR:
	case MLX5_CMD_OP_QUERY_TIS:
	case MLX5_CMD_OP_QUERY_Q_COUNTER:
	case MLX5_CMD_OP_QUERY_FLOW_TABLE:
	case MLX5_CMD_OP_QUERY_FLOW_GROUP:
	case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
	case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
	case MLX5_CMD_OP_QUERY_MODIFY_HEADER_CONTEXT:
	case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
	case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_QUERY_QP:
	case MLX5_CMD_OP_QUERY_SRQ:
	case MLX5_CMD_OP_QUERY_XRC_SRQ:
	case MLX5_CMD_OP_QUERY_DCT:
	case MLX5_CMD_OP_QUERY_XRQ:
	case MLX5_CMD_OP_QUERY_XRQ_DC_PARAMS_ENTRY:
	case MLX5_CMD_OP_QUERY_XRQ_ERROR_PARAMS:
	case MLX5_CMD_OP_QUERY_PACKET_REFORMAT_CONTEXT:
		return true;
	default:
		return false;
	}
}

static bool devx_is_whitelist_cmd(void *in)
{
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);

	switch (opcode) {
	case MLX5_CMD_OP_QUERY_HCA_CAP:
	case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
		return true;
	default:
		return false;
	}
}

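/*
 * Resolve the uid to stamp into the command: the context's devx uid when it
 * has one, or the device whitelist uid for whitelisted commands. Returns a
 * negative errno when no suitable uid exists.
 */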
static int devx_get_uid(struct mlx5_ib_ucontext *c, void *cmd_in)
{
	if (devx_is_whitelist_cmd(cmd_in)) {
		struct mlx5_ib_dev *dev;

		if (c->devx_uid)
			return c->devx_uid;

		dev = to_mdev(c->ibucontext.device);
		if (dev->devx_whitelist_uid)
			return dev->devx_whitelist_uid;

		return -EOPNOTSUPP;
	}

	if (!c->devx_uid)
		return -EINVAL;

	return c->devx_uid;
}

static bool devx_is_general_cmd(void *in)
{
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);

	if (opcode >= MLX5_CMD_OP_GENERAL_START &&
	    opcode < MLX5_CMD_OP_GENERAL_END)
		return true;

	switch (opcode) {
	case MLX5_CMD_OP_QUERY_HCA_CAP:
	case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_VPORT_STATE:
	case MLX5_CMD_OP_QUERY_ADAPTER:
	case MLX5_CMD_OP_QUERY_ISSI:
	case MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_ROCE_ADDRESS:
	case MLX5_CMD_OP_QUERY_VNIC_ENV:
	case MLX5_CMD_OP_QUERY_VPORT_COUNTER:
	case MLX5_CMD_OP_GET_DROPPED_PACKET_LOG:
	case MLX5_CMD_OP_NOP:
	case MLX5_CMD_OP_QUERY_CONG_STATUS:
	case MLX5_CMD_OP_QUERY_CONG_PARAMS:
	case MLX5_CMD_OP_QUERY_CONG_STATISTICS:
		return true;
	default:
		return false;
	}
}

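/* Translate a user completion vector number into the device EQ number. */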
static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_QUERY_EQN)(
	struct uverbs_attr_bundle *attrs)
{
	struct mlx5_ib_ucontext *c;
	struct mlx5_ib_dev *dev;
	int user_vector;
	int dev_eqn;
	unsigned int irqn;
	int err;

	if (uverbs_copy_from(&user_vector, attrs,
			     MLX5_IB_ATTR_DEVX_QUERY_EQN_USER_VEC))
		return -EFAULT;

	c = devx_ufile2uctx(attrs);
	if (IS_ERR(c))
		return PTR_ERR(c);
	dev = to_mdev(c->ibucontext.device);

	err = mlx5_vector2eqn(dev->mdev, user_vector, &dev_eqn, &irqn);
	if (err < 0)
		return err;

	if (uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_QUERY_EQN_DEV_EQN,
			   &dev_eqn, sizeof(dev_eqn)))
		return -EFAULT;

	return 0;
}

/*
 * Security note:
 * The hardware protection mechanism works like this: each device object that
 * is subject to UAR doorbells (QP/SQ/CQ) gets a UAR ID (called uar_page in
 * the device specification manual) upon its creation. Then upon doorbell,
 * hardware fetches the object context for which the doorbell was rung, and
 * validates that the UAR through which the DB was rung matches the UAR ID
 * of the object.
 * If there is no match, the doorbell is silently ignored by the hardware. Of
 * course, the user cannot ring a doorbell on a UAR that was not mapped to it.
 * Now in devx, as the devx kernel does not manipulate the QP/SQ/CQ command
 * mailboxes (except tagging them with UID), we expose to the user its UAR
 * ID, so it can embed it in these objects in the expected specification
 * format. So the only thing the user can do is hurt itself by creating a
 * QP/SQ/CQ with a UAR ID other than its own, in which case other users
 * may ring a doorbell on its objects.
 * The consequence of that will be that another user can schedule a QP/SQ
 * of the buggy user for execution (just insert it into the hardware schedule
 * queue or arm its CQ for event generation); no further harm is expected.
 */
static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_QUERY_UAR)(
	struct uverbs_attr_bundle *attrs)
{
	struct mlx5_ib_ucontext *c;
	struct mlx5_ib_dev *dev;
	u32 user_idx;
	s32 dev_idx;

	c = devx_ufile2uctx(attrs);
	if (IS_ERR(c))
		return PTR_ERR(c);
	dev = to_mdev(c->ibucontext.device);

	if (uverbs_copy_from(&user_idx, attrs,
			     MLX5_IB_ATTR_DEVX_QUERY_UAR_USER_IDX))
		return -EFAULT;

	dev_idx = bfregn_to_uar_index(dev, &c->bfregi, user_idx, true);
	if (dev_idx < 0)
		return dev_idx;

	if (uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_QUERY_UAR_DEV_IDX,
			   &dev_idx, sizeof(dev_idx)))
		return -EFAULT;

	return 0;
}

static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OTHER)(
	struct uverbs_attr_bundle *attrs)
{
	struct mlx5_ib_ucontext *c;
	struct mlx5_ib_dev *dev;
	void *cmd_in = uverbs_attr_get_alloced_ptr(
		attrs, MLX5_IB_ATTR_DEVX_OTHER_CMD_IN);
	int cmd_out_len = uverbs_attr_get_len(attrs,
					      MLX5_IB_ATTR_DEVX_OTHER_CMD_OUT);
	void *cmd_out;
	int err;
	int uid;

	c = devx_ufile2uctx(attrs);
	if (IS_ERR(c))
		return PTR_ERR(c);
	dev = to_mdev(c->ibucontext.device);

	uid = devx_get_uid(c, cmd_in);
	if (uid < 0)
		return uid;

	/* Only a whitelist of general HCA commands is allowed for this method. */
	if (!devx_is_general_cmd(cmd_in))
		return -EINVAL;

	cmd_out = uverbs_zalloc(attrs, cmd_out_len);
	if (IS_ERR(cmd_out))
		return PTR_ERR(cmd_out);

	MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
	err = mlx5_cmd_exec(dev->mdev, cmd_in,
			    uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OTHER_CMD_IN),
			    cmd_out, cmd_out_len);
	if (err)
		return err;

	return uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OTHER_CMD_OUT, cmd_out,
			      cmd_out_len);
}

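/*
 * Build the destroy mailbox that matches a successful create command so the
 * object can be torn down later from devx_obj_cleanup().
 */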
static void devx_obj_build_destroy_cmd(void *in, void *out, void *din,
				       u32 *dinlen,
				       u32 *obj_id)
{
	u16 obj_type = MLX5_GET(general_obj_in_cmd_hdr, in, obj_type);
	u16 uid = MLX5_GET(general_obj_in_cmd_hdr, in, uid);

	*obj_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
	*dinlen = MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr);

	MLX5_SET(general_obj_in_cmd_hdr, din, obj_id, *obj_id);
	MLX5_SET(general_obj_in_cmd_hdr, din, uid, uid);

	switch (MLX5_GET(general_obj_in_cmd_hdr, in, opcode)) {
	case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
		MLX5_SET(general_obj_in_cmd_hdr, din, obj_type, obj_type);
		break;

	case MLX5_CMD_OP_CREATE_UMEM:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DESTROY_UMEM);
		break;
	case MLX5_CMD_OP_CREATE_MKEY:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_MKEY);
		break;
	case MLX5_CMD_OP_CREATE_CQ:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_CQ);
		break;
	case MLX5_CMD_OP_ALLOC_PD:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DEALLOC_PD);
		break;
	case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN);
		break;
	case MLX5_CMD_OP_CREATE_RMP:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_RMP);
		break;
	case MLX5_CMD_OP_CREATE_SQ:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_SQ);
		break;
	case MLX5_CMD_OP_CREATE_RQ:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_RQ);
		break;
	case MLX5_CMD_OP_CREATE_RQT:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_RQT);
		break;
	case MLX5_CMD_OP_CREATE_TIR:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_TIR);
		break;
	case MLX5_CMD_OP_CREATE_TIS:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_TIS);
		break;
	case MLX5_CMD_OP_ALLOC_Q_COUNTER:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DEALLOC_Q_COUNTER);
		break;
	case MLX5_CMD_OP_CREATE_FLOW_TABLE:
		*dinlen = MLX5_ST_SZ_BYTES(destroy_flow_table_in);
		*obj_id = MLX5_GET(create_flow_table_out, out, table_id);
		MLX5_SET(destroy_flow_table_in, din, other_vport,
			 MLX5_GET(create_flow_table_in, in, other_vport));
		MLX5_SET(destroy_flow_table_in, din, vport_number,
			 MLX5_GET(create_flow_table_in, in, vport_number));
		MLX5_SET(destroy_flow_table_in, din, table_type,
			 MLX5_GET(create_flow_table_in, in, table_type));
		MLX5_SET(destroy_flow_table_in, din, table_id, *obj_id);
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DESTROY_FLOW_TABLE);
		break;
	case MLX5_CMD_OP_CREATE_FLOW_GROUP:
		*dinlen = MLX5_ST_SZ_BYTES(destroy_flow_group_in);
		*obj_id = MLX5_GET(create_flow_group_out, out, group_id);
		MLX5_SET(destroy_flow_group_in, din, other_vport,
			 MLX5_GET(create_flow_group_in, in, other_vport));
		MLX5_SET(destroy_flow_group_in, din, vport_number,
			 MLX5_GET(create_flow_group_in, in, vport_number));
		MLX5_SET(destroy_flow_group_in, din, table_type,
			 MLX5_GET(create_flow_group_in, in, table_type));
		MLX5_SET(destroy_flow_group_in, din, table_id,
			 MLX5_GET(create_flow_group_in, in, table_id));
		MLX5_SET(destroy_flow_group_in, din, group_id, *obj_id);
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DESTROY_FLOW_GROUP);
		break;
	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
		*dinlen = MLX5_ST_SZ_BYTES(delete_fte_in);
		*obj_id = MLX5_GET(set_fte_in, in, flow_index);
		MLX5_SET(delete_fte_in, din, other_vport,
			 MLX5_GET(set_fte_in, in, other_vport));
		MLX5_SET(delete_fte_in, din, vport_number,
			 MLX5_GET(set_fte_in, in, vport_number));
		MLX5_SET(delete_fte_in, din, table_type,
			 MLX5_GET(set_fte_in, in, table_type));
		MLX5_SET(delete_fte_in, din, table_id,
			 MLX5_GET(set_fte_in, in, table_id));
		MLX5_SET(delete_fte_in, din, flow_index, *obj_id);
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
		break;
	case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DEALLOC_FLOW_COUNTER);
		break;
	case MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT);
		break;
	case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT);
		break;
	case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
		*dinlen = MLX5_ST_SZ_BYTES(destroy_scheduling_element_in);
		*obj_id = MLX5_GET(create_scheduling_element_out, out,
				   scheduling_element_id);
		MLX5_SET(destroy_scheduling_element_in, din,
			 scheduling_hierarchy,
			 MLX5_GET(create_scheduling_element_in, in,
				  scheduling_hierarchy));
		MLX5_SET(destroy_scheduling_element_in, din,
			 scheduling_element_id, *obj_id);
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT);
		break;
	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
		*dinlen = MLX5_ST_SZ_BYTES(delete_vxlan_udp_dport_in);
		*obj_id = MLX5_GET(add_vxlan_udp_dport_in, in, vxlan_udp_port);
		MLX5_SET(delete_vxlan_udp_dport_in, din, vxlan_udp_port, *obj_id);
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT);
		break;
	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
		*dinlen = MLX5_ST_SZ_BYTES(delete_l2_table_entry_in);
		*obj_id = MLX5_GET(set_l2_table_entry_in, in, table_index);
		MLX5_SET(delete_l2_table_entry_in, din, table_index, *obj_id);
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY);
		break;
	case MLX5_CMD_OP_CREATE_QP:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_QP);
		break;
	case MLX5_CMD_OP_CREATE_SRQ:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_SRQ);
		break;
	case MLX5_CMD_OP_CREATE_XRC_SRQ:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DESTROY_XRC_SRQ);
		break;
	case MLX5_CMD_OP_CREATE_DCT:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_DCT);
		break;
	case MLX5_CMD_OP_CREATE_XRQ:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_XRQ);
		break;
	case MLX5_CMD_OP_ATTACH_TO_MCG:
		*dinlen = MLX5_ST_SZ_BYTES(detach_from_mcg_in);
		MLX5_SET(detach_from_mcg_in, din, qpn,
			 MLX5_GET(attach_to_mcg_in, in, qpn));
		memcpy(MLX5_ADDR_OF(detach_from_mcg_in, din, multicast_gid),
		       MLX5_ADDR_OF(attach_to_mcg_in, in, multicast_gid),
		       MLX5_FLD_SZ_BYTES(attach_to_mcg_in, multicast_gid));
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DETACH_FROM_MCG);
		break;
	case MLX5_CMD_OP_ALLOC_XRCD:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DEALLOC_XRCD);
		break;
	default:
		/* The entry must match one of the entries in devx_is_obj_create_cmd() */
		WARN_ON(true);
		break;
	}
}

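/*
 * Populate the devx_mr mkey from the CREATE_MKEY in/out mailboxes and insert
 * it into the device mkey radix tree so the rest of the driver can look it up.
 */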
static int devx_handle_mkey_indirect(struct devx_obj *obj,
				     struct mlx5_ib_dev *dev,
				     void *in, void *out)
{
	struct mlx5_mkey_table *table = &dev->mdev->priv.mkey_table;
	struct mlx5_ib_devx_mr *devx_mr = &obj->devx_mr;
	unsigned long flags;
	struct mlx5_core_mkey *mkey;
	void *mkc;
	u8 key;
	int err;

	mkey = &devx_mr->mmkey;
	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	key = MLX5_GET(mkc, mkc, mkey_7_0);
	mkey->key = mlx5_idx_to_mkey(
			MLX5_GET(create_mkey_out, out, mkey_index)) | key;
	mkey->type = MLX5_MKEY_INDIRECT_DEVX;
	mkey->iova = MLX5_GET64(mkc, mkc, start_addr);
	mkey->size = MLX5_GET64(mkc, mkc, len);
	mkey->pd = MLX5_GET(mkc, mkc, pd);
	devx_mr->ndescs = MLX5_GET(mkc, mkc, translations_octword_size);

	write_lock_irqsave(&table->lock, flags);
	err = radix_tree_insert(&table->tree, mlx5_base_mkey(mkey->key),
				mkey);
	write_unlock_irqrestore(&table->lock, flags);
	return err;
}

static int devx_handle_mkey_create(struct mlx5_ib_dev *dev,
				   struct devx_obj *obj,
				   void *in, int in_len)
{
	int min_len = MLX5_BYTE_OFF(create_mkey_in, memory_key_mkey_entry) +
			MLX5_FLD_SZ_BYTES(create_mkey_in,
					  memory_key_mkey_entry);
	void *mkc;
	u8 access_mode;

	if (in_len < min_len)
		return -EINVAL;

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

	access_mode = MLX5_GET(mkc, mkc, access_mode_1_0);
	access_mode |= MLX5_GET(mkc, mkc, access_mode_4_2) << 2;

	if (access_mode == MLX5_MKC_ACCESS_MODE_KLMS ||
	    access_mode == MLX5_MKC_ACCESS_MODE_KSM) {
		if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING))
			obj->flags |= DEVX_OBJ_FLAGS_INDIRECT_MKEY;
		return 0;
	}

	MLX5_SET(create_mkey_in, in, mkey_umem_valid, 1);
	return 0;
}

static void devx_free_indirect_mkey(struct rcu_head *rcu)
{
	kfree(container_of(rcu, struct devx_obj, devx_mr.rcu));
}

/* Deleting the mkey from the radix tree must be done before destroying the
 * underlying mkey. Otherwise a race might occur: another thread could get
 * the same mkey before this one is deleted, and would then fail when
 * inserting its own data into the tree.
 *
 * Note:
 * An error on the destroy is not expected unless there is some other
 * indirect mkey which points to this one. In a kernel cleanup flow it will
 * simply be destroyed in the iterative destruction call. In a user flow, if
 * the application didn't close in the expected order it's its own problem;
 * the mkey won't be part of the tree, and in both cases the kernel is safe.
 */
static void devx_cleanup_mkey(struct devx_obj *obj)
{
	struct mlx5_mkey_table *table = &obj->mdev->priv.mkey_table;
	unsigned long flags;

	write_lock_irqsave(&table->lock, flags);
	radix_tree_delete(&table->tree, mlx5_base_mkey(obj->devx_mr.mmkey.key));
	write_unlock_irqrestore(&table->lock, flags);
}

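/*
 * Destroy the firmware object using the destroy mailbox saved at creation
 * time. Indirect mkey objects are freed only after an SRCU grace period.
 */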
static int devx_obj_cleanup(struct ib_uobject *uobject,
			    enum rdma_remove_reason why)
{
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
	struct devx_obj *obj = uobject->object;
	int ret;

	if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY)
		devx_cleanup_mkey(obj);

	ret = mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, out, sizeof(out));
	if (ib_is_destroy_retryable(ret, why, uobject))
		return ret;

	if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY) {
		struct mlx5_ib_dev *dev = to_mdev(uobject->context->device);

		call_srcu(&dev->mr_srcu, &obj->devx_mr.rcu,
			  devx_free_indirect_mkey);
		return ret;
	}

	kfree(obj);
	return ret;
}

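/*
 * Create a firmware object on behalf of the user: execute the create mailbox
 * with the resolved uid and record the matching destroy command in the
 * uobject for cleanup.
 */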
static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)(
	struct uverbs_attr_bundle *attrs)
{
	void *cmd_in = uverbs_attr_get_alloced_ptr(attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_IN);
	int cmd_out_len = uverbs_attr_get_len(attrs,
					      MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT);
	int cmd_in_len = uverbs_attr_get_len(attrs,
					     MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_IN);
	void *cmd_out;
	struct ib_uobject *uobj = uverbs_attr_get_uobject(
		attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_HANDLE);
	struct mlx5_ib_ucontext *c = to_mucontext(uobj->context);
	struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
	struct devx_obj *obj;
	int err;
	int uid;
	u32 obj_id;
	u16 opcode;

	uid = devx_get_uid(c, cmd_in);
	if (uid < 0)
		return uid;

	if (!devx_is_obj_create_cmd(cmd_in, &opcode))
		return -EINVAL;

	cmd_out = uverbs_zalloc(attrs, cmd_out_len);
	if (IS_ERR(cmd_out))
		return PTR_ERR(cmd_out);

	obj = kzalloc(sizeof(struct devx_obj), GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
	if (opcode == MLX5_CMD_OP_CREATE_MKEY) {
		err = devx_handle_mkey_create(dev, obj, cmd_in, cmd_in_len);
		if (err)
			goto obj_free;
	} else {
		devx_set_umem_valid(cmd_in);
	}

	err = mlx5_cmd_exec(dev->mdev, cmd_in,
			    cmd_in_len,
			    cmd_out, cmd_out_len);
	if (err)
		goto obj_free;

	uobj->object = obj;
	obj->mdev = dev->mdev;
	devx_obj_build_destroy_cmd(cmd_in, cmd_out, obj->dinbox, &obj->dinlen,
				   &obj_id);
	WARN_ON(obj->dinlen > MLX5_MAX_DESTROY_INBOX_SIZE_DW * sizeof(u32));

	if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY) {
		err = devx_handle_mkey_indirect(obj, dev, cmd_in, cmd_out);
		if (err)
			goto obj_destroy;
	}

	err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT, cmd_out, cmd_out_len);
	if (err)
		goto err_copy;

	obj->obj_id = get_enc_obj_id(opcode, obj_id);
	return 0;

err_copy:
	if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY)
		devx_cleanup_mkey(obj);
obj_destroy:
	mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, out, sizeof(out));
obj_free:
	kfree(obj);
	return err;
}

static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_MODIFY)(
	struct uverbs_attr_bundle *attrs)
{
	void *cmd_in = uverbs_attr_get_alloced_ptr(attrs, MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN);
	int cmd_out_len = uverbs_attr_get_len(attrs,
					      MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_OUT);
	struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs,
							  MLX5_IB_ATTR_DEVX_OBJ_MODIFY_HANDLE);
	struct mlx5_ib_ucontext *c = to_mucontext(uobj->context);
	struct mlx5_ib_dev *mdev = to_mdev(uobj->context->device);
	void *cmd_out;
	int err;
	int uid;

	uid = devx_get_uid(c, cmd_in);
	if (uid < 0)
		return uid;

	if (!devx_is_obj_modify_cmd(cmd_in))
		return -EINVAL;

	if (!devx_is_valid_obj_id(uobj, cmd_in))
		return -EINVAL;

	cmd_out = uverbs_zalloc(attrs, cmd_out_len);
	if (IS_ERR(cmd_out))
		return PTR_ERR(cmd_out);

	MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
	devx_set_umem_valid(cmd_in);

	err = mlx5_cmd_exec(mdev->mdev, cmd_in,
			    uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN),
			    cmd_out, cmd_out_len);
	if (err)
		return err;

	return uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_OUT,
			      cmd_out, cmd_out_len);
}

static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_QUERY)(
	struct uverbs_attr_bundle *attrs)
{
	void *cmd_in = uverbs_attr_get_alloced_ptr(attrs, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN);
	int cmd_out_len = uverbs_attr_get_len(attrs,
					      MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_OUT);
	struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs,
							  MLX5_IB_ATTR_DEVX_OBJ_QUERY_HANDLE);
	struct mlx5_ib_ucontext *c = to_mucontext(uobj->context);
	void *cmd_out;
	int err;
	int uid;
	struct mlx5_ib_dev *mdev = to_mdev(uobj->context->device);

	uid = devx_get_uid(c, cmd_in);
	if (uid < 0)
		return uid;

	if (!devx_is_obj_query_cmd(cmd_in))
		return -EINVAL;

	if (!devx_is_valid_obj_id(uobj, cmd_in))
		return -EINVAL;

	cmd_out = uverbs_zalloc(attrs, cmd_out_len);
	if (IS_ERR(cmd_out))
		return PTR_ERR(cmd_out);

	MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
	err = mlx5_cmd_exec(mdev->mdev, cmd_in,
			    uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN),
			    cmd_out, cmd_out_len);
	if (err)
		return err;

	return uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_OUT,
			      cmd_out, cmd_out_len);
}

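/*
 * Async command completions: results of MLX5_IB_METHOD_DEVX_OBJ_ASYNC_QUERY
 * are queued on the per-FD event list below and wake up any waiter on
 * poll_wait.
 */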
struct devx_async_event_queue {
	spinlock_t		lock;
	wait_queue_head_t	poll_wait;
	struct list_head	event_list;
	atomic_t		bytes_in_use;
	u8			is_destroyed:1;
};

struct devx_async_cmd_event_file {
	struct ib_uobject		uobj;
	struct devx_async_event_queue	ev_queue;
	struct mlx5_async_ctx		async_ctx;
};

static void devx_init_event_queue(struct devx_async_event_queue *ev_queue)
{
	spin_lock_init(&ev_queue->lock);
	INIT_LIST_HEAD(&ev_queue->event_list);
	init_waitqueue_head(&ev_queue->poll_wait);
	atomic_set(&ev_queue->bytes_in_use, 0);
	ev_queue->is_destroyed = 0;
}

static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_ASYNC_CMD_FD_ALLOC)(
	struct uverbs_attr_bundle *attrs)
{
	struct devx_async_cmd_event_file *ev_file;
	struct ib_uobject *uobj = uverbs_attr_get_uobject(
		attrs, MLX5_IB_ATTR_DEVX_ASYNC_CMD_FD_ALLOC_HANDLE);
	struct mlx5_ib_dev *mdev = to_mdev(uobj->context->device);

	ev_file = container_of(uobj, struct devx_async_cmd_event_file,
			       uobj);
	devx_init_event_queue(&ev_file->ev_queue);
	mlx5_cmd_init_async_ctx(mdev->mdev, &ev_file->async_ctx);
	return 0;
}

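/*
 * Completion callback for an asynchronous DEVX query.  It runs from the
 * mlx5 command interface once the firmware response (or an error status)
 * is available: the prepared devx_async_data is moved onto the owning
 * FD's event list, any reader sleeping in read()/poll() is woken, and the
 * file reference taken at submission time is dropped.
 */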
static void devx_query_callback(int status, struct mlx5_async_work *context)
{
	struct devx_async_data *async_data =
		container_of(context, struct devx_async_data, cb_work);
	struct ib_uobject *fd_uobj = async_data->fd_uobj;
	struct devx_async_cmd_event_file *ev_file;
	struct devx_async_event_queue *ev_queue;
	unsigned long flags;

	ev_file = container_of(fd_uobj, struct devx_async_cmd_event_file,
			       uobj);
	ev_queue = &ev_file->ev_queue;

	spin_lock_irqsave(&ev_queue->lock, flags);
	list_add_tail(&async_data->list, &ev_queue->event_list);
	spin_unlock_irqrestore(&ev_queue->lock, flags);

	wake_up_interruptible(&ev_queue->poll_wait);
	fput(fd_uobj->object);
}

#define MAX_ASYNC_BYTES_IN_USE (1024 * 1024) /* 1MB */

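/*
 * Submission path for MLX5_IB_METHOD_DEVX_OBJ_ASYNC_QUERY.  The handler
 * validates the command and the target object id, charges the requested
 * output length against the FD's bytes_in_use budget, allocates a
 * devx_async_data large enough to hold the completion header plus
 * cmd_out_len bytes of response, and posts the command with
 * mlx5_cmd_exec_cb() so that devx_query_callback() runs on completion.
 * The extra file reference keeps the event FD alive until then.
 */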
static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_ASYNC_QUERY)(
	struct uverbs_attr_bundle *attrs)
{
	void *cmd_in = uverbs_attr_get_alloced_ptr(attrs,
				MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_CMD_IN);
	struct ib_uobject *uobj = uverbs_attr_get_uobject(
				attrs,
				MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_HANDLE);
	u16 cmd_out_len;
	struct mlx5_ib_ucontext *c = to_mucontext(uobj->context);
	struct ib_uobject *fd_uobj;
	int err;
	int uid;
	struct mlx5_ib_dev *mdev = to_mdev(uobj->context->device);
	struct devx_async_cmd_event_file *ev_file;
	struct devx_async_data *async_data;

	uid = devx_get_uid(c, cmd_in);
	if (uid < 0)
		return uid;

	if (!devx_is_obj_query_cmd(cmd_in))
		return -EINVAL;

	err = uverbs_get_const(&cmd_out_len, attrs,
			       MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_OUT_LEN);
	if (err)
		return err;

	if (!devx_is_valid_obj_id(uobj, cmd_in))
		return -EINVAL;

	fd_uobj = uverbs_attr_get_uobject(attrs,
				MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_FD);
	if (IS_ERR(fd_uobj))
		return PTR_ERR(fd_uobj);

	ev_file = container_of(fd_uobj, struct devx_async_cmd_event_file,
			       uobj);

	if (atomic_add_return(cmd_out_len, &ev_file->ev_queue.bytes_in_use) >
			MAX_ASYNC_BYTES_IN_USE) {
		atomic_sub(cmd_out_len, &ev_file->ev_queue.bytes_in_use);
		return -EAGAIN;
	}

	async_data = kvzalloc(struct_size(async_data, hdr.out_data,
					  cmd_out_len), GFP_KERNEL);
	if (!async_data) {
		err = -ENOMEM;
		goto sub_bytes;
	}

	err = uverbs_copy_from(&async_data->hdr.wr_id, attrs,
			       MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_WR_ID);
	if (err)
		goto free_async;

	async_data->cmd_out_len = cmd_out_len;
	async_data->mdev = mdev;
	async_data->fd_uobj = fd_uobj;

	get_file(fd_uobj->object);
	MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
	err = mlx5_cmd_exec_cb(&ev_file->async_ctx, cmd_in,
		    uverbs_attr_get_len(attrs,
				MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_CMD_IN),
		    async_data->hdr.out_data,
		    async_data->cmd_out_len,
		    devx_query_callback, &async_data->cb_work);

	if (err)
		goto cb_err;

	return 0;

cb_err:
	fput(fd_uobj->object);
free_async:
	kvfree(async_data);
sub_bytes:
	atomic_sub(cmd_out_len, &ev_file->ev_queue.bytes_in_use);
	return err;
}

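/*
 * Pin the user memory described by the UMEM_REG attributes and work out
 * the page layout the device will use.  The access flags are restricted
 * to local write / remote read / remote write and are checked with
 * ib_check_mr_access() before the pages are pinned.
 */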
static int devx_umem_get(struct mlx5_ib_dev *dev, struct ib_ucontext *ucontext,
			 struct uverbs_attr_bundle *attrs,
			 struct devx_umem *obj)
{
	u64 addr;
	size_t size;
	u32 access;
	int npages;
	int err;
	u32 page_mask;

	if (uverbs_copy_from(&addr, attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_ADDR) ||
	    uverbs_copy_from(&size, attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_LEN))
		return -EFAULT;

	err = uverbs_get_flags32(&access, attrs,
				 MLX5_IB_ATTR_DEVX_UMEM_REG_ACCESS,
				 IB_ACCESS_LOCAL_WRITE |
				 IB_ACCESS_REMOTE_WRITE |
				 IB_ACCESS_REMOTE_READ);
	if (err)
		return err;

	err = ib_check_mr_access(access);
	if (err)
		return err;

	obj->umem = ib_umem_get(&attrs->driver_udata, addr, size, access, 0);
	if (IS_ERR(obj->umem))
		return PTR_ERR(obj->umem);

	mlx5_ib_cont_pages(obj->umem, obj->umem->address,
			   MLX5_MKEY_PAGE_SHIFT_MASK, &npages,
			   &obj->page_shift, &obj->ncont, NULL);

	if (!npages) {
		ib_umem_release(obj->umem);
		return -EINVAL;
	}

	page_mask = (1 << obj->page_shift) - 1;
	obj->page_offset = obj->umem->address & page_mask;

	return 0;
}

static int devx_umem_reg_cmd_alloc(struct uverbs_attr_bundle *attrs,
				   struct devx_umem *obj,
				   struct devx_umem_reg_cmd *cmd)
{
	cmd->inlen = MLX5_ST_SZ_BYTES(create_umem_in) +
		    (MLX5_ST_SZ_BYTES(mtt) * obj->ncont);
	cmd->in = uverbs_zalloc(attrs, cmd->inlen);
	return PTR_ERR_OR_ZERO(cmd->in);
}

static void devx_umem_reg_cmd_build(struct mlx5_ib_dev *dev,
				    struct devx_umem *obj,
				    struct devx_umem_reg_cmd *cmd)
{
	void *umem;
	__be64 *mtt;

	umem = MLX5_ADDR_OF(create_umem_in, cmd->in, umem);
	mtt = (__be64 *)MLX5_ADDR_OF(umem, umem, mtt);

	MLX5_SET(create_umem_in, cmd->in, opcode, MLX5_CMD_OP_CREATE_UMEM);
	MLX5_SET64(umem, umem, num_of_mtt, obj->ncont);
	MLX5_SET(umem, umem, log_page_size, obj->page_shift -
					    MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET(umem, umem, page_offset, obj->page_offset);
	mlx5_ib_populate_pas(dev, obj->umem, obj->page_shift, mtt,
			     (obj->umem->writable ? MLX5_IB_MTT_WRITE : 0) |
			     MLX5_IB_MTT_READ);
}

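/*
 * MLX5_IB_METHOD_DEVX_UMEM_REG: pin the memory, build a CREATE_UMEM
 * command carrying one MTT entry per device page (obj->ncont of them),
 * execute it on behalf of the context's devx uid, and report the
 * resulting umem object id back to userspace.  The matching destroy
 * command is precomputed into obj->dinbox so teardown needs no further
 * allocation.
 */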
static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_UMEM_REG)(
	struct uverbs_attr_bundle *attrs)
{
	struct devx_umem_reg_cmd cmd;
	struct devx_umem *obj;
	struct ib_uobject *uobj = uverbs_attr_get_uobject(
		attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_HANDLE);
	u32 obj_id;
	struct mlx5_ib_ucontext *c = to_mucontext(uobj->context);
	struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
	int err;

	if (!c->devx_uid)
		return -EINVAL;

	obj = kzalloc(sizeof(struct devx_umem), GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	err = devx_umem_get(dev, &c->ibucontext, attrs, obj);
	if (err)
		goto err_obj_free;

	err = devx_umem_reg_cmd_alloc(attrs, obj, &cmd);
	if (err)
		goto err_umem_release;

	devx_umem_reg_cmd_build(dev, obj, &cmd);

	MLX5_SET(create_umem_in, cmd.in, uid, c->devx_uid);
	err = mlx5_cmd_exec(dev->mdev, cmd.in, cmd.inlen, cmd.out,
			    sizeof(cmd.out));
	if (err)
		goto err_umem_release;

	obj->mdev = dev->mdev;
	uobj->object = obj;
	devx_obj_build_destroy_cmd(cmd.in, cmd.out, obj->dinbox, &obj->dinlen, &obj_id);
	err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_OUT_ID, &obj_id, sizeof(obj_id));
	if (err)
		goto err_umem_destroy;

	return 0;

err_umem_destroy:
	mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, cmd.out, sizeof(cmd.out));
err_umem_release:
	ib_umem_release(obj->umem);
err_obj_free:
	kfree(obj);
	return err;
}

static int devx_umem_cleanup(struct ib_uobject *uobject,
			     enum rdma_remove_reason why)
{
	struct devx_umem *obj = uobject->object;
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
	int err;

	err = mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, out, sizeof(out));
	if (ib_is_destroy_retryable(err, why, uobject))
		return err;

	ib_umem_release(obj->umem);
	kfree(obj);
	return 0;
}

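/*
 * read() on the async command FD returns one completed query per call:
 * the mlx5_ib_uapi_devx_async_cmd_hdr (carrying the caller's wr_id)
 * followed by the raw command output.  With no completion queued the
 * call blocks unless O_NONBLOCK is set; a buffer smaller than the next
 * event yields -ENOSPC, and -EIO is returned once the queue has been
 * destroyed and drained.
 */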
static ssize_t devx_async_cmd_event_read(struct file *filp, char __user *buf,
					 size_t count, loff_t *pos)
{
	struct devx_async_cmd_event_file *comp_ev_file = filp->private_data;
	struct devx_async_event_queue *ev_queue = &comp_ev_file->ev_queue;
	struct devx_async_data *event;
	int ret = 0;
	size_t eventsz;

	spin_lock_irq(&ev_queue->lock);

	while (list_empty(&ev_queue->event_list)) {
		spin_unlock_irq(&ev_queue->lock);

		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		if (wait_event_interruptible(
			    ev_queue->poll_wait,
			    (!list_empty(&ev_queue->event_list) ||
			     ev_queue->is_destroyed))) {
			return -ERESTARTSYS;
		}

		if (list_empty(&ev_queue->event_list) &&
		    ev_queue->is_destroyed)
			return -EIO;

		spin_lock_irq(&ev_queue->lock);
	}

	event = list_entry(ev_queue->event_list.next,
			   struct devx_async_data, list);
	eventsz = event->cmd_out_len +
			sizeof(struct mlx5_ib_uapi_devx_async_cmd_hdr);

	if (eventsz > count) {
		spin_unlock_irq(&ev_queue->lock);
		return -ENOSPC;
	}

	list_del(ev_queue->event_list.next);
	spin_unlock_irq(&ev_queue->lock);

	if (copy_to_user(buf, &event->hdr, eventsz))
		ret = -EFAULT;
	else
		ret = eventsz;

	atomic_sub(event->cmd_out_len, &ev_queue->bytes_in_use);
	kvfree(event);
	return ret;
}

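/*
 * Release path for the FD: any completions that were never read are
 * freed here, and uverbs_close_fd() lets the uverbs layer drop the
 * uobject that was allocated together with the file.
 */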
static int devx_async_cmd_event_close(struct inode *inode, struct file *filp)
{
	struct ib_uobject *uobj = filp->private_data;
	struct devx_async_cmd_event_file *comp_ev_file = container_of(
		uobj, struct devx_async_cmd_event_file, uobj);
	struct devx_async_data *entry, *tmp;

	spin_lock_irq(&comp_ev_file->ev_queue.lock);
	list_for_each_entry_safe(entry, tmp,
				 &comp_ev_file->ev_queue.event_list, list)
		kvfree(entry);
	spin_unlock_irq(&comp_ev_file->ev_queue.lock);

	uverbs_close_fd(filp);
	return 0;
}

static __poll_t devx_async_cmd_event_poll(struct file *filp,
					  struct poll_table_struct *wait)
{
	struct devx_async_cmd_event_file *comp_ev_file = filp->private_data;
	struct devx_async_event_queue *ev_queue = &comp_ev_file->ev_queue;
	__poll_t pollflags = 0;

	poll_wait(filp, &ev_queue->poll_wait, wait);

	spin_lock_irq(&ev_queue->lock);
	if (ev_queue->is_destroyed)
		pollflags = EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
	else if (!list_empty(&ev_queue->event_list))
		pollflags = EPOLLIN | EPOLLRDNORM;
	spin_unlock_irq(&ev_queue->lock);

	return pollflags;
}

const struct file_operations devx_async_cmd_event_fops = {
	.owner	 = THIS_MODULE,
	.read	 = devx_async_cmd_event_read,
	.poll    = devx_async_cmd_event_poll,
	.release = devx_async_cmd_event_close,
	.llseek	 = no_llseek,
};

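/*
 * Called when the uobject is torn down: mark the queue destroyed so
 * readers fail with -EIO, on driver removal wake anyone blocked in
 * poll()/read(), and clean up the async command context so no callback
 * can touch the file after it is gone.
 */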
static int devx_hot_unplug_async_cmd_event_file(struct ib_uobject *uobj,
						enum rdma_remove_reason why)
{
	struct devx_async_cmd_event_file *comp_ev_file =
		container_of(uobj, struct devx_async_cmd_event_file,
			     uobj);
	struct devx_async_event_queue *ev_queue = &comp_ev_file->ev_queue;

	spin_lock_irq(&ev_queue->lock);
	ev_queue->is_destroyed = 1;
	spin_unlock_irq(&ev_queue->lock);

	if (why == RDMA_REMOVE_DRIVER_REMOVE)
		wake_up_interruptible(&ev_queue->poll_wait);

	mlx5_cmd_cleanup_async_ctx(&comp_ev_file->async_ctx);
	return 0;
}

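/*
 * What follows is the ioctl() schema for the DEVX objects and methods:
 * the DECLARE_UVERBS_* macros describe each method's attributes (handles,
 * inline buffers, constants and FDs) so the uverbs ioctl layer can parse
 * and validate requests before the handlers above run.
 */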
DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_UMEM_REG,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_UMEM_REG_HANDLE,
			MLX5_IB_OBJECT_DEVX_UMEM,
			UVERBS_ACCESS_NEW,
			UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_ADDR,
			   UVERBS_ATTR_TYPE(u64),
			   UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_LEN,
			   UVERBS_ATTR_TYPE(u64),
			   UA_MANDATORY),
	UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_ACCESS,
			     enum ib_access_flags),
	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DEVX_UMEM_REG_OUT_ID,
			    UVERBS_ATTR_TYPE(u32),
			    UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD_DESTROY(
	MLX5_IB_METHOD_DEVX_UMEM_DEREG,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_UMEM_DEREG_HANDLE,
			MLX5_IB_OBJECT_DEVX_UMEM,
			UVERBS_ACCESS_DESTROY,
			UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_QUERY_EQN,
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_QUERY_EQN_USER_VEC,
			   UVERBS_ATTR_TYPE(u32),
			   UA_MANDATORY),
	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DEVX_QUERY_EQN_DEV_EQN,
			    UVERBS_ATTR_TYPE(u32),
			    UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_QUERY_UAR,
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_QUERY_UAR_USER_IDX,
			   UVERBS_ATTR_TYPE(u32),
			   UA_MANDATORY),
	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DEVX_QUERY_UAR_DEV_IDX,
			    UVERBS_ATTR_TYPE(u32),
			    UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_OTHER,
	UVERBS_ATTR_PTR_IN(
		MLX5_IB_ATTR_DEVX_OTHER_CMD_IN,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
		UA_MANDATORY,
		UA_ALLOC_AND_COPY),
	UVERBS_ATTR_PTR_OUT(
		MLX5_IB_ATTR_DEVX_OTHER_CMD_OUT,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
		UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_OBJ_CREATE,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_CREATE_HANDLE,
			MLX5_IB_OBJECT_DEVX_OBJ,
			UVERBS_ACCESS_NEW,
			UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(
		MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_IN,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
		UA_MANDATORY,
		UA_ALLOC_AND_COPY),
	UVERBS_ATTR_PTR_OUT(
		MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
		UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD_DESTROY(
	MLX5_IB_METHOD_DEVX_OBJ_DESTROY,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_DESTROY_HANDLE,
			MLX5_IB_OBJECT_DEVX_OBJ,
			UVERBS_ACCESS_DESTROY,
			UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_OBJ_MODIFY,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_MODIFY_HANDLE,
			UVERBS_IDR_ANY_OBJECT,
			UVERBS_ACCESS_WRITE,
			UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(
		MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
		UA_MANDATORY,
		UA_ALLOC_AND_COPY),
	UVERBS_ATTR_PTR_OUT(
		MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_OUT,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
		UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_OBJ_QUERY,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_QUERY_HANDLE,
			UVERBS_IDR_ANY_OBJECT,
			UVERBS_ACCESS_READ,
			UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(
		MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
		UA_MANDATORY,
		UA_ALLOC_AND_COPY),
	UVERBS_ATTR_PTR_OUT(
		MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_OUT,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
		UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_OBJ_ASYNC_QUERY,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_QUERY_HANDLE,
			UVERBS_IDR_ANY_OBJECT,
			UVERBS_ACCESS_READ,
			UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(
		MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
		UA_MANDATORY,
		UA_ALLOC_AND_COPY),
	UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_OUT_LEN,
		u16, UA_MANDATORY),
	UVERBS_ATTR_FD(MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_FD,
		MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD,
		UVERBS_ACCESS_READ,
		UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_WR_ID,
		UVERBS_ATTR_TYPE(u64),
		UA_MANDATORY));

DECLARE_UVERBS_GLOBAL_METHODS(MLX5_IB_OBJECT_DEVX,
			      &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OTHER),
			      &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_QUERY_UAR),
			      &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_QUERY_EQN));

DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_DEVX_OBJ,
			    UVERBS_TYPE_ALLOC_IDR(devx_obj_cleanup),
			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_CREATE),
			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_DESTROY),
			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_MODIFY),
			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_QUERY),
			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_ASYNC_QUERY));

DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_DEVX_UMEM,
			    UVERBS_TYPE_ALLOC_IDR(devx_umem_cleanup),
			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_UMEM_REG),
			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_UMEM_DEREG));

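/*
 * Expected userspace flow for asynchronous queries (a sketch; the exact
 * wrapper calls live in the provider library, not here): allocate a
 * completion FD with MLX5_IB_METHOD_DEVX_ASYNC_CMD_FD_ALLOC, issue
 * MLX5_IB_METHOD_DEVX_OBJ_ASYNC_QUERY with an out_len, wr_id and that FD,
 * then poll()/read() the FD; each read returns the wr_id header followed
 * by the command output.
 */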
DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_ASYNC_CMD_FD_ALLOC,
	UVERBS_ATTR_FD(MLX5_IB_ATTR_DEVX_ASYNC_CMD_FD_ALLOC_HANDLE,
			MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD,
			UVERBS_ACCESS_NEW,
			UA_MANDATORY));

DECLARE_UVERBS_NAMED_OBJECT(
	MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD,
	UVERBS_TYPE_ALLOC_FD(sizeof(struct devx_async_cmd_event_file),
			     devx_hot_unplug_async_cmd_event_file,
			     &devx_async_cmd_event_fops, "[devx_async_cmd]",
			     O_RDONLY),
	&UVERBS_METHOD(MLX5_IB_METHOD_DEVX_ASYNC_CMD_FD_ALLOC));

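/*
 * DEVX is exposed only when the firmware reports a non-zero log_max_uctx
 * capability and the device is not acting as an e-switch representor.
 */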
static bool devx_is_supported(struct ib_device *device)
{
	struct mlx5_ib_dev *dev = to_mdev(device);

	return !dev->rep && MLX5_CAP_GEN(dev->mdev, log_max_uctx);
}

const struct uapi_definition mlx5_ib_devx_defs[] = {
	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
		MLX5_IB_OBJECT_DEVX,
		UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
		MLX5_IB_OBJECT_DEVX_OBJ,
		UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
		MLX5_IB_OBJECT_DEVX_UMEM,
		UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
		MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD,
		UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
	{},
};