/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright (C) 2017, Microsoft Corporation.
 *
 * Author(s): Long Li <longli@microsoft.com>
 */
#ifndef _SMBDIRECT_H
#define _SMBDIRECT_H

#ifdef CONFIG_CIFS_SMB_DIRECT
#define cifs_rdma_enabled(server) ((server)->rdma)

#include "cifsglob.h"
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <linux/mempool.h>

extern int rdma_readwrite_threshold;
extern int smbd_max_frmr_depth;
extern int smbd_keep_alive_interval;
extern int smbd_max_receive_size;
extern int smbd_max_fragmented_recv_size;
extern int smbd_max_send_size;
extern int smbd_send_credit_target;
extern int smbd_receive_credit_max;

enum keep_alive_status {
	KEEP_ALIVE_NONE,
	KEEP_ALIVE_PENDING,
	KEEP_ALIVE_SENT,
};

enum smbd_connection_status {
	SMBD_CREATED,
	SMBD_CONNECTING,
	SMBD_CONNECTED,
	SMBD_NEGOTIATE_FAILED,
	SMBD_DISCONNECTING,
	SMBD_DISCONNECTED,
	SMBD_DESTROYED
};

/*
 * The context for the SMBDirect transport
 * Everything related to the transport is here. It has several logical parts
 * 1. RDMA related structures
 * 2. SMBDirect connection parameters
 * 3. Memory registrations
 * 4. Receive and reassembly queues for data receive path
 * 5. mempools for allocating packets
 */
struct smbd_connection {
	enum smbd_connection_status transport_status;

	/* RDMA related */
	struct rdma_cm_id *id;
	struct ib_qp_init_attr qp_attr;
	struct ib_pd *pd;
	struct ib_cq *send_cq, *recv_cq;
	struct ib_device_attr dev_attr;
	int ri_rc;
	struct completion ri_done;
	wait_queue_head_t conn_wait;
	wait_queue_head_t disconn_wait;

	struct completion negotiate_completion;
	bool negotiate_done;

	struct work_struct disconnect_work;
	struct work_struct recv_done_work;
	struct work_struct post_send_credits_work;

	spinlock_t lock_new_credits_offered;
	int new_credits_offered;

	/* Connection parameters defined in [MS-SMBD] 3.1.1.1 */
	int receive_credit_max;
	int send_credit_target;
	int max_send_size;
	int max_fragmented_recv_size;
	int max_fragmented_send_size;
	int max_receive_size;
	int keep_alive_interval;
	int max_readwrite_size;
	enum keep_alive_status keep_alive_requested;
	int protocol;
	atomic_t send_credits;
	atomic_t receive_credits;
	int receive_credit_target;
	int fragment_reassembly_remaining;

	/* Memory registrations */
	/* Maximum number of RDMA read/write outstanding on this connection */
	int responder_resources;
	/* Maximum number of SGEs in an RDMA write/read */
	int max_frmr_depth;
	/*
	 * If the payload is less than or equal to the threshold,
	 * use RDMA send/recv to carry the upper layer I/O.
	 * If the payload is larger than the threshold,
	 * use RDMA read/write through memory registration for the I/O.
	 * (An illustrative sketch of this decision follows the struct.)
	 */
	int rdma_readwrite_threshold;
	enum ib_mr_type mr_type;
	struct list_head mr_list;
	spinlock_t mr_list_lock;
	/* The number of available MRs ready for memory registration */
	atomic_t mr_ready_count;
	atomic_t mr_used_count;
	wait_queue_head_t wait_mr;
	struct work_struct mr_recovery_work;
	/* Used by transport to wait until all MRs are returned */
	wait_queue_head_t wait_for_mr_cleanup;

	/* Activity accounting */
	atomic_t send_pending;
	wait_queue_head_t wait_send_pending;
	atomic_t send_payload_pending;
	wait_queue_head_t wait_send_payload_pending;

	/* Receive queue */
	struct list_head receive_queue;
	int count_receive_queue;
	spinlock_t receive_queue_lock;

	struct list_head empty_packet_queue;
	int count_empty_packet_queue;
	spinlock_t empty_packet_queue_lock;

	wait_queue_head_t wait_receive_queues;

	/* Reassembly queue */
	struct list_head reassembly_queue;
	spinlock_t reassembly_queue_lock;
	wait_queue_head_t wait_reassembly_queue;

	/* total data length of reassembly queue */
	int reassembly_data_length;
	int reassembly_queue_length;
	/* the offset to first buffer in reassembly queue */
	int first_entry_offset;

	bool send_immediate;

	wait_queue_head_t wait_send_queue;

	/*
	 * Indicate if we have received a full packet on the connection.
	 * This is used to identify the first SMBD packet of an assembled
	 * payload (SMB packet) in the reassembly queue, so we can return an
	 * RFC1002 length to the upper layer to indicate the length of the
	 * SMB packet received.
	 */
	bool full_packet_received;

	struct workqueue_struct *workqueue;
	struct delayed_work idle_timer_work;
	struct delayed_work send_immediate_work;

	/* Memory pool for preallocating buffers */
	/* request pool for RDMA send */
	struct kmem_cache *request_cache;
	mempool_t *request_mempool;

	/* response pool for RDMA receive */
	struct kmem_cache *response_cache;
	mempool_t *response_mempool;

	/* for debug purposes */
	unsigned int count_get_receive_buffer;
	unsigned int count_put_receive_buffer;
	unsigned int count_reassembly_queue;
	unsigned int count_enqueue_reassembly_queue;
	unsigned int count_dequeue_reassembly_queue;
	unsigned int count_send_empty;
};
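
/*
 * Illustrative sketch only (not part of the original header): the send-path
 * decision that rdma_readwrite_threshold controls, as described in the
 * comment inside struct smbd_connection above. The helper name and its
 * placement here are assumptions made for illustration.
 */
static inline bool smbd_prefer_rdma_readwrite(struct smbd_connection *info,
					      int payload_len)
{
	/*
	 * Small payloads ride inside RDMA send/recv; larger payloads are
	 * carried by RDMA read/write against a registered MR.
	 */
	return payload_len > info->rdma_readwrite_threshold;
}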

enum smbd_message_type {
	SMBD_NEGOTIATE_RESP,
	SMBD_TRANSFER_DATA,
};

#define SMB_DIRECT_RESPONSE_REQUESTED 0x0001

/* SMBD negotiation request packet [MS-SMBD] 2.2.1 */
struct smbd_negotiate_req {
	__le16 min_version;
	__le16 max_version;
	__le16 reserved;
	__le16 credits_requested;
	__le32 preferred_send_size;
	__le32 max_receive_size;
	__le32 max_fragmented_size;
} __packed;
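
/*
 * Illustrative sketch only: one way the negotiate request could be filled
 * from the connection parameters above. The 0x0100 value for SMBDirect 1.0
 * and the helper name are assumptions for illustration; the real version
 * constant lives in the transport implementation, not in this header.
 */
static inline void smbd_fill_negotiate_req(struct smbd_connection *info,
					   struct smbd_negotiate_req *req)
{
	req->min_version = cpu_to_le16(0x0100);	/* SMBDirect 1.0 */
	req->max_version = cpu_to_le16(0x0100);
	req->reserved = 0;
	req->credits_requested = cpu_to_le16(info->send_credit_target);
	req->preferred_send_size = cpu_to_le32(info->max_send_size);
	req->max_receive_size = cpu_to_le32(info->max_receive_size);
	req->max_fragmented_size = cpu_to_le32(info->max_fragmented_recv_size);
}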

/* SMBD negotiation response packet [MS-SMBD] 2.2.2 */
struct smbd_negotiate_resp {
	__le16 min_version;
	__le16 max_version;
	__le16 negotiated_version;
	__le16 reserved;
	__le16 credits_requested;
	__le16 credits_granted;
	__le32 status;
	__le32 max_readwrite_size;
	__le32 preferred_send_size;
	__le32 max_receive_size;
	__le32 max_fragmented_size;
} __packed;

/* SMBD data transfer packet with payload [MS-SMBD] 2.2.3 */
struct smbd_data_transfer {
	__le16 credits_requested;
	__le16 credits_granted;
	__le16 flags;
	__le16 reserved;
	__le32 remaining_data_length;
	__le32 data_offset;
	__le32 data_length;
	__le32 padding;
	__u8 buffer[];
} __packed;

/* The packet fields for a registered RDMA buffer */
struct smbd_buffer_descriptor_v1 {
	__le64 offset;
	__le32 token;
	__le32 length;
} __packed;
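
/*
 * Illustrative sketch only: how the descriptor above might be filled from a
 * memory region registered with the RDMA core, in wire (little-endian)
 * order. The helper name is an assumption; only the struct layout comes
 * from this header.
 */
static inline void smbd_fill_buffer_descriptor_v1(
	struct smbd_buffer_descriptor_v1 *v1, struct ib_mr *mr)
{
	v1->offset = cpu_to_le64(mr->iova);	/* remote virtual address */
	v1->token = cpu_to_le32(mr->rkey);	/* remote access key */
	v1->length = cpu_to_le32(mr->length);	/* registered length in bytes */
}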

/* Default maximum number of SGEs in an RDMA send/recv */
#define SMBDIRECT_MAX_SGE 16
/* The context for a SMBD request */
struct smbd_request {
	struct smbd_connection *info;
	struct ib_cqe cqe;

	/* true if this request carries upper layer payload */
	bool has_payload;

	/* the SGE entries for this packet */
	struct ib_sge sge[SMBDIRECT_MAX_SGE];
	int num_sge;

	/* SMBD packet header follows this structure */
	u8 packet[];
};

/* The context for a SMBD response */
struct smbd_response {
	struct smbd_connection *info;
	struct ib_cqe cqe;
	struct ib_sge sge;

	enum smbd_message_type type;

	/* Link to receive queue or reassembly queue */
	struct list_head list;

	/* Indicate if this is the 1st packet of a payload */
	bool first_segment;

	/* SMBD packet header and payload follows this structure */
	u8 packet[];
};

/* Create a SMBDirect session */
struct smbd_connection *smbd_get_connection(
	struct TCP_Server_Info *server, struct sockaddr *dstaddr);

/* Reconnect SMBDirect session */
int smbd_reconnect(struct TCP_Server_Info *server);
/* Destroy SMBDirect session */
void smbd_destroy(struct TCP_Server_Info *server);
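
/*
 * Illustrative sketch only: the intended session lifecycle seen from the
 * upper layer. The smbd_conn field exists on TCP_Server_Info when SMB
 * Direct is configured; the helper name and the error code here are
 * assumptions for illustration.
 */
static inline int smbd_connect_sketch(struct TCP_Server_Info *server,
				      struct sockaddr *dstaddr)
{
	server->smbd_conn = smbd_get_connection(server, dstaddr);
	if (!server->smbd_conn)
		return -ENOENT;
	/* On a transport failure the caller retries via smbd_reconnect(). */
	/* At teardown the caller releases the session via smbd_destroy(). */
	return 0;
}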

/* Interface for carrying upper layer I/O through send/recv */
int smbd_recv(struct smbd_connection *info, struct msghdr *msg);
int smbd_send(struct TCP_Server_Info *server,
	int num_rqst, struct smb_rqst *rqst);
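
/*
 * Illustrative sketch only: how a caller might route a compound request to
 * the SMBDirect transport when the session was mounted with rdma. The
 * helper name and the fallback error code are assumptions; only
 * cifs_rdma_enabled() and smbd_send() come from this header.
 */
static inline int smbd_send_sketch(struct TCP_Server_Info *server,
				   int num_rqst, struct smb_rqst *rqst)
{
	if (!cifs_rdma_enabled(server))
		return -ENODEV;	/* caller falls back to the socket transport */
	return smbd_send(server, num_rqst, rqst);
}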

enum mr_state {
	MR_READY,
	MR_REGISTERED,
	MR_INVALIDATED,
	MR_ERROR
};

struct smbd_mr {
	struct smbd_connection *conn;
	struct list_head list;
	enum mr_state state;
	struct ib_mr *mr;
	struct scatterlist *sgl;
	int sgl_count;
	enum dma_data_direction dir;
	union {
		struct ib_reg_wr wr;
		struct ib_send_wr inv_wr;
	};
	struct ib_cqe cqe;
	bool need_invalidate;
	struct completion invalidate_done;
};

/* Interfaces to register and deregister MR for RDMA read/write */
struct smbd_mr *smbd_register_mr(
	struct smbd_connection *info, struct page *pages[], int num_pages,
	int offset, int tailsz, bool writing, bool need_invalidate);
int smbd_deregister_mr(struct smbd_mr *mr);
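
/*
 * Illustrative sketch only: the expected MR lifecycle around one large I/O.
 * The helper name and error code are assumptions; the descriptor fill is
 * the hypothetical smbd_fill_buffer_descriptor_v1() sketched earlier.
 */
static inline int smbd_rdma_io_sketch(struct smbd_connection *info,
				      struct page *pages[], int num_pages,
				      int tailsz, bool writing)
{
	struct smbd_mr *mr;

	mr = smbd_register_mr(info, pages, num_pages, 0 /* offset */, tailsz,
			      writing, true /* ask peer to invalidate */);
	if (!mr)
		return -EAGAIN;

	/*
	 * ... advertise mr->mr via a buffer descriptor in the request, then
	 * wait for the server to complete its RDMA read/write ...
	 */

	/* Return the MR to the ready list (invalidating it if needed) */
	return smbd_deregister_mr(mr);
}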

#else
#define cifs_rdma_enabled(server) 0
struct smbd_connection {};
static inline void *smbd_get_connection(
	struct TCP_Server_Info *server, struct sockaddr *dstaddr) {return NULL;}
static inline int smbd_reconnect(struct TCP_Server_Info *server) {return -1; }
static inline void smbd_destroy(struct TCP_Server_Info *server) {}
static inline int smbd_recv(struct smbd_connection *info, struct msghdr *msg) {return -1; }
static inline int smbd_send(struct TCP_Server_Info *server, int num_rqst, struct smb_rqst *rqst) {return -1; }
#endif

#endif