-
Notifications
You must be signed in to change notification settings - Fork 12
[LTS 8.6] cifs: CVE-2023-53751 #1059
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
base: ciqlts8_6
Are you sure you want to change the base?
Changes from all commits
c746f8a
601cd8f
9d7bba3
ac27ef5
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -147,9 +147,11 @@ static void reconn_set_next_dfs_target(struct TCP_Server_Info *server, | |
|
|
||
| name = dfs_cache_get_tgt_name(*tgt_it); | ||
|
|
||
| spin_lock(&server->srv_lock); | ||
| kfree(server->hostname); | ||
|
|
||
| server->hostname = extract_hostname(name); | ||
|
Collaborator
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more.

Review comment: This may schedule while atomic, because `extract_hostname()` is called here under `spin_lock(&server->srv_lock)` and performs a sleeping memory allocation. Follow upstream by calling the hostname extraction/allocation outside the spinlock. [Comment truncated in the page capture — verify the exact suggested call against the upstream commit.] |
||
| spin_unlock(&server->srv_lock); | ||
| if (IS_ERR(server->hostname)) { | ||
| cifs_dbg(FYI, | ||
| "%s: failed to extract hostname from target: %ld\n", | ||
|
|
@@ -261,7 +263,7 @@ cifs_reconnect(struct TCP_Server_Info *server) | |
|
|
||
| /* do not want to be sending data on a socket we are freeing */ | ||
| cifs_dbg(FYI, "%s: tearing down socket\n", __func__); | ||
| mutex_lock(&server->srv_mutex); | ||
| cifs_server_lock(server); | ||
| if (server->ssocket) { | ||
| cifs_dbg(FYI, "State: 0x%x Flags: 0x%lx\n", | ||
| server->ssocket->state, server->ssocket->flags); | ||
|
|
@@ -291,7 +293,7 @@ cifs_reconnect(struct TCP_Server_Info *server) | |
| mid_entry->mid_flags |= MID_DELETED; | ||
| } | ||
| spin_unlock(&GlobalMid_Lock); | ||
| mutex_unlock(&server->srv_mutex); | ||
| cifs_server_unlock(server); | ||
|
|
||
| cifs_dbg(FYI, "%s: issuing mid callbacks\n", __func__); | ||
| list_for_each_safe(tmp, tmp2, &retry_list) { | ||
|
|
@@ -302,15 +304,15 @@ cifs_reconnect(struct TCP_Server_Info *server) | |
| } | ||
|
|
||
| if (cifs_rdma_enabled(server)) { | ||
| mutex_lock(&server->srv_mutex); | ||
| cifs_server_lock(server); | ||
| smbd_destroy(server); | ||
| mutex_unlock(&server->srv_mutex); | ||
| cifs_server_unlock(server); | ||
| } | ||
|
|
||
| do { | ||
| try_to_freeze(); | ||
|
|
||
| mutex_lock(&server->srv_mutex); | ||
| cifs_server_lock(server); | ||
|
|
||
| #ifdef CONFIG_CIFS_SWN_UPCALL | ||
| if (server->use_swn_dstaddr) { | ||
|
|
@@ -352,7 +354,7 @@ cifs_reconnect(struct TCP_Server_Info *server) | |
| rc = generic_ip_connect(server); | ||
| if (rc) { | ||
| cifs_dbg(FYI, "reconnect error %d\n", rc); | ||
| mutex_unlock(&server->srv_mutex); | ||
| cifs_server_unlock(server); | ||
| msleep(3000); | ||
| } else { | ||
| atomic_inc(&tcpSesReconnectCount); | ||
|
|
@@ -364,7 +366,7 @@ cifs_reconnect(struct TCP_Server_Info *server) | |
| #ifdef CONFIG_CIFS_SWN_UPCALL | ||
| server->use_swn_dstaddr = false; | ||
| #endif | ||
| mutex_unlock(&server->srv_mutex); | ||
| cifs_server_unlock(server); | ||
| } | ||
| } while (server->tcpStatus == CifsNeedReconnect); | ||
|
|
||
|
|
@@ -418,9 +420,7 @@ cifs_echo_request(struct work_struct *work) | |
| goto requeue_echo; | ||
|
|
||
| rc = server->ops->echo ? server->ops->echo(server) : -ENOSYS; | ||
| if (rc) | ||
| cifs_dbg(FYI, "Unable to send echo request to server: %s\n", | ||
| server->hostname); | ||
| cifs_server_dbg(FYI, "send echo request: rc = %d\n", rc); | ||
|
|
||
| #ifdef CONFIG_CIFS_SWN_UPCALL | ||
| /* Check witness registrations */ | ||
|
|
@@ -1177,6 +1177,8 @@ static int match_server(struct TCP_Server_Info *server, struct smb3_fs_context * | |
| { | ||
| struct sockaddr *addr = (struct sockaddr *)&ctx->dstaddr; | ||
|
|
||
| lockdep_assert_held(&cifs_tcp_ses_lock); | ||
|
|
||
|
Comment on lines
+1180
to
+1181
Collaborator
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more.

Review comment: Remove this lockdep assert. The upstream commit added it in a context that does not match this LTS 8.6 backport. [Comment truncated in the page capture — verify the rationale against the upstream commit that introduced `lockdep_assert_held(&cifs_tcp_ses_lock)` in `match_server()`.] |
||
| if (ctx->nosharesock) | ||
| return 0; | ||
|
|
||
|
|
@@ -1194,8 +1196,12 @@ static int match_server(struct TCP_Server_Info *server, struct smb3_fs_context * | |
| if (!net_eq(cifs_net_ns(server), current->nsproxy->net_ns)) | ||
| return 0; | ||
|
|
||
| if (strcasecmp(server->hostname, ctx->server_hostname)) | ||
| spin_lock(&server->srv_lock); | ||
| if (strcasecmp(server->hostname, ctx->server_hostname)) { | ||
| spin_unlock(&server->srv_lock); | ||
| return 0; | ||
| } | ||
| spin_unlock(&server->srv_lock); | ||
|
|
||
| if (!match_address(server, addr, | ||
| (struct sockaddr *)&ctx->srcaddr)) | ||
|
|
@@ -1332,7 +1338,7 @@ cifs_get_tcp_session(struct smb3_fs_context *ctx) | |
| init_waitqueue_head(&tcp_ses->response_q); | ||
| init_waitqueue_head(&tcp_ses->request_q); | ||
| INIT_LIST_HEAD(&tcp_ses->pending_mid_q); | ||
| mutex_init(&tcp_ses->srv_mutex); | ||
| mutex_init(&tcp_ses->_srv_mutex); | ||
| memcpy(tcp_ses->workstation_RFC1001_name, | ||
| ctx->source_rfc1001_name, RFC1001_NAME_LEN_WITH_NULL); | ||
| memcpy(tcp_ses->server_RFC1001_name, | ||
|
|
@@ -1343,6 +1349,7 @@ cifs_get_tcp_session(struct smb3_fs_context *ctx) | |
| tcp_ses->lstrp = jiffies; | ||
| tcp_ses->compress_algorithm = cpu_to_le16(ctx->compression); | ||
| spin_lock_init(&tcp_ses->req_lock); | ||
| spin_lock_init(&tcp_ses->srv_lock); | ||
| INIT_LIST_HEAD(&tcp_ses->tcp_ses_list); | ||
| INIT_LIST_HEAD(&tcp_ses->smb_ses_list); | ||
| INIT_DELAYED_WORK(&tcp_ses->echo, cifs_echo_request); | ||
|
|
@@ -1512,7 +1519,9 @@ cifs_setup_ipc(struct cifs_ses *ses, struct smb3_fs_context *ctx) | |
| if (tcon == NULL) | ||
| return -ENOMEM; | ||
|
|
||
| spin_lock(&server->srv_lock); | ||
| scnprintf(unc, sizeof(unc), "\\\\%s\\IPC$", server->hostname); | ||
| spin_unlock(&server->srv_lock); | ||
|
|
||
| xid = get_xid(); | ||
| tcon->ses = ses; | ||
|
|
@@ -4081,7 +4090,9 @@ int cifs_tree_connect(const unsigned int xid, struct cifs_tcon *tcon, const stru | |
|
|
||
| if (!tcon->dfs_path) { | ||
| if (tcon->ipc) { | ||
| cifs_server_lock(server); | ||
| scnprintf(tree, MAX_TREE_SIZE, "\\\\%s\\IPC$", server->hostname); | ||
| cifs_server_unlock(server); | ||
| rc = ops->tree_connect(xid, tcon->ses, tree, tcon, nlsc); | ||
| } else { | ||
| rc = ops->tree_connect(xid, tcon->ses, tcon->treeName, tcon, nlsc); | ||
|
|
@@ -4095,8 +4106,6 @@ int cifs_tree_connect(const unsigned int xid, struct cifs_tcon *tcon, const stru | |
| isroot = ref.server_type == DFS_TYPE_ROOT; | ||
| free_dfs_info_param(&ref); | ||
|
|
||
| extract_unc_hostname(server->hostname, &tcp_host, &tcp_host_len); | ||
|
|
||
| for (it = dfs_cache_get_tgt_iterator(&tl); it; it = dfs_cache_get_next_tgt(&tl, it)) { | ||
| bool target_match; | ||
|
|
||
|
|
@@ -4114,10 +4123,13 @@ int cifs_tree_connect(const unsigned int xid, struct cifs_tcon *tcon, const stru | |
|
|
||
| extract_unc_hostname(share, &dfs_host, &dfs_host_len); | ||
|
|
||
| cifs_server_lock(server); | ||
| extract_unc_hostname(server->hostname, &tcp_host, &tcp_host_len); | ||
| if (dfs_host_len != tcp_host_len | ||
| || strncasecmp(dfs_host, tcp_host, dfs_host_len) != 0) { | ||
| cifs_dbg(FYI, "%s: %.*s doesn't match %.*s\n", __func__, (int)dfs_host_len, | ||
| dfs_host, (int)tcp_host_len, tcp_host); | ||
| cifs_server_unlock(server); | ||
|
|
||
| rc = match_target_ip(server, dfs_host, dfs_host_len, &target_match); | ||
| if (rc) { | ||
|
|
@@ -4129,7 +4141,8 @@ int cifs_tree_connect(const unsigned int xid, struct cifs_tcon *tcon, const stru | |
| cifs_dbg(FYI, "%s: skipping target\n", __func__); | ||
| continue; | ||
| } | ||
| } | ||
| } else | ||
| cifs_server_unlock(server); | ||
|
|
||
| if (tcon->ipc) { | ||
| scnprintf(tree, MAX_TREE_SIZE, "\\\\%s\\IPC$", share); | ||
|
|
||
[Page capture artifact] Uh oh!
There was an error while loading. Please reload this page. (The remainder of the pull-request diff failed to load during capture; the hunks above are incomplete.)