Diffstat (limited to 'fs/cifs')
36 files changed, 1810 insertions, 1515 deletions
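The bulk of this diff moves the cached root-handle logic into a new cached_dir.c/cached_dir.h pair built around a refcounted struct cached_fid. As a reading aid, here is a minimal sketch (not part of the commit) of how a caller is expected to use the API declared in cached_dir.h below; the helper name and the surrounding xid/tcon/cifs_sb plumbing are assumptions for illustration only.

/*
 * Illustrative sketch only: typical use of the cached directory API from
 * cached_dir.h.  example_stat_cached_root() and its arguments are
 * hypothetical; real callers are e.g. the readdir and revalidation paths.
 */
#include "cifsglob.h"
#include "cifs_debug.h"
#include "cached_dir.h"

static int example_stat_cached_root(unsigned int xid, struct cifs_tcon *tcon,
				    struct cifs_sb_info *cifs_sb)
{
	struct cached_fid *cfid = NULL;
	int rc;

	/*
	 * Only the share root ("") can be cached; a reference is taken on
	 * success.  -EOPNOTSUPP is returned for SMB1 or nohandlecache mounts.
	 */
	rc = open_cached_dir(xid, tcon, "", cifs_sb, false, &cfid);
	if (rc)
		return rc;

	/* Use the cached handle, e.g. as the fid in a compound request. */
	cifs_dbg(FYI, "cached root fid 0x%llx\n", cfid->fid.persistent_fid);

	/* Drop our reference; the handle itself stays cached for reuse. */
	close_cached_dir(cfid);
	return 0;
}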
diff --git a/fs/cifs/Makefile b/fs/cifs/Makefile index 8c9f2c00be72..7c9785973f49 100644 --- a/fs/cifs/Makefile +++ b/fs/cifs/Makefile @@ -5,9 +5,9 @@ ccflags-y += -I$(src) # needed for trace events obj-$(CONFIG_CIFS) += cifs.o -cifs-y := trace.o cifsfs.o cifssmb.o cifs_debug.o connect.o dir.o file.o \ +cifs-y := trace.o cifsfs.o cifs_debug.o connect.o dir.o file.o \ inode.o link.o misc.o netmisc.o smbencrypt.o transport.o \ - cifs_unicode.o nterr.o cifsencrypt.o \ + cached_dir.o cifs_unicode.o nterr.o cifsencrypt.o \ readdir.o ioctl.o sess.o export.o unc.o winucase.o \ smb2ops.o smb2maperror.o smb2transport.o \ smb2misc.o smb2pdu.o smb2inode.o smb2file.o cifsacl.o fs_context.o \ @@ -31,4 +31,4 @@ cifs-$(CONFIG_CIFS_SMB_DIRECT) += smbdirect.o cifs-$(CONFIG_CIFS_ROOT) += cifsroot.o -cifs-$(CONFIG_CIFS_ALLOW_INSECURE_LEGACY) += smb1ops.o +cifs-$(CONFIG_CIFS_ALLOW_INSECURE_LEGACY) += smb1ops.o cifssmb.o diff --git a/fs/cifs/cached_dir.c b/fs/cifs/cached_dir.c new file mode 100644 index 000000000000..b401339f6e73 --- /dev/null +++ b/fs/cifs/cached_dir.c @@ -0,0 +1,388 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Functions to handle the cached directory entries + * + * Copyright (c) 2022, Ronnie Sahlberg <lsahlber@redhat.com> + */ + +#include "cifsglob.h" +#include "cifsproto.h" +#include "cifs_debug.h" +#include "smb2proto.h" +#include "cached_dir.h" + +/* + * Open the and cache a directory handle. + * If error then *cfid is not initialized. + */ +int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon, + const char *path, + struct cifs_sb_info *cifs_sb, + bool lookup_only, struct cached_fid **ret_cfid) +{ + struct cifs_ses *ses; + struct TCP_Server_Info *server; + struct cifs_open_parms oparms; + struct smb2_create_rsp *o_rsp = NULL; + struct smb2_query_info_rsp *qi_rsp = NULL; + int resp_buftype[2]; + struct smb_rqst rqst[2]; + struct kvec rsp_iov[2]; + struct kvec open_iov[SMB2_CREATE_IOV_SIZE]; + struct kvec qi_iov[1]; + int rc, flags = 0; + __le16 utf16_path = 0; /* Null - since an open of top of share */ + u8 oplock = SMB2_OPLOCK_LEVEL_II; + struct cifs_fid *pfid; + struct dentry *dentry; + struct cached_fid *cfid; + + if (tcon == NULL || tcon->nohandlecache || + is_smb1_server(tcon->ses->server)) + return -EOPNOTSUPP; + + ses = tcon->ses; + server = ses->server; + + if (cifs_sb->root == NULL) + return -ENOENT; + + if (strlen(path)) + return -ENOENT; + + dentry = cifs_sb->root; + + cfid = tcon->cfid; + mutex_lock(&cfid->fid_mutex); + if (cfid->is_valid) { + cifs_dbg(FYI, "found a cached root file handle\n"); + *ret_cfid = cfid; + kref_get(&cfid->refcount); + mutex_unlock(&cfid->fid_mutex); + return 0; + } + + /* + * We do not hold the lock for the open because in case + * SMB2_open needs to reconnect, it will end up calling + * cifs_mark_open_files_invalid() which takes the lock again + * thus causing a deadlock + */ + mutex_unlock(&cfid->fid_mutex); + + if (lookup_only) + return -ENOENT; + + if (smb3_encryption_required(tcon)) + flags |= CIFS_TRANSFORM_REQ; + + if (!server->ops->new_lease_key) + return -EIO; + + pfid = &cfid->fid; + server->ops->new_lease_key(pfid); + + memset(rqst, 0, sizeof(rqst)); + resp_buftype[0] = resp_buftype[1] = CIFS_NO_BUFFER; + memset(rsp_iov, 0, sizeof(rsp_iov)); + + /* Open */ + memset(&open_iov, 0, sizeof(open_iov)); + rqst[0].rq_iov = open_iov; + rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE; + + oparms.tcon = tcon; + oparms.create_options = cifs_create_options(cifs_sb, CREATE_NOT_FILE); + oparms.desired_access = FILE_READ_ATTRIBUTES; + 
oparms.disposition = FILE_OPEN; + oparms.fid = pfid; + oparms.reconnect = false; + + rc = SMB2_open_init(tcon, server, + &rqst[0], &oplock, &oparms, &utf16_path); + if (rc) + goto oshr_free; + smb2_set_next_command(tcon, &rqst[0]); + + memset(&qi_iov, 0, sizeof(qi_iov)); + rqst[1].rq_iov = qi_iov; + rqst[1].rq_nvec = 1; + + rc = SMB2_query_info_init(tcon, server, + &rqst[1], COMPOUND_FID, + COMPOUND_FID, FILE_ALL_INFORMATION, + SMB2_O_INFO_FILE, 0, + sizeof(struct smb2_file_all_info) + + PATH_MAX * 2, 0, NULL); + if (rc) + goto oshr_free; + + smb2_set_related(&rqst[1]); + + rc = compound_send_recv(xid, ses, server, + flags, 2, rqst, + resp_buftype, rsp_iov); + mutex_lock(&cfid->fid_mutex); + + /* + * Now we need to check again as the cached root might have + * been successfully re-opened from a concurrent process + */ + + if (cfid->is_valid) { + /* work was already done */ + + /* stash fids for close() later */ + struct cifs_fid fid = { + .persistent_fid = pfid->persistent_fid, + .volatile_fid = pfid->volatile_fid, + }; + + /* + * caller expects this func to set the fid in cfid to valid + * cached root, so increment the refcount. + */ + kref_get(&cfid->refcount); + + mutex_unlock(&cfid->fid_mutex); + + if (rc == 0) { + /* close extra handle outside of crit sec */ + SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid); + } + rc = 0; + goto oshr_free; + } + + /* Cached root is still invalid, continue normaly */ + + if (rc) { + if (rc == -EREMCHG) { + tcon->need_reconnect = true; + pr_warn_once("server share %s deleted\n", + tcon->treeName); + } + goto oshr_exit; + } + + atomic_inc(&tcon->num_remote_opens); + + o_rsp = (struct smb2_create_rsp *)rsp_iov[0].iov_base; + oparms.fid->persistent_fid = o_rsp->PersistentFileId; + oparms.fid->volatile_fid = o_rsp->VolatileFileId; +#ifdef CONFIG_CIFS_DEBUG2 + oparms.fid->mid = le64_to_cpu(o_rsp->hdr.MessageId); +#endif /* CIFS_DEBUG2 */ + + cfid->tcon = tcon; + cfid->is_valid = true; + cfid->dentry = dentry; + dget(dentry); + kref_init(&cfid->refcount); + + /* BB TBD check to see if oplock level check can be removed below */ + if (o_rsp->OplockLevel == SMB2_OPLOCK_LEVEL_LEASE) { + /* + * See commit 2f94a3125b87. 
Increment the refcount when we + * get a lease for root, release it if lease break occurs + */ + kref_get(&cfid->refcount); + cfid->has_lease = true; + smb2_parse_contexts(server, o_rsp, + &oparms.fid->epoch, + oparms.fid->lease_key, &oplock, + NULL, NULL); + } else + goto oshr_exit; + + qi_rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base; + if (le32_to_cpu(qi_rsp->OutputBufferLength) < sizeof(struct smb2_file_all_info)) + goto oshr_exit; + if (!smb2_validate_and_copy_iov( + le16_to_cpu(qi_rsp->OutputBufferOffset), + sizeof(struct smb2_file_all_info), + &rsp_iov[1], sizeof(struct smb2_file_all_info), + (char *)&cfid->file_all_info)) + cfid->file_all_info_is_valid = true; + + cfid->time = jiffies; + +oshr_exit: + mutex_unlock(&cfid->fid_mutex); +oshr_free: + SMB2_open_free(&rqst[0]); + SMB2_query_info_free(&rqst[1]); + free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base); + free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base); + if (rc == 0) + *ret_cfid = cfid; + + return rc; +} + +int open_cached_dir_by_dentry(struct cifs_tcon *tcon, + struct dentry *dentry, + struct cached_fid **ret_cfid) +{ + struct cached_fid *cfid; + + cfid = tcon->cfid; + + mutex_lock(&cfid->fid_mutex); + if (cfid->dentry == dentry) { + cifs_dbg(FYI, "found a cached root file handle by dentry\n"); + *ret_cfid = cfid; + kref_get(&cfid->refcount); + mutex_unlock(&cfid->fid_mutex); + return 0; + } + mutex_unlock(&cfid->fid_mutex); + return -ENOENT; +} + +static void +smb2_close_cached_fid(struct kref *ref) +{ + struct cached_fid *cfid = container_of(ref, struct cached_fid, + refcount); + struct cached_dirent *dirent, *q; + + if (cfid->is_valid) { + cifs_dbg(FYI, "clear cached root file handle\n"); + SMB2_close(0, cfid->tcon, cfid->fid.persistent_fid, + cfid->fid.volatile_fid); + } + + /* + * We only check validity above to send SMB2_close, + * but we still need to invalidate these entries + * when this function is called + */ + cfid->is_valid = false; + cfid->file_all_info_is_valid = false; + cfid->has_lease = false; + if (cfid->dentry) { + dput(cfid->dentry); + cfid->dentry = NULL; + } + /* + * Delete all cached dirent names + */ + mutex_lock(&cfid->dirents.de_mutex); + list_for_each_entry_safe(dirent, q, &cfid->dirents.entries, entry) { + list_del(&dirent->entry); + kfree(dirent->name); + kfree(dirent); + } + cfid->dirents.is_valid = 0; + cfid->dirents.is_failed = 0; + cfid->dirents.ctx = NULL; + cfid->dirents.pos = 0; + mutex_unlock(&cfid->dirents.de_mutex); + +} + +void close_cached_dir(struct cached_fid *cfid) +{ + mutex_lock(&cfid->fid_mutex); + kref_put(&cfid->refcount, smb2_close_cached_fid); + mutex_unlock(&cfid->fid_mutex); +} + +void close_cached_dir_lease_locked(struct cached_fid *cfid) +{ + if (cfid->has_lease) { + cfid->has_lease = false; + kref_put(&cfid->refcount, smb2_close_cached_fid); + } +} + +void close_cached_dir_lease(struct cached_fid *cfid) +{ + mutex_lock(&cfid->fid_mutex); + close_cached_dir_lease_locked(cfid); + mutex_unlock(&cfid->fid_mutex); +} + +/* + * Called from cifs_kill_sb when we unmount a share + */ +void close_all_cached_dirs(struct cifs_sb_info *cifs_sb) +{ + struct rb_root *root = &cifs_sb->tlink_tree; + struct rb_node *node; + struct cached_fid *cfid; + struct cifs_tcon *tcon; + struct tcon_link *tlink; + + for (node = rb_first(root); node; node = rb_next(node)) { + tlink = rb_entry(node, struct tcon_link, tl_rbnode); + tcon = tlink_tcon(tlink); + if (IS_ERR(tcon)) + continue; + cfid = tcon->cfid; + mutex_lock(&cfid->fid_mutex); + if (cfid->dentry) { + dput(cfid->dentry); + 
cfid->dentry = NULL; + } + mutex_unlock(&cfid->fid_mutex); + } +} + +/* + * Invalidate and close all cached dirs when a TCON has been reset + * due to a session loss. + */ +void invalidate_all_cached_dirs(struct cifs_tcon *tcon) +{ + mutex_lock(&tcon->cfid->fid_mutex); + tcon->cfid->is_valid = false; + /* cached handle is not valid, so SMB2_CLOSE won't be sent below */ + close_cached_dir_lease_locked(tcon->cfid); + memset(&tcon->cfid->fid, 0, sizeof(struct cifs_fid)); + mutex_unlock(&tcon->cfid->fid_mutex); +} + +static void +smb2_cached_lease_break(struct work_struct *work) +{ + struct cached_fid *cfid = container_of(work, + struct cached_fid, lease_break); + + close_cached_dir_lease(cfid); +} + +int cached_dir_lease_break(struct cifs_tcon *tcon, __u8 lease_key[16]) +{ + if (tcon->cfid->is_valid && + !memcmp(lease_key, + tcon->cfid->fid.lease_key, + SMB2_LEASE_KEY_SIZE)) { + tcon->cfid->time = 0; + INIT_WORK(&tcon->cfid->lease_break, + smb2_cached_lease_break); + queue_work(cifsiod_wq, + &tcon->cfid->lease_break); + return true; + } + return false; +} + +struct cached_fid *init_cached_dir(void) +{ + struct cached_fid *cfid; + + cfid = kzalloc(sizeof(*cfid), GFP_KERNEL); + if (!cfid) + return NULL; + INIT_LIST_HEAD(&cfid->dirents.entries); + mutex_init(&cfid->dirents.de_mutex); + mutex_init(&cfid->fid_mutex); + return cfid; +} + +void free_cached_dir(struct cifs_tcon *tcon) +{ + kfree(tcon->cfid); +} diff --git a/fs/cifs/cached_dir.h b/fs/cifs/cached_dir.h new file mode 100644 index 000000000000..bd262dc8b179 --- /dev/null +++ b/fs/cifs/cached_dir.h @@ -0,0 +1,64 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Functions to handle the cached directory entries + * + * Copyright (c) 2022, Ronnie Sahlberg <lsahlber@redhat.com> + */ + +#ifndef _CACHED_DIR_H +#define _CACHED_DIR_H + + +struct cached_dirent { + struct list_head entry; + char *name; + int namelen; + loff_t pos; + + struct cifs_fattr fattr; +}; + +struct cached_dirents { + bool is_valid:1; + bool is_failed:1; + struct dir_context *ctx; /* + * Only used to make sure we only take entries + * from a single context. Never dereferenced. 
+ */ + struct mutex de_mutex; + int pos; /* Expected ctx->pos */ + struct list_head entries; +}; + +struct cached_fid { + bool is_valid:1; /* Do we have a useable root fid */ + bool file_all_info_is_valid:1; + bool has_lease:1; + unsigned long time; /* jiffies of when lease was taken */ + struct kref refcount; + struct cifs_fid fid; + struct mutex fid_mutex; + struct cifs_tcon *tcon; + struct dentry *dentry; + struct work_struct lease_break; + struct smb2_file_all_info file_all_info; + struct cached_dirents dirents; +}; + +extern struct cached_fid *init_cached_dir(void); +extern void free_cached_dir(struct cifs_tcon *tcon); +extern int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon, + const char *path, + struct cifs_sb_info *cifs_sb, + bool lookup_only, struct cached_fid **cfid); +extern int open_cached_dir_by_dentry(struct cifs_tcon *tcon, + struct dentry *dentry, + struct cached_fid **cfid); +extern void close_cached_dir(struct cached_fid *cfid); +extern void close_cached_dir_lease(struct cached_fid *cfid); +extern void close_cached_dir_lease_locked(struct cached_fid *cfid); +extern void close_all_cached_dirs(struct cifs_sb_info *cifs_sb); +extern void invalidate_all_cached_dirs(struct cifs_tcon *tcon); +extern int cached_dir_lease_break(struct cifs_tcon *tcon, __u8 lease_key[16]); + +#endif /* _CACHED_DIR_H */ diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c index 2cfbac8bb965..c05477e28cff 100644 --- a/fs/cifs/cifs_debug.c +++ b/fs/cifs/cifs_debug.c @@ -36,13 +36,13 @@ cifs_dump_mem(char *label, void *data, int length) void cifs_dump_detail(void *buf, struct TCP_Server_Info *server) { #ifdef CONFIG_CIFS_DEBUG2 - struct smb_hdr *smb = (struct smb_hdr *)buf; + struct smb_hdr *smb = buf; cifs_dbg(VFS, "Cmd: %d Err: 0x%x Flags: 0x%x Flgs2: 0x%x Mid: %d Pid: %d\n", smb->Command, smb->Status.CifsError, smb->Flags, smb->Flags2, smb->Mid, smb->Pid); cifs_dbg(VFS, "smb buf %p len %u\n", smb, - server->ops->calc_smb_size(smb, server)); + server->ops->calc_smb_size(smb)); #endif /* CONFIG_CIFS_DEBUG2 */ } @@ -55,7 +55,7 @@ void cifs_dump_mids(struct TCP_Server_Info *server) return; cifs_dbg(VFS, "Dump pending requests:\n"); - spin_lock(&GlobalMid_Lock); + spin_lock(&server->mid_lock); list_for_each_entry(mid_entry, &server->pending_mid_q, qhead) { cifs_dbg(VFS, "State: %d Cmd: %d Pid: %d Cbdata: %p Mid %llu\n", mid_entry->mid_state, @@ -78,7 +78,7 @@ void cifs_dump_mids(struct TCP_Server_Info *server) mid_entry->resp_buf, 62); } } - spin_unlock(&GlobalMid_Lock); + spin_unlock(&server->mid_lock); #endif /* CONFIG_CIFS_DEBUG2 */ } @@ -168,7 +168,6 @@ cifs_dump_iface(struct seq_file *m, struct cifs_server_iface *iface) static int cifs_debug_files_proc_show(struct seq_file *m, void *v) { - struct list_head *tmp, *tmp1, *tmp2; struct TCP_Server_Info *server; struct cifs_ses *ses; struct cifs_tcon *tcon; @@ -184,14 +183,10 @@ static int cifs_debug_files_proc_show(struct seq_file *m, void *v) #endif /* CIFS_DEBUG2 */ spin_lock(&cifs_tcp_ses_lock); list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) { - list_for_each(tmp, &server->smb_ses_list) { - ses = list_entry(tmp, struct cifs_ses, smb_ses_list); - list_for_each(tmp1, &ses->tcon_list) { - tcon = list_entry(tmp1, struct cifs_tcon, tcon_list); + list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) { + list_for_each_entry(tcon, &ses->tcon_list, tcon_list) { spin_lock(&tcon->open_file_lock); - list_for_each(tmp2, &tcon->openFileList) { - cfile = list_entry(tmp2, struct cifsFileInfo, - tlist); + 
list_for_each_entry(cfile, &tcon->openFileList, tlist) { seq_printf(m, "0x%x 0x%llx 0x%x %d %d %d %pd", tcon->tid, @@ -218,7 +213,6 @@ static int cifs_debug_files_proc_show(struct seq_file *m, void *v) static int cifs_debug_data_proc_show(struct seq_file *m, void *v) { - struct list_head *tmp2, *tmp3; struct mid_q_entry *mid_entry; struct TCP_Server_Info *server; struct cifs_ses *ses; @@ -381,9 +375,7 @@ skip_rdma: seq_printf(m, "\n\n\tSessions: "); i = 0; - list_for_each(tmp2, &server->smb_ses_list) { - ses = list_entry(tmp2, struct cifs_ses, - smb_ses_list); + list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) { i++; if ((ses->serverDomain == NULL) || (ses->serverOS == NULL) || @@ -447,9 +439,7 @@ skip_rdma: else seq_puts(m, "none\n"); - list_for_each(tmp3, &ses->tcon_list) { - tcon = list_entry(tmp3, struct cifs_tcon, - tcon_list); + list_for_each_entry(tcon, &ses->tcon_list, tcon_list) { ++j; seq_printf(m, "\n\t%d) ", j); cifs_debug_tcon(m, tcon); @@ -473,10 +463,8 @@ skip_rdma: seq_printf(m, "\n\t\t[NONE]"); seq_puts(m, "\n\n\tMIDs: "); - spin_lock(&GlobalMid_Lock); - list_for_each(tmp3, &server->pending_mid_q) { - mid_entry = list_entry(tmp3, struct mid_q_entry, - qhead); + spin_lock(&server->mid_lock); + list_for_each_entry(mid_entry, &server->pending_mid_q, qhead) { seq_printf(m, "\n\tState: %d com: %d pid:" " %d cbdata: %p mid %llu\n", mid_entry->mid_state, @@ -485,7 +473,7 @@ skip_rdma: mid_entry->callback_data, mid_entry->mid); } - spin_unlock(&GlobalMid_Lock); + spin_unlock(&server->mid_lock); seq_printf(m, "\n--\n"); } if (c == 0) @@ -504,7 +492,6 @@ static ssize_t cifs_stats_proc_write(struct file *file, { bool bv; int rc; - struct list_head *tmp1, *tmp2, *tmp3; struct TCP_Server_Info *server; struct cifs_ses *ses; struct cifs_tcon *tcon; @@ -514,8 +501,8 @@ static ssize_t cifs_stats_proc_write(struct file *file, #ifdef CONFIG_CIFS_STATS2 int i; - atomic_set(&totBufAllocCount, 0); - atomic_set(&totSmBufAllocCount, 0); + atomic_set(&total_buf_alloc_count, 0); + atomic_set(&total_small_buf_alloc_count, 0); #endif /* CONFIG_CIFS_STATS2 */ atomic_set(&tcpSesReconnectCount, 0); atomic_set(&tconInfoReconnectCount, 0); @@ -525,9 +512,7 @@ static ssize_t cifs_stats_proc_write(struct file *file, GlobalCurrentXid = 0; spin_unlock(&GlobalMid_Lock); spin_lock(&cifs_tcp_ses_lock); - list_for_each(tmp1, &cifs_tcp_ses_list) { - server = list_entry(tmp1, struct TCP_Server_Info, - tcp_ses_list); + list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) { server->max_in_flight = 0; #ifdef CONFIG_CIFS_STATS2 for (i = 0; i < NUMBER_OF_SMB2_COMMANDS; i++) { @@ -538,13 +523,8 @@ static ssize_t cifs_stats_proc_write(struct file *file, server->fastest_cmd[0] = 0; } #endif /* CONFIG_CIFS_STATS2 */ - list_for_each(tmp2, &server->smb_ses_list) { - ses = list_entry(tmp2, struct cifs_ses, - smb_ses_list); - list_for_each(tmp3, &ses->tcon_list) { - tcon = list_entry(tmp3, - struct cifs_tcon, - tcon_list); + list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) { + list_for_each_entry(tcon, &ses->tcon_list, tcon_list) { atomic_set(&tcon->num_smbs_sent, 0); spin_lock(&tcon->stat_lock); tcon->bytes_read = 0; @@ -569,7 +549,6 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v) #ifdef CONFIG_CIFS_STATS2 int j; #endif /* STATS2 */ - struct list_head *tmp2, *tmp3; struct TCP_Server_Info *server; struct cifs_ses *ses; struct cifs_tcon *tcon; @@ -579,17 +558,17 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v) seq_printf(m, "Share (unique mount targets): %d\n", 
tconInfoAllocCount.counter); seq_printf(m, "SMB Request/Response Buffer: %d Pool size: %d\n", - bufAllocCount.counter, + buf_alloc_count.counter, cifs_min_rcv + tcpSesAllocCount.counter); seq_printf(m, "SMB Small Req/Resp Buffer: %d Pool size: %d\n", - smBufAllocCount.counter, cifs_min_small); + small_buf_alloc_count.counter, cifs_min_small); #ifdef CONFIG_CIFS_STATS2 seq_printf(m, "Total Large %d Small %d Allocations\n", - atomic_read(&totBufAllocCount), - atomic_read(&totSmBufAllocCount)); + atomic_read(&total_buf_alloc_count), + atomic_read(&total_small_buf_alloc_count)); #endif /* CONFIG_CIFS_STATS2 */ - seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount)); + seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&mid_count)); seq_printf(m, "\n%d session %d share reconnects\n", tcpSesReconnectCount.counter, tconInfoReconnectCount.counter); @@ -619,13 +598,8 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v) atomic_read(&server->smb2slowcmd[j]), server->hostname, j); #endif /* STATS2 */ - list_for_each(tmp2, &server->smb_ses_list) { - ses = list_entry(tmp2, struct cifs_ses, - smb_ses_list); - list_for_each(tmp3, &ses->tcon_list) { - tcon = list_entry(tmp3, - struct cifs_tcon, - tcon_list); + list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) { + list_for_each_entry(tcon, &ses->tcon_list, tcon_list) { i++; seq_printf(m, "\n%d) %s", i, tcon->treeName); if (tcon->need_reconnect) diff --git a/fs/cifs/cifsacl.c b/fs/cifs/cifsacl.c index bf861fef2f0c..fa480d62f313 100644 --- a/fs/cifs/cifsacl.c +++ b/fs/cifs/cifsacl.c @@ -1379,6 +1379,7 @@ chown_chgrp_exit: return rc; } +#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY struct cifs_ntsd *get_cifs_acl_by_fid(struct cifs_sb_info *cifs_sb, const struct cifs_fid *cifsfid, u32 *pacllen, u32 __maybe_unused unused) @@ -1512,6 +1513,7 @@ out: cifs_put_tlink(tlink); return rc; } +#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ /* Translate the CIFS ACL (similar to NTFS ACL) for a file into mode bits */ int diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c index 663cb9db4908..46f5718754f9 100644 --- a/fs/cifs/cifsencrypt.c +++ b/fs/cifs/cifsencrypt.c @@ -32,10 +32,9 @@ int __cifs_calc_signature(struct smb_rqst *rqst, int rc; struct kvec *iov = rqst->rq_iov; int n_vec = rqst->rq_nvec; - int is_smb2 = server->vals->header_preamble_size == 0; /* iov[0] is actual data and not the rfc1002 length for SMB2+ */ - if (is_smb2) { + if (!is_smb1(server)) { if (iov[0].iov_len <= 4) return -EIO; i = 0; @@ -141,13 +140,13 @@ int cifs_sign_rqst(struct smb_rqst *rqst, struct TCP_Server_Info *server, if ((cifs_pdu == NULL) || (server == NULL)) return -EINVAL; - spin_lock(&cifs_tcp_ses_lock); + spin_lock(&server->srv_lock); if (!(cifs_pdu->Flags2 & SMBFLG2_SECURITY_SIGNATURE) || server->tcpStatus == CifsNeedNegotiate) { - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&server->srv_lock); return rc; } - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&server->srv_lock); if (!server->session_estab) { memcpy(cifs_pdu->Signature.SecuritySignature, "BSRSPYL", 8); diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index 8f2e003e0590..8042d7280dec 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c @@ -46,6 +46,7 @@ #include "netlink.h" #endif #include "fs_context.h" +#include "cached_dir.h" /* * DOS dates from 1980/1/1 through 2107/12/31 @@ -68,6 +69,34 @@ bool enable_negotiate_signing; /* false by default */ unsigned int global_secflags = CIFSSEC_DEF; /* unsigned int ntlmv2_support = 0; */ unsigned int sign_CIFS_PDUs = 1; + +/* + * Global 
transaction id (XID) information + */ +unsigned int GlobalCurrentXid; /* protected by GlobalMid_Sem */ +unsigned int GlobalTotalActiveXid; /* prot by GlobalMid_Sem */ +unsigned int GlobalMaxActiveXid; /* prot by GlobalMid_Sem */ +spinlock_t GlobalMid_Lock; /* protects above & list operations on midQ entries */ + +/* + * Global counters, updated atomically + */ +atomic_t sesInfoAllocCount; +atomic_t tconInfoAllocCount; +atomic_t tcpSesNextId; +atomic_t tcpSesAllocCount; +atomic_t tcpSesReconnectCount; +atomic_t tconInfoReconnectCount; + +atomic_t mid_count; +atomic_t buf_alloc_count; +atomic_t small_buf_alloc_count; +#ifdef CONFIG_CIFS_STATS2 +atomic_t total_buf_alloc_count; +atomic_t total_small_buf_alloc_count; +#endif/* STATS2 */ +struct list_head cifs_tcp_ses_list; +spinlock_t cifs_tcp_ses_lock; static const struct super_operations cifs_super_ops; unsigned int CIFSMaxBufSize = CIFS_MAX_MSGSIZE; module_param(CIFSMaxBufSize, uint, 0444); @@ -255,30 +284,13 @@ out_no_root: static void cifs_kill_sb(struct super_block *sb) { struct cifs_sb_info *cifs_sb = CIFS_SB(sb); - struct cifs_tcon *tcon; - struct cached_fid *cfid; - struct rb_root *root = &cifs_sb->tlink_tree; - struct rb_node *node; - struct tcon_link *tlink; /* * We ned to release all dentries for the cached directories * before we kill the sb. */ if (cifs_sb->root) { - for (node = rb_first(root); node; node = rb_next(node)) { - tlink = rb_entry(node, struct tcon_link, tl_rbnode); - tcon = tlink_tcon(tlink); - if (IS_ERR(tcon)) - continue; - cfid = &tcon->crfid; - mutex_lock(&cfid->fid_mutex); - if (cfid->dentry) { - dput(cfid->dentry); - cfid->dentry = NULL; - } - mutex_unlock(&cfid->fid_mutex); - } + close_all_cached_dirs(cifs_sb); /* finally release root dentry */ dput(cifs_sb->root); @@ -681,6 +693,7 @@ cifs_show_options(struct seq_file *s, struct dentry *root) seq_printf(s, ",acdirmax=%lu", cifs_sb->ctx->acdirmax / HZ); seq_printf(s, ",acregmax=%lu", cifs_sb->ctx->acregmax / HZ); } + seq_printf(s, ",closetimeo=%lu", cifs_sb->ctx->closetimeo / HZ); if (tcon->ses->chan_max > 1) seq_printf(s, ",multichannel,max_channels=%zu", @@ -703,14 +716,17 @@ static void cifs_umount_begin(struct super_block *sb) tcon = cifs_sb_master_tcon(cifs_sb); spin_lock(&cifs_tcp_ses_lock); + spin_lock(&tcon->tc_lock); if ((tcon->tc_count > 1) || (tcon->status == TID_EXITING)) { /* we have other mounts to same share or we have already tried to force umount this and woken up all waiting network requests, nothing to do */ + spin_unlock(&tcon->tc_lock); spin_unlock(&cifs_tcp_ses_lock); return; } else if (tcon->tc_count == 1) tcon->status = TID_EXITING; + spin_unlock(&tcon->tc_lock); spin_unlock(&cifs_tcp_ses_lock); /* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */ @@ -1232,6 +1248,12 @@ ssize_t cifs_file_copychunk_range(unsigned int xid, lock_two_nondirectories(target_inode, src_inode); cifs_dbg(FYI, "about to flush pages\n"); + + rc = filemap_write_and_wait_range(src_inode->i_mapping, off, + off + len - 1); + if (rc) + goto out; + /* should we flush first and last page first */ truncate_inode_pages(&target_inode->i_data, 0); @@ -1537,8 +1559,7 @@ cifs_destroy_request_bufs(void) kmem_cache_destroy(cifs_sm_req_cachep); } -static int -cifs_init_mids(void) +static int init_mids(void) { cifs_mid_cachep = kmem_cache_create("cifs_mpx_ids", sizeof(struct mid_q_entry), 0, @@ -1556,8 +1577,7 @@ cifs_init_mids(void) return 0; } -static void -cifs_destroy_mids(void) +static void destroy_mids(void) { mempool_destroy(cifs_mid_poolp); 
kmem_cache_destroy(cifs_mid_cachep); @@ -1579,11 +1599,11 @@ init_cifs(void) atomic_set(&tcpSesReconnectCount, 0); atomic_set(&tconInfoReconnectCount, 0); - atomic_set(&bufAllocCount, 0); - atomic_set(&smBufAllocCount, 0); + atomic_set(&buf_alloc_count, 0); + atomic_set(&small_buf_alloc_count, 0); #ifdef CONFIG_CIFS_STATS2 - atomic_set(&totBufAllocCount, 0); - atomic_set(&totSmBufAllocCount, 0); + atomic_set(&total_buf_alloc_count, 0); + atomic_set(&total_small_buf_alloc_count, 0); if (slow_rsp_threshold < 1) cifs_dbg(FYI, "slow_response_threshold msgs disabled\n"); else if (slow_rsp_threshold > 32767) @@ -1591,7 +1611,7 @@ init_cifs(void) "slow response threshold set higher than recommended (0 to 32767)\n"); #endif /* CONFIG_CIFS_STATS2 */ - atomic_set(&midCount, 0); + atomic_set(&mid_count, 0); GlobalCurrentXid = 0; GlobalTotalActiveXid = 0; GlobalMaxActiveXid = 0; @@ -1654,7 +1674,7 @@ init_cifs(void) if (rc) goto out_destroy_deferredclose_wq; - rc = cifs_init_mids(); + rc = init_mids(); if (rc) goto out_destroy_inodecache; @@ -1711,7 +1731,7 @@ out_destroy_request_bufs: #endif cifs_destroy_request_bufs(); out_destroy_mids: - cifs_destroy_mids(); + destroy_mids(); out_destroy_inodecache: cifs_destroy_inodecache(); out_destroy_deferredclose_wq: @@ -1747,7 +1767,7 @@ exit_cifs(void) dfs_cache_destroy(); #endif cifs_destroy_request_bufs(); - cifs_destroy_mids(); + destroy_mids(); cifs_destroy_inodecache(); destroy_workqueue(deferredclose_wq); destroy_workqueue(cifsoplockd_wq); diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h index b17be47a8e59..5b4a7a32bdc5 100644 --- a/fs/cifs/cifsfs.h +++ b/fs/cifs/cifsfs.h @@ -153,6 +153,6 @@ extern const struct export_operations cifs_export_ops; #endif /* CONFIG_CIFS_NFSD_EXPORT */ /* when changing internal version - update following two lines at same time */ -#define SMB3_PRODUCT_BUILD 37 -#define CIFS_VERSION "2.37" +#define SMB3_PRODUCT_BUILD 39 +#define CIFS_VERSION "2.39" #endif /* _CIFSFS_H */ diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h index a643c84ff1e9..ae7f571a7dba 100644 --- a/fs/cifs/cifsglob.h +++ b/fs/cifs/cifsglob.h @@ -417,7 +417,7 @@ struct smb_version_operations { int (*close_dir)(const unsigned int, struct cifs_tcon *, struct cifs_fid *); /* calculate a size of SMB message */ - unsigned int (*calc_smb_size)(void *buf, struct TCP_Server_Info *ptcpi); + unsigned int (*calc_smb_size)(void *buf); /* check for STATUS_PENDING and process the response if yes */ bool (*is_status_pending)(char *buf, struct TCP_Server_Info *server); /* check for STATUS_NETWORK_SESSION_EXPIRED */ @@ -557,6 +557,8 @@ struct smb_version_values { #define HEADER_SIZE(server) (server->vals->header_size) #define MAX_HEADER_SIZE(server) (server->vals->max_header_size) +#define HEADER_PREAMBLE_SIZE(server) (server->vals->header_preamble_size) +#define MID_HEADER_SIZE(server) (HEADER_SIZE(server) - 1 - HEADER_PREAMBLE_SIZE(server)) /** * CIFS superblock mount flags (mnt_cifs_flags) to consider when @@ -605,6 +607,7 @@ inc_rfc1001_len(void *buf, int count) struct TCP_Server_Info { struct list_head tcp_ses_list; struct list_head smb_ses_list; + spinlock_t srv_lock; /* protect anything here that is not protected */ __u64 conn_id; /* connection identifier (useful for debugging) */ int srv_count; /* reference counter */ /* 15 character server name + 0x20 16th byte indicating type = srv */ @@ -622,6 +625,7 @@ struct TCP_Server_Info { #endif wait_queue_head_t response_q; wait_queue_head_t request_q; /* if more than maxmpx to srvr must block*/ + spinlock_t mid_lock; /* 
protect mid queue and it's entries */ struct list_head pending_mid_q; bool noblocksnd; /* use blocking sendmsg */ bool noautotune; /* do not autotune send buf sizes */ @@ -748,6 +752,11 @@ struct TCP_Server_Info { #endif }; +static inline bool is_smb1(struct TCP_Server_Info *server) +{ + return HEADER_PREAMBLE_SIZE(server) != 0; +} + static inline void cifs_server_lock(struct TCP_Server_Info *server) { unsigned int nofs_flag = memalloc_nofs_save(); @@ -1008,6 +1017,7 @@ struct cifs_ses { struct list_head rlist; /* reconnect list */ struct list_head tcon_list; struct cifs_tcon *tcon_ipc; + spinlock_t ses_lock; /* protect anything here that is not protected */ struct mutex session_mutex; struct TCP_Server_Info *server; /* pointer to server info */ int ses_count; /* reference counter */ @@ -1125,42 +1135,6 @@ struct cifs_fattr { u32 cf_cifstag; }; -struct cached_dirent { - struct list_head entry; - char *name; - int namelen; - loff_t pos; - - struct cifs_fattr fattr; -}; - -struct cached_dirents { - bool is_valid:1; - bool is_failed:1; - struct dir_context *ctx; /* - * Only used to make sure we only take entries - * from a single context. Never dereferenced. - */ - struct mutex de_mutex; - int pos; /* Expected ctx->pos */ - struct list_head entries; -}; - -struct cached_fid { - bool is_valid:1; /* Do we have a useable root fid */ - bool file_all_info_is_valid:1; - bool has_lease:1; - unsigned long time; /* jiffies of when lease was taken */ - struct kref refcount; - struct cifs_fid *fid; - struct mutex fid_mutex; - struct cifs_tcon *tcon; - struct dentry *dentry; - struct work_struct lease_break; - struct smb2_file_all_info file_all_info; - struct cached_dirents dirents; -}; - /* * there is one of these for each connection to a resource on a particular * session @@ -1169,6 +1143,7 @@ struct cifs_tcon { struct list_head tcon_list; int tc_count; struct list_head rlist; /* reconnect list */ + spinlock_t tc_lock; /* protect anything here that is not protected */ atomic_t num_local_opens; /* num of all opens including disconnected */ atomic_t num_remote_opens; /* num of all network opens on server */ struct list_head openFileList; @@ -1253,7 +1228,7 @@ struct cifs_tcon { struct fscache_volume *fscache; /* cookie for share */ #endif struct list_head pending_opens; /* list of incomplete opens */ - struct cached_fid crfid; /* Cached root fid */ + struct cached_fid *cfid; /* Cached root fid */ /* BB add field for back pointer to sb struct(s)? */ #ifdef CONFIG_CIFS_DFS_UPCALL struct list_head ulist; /* cache update list */ @@ -1899,33 +1874,78 @@ require use of the stronger protocol */ */ /**************************************************************************** - * Locking notes. All updates to global variables and lists should be - * protected by spinlocks or semaphores. + * Here are all the locks (spinlock, mutex, semaphore) in cifs.ko, arranged according + * to the locking order. i.e. if two locks are to be held together, the lock that + * appears higher in this list needs to be taken before the other. + * + * If you hold a lock that is lower in this list, and you need to take a higher lock + * (or if you think that one of the functions that you're calling may need to), first + * drop the lock you hold, pick up the higher lock, then the lower one. This will + * ensure that locks are picked up only in one direction in the below table + * (top to bottom). 
* - * Spinlocks - * --------- - * GlobalMid_Lock protects: - * list operations on pending_mid_q and oplockQ - * updates to XID counters, multiplex id and SMB sequence numbers - * list operations on global DnotifyReqList - * updates to ses->status and TCP_Server_Info->tcpStatus - * updates to server->CurrentMid - * tcp_ses_lock protects: - * list operations on tcp and SMB session lists - * tcon->open_file_lock protects the list of open files hanging off the tcon - * inode->open_file_lock protects the openFileList hanging off the inode - * cfile->file_info_lock protects counters and fields in cifs file struct - * f_owner.lock protects certain per file struct operations - * mapping->page_lock protects certain per page operations + * Also, if you expect a function to be called with a lock held, explicitly document + * this in the comments on top of your function definition. * - * Note that the cifs_tcon.open_file_lock should be taken before - * not after the cifsInodeInfo.open_file_lock + * And also, try to keep the critical sections (lock hold time) to be as minimal as + * possible. Blocking / calling other functions with a lock held always increase + * the risk of a possible deadlock. * - * Semaphores - * ---------- - * cifsInodeInfo->lock_sem protects: - * the list of locks held by the inode + * Following this rule will avoid unnecessary deadlocks, which can get really hard to + * debug. Also, any new lock that you introduce, please add to this list in the correct + * order. * + * Please populate this list whenever you introduce new locks in your changes. Or in + * case I've missed some existing locks. Please ensure that it's added in the list + * based on the locking order expected. + * + * ===================================================================================== + * Lock Protects Initialization fn + * ===================================================================================== + * vol_list_lock + * vol_info->ctx_lock vol_info->ctx + * cifs_sb_info->tlink_tree_lock cifs_sb_info->tlink_tree cifs_setup_cifs_sb + * TCP_Server_Info-> TCP_Server_Info cifs_get_tcp_session + * reconnect_mutex + * TCP_Server_Info->srv_mutex TCP_Server_Info cifs_get_tcp_session + * cifs_ses->session_mutex cifs_ses sesInfoAlloc + * cifs_tcon + * cifs_tcon->open_file_lock cifs_tcon->openFileList tconInfoAlloc + * cifs_tcon->pending_opens + * cifs_tcon->stat_lock cifs_tcon->bytes_read tconInfoAlloc + * cifs_tcon->bytes_written + * cifs_tcp_ses_lock cifs_tcp_ses_list sesInfoAlloc + * GlobalMid_Lock GlobalMaxActiveXid init_cifs + * GlobalCurrentXid + * GlobalTotalActiveXid + * TCP_Server_Info->srv_lock (anything in struct not protected by another lock and can change) + * TCP_Server_Info->mid_lock TCP_Server_Info->pending_mid_q cifs_get_tcp_session + * ->CurrentMid + * (any changes in mid_q_entry fields) + * TCP_Server_Info->req_lock TCP_Server_Info->in_flight cifs_get_tcp_session + * ->credits + * ->echo_credits + * ->oplock_credits + * ->reconnect_instance + * cifs_ses->ses_lock (anything that is not protected by another lock and can change) + * cifs_ses->iface_lock cifs_ses->iface_list sesInfoAlloc + * ->iface_count + * ->iface_last_update + * cifs_ses->chan_lock cifs_ses->chans + * ->chans_need_reconnect + * ->chans_in_reconnect + * cifs_tcon->tc_lock (anything that is not protected by another lock and can change) + * cifsInodeInfo->open_file_lock cifsInodeInfo->openFileList cifs_alloc_inode + * cifsInodeInfo->writers_lock cifsInodeInfo->writers cifsInodeInfo_alloc + * cifsInodeInfo->lock_sem 
cifsInodeInfo->llist cifs_init_once + * ->can_cache_brlcks + * cifsInodeInfo->deferred_lock cifsInodeInfo->deferred_closes cifsInodeInfo_alloc + * cached_fid->fid_mutex cifs_tcon->crfid tconInfoAlloc + * cifsFileInfo->fh_mutex cifsFileInfo cifs_new_fileinfo + * cifsFileInfo->file_info_lock cifsFileInfo->count cifs_new_fileinfo + * ->invalidHandle initiate_cifs_search + * ->oplock_break_cancelled + * cifs_aio_ctx->aio_mutex cifs_aio_ctx cifs_aio_ctx_alloc ****************************************************************************/ #ifdef DECLARE_GLOBALS_HERE @@ -1941,47 +1961,44 @@ require use of the stronger protocol */ * sessions (and from that the tree connections) can be found * by iterating over cifs_tcp_ses_list */ -GLOBAL_EXTERN struct list_head cifs_tcp_ses_list; +extern struct list_head cifs_tcp_ses_list; /* * This lock protects the cifs_tcp_ses_list, the list of smb sessions per * tcp session, and the list of tcon's per smb session. It also protects - * the reference counters for the server, smb session, and tcon. It also - * protects some fields in the TCP_Server_Info struct such as dstaddr. Finally, - * changes to the tcon->tidStatus should be done while holding this lock. + * the reference counters for the server, smb session, and tcon. * generally the locks should be taken in order tcp_ses_lock before * tcon->open_file_lock and that before file->file_info_lock since the * structure order is cifs_socket-->cifs_ses-->cifs_tcon-->cifs_file */ -GLOBAL_EXTERN spinlock_t cifs_tcp_ses_lock; +extern spinlock_t cifs_tcp_ses_lock; /* * Global transaction id (XID) information */ -GLOBAL_EXTERN unsigned int GlobalCurrentXid; /* protected by GlobalMid_Sem */ -GLOBAL_EXTERN unsigned int GlobalTotalActiveXid; /* prot by GlobalMid_Sem */ -GLOBAL_EXTERN unsigned int GlobalMaxActiveXid; /* prot by GlobalMid_Sem */ -GLOBAL_EXTERN spinlock_t GlobalMid_Lock; /* protects above & list operations */ - /* on midQ entries */ +extern unsigned int GlobalCurrentXid; /* protected by GlobalMid_Sem */ +extern unsigned int GlobalTotalActiveXid; /* prot by GlobalMid_Sem */ +extern unsigned int GlobalMaxActiveXid; /* prot by GlobalMid_Sem */ +extern spinlock_t GlobalMid_Lock; /* protects above & list operations on midQ entries */ + /* * Global counters, updated atomically */ -GLOBAL_EXTERN atomic_t sesInfoAllocCount; -GLOBAL_EXTERN atomic_t tconInfoAllocCount; -GLOBAL_EXTERN atomic_t tcpSesNextId; -GLOBAL_EXTERN atomic_t tcpSesAllocCount; -GLOBAL_EXTERN atomic_t tcpSesReconnectCount; -GLOBAL_EXTERN atomic_t tconInfoReconnectCount; +extern atomic_t sesInfoAllocCount; +extern atomic_t tconInfoAllocCount; +extern atomic_t tcpSesNextId; +extern atomic_t tcpSesAllocCount; +extern atomic_t tcpSesReconnectCount; +extern atomic_t tconInfoReconnectCount; /* Various Debug counters */ -GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */ +extern atomic_t buf_alloc_count; /* current number allocated */ +extern atomic_t small_buf_alloc_count; #ifdef CONFIG_CIFS_STATS2 -GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */ -GLOBAL_EXTERN atomic_t totSmBufAllocCount; +extern atomic_t total_buf_alloc_count; /* total allocated over all time */ +extern atomic_t total_small_buf_alloc_count; extern unsigned int slow_rsp_threshold; /* number of secs before logging */ #endif -GLOBAL_EXTERN atomic_t smBufAllocCount; -GLOBAL_EXTERN atomic_t midCount; /* Misc globals */ extern bool enable_oplocks; /* enable or disable oplocks */ @@ -1998,6 +2015,7 @@ extern unsigned int cifs_min_rcv; /* min size 
of big ntwrk buf pool */ extern unsigned int cifs_min_small; /* min size of small buf pool */ extern unsigned int cifs_max_pending; /* MAX requests at once to server*/ extern bool disable_legacy_dialects; /* forbid vers=1.0 and vers=2.0 mounts */ +extern atomic_t mid_count; void cifs_oplock_break(struct work_struct *work); void cifs_queue_oplock_break(struct cifsFileInfo *cfile); @@ -2085,9 +2103,9 @@ static inline bool cifs_is_referral_server(struct cifs_tcon *tcon, return is_tcon_dfs(tcon) || (ref && (ref->flags & DFSREF_REFERRAL_SERVER)); } -static inline u64 cifs_flock_len(struct file_lock *fl) +static inline u64 cifs_flock_len(const struct file_lock *fl) { - return fl->fl_end == OFFSET_MAX ? 0 : fl->fl_end - fl->fl_start + 1; + return (u64)fl->fl_end - fl->fl_start + 1; } static inline size_t ntlmssp_workstation_name_size(const struct cifs_ses *ses) diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h index d59aebefa71c..3bc94bcc7177 100644 --- a/fs/cifs/cifsproto.h +++ b/fs/cifs/cifsproto.h @@ -78,12 +78,8 @@ extern char *build_wildcard_path_from_dentry(struct dentry *direntry); extern char *cifs_compose_mount_options(const char *sb_mountdata, const char *fullpath, const struct dfs_info3_param *ref, char **devname); -/* extern void renew_parental_timestamps(struct dentry *direntry);*/ -extern struct mid_q_entry *AllocMidQEntry(const struct smb_hdr *smb_buffer, - struct TCP_Server_Info *server); -extern void DeleteMidQEntry(struct mid_q_entry *midEntry); -extern void cifs_delete_mid(struct mid_q_entry *mid); -extern void cifs_mid_q_entry_release(struct mid_q_entry *midEntry); +extern void delete_mid(struct mid_q_entry *mid); +extern void release_mid(struct mid_q_entry *mid); extern void cifs_wake_up_task(struct mid_q_entry *mid); extern int cifs_handle_standard(struct TCP_Server_Info *server, struct mid_q_entry *mid); @@ -155,7 +151,7 @@ extern int cifs_get_writable_path(struct cifs_tcon *tcon, const char *name, extern struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *, bool); extern int cifs_get_readable_path(struct cifs_tcon *tcon, const char *name, struct cifsFileInfo **ret_file); -extern unsigned int smbCalcSize(void *buf, struct TCP_Server_Info *server); +extern unsigned int smbCalcSize(void *buf); extern int decode_negTokenInit(unsigned char *security_blob, int length, struct TCP_Server_Info *server); extern int cifs_convert_address(struct sockaddr *dst, const char *src, int len); @@ -521,6 +517,7 @@ extern int generate_smb30signingkey(struct cifs_ses *ses, extern int generate_smb311signingkey(struct cifs_ses *ses, struct TCP_Server_Info *server); +#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY extern int CIFSSMBCopy(unsigned int xid, struct cifs_tcon *source_tcon, const char *fromName, @@ -551,6 +548,7 @@ extern int CIFSSMBSetPosixACL(const unsigned int xid, struct cifs_tcon *tcon, const struct nls_table *nls_codepage, int remap_special_chars); extern int CIFSGetExtAttr(const unsigned int xid, struct cifs_tcon *tcon, const int netfid, __u64 *pExtAttrBits, __u64 *pMask); +#endif /* CIFS_ALLOW_INSECURE_LEGACY */ extern void cifs_autodisable_serverino(struct cifs_sb_info *cifs_sb); extern bool couldbe_mf_symlink(const struct cifs_fattr *fattr); extern int check_mf_symlink(unsigned int xid, struct cifs_tcon *tcon, @@ -599,7 +597,6 @@ enum securityEnum cifs_select_sectype(struct TCP_Server_Info *, struct cifs_aio_ctx *cifs_aio_ctx_alloc(void); void cifs_aio_ctx_release(struct kref *refcount); int setup_aio_ctx_iter(struct cifs_aio_ctx *ctx, struct iov_iter *iter, int rw); 
-void smb2_cached_lease_break(struct work_struct *work); int cifs_alloc_hash(const char *name, struct crypto_shash **shash, struct sdesc **sdesc); diff --git a/fs/cifs/cifsroot.c b/fs/cifs/cifsroot.c index 9e91a5a40aae..56ec1b233f52 100644 --- a/fs/cifs/cifsroot.c +++ b/fs/cifs/cifsroot.c @@ -59,7 +59,7 @@ static int __init cifs_root_setup(char *line) pr_err("Root-CIFS: UNC path too long\n"); return 1; } - strlcpy(root_dev, line, len); + strscpy(root_dev, line, len); srvaddr = parse_srvaddr(&line[2], s); if (*s) { int n = snprintf(root_opts, diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c index 6371b9eebdad..7aa91e272027 100644 --- a/fs/cifs/cifssmb.c +++ b/fs/cifs/cifssmb.c @@ -29,7 +29,6 @@ #include "cifsproto.h" #include "cifs_unicode.h" #include "cifs_debug.h" -#include "smb2proto.h" #include "fscache.h" #include "smbdirect.h" #ifdef CONFIG_CIFS_DFS_UPCALL @@ -62,52 +61,6 @@ static struct { #define CIFS_NUM_PROT 1 #endif /* CIFS_POSIX */ -/* - * Mark as invalid, all open files on tree connections since they - * were closed when session to server was lost. - */ -void -cifs_mark_open_files_invalid(struct cifs_tcon *tcon) -{ - struct cifsFileInfo *open_file = NULL; - struct list_head *tmp; - struct list_head *tmp1; - - /* only send once per connect */ - spin_lock(&cifs_tcp_ses_lock); - if ((tcon->ses->ses_status != SES_GOOD) || (tcon->status != TID_NEED_RECON)) { - spin_unlock(&cifs_tcp_ses_lock); - return; - } - tcon->status = TID_IN_FILES_INVALIDATE; - spin_unlock(&cifs_tcp_ses_lock); - - /* list all files open on tree connection and mark them invalid */ - spin_lock(&tcon->open_file_lock); - list_for_each_safe(tmp, tmp1, &tcon->openFileList) { - open_file = list_entry(tmp, struct cifsFileInfo, tlist); - open_file->invalidHandle = true; - open_file->oplock_break_cancelled = true; - } - spin_unlock(&tcon->open_file_lock); - - mutex_lock(&tcon->crfid.fid_mutex); - tcon->crfid.is_valid = false; - /* cached handle is not valid, so SMB2_CLOSE won't be sent below */ - close_cached_dir_lease_locked(&tcon->crfid); - memset(tcon->crfid.fid, 0, sizeof(struct cifs_fid)); - mutex_unlock(&tcon->crfid.fid_mutex); - - spin_lock(&cifs_tcp_ses_lock); - if (tcon->status == TID_IN_FILES_INVALIDATE) - tcon->status = TID_NEED_TCON; - spin_unlock(&cifs_tcp_ses_lock); - - /* - * BB Add call to invalidate_inodes(sb) for all superblocks mounted - * to this tcon. - */ -} /* reconnect the socket, tcon, and smb session if needed */ static int @@ -134,18 +87,18 @@ cifs_reconnect_tcon(struct cifs_tcon *tcon, int smb_command) * only tree disconnect, open, and write, (and ulogoff which does not * have tcon) are allowed as we start force umount */ - spin_lock(&cifs_tcp_ses_lock); + spin_lock(&tcon->tc_lock); if (tcon->status == TID_EXITING) { if (smb_command != SMB_COM_WRITE_ANDX && smb_command != SMB_COM_OPEN_ANDX && smb_command != SMB_COM_TREE_DISCONNECT) { - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&tcon->tc_lock); cifs_dbg(FYI, "can not send cmd %d while umounting\n", smb_command); return -ENODEV; } } - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&tcon->tc_lock); retries = server->nr_targets; @@ -165,12 +118,12 @@ cifs_reconnect_tcon(struct cifs_tcon *tcon, int smb_command) } /* are we still trying to reconnect? 
*/ - spin_lock(&cifs_tcp_ses_lock); + spin_lock(&server->srv_lock); if (server->tcpStatus != CifsNeedReconnect) { - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&server->srv_lock); break; } - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&server->srv_lock); if (retries && --retries) continue; @@ -201,13 +154,13 @@ cifs_reconnect_tcon(struct cifs_tcon *tcon, int smb_command) * and the server never sends an answer the socket will be closed * and tcpStatus set to reconnect. */ - spin_lock(&cifs_tcp_ses_lock); + spin_lock(&server->srv_lock); if (server->tcpStatus == CifsNeedReconnect) { - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&server->srv_lock); rc = -EHOSTDOWN; goto out; } - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&server->srv_lock); /* * need to prevent multiple threads trying to simultaneously @@ -457,52 +410,6 @@ decode_ext_sec_blob(struct cifs_ses *ses, NEGOTIATE_RSP *pSMBr) return 0; } -int -cifs_enable_signing(struct TCP_Server_Info *server, bool mnt_sign_required) -{ - bool srv_sign_required = server->sec_mode & server->vals->signing_required; - bool srv_sign_enabled = server->sec_mode & server->vals->signing_enabled; - bool mnt_sign_enabled = global_secflags & CIFSSEC_MAY_SIGN; - - /* - * Is signing required by mnt options? If not then check - * global_secflags to see if it is there. - */ - if (!mnt_sign_required) - mnt_sign_required = ((global_secflags & CIFSSEC_MUST_SIGN) == - CIFSSEC_MUST_SIGN); - - /* - * If signing is required then it's automatically enabled too, - * otherwise, check to see if the secflags allow it. - */ - mnt_sign_enabled = mnt_sign_required ? mnt_sign_required : - (global_secflags & CIFSSEC_MAY_SIGN); - - /* If server requires signing, does client allow it? */ - if (srv_sign_required) { - if (!mnt_sign_enabled) { - cifs_dbg(VFS, "Server requires signing, but it's disabled in SecurityFlags!\n"); - return -ENOTSUPP; - } - server->sign = true; - } - - /* If client requires signing, does server allow it? */ - if (mnt_sign_required) { - if (!srv_sign_enabled) { - cifs_dbg(VFS, "Server does not support signing!\n"); - return -ENOTSUPP; - } - server->sign = true; - } - - if (cifs_rdma_enabled(server) && server->sign) - cifs_dbg(VFS, "Signing is enabled, and RDMA read/write will be disabled\n"); - - return 0; -} - static bool should_set_ext_sec_flag(enum securityEnum sectype) { @@ -684,7 +591,7 @@ cifs_echo_callback(struct mid_q_entry *mid) struct TCP_Server_Info *server = mid->callback_data; struct cifs_credits credits = { .value = 1, .instance = 0 }; - DeleteMidQEntry(mid); + release_mid(mid); add_credits(server, &credits, CIFS_ECHO_OP); } @@ -1379,184 +1286,6 @@ openRetry: return rc; } -/* - * Discard any remaining data in the current SMB. To do this, we borrow the - * current bigbuf. 
- */ -int -cifs_discard_remaining_data(struct TCP_Server_Info *server) -{ - unsigned int rfclen = server->pdu_size; - int remaining = rfclen + server->vals->header_preamble_size - - server->total_read; - - while (remaining > 0) { - int length; - - length = cifs_discard_from_socket(server, - min_t(size_t, remaining, - CIFSMaxBufSize + MAX_HEADER_SIZE(server))); - if (length < 0) - return length; - server->total_read += length; - remaining -= length; - } - - return 0; -} - -static int -__cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid, - bool malformed) -{ - int length; - - length = cifs_discard_remaining_data(server); - dequeue_mid(mid, malformed); - mid->resp_buf = server->smallbuf; - server->smallbuf = NULL; - return length; -} - -static int -cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid) -{ - struct cifs_readdata *rdata = mid->callback_data; - - return __cifs_readv_discard(server, mid, rdata->result); -} - -int -cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid) -{ - int length, len; - unsigned int data_offset, data_len; - struct cifs_readdata *rdata = mid->callback_data; - char *buf = server->smallbuf; - unsigned int buflen = server->pdu_size + - server->vals->header_preamble_size; - bool use_rdma_mr = false; - - cifs_dbg(FYI, "%s: mid=%llu offset=%llu bytes=%u\n", - __func__, mid->mid, rdata->offset, rdata->bytes); - - /* - * read the rest of READ_RSP header (sans Data array), or whatever we - * can if there's not enough data. At this point, we've read down to - * the Mid. - */ - len = min_t(unsigned int, buflen, server->vals->read_rsp_size) - - HEADER_SIZE(server) + 1; - - length = cifs_read_from_socket(server, - buf + HEADER_SIZE(server) - 1, len); - if (length < 0) - return length; - server->total_read += length; - - if (server->ops->is_session_expired && - server->ops->is_session_expired(buf)) { - cifs_reconnect(server, true); - return -1; - } - - if (server->ops->is_status_pending && - server->ops->is_status_pending(buf, server)) { - cifs_discard_remaining_data(server); - return -1; - } - - /* set up first two iov for signature check and to get credits */ - rdata->iov[0].iov_base = buf; - rdata->iov[0].iov_len = server->vals->header_preamble_size; - rdata->iov[1].iov_base = buf + server->vals->header_preamble_size; - rdata->iov[1].iov_len = - server->total_read - server->vals->header_preamble_size; - cifs_dbg(FYI, "0: iov_base=%p iov_len=%zu\n", - rdata->iov[0].iov_base, rdata->iov[0].iov_len); - cifs_dbg(FYI, "1: iov_base=%p iov_len=%zu\n", - rdata->iov[1].iov_base, rdata->iov[1].iov_len); - - /* Was the SMB read successful? */ - rdata->result = server->ops->map_error(buf, false); - if (rdata->result != 0) { - cifs_dbg(FYI, "%s: server returned error %d\n", - __func__, rdata->result); - /* normal error on read response */ - return __cifs_readv_discard(server, mid, false); - } - - /* Is there enough to get to the rest of the READ_RSP header? */ - if (server->total_read < server->vals->read_rsp_size) { - cifs_dbg(FYI, "%s: server returned short header. got=%u expected=%zu\n", - __func__, server->total_read, - server->vals->read_rsp_size); - rdata->result = -EIO; - return cifs_readv_discard(server, mid); - } - - data_offset = server->ops->read_data_offset(buf) + - server->vals->header_preamble_size; - if (data_offset < server->total_read) { - /* - * win2k8 sometimes sends an offset of 0 when the read - * is beyond the EOF. Treat it as if the data starts just after - * the header. 
- */ - cifs_dbg(FYI, "%s: data offset (%u) inside read response header\n", - __func__, data_offset); - data_offset = server->total_read; - } else if (data_offset > MAX_CIFS_SMALL_BUFFER_SIZE) { - /* data_offset is beyond the end of smallbuf */ - cifs_dbg(FYI, "%s: data offset (%u) beyond end of smallbuf\n", - __func__, data_offset); - rdata->result = -EIO; - return cifs_readv_discard(server, mid); - } - - cifs_dbg(FYI, "%s: total_read=%u data_offset=%u\n", - __func__, server->total_read, data_offset); - - len = data_offset - server->total_read; - if (len > 0) { - /* read any junk before data into the rest of smallbuf */ - length = cifs_read_from_socket(server, - buf + server->total_read, len); - if (length < 0) - return length; - server->total_read += length; - } - - /* how much data is in the response? */ -#ifdef CONFIG_CIFS_SMB_DIRECT - use_rdma_mr = rdata->mr; -#endif - data_len = server->ops->read_data_length(buf, use_rdma_mr); - if (!use_rdma_mr && (data_offset + data_len > buflen)) { - /* data_len is corrupt -- discard frame */ - rdata->result = -EIO; - return cifs_readv_discard(server, mid); - } - - length = rdata->read_into_pages(server, rdata, data_len); - if (length < 0) - return length; - - server->total_read += length; - - cifs_dbg(FYI, "total_read=%u buflen=%u remaining=%u\n", - server->total_read, buflen, data_len); - - /* discard anything left over */ - if (server->total_read < buflen) - return cifs_readv_discard(server, mid); - - dequeue_mid(mid, false); - mid->resp_buf = server->smallbuf; - server->smallbuf = NULL; - return length; -} - static void cifs_readv_callback(struct mid_q_entry *mid) { @@ -1607,7 +1336,7 @@ cifs_readv_callback(struct mid_q_entry *mid) } queue_work(cifsiod_wq, &rdata->work); - DeleteMidQEntry(mid); + release_mid(mid); add_credits(server, &credits, 0); } @@ -1909,183 +1638,6 @@ CIFSSMBWrite(const unsigned int xid, struct cifs_io_parms *io_parms, return rc; } -void -cifs_writedata_release(struct kref *refcount) -{ - struct cifs_writedata *wdata = container_of(refcount, - struct cifs_writedata, refcount); -#ifdef CONFIG_CIFS_SMB_DIRECT - if (wdata->mr) { - smbd_deregister_mr(wdata->mr); - wdata->mr = NULL; - } -#endif - - if (wdata->cfile) - cifsFileInfo_put(wdata->cfile); - - kvfree(wdata->pages); - kfree(wdata); -} - -/* - * Write failed with a retryable error. Resend the write request. It's also - * possible that the page was redirtied so re-clean the page. 
- */ -static void -cifs_writev_requeue(struct cifs_writedata *wdata) -{ - int i, rc = 0; - struct inode *inode = d_inode(wdata->cfile->dentry); - struct TCP_Server_Info *server; - unsigned int rest_len; - - server = tlink_tcon(wdata->cfile->tlink)->ses->server; - i = 0; - rest_len = wdata->bytes; - do { - struct cifs_writedata *wdata2; - unsigned int j, nr_pages, wsize, tailsz, cur_len; - - wsize = server->ops->wp_retry_size(inode); - if (wsize < rest_len) { - nr_pages = wsize / PAGE_SIZE; - if (!nr_pages) { - rc = -ENOTSUPP; - break; - } - cur_len = nr_pages * PAGE_SIZE; - tailsz = PAGE_SIZE; - } else { - nr_pages = DIV_ROUND_UP(rest_len, PAGE_SIZE); - cur_len = rest_len; - tailsz = rest_len - (nr_pages - 1) * PAGE_SIZE; - } - - wdata2 = cifs_writedata_alloc(nr_pages, cifs_writev_complete); - if (!wdata2) { - rc = -ENOMEM; - break; - } - - for (j = 0; j < nr_pages; j++) { - wdata2->pages[j] = wdata->pages[i + j]; - lock_page(wdata2->pages[j]); - clear_page_dirty_for_io(wdata2->pages[j]); - } - - wdata2->sync_mode = wdata->sync_mode; - wdata2->nr_pages = nr_pages; - wdata2->offset = page_offset(wdata2->pages[0]); - wdata2->pagesz = PAGE_SIZE; - wdata2->tailsz = tailsz; - wdata2->bytes = cur_len; - - rc = cifs_get_writable_file(CIFS_I(inode), FIND_WR_ANY, - &wdata2->cfile); - if (!wdata2->cfile) { - cifs_dbg(VFS, "No writable handle to retry writepages rc=%d\n", - rc); - if (!is_retryable_error(rc)) - rc = -EBADF; - } else { - wdata2->pid = wdata2->cfile->pid; - rc = server->ops->async_writev(wdata2, - cifs_writedata_release); - } - - for (j = 0; j < nr_pages; j++) { - unlock_page(wdata2->pages[j]); - if (rc != 0 && !is_retryable_error(rc)) { - SetPageError(wdata2->pages[j]); - end_page_writeback(wdata2->pages[j]); - put_page(wdata2->pages[j]); - } - } - - kref_put(&wdata2->refcount, cifs_writedata_release); - if (rc) { - if (is_retryable_error(rc)) - continue; - i += nr_pages; - break; - } - - rest_len -= cur_len; - i += nr_pages; - } while (i < wdata->nr_pages); - - /* cleanup remaining pages from the original wdata */ - for (; i < wdata->nr_pages; i++) { - SetPageError(wdata->pages[i]); - end_page_writeback(wdata->pages[i]); - put_page(wdata->pages[i]); - } - - if (rc != 0 && !is_retryable_error(rc)) - mapping_set_error(inode->i_mapping, rc); - kref_put(&wdata->refcount, cifs_writedata_release); -} - -void -cifs_writev_complete(struct work_struct *work) -{ - struct cifs_writedata *wdata = container_of(work, - struct cifs_writedata, work); - struct inode *inode = d_inode(wdata->cfile->dentry); - int i = 0; - - if (wdata->result == 0) { - spin_lock(&inode->i_lock); - cifs_update_eof(CIFS_I(inode), wdata->offset, wdata->bytes); - spin_unlock(&inode->i_lock); - cifs_stats_bytes_written(tlink_tcon(wdata->cfile->tlink), - wdata->bytes); - } else if (wdata->sync_mode == WB_SYNC_ALL && wdata->result == -EAGAIN) - return cifs_writev_requeue(wdata); - - for (i = 0; i < wdata->nr_pages; i++) { - struct page *page = wdata->pages[i]; - if (wdata->result == -EAGAIN) - __set_page_dirty_nobuffers(page); - else if (wdata->result < 0) - SetPageError(page); - end_page_writeback(page); - cifs_readpage_to_fscache(inode, page); - put_page(page); - } - if (wdata->result != -EAGAIN) - mapping_set_error(inode->i_mapping, wdata->result); - kref_put(&wdata->refcount, cifs_writedata_release); -} - -struct cifs_writedata * -cifs_writedata_alloc(unsigned int nr_pages, work_func_t complete) -{ - struct page **pages = - kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS); - if (pages) - return 
cifs_writedata_direct_alloc(pages, complete); - - return NULL; -} - -struct cifs_writedata * -cifs_writedata_direct_alloc(struct page **pages, work_func_t complete) -{ - struct cifs_writedata *wdata; - - wdata = kzalloc(sizeof(*wdata), GFP_NOFS); - if (wdata != NULL) { - wdata->pages = pages; - kref_init(&wdata->refcount); - INIT_LIST_HEAD(&wdata->list); - init_completion(&wdata->done); - INIT_WORK(&wdata->work, complete); - } - return wdata; -} - /* * Check the mid_state and signature on received buffer (if any), and queue the * workqueue completion task. @@ -2132,7 +1684,7 @@ cifs_writev_callback(struct mid_q_entry *mid) } queue_work(cifsiod_wq, &wdata->work); - DeleteMidQEntry(mid); + release_mid(mid); add_credits(tcon->ses->server, &credits, 0); } @@ -3660,7 +3212,6 @@ setACLerrorExit: return rc; } -/* BB fix tabs in this function FIXME BB */ int CIFSGetExtAttr(const unsigned int xid, struct cifs_tcon *tcon, const int netfid, __u64 *pExtAttrBits, __u64 *pMask) @@ -3677,7 +3228,7 @@ CIFSGetExtAttr(const unsigned int xid, struct cifs_tcon *tcon, GetExtAttrRetry: rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, - (void **) &pSMBr); + (void **) &pSMBr); if (rc) return rc; @@ -3723,7 +3274,7 @@ GetExtAttrRetry: __u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset); __u16 count = le16_to_cpu(pSMBr->t2.DataCount); struct file_chattr_info *pfinfo; - /* BB Do we need a cast or hash here ? */ + if (count != 16) { cifs_dbg(FYI, "Invalid size ret in GetExtAttr\n"); rc = -EIO; diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index fa29c9aae24b..7ae6f2c08153 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -119,10 +119,10 @@ static int reconn_set_ipaddr_from_hostname(struct TCP_Server_Info *server) goto requeue_resolve; } - spin_lock(&cifs_tcp_ses_lock); + spin_lock(&server->srv_lock); rc = cifs_convert_address((struct sockaddr *)&server->dstaddr, ipaddr, strlen(ipaddr)); - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&server->srv_lock); kfree(ipaddr); /* rc == 1 means success here */ @@ -205,17 +205,22 @@ cifs_signal_cifsd_for_reconnect(struct TCP_Server_Info *server, /* If server is a channel, select the primary channel */ pserver = CIFS_SERVER_IS_CHAN(server) ? 
server->primary_server : server; - spin_lock(&cifs_tcp_ses_lock); + spin_lock(&pserver->srv_lock); if (!all_channels) { pserver->tcpStatus = CifsNeedReconnect; - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&pserver->srv_lock); return; } + spin_unlock(&pserver->srv_lock); + spin_lock(&cifs_tcp_ses_lock); list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) { spin_lock(&ses->chan_lock); - for (i = 0; i < ses->chan_count; i++) + for (i = 0; i < ses->chan_count; i++) { + spin_lock(&ses->chans[i].server->srv_lock); ses->chans[i].server->tcpStatus = CifsNeedReconnect; + spin_unlock(&ses->chans[i].server->srv_lock); + } spin_unlock(&ses->chan_lock); } spin_unlock(&cifs_tcp_ses_lock); @@ -252,17 +257,8 @@ cifs_mark_tcp_ses_conns_for_reconnect(struct TCP_Server_Info *server, spin_lock(&cifs_tcp_ses_lock); list_for_each_entry_safe(ses, nses, &pserver->smb_ses_list, smb_ses_list) { /* check if iface is still active */ - if (!cifs_chan_is_iface_active(ses, server)) { - /* - * HACK: drop the lock before calling - * cifs_chan_update_iface to avoid deadlock - */ - ses->ses_count++; - spin_unlock(&cifs_tcp_ses_lock); + if (!cifs_chan_is_iface_active(ses, server)) cifs_chan_update_iface(ses, server); - spin_lock(&cifs_tcp_ses_lock); - ses->ses_count--; - } spin_lock(&ses->chan_lock); if (!mark_smb_session && cifs_chan_needs_reconnect(ses, server)) @@ -323,7 +319,7 @@ cifs_abort_connection(struct TCP_Server_Info *server) /* mark submitted MIDs for retry and issue callback */ INIT_LIST_HEAD(&retry_list); cifs_dbg(FYI, "%s: moving mids to private list\n", __func__); - spin_lock(&GlobalMid_Lock); + spin_lock(&server->mid_lock); list_for_each_entry_safe(mid, nmid, &server->pending_mid_q, qhead) { kref_get(&mid->refcount); if (mid->mid_state == MID_REQUEST_SUBMITTED) @@ -331,14 +327,14 @@ cifs_abort_connection(struct TCP_Server_Info *server) list_move(&mid->qhead, &retry_list); mid->mid_flags |= MID_DELETED; } - spin_unlock(&GlobalMid_Lock); + spin_unlock(&server->mid_lock); cifs_server_unlock(server); cifs_dbg(FYI, "%s: issuing mid callbacks\n", __func__); list_for_each_entry_safe(mid, nmid, &retry_list, qhead) { list_del_init(&mid->qhead); mid->callback(mid); - cifs_mid_q_entry_release(mid); + release_mid(mid); } if (cifs_rdma_enabled(server)) { @@ -350,11 +346,11 @@ cifs_abort_connection(struct TCP_Server_Info *server) static bool cifs_tcp_ses_needs_reconnect(struct TCP_Server_Info *server, int num_targets) { - spin_lock(&cifs_tcp_ses_lock); + spin_lock(&server->srv_lock); server->nr_targets = num_targets; if (server->tcpStatus == CifsExiting) { /* the demux thread will exit normally next time through the loop */ - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&server->srv_lock); wake_up(&server->response_q); return false; } @@ -364,7 +360,7 @@ static bool cifs_tcp_ses_needs_reconnect(struct TCP_Server_Info *server, int num server->hostname); server->tcpStatus = CifsNeedReconnect; - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&server->srv_lock); return true; } @@ -414,20 +410,20 @@ static int __cifs_reconnect(struct TCP_Server_Info *server, } else { atomic_inc(&tcpSesReconnectCount); set_credits(server, 1); - spin_lock(&cifs_tcp_ses_lock); + spin_lock(&server->srv_lock); if (server->tcpStatus != CifsExiting) server->tcpStatus = CifsNeedNegotiate; - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&server->srv_lock); cifs_swn_reset_server_dstaddr(server); cifs_server_unlock(server); mod_delayed_work(cifsiod_wq, &server->reconnect, 0); } } while (server->tcpStatus == CifsNeedReconnect); - 
spin_lock(&cifs_tcp_ses_lock); + spin_lock(&server->srv_lock); if (server->tcpStatus == CifsNeedNegotiate) mod_delayed_work(cifsiod_wq, &server->echo, 0); - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&server->srv_lock); wake_up(&server->response_q); return rc; @@ -541,10 +537,10 @@ static int reconnect_dfs_server(struct TCP_Server_Info *server) */ atomic_inc(&tcpSesReconnectCount); set_credits(server, 1); - spin_lock(&cifs_tcp_ses_lock); + spin_lock(&server->srv_lock); if (server->tcpStatus != CifsExiting) server->tcpStatus = CifsNeedNegotiate; - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&server->srv_lock); cifs_swn_reset_server_dstaddr(server); cifs_server_unlock(server); mod_delayed_work(cifsiod_wq, &server->reconnect, 0); @@ -556,11 +552,10 @@ static int reconnect_dfs_server(struct TCP_Server_Info *server) dfs_cache_free_tgts(&tl); /* Need to set up echo worker again once connection has been established */ - spin_lock(&cifs_tcp_ses_lock); + spin_lock(&server->srv_lock); if (server->tcpStatus == CifsNeedNegotiate) mod_delayed_work(cifsiod_wq, &server->echo, 0); - - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&server->srv_lock); wake_up(&server->response_q); return rc; @@ -569,12 +564,12 @@ static int reconnect_dfs_server(struct TCP_Server_Info *server) int cifs_reconnect(struct TCP_Server_Info *server, bool mark_smb_session) { /* If tcp session is not an dfs connection, then reconnect to last target server */ - spin_lock(&cifs_tcp_ses_lock); + spin_lock(&server->srv_lock); if (!server->is_dfs_conn) { - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&server->srv_lock); return __cifs_reconnect(server, mark_smb_session); } - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&server->srv_lock); mutex_lock(&server->refpath_lock); if (!server->origin_fullpath || !server->leaf_fullpath) { @@ -670,18 +665,18 @@ server_unresponsive(struct TCP_Server_Info *server) * 65s kernel_recvmsg times out, and we see that we haven't gotten * a response in >60s. */ - spin_lock(&cifs_tcp_ses_lock); + spin_lock(&server->srv_lock); if ((server->tcpStatus == CifsGood || server->tcpStatus == CifsNeedNegotiate) && (!server->ops->can_echo || server->ops->can_echo(server)) && time_after(jiffies, server->lstrp + 3 * server->echo_interval)) { - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&server->srv_lock); cifs_server_dbg(VFS, "has not responded in %lu seconds. 
Reconnecting...\n", (3 * server->echo_interval) / HZ); cifs_reconnect(server, false); return true; } - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&server->srv_lock); return false; } @@ -707,9 +702,6 @@ cifs_readv_from_socket(struct TCP_Server_Info *server, struct msghdr *smb_msg) int length = 0; int total_read; - smb_msg->msg_control = NULL; - smb_msg->msg_controllen = 0; - for (total_read = 0; msg_data_left(smb_msg); total_read += length) { try_to_freeze(); @@ -726,18 +718,18 @@ cifs_readv_from_socket(struct TCP_Server_Info *server, struct msghdr *smb_msg) else length = sock_recvmsg(server->ssocket, smb_msg, 0); - spin_lock(&cifs_tcp_ses_lock); + spin_lock(&server->srv_lock); if (server->tcpStatus == CifsExiting) { - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&server->srv_lock); return -ESHUTDOWN; } if (server->tcpStatus == CifsNeedReconnect) { - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&server->srv_lock); cifs_reconnect(server, false); return -ECONNABORTED; } - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&server->srv_lock); if (length == -ERESTARTSYS || length == -EAGAIN || @@ -765,7 +757,7 @@ int cifs_read_from_socket(struct TCP_Server_Info *server, char *buf, unsigned int to_read) { - struct msghdr smb_msg; + struct msghdr smb_msg = {}; struct kvec iov = {.iov_base = buf, .iov_len = to_read}; iov_iter_kvec(&smb_msg.msg_iter, READ, &iov, 1, to_read); @@ -775,15 +767,13 @@ cifs_read_from_socket(struct TCP_Server_Info *server, char *buf, ssize_t cifs_discard_from_socket(struct TCP_Server_Info *server, size_t to_read) { - struct msghdr smb_msg; + struct msghdr smb_msg = {}; /* * iov_iter_discard already sets smb_msg.type and count and iov_offset * and cifs_readv_from_socket sets msg_control and msg_controllen * so little to initialize in struct msghdr */ - smb_msg.msg_name = NULL; - smb_msg.msg_namelen = 0; iov_iter_discard(&smb_msg.msg_iter, READ, to_read); return cifs_readv_from_socket(server, &smb_msg); @@ -793,7 +783,7 @@ int cifs_read_page_from_socket(struct TCP_Server_Info *server, struct page *page, unsigned int page_offset, unsigned int to_read) { - struct msghdr smb_msg; + struct msghdr smb_msg = {}; struct bio_vec bv = { .bv_page = page, .bv_len = to_read, .bv_offset = page_offset}; iov_iter_bvec(&smb_msg.msg_iter, READ, &bv, 1, to_read); @@ -849,7 +839,7 @@ dequeue_mid(struct mid_q_entry *mid, bool malformed) #ifdef CONFIG_CIFS_STATS2 mid->when_received = jiffies; #endif - spin_lock(&GlobalMid_Lock); + spin_lock(&mid->server->mid_lock); if (!malformed) mid->mid_state = MID_RESPONSE_RECEIVED; else @@ -859,12 +849,12 @@ dequeue_mid(struct mid_q_entry *mid, bool malformed) * function has finished processing it is a bug. */ if (mid->mid_flags & MID_DELETED) { - spin_unlock(&GlobalMid_Lock); + spin_unlock(&mid->server->mid_lock); pr_warn_once("trying to dequeue a deleted mid\n"); } else { list_del_init(&mid->qhead); mid->mid_flags |= MID_DELETED; - spin_unlock(&GlobalMid_Lock); + spin_unlock(&mid->server->mid_lock); } } @@ -876,7 +866,7 @@ smb2_get_credits_from_hdr(char *buffer, struct TCP_Server_Info *server) /* * SMB1 does not use credits. 
*/ - if (server->vals->header_preamble_size) + if (is_smb1(server)) return 0; return le16_to_cpu(shdr->CreditRequest); @@ -903,21 +893,68 @@ handle_mid(struct mid_q_entry *mid, struct TCP_Server_Info *server, dequeue_mid(mid, malformed); } +int +cifs_enable_signing(struct TCP_Server_Info *server, bool mnt_sign_required) +{ + bool srv_sign_required = server->sec_mode & server->vals->signing_required; + bool srv_sign_enabled = server->sec_mode & server->vals->signing_enabled; + bool mnt_sign_enabled; + + /* + * Is signing required by mnt options? If not then check + * global_secflags to see if it is there. + */ + if (!mnt_sign_required) + mnt_sign_required = ((global_secflags & CIFSSEC_MUST_SIGN) == + CIFSSEC_MUST_SIGN); + + /* + * If signing is required then it's automatically enabled too, + * otherwise, check to see if the secflags allow it. + */ + mnt_sign_enabled = mnt_sign_required ? mnt_sign_required : + (global_secflags & CIFSSEC_MAY_SIGN); + + /* If server requires signing, does client allow it? */ + if (srv_sign_required) { + if (!mnt_sign_enabled) { + cifs_dbg(VFS, "Server requires signing, but it's disabled in SecurityFlags!\n"); + return -EOPNOTSUPP; + } + server->sign = true; + } + + /* If client requires signing, does server allow it? */ + if (mnt_sign_required) { + if (!srv_sign_enabled) { + cifs_dbg(VFS, "Server does not support signing!\n"); + return -EOPNOTSUPP; + } + server->sign = true; + } + + if (cifs_rdma_enabled(server) && server->sign) + cifs_dbg(VFS, "Signing is enabled, and RDMA read/write will be disabled\n"); + + return 0; +} + + static void clean_demultiplex_info(struct TCP_Server_Info *server) { int length; /* take it off the list, if it's not already */ - spin_lock(&cifs_tcp_ses_lock); + spin_lock(&server->srv_lock); list_del_init(&server->tcp_ses_list); - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&server->srv_lock); cancel_delayed_work_sync(&server->echo); cancel_delayed_work_sync(&server->resolve); - spin_lock(&cifs_tcp_ses_lock); + spin_lock(&server->srv_lock); server->tcpStatus = CifsExiting; - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&server->srv_lock); wake_up_all(&server->response_q); /* check if we have blocked requests that need to free */ @@ -948,7 +985,7 @@ static void clean_demultiplex_info(struct TCP_Server_Info *server) struct list_head *tmp, *tmp2; INIT_LIST_HEAD(&dispose_list); - spin_lock(&GlobalMid_Lock); + spin_lock(&server->mid_lock); list_for_each_safe(tmp, tmp2, &server->pending_mid_q) { mid_entry = list_entry(tmp, struct mid_q_entry, qhead); cifs_dbg(FYI, "Clearing mid %llu\n", mid_entry->mid); @@ -957,7 +994,7 @@ static void clean_demultiplex_info(struct TCP_Server_Info *server) list_move(&mid_entry->qhead, &dispose_list); mid_entry->mid_flags |= MID_DELETED; } - spin_unlock(&GlobalMid_Lock); + spin_unlock(&server->mid_lock); /* now walk dispose list and issue callbacks */ list_for_each_safe(tmp, tmp2, &dispose_list) { @@ -965,7 +1002,7 @@ static void clean_demultiplex_info(struct TCP_Server_Info *server) cifs_dbg(FYI, "Callback mid %llu\n", mid_entry->mid); list_del_init(&mid_entry->qhead); mid_entry->callback(mid_entry); - cifs_mid_q_entry_release(mid_entry); + release_mid(mid_entry); } /* 1/8th of sec is more than enough time for them to exit */ msleep(125); @@ -1008,7 +1045,7 @@ standard_receive3(struct TCP_Server_Info *server, struct mid_q_entry *mid) /* make sure this will fit in a large buffer */ if (pdu_length > CIFSMaxBufSize + MAX_HEADER_SIZE(server) - - server->vals->header_preamble_size) { + 
HEADER_PREAMBLE_SIZE(server)) { cifs_server_dbg(VFS, "SMB response too long (%u bytes)\n", pdu_length); cifs_reconnect(server, true); return -ECONNABORTED; @@ -1023,8 +1060,7 @@ standard_receive3(struct TCP_Server_Info *server, struct mid_q_entry *mid) /* now read the rest */ length = cifs_read_from_socket(server, buf + HEADER_SIZE(server) - 1, - pdu_length - HEADER_SIZE(server) + 1 - + server->vals->header_preamble_size); + pdu_length - MID_HEADER_SIZE(server)); if (length < 0) return length; @@ -1039,19 +1075,18 @@ int cifs_handle_standard(struct TCP_Server_Info *server, struct mid_q_entry *mid) { char *buf = server->large_buf ? server->bigbuf : server->smallbuf; - int length; + int rc; /* * We know that we received enough to get to the MID as we * checked the pdu_length earlier. Now check to see - * if the rest of the header is OK. We borrow the length - * var for the rest of the loop to avoid a new stack var. + * if the rest of the header is OK. * * 48 bytes is enough to display the header and a little bit * into the payload for debugging purposes. */ - length = server->ops->check_message(buf, server->total_read, server); - if (length != 0) + rc = server->ops->check_message(buf, server->total_read, server); + if (rc) cifs_dump_mem("Bad SMB: ", buf, min_t(unsigned int, server->total_read, 48)); @@ -1066,9 +1101,9 @@ cifs_handle_standard(struct TCP_Server_Info *server, struct mid_q_entry *mid) return -1; if (!mid) - return length; + return rc; - handle_mid(mid, server, buf, length); + handle_mid(mid, server, buf, rc); return 0; } @@ -1081,7 +1116,7 @@ smb2_add_credits_from_hdr(char *buffer, struct TCP_Server_Info *server) /* * SMB1 does not use credits. */ - if (server->vals->header_preamble_size) + if (is_smb1(server)) return; if (shdr->CreditRequest) { @@ -1139,10 +1174,10 @@ cifs_demultiplex_thread(void *p) if (length < 0) continue; - if (server->vals->header_preamble_size == 0) - server->total_read = 0; - else + if (is_smb1(server)) server->total_read = length; + else + server->total_read = 0; /* * The right amount was read from socket - 4 bytes, @@ -1157,8 +1192,7 @@ next_pdu: server->pdu_size = pdu_length; /* make sure we have enough to get to the MID */ - if (server->pdu_size < HEADER_SIZE(server) - 1 - - server->vals->header_preamble_size) { + if (server->pdu_size < MID_HEADER_SIZE(server)) { cifs_server_dbg(VFS, "SMB response too short (%u bytes)\n", server->pdu_size); cifs_reconnect(server, true); @@ -1167,9 +1201,8 @@ next_pdu: /* read down to the MID */ length = cifs_read_from_socket(server, - buf + server->vals->header_preamble_size, - HEADER_SIZE(server) - 1 - - server->vals->header_preamble_size); + buf + HEADER_PREAMBLE_SIZE(server), + MID_HEADER_SIZE(server)); if (length < 0) continue; server->total_read += length; @@ -1205,7 +1238,7 @@ next_pdu: if (length < 0) { for (i = 0; i < num_mids; i++) if (mids[i]) - cifs_mid_q_entry_release(mids[i]); + release_mid(mids[i]); continue; } @@ -1232,7 +1265,7 @@ next_pdu: if (!mids[i]->multiRsp || mids[i]->multiEnd) mids[i]->callback(mids[i]); - cifs_mid_q_entry_release(mids[i]); + release_mid(mids[i]); } else if (server->ops->is_oplock_break && server->ops->is_oplock_break(bufs[i], server)) { @@ -1240,7 +1273,7 @@ next_pdu: cifs_dbg(FYI, "Received oplock break\n"); } else { cifs_server_dbg(VFS, "No task to wake, unknown frame received! 
NumMids %d\n", - atomic_read(&midCount)); + atomic_read(&mid_count)); cifs_dump_mem("Received Data is: ", bufs[i], HEADER_SIZE(server)); smb2_add_credits_from_hdr(bufs[i], server); @@ -1411,6 +1444,7 @@ match_security(struct TCP_Server_Info *server, struct smb3_fs_context *ctx) return true; } +/* this function must be called with srv_lock held */ static int match_server(struct TCP_Server_Info *server, struct smb3_fs_context *ctx) { struct sockaddr *addr = (struct sockaddr *)&ctx->dstaddr; @@ -1471,6 +1505,7 @@ cifs_find_tcp_session(struct smb3_fs_context *ctx) spin_lock(&cifs_tcp_ses_lock); list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) { + spin_lock(&server->srv_lock); #ifdef CONFIG_CIFS_DFS_UPCALL /* * DFS failover implementation in cifs_reconnect() requires unique tcp sessions for @@ -1478,15 +1513,20 @@ cifs_find_tcp_session(struct smb3_fs_context *ctx) * shares or even links that may connect to same server but having completely * different failover targets. */ - if (server->is_dfs_conn) + if (server->is_dfs_conn) { + spin_unlock(&server->srv_lock); continue; + } #endif /* * Skip ses channels since they're only handled in lower layers * (e.g. cifs_send_recv). */ - if (CIFS_SERVER_IS_CHAN(server) || !match_server(server, ctx)) + if (CIFS_SERVER_IS_CHAN(server) || !match_server(server, ctx)) { + spin_unlock(&server->srv_lock); continue; + } + spin_unlock(&server->srv_lock); ++server->srv_count; spin_unlock(&cifs_tcp_ses_lock); @@ -1534,9 +1574,9 @@ cifs_put_tcp_session(struct TCP_Server_Info *server, int from_reconnect) else cancel_delayed_work_sync(&server->reconnect); - spin_lock(&cifs_tcp_ses_lock); + spin_lock(&server->srv_lock); server->tcpStatus = CifsExiting; - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&server->srv_lock); cifs_crypto_secmech_release(server); @@ -1595,8 +1635,8 @@ cifs_get_tcp_session(struct smb3_fs_context *ctx, if (primary_server) { spin_lock(&cifs_tcp_ses_lock); ++primary_server->srv_count; - tcp_ses->primary_server = primary_server; spin_unlock(&cifs_tcp_ses_lock); + tcp_ses->primary_server = primary_server; } init_waitqueue_head(&tcp_ses->response_q); init_waitqueue_head(&tcp_ses->request_q); @@ -1612,6 +1652,8 @@ cifs_get_tcp_session(struct smb3_fs_context *ctx, tcp_ses->lstrp = jiffies; tcp_ses->compress_algorithm = cpu_to_le16(ctx->compression); spin_lock_init(&tcp_ses->req_lock); + spin_lock_init(&tcp_ses->srv_lock); + spin_lock_init(&tcp_ses->mid_lock); INIT_LIST_HEAD(&tcp_ses->tcp_ses_list); INIT_LIST_HEAD(&tcp_ses->smb_ses_list); INIT_DELAYED_WORK(&tcp_ses->echo, cifs_echo_request); @@ -1685,9 +1727,9 @@ smbd_connected: * to the struct since the kernel thread not created yet * no need to spinlock this update of tcpStatus */ - spin_lock(&cifs_tcp_ses_lock); + spin_lock(&tcp_ses->srv_lock); tcp_ses->tcpStatus = CifsNeedNegotiate; - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&tcp_ses->srv_lock); if ((ctx->max_credits < 20) || (ctx->max_credits > 60000)) tcp_ses->max_credits = SMB2_MAX_CREDITS_AVAILABLE; @@ -1729,6 +1771,7 @@ out_err: return ERR_PTR(rc); } +/* this function must be called with ses_lock held */ static int match_session(struct cifs_ses *ses, struct smb3_fs_context *ctx) { if (ctx->sectype != Unspecified && @@ -1864,10 +1907,17 @@ cifs_find_smb_ses(struct TCP_Server_Info *server, struct smb3_fs_context *ctx) spin_lock(&cifs_tcp_ses_lock); list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) { - if (ses->ses_status == SES_EXITING) + spin_lock(&ses->ses_lock); + if (ses->ses_status == SES_EXITING) { + 
spin_unlock(&ses->ses_lock); continue; - if (!match_session(ses, ctx)) + } + if (!match_session(ses, ctx)) { + spin_unlock(&ses->ses_lock); continue; + } + spin_unlock(&ses->ses_lock); + ++ses->ses_count; spin_unlock(&cifs_tcp_ses_lock); return ses; @@ -1882,26 +1932,28 @@ void cifs_put_smb_ses(struct cifs_ses *ses) unsigned int chan_count; struct TCP_Server_Info *server = ses->server; - spin_lock(&cifs_tcp_ses_lock); + spin_lock(&ses->ses_lock); if (ses->ses_status == SES_EXITING) { - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&ses->ses_lock); return; } + spin_unlock(&ses->ses_lock); cifs_dbg(FYI, "%s: ses_count=%d\n", __func__, ses->ses_count); cifs_dbg(FYI, "%s: ses ipc: %s\n", __func__, ses->tcon_ipc ? ses->tcon_ipc->treeName : "NONE"); + spin_lock(&cifs_tcp_ses_lock); if (--ses->ses_count > 0) { spin_unlock(&cifs_tcp_ses_lock); return; } + spin_unlock(&cifs_tcp_ses_lock); /* ses_count can never go negative */ WARN_ON(ses->ses_count < 0); if (ses->ses_status == SES_GOOD) ses->ses_status = SES_EXITING; - spin_unlock(&cifs_tcp_ses_lock); cifs_free_ipc(ses); @@ -1918,7 +1970,6 @@ void cifs_put_smb_ses(struct cifs_ses *ses) list_del_init(&ses->smb_ses_list); spin_unlock(&cifs_tcp_ses_lock); - spin_lock(&ses->chan_lock); chan_count = ses->chan_count; /* close any extra channels */ @@ -1934,7 +1985,6 @@ void cifs_put_smb_ses(struct cifs_ses *ses) ses->chans[i].server = NULL; } } - spin_unlock(&ses->chan_lock); sesInfoFree(ses); cifs_put_tcp_session(server, 0); @@ -2238,6 +2288,7 @@ get_ses_fail: return ERR_PTR(rc); } +/* this function must be called with tc_lock held */ static int match_tcon(struct cifs_tcon *tcon, struct smb3_fs_context *ctx) { if (tcon->status == TID_EXITING) @@ -2260,16 +2311,17 @@ static int match_tcon(struct cifs_tcon *tcon, struct smb3_fs_context *ctx) static struct cifs_tcon * cifs_find_tcon(struct cifs_ses *ses, struct smb3_fs_context *ctx) { - struct list_head *tmp; struct cifs_tcon *tcon; spin_lock(&cifs_tcp_ses_lock); - list_for_each(tmp, &ses->tcon_list) { - tcon = list_entry(tmp, struct cifs_tcon, tcon_list); - - if (!match_tcon(tcon, ctx)) + list_for_each_entry(tcon, &ses->tcon_list, tcon_list) { + spin_lock(&tcon->tc_lock); + if (!match_tcon(tcon, ctx)) { + spin_unlock(&tcon->tc_lock); continue; + } ++tcon->tc_count; + spin_unlock(&tcon->tc_lock); spin_unlock(&cifs_tcp_ses_lock); return tcon; } @@ -2293,7 +2345,9 @@ cifs_put_tcon(struct cifs_tcon *tcon) ses = tcon->ses; cifs_dbg(FYI, "%s: tc_count=%d\n", __func__, tcon->tc_count); spin_lock(&cifs_tcp_ses_lock); + spin_lock(&tcon->tc_lock); if (--tcon->tc_count > 0) { + spin_unlock(&tcon->tc_lock); spin_unlock(&cifs_tcp_ses_lock); return; } @@ -2302,6 +2356,7 @@ cifs_put_tcon(struct cifs_tcon *tcon) WARN_ON(tcon->tc_count < 0); list_del_init(&tcon->tcon_list); + spin_unlock(&tcon->tc_lock); spin_unlock(&cifs_tcp_ses_lock); /* cancel polling of interfaces */ @@ -2621,6 +2676,8 @@ compare_mount_options(struct super_block *sb, struct cifs_mnt_data *mnt_data) return 0; if (old->ctx->acdirmax != new->ctx->acdirmax) return 0; + if (old->ctx->closetimeo != new->ctx->closetimeo) + return 0; return 1; } @@ -2646,7 +2703,7 @@ match_prepath(struct super_block *sb, struct cifs_mnt_data *mnt_data) int cifs_match_super(struct super_block *sb, void *data) { - struct cifs_mnt_data *mnt_data = (struct cifs_mnt_data *)data; + struct cifs_mnt_data *mnt_data = data; struct smb3_fs_context *ctx; struct cifs_sb_info *cifs_sb; struct TCP_Server_Info *tcp_srv; @@ -2669,6 +2726,9 @@ cifs_match_super(struct super_block *sb, void 
*data) ctx = mnt_data->ctx; + spin_lock(&tcp_srv->srv_lock); + spin_lock(&ses->ses_lock); + spin_lock(&tcon->tc_lock); if (!match_server(tcp_srv, ctx) || !match_session(ses, ctx) || !match_tcon(tcon, ctx) || @@ -2679,6 +2739,10 @@ cifs_match_super(struct super_block *sb, void *data) rc = compare_mount_options(sb, mnt_data); out: + spin_unlock(&tcon->tc_lock); + spin_unlock(&ses->ses_lock); + spin_unlock(&tcp_srv->srv_lock); + spin_unlock(&cifs_tcp_ses_lock); cifs_put_tlink(tlink); return rc; @@ -2956,6 +3020,7 @@ ip_connect(struct TCP_Server_Info *server) return generic_ip_connect(server); } +#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY void reset_cifs_unix_caps(unsigned int xid, struct cifs_tcon *tcon, struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx) { @@ -3061,6 +3126,7 @@ void reset_cifs_unix_caps(unsigned int xid, struct cifs_tcon *tcon, } } } +#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ int cifs_setup_cifs_sb(struct cifs_sb_info *cifs_sb) { @@ -3177,6 +3243,7 @@ static int mount_get_conns(struct mount_ctx *mnt_ctx) if (tcon->posix_extensions) cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_POSIX_PATHS; +#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY /* tell server which Unix caps we support */ if (cap_unix(tcon->ses)) { /* @@ -3184,16 +3251,17 @@ static int mount_get_conns(struct mount_ctx *mnt_ctx) * for just this mount. */ reset_cifs_unix_caps(xid, tcon, cifs_sb, ctx); - spin_lock(&cifs_tcp_ses_lock); + spin_lock(&tcon->ses->server->srv_lock); if ((tcon->ses->server->tcpStatus == CifsNeedReconnect) && (le64_to_cpu(tcon->fsUnixInfo.Capability) & CIFS_UNIX_TRANSPORT_ENCRYPTION_MANDATORY_CAP)) { - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&tcon->ses->server->srv_lock); rc = -EACCES; goto out; } - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&tcon->ses->server->srv_lock); } else +#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ tcon->unix_ext = 0; /* server does not support them */ /* do not care if a following call succeed - informational */ @@ -3275,9 +3343,9 @@ static int mount_get_dfs_conns(struct mount_ctx *mnt_ctx) rc = mount_get_conns(mnt_ctx); if (mnt_ctx->server) { cifs_dbg(FYI, "%s: marking tcp session as a dfs connection\n", __func__); - spin_lock(&cifs_tcp_ses_lock); + spin_lock(&mnt_ctx->server->srv_lock); mnt_ctx->server->is_dfs_conn = true; - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&mnt_ctx->server->srv_lock); } return rc; } @@ -3921,7 +3989,7 @@ CIFSTCon(const unsigned int xid, struct cifs_ses *ses, } bcc_ptr += length + 1; bytes_left -= (length + 1); - strlcpy(tcon->treeName, tree, sizeof(tcon->treeName)); + strscpy(tcon->treeName, tree, sizeof(tcon->treeName)); /* mostly informational -- no need to fail on error here */ kfree(tcon->nativeFileSystem); @@ -3992,28 +4060,28 @@ cifs_negotiate_protocol(const unsigned int xid, struct cifs_ses *ses, return -ENOSYS; /* only send once per connect */ - spin_lock(&cifs_tcp_ses_lock); + spin_lock(&server->srv_lock); if (!server->ops->need_neg(server) || server->tcpStatus != CifsNeedNegotiate) { - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&server->srv_lock); return 0; } server->tcpStatus = CifsInNegotiate; - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&server->srv_lock); rc = server->ops->negotiate(xid, ses, server); if (rc == 0) { - spin_lock(&cifs_tcp_ses_lock); + spin_lock(&server->srv_lock); if (server->tcpStatus == CifsInNegotiate) server->tcpStatus = CifsGood; else rc = -EHOSTDOWN; - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&server->srv_lock); } else { - spin_lock(&cifs_tcp_ses_lock); + 
spin_lock(&server->srv_lock); if (server->tcpStatus == CifsInNegotiate) server->tcpStatus = CifsNeedNegotiate; - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&server->srv_lock); } return rc; @@ -4029,7 +4097,7 @@ cifs_setup_session(const unsigned int xid, struct cifs_ses *ses, struct sockaddr_in *addr = (struct sockaddr_in *)&server->dstaddr; bool is_binding = false; - spin_lock(&cifs_tcp_ses_lock); + spin_lock(&ses->ses_lock); if (server->dstaddr.ss_family == AF_INET6) scnprintf(ses->ip_addr, sizeof(ses->ip_addr), "%pI6", &addr6->sin6_addr); else @@ -4038,7 +4106,7 @@ cifs_setup_session(const unsigned int xid, struct cifs_ses *ses, if (ses->ses_status != SES_GOOD && ses->ses_status != SES_NEW && ses->ses_status != SES_NEED_RECON) { - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&ses->ses_lock); return 0; } @@ -4047,7 +4115,7 @@ cifs_setup_session(const unsigned int xid, struct cifs_ses *ses, if (CIFS_ALL_CHANS_GOOD(ses) || cifs_chan_in_reconnect(ses, server)) { spin_unlock(&ses->chan_lock); - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&ses->ses_lock); return 0; } is_binding = !CIFS_ALL_CHANS_NEED_RECONNECT(ses); @@ -4056,7 +4124,7 @@ cifs_setup_session(const unsigned int xid, struct cifs_ses *ses, if (!is_binding) ses->ses_status = SES_IN_SETUP; - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&ses->ses_lock); if (!is_binding) { ses->capabilities = server->capabilities; @@ -4080,22 +4148,22 @@ cifs_setup_session(const unsigned int xid, struct cifs_ses *ses, if (rc) { cifs_server_dbg(VFS, "Send error in SessSetup = %d\n", rc); - spin_lock(&cifs_tcp_ses_lock); + spin_lock(&ses->ses_lock); if (ses->ses_status == SES_IN_SETUP) ses->ses_status = SES_NEED_RECON; spin_lock(&ses->chan_lock); cifs_chan_clear_in_reconnect(ses, server); spin_unlock(&ses->chan_lock); - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&ses->ses_lock); } else { - spin_lock(&cifs_tcp_ses_lock); + spin_lock(&ses->ses_lock); if (ses->ses_status == SES_IN_SETUP) ses->ses_status = SES_GOOD; spin_lock(&ses->chan_lock); cifs_chan_clear_in_reconnect(ses, server); cifs_chan_clear_need_reconnect(ses, server); spin_unlock(&ses->chan_lock); - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&ses->ses_lock); } return rc; @@ -4169,8 +4237,10 @@ cifs_construct_tcon(struct cifs_sb_info *cifs_sb, kuid_t fsuid) goto out; } +#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY if (cap_unix(ses)) reset_cifs_unix_caps(0, tcon, NULL, ctx); +#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ out: kfree(ctx->username); @@ -4559,15 +4629,15 @@ int cifs_tree_connect(const unsigned int xid, struct cifs_tcon *tcon, const stru struct dfs_info3_param ref = {0}; /* only send once per connect */ - spin_lock(&cifs_tcp_ses_lock); + spin_lock(&tcon->tc_lock); if (tcon->ses->ses_status != SES_GOOD || (tcon->status != TID_NEW && tcon->status != TID_NEED_TCON)) { - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&tcon->tc_lock); return 0; } tcon->status = TID_IN_TCON; - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&tcon->tc_lock); tree = kzalloc(MAX_TREE_SIZE, GFP_KERNEL); if (!tree) { @@ -4606,15 +4676,15 @@ out: cifs_put_tcp_super(sb); if (rc) { - spin_lock(&cifs_tcp_ses_lock); + spin_lock(&tcon->tc_lock); if (tcon->status == TID_IN_TCON) tcon->status = TID_NEED_TCON; - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&tcon->tc_lock); } else { - spin_lock(&cifs_tcp_ses_lock); + spin_lock(&tcon->tc_lock); if (tcon->status == TID_IN_TCON) tcon->status = TID_GOOD; - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&tcon->tc_lock); tcon->need_reconnect = false; } @@ 
-4627,28 +4697,28 @@ int cifs_tree_connect(const unsigned int xid, struct cifs_tcon *tcon, const stru const struct smb_version_operations *ops = tcon->ses->server->ops; /* only send once per connect */ - spin_lock(&cifs_tcp_ses_lock); + spin_lock(&tcon->tc_lock); if (tcon->ses->ses_status != SES_GOOD || (tcon->status != TID_NEW && tcon->status != TID_NEED_TCON)) { - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&tcon->tc_lock); return 0; } tcon->status = TID_IN_TCON; - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&tcon->tc_lock); rc = ops->tree_connect(xid, tcon->ses, tcon->treeName, tcon, nlsc); if (rc) { - spin_lock(&cifs_tcp_ses_lock); + spin_lock(&tcon->tc_lock); if (tcon->status == TID_IN_TCON) tcon->status = TID_NEED_TCON; - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&tcon->tc_lock); } else { - spin_lock(&cifs_tcp_ses_lock); + spin_lock(&tcon->tc_lock); if (tcon->status == TID_IN_TCON) tcon->status = TID_GOOD; - spin_unlock(&cifs_tcp_ses_lock); tcon->need_reconnect = false; + spin_unlock(&tcon->tc_lock); } return rc; diff --git a/fs/cifs/dfs_cache.c b/fs/cifs/dfs_cache.c index 34a8f3baed5e..a9b6c3eba6de 100644 --- a/fs/cifs/dfs_cache.c +++ b/fs/cifs/dfs_cache.c @@ -1526,15 +1526,21 @@ static void refresh_mounts(struct cifs_ses **sessions) spin_lock(&cifs_tcp_ses_lock); list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) { - if (!server->is_dfs_conn) + spin_lock(&server->srv_lock); + if (!server->is_dfs_conn) { + spin_unlock(&server->srv_lock); continue; + } + spin_unlock(&server->srv_lock); list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) { list_for_each_entry(tcon, &ses->tcon_list, tcon_list) { + spin_lock(&tcon->tc_lock); if (!tcon->ipc && !tcon->need_reconnect) { tcon->tc_count++; list_add_tail(&tcon->ulist, &tcons); } + spin_unlock(&tcon->tc_lock); } } } diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c index ce9b22aecfba..08f7392716e2 100644 --- a/fs/cifs/dir.c +++ b/fs/cifs/dir.c @@ -193,6 +193,7 @@ cifs_do_create(struct inode *inode, struct dentry *direntry, unsigned int xid, return PTR_ERR(full_path); } +#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY if (tcon->unix_ext && cap_unix(tcon->ses) && !tcon->broken_posix_open && (CIFS_UNIX_POSIX_PATH_OPS_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability))) { @@ -261,6 +262,7 @@ cifs_do_create(struct inode *inode, struct dentry *direntry, unsigned int xid, * rare for path not covered on files) */ } +#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ desired_access = 0; if (OPEN_FMODE(oflags) & FMODE_READ) @@ -316,6 +318,7 @@ cifs_do_create(struct inode *inode, struct dentry *direntry, unsigned int xid, goto out; } +#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY /* * If Open reported that we actually created a file then we now have to * set the mode if possible. 
@@ -357,6 +360,9 @@ cifs_create_get_file_info: rc = cifs_get_inode_info_unix(&newinode, full_path, inode->i_sb, xid); else { +#else + { +#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ /* TODO: Add support for calling POSIX query info here, but passing in fid */ rc = cifs_get_inode_info(&newinode, full_path, buf, inode->i_sb, xid, fid); @@ -377,7 +383,9 @@ cifs_create_get_file_info: } } +#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY cifs_create_set_dentry: +#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ if (rc != 0) { cifs_dbg(FYI, "Create worked, get_inode_info failed rc = %d\n", rc); diff --git a/fs/cifs/file.c b/fs/cifs/file.c index e64cda7a7610..6f38b134a346 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -26,6 +26,7 @@ #include "cifspdu.h" #include "cifsglob.h" #include "cifsproto.h" +#include "smb2proto.h" #include "cifs_unicode.h" #include "cifs_debug.h" #include "cifs_fs_sb.h" @@ -33,6 +34,48 @@ #include "smbdirect.h" #include "fs_context.h" #include "cifs_ioctl.h" +#include "cached_dir.h" + +/* + * Mark as invalid, all open files on tree connections since they + * were closed when session to server was lost. + */ +void +cifs_mark_open_files_invalid(struct cifs_tcon *tcon) +{ + struct cifsFileInfo *open_file = NULL; + struct list_head *tmp; + struct list_head *tmp1; + + /* only send once per connect */ + spin_lock(&tcon->ses->ses_lock); + if ((tcon->ses->ses_status != SES_GOOD) || (tcon->status != TID_NEED_RECON)) { + spin_unlock(&tcon->ses->ses_lock); + return; + } + tcon->status = TID_IN_FILES_INVALIDATE; + spin_unlock(&tcon->ses->ses_lock); + + /* list all files open on tree connection and mark them invalid */ + spin_lock(&tcon->open_file_lock); + list_for_each_safe(tmp, tmp1, &tcon->openFileList) { + open_file = list_entry(tmp, struct cifsFileInfo, tlist); + open_file->invalidHandle = true; + open_file->oplock_break_cancelled = true; + } + spin_unlock(&tcon->open_file_lock); + + invalidate_all_cached_dirs(tcon); + spin_lock(&tcon->tc_lock); + if (tcon->status == TID_IN_FILES_INVALIDATE) + tcon->status = TID_NEED_TCON; + spin_unlock(&tcon->tc_lock); + + /* + * BB Add call to invalidate_inodes(sb) for all superblocks mounted + * to this tcon. 
+ */ +} static inline int cifs_convert_flags(unsigned int flags) { @@ -52,6 +95,7 @@ static inline int cifs_convert_flags(unsigned int flags) FILE_READ_DATA); } +#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY static u32 cifs_posix_convert_flags(unsigned int flags) { u32 posix_flags = 0; @@ -85,6 +129,7 @@ static u32 cifs_posix_convert_flags(unsigned int flags) return posix_flags; } +#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ static inline int cifs_get_disposition(unsigned int flags) { @@ -100,6 +145,7 @@ static inline int cifs_get_disposition(unsigned int flags) return FILE_OPEN; } +#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY int cifs_posix_open(const char *full_path, struct inode **pinode, struct super_block *sb, int mode, unsigned int f_flags, __u32 *poplock, __u16 *pnetfid, unsigned int xid) @@ -161,6 +207,7 @@ posix_open_ret: kfree(presp_data); return rc; } +#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ static int cifs_nt_open(const char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb, @@ -579,6 +626,7 @@ int cifs_open(struct inode *inode, struct file *file) else oplock = 0; +#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY if (!tcon->broken_posix_open && tcon->unix_ext && cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability))) { @@ -603,6 +651,7 @@ int cifs_open(struct inode *inode, struct file *file) * or DFS errors. */ } +#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ if (server->ops->get_lease_key) server->ops->get_lease_key(inode, &fid); @@ -630,6 +679,7 @@ int cifs_open(struct inode *inode, struct file *file) goto out; } +#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) { /* * Time to set mode which we can not set earlier due to @@ -647,6 +697,7 @@ int cifs_open(struct inode *inode, struct file *file) CIFSSMBUnixSetFileInfo(xid, tcon, &args, fid.netfid, cfile->pid); } +#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ use_cache: fscache_use_cookie(cifs_inode_cookie(file_inode(file)), @@ -664,7 +715,9 @@ out: return rc; } +#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY static int cifs_push_posix_locks(struct cifsFileInfo *cfile); +#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ /* * Try to reacquire byte range locks that were released when session @@ -673,10 +726,12 @@ static int cifs_push_posix_locks(struct cifsFileInfo *cfile); static int cifs_relock_file(struct cifsFileInfo *cfile) { - struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb); struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry)); struct cifs_tcon *tcon = tlink_tcon(cfile->tlink); int rc = 0; +#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY + struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb); +#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ down_read_nested(&cinode->lock_sem, SINGLE_DEPTH_NESTING); if (cinode->can_cache_brlcks) { @@ -685,11 +740,13 @@ cifs_relock_file(struct cifsFileInfo *cfile) return rc; } +#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY if (cap_unix(tcon->ses) && (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) && ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) rc = cifs_push_posix_locks(cfile); else +#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ rc = tcon->ses->server->ops->push_mand_locks(cfile); up_read(&cinode->lock_sem); @@ -750,6 +807,7 @@ cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush) else oplock = 0; +#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY if (tcon->unix_ext && cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP & 
le64_to_cpu(tcon->fsUnixInfo.Capability))) { @@ -773,6 +831,7 @@ cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush) * in the reconnect path it is important to retry hard */ } +#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ desired_access = cifs_convert_flags(cfile->f_flags); @@ -817,7 +876,9 @@ cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush) goto reopen_error_exit; } +#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY reopen_success: +#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ cfile->invalidHandle = false; mutex_unlock(&cfile->fh_mutex); cinode = CIFS_I(inode); @@ -903,12 +964,12 @@ int cifs_close(struct inode *inode, struct file *file) * So, Increase the ref count to avoid use-after-free. */ if (!mod_delayed_work(deferredclose_wq, - &cfile->deferred, cifs_sb->ctx->acregmax)) + &cfile->deferred, cifs_sb->ctx->closetimeo)) cifsFileInfo_get(cfile); } else { /* Deferred close for files */ queue_delayed_work(deferredclose_wq, - &cfile->deferred, cifs_sb->ctx->acregmax); + &cfile->deferred, cifs_sb->ctx->closetimeo); cfile->deferred_close_scheduled = true; spin_unlock(&cinode->deferred_lock); return 0; @@ -928,9 +989,7 @@ int cifs_close(struct inode *inode, struct file *file) void cifs_reopen_persistent_handles(struct cifs_tcon *tcon) { - struct cifsFileInfo *open_file; - struct list_head *tmp; - struct list_head *tmp1; + struct cifsFileInfo *open_file, *tmp; struct list_head tmp_list; if (!tcon->use_persistent || !tcon->need_reopen_files) @@ -943,8 +1002,7 @@ cifs_reopen_persistent_handles(struct cifs_tcon *tcon) /* list all files open on tree connection, reopen resilient handles */ spin_lock(&tcon->open_file_lock); - list_for_each(tmp, &tcon->openFileList) { - open_file = list_entry(tmp, struct cifsFileInfo, tlist); + list_for_each_entry(open_file, &tcon->openFileList, tlist) { if (!open_file->invalidHandle) continue; cifsFileInfo_get(open_file); @@ -952,8 +1010,7 @@ cifs_reopen_persistent_handles(struct cifs_tcon *tcon) } spin_unlock(&tcon->open_file_lock); - list_for_each_safe(tmp, tmp1, &tmp_list) { - open_file = list_entry(tmp, struct cifsFileInfo, rlist); + list_for_each_entry_safe(open_file, tmp, &tmp_list, rlist) { if (cifs_reopen_file(open_file, false /* do not flush */)) tcon->need_reopen_files = true; list_del_init(&open_file->rlist); @@ -1196,6 +1253,7 @@ try_again: return rc; } +#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY /* * Check if there is another lock that prevents us to set the lock (posix * style). 
If such a lock exists, update the flock structure with its @@ -1334,6 +1392,7 @@ hash_lockowner(fl_owner_t owner) { return cifs_lock_secret ^ hash32_ptr((const void *)owner); } +#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ struct lock_to_push { struct list_head llist; @@ -1344,6 +1403,7 @@ struct lock_to_push { __u8 type; }; +#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY static int cifs_push_posix_locks(struct cifsFileInfo *cfile) { @@ -1431,14 +1491,17 @@ err_out: } goto out; } +#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ static int cifs_push_locks(struct cifsFileInfo *cfile) { - struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb); struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry)); struct cifs_tcon *tcon = tlink_tcon(cfile->tlink); int rc = 0; +#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY + struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb); +#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ /* we are going to update can_cache_brlcks here - need a write access */ cifs_down_write(&cinode->lock_sem); @@ -1447,11 +1510,13 @@ cifs_push_locks(struct cifsFileInfo *cfile) return rc; } +#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY if (cap_unix(tcon->ses) && (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) && ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) rc = cifs_push_posix_locks(cfile); else +#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ rc = tcon->ses->server->ops->push_mand_locks(cfile); cinode->can_cache_brlcks = false; @@ -1515,6 +1580,7 @@ cifs_getlk(struct file *file, struct file_lock *flock, __u32 type, struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data; struct cifs_tcon *tcon = tlink_tcon(cfile->tlink); struct TCP_Server_Info *server = tcon->ses->server; +#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY __u16 netfid = cfile->fid.netfid; if (posix_lck) { @@ -1534,6 +1600,7 @@ cifs_getlk(struct file *file, struct file_lock *flock, __u32 type, posix_lock_type, wait_flag); return rc; } +#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ rc = cifs_lock_test(cfile, flock->fl_start, length, type, flock); if (!rc) @@ -1594,6 +1661,7 @@ cifs_free_llist(struct list_head *llist) } } +#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY int cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock, unsigned int xid) @@ -1706,6 +1774,7 @@ cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock, kfree(buf); return rc; } +#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ static int cifs_setlk(struct file *file, struct file_lock *flock, __u32 type, @@ -1719,6 +1788,7 @@ cifs_setlk(struct file *file, struct file_lock *flock, __u32 type, struct TCP_Server_Info *server = tcon->ses->server; struct inode *inode = d_inode(cfile->dentry); +#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY if (posix_lck) { int posix_lock_type; @@ -1740,7 +1810,7 @@ cifs_setlk(struct file *file, struct file_lock *flock, __u32 type, NULL, posix_lock_type, wait_flag); goto out; } - +#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ if (lock) { struct cifsLockInfo *lock; @@ -1861,9 +1931,9 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *flock) rc = -EACCES; xid = get_xid(); - cifs_dbg(FYI, "Lock parm: 0x%x flockflags: 0x%x flocktype: 0x%x start: %lld end: %lld\n", - cmd, flock->fl_flags, flock->fl_type, - flock->fl_start, flock->fl_end); + cifs_dbg(FYI, "%s: %pD2 cmd=0x%x type=0x%x flags=0x%x r=%lld:%lld\n", __func__, file, cmd, + flock->fl_flags, flock->fl_type, (long long)flock->fl_start, + (long long)flock->fl_end); cfile = (struct cifsFileInfo 
*)file->private_data; tcon = tlink_tcon(cfile->tlink); @@ -2204,6 +2274,185 @@ cifs_get_readable_path(struct cifs_tcon *tcon, const char *name, return -ENOENT; } +void +cifs_writedata_release(struct kref *refcount) +{ + struct cifs_writedata *wdata = container_of(refcount, + struct cifs_writedata, refcount); +#ifdef CONFIG_CIFS_SMB_DIRECT + if (wdata->mr) { + smbd_deregister_mr(wdata->mr); + wdata->mr = NULL; + } +#endif + + if (wdata->cfile) + cifsFileInfo_put(wdata->cfile); + + kvfree(wdata->pages); + kfree(wdata); +} + +/* + * Write failed with a retryable error. Resend the write request. It's also + * possible that the page was redirtied so re-clean the page. + */ +static void +cifs_writev_requeue(struct cifs_writedata *wdata) +{ + int i, rc = 0; + struct inode *inode = d_inode(wdata->cfile->dentry); + struct TCP_Server_Info *server; + unsigned int rest_len; + + server = tlink_tcon(wdata->cfile->tlink)->ses->server; + i = 0; + rest_len = wdata->bytes; + do { + struct cifs_writedata *wdata2; + unsigned int j, nr_pages, wsize, tailsz, cur_len; + + wsize = server->ops->wp_retry_size(inode); + if (wsize < rest_len) { + nr_pages = wsize / PAGE_SIZE; + if (!nr_pages) { + rc = -EOPNOTSUPP; + break; + } + cur_len = nr_pages * PAGE_SIZE; + tailsz = PAGE_SIZE; + } else { + nr_pages = DIV_ROUND_UP(rest_len, PAGE_SIZE); + cur_len = rest_len; + tailsz = rest_len - (nr_pages - 1) * PAGE_SIZE; + } + + wdata2 = cifs_writedata_alloc(nr_pages, cifs_writev_complete); + if (!wdata2) { + rc = -ENOMEM; + break; + } + + for (j = 0; j < nr_pages; j++) { + wdata2->pages[j] = wdata->pages[i + j]; + lock_page(wdata2->pages[j]); + clear_page_dirty_for_io(wdata2->pages[j]); + } + + wdata2->sync_mode = wdata->sync_mode; + wdata2->nr_pages = nr_pages; + wdata2->offset = page_offset(wdata2->pages[0]); + wdata2->pagesz = PAGE_SIZE; + wdata2->tailsz = tailsz; + wdata2->bytes = cur_len; + + rc = cifs_get_writable_file(CIFS_I(inode), FIND_WR_ANY, + &wdata2->cfile); + if (!wdata2->cfile) { + cifs_dbg(VFS, "No writable handle to retry writepages rc=%d\n", + rc); + if (!is_retryable_error(rc)) + rc = -EBADF; + } else { + wdata2->pid = wdata2->cfile->pid; + rc = server->ops->async_writev(wdata2, + cifs_writedata_release); + } + + for (j = 0; j < nr_pages; j++) { + unlock_page(wdata2->pages[j]); + if (rc != 0 && !is_retryable_error(rc)) { + SetPageError(wdata2->pages[j]); + end_page_writeback(wdata2->pages[j]); + put_page(wdata2->pages[j]); + } + } + + kref_put(&wdata2->refcount, cifs_writedata_release); + if (rc) { + if (is_retryable_error(rc)) + continue; + i += nr_pages; + break; + } + + rest_len -= cur_len; + i += nr_pages; + } while (i < wdata->nr_pages); + + /* cleanup remaining pages from the original wdata */ + for (; i < wdata->nr_pages; i++) { + SetPageError(wdata->pages[i]); + end_page_writeback(wdata->pages[i]); + put_page(wdata->pages[i]); + } + + if (rc != 0 && !is_retryable_error(rc)) + mapping_set_error(inode->i_mapping, rc); + kref_put(&wdata->refcount, cifs_writedata_release); +} + +void +cifs_writev_complete(struct work_struct *work) +{ + struct cifs_writedata *wdata = container_of(work, + struct cifs_writedata, work); + struct inode *inode = d_inode(wdata->cfile->dentry); + int i = 0; + + if (wdata->result == 0) { + spin_lock(&inode->i_lock); + cifs_update_eof(CIFS_I(inode), wdata->offset, wdata->bytes); + spin_unlock(&inode->i_lock); + cifs_stats_bytes_written(tlink_tcon(wdata->cfile->tlink), + wdata->bytes); + } else if (wdata->sync_mode == WB_SYNC_ALL && wdata->result == -EAGAIN) + return 
cifs_writev_requeue(wdata); + + for (i = 0; i < wdata->nr_pages; i++) { + struct page *page = wdata->pages[i]; + + if (wdata->result == -EAGAIN) + __set_page_dirty_nobuffers(page); + else if (wdata->result < 0) + SetPageError(page); + end_page_writeback(page); + cifs_readpage_to_fscache(inode, page); + put_page(page); + } + if (wdata->result != -EAGAIN) + mapping_set_error(inode->i_mapping, wdata->result); + kref_put(&wdata->refcount, cifs_writedata_release); +} + +struct cifs_writedata * +cifs_writedata_alloc(unsigned int nr_pages, work_func_t complete) +{ + struct page **pages = + kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS); + if (pages) + return cifs_writedata_direct_alloc(pages, complete); + + return NULL; +} + +struct cifs_writedata * +cifs_writedata_direct_alloc(struct page **pages, work_func_t complete) +{ + struct cifs_writedata *wdata; + + wdata = kzalloc(sizeof(*wdata), GFP_NOFS); + if (wdata != NULL) { + wdata->pages = pages; + kref_init(&wdata->refcount); + INIT_LIST_HEAD(&wdata->list); + init_completion(&wdata->done); + INIT_WORK(&wdata->work, complete); + } + return wdata; +} + + static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to) { struct address_space *mapping = page->mapping; @@ -3022,7 +3271,7 @@ cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from, if (ctx->direct_io) { ssize_t result; - result = iov_iter_get_pages_alloc( + result = iov_iter_get_pages_alloc2( from, &pagevec, cur_len, &start); if (result < 0) { cifs_dbg(VFS, @@ -3036,7 +3285,6 @@ cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from, break; } cur_len = (size_t)result; - iov_iter_advance(from, cur_len); nr_pages = (cur_len + start + PAGE_SIZE - 1) / PAGE_SIZE; @@ -3327,6 +3575,9 @@ static ssize_t __cifs_writev( ssize_t cifs_direct_writev(struct kiocb *iocb, struct iov_iter *from) { + struct file *file = iocb->ki_filp; + + cifs_revalidate_mapping(file->f_inode); return __cifs_writev(iocb, from, true); } @@ -3758,7 +4009,7 @@ cifs_send_async_read(loff_t offset, size_t len, struct cifsFileInfo *open_file, if (ctx->direct_io) { ssize_t result; - result = iov_iter_get_pages_alloc( + result = iov_iter_get_pages_alloc2( &direct_iov, &pagevec, cur_len, &start); if (result < 0) { @@ -3774,7 +4025,6 @@ cifs_send_async_read(loff_t offset, size_t len, struct cifsFileInfo *open_file, break; } cur_len = (size_t)result; - iov_iter_advance(&direct_iov, cur_len); rdata = cifs_readdata_direct_alloc( pagevec, cifs_uncached_readv_complete); @@ -4004,7 +4254,7 @@ static ssize_t __cifs_readv( if (!is_sync_kiocb(iocb)) ctx->iocb = iocb; - if (iter_is_iovec(to)) + if (user_backed_iter(to)) ctx->should_dirty = true; if (direct) { @@ -4459,10 +4709,11 @@ static void cifs_readahead(struct readahead_control *ractl) * TODO: Send a whole batch of pages to be read * by the cache. 
*/ - page = readahead_page(ractl); - last_batch_size = 1 << thp_order(page); + struct folio *folio = readahead_folio(ractl); + + last_batch_size = folio_nr_pages(folio); if (cifs_readpage_from_fscache(ractl->mapping->host, - page) < 0) { + &folio->page) < 0) { /* * TODO: Deal with cache read failure * here, but for the moment, delegate @@ -4470,7 +4721,7 @@ static void cifs_readahead(struct readahead_control *ractl) */ caching = false; } - unlock_page(page); + folio_unlock(folio); next_cached++; cache_nr_pages--; if (cache_nr_pages == 0) @@ -4811,8 +5062,6 @@ void cifs_oplock_break(struct work_struct *work) struct TCP_Server_Info *server = tcon->ses->server; int rc = 0; bool purge_cache = false; - bool is_deferred = false; - struct cifs_deferred_close *dclose; wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS, TASK_UNINTERRUPTIBLE); @@ -4849,22 +5098,6 @@ void cifs_oplock_break(struct work_struct *work) oplock_break_ack: /* - * When oplock break is received and there are no active - * file handles but cached, then schedule deferred close immediately. - * So, new open will not use cached handle. - */ - spin_lock(&CIFS_I(inode)->deferred_lock); - is_deferred = cifs_is_deferred_close(cfile, &dclose); - spin_unlock(&CIFS_I(inode)->deferred_lock); - if (is_deferred && - cfile->deferred_close_scheduled && - delayed_work_pending(&cfile->deferred)) { - if (cancel_delayed_work(&cfile->deferred)) { - _cifsFileInfo_put(cfile, false, false); - goto oplock_break_done; - } - } - /* * releasing stale oplock after recent reconnect of smb session using * a now incorrect file handle is not a data integrity issue but do * not bother sending an oplock release if session to server still is @@ -4875,7 +5108,7 @@ oplock_break_ack: cinode); cifs_dbg(FYI, "Oplock release rc = %d\n", rc); } -oplock_break_done: + _cifsFileInfo_put(cfile, false /* do not wait for ourself */, false); cifs_done_oplock_break(cinode); } diff --git a/fs/cifs/fs_context.c b/fs/cifs/fs_context.c index 8dc0d923ef6a..0e13dec86b25 100644 --- a/fs/cifs/fs_context.c +++ b/fs/cifs/fs_context.c @@ -147,6 +147,7 @@ const struct fs_parameter_spec smb3_fs_parameters[] = { fsparam_u32("actimeo", Opt_actimeo), fsparam_u32("acdirmax", Opt_acdirmax), fsparam_u32("acregmax", Opt_acregmax), + fsparam_u32("closetimeo", Opt_closetimeo), fsparam_u32("echo_interval", Opt_echo_interval), fsparam_u32("max_credits", Opt_max_credits), fsparam_u32("handletimeout", Opt_handletimeout), @@ -1074,6 +1075,13 @@ static int smb3_fs_context_parse_param(struct fs_context *fc, } ctx->acdirmax = ctx->acregmax = HZ * result.uint_32; break; + case Opt_closetimeo: + ctx->closetimeo = HZ * result.uint_32; + if (ctx->closetimeo > SMB3_MAX_DCLOSETIMEO) { + cifs_errorf(fc, "closetimeo too large\n"); + goto cifs_parse_mount_err; + } + break; case Opt_echo_interval: ctx->echo_interval = result.uint_32; break; @@ -1521,6 +1529,7 @@ int smb3_init_fs_context(struct fs_context *fc) ctx->acregmax = CIFS_DEF_ACTIMEO; ctx->acdirmax = CIFS_DEF_ACTIMEO; + ctx->closetimeo = SMB3_DEF_DCLOSETIMEO; /* Most clients set timeout to 0, allows server to use its default */ ctx->handle_timeout = 0; /* See MS-SMB2 spec section 2.2.14.2.12 */ diff --git a/fs/cifs/fs_context.h b/fs/cifs/fs_context.h index 5f093cb7e9b9..bbaee4c2281f 100644 --- a/fs/cifs/fs_context.h +++ b/fs/cifs/fs_context.h @@ -125,6 +125,7 @@ enum cifs_param { Opt_actimeo, Opt_acdirmax, Opt_acregmax, + Opt_closetimeo, Opt_echo_interval, Opt_max_credits, Opt_snapshot, @@ -247,6 +248,8 @@ struct smb3_fs_context { /* attribute cache 
timemout for files and directories in jiffies */ unsigned long acregmax; unsigned long acdirmax; + /* timeout for deferred close of files in jiffies */ + unsigned long closetimeo; struct smb_version_operations *ops; struct smb_version_values *vals; char *prepath; @@ -279,4 +282,9 @@ static inline struct smb3_fs_context *smb3_fc2context(const struct fs_context *f extern int smb3_fs_context_dup(struct smb3_fs_context *new_ctx, struct smb3_fs_context *ctx); extern void smb3_update_mnt_flags(struct cifs_sb_info *cifs_sb); +/* + * max deferred close timeout (jiffies) - 2^30 + */ +#define SMB3_MAX_DCLOSETIMEO (1 << 30) +#define SMB3_DEF_DCLOSETIMEO (5 * HZ) /* Can increase later, other clients use larger */ #endif diff --git a/fs/cifs/fscache.h b/fs/cifs/fscache.h index aa3b941a5555..67b601041f0a 100644 --- a/fs/cifs/fscache.h +++ b/fs/cifs/fscache.h @@ -108,17 +108,6 @@ static inline void cifs_readpage_to_fscache(struct inode *inode, __cifs_readpage_to_fscache(inode, page); } -static inline int cifs_fscache_release_page(struct page *page, gfp_t gfp) -{ - if (PageFsCache(page)) { - if (current_is_kswapd() || !(gfp & __GFP_FS)) - return false; - wait_on_page_fscache(page); - fscache_note_page_release(cifs_inode_cookie(page->mapping->host)); - } - return true; -} - #else /* CONFIG_CIFS_FSCACHE */ static inline void cifs_fscache_fill_coherency(struct inode *inode, @@ -154,11 +143,6 @@ cifs_readpage_from_fscache(struct inode *inode, struct page *page) static inline void cifs_readpage_to_fscache(struct inode *inode, struct page *page) {} -static inline int nfs_fscache_release_page(struct page *page, gfp_t gfp) -{ - return true; /* May release page */ -} - #endif /* CONFIG_CIFS_FSCACHE */ #endif /* _CIFS_FSCACHE_H */ diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c index 81da81e18553..bac08c20f559 100644 --- a/fs/cifs/inode.c +++ b/fs/cifs/inode.c @@ -25,6 +25,7 @@ #include "fscache.h" #include "fs_context.h" #include "cifs_ioctl.h" +#include "cached_dir.h" static void cifs_set_ops(struct inode *inode) { @@ -339,6 +340,7 @@ cifs_create_dfs_fattr(struct cifs_fattr *fattr, struct super_block *sb) fattr->cf_flags = CIFS_FATTR_DFS_REFERRAL; } +#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY static int cifs_get_file_info_unix(struct file *filp) { @@ -432,6 +434,14 @@ int cifs_get_inode_info_unix(struct inode **pinode, cgiiu_exit: return rc; } +#else +int cifs_get_inode_info_unix(struct inode **pinode, + const unsigned char *full_path, + struct super_block *sb, unsigned int xid) +{ + return -EOPNOTSUPP; +} +#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ static int cifs_sfu_type(struct cifs_fattr *fattr, const char *path, @@ -795,6 +805,7 @@ static __u64 simple_hashstr(const char *str) return hash; } +#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY /** * cifs_backup_query_path_info - SMB1 fallback code to get ino * @@ -847,6 +858,7 @@ cifs_backup_query_path_info(int xid, *data = (FILE_ALL_INFO *)info.srch_entries_start; return 0; } +#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ static void cifs_set_fattr_ino(int xid, @@ -991,6 +1003,7 @@ cifs_get_inode_info(struct inode **inode, rc = 0; break; case -EACCES: +#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY /* * perm errors, try again with backup flags if possible * @@ -1022,6 +1035,9 @@ cifs_get_inode_info(struct inode **inode, /* nothing we can do, bail out */ goto out; } +#else + goto out; +#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ break; default: cifs_dbg(FYI, "%s: unhandled err rc %d\n", __func__, rc); @@ -1037,8 +1053,9 @@ cifs_get_inode_info(struct inode **inode, /* * 
4. Tweak fattr based on mount options */ - +#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY handle_mnt_opt: +#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ /* query for SFU type info if supported and needed */ if (fattr.cf_cifsattrs & ATTR_SYSTEM && cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL) { @@ -1223,7 +1240,7 @@ static const struct inode_operations cifs_ipc_inode_ops = { static int cifs_find_inode(struct inode *inode, void *opaque) { - struct cifs_fattr *fattr = (struct cifs_fattr *) opaque; + struct cifs_fattr *fattr = opaque; /* don't match inode with different uniqueid */ if (CIFS_I(inode)->uniqueid != fattr->cf_uniqueid) @@ -1247,7 +1264,7 @@ cifs_find_inode(struct inode *inode, void *opaque) static int cifs_init_inode(struct inode *inode, void *opaque) { - struct cifs_fattr *fattr = (struct cifs_fattr *) opaque; + struct cifs_fattr *fattr = opaque; CIFS_I(inode)->uniqueid = fattr->cf_uniqueid; CIFS_I(inode)->createtime = fattr->cf_createtime; @@ -1435,6 +1452,7 @@ cifs_set_file_info(struct inode *inode, struct iattr *attrs, unsigned int xid, return server->ops->set_file_info(inode, full_path, &info_buf, xid); } +#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY /* * Open the given file (if it isn't already), set the DELETE_ON_CLOSE bit * and rename it to a random name that hopefully won't conflict with @@ -1565,6 +1583,7 @@ undo_setattr: goto out_close; } +#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ /* copied from fs/nfs/dir.c with small changes */ static void @@ -1627,6 +1646,7 @@ int cifs_unlink(struct inode *dir, struct dentry *dentry) } cifs_close_deferred_file_under_dentry(tcon, full_path); +#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY if (cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability))) { rc = CIFSPOSIXDelFile(xid, tcon, full_path, @@ -1636,6 +1656,7 @@ int cifs_unlink(struct inode *dir, struct dentry *dentry) if ((rc == 0) || (rc == -ENOENT)) goto psx_del_no_retry; } +#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ retry_std_delete: if (!server->ops->unlink) { @@ -1714,9 +1735,11 @@ cifs_mkdir_qinfo(struct inode *parent, struct dentry *dentry, umode_t mode, if (tcon->posix_extensions) rc = smb311_posix_get_inode_info(&inode, full_path, parent->i_sb, xid); +#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY else if (tcon->unix_ext) rc = cifs_get_inode_info_unix(&inode, full_path, parent->i_sb, xid); +#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ else rc = cifs_get_inode_info(&inode, full_path, NULL, parent->i_sb, xid, NULL); @@ -1746,6 +1769,7 @@ cifs_mkdir_qinfo(struct inode *parent, struct dentry *dentry, umode_t mode, if (parent->i_mode & S_ISGID) mode |= S_ISGID; +#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY if (tcon->unix_ext) { struct cifs_unix_set_info_args args = { .mode = mode, @@ -1768,6 +1792,9 @@ cifs_mkdir_qinfo(struct inode *parent, struct dentry *dentry, umode_t mode, cifs_sb->local_nls, cifs_remap(cifs_sb)); } else { +#else + { +#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ struct TCP_Server_Info *server = tcon->ses->server; if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) && (mode & S_IWUGO) == 0 && server->ops->mkdir_setinfo) @@ -1788,6 +1815,7 @@ cifs_mkdir_qinfo(struct inode *parent, struct dentry *dentry, umode_t mode, return 0; } +#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY static int cifs_posix_mkdir(struct inode *inode, struct dentry *dentry, umode_t mode, const char *full_path, struct cifs_sb_info *cifs_sb, @@ -1850,6 +1878,7 @@ posix_mkdir_get_info: xid); goto posix_mkdir_out; } +#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ 
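Note: the inode.c hunks above move the SMB1/unix-extensions paths behind CONFIG_CIFS_ALLOW_INSECURE_LEGACY, keeping callers building by providing a stub (e.g. cifs_get_inode_info_unix() returning -EOPNOTSUPP) when the option is off. A minimal user-space sketch of that compile-out pattern follows; the LEGACY_SUPPORT macro and function names are illustrative only and not taken from the patch.

/* Sketch of the "stub out legacy code behind a config option" pattern. */
#include <errno.h>
#include <stdio.h>

#ifdef LEGACY_SUPPORT
/* Real legacy implementation is compiled only when the option is enabled. */
static int legacy_query(const char *path)
{
	printf("legacy query for %s\n", path);
	return 0;
}
#else
/* Stub keeps every caller compiling; it just reports "not supported". */
static int legacy_query(const char *path)
{
	(void)path;
	return -EOPNOTSUPP;
}
#endif

int main(void)
{
	int rc = legacy_query("/share/file");

	if (rc == -EOPNOTSUPP)
		printf("legacy path compiled out, caller falls back\n");
	return 0;
}

Callers then need no #ifdefs of their own; they simply treat -EOPNOTSUPP as "take the non-legacy path".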
int cifs_mkdir(struct user_namespace *mnt_userns, struct inode *inode, struct dentry *direntry, umode_t mode) @@ -1892,6 +1921,7 @@ int cifs_mkdir(struct user_namespace *mnt_userns, struct inode *inode, goto mkdir_out; } +#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY if (cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability))) { rc = cifs_posix_mkdir(inode, direntry, mode, full_path, cifs_sb, @@ -1899,6 +1929,7 @@ int cifs_mkdir(struct user_namespace *mnt_userns, struct inode *inode, if (rc != -EOPNOTSUPP) goto mkdir_out; } +#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ if (!server->ops->mkdir) { rc = -ENOSYS; @@ -2015,9 +2046,12 @@ cifs_do_rename(const unsigned int xid, struct dentry *from_dentry, struct tcon_link *tlink; struct cifs_tcon *tcon; struct TCP_Server_Info *server; +#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY struct cifs_fid fid; struct cifs_open_parms oparms; - int oplock, rc; + int oplock; +#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ + int rc; tlink = cifs_sb_tlink(cifs_sb); if (IS_ERR(tlink)) @@ -2043,6 +2077,7 @@ cifs_do_rename(const unsigned int xid, struct dentry *from_dentry, if (server->vals->protocol_id != 0) goto do_rename_exit; +#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY /* open-file renames don't work across directories */ if (to_dentry->d_parent != from_dentry->d_parent) goto do_rename_exit; @@ -2064,6 +2099,7 @@ cifs_do_rename(const unsigned int xid, struct dentry *from_dentry, cifs_sb->local_nls, cifs_remap(cifs_sb)); CIFSSMBClose(xid, tcon, fid.netfid); } +#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ do_rename_exit: if (rc == 0) d_move(from_dentry, to_dentry); @@ -2081,11 +2117,13 @@ cifs_rename2(struct user_namespace *mnt_userns, struct inode *source_dir, struct cifs_sb_info *cifs_sb; struct tcon_link *tlink; struct cifs_tcon *tcon; - FILE_UNIX_BASIC_INFO *info_buf_source = NULL; - FILE_UNIX_BASIC_INFO *info_buf_target; unsigned int xid; int rc, tmprc; int retry_count = 0; + FILE_UNIX_BASIC_INFO *info_buf_source = NULL; +#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY + FILE_UNIX_BASIC_INFO *info_buf_target; +#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ if (flags & ~RENAME_NOREPLACE) return -EINVAL; @@ -2139,6 +2177,7 @@ cifs_rename2(struct user_namespace *mnt_userns, struct inode *source_dir, if (flags & RENAME_NOREPLACE) goto cifs_rename_exit; +#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY if (rc == -EEXIST && tcon->unix_ext) { /* * Are src and dst hardlinks of same inode? 
We can only tell @@ -2178,6 +2217,8 @@ cifs_rename2(struct user_namespace *mnt_userns, struct inode *source_dir, */ unlink_target: +#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ + /* Try unlinking the target dentry if it's not negative */ if (d_really_is_positive(target_dentry) && (rc == -EACCES || rc == -EEXIST)) { if (d_is_dir(target_dentry)) @@ -2337,14 +2378,18 @@ int cifs_revalidate_file_attr(struct file *filp) { int rc = 0; struct dentry *dentry = file_dentry(filp); +#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY struct cifsFileInfo *cfile = (struct cifsFileInfo *) filp->private_data; +#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ if (!cifs_dentry_needs_reval(dentry)) return rc; +#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY if (tlink_tcon(cfile->tlink)->unix_ext) rc = cifs_get_file_info_unix(filp); else +#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ rc = cifs_get_file_info(filp); return rc; @@ -2653,6 +2698,7 @@ set_size_out: return rc; } +#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY static int cifs_setattr_unix(struct dentry *direntry, struct iattr *attrs) { @@ -2800,6 +2846,7 @@ out: free_xid(xid); return rc; } +#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ static int cifs_setattr_nounix(struct dentry *direntry, struct iattr *attrs) @@ -2995,16 +3042,20 @@ cifs_setattr(struct user_namespace *mnt_userns, struct dentry *direntry, struct iattr *attrs) { struct cifs_sb_info *cifs_sb = CIFS_SB(direntry->d_sb); - struct cifs_tcon *pTcon = cifs_sb_master_tcon(cifs_sb); int rc, retries = 0; +#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY + struct cifs_tcon *pTcon = cifs_sb_master_tcon(cifs_sb); +#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ if (unlikely(cifs_forced_shutdown(cifs_sb))) return -EIO; do { +#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY if (pTcon->unix_ext) rc = cifs_setattr_unix(direntry, attrs); else +#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ rc = cifs_setattr_nounix(direntry, attrs); retries++; } while (is_retryable_error(rc) && retries < 2); diff --git a/fs/cifs/ioctl.c b/fs/cifs/ioctl.c index 0359b604bdbc..b6e6e5d6c8dd 100644 --- a/fs/cifs/ioctl.c +++ b/fs/cifs/ioctl.c @@ -333,6 +333,7 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg) tcon = tlink_tcon(pSMBFile->tlink); caps = le64_to_cpu(tcon->fsUnixInfo.Capability); #ifdef CONFIG_CIFS_POSIX +#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY if (CIFS_UNIX_EXTATTR_CAP & caps) { __u64 ExtAttrMask = 0; rc = CIFSGetExtAttr(xid, tcon, @@ -345,6 +346,7 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg) if (rc != EOPNOTSUPP) break; } +#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ #endif /* CONFIG_CIFS_POSIX */ rc = 0; if (CIFS_I(inode)->cifsAttrs & ATTR_COMPRESSED) { diff --git a/fs/cifs/link.c b/fs/cifs/link.c index bbdf3281559c..6803cb27eecc 100644 --- a/fs/cifs/link.c +++ b/fs/cifs/link.c @@ -286,6 +286,7 @@ out: return rc; } +#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY /* * SMB 1.0 Protocol specific functions */ @@ -368,6 +369,7 @@ cifs_create_mf_symlink(unsigned int xid, struct cifs_tcon *tcon, CIFSSMBClose(xid, tcon, fid.netfid); return rc; } +#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ /* * SMB 2.1/SMB3 Protocol specific functions @@ -532,11 +534,15 @@ cifs_hardlink(struct dentry *old_file, struct inode *inode, goto cifs_hl_exit; } +#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY if (tcon->unix_ext) rc = CIFSUnixCreateHardLink(xid, tcon, from_name, to_name, cifs_sb->local_nls, cifs_remap(cifs_sb)); else { +#else + { +#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ server = tcon->ses->server; if 
(!server->ops->create_hardlink) { rc = -ENOSYS; @@ -704,10 +710,12 @@ cifs_symlink(struct user_namespace *mnt_userns, struct inode *inode, /* BB what if DFS and this volume is on different share? BB */ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS) rc = create_mf_symlink(xid, pTcon, cifs_sb, full_path, symname); +#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY else if (pTcon->unix_ext) rc = CIFSUnixCreateSymLink(xid, pTcon, full_path, symname, cifs_sb->local_nls, cifs_remap(cifs_sb)); +#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ /* else rc = CIFSCreateReparseSymLink(xid, pTcon, fromName, toName, cifs_sb_target->local_nls); */ diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c index 0e84e6fcf8ab..87f60f736731 100644 --- a/fs/cifs/misc.c +++ b/fs/cifs/misc.c @@ -23,6 +23,7 @@ #include "dns_resolve.h" #endif #include "fs_context.h" +#include "cached_dir.h" extern mempool_t *cifs_sm_req_poolp; extern mempool_t *cifs_req_poolp; @@ -69,6 +70,7 @@ sesInfoAlloc(void) ret_buf = kzalloc(sizeof(struct cifs_ses), GFP_KERNEL); if (ret_buf) { atomic_inc(&sesInfoAllocCount); + spin_lock_init(&ret_buf->ses_lock); ret_buf->ses_status = SES_NEW; ++ret_buf->ses_count; INIT_LIST_HEAD(&ret_buf->smb_ses_list); @@ -115,21 +117,19 @@ tconInfoAlloc(void) ret_buf = kzalloc(sizeof(*ret_buf), GFP_KERNEL); if (!ret_buf) return NULL; - ret_buf->crfid.fid = kzalloc(sizeof(*ret_buf->crfid.fid), GFP_KERNEL); - if (!ret_buf->crfid.fid) { + ret_buf->cfid = init_cached_dir(); + if (!ret_buf->cfid) { kfree(ret_buf); return NULL; } - INIT_LIST_HEAD(&ret_buf->crfid.dirents.entries); - mutex_init(&ret_buf->crfid.dirents.de_mutex); atomic_inc(&tconInfoAllocCount); ret_buf->status = TID_NEW; ++ret_buf->tc_count; + spin_lock_init(&ret_buf->tc_lock); INIT_LIST_HEAD(&ret_buf->openFileList); INIT_LIST_HEAD(&ret_buf->tcon_list); spin_lock_init(&ret_buf->open_file_lock); - mutex_init(&ret_buf->crfid.fid_mutex); spin_lock_init(&ret_buf->stat_lock); atomic_set(&ret_buf->num_local_opens, 0); atomic_set(&ret_buf->num_remote_opens, 0); @@ -138,17 +138,17 @@ tconInfoAlloc(void) } void -tconInfoFree(struct cifs_tcon *buf_to_free) +tconInfoFree(struct cifs_tcon *tcon) { - if (buf_to_free == NULL) { + if (tcon == NULL) { cifs_dbg(FYI, "Null buffer passed to tconInfoFree\n"); return; } + free_cached_dir(tcon); atomic_dec(&tconInfoAllocCount); - kfree(buf_to_free->nativeFileSystem); - kfree_sensitive(buf_to_free->password); - kfree(buf_to_free->crfid.fid); - kfree(buf_to_free); + kfree(tcon->nativeFileSystem); + kfree_sensitive(tcon->password); + kfree(tcon); } struct smb_hdr * @@ -172,9 +172,9 @@ cifs_buf_get(void) /* clear the first few header bytes */ /* for most paths, more is cleared in header_assemble */ memset(ret_buf, 0, buf_size + 3); - atomic_inc(&bufAllocCount); + atomic_inc(&buf_alloc_count); #ifdef CONFIG_CIFS_STATS2 - atomic_inc(&totBufAllocCount); + atomic_inc(&total_buf_alloc_count); #endif /* CONFIG_CIFS_STATS2 */ return ret_buf; @@ -189,7 +189,7 @@ cifs_buf_release(void *buf_to_free) } mempool_free(buf_to_free, cifs_req_poolp); - atomic_dec(&bufAllocCount); + atomic_dec(&buf_alloc_count); return; } @@ -205,9 +205,9 @@ cifs_small_buf_get(void) ret_buf = mempool_alloc(cifs_sm_req_poolp, GFP_NOFS); /* No need to clear memory here, cleared in header assemble */ /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/ - atomic_inc(&smBufAllocCount); + atomic_inc(&small_buf_alloc_count); #ifdef CONFIG_CIFS_STATS2 - atomic_inc(&totSmBufAllocCount); + atomic_inc(&total_small_buf_alloc_count); #endif /* CONFIG_CIFS_STATS2 */ return ret_buf; @@ 
-223,7 +223,7 @@ cifs_small_buf_release(void *buf_to_free) } mempool_free(buf_to_free, cifs_sm_req_poolp); - atomic_dec(&smBufAllocCount); + atomic_dec(&small_buf_alloc_count); return; } @@ -354,7 +354,7 @@ checkSMB(char *buf, unsigned int total_read, struct TCP_Server_Info *server) /* otherwise, there is enough to get to the BCC */ if (check_smb_hdr(smb)) return -EIO; - clc_len = smbCalcSize(smb, server); + clc_len = smbCalcSize(smb); if (4 + rfclen != total_read) { cifs_dbg(VFS, "Length read does not match RFC1001 length %d\n", @@ -400,7 +400,6 @@ is_valid_oplock_break(char *buffer, struct TCP_Server_Info *srv) { struct smb_hdr *buf = (struct smb_hdr *)buffer; struct smb_com_lock_req *pSMB = (struct smb_com_lock_req *)buf; - struct list_head *tmp, *tmp1, *tmp2; struct cifs_ses *ses; struct cifs_tcon *tcon; struct cifsInodeInfo *pCifsInode; @@ -467,18 +466,14 @@ is_valid_oplock_break(char *buffer, struct TCP_Server_Info *srv) /* look up tcon based on tid & uid */ spin_lock(&cifs_tcp_ses_lock); - list_for_each(tmp, &srv->smb_ses_list) { - ses = list_entry(tmp, struct cifs_ses, smb_ses_list); - list_for_each(tmp1, &ses->tcon_list) { - tcon = list_entry(tmp1, struct cifs_tcon, tcon_list); + list_for_each_entry(ses, &srv->smb_ses_list, smb_ses_list) { + list_for_each_entry(tcon, &ses->tcon_list, tcon_list) { if (tcon->tid != buf->Tid) continue; cifs_stats_inc(&tcon->stats.cifs_stats.num_oplock_brks); spin_lock(&tcon->open_file_lock); - list_for_each(tmp2, &tcon->openFileList) { - netfile = list_entry(tmp2, struct cifsFileInfo, - tlist); + list_for_each_entry(netfile, &tcon->openFileList, tlist) { if (pSMB->Fid != netfile->fid.netfid) continue; @@ -742,6 +737,8 @@ cifs_close_deferred_file(struct cifsInodeInfo *cifs_inode) list_for_each_entry(cfile, &cifs_inode->openFileList, flist) { if (delayed_work_pending(&cfile->deferred)) { if (cancel_delayed_work(&cfile->deferred)) { + cifs_del_deferred_close(cfile); + tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC); if (tmp_list == NULL) break; @@ -763,16 +760,16 @@ void cifs_close_all_deferred_files(struct cifs_tcon *tcon) { struct cifsFileInfo *cfile; - struct list_head *tmp; struct file_list *tmp_list, *tmp_next_list; struct list_head file_head; INIT_LIST_HEAD(&file_head); spin_lock(&tcon->open_file_lock); - list_for_each(tmp, &tcon->openFileList) { - cfile = list_entry(tmp, struct cifsFileInfo, tlist); + list_for_each_entry(cfile, &tcon->openFileList, tlist) { if (delayed_work_pending(&cfile->deferred)) { if (cancel_delayed_work(&cfile->deferred)) { + cifs_del_deferred_close(cfile); + tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC); if (tmp_list == NULL) break; @@ -793,7 +790,6 @@ void cifs_close_deferred_file_under_dentry(struct cifs_tcon *tcon, const char *path) { struct cifsFileInfo *cfile; - struct list_head *tmp; struct file_list *tmp_list, *tmp_next_list; struct list_head file_head; void *page; @@ -802,12 +798,13 @@ cifs_close_deferred_file_under_dentry(struct cifs_tcon *tcon, const char *path) INIT_LIST_HEAD(&file_head); page = alloc_dentry_path(); spin_lock(&tcon->open_file_lock); - list_for_each(tmp, &tcon->openFileList) { - cfile = list_entry(tmp, struct cifsFileInfo, tlist); + list_for_each_entry(cfile, &tcon->openFileList, tlist) { full_path = build_path_from_dentry(cfile->dentry, page); if (strstr(full_path, path)) { if (delayed_work_pending(&cfile->deferred)) { if (cancel_delayed_work(&cfile->deferred)) { + cifs_del_deferred_close(cfile); + tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC); if (tmp_list == 
NULL) break; @@ -1029,7 +1026,7 @@ setup_aio_ctx_iter(struct cifs_aio_ctx *ctx, struct iov_iter *iter, int rw) saved_len = count; while (count && npages < max_pages) { - rc = iov_iter_get_pages(iter, pages, count, max_pages, &start); + rc = iov_iter_get_pages2(iter, pages, count, max_pages, &start); if (rc < 0) { cifs_dbg(VFS, "Couldn't get user pages (rc=%zd)\n", rc); break; } @@ -1041,7 +1038,6 @@ setup_aio_ctx_iter(struct cifs_aio_ctx *ctx, struct iov_iter *iter, int rw) break; } - iov_iter_advance(iter, rc); count -= rc; rc += start; cur_npages = DIV_ROUND_UP(rc, PAGE_SIZE); diff --git a/fs/cifs/netmisc.c b/fs/cifs/netmisc.c index 235aa1b395eb..1b52e6ac431c 100644 --- a/fs/cifs/netmisc.c +++ b/fs/cifs/netmisc.c @@ -909,9 +909,9 @@ map_and_check_smb_error(struct mid_q_entry *mid, bool logErr) * portion, the number of word parameters and the data portion of the message */ unsigned int -smbCalcSize(void *buf, struct TCP_Server_Info *server) +smbCalcSize(void *buf) { - struct smb_hdr *ptr = (struct smb_hdr *)buf; + struct smb_hdr *ptr = buf; return (sizeof(struct smb_hdr) + (2 * ptr->WordCount) + 2 /* size of the bcc field */ + get_bcc(ptr)); } diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c index 384cabdf47ca..8e060c00c969 100644 --- a/fs/cifs/readdir.c +++ b/fs/cifs/readdir.c @@ -21,6 +21,7 @@ #include "cifsfs.h" #include "smb2proto.h" #include "fs_context.h" +#include "cached_dir.h" /* * To be safe - for UCS to UTF-8 with strings loaded with the rare long @@ -805,8 +806,7 @@ find_cifs_entry(const unsigned int xid, struct cifs_tcon *tcon, loff_t pos, end_of_smb = cfile->srch_inf.ntwrk_buf_start + server->ops->calc_smb_size( - cfile->srch_inf.ntwrk_buf_start, - server); + cfile->srch_inf.ntwrk_buf_start); cur_ent = cfile->srch_inf.srch_entries_start; first_entry_in_buffer = cfile->srch_inf.index_of_last_entry @@ -1071,7 +1071,7 @@ int cifs_readdir(struct file *file, struct dir_context *ctx) tcon = tlink_tcon(cifsFile->tlink); } - rc = open_cached_dir(xid, tcon, full_path, cifs_sb, &cfid); + rc = open_cached_dir(xid, tcon, full_path, cifs_sb, false, &cfid); cifs_put_tlink(tlink); if (rc) goto cache_not_found; @@ -1142,7 +1142,7 @@ int cifs_readdir(struct file *file, struct dir_context *ctx) tcon = tlink_tcon(cifsFile->tlink); rc = find_cifs_entry(xid, tcon, ctx->pos, file, full_path, &current_entry, &num_to_fill); - open_cached_dir(xid, tcon, full_path, cifs_sb, &cfid); + open_cached_dir(xid, tcon, full_path, cifs_sb, false, &cfid); if (rc) { cifs_dbg(FYI, "fce error %d\n", rc); goto rddir2_exit; @@ -1160,8 +1160,7 @@ int cifs_readdir(struct file *file, struct dir_context *ctx) cifs_dbg(FYI, "loop through %d times filling dir for net buf %p\n", num_to_fill, cifsFile->srch_inf.ntwrk_buf_start); max_len = tcon->ses->server->ops->calc_smb_size( - cifsFile->srch_inf.ntwrk_buf_start, - tcon->ses->server); + cifsFile->srch_inf.ntwrk_buf_start); end_of_smb = cifsFile->srch_inf.ntwrk_buf_start + max_len; tmp_buf = kmalloc(UNICODE_NAME_MAX, GFP_KERNEL); diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c index b85718f32b53..3af3b05b6c74 100644 --- a/fs/cifs/sess.c +++ b/fs/cifs/sess.c @@ -474,6 +474,14 @@ cifs_ses_add_channel(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses, out: if (rc && chan->server) { + /* + * we should avoid race with these delayed works before we + * remove this channel + */ + cancel_delayed_work_sync(&chan->server->echo); + cancel_delayed_work_sync(&chan->server->resolve); + cancel_delayed_work_sync(&chan->server->reconnect); + spin_lock(&ses->chan_lock); /* we rely on all bits beyond 
chan_count to be clear */ cifs_chan_clear_need_reconnect(ses, chan->server); @@ -484,14 +492,14 @@ out: */ WARN_ON(ses->chan_count < 1); spin_unlock(&ses->chan_lock); - } - if (rc && chan->server) cifs_put_tcp_session(chan->server, 0); + } return rc; } +#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY static __u32 cifs_ssetup_hdr(struct cifs_ses *ses, struct TCP_Server_Info *server, SESSION_SETUP_ANDX *pSMB) @@ -584,7 +592,6 @@ static void unicode_domain_string(char **pbcc_area, struct cifs_ses *ses, *pbcc_area = bcc_ptr; } - static void unicode_ssetup_strings(char **pbcc_area, struct cifs_ses *ses, const struct nls_table *nls_cp) { @@ -746,6 +753,7 @@ static void decode_ascii_ssetup(char **pbcc_area, __u16 bleft, for it later, but it is not very important */ cifs_dbg(FYI, "ascii: bytes left %d\n", bleft); } +#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ int decode_ntlmssp_challenge(char *bcc_ptr, int blob_len, struct cifs_ses *ses) @@ -1163,6 +1171,7 @@ struct sess_data { struct kvec iov[3]; }; +#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY static int sess_alloc_buffer(struct sess_data *sess_data, int wct) { @@ -1839,3 +1848,4 @@ out: kfree(sess_data); return rc; } +#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c index 2e20ee4dab7b..f36b2d2d40ca 100644 --- a/fs/cifs/smb1ops.c +++ b/fs/cifs/smb1ops.c @@ -92,17 +92,17 @@ cifs_find_mid(struct TCP_Server_Info *server, char *buffer) struct smb_hdr *buf = (struct smb_hdr *)buffer; struct mid_q_entry *mid; - spin_lock(&GlobalMid_Lock); + spin_lock(&server->mid_lock); list_for_each_entry(mid, &server->pending_mid_q, qhead) { if (compare_mid(mid->mid, buf) && mid->mid_state == MID_REQUEST_SUBMITTED && le16_to_cpu(mid->command) == buf->Command) { kref_get(&mid->refcount); - spin_unlock(&GlobalMid_Lock); + spin_unlock(&server->mid_lock); return mid; } } - spin_unlock(&GlobalMid_Lock); + spin_unlock(&server->mid_lock); return NULL; } @@ -166,7 +166,7 @@ cifs_get_next_mid(struct TCP_Server_Info *server) __u16 last_mid, cur_mid; bool collision, reconnect = false; - spin_lock(&GlobalMid_Lock); + spin_lock(&server->mid_lock); /* mid is 16 bit only for CIFS/SMB */ cur_mid = (__u16)((server->CurrentMid) & 0xffff); @@ -225,7 +225,7 @@ cifs_get_next_mid(struct TCP_Server_Info *server) } cur_mid++; } - spin_unlock(&GlobalMid_Lock); + spin_unlock(&server->mid_lock); if (reconnect) { cifs_signal_cifsd_for_reconnect(server, false); diff --git a/fs/cifs/smb2file.c b/fs/cifs/smb2file.c index f5dcc4940b6d..9dfd2dd612c2 100644 --- a/fs/cifs/smb2file.c +++ b/fs/cifs/smb2file.c @@ -61,7 +61,6 @@ smb2_open_file(const unsigned int xid, struct cifs_open_parms *oparms, nr_ioctl_req.Reserved = 0; rc = SMB2_ioctl(xid, oparms->tcon, fid->persistent_fid, fid->volatile_fid, FSCTL_LMR_REQUEST_RESILIENCY, - true /* is_fsctl */, (char *)&nr_ioctl_req, sizeof(nr_ioctl_req), CIFSMaxBufSize, NULL, NULL /* no return info */); if (rc == -EOPNOTSUPP) { diff --git a/fs/cifs/smb2inode.c b/fs/cifs/smb2inode.c index 8571a459c710..b83f59051b26 100644 --- a/fs/cifs/smb2inode.c +++ b/fs/cifs/smb2inode.c @@ -23,6 +23,7 @@ #include "smb2glob.h" #include "smb2pdu.h" #include "smb2proto.h" +#include "cached_dir.h" static void free_set_inf_compound(struct smb_rqst *rqst) @@ -515,16 +516,16 @@ smb2_query_path_info(const unsigned int xid, struct cifs_tcon *tcon, if (strcmp(full_path, "")) rc = -ENOENT; else - rc = open_cached_dir(xid, tcon, full_path, cifs_sb, &cfid); + rc = open_cached_dir(xid, tcon, full_path, cifs_sb, false, &cfid); /* If it is a root and its 
handle is cached then use it */ if (!rc) { - if (tcon->crfid.file_all_info_is_valid) { + if (cfid->file_all_info_is_valid) { move_smb2_info_to_cifs(data, - &tcon->crfid.file_all_info); + &cfid->file_all_info); } else { rc = SMB2_query_info(xid, tcon, - cfid->fid->persistent_fid, - cfid->fid->volatile_fid, smb2_data); + cfid->fid.persistent_fid, + cfid->fid.volatile_fid, smb2_data); if (!rc) move_smb2_info_to_cifs(data, smb2_data); } diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c index 17813c3d0c6e..d73e5672aac4 100644 --- a/fs/cifs/smb2misc.c +++ b/fs/cifs/smb2misc.c @@ -16,6 +16,7 @@ #include "smb2status.h" #include "smb2glob.h" #include "nterr.h" +#include "cached_dir.h" static int check_smb2_hdr(struct smb2_hdr *shdr, __u64 mid) @@ -132,15 +133,15 @@ static __u32 get_neg_ctxt_len(struct smb2_hdr *hdr, __u32 len, } int -smb2_check_message(char *buf, unsigned int len, struct TCP_Server_Info *srvr) +smb2_check_message(char *buf, unsigned int len, struct TCP_Server_Info *server) { struct smb2_hdr *shdr = (struct smb2_hdr *)buf; struct smb2_pdu *pdu = (struct smb2_pdu *)shdr; - __u64 mid; - __u32 clc_len; /* calculated length */ - int command; - int pdu_size = sizeof(struct smb2_pdu); int hdr_size = sizeof(struct smb2_hdr); + int pdu_size = sizeof(struct smb2_pdu); + int command; + __u32 calc_len; /* calculated length */ + __u64 mid; /* * Add function to do table lookup of StructureSize by command @@ -154,7 +155,7 @@ smb2_check_message(char *buf, unsigned int len, struct TCP_Server_Info *srvr) /* decrypt frame now that it is completely read in */ spin_lock(&cifs_tcp_ses_lock); - list_for_each_entry(iter, &srvr->smb_ses_list, smb_ses_list) { + list_for_each_entry(iter, &server->smb_ses_list, smb_ses_list) { if (iter->Suid == le64_to_cpu(thdr->SessionId)) { ses = iter; break; @@ -221,30 +222,33 @@ smb2_check_message(char *buf, unsigned int len, struct TCP_Server_Info *srvr) } } - clc_len = smb2_calc_size(buf, srvr); + calc_len = smb2_calc_size(buf); + + /* For SMB2_IOCTL, OutputOffset and OutputLength are optional, so might + * be 0, and not a real miscalculation */ + if (command == SMB2_IOCTL_HE && calc_len == 0) + return 0; - if (shdr->Command == SMB2_NEGOTIATE) - clc_len += get_neg_ctxt_len(shdr, len, clc_len); + if (command == SMB2_NEGOTIATE_HE) + calc_len += get_neg_ctxt_len(shdr, len, calc_len); - if (len != clc_len) { - cifs_dbg(FYI, "Calculated size %u length %u mismatch mid %llu\n", - clc_len, len, mid); + if (len != calc_len) { /* create failed on symlink */ if (command == SMB2_CREATE_HE && shdr->Status == STATUS_STOPPED_ON_SYMLINK) return 0; /* Windows 7 server returns 24 bytes more */ - if (clc_len + 24 == len && command == SMB2_OPLOCK_BREAK_HE) + if (calc_len + 24 == len && command == SMB2_OPLOCK_BREAK_HE) return 0; /* server can return one byte more due to implied bcc[0] */ - if (clc_len == len + 1) + if (calc_len == len + 1) return 0; /* * Some windows servers (win2016) will pad also the final * PDU in a compound to 8 bytes. */ - if (((clc_len + 7) & ~7) == len) + if (((calc_len + 7) & ~7) == len) return 0; /* @@ -253,12 +257,18 @@ smb2_check_message(char *buf, unsigned int len, struct TCP_Server_Info *srvr) * SMB2/SMB3 frame length (header + smb2 response specific data) * Some windows servers also pad up to 8 bytes when compounding. */ - if (clc_len < len) + if (calc_len < len) return 0; - pr_warn_once( - "srv rsp too short, len %d not %d. 
cmd:%d mid:%llu\n", - len, clc_len, command, mid); + /* Only log a message if len was really miscalculated */ + if (unlikely(cifsFYI)) + cifs_dbg(FYI, "Server response too short: calculated " + "length %u doesn't match read length %u (cmd=%d, mid=%llu)\n", + calc_len, len, command, mid); + else + pr_warn("Server response too short: calculated length " + "%u doesn't match read length %u (cmd=%d, mid=%llu)\n", + calc_len, len, command, mid); return 1; } @@ -400,9 +410,9 @@ smb2_get_data_area_len(int *off, int *len, struct smb2_hdr *shdr) * portion, the number of word parameters and the data portion of the message. */ unsigned int -smb2_calc_size(void *buf, struct TCP_Server_Info *srvr) +smb2_calc_size(void *buf) { - struct smb2_pdu *pdu = (struct smb2_pdu *)buf; + struct smb2_pdu *pdu = buf; struct smb2_hdr *shdr = &pdu->hdr; int offset; /* the offset from the beginning of SMB to data area */ int data_length; /* the length of the variable length data area */ @@ -639,15 +649,7 @@ smb2_is_valid_lease_break(char *buffer) } spin_unlock(&tcon->open_file_lock); - if (tcon->crfid.is_valid && - !memcmp(rsp->LeaseKey, - tcon->crfid.fid->lease_key, - SMB2_LEASE_KEY_SIZE)) { - tcon->crfid.time = 0; - INIT_WORK(&tcon->crfid.lease_break, - smb2_cached_lease_break); - queue_work(cifsiod_wq, - &tcon->crfid.lease_break); + if (cached_dir_lease_break(tcon, rsp->LeaseKey)) { spin_unlock(&cifs_tcp_ses_lock); return true; } diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c index 8802995b2d3d..421be43af425 100644 --- a/fs/cifs/smb2ops.c +++ b/fs/cifs/smb2ops.c @@ -27,6 +27,7 @@ #include "smbdirect.h" #include "fscache.h" #include "fs_context.h" +#include "cached_dir.h" /* Change credits for different ops and return the total number of credits */ static int @@ -126,13 +127,13 @@ smb2_add_credits(struct TCP_Server_Info *server, optype, scredits, add); } - spin_lock(&cifs_tcp_ses_lock); + spin_lock(&server->srv_lock); if (server->tcpStatus == CifsNeedReconnect || server->tcpStatus == CifsExiting) { - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&server->srv_lock); return; } - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&server->srv_lock); switch (rc) { case -1: @@ -218,12 +219,12 @@ smb2_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size, spin_lock(&server->req_lock); } else { spin_unlock(&server->req_lock); - spin_lock(&cifs_tcp_ses_lock); + spin_lock(&server->srv_lock); if (server->tcpStatus == CifsExiting) { - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&server->srv_lock); return -ENOENT; } - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&server->srv_lock); spin_lock(&server->req_lock); scredits = server->credits; @@ -319,19 +320,19 @@ smb2_get_next_mid(struct TCP_Server_Info *server) { __u64 mid; /* for SMB2 we need the current value */ - spin_lock(&GlobalMid_Lock); + spin_lock(&server->mid_lock); mid = server->CurrentMid++; - spin_unlock(&GlobalMid_Lock); + spin_unlock(&server->mid_lock); return mid; } static void smb2_revert_current_mid(struct TCP_Server_Info *server, const unsigned int val) { - spin_lock(&GlobalMid_Lock); + spin_lock(&server->mid_lock); if (server->CurrentMid >= val) server->CurrentMid -= val; - spin_unlock(&GlobalMid_Lock); + spin_unlock(&server->mid_lock); } static struct mid_q_entry * @@ -346,7 +347,7 @@ __smb2_find_mid(struct TCP_Server_Info *server, char *buf, bool dequeue) return NULL; } - spin_lock(&GlobalMid_Lock); + spin_lock(&server->mid_lock); list_for_each_entry(mid, &server->pending_mid_q, qhead) { if ((mid->mid == wire_mid) && (mid->mid_state == 
MID_REQUEST_SUBMITTED) && @@ -356,11 +357,11 @@ __smb2_find_mid(struct TCP_Server_Info *server, char *buf, bool dequeue) list_del_init(&mid->qhead); mid->mid_flags |= MID_DELETED; } - spin_unlock(&GlobalMid_Lock); + spin_unlock(&server->mid_lock); return mid; } } - spin_unlock(&GlobalMid_Lock); + spin_unlock(&server->mid_lock); return NULL; } @@ -386,7 +387,7 @@ smb2_dump_detail(void *buf, struct TCP_Server_Info *server) shdr->Command, shdr->Status, shdr->Flags, shdr->MessageId, shdr->Id.SyncId.ProcessId); cifs_server_dbg(VFS, "smb buf %p len %u\n", buf, - server->ops->calc_smb_size(buf, server)); + server->ops->calc_smb_size(buf)); #endif } @@ -403,9 +404,9 @@ smb2_negotiate(const unsigned int xid, { int rc; - spin_lock(&GlobalMid_Lock); + spin_lock(&server->mid_lock); server->CurrentMid = 0; - spin_unlock(&GlobalMid_Lock); + spin_unlock(&server->mid_lock); rc = SMB2_negotiate(xid, ses, server); /* BB we probably don't need to retry with modern servers */ if (rc == -EAGAIN) @@ -680,7 +681,7 @@ SMB3_request_interfaces(const unsigned int xid, struct cifs_tcon *tcon) struct cifs_ses *ses = tcon->ses; rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID, - FSCTL_QUERY_NETWORK_INTERFACE_INFO, true /* is_fsctl */, + FSCTL_QUERY_NETWORK_INTERFACE_INFO, NULL /* no data input */, 0 /* no data input */, CIFSMaxBufSize, (char **)&out_buf, &ret_data_len); if (rc == -EOPNOTSUPP) { @@ -702,300 +703,6 @@ out: } static void -smb2_close_cached_fid(struct kref *ref) -{ - struct cached_fid *cfid = container_of(ref, struct cached_fid, - refcount); - struct cached_dirent *dirent, *q; - - if (cfid->is_valid) { - cifs_dbg(FYI, "clear cached root file handle\n"); - SMB2_close(0, cfid->tcon, cfid->fid->persistent_fid, - cfid->fid->volatile_fid); - } - - /* - * We only check validity above to send SMB2_close, - * but we still need to invalidate these entries - * when this function is called - */ - cfid->is_valid = false; - cfid->file_all_info_is_valid = false; - cfid->has_lease = false; - if (cfid->dentry) { - dput(cfid->dentry); - cfid->dentry = NULL; - } - /* - * Delete all cached dirent names - */ - mutex_lock(&cfid->dirents.de_mutex); - list_for_each_entry_safe(dirent, q, &cfid->dirents.entries, entry) { - list_del(&dirent->entry); - kfree(dirent->name); - kfree(dirent); - } - cfid->dirents.is_valid = 0; - cfid->dirents.is_failed = 0; - cfid->dirents.ctx = NULL; - cfid->dirents.pos = 0; - mutex_unlock(&cfid->dirents.de_mutex); - -} - -void close_cached_dir(struct cached_fid *cfid) -{ - mutex_lock(&cfid->fid_mutex); - kref_put(&cfid->refcount, smb2_close_cached_fid); - mutex_unlock(&cfid->fid_mutex); -} - -void close_cached_dir_lease_locked(struct cached_fid *cfid) -{ - if (cfid->has_lease) { - cfid->has_lease = false; - kref_put(&cfid->refcount, smb2_close_cached_fid); - } -} - -void close_cached_dir_lease(struct cached_fid *cfid) -{ - mutex_lock(&cfid->fid_mutex); - close_cached_dir_lease_locked(cfid); - mutex_unlock(&cfid->fid_mutex); -} - -void -smb2_cached_lease_break(struct work_struct *work) -{ - struct cached_fid *cfid = container_of(work, - struct cached_fid, lease_break); - - close_cached_dir_lease(cfid); -} - -/* - * Open the and cache a directory handle. - * Only supported for the root handle. - * If error then *cfid is not initialized. 
- */ -int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon, - const char *path, - struct cifs_sb_info *cifs_sb, - struct cached_fid **cfid) -{ - struct cifs_ses *ses; - struct TCP_Server_Info *server; - struct cifs_open_parms oparms; - struct smb2_create_rsp *o_rsp = NULL; - struct smb2_query_info_rsp *qi_rsp = NULL; - int resp_buftype[2]; - struct smb_rqst rqst[2]; - struct kvec rsp_iov[2]; - struct kvec open_iov[SMB2_CREATE_IOV_SIZE]; - struct kvec qi_iov[1]; - int rc, flags = 0; - __le16 utf16_path = 0; /* Null - since an open of top of share */ - u8 oplock = SMB2_OPLOCK_LEVEL_II; - struct cifs_fid *pfid; - struct dentry *dentry; - - if (tcon == NULL || tcon->nohandlecache || - is_smb1_server(tcon->ses->server)) - return -ENOTSUPP; - - ses = tcon->ses; - server = ses->server; - - if (cifs_sb->root == NULL) - return -ENOENT; - - if (strlen(path)) - return -ENOENT; - - dentry = cifs_sb->root; - - mutex_lock(&tcon->crfid.fid_mutex); - if (tcon->crfid.is_valid) { - cifs_dbg(FYI, "found a cached root file handle\n"); - *cfid = &tcon->crfid; - kref_get(&tcon->crfid.refcount); - mutex_unlock(&tcon->crfid.fid_mutex); - return 0; - } - - /* - * We do not hold the lock for the open because in case - * SMB2_open needs to reconnect, it will end up calling - * cifs_mark_open_files_invalid() which takes the lock again - * thus causing a deadlock - */ - - mutex_unlock(&tcon->crfid.fid_mutex); - - if (smb3_encryption_required(tcon)) - flags |= CIFS_TRANSFORM_REQ; - - if (!server->ops->new_lease_key) - return -EIO; - - pfid = tcon->crfid.fid; - server->ops->new_lease_key(pfid); - - memset(rqst, 0, sizeof(rqst)); - resp_buftype[0] = resp_buftype[1] = CIFS_NO_BUFFER; - memset(rsp_iov, 0, sizeof(rsp_iov)); - - /* Open */ - memset(&open_iov, 0, sizeof(open_iov)); - rqst[0].rq_iov = open_iov; - rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE; - - oparms.tcon = tcon; - oparms.create_options = cifs_create_options(cifs_sb, CREATE_NOT_FILE); - oparms.desired_access = FILE_READ_ATTRIBUTES; - oparms.disposition = FILE_OPEN; - oparms.fid = pfid; - oparms.reconnect = false; - - rc = SMB2_open_init(tcon, server, - &rqst[0], &oplock, &oparms, &utf16_path); - if (rc) - goto oshr_free; - smb2_set_next_command(tcon, &rqst[0]); - - memset(&qi_iov, 0, sizeof(qi_iov)); - rqst[1].rq_iov = qi_iov; - rqst[1].rq_nvec = 1; - - rc = SMB2_query_info_init(tcon, server, - &rqst[1], COMPOUND_FID, - COMPOUND_FID, FILE_ALL_INFORMATION, - SMB2_O_INFO_FILE, 0, - sizeof(struct smb2_file_all_info) + - PATH_MAX * 2, 0, NULL); - if (rc) - goto oshr_free; - - smb2_set_related(&rqst[1]); - - rc = compound_send_recv(xid, ses, server, - flags, 2, rqst, - resp_buftype, rsp_iov); - mutex_lock(&tcon->crfid.fid_mutex); - - /* - * Now we need to check again as the cached root might have - * been successfully re-opened from a concurrent process - */ - - if (tcon->crfid.is_valid) { - /* work was already done */ - - /* stash fids for close() later */ - struct cifs_fid fid = { - .persistent_fid = pfid->persistent_fid, - .volatile_fid = pfid->volatile_fid, - }; - - /* - * caller expects this func to set the fid in crfid to valid - * cached root, so increment the refcount. 
- */ - kref_get(&tcon->crfid.refcount); - - mutex_unlock(&tcon->crfid.fid_mutex); - - if (rc == 0) { - /* close extra handle outside of crit sec */ - SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid); - } - rc = 0; - goto oshr_free; - } - - /* Cached root is still invalid, continue normaly */ - - if (rc) { - if (rc == -EREMCHG) { - tcon->need_reconnect = true; - pr_warn_once("server share %s deleted\n", - tcon->treeName); - } - goto oshr_exit; - } - - atomic_inc(&tcon->num_remote_opens); - - o_rsp = (struct smb2_create_rsp *)rsp_iov[0].iov_base; - oparms.fid->persistent_fid = o_rsp->PersistentFileId; - oparms.fid->volatile_fid = o_rsp->VolatileFileId; -#ifdef CONFIG_CIFS_DEBUG2 - oparms.fid->mid = le64_to_cpu(o_rsp->hdr.MessageId); -#endif /* CIFS_DEBUG2 */ - - tcon->crfid.tcon = tcon; - tcon->crfid.is_valid = true; - tcon->crfid.dentry = dentry; - dget(dentry); - kref_init(&tcon->crfid.refcount); - - /* BB TBD check to see if oplock level check can be removed below */ - if (o_rsp->OplockLevel == SMB2_OPLOCK_LEVEL_LEASE) { - /* - * See commit 2f94a3125b87. Increment the refcount when we - * get a lease for root, release it if lease break occurs - */ - kref_get(&tcon->crfid.refcount); - tcon->crfid.has_lease = true; - smb2_parse_contexts(server, o_rsp, - &oparms.fid->epoch, - oparms.fid->lease_key, &oplock, - NULL, NULL); - } else - goto oshr_exit; - - qi_rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base; - if (le32_to_cpu(qi_rsp->OutputBufferLength) < sizeof(struct smb2_file_all_info)) - goto oshr_exit; - if (!smb2_validate_and_copy_iov( - le16_to_cpu(qi_rsp->OutputBufferOffset), - sizeof(struct smb2_file_all_info), - &rsp_iov[1], sizeof(struct smb2_file_all_info), - (char *)&tcon->crfid.file_all_info)) - tcon->crfid.file_all_info_is_valid = true; - tcon->crfid.time = jiffies; - - -oshr_exit: - mutex_unlock(&tcon->crfid.fid_mutex); -oshr_free: - SMB2_open_free(&rqst[0]); - SMB2_query_info_free(&rqst[1]); - free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base); - free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base); - if (rc == 0) - *cfid = &tcon->crfid; - return rc; -} - -int open_cached_dir_by_dentry(struct cifs_tcon *tcon, - struct dentry *dentry, - struct cached_fid **cfid) -{ - mutex_lock(&tcon->crfid.fid_mutex); - if (tcon->crfid.dentry == dentry) { - cifs_dbg(FYI, "found a cached root file handle by dentry\n"); - *cfid = &tcon->crfid; - kref_get(&tcon->crfid.refcount); - mutex_unlock(&tcon->crfid.fid_mutex); - return 0; - } - mutex_unlock(&tcon->crfid.fid_mutex); - return -ENOENT; -} - -static void smb3_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon, struct cifs_sb_info *cifs_sb) { @@ -1013,9 +720,9 @@ smb3_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon, oparms.fid = &fid; oparms.reconnect = false; - rc = open_cached_dir(xid, tcon, "", cifs_sb, &cfid); + rc = open_cached_dir(xid, tcon, "", cifs_sb, false, &cfid); if (rc == 0) - memcpy(&fid, cfid->fid, sizeof(struct cifs_fid)); + memcpy(&fid, &cfid->fid, sizeof(struct cifs_fid)); else rc = SMB2_open(xid, &oparms, &srch_path, &oplock, NULL, NULL, NULL, NULL); @@ -1076,9 +783,16 @@ smb2_is_path_accessible(const unsigned int xid, struct cifs_tcon *tcon, __u8 oplock = SMB2_OPLOCK_LEVEL_NONE; struct cifs_open_parms oparms; struct cifs_fid fid; + struct cached_fid *cfid; - if ((*full_path == 0) && tcon->crfid.is_valid) - return 0; + rc = open_cached_dir(xid, tcon, full_path, cifs_sb, true, &cfid); + if (!rc) { + if (cfid->is_valid) { + close_cached_dir(cfid); + return 0; + } + close_cached_dir(cfid); + } 
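Note: smb2_is_path_accessible() above now probes the share root through the new open_cached_dir()/close_cached_dir() pair (with lookup_only set) instead of peeking at tcon->crfid directly, so every user of the cached handle takes and later drops a reference. A small user-space sketch of that get/use/put discipline, using simplified stand-in types rather than the kernel API:

/* Sketch: open_cached_dir() hands back a referenced handle on success,
 * and the caller must always drop it with close_cached_dir(). */
#include <stdbool.h>
#include <stdio.h>

struct cached_handle {
	int refcount;
	bool is_valid;
};

static struct cached_handle root_handle = { .refcount = 1, .is_valid = true };

static int open_cached(struct cached_handle **out)
{
	root_handle.refcount++;		/* kref_get() in the real code */
	*out = &root_handle;
	return 0;
}

static void close_cached(struct cached_handle *h)
{
	h->refcount--;			/* kref_put() in the real code */
}

int main(void)
{
	struct cached_handle *h;

	if (open_cached(&h) == 0) {
		if (h->is_valid)
			printf("path reachable via cached handle\n");
		close_cached(h);	/* drop the reference on every path */
	}
	return 0;
}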
utf16_path = cifs_convert_path_to_utf16(full_path, cifs_sb); if (!utf16_path) @@ -1145,9 +859,7 @@ move_smb2_ea_to_cifs(char *dst, size_t dst_size, size_t name_len, value_len, user_name_len; while (src_size > 0) { - name = &src->ea_data[0]; name_len = (size_t)src->ea_name_length; - value = &src->ea_data[src->ea_name_length + 1]; value_len = (size_t)le16_to_cpu(src->ea_value_length); if (name_len == 0) @@ -1159,6 +871,9 @@ move_smb2_ea_to_cifs(char *dst, size_t dst_size, goto out; } + name = &src->ea_data[0]; + value = &src->ea_data[src->ea_name_length + 1]; + if (ea_name) { if (ea_name_len == name_len && memcmp(ea_name, name, name_len) == 0) { @@ -1608,9 +1323,8 @@ SMB2_request_res_key(const unsigned int xid, struct cifs_tcon *tcon, struct resume_key_req *res_key; rc = SMB2_ioctl(xid, tcon, persistent_fid, volatile_fid, - FSCTL_SRV_REQUEST_RESUME_KEY, true /* is_fsctl */, - NULL, 0 /* no input */, CIFSMaxBufSize, - (char **)&res_key, &ret_data_len); + FSCTL_SRV_REQUEST_RESUME_KEY, NULL, 0 /* no input */, + CIFSMaxBufSize, (char **)&res_key, &ret_data_len); if (rc == -EOPNOTSUPP) { pr_warn_once("Server share %s does not support copy range\n", tcon->treeName); @@ -1752,7 +1466,7 @@ smb2_ioctl_query_info(const unsigned int xid, rqst[1].rq_nvec = SMB2_IOCTL_IOV_SIZE; rc = SMB2_ioctl_init(tcon, server, &rqst[1], COMPOUND_FID, COMPOUND_FID, - qi.info_type, true, buffer, qi.output_buffer_length, + qi.info_type, buffer, qi.output_buffer_length, CIFSMaxBufSize - MAX_SMB2_CREATE_RESPONSE_SIZE - MAX_SMB2_CLOSE_RESPONSE_SIZE); free_req1_func = SMB2_ioctl_free; @@ -1886,17 +1600,8 @@ smb2_copychunk_range(const unsigned int xid, int chunks_copied = 0; bool chunk_sizes_updated = false; ssize_t bytes_written, total_bytes_written = 0; - struct inode *inode; pcchunk = kmalloc(sizeof(struct copychunk_ioctl), GFP_KERNEL); - - /* - * We need to flush all unwritten data before we can send the - * copychunk ioctl to the server. 
- */ - inode = d_inode(trgtfile->dentry); - filemap_write_and_wait(inode->i_mapping); - if (pcchunk == NULL) return -ENOMEM; @@ -1928,9 +1633,8 @@ smb2_copychunk_range(const unsigned int xid, retbuf = NULL; rc = SMB2_ioctl(xid, tcon, trgtfile->fid.persistent_fid, trgtfile->fid.volatile_fid, FSCTL_SRV_COPYCHUNK_WRITE, - true /* is_fsctl */, (char *)pcchunk, - sizeof(struct copychunk_ioctl), CIFSMaxBufSize, - (char **)&retbuf, &ret_data_len); + (char *)pcchunk, sizeof(struct copychunk_ioctl), + CIFSMaxBufSize, (char **)&retbuf, &ret_data_len); if (rc == 0) { if (ret_data_len != sizeof(struct copychunk_ioctl_rsp)) { @@ -2090,7 +1794,6 @@ static bool smb2_set_sparse(const unsigned int xid, struct cifs_tcon *tcon, rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid, cfile->fid.volatile_fid, FSCTL_SET_SPARSE, - true /* is_fctl */, &setsparse, 1, CIFSMaxBufSize, NULL, NULL); if (rc) { tcon->broken_sparse_sup = true; @@ -2173,7 +1876,6 @@ smb2_duplicate_extents(const unsigned int xid, rc = SMB2_ioctl(xid, tcon, trgtfile->fid.persistent_fid, trgtfile->fid.volatile_fid, FSCTL_DUPLICATE_EXTENTS_TO_FILE, - true /* is_fsctl */, (char *)&dup_ext_buf, sizeof(struct duplicate_extents_to_file), CIFSMaxBufSize, NULL, @@ -2208,7 +1910,6 @@ smb3_set_integrity(const unsigned int xid, struct cifs_tcon *tcon, return SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid, cfile->fid.volatile_fid, FSCTL_SET_INTEGRITY_INFORMATION, - true /* is_fsctl */, (char *)&integr_info, sizeof(struct fsctl_set_integrity_information_req), CIFSMaxBufSize, NULL, @@ -2261,7 +1962,6 @@ smb3_enum_snapshots(const unsigned int xid, struct cifs_tcon *tcon, rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid, cfile->fid.volatile_fid, FSCTL_SRV_ENUMERATE_SNAPSHOTS, - true /* is_fsctl */, NULL, 0 /* no input data */, max_response_size, (char **)&retbuf, &ret_data_len); @@ -2574,7 +2274,6 @@ static void smb2_is_network_name_deleted(char *buf, struct TCP_Server_Info *server) { struct smb2_hdr *shdr = (struct smb2_hdr *)buf; - struct list_head *tmp, *tmp1; struct cifs_ses *ses; struct cifs_tcon *tcon; @@ -2582,12 +2281,12 @@ smb2_is_network_name_deleted(char *buf, struct TCP_Server_Info *server) return; spin_lock(&cifs_tcp_ses_lock); - list_for_each(tmp, &server->smb_ses_list) { - ses = list_entry(tmp, struct cifs_ses, smb_ses_list); - list_for_each(tmp1, &ses->tcon_list) { - tcon = list_entry(tmp1, struct cifs_tcon, tcon_list); + list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) { + list_for_each_entry(tcon, &ses->tcon_list, tcon_list) { if (tcon->tid == le32_to_cpu(shdr->Id.SyncId.TreeId)) { + spin_lock(&tcon->tc_lock); tcon->need_reconnect = true; + spin_unlock(&tcon->tc_lock); spin_unlock(&cifs_tcp_ses_lock); pr_warn_once("Server share %s deleted.\n", tcon->treeName); @@ -2723,8 +2422,12 @@ smb2_query_info_compound(const unsigned int xid, struct cifs_tcon *tcon, resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER; memset(rsp_iov, 0, sizeof(rsp_iov)); + /* + * We can only call this for things we know are directories. 
+ */ if (!strcmp(path, "")) - open_cached_dir(xid, tcon, path, cifs_sb, &cfid); /* cfid null if open dir failed */ + open_cached_dir(xid, tcon, path, cifs_sb, false, + &cfid); /* cfid null if open dir failed */ memset(&open_iov, 0, sizeof(open_iov)); rqst[0].rq_iov = open_iov; @@ -2750,8 +2453,8 @@ smb2_query_info_compound(const unsigned int xid, struct cifs_tcon *tcon, if (cfid) { rc = SMB2_query_info_init(tcon, server, &rqst[1], - cfid->fid->persistent_fid, - cfid->fid->volatile_fid, + cfid->fid.persistent_fid, + cfid->fid.volatile_fid, class, type, 0, output_len, 0, NULL); @@ -2981,7 +2684,6 @@ smb2_get_dfs_refer(const unsigned int xid, struct cifs_ses *ses, do { rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID, FSCTL_DFS_GET_REFERRALS, - true /* is_fsctl */, (char *)dfs_req, dfs_req_size, CIFSMaxBufSize, (char **)&dfs_rsp, &dfs_rsp_size); if (!is_retryable_error(rc)) @@ -3188,8 +2890,7 @@ smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon, rc = SMB2_ioctl_init(tcon, server, &rqst[1], fid.persistent_fid, - fid.volatile_fid, FSCTL_GET_REPARSE_POINT, - true /* is_fctl */, NULL, 0, + fid.volatile_fid, FSCTL_GET_REPARSE_POINT, NULL, 0, CIFSMaxBufSize - MAX_SMB2_CREATE_RESPONSE_SIZE - MAX_SMB2_CLOSE_RESPONSE_SIZE); @@ -3369,8 +3070,7 @@ smb2_query_reparse_tag(const unsigned int xid, struct cifs_tcon *tcon, rc = SMB2_ioctl_init(tcon, server, &rqst[1], COMPOUND_FID, - COMPOUND_FID, FSCTL_GET_REPARSE_POINT, - true /* is_fctl */, NULL, 0, + COMPOUND_FID, FSCTL_GET_REPARSE_POINT, NULL, 0, CIFSMaxBufSize - MAX_SMB2_CREATE_RESPONSE_SIZE - MAX_SMB2_CLOSE_RESPONSE_SIZE); @@ -3598,26 +3298,43 @@ get_smb2_acl(struct cifs_sb_info *cifs_sb, return pntsd; } +static long smb3_zero_data(struct file *file, struct cifs_tcon *tcon, + loff_t offset, loff_t len, unsigned int xid) +{ + struct cifsFileInfo *cfile = file->private_data; + struct file_zero_data_information fsctl_buf; + + cifs_dbg(FYI, "Offset %lld len %lld\n", offset, len); + + fsctl_buf.FileOffset = cpu_to_le64(offset); + fsctl_buf.BeyondFinalZero = cpu_to_le64(offset + len); + + return SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid, + cfile->fid.volatile_fid, FSCTL_SET_ZERO_DATA, + (char *)&fsctl_buf, + sizeof(struct file_zero_data_information), + 0, NULL, NULL); +} + static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon, loff_t offset, loff_t len, bool keep_size) { struct cifs_ses *ses = tcon->ses; - struct inode *inode; - struct cifsInodeInfo *cifsi; + struct inode *inode = file_inode(file); + struct cifsInodeInfo *cifsi = CIFS_I(inode); struct cifsFileInfo *cfile = file->private_data; - struct file_zero_data_information fsctl_buf; long rc; unsigned int xid; __le64 eof; xid = get_xid(); - inode = d_inode(cfile->dentry); - cifsi = CIFS_I(inode); - trace_smb3_zero_enter(xid, cfile->fid.persistent_fid, tcon->tid, ses->Suid, offset, len); + inode_lock(inode); + filemap_invalidate_lock(inode->i_mapping); + /* * We zero the range through ioctl, so we need remove the page caches * first, otherwise the data may be inconsistent with the server. 
@@ -3625,26 +3342,12 @@ static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon, truncate_pagecache_range(inode, offset, offset + len - 1); /* if file not oplocked can't be sure whether asking to extend size */ - if (!CIFS_CACHE_READ(cifsi)) - if (keep_size == false) { - rc = -EOPNOTSUPP; - trace_smb3_zero_err(xid, cfile->fid.persistent_fid, - tcon->tid, ses->Suid, offset, len, rc); - free_xid(xid); - return rc; - } - - cifs_dbg(FYI, "Offset %lld len %lld\n", offset, len); - - fsctl_buf.FileOffset = cpu_to_le64(offset); - fsctl_buf.BeyondFinalZero = cpu_to_le64(offset + len); + rc = -EOPNOTSUPP; + if (keep_size == false && !CIFS_CACHE_READ(cifsi)) + goto zero_range_exit; - rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid, - cfile->fid.volatile_fid, FSCTL_SET_ZERO_DATA, true, - (char *)&fsctl_buf, - sizeof(struct file_zero_data_information), - 0, NULL, NULL); - if (rc) + rc = smb3_zero_data(file, tcon, offset, len, xid); + if (rc < 0) goto zero_range_exit; /* @@ -3657,6 +3360,8 @@ static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon, } zero_range_exit: + filemap_invalidate_unlock(inode->i_mapping); + inode_unlock(inode); free_xid(xid); if (rc) trace_smb3_zero_err(xid, cfile->fid.persistent_fid, tcon->tid, @@ -3670,7 +3375,7 @@ static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon, static long smb3_punch_hole(struct file *file, struct cifs_tcon *tcon, loff_t offset, loff_t len) { - struct inode *inode; + struct inode *inode = file_inode(file); struct cifsFileInfo *cfile = file->private_data; struct file_zero_data_information fsctl_buf; long rc; @@ -3679,14 +3384,12 @@ static long smb3_punch_hole(struct file *file, struct cifs_tcon *tcon, xid = get_xid(); - inode = d_inode(cfile->dentry); - + inode_lock(inode); /* Need to make file sparse, if not already, before freeing range. 
*/ /* Consider adding equivalent for compressed since it could also work */ if (!smb2_set_sparse(xid, tcon, cfile, inode, set_sparse)) { rc = -EOPNOTSUPP; - free_xid(xid); - return rc; + goto out; } filemap_invalidate_lock(inode->i_mapping); @@ -3703,11 +3406,13 @@ static long smb3_punch_hole(struct file *file, struct cifs_tcon *tcon, rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid, cfile->fid.volatile_fid, FSCTL_SET_ZERO_DATA, - true /* is_fctl */, (char *)&fsctl_buf, + (char *)&fsctl_buf, sizeof(struct file_zero_data_information), CIFSMaxBufSize, NULL, NULL); - free_xid(xid); filemap_invalidate_unlock(inode->i_mapping); +out: + inode_unlock(inode); + free_xid(xid); return rc; } @@ -3763,7 +3468,7 @@ static int smb3_simple_fallocate_range(unsigned int xid, in_data.length = cpu_to_le64(len); rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid, cfile->fid.volatile_fid, - FSCTL_QUERY_ALLOCATED_RANGES, true, + FSCTL_QUERY_ALLOCATED_RANGES, (char *)&in_data, sizeof(in_data), 1024 * sizeof(struct file_allocated_range_buffer), (char **)&out_data, &out_data_len); @@ -3964,39 +3669,50 @@ static long smb3_collapse_range(struct file *file, struct cifs_tcon *tcon, { int rc; unsigned int xid; - struct inode *inode; + struct inode *inode = file_inode(file); struct cifsFileInfo *cfile = file->private_data; - struct cifsInodeInfo *cifsi; + struct cifsInodeInfo *cifsi = CIFS_I(inode); __le64 eof; + loff_t old_eof; xid = get_xid(); - inode = d_inode(cfile->dentry); - cifsi = CIFS_I(inode); + inode_lock(inode); - if (off >= i_size_read(inode) || - off + len >= i_size_read(inode)) { + old_eof = i_size_read(inode); + if ((off >= old_eof) || + off + len >= old_eof) { rc = -EINVAL; goto out; } + filemap_invalidate_lock(inode->i_mapping); + rc = filemap_write_and_wait_range(inode->i_mapping, off, old_eof - 1); + if (rc < 0) + goto out_2; + + truncate_pagecache_range(inode, off, old_eof); + rc = smb2_copychunk_range(xid, cfile, cfile, off + len, - i_size_read(inode) - off - len, off); + old_eof - off - len, off); if (rc < 0) - goto out; + goto out_2; - eof = cpu_to_le64(i_size_read(inode) - len); + eof = cpu_to_le64(old_eof - len); rc = SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid, cfile->fid.volatile_fid, cfile->pid, &eof); if (rc < 0) - goto out; + goto out_2; rc = 0; cifsi->server_eof = i_size_read(inode) - len; truncate_setsize(inode, cifsi->server_eof); fscache_resize_cookie(cifs_inode_cookie(inode), cifsi->server_eof); +out_2: + filemap_invalidate_unlock(inode->i_mapping); out: + inode_unlock(inode); free_xid(xid); return rc; } @@ -4007,34 +3723,47 @@ static long smb3_insert_range(struct file *file, struct cifs_tcon *tcon, int rc; unsigned int xid; struct cifsFileInfo *cfile = file->private_data; + struct inode *inode = file_inode(file); __le64 eof; - __u64 count; + __u64 count, old_eof; xid = get_xid(); - if (off >= i_size_read(file->f_inode)) { + inode_lock(inode); + + old_eof = i_size_read(inode); + if (off >= old_eof) { rc = -EINVAL; goto out; } - count = i_size_read(file->f_inode) - off; - eof = cpu_to_le64(i_size_read(file->f_inode) + len); + count = old_eof - off; + eof = cpu_to_le64(old_eof + len); + + filemap_invalidate_lock(inode->i_mapping); + rc = filemap_write_and_wait_range(inode->i_mapping, off, old_eof + len - 1); + if (rc < 0) + goto out_2; + truncate_pagecache_range(inode, off, old_eof); rc = SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid, cfile->fid.volatile_fid, cfile->pid, &eof); if (rc < 0) - goto out; + goto out_2; rc = smb2_copychunk_range(xid, cfile, cfile, off, count, 
off + len); if (rc < 0) - goto out; + goto out_2; - rc = smb3_zero_range(file, tcon, off, len, 1); + rc = smb3_zero_data(file, tcon, off, len, xid); if (rc < 0) - goto out; + goto out_2; rc = 0; +out_2: + filemap_invalidate_unlock(inode->i_mapping); out: + inode_unlock(inode); free_xid(xid); return rc; } @@ -4084,7 +3813,7 @@ static loff_t smb3_llseek(struct file *file, struct cifs_tcon *tcon, loff_t offs rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid, cfile->fid.volatile_fid, - FSCTL_QUERY_ALLOCATED_RANGES, true, + FSCTL_QUERY_ALLOCATED_RANGES, (char *)&in_data, sizeof(in_data), sizeof(struct file_allocated_range_buffer), (char **)&out_data, &out_data_len); @@ -4144,7 +3873,7 @@ static int smb3_fiemap(struct cifs_tcon *tcon, rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid, cfile->fid.volatile_fid, - FSCTL_QUERY_ALLOCATED_RANGES, true, + FSCTL_QUERY_ALLOCATED_RANGES, (char *)&in_data, sizeof(in_data), 1024 * sizeof(struct file_allocated_range_buffer), (char **)&out_data, &out_data_len); @@ -4563,9 +4292,11 @@ smb2_get_enc_key(struct TCP_Server_Info *server, __u64 ses_id, int enc, u8 *key) list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) { list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) { if (ses->Suid == ses_id) { + spin_lock(&ses->ses_lock); ses_enc_key = enc ? ses->smb3encryptionkey : ses->smb3decryptionkey; memcpy(key, ses_enc_key, SMB3_ENC_DEC_KEY_SIZE); + spin_unlock(&ses->ses_lock); spin_unlock(&cifs_tcp_ses_lock); return 0; } @@ -5080,23 +4811,24 @@ static void smb2_decrypt_offload(struct work_struct *work) mid->callback(mid); } else { - spin_lock(&cifs_tcp_ses_lock); - spin_lock(&GlobalMid_Lock); + spin_lock(&dw->server->srv_lock); if (dw->server->tcpStatus == CifsNeedReconnect) { + spin_lock(&dw->server->mid_lock); mid->mid_state = MID_RETRY_NEEDED; - spin_unlock(&GlobalMid_Lock); - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&dw->server->mid_lock); + spin_unlock(&dw->server->srv_lock); mid->callback(mid); } else { + spin_lock(&dw->server->mid_lock); mid->mid_state = MID_REQUEST_SUBMITTED; mid->mid_flags &= ~(MID_DELETED); list_add_tail(&mid->qhead, &dw->server->pending_mid_q); - spin_unlock(&GlobalMid_Lock); - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&dw->server->mid_lock); + spin_unlock(&dw->server->srv_lock); } } - cifs_mid_q_entry_release(mid); + release_mid(mid); } free_pages: diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c index 12b4dddaedb0..6352ab32c7e7 100644 --- a/fs/cifs/smb2pdu.c +++ b/fs/cifs/smb2pdu.c @@ -39,6 +39,7 @@ #ifdef CONFIG_CIFS_DFS_UPCALL #include "dfs_cache.h" #endif +#include "cached_dir.h" /* * The following table defines the expected "StructureSize" of SMB2 requests @@ -162,7 +163,7 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon, if (smb2_command == SMB2_TREE_CONNECT || smb2_command == SMB2_IOCTL) return 0; - spin_lock(&cifs_tcp_ses_lock); + spin_lock(&tcon->tc_lock); if (tcon->status == TID_EXITING) { /* * only tree disconnect, open, and write, @@ -172,13 +173,13 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon, if ((smb2_command != SMB2_WRITE) && (smb2_command != SMB2_CREATE) && (smb2_command != SMB2_TREE_DISCONNECT)) { - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&tcon->tc_lock); cifs_dbg(FYI, "can not send cmd %d while umounting\n", smb2_command); return -ENODEV; } } - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&tcon->tc_lock); if ((!tcon->ses) || (tcon->ses->ses_status == SES_EXITING) || (!tcon->ses->server) || !server) return -EIO; @@ -217,12 +218,12 @@ 
smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon, } /* are we still trying to reconnect? */ - spin_lock(&cifs_tcp_ses_lock); + spin_lock(&server->srv_lock); if (server->tcpStatus != CifsNeedReconnect) { - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&server->srv_lock); break; } - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&server->srv_lock); if (retries && --retries) continue; @@ -256,13 +257,13 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon, * and the server never sends an answer the socket will be closed * and tcpStatus set to reconnect. */ - spin_lock(&cifs_tcp_ses_lock); + spin_lock(&server->srv_lock); if (server->tcpStatus == CifsNeedReconnect) { - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&server->srv_lock); rc = -EHOSTDOWN; goto out; } - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&server->srv_lock); /* * need to prevent multiple threads trying to simultaneously @@ -354,7 +355,7 @@ fill_small_buf(__le16 smb2_command, struct cifs_tcon *tcon, void *buf, unsigned int *total_len) { - struct smb2_pdu *spdu = (struct smb2_pdu *)buf; + struct smb2_pdu *spdu = buf; /* lookup word count ie StructureSize from table */ __u16 parmsize = smb2_req_struct_sizes[le16_to_cpu(smb2_command)]; @@ -571,10 +572,6 @@ assemble_neg_contexts(struct smb2_negotiate_req *req, *total_len += ctxt_len; pneg_ctxt += ctxt_len; - build_posix_ctxt((struct smb2_posix_neg_context *)pneg_ctxt); - *total_len += sizeof(struct smb2_posix_neg_context); - pneg_ctxt += sizeof(struct smb2_posix_neg_context); - /* * secondary channels don't have the hostname field populated * use the hostname field in the primary channel instead @@ -586,9 +583,14 @@ assemble_neg_contexts(struct smb2_negotiate_req *req, hostname); *total_len += ctxt_len; pneg_ctxt += ctxt_len; - neg_context_count = 4; - } else /* second channels do not have a hostname */ neg_context_count = 3; + } else + neg_context_count = 2; + + build_posix_ctxt((struct smb2_posix_neg_context *)pneg_ctxt); + *total_len += sizeof(struct smb2_posix_neg_context); + pneg_ctxt += sizeof(struct smb2_posix_neg_context); + neg_context_count++; if (server->compress_algorithm) { build_compression_ctxt((struct smb2_compression_capabilities_context *) @@ -963,16 +965,17 @@ SMB2_negotiate(const unsigned int xid, } else if (rc != 0) goto neg_exit; + rc = -EIO; if (strcmp(server->vals->version_string, SMB3ANY_VERSION_STRING) == 0) { if (rsp->DialectRevision == cpu_to_le16(SMB20_PROT_ID)) { cifs_server_dbg(VFS, "SMB2 dialect returned but not requested\n"); - return -EIO; + goto neg_exit; } else if (rsp->DialectRevision == cpu_to_le16(SMB21_PROT_ID)) { cifs_server_dbg(VFS, "SMB2.1 dialect returned but not requested\n"); - return -EIO; + goto neg_exit; } else if (rsp->DialectRevision == cpu_to_le16(SMB311_PROT_ID)) { /* ops set to 3.0 by default for default so update */ server->ops = &smb311_operations; @@ -983,7 +986,7 @@ SMB2_negotiate(const unsigned int xid, if (rsp->DialectRevision == cpu_to_le16(SMB20_PROT_ID)) { cifs_server_dbg(VFS, "SMB2 dialect returned but not requested\n"); - return -EIO; + goto neg_exit; } else if (rsp->DialectRevision == cpu_to_le16(SMB21_PROT_ID)) { /* ops set to 3.0 by default for default so update */ server->ops = &smb21_operations; @@ -997,7 +1000,7 @@ SMB2_negotiate(const unsigned int xid, /* if requested single dialect ensure returned dialect matched */ cifs_server_dbg(VFS, "Invalid 0x%x dialect returned: not requested\n", le16_to_cpu(rsp->DialectRevision)); - return -EIO; + goto neg_exit; } cifs_dbg(FYI, "mode 0x%x\n", 
rsp->SecurityMode); @@ -1015,9 +1018,10 @@ SMB2_negotiate(const unsigned int xid, else { cifs_server_dbg(VFS, "Invalid dialect returned by server 0x%x\n", le16_to_cpu(rsp->DialectRevision)); - rc = -EIO; goto neg_exit; } + + rc = 0; server->dialect = le16_to_cpu(rsp->DialectRevision); /* @@ -1171,7 +1175,7 @@ int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon) } rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID, - FSCTL_VALIDATE_NEGOTIATE_INFO, true /* is_fsctl */, + FSCTL_VALIDATE_NEGOTIATE_INFO, (char *)pneg_inbuf, inbuflen, CIFSMaxBufSize, (char **)&pneg_rsp, &rsplen); if (rc == -EOPNOTSUPP) { @@ -1926,7 +1930,7 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree, tcon->capabilities = rsp->Capabilities; /* we keep caps little endian */ tcon->maximal_access = le32_to_cpu(rsp->MaximalAccess); tcon->tid = le32_to_cpu(rsp->hdr.Id.SyncId.TreeId); - strlcpy(tcon->treeName, tree, sizeof(tcon->treeName)); + strscpy(tcon->treeName, tree, sizeof(tcon->treeName)); if ((rsp->Capabilities & SMB2_SHARE_CAP_DFS) && ((tcon->share_flags & SHI1005_FLAGS_DFS) == 0)) @@ -1977,7 +1981,7 @@ SMB2_tdis(const unsigned int xid, struct cifs_tcon *tcon) } spin_unlock(&ses->chan_lock); - close_cached_dir_lease(&tcon->crfid); + invalidate_all_cached_dirs(tcon); rc = smb2_plain_req_init(SMB2_TREE_DISCONNECT, tcon, ses->server, (void **) &req, @@ -2570,19 +2574,15 @@ alloc_path_with_tree_prefix(__le16 **out_path, int *out_size, int *out_len, path_len = UniStrnlen((wchar_t *)path, PATH_MAX); - /* - * make room for one path separator between the treename and - * path - */ - *out_len = treename_len + 1 + path_len; + /* make room for one path separator only if @path isn't empty */ + *out_len = treename_len + (path[0] ? 1 : 0) + path_len; /* - * final path needs to be null-terminated UTF16 with a - * size aligned to 8 + * final path needs to be 8-byte aligned as specified in + * MS-SMB2 2.2.13 SMB2 CREATE Request. 
*/ - - *out_size = roundup((*out_len+1)*2, 8); - *out_path = kzalloc(*out_size, GFP_KERNEL); + *out_size = roundup(*out_len * sizeof(__le16), 8); + *out_path = kzalloc(*out_size + sizeof(__le16) /* null */, GFP_KERNEL); if (!*out_path) return -ENOMEM; @@ -3054,7 +3054,7 @@ int SMB2_ioctl_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server, struct smb_rqst *rqst, u64 persistent_fid, u64 volatile_fid, u32 opcode, - bool is_fsctl, char *in_data, u32 indatalen, + char *in_data, u32 indatalen, __u32 max_response_size) { struct smb2_ioctl_req *req; @@ -3129,10 +3129,8 @@ SMB2_ioctl_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server, req->hdr.CreditCharge = cpu_to_le16(DIV_ROUND_UP(max(indatalen, max_response_size), SMB2_MAX_BUFFER_SIZE)); - if (is_fsctl) - req->Flags = cpu_to_le32(SMB2_0_IOCTL_IS_FSCTL); - else - req->Flags = 0; + /* always an FSCTL (for now) */ + req->Flags = cpu_to_le32(SMB2_0_IOCTL_IS_FSCTL); /* validate negotiate request must be signed - see MS-SMB2 3.2.5.5 */ if (opcode == FSCTL_VALIDATE_NEGOTIATE_INFO) @@ -3159,9 +3157,9 @@ SMB2_ioctl_free(struct smb_rqst *rqst) */ int SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, - u64 volatile_fid, u32 opcode, bool is_fsctl, - char *in_data, u32 indatalen, u32 max_out_data_len, - char **out_data, u32 *plen /* returned data len */) + u64 volatile_fid, u32 opcode, char *in_data, u32 indatalen, + u32 max_out_data_len, char **out_data, + u32 *plen /* returned data len */) { struct smb_rqst rqst; struct smb2_ioctl_rsp *rsp = NULL; @@ -3203,7 +3201,7 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, rc = SMB2_ioctl_init(tcon, server, &rqst, persistent_fid, volatile_fid, opcode, - is_fsctl, in_data, indatalen, max_out_data_len); + in_data, indatalen, max_out_data_len); if (rc) goto ioctl_exit; @@ -3295,7 +3293,7 @@ SMB2_set_compression(const unsigned int xid, struct cifs_tcon *tcon, cpu_to_le16(COMPRESSION_FORMAT_DEFAULT); rc = SMB2_ioctl(xid, tcon, persistent_fid, volatile_fid, - FSCTL_SET_COMPRESSION, true /* is_fsctl */, + FSCTL_SET_COMPRESSION, (char *)&fsctl_input /* data input */, 2 /* in data len */, CIFSMaxBufSize /* max out data */, &ret_data /* out data */, NULL); @@ -3775,7 +3773,7 @@ smb2_echo_callback(struct mid_q_entry *mid) credits.instance = server->reconnect_instance; } - DeleteMidQEntry(mid); + release_mid(mid); add_credits(server, &credits, CIFS_ECHO_OP); } @@ -3910,15 +3908,15 @@ SMB2_echo(struct TCP_Server_Info *server) cifs_dbg(FYI, "In echo request for conn_id %lld\n", server->conn_id); - spin_lock(&cifs_tcp_ses_lock); + spin_lock(&server->srv_lock); if (server->ops->need_neg && server->ops->need_neg(server)) { - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&server->srv_lock); /* No need to send echo on newly established connections */ mod_delayed_work(cifsiod_wq, &server->reconnect, 0); return rc; } - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&server->srv_lock); rc = smb2_plain_req_init(SMB2_ECHO, NULL, server, (void **)&req, &total_len); @@ -4200,7 +4198,7 @@ smb2_readv_callback(struct mid_q_entry *mid) rdata->offset, rdata->got_bytes); queue_work(cifsiod_wq, &rdata->work); - DeleteMidQEntry(mid); + release_mid(mid); add_credits(server, &credits, 0); } @@ -4439,7 +4437,7 @@ smb2_writev_callback(struct mid_q_entry *mid) wdata->offset, wdata->bytes); queue_work(cifsiod_wq, &wdata->work); - DeleteMidQEntry(mid); + release_mid(mid); add_credits(server, &credits, 0); } diff --git a/fs/cifs/smb2proto.h b/fs/cifs/smb2proto.h index 
a69f1eed1cfe..3f740f24b96a 100644 --- a/fs/cifs/smb2proto.h +++ b/fs/cifs/smb2proto.h @@ -23,7 +23,7 @@ struct smb_rqst; extern int map_smb2_to_linux_error(char *buf, bool log_err); extern int smb2_check_message(char *buf, unsigned int length, struct TCP_Server_Info *server); -extern unsigned int smb2_calc_size(void *buf, struct TCP_Server_Info *server); +extern unsigned int smb2_calc_size(void *buf); extern char *smb2_get_data_area_len(int *off, int *len, struct smb2_hdr *shdr); extern __le16 *cifs_convert_path_to_utf16(const char *from, @@ -54,16 +54,6 @@ extern bool smb2_is_valid_oplock_break(char *buffer, extern int smb3_handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid); -extern int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon, - const char *path, - struct cifs_sb_info *cifs_sb, - struct cached_fid **cfid); -extern int open_cached_dir_by_dentry(struct cifs_tcon *tcon, - struct dentry *dentry, - struct cached_fid **cfid); -extern void close_cached_dir(struct cached_fid *cfid); -extern void close_cached_dir_lease(struct cached_fid *cfid); -extern void close_cached_dir_lease_locked(struct cached_fid *cfid); extern void move_smb2_info_to_cifs(FILE_ALL_INFO *dst, struct smb2_file_all_info *src); extern int smb2_query_reparse_tag(const unsigned int xid, struct cifs_tcon *tcon, @@ -147,13 +137,13 @@ extern int SMB2_open_init(struct cifs_tcon *tcon, extern void SMB2_open_free(struct smb_rqst *rqst); extern int SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, u64 volatile_fid, u32 opcode, - bool is_fsctl, char *in_data, u32 indatalen, u32 maxoutlen, + char *in_data, u32 indatalen, u32 maxoutlen, char **out_data, u32 *plen /* returned data len */); extern int SMB2_ioctl_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server, struct smb_rqst *rqst, u64 persistent_fid, u64 volatile_fid, u32 opcode, - bool is_fsctl, char *in_data, u32 indatalen, + char *in_data, u32 indatalen, __u32 max_response_size); extern void SMB2_ioctl_free(struct smb_rqst *rqst); extern int SMB2_change_notify(const unsigned int xid, struct cifs_tcon *tcon, diff --git a/fs/cifs/smb2transport.c b/fs/cifs/smb2transport.c index 55e79f6ee78d..1a5fc3314dbf 100644 --- a/fs/cifs/smb2transport.c +++ b/fs/cifs/smb2transport.c @@ -640,13 +640,13 @@ smb2_sign_rqst(struct smb_rqst *rqst, struct TCP_Server_Info *server) if (!is_signed) return 0; - spin_lock(&cifs_tcp_ses_lock); + spin_lock(&server->srv_lock); if (server->ops->need_neg && server->ops->need_neg(server)) { - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&server->srv_lock); return 0; } - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&server->srv_lock); if (!is_binding && !server->session_estab) { strncpy(shdr->Signature, "BSRSPYL", 8); return 0; @@ -750,7 +750,7 @@ smb2_mid_entry_alloc(const struct smb2_hdr *shdr, temp->callback = cifs_wake_up_task; temp->callback_data = current; - atomic_inc(&midCount); + atomic_inc(&mid_count); temp->mid_state = MID_REQUEST_ALLOCATED; trace_smb3_cmd_enter(le32_to_cpu(shdr->Id.SyncId.TreeId), le64_to_cpu(shdr->SessionId), @@ -762,28 +762,30 @@ static int smb2_get_mid_entry(struct cifs_ses *ses, struct TCP_Server_Info *server, struct smb2_hdr *shdr, struct mid_q_entry **mid) { - spin_lock(&cifs_tcp_ses_lock); + spin_lock(&server->srv_lock); if (server->tcpStatus == CifsExiting) { - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&server->srv_lock); return -ENOENT; } if (server->tcpStatus == CifsNeedReconnect) { - spin_unlock(&cifs_tcp_ses_lock); + 
spin_unlock(&server->srv_lock); cifs_dbg(FYI, "tcp session dead - return to caller to retry\n"); return -EAGAIN; } if (server->tcpStatus == CifsNeedNegotiate && shdr->Command != SMB2_NEGOTIATE) { - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&server->srv_lock); return -EAGAIN; } + spin_unlock(&server->srv_lock); + spin_lock(&ses->ses_lock); if (ses->ses_status == SES_NEW) { if ((shdr->Command != SMB2_SESSION_SETUP) && (shdr->Command != SMB2_NEGOTIATE)) { - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&ses->ses_lock); return -EAGAIN; } /* else ok - we are setting up session */ @@ -791,19 +793,19 @@ smb2_get_mid_entry(struct cifs_ses *ses, struct TCP_Server_Info *server, if (ses->ses_status == SES_EXITING) { if (shdr->Command != SMB2_LOGOFF) { - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&ses->ses_lock); return -EAGAIN; } /* else ok - we are shutting down the session */ } - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&ses->ses_lock); *mid = smb2_mid_entry_alloc(shdr, server); if (*mid == NULL) return -ENOMEM; - spin_lock(&GlobalMid_Lock); + spin_lock(&server->mid_lock); list_add_tail(&(*mid)->qhead, &server->pending_mid_q); - spin_unlock(&GlobalMid_Lock); + spin_unlock(&server->mid_lock); return 0; } @@ -854,7 +856,7 @@ smb2_setup_request(struct cifs_ses *ses, struct TCP_Server_Info *server, rc = smb2_sign_rqst(rqst, server); if (rc) { revert_current_mid_from_hdr(server, shdr); - cifs_delete_mid(mid); + delete_mid(mid); return ERR_PTR(rc); } @@ -869,13 +871,13 @@ smb2_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst) (struct smb2_hdr *)rqst->rq_iov[0].iov_base; struct mid_q_entry *mid; - spin_lock(&cifs_tcp_ses_lock); + spin_lock(&server->srv_lock); if (server->tcpStatus == CifsNeedNegotiate && shdr->Command != SMB2_NEGOTIATE) { - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&server->srv_lock); return ERR_PTR(-EAGAIN); } - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&server->srv_lock); smb2_seq_num_into_buf(server, shdr); @@ -888,7 +890,7 @@ smb2_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst) rc = smb2_sign_rqst(rqst, server); if (rc) { revert_current_mid_from_hdr(server, shdr); - DeleteMidQEntry(mid); + release_mid(mid); return ERR_PTR(rc); } diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c index bfc9bd55870a..9a2753e21170 100644 --- a/fs/cifs/transport.c +++ b/fs/cifs/transport.c @@ -21,6 +21,7 @@ #include <asm/processor.h> #include <linux/mempool.h> #include <linux/sched/signal.h> +#include <linux/task_io_accounting_ops.h> #include "cifspdu.h" #include "cifsglob.h" #include "cifsproto.h" @@ -37,13 +38,13 @@ cifs_wake_up_task(struct mid_q_entry *mid) wake_up_process(mid->callback_data); } -struct mid_q_entry * -AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server) +static struct mid_q_entry * +alloc_mid(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server) { struct mid_q_entry *temp; if (server == NULL) { - cifs_dbg(VFS, "Null TCP session in AllocMidQEntry\n"); + cifs_dbg(VFS, "%s: null TCP session\n", __func__); return NULL; } @@ -68,12 +69,12 @@ AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server) temp->callback = cifs_wake_up_task; temp->callback_data = current; - atomic_inc(&midCount); + atomic_inc(&mid_count); temp->mid_state = MID_REQUEST_ALLOCATED; return temp; } -static void _cifs_mid_q_entry_release(struct kref *refcount) +static void __release_mid(struct kref *refcount) { struct mid_q_entry *midEntry = container_of(refcount, struct 
mid_q_entry, refcount); @@ -91,7 +92,7 @@ static void _cifs_mid_q_entry_release(struct kref *refcount) server->ops->handle_cancelled_mid(midEntry, server); midEntry->mid_state = MID_FREE; - atomic_dec(&midCount); + atomic_dec(&mid_count); if (midEntry->large_buf) cifs_buf_release(midEntry->resp_buf); else @@ -152,29 +153,26 @@ static void _cifs_mid_q_entry_release(struct kref *refcount) mempool_free(midEntry, cifs_mid_poolp); } -void cifs_mid_q_entry_release(struct mid_q_entry *midEntry) +void release_mid(struct mid_q_entry *mid) { - spin_lock(&GlobalMid_Lock); - kref_put(&midEntry->refcount, _cifs_mid_q_entry_release); - spin_unlock(&GlobalMid_Lock); -} + struct TCP_Server_Info *server = mid->server; -void DeleteMidQEntry(struct mid_q_entry *midEntry) -{ - cifs_mid_q_entry_release(midEntry); + spin_lock(&server->mid_lock); + kref_put(&mid->refcount, __release_mid); + spin_unlock(&server->mid_lock); } void -cifs_delete_mid(struct mid_q_entry *mid) +delete_mid(struct mid_q_entry *mid) { - spin_lock(&GlobalMid_Lock); + spin_lock(&mid->server->mid_lock); if (!(mid->mid_flags & MID_DELETED)) { list_del_init(&mid->qhead); mid->mid_flags |= MID_DELETED; } - spin_unlock(&GlobalMid_Lock); + spin_unlock(&mid->server->mid_lock); - DeleteMidQEntry(mid); + release_mid(mid); } /* @@ -196,10 +194,6 @@ smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg, *sent = 0; - smb_msg->msg_name = (struct sockaddr *) &server->dstaddr; - smb_msg->msg_namelen = sizeof(struct sockaddr); - smb_msg->msg_control = NULL; - smb_msg->msg_controllen = 0; if (server->noblocksnd) smb_msg->msg_flags = MSG_DONTWAIT + MSG_NOSIGNAL; else @@ -263,8 +257,8 @@ smb_rqst_len(struct TCP_Server_Info *server, struct smb_rqst *rqst) int nvec; unsigned long buflen = 0; - if (server->vals->header_preamble_size == 0 && - rqst->rq_nvec >= 2 && rqst->rq_iov[0].iov_len == 4) { + if (!is_smb1(server) && rqst->rq_nvec >= 2 && + rqst->rq_iov[0].iov_len == 4) { iov = &rqst->rq_iov[1]; nvec = rqst->rq_nvec - 1; } else { @@ -311,7 +305,7 @@ __smb_send_rqst(struct TCP_Server_Info *server, int num_rqst, sigset_t mask, oldmask; size_t total_len = 0, sent, size; struct socket *ssocket = server->ssocket; - struct msghdr smb_msg; + struct msghdr smb_msg = {}; __be32 rfc1002_marker; if (cifs_rdma_enabled(server)) { @@ -348,7 +342,7 @@ __smb_send_rqst(struct TCP_Server_Info *server, int num_rqst, sigprocmask(SIG_BLOCK, &mask, &oldmask); /* Generate a rfc1002 marker for SMB2+ */ - if (server->vals->header_preamble_size == 0) { + if (!is_smb1(server)) { struct kvec hiov = { .iov_base = &rfc1002_marker, .iov_len = 4 @@ -577,12 +571,12 @@ wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits, } else { spin_unlock(&server->req_lock); - spin_lock(&cifs_tcp_ses_lock); + spin_lock(&server->srv_lock); if (server->tcpStatus == CifsExiting) { - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&server->srv_lock); return -ENOENT; } - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&server->srv_lock); /* * For normal commands, reserve the last MAX_COMPOUND @@ -725,11 +719,11 @@ cifs_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size, static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf, struct mid_q_entry **ppmidQ) { - spin_lock(&cifs_tcp_ses_lock); + spin_lock(&ses->ses_lock); if (ses->ses_status == SES_NEW) { if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) && (in_buf->Command != SMB_COM_NEGOTIATE)) { - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&ses->ses_lock); return -EAGAIN; } /* else ok - we 
are setting up session */ @@ -738,19 +732,19 @@ static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf, if (ses->ses_status == SES_EXITING) { /* check if SMB session is bad because we are setting it up */ if (in_buf->Command != SMB_COM_LOGOFF_ANDX) { - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&ses->ses_lock); return -EAGAIN; } /* else ok - we are shutting down session */ } - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&ses->ses_lock); - *ppmidQ = AllocMidQEntry(in_buf, ses->server); + *ppmidQ = alloc_mid(in_buf, ses->server); if (*ppmidQ == NULL) return -ENOMEM; - spin_lock(&GlobalMid_Lock); + spin_lock(&ses->server->mid_lock); list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q); - spin_unlock(&GlobalMid_Lock); + spin_unlock(&ses->server->mid_lock); return 0; } @@ -782,13 +776,13 @@ cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst) if (server->sign) hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE; - mid = AllocMidQEntry(hdr, server); + mid = alloc_mid(hdr, server); if (mid == NULL) return ERR_PTR(-ENOMEM); rc = cifs_sign_rqst(rqst, server, &mid->sequence_number); if (rc) { - DeleteMidQEntry(mid); + release_mid(mid); return ERR_PTR(rc); } @@ -849,9 +843,9 @@ cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst, mid->mid_state = MID_REQUEST_SUBMITTED; /* put it on the pending_mid_q */ - spin_lock(&GlobalMid_Lock); + spin_lock(&server->mid_lock); list_add_tail(&mid->qhead, &server->pending_mid_q); - spin_unlock(&GlobalMid_Lock); + spin_unlock(&server->mid_lock); /* * Need to store the time in mid before calling I/O. For call_async, @@ -865,7 +859,7 @@ cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst, if (rc < 0) { revert_current_mid(server, mid->credits); server->sequence_number -= 2; - cifs_delete_mid(mid); + delete_mid(mid); } cifs_server_unlock(server); @@ -912,10 +906,10 @@ cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server) cifs_dbg(FYI, "%s: cmd=%d mid=%llu state=%d\n", __func__, le16_to_cpu(mid->command), mid->mid, mid->mid_state); - spin_lock(&GlobalMid_Lock); + spin_lock(&server->mid_lock); switch (mid->mid_state) { case MID_RESPONSE_RECEIVED: - spin_unlock(&GlobalMid_Lock); + spin_unlock(&server->mid_lock); return rc; case MID_RETRY_NEEDED: rc = -EAGAIN; @@ -935,9 +929,9 @@ cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server) __func__, mid->mid, mid->mid_state); rc = -EIO; } - spin_unlock(&GlobalMid_Lock); + spin_unlock(&server->mid_lock); - DeleteMidQEntry(mid); + release_mid(mid); return rc; } @@ -997,7 +991,7 @@ cifs_setup_request(struct cifs_ses *ses, struct TCP_Server_Info *ignored, return ERR_PTR(rc); rc = cifs_sign_rqst(rqst, ses->server, &mid->sequence_number); if (rc) { - cifs_delete_mid(mid); + delete_mid(mid); return ERR_PTR(rc); } return mid; @@ -1026,7 +1020,7 @@ static void cifs_cancelled_callback(struct mid_q_entry *mid) { cifs_compound_callback(mid); - DeleteMidQEntry(mid); + release_mid(mid); } /* @@ -1078,12 +1072,12 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses, return -EIO; } - spin_lock(&cifs_tcp_ses_lock); + spin_lock(&server->srv_lock); if (server->tcpStatus == CifsExiting) { - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&server->srv_lock); return -ENOENT; } - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&server->srv_lock); /* * Wait for all the requests to become available. 
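/*
 * Illustrative userspace sketch, not code from the patch: the transport.c
 * hunks around this point fold DeleteMidQEntry() and
 * cifs_mid_q_entry_release() into a single release_mid(), which drops the
 * reference under the new per-server mid_lock, while delete_mid() first
 * unlinks the entry from pending_mid_q.  The model below mimics that
 * lifetime with a plain counted reference and a pthread mutex; the struct
 * layout, field names and queue representation are simplified assumptions,
 * not the real cifsglob.h definitions.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct tcp_server {                     /* stand-in for TCP_Server_Info */
	pthread_mutex_t mid_lock;       /* per-server, was global GlobalMid_Lock */
	int pending_mids;               /* stand-in for pending_mid_q */
};

struct mid_entry {                      /* stand-in for mid_q_entry */
	struct tcp_server *server;
	int refcount;
	bool queued;
};

static struct mid_entry *alloc_mid(struct tcp_server *server)
{
	struct mid_entry *mid = calloc(1, sizeof(*mid));

	if (!mid)
		return NULL;
	mid->server = server;
	mid->refcount = 1;              /* caller's reference */
	return mid;
}

/* release_mid(): drop one reference under the owning server's mid_lock. */
static void release_mid(struct mid_entry *mid)
{
	struct tcp_server *server = mid->server;
	bool last;

	pthread_mutex_lock(&server->mid_lock);
	last = (--mid->refcount == 0);
	pthread_mutex_unlock(&server->mid_lock);
	if (last)
		free(mid);
}

/* delete_mid(): unlink from the pending queue, then drop the reference. */
static void delete_mid(struct mid_entry *mid)
{
	struct tcp_server *server = mid->server;

	pthread_mutex_lock(&server->mid_lock);
	if (mid->queued) {
		mid->queued = false;
		server->pending_mids--;
	}
	pthread_mutex_unlock(&server->mid_lock);
	release_mid(mid);
}

int main(void)
{
	struct tcp_server srv = {
		.mid_lock = PTHREAD_MUTEX_INITIALIZER,
	};
	struct mid_entry *mid = alloc_mid(&srv);

	if (!mid)
		return 1;
	pthread_mutex_lock(&srv.mid_lock);      /* queue it, as the async path does */
	mid->queued = true;
	srv.pending_mids++;
	pthread_mutex_unlock(&srv.mid_lock);

	delete_mid(mid);                        /* unlink + final put */
	printf("pending mids after delete: %d\n", srv.pending_mids);
	return 0;
}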
@@ -1130,7 +1124,7 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses, if (IS_ERR(midQ[i])) { revert_current_mid(server, i); for (j = 0; j < i; j++) - cifs_delete_mid(midQ[j]); + delete_mid(midQ[j]); cifs_server_unlock(server); /* Update # of requests on wire to server */ @@ -1186,17 +1180,17 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses, /* * Compounding is never used during session establish. */ - spin_lock(&cifs_tcp_ses_lock); + spin_lock(&ses->ses_lock); if ((ses->ses_status == SES_NEW) || (optype & CIFS_NEG_OP) || (optype & CIFS_SESS_OP)) { - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&ses->ses_lock); cifs_server_lock(server); smb311_update_preauth_hash(ses, server, rqst[0].rq_iov, rqst[0].rq_nvec); cifs_server_unlock(server); - spin_lock(&cifs_tcp_ses_lock); + spin_lock(&ses->ses_lock); } - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&ses->ses_lock); for (i = 0; i < num_rqst; i++) { rc = wait_for_response(server, midQ[i]); @@ -1208,14 +1202,14 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses, cifs_server_dbg(FYI, "Cancelling wait for mid %llu cmd: %d\n", midQ[i]->mid, le16_to_cpu(midQ[i]->command)); send_cancel(server, &rqst[i], midQ[i]); - spin_lock(&GlobalMid_Lock); + spin_lock(&server->mid_lock); midQ[i]->mid_flags |= MID_WAIT_CANCELLED; if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED) { midQ[i]->callback = cifs_cancelled_callback; cancelled_mid[i] = true; credits[i].value = 0; } - spin_unlock(&GlobalMid_Lock); + spin_unlock(&server->mid_lock); } } @@ -1240,7 +1234,7 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses, buf = (char *)midQ[i]->resp_buf; resp_iov[i].iov_base = buf; resp_iov[i].iov_len = midQ[i]->resp_buf_size + - server->vals->header_preamble_size; + HEADER_PREAMBLE_SIZE(server); if (midQ[i]->large_buf) resp_buf_type[i] = CIFS_LARGE_BUFFER; @@ -1250,7 +1244,7 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses, rc = server->ops->check_receive(midQ[i], server, flags & CIFS_LOG_ERROR); - /* mark it so buf will not be freed by cifs_delete_mid */ + /* mark it so buf will not be freed by delete_mid */ if ((flags & CIFS_NO_RSP_BUF) == 0) midQ[i]->resp_buf = NULL; @@ -1259,19 +1253,19 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses, /* * Compounding is never used during session establish. */ - spin_lock(&cifs_tcp_ses_lock); + spin_lock(&ses->ses_lock); if ((ses->ses_status == SES_NEW) || (optype & CIFS_NEG_OP) || (optype & CIFS_SESS_OP)) { struct kvec iov = { .iov_base = resp_iov[0].iov_base, .iov_len = resp_iov[0].iov_len }; - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&ses->ses_lock); cifs_server_lock(server); smb311_update_preauth_hash(ses, server, &iov, 1); cifs_server_unlock(server); - spin_lock(&cifs_tcp_ses_lock); + spin_lock(&ses->ses_lock); } - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&ses->ses_lock); out: /* @@ -1282,7 +1276,7 @@ out: */ for (i = 0; i < num_rqst; i++) { if (!cancelled_mid[i]) - cifs_delete_mid(midQ[i]); + delete_mid(midQ[i]); } return rc; @@ -1360,12 +1354,12 @@ SendReceive(const unsigned int xid, struct cifs_ses *ses, return -EIO; } - spin_lock(&cifs_tcp_ses_lock); + spin_lock(&server->srv_lock); if (server->tcpStatus == CifsExiting) { - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&server->srv_lock); return -ENOENT; } - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&server->srv_lock); /* Ensure that we do not send more than 50 overlapping requests to the same server. 
We may make this configurable later or @@ -1419,15 +1413,15 @@ SendReceive(const unsigned int xid, struct cifs_ses *ses, rc = wait_for_response(server, midQ); if (rc != 0) { send_cancel(server, &rqst, midQ); - spin_lock(&GlobalMid_Lock); + spin_lock(&server->mid_lock); if (midQ->mid_state == MID_REQUEST_SUBMITTED) { /* no longer considered to be "in-flight" */ - midQ->callback = DeleteMidQEntry; - spin_unlock(&GlobalMid_Lock); + midQ->callback = release_mid; + spin_unlock(&server->mid_lock); add_credits(server, &credits, 0); return rc; } - spin_unlock(&GlobalMid_Lock); + spin_unlock(&server->mid_lock); } rc = cifs_sync_mid_result(midQ, server); @@ -1447,7 +1441,7 @@ SendReceive(const unsigned int xid, struct cifs_ses *ses, memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4); rc = cifs_check_receive(midQ, server, 0); out: - cifs_delete_mid(midQ); + delete_mid(midQ); add_credits(server, &credits, 0); return rc; @@ -1505,12 +1499,12 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon, return -EIO; } - spin_lock(&cifs_tcp_ses_lock); + spin_lock(&server->srv_lock); if (server->tcpStatus == CifsExiting) { - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&server->srv_lock); return -ENOENT; } - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&server->srv_lock); /* Ensure that we do not send more than 50 overlapping requests to the same server. We may make this configurable later or @@ -1540,7 +1534,7 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon, rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number); if (rc) { - cifs_delete_mid(midQ); + delete_mid(midQ); cifs_server_unlock(server); return rc; } @@ -1557,7 +1551,7 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon, cifs_server_unlock(server); if (rc < 0) { - cifs_delete_mid(midQ); + delete_mid(midQ); return rc; } @@ -1568,19 +1562,19 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon, (server->tcpStatus != CifsNew))); /* Were we interrupted by a signal ? */ - spin_lock(&cifs_tcp_ses_lock); + spin_lock(&server->srv_lock); if ((rc == -ERESTARTSYS) && (midQ->mid_state == MID_REQUEST_SUBMITTED) && ((server->tcpStatus == CifsGood) || (server->tcpStatus == CifsNew))) { - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&server->srv_lock); if (in_buf->Command == SMB_COM_TRANSACTION2) { /* POSIX lock. We send a NT_CANCEL SMB to cause the blocking lock to return. */ rc = send_cancel(server, &rqst, midQ); if (rc) { - cifs_delete_mid(midQ); + delete_mid(midQ); return rc; } } else { @@ -1592,7 +1586,7 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon, /* If we get -ENOLCK back the lock may have already been removed. Don't exit in this case. */ if (rc && rc != -ENOLCK) { - cifs_delete_mid(midQ); + delete_mid(midQ); return rc; } } @@ -1600,21 +1594,21 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon, rc = wait_for_response(server, midQ); if (rc) { send_cancel(server, &rqst, midQ); - spin_lock(&GlobalMid_Lock); + spin_lock(&server->mid_lock); if (midQ->mid_state == MID_REQUEST_SUBMITTED) { /* no longer considered to be "in-flight" */ - midQ->callback = DeleteMidQEntry; - spin_unlock(&GlobalMid_Lock); + midQ->callback = release_mid; + spin_unlock(&server->mid_lock); return rc; } - spin_unlock(&GlobalMid_Lock); + spin_unlock(&server->mid_lock); } /* We got the response - restart system call. 
*/ rstart = 1; - spin_lock(&cifs_tcp_ses_lock); + spin_lock(&server->srv_lock); } - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&server->srv_lock); rc = cifs_sync_mid_result(midQ, server); if (rc != 0) @@ -1631,8 +1625,185 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon, memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4); rc = cifs_check_receive(midQ, server, 0); out: - cifs_delete_mid(midQ); + delete_mid(midQ); if (rstart && rc == -EACCES) return -ERESTARTSYS; return rc; } + +/* + * Discard any remaining data in the current SMB. To do this, we borrow the + * current bigbuf. + */ +int +cifs_discard_remaining_data(struct TCP_Server_Info *server) +{ + unsigned int rfclen = server->pdu_size; + int remaining = rfclen + HEADER_PREAMBLE_SIZE(server) - + server->total_read; + + while (remaining > 0) { + int length; + + length = cifs_discard_from_socket(server, + min_t(size_t, remaining, + CIFSMaxBufSize + MAX_HEADER_SIZE(server))); + if (length < 0) + return length; + server->total_read += length; + remaining -= length; + } + + return 0; +} + +static int +__cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid, + bool malformed) +{ + int length; + + length = cifs_discard_remaining_data(server); + dequeue_mid(mid, malformed); + mid->resp_buf = server->smallbuf; + server->smallbuf = NULL; + return length; +} + +static int +cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid) +{ + struct cifs_readdata *rdata = mid->callback_data; + + return __cifs_readv_discard(server, mid, rdata->result); +} + +int +cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid) +{ + int length, len; + unsigned int data_offset, data_len; + struct cifs_readdata *rdata = mid->callback_data; + char *buf = server->smallbuf; + unsigned int buflen = server->pdu_size + HEADER_PREAMBLE_SIZE(server); + bool use_rdma_mr = false; + + cifs_dbg(FYI, "%s: mid=%llu offset=%llu bytes=%u\n", + __func__, mid->mid, rdata->offset, rdata->bytes); + + /* + * read the rest of READ_RSP header (sans Data array), or whatever we + * can if there's not enough data. At this point, we've read down to + * the Mid. + */ + len = min_t(unsigned int, buflen, server->vals->read_rsp_size) - + HEADER_SIZE(server) + 1; + + length = cifs_read_from_socket(server, + buf + HEADER_SIZE(server) - 1, len); + if (length < 0) + return length; + server->total_read += length; + + if (server->ops->is_session_expired && + server->ops->is_session_expired(buf)) { + cifs_reconnect(server, true); + return -1; + } + + if (server->ops->is_status_pending && + server->ops->is_status_pending(buf, server)) { + cifs_discard_remaining_data(server); + return -1; + } + + /* set up first two iov for signature check and to get credits */ + rdata->iov[0].iov_base = buf; + rdata->iov[0].iov_len = HEADER_PREAMBLE_SIZE(server); + rdata->iov[1].iov_base = buf + HEADER_PREAMBLE_SIZE(server); + rdata->iov[1].iov_len = + server->total_read - HEADER_PREAMBLE_SIZE(server); + cifs_dbg(FYI, "0: iov_base=%p iov_len=%zu\n", + rdata->iov[0].iov_base, rdata->iov[0].iov_len); + cifs_dbg(FYI, "1: iov_base=%p iov_len=%zu\n", + rdata->iov[1].iov_base, rdata->iov[1].iov_len); + + /* Was the SMB read successful? 
*/ + rdata->result = server->ops->map_error(buf, false); + if (rdata->result != 0) { + cifs_dbg(FYI, "%s: server returned error %d\n", + __func__, rdata->result); + /* normal error on read response */ + return __cifs_readv_discard(server, mid, false); + } + + /* Is there enough to get to the rest of the READ_RSP header? */ + if (server->total_read < server->vals->read_rsp_size) { + cifs_dbg(FYI, "%s: server returned short header. got=%u expected=%zu\n", + __func__, server->total_read, + server->vals->read_rsp_size); + rdata->result = -EIO; + return cifs_readv_discard(server, mid); + } + + data_offset = server->ops->read_data_offset(buf) + + HEADER_PREAMBLE_SIZE(server); + if (data_offset < server->total_read) { + /* + * win2k8 sometimes sends an offset of 0 when the read + * is beyond the EOF. Treat it as if the data starts just after + * the header. + */ + cifs_dbg(FYI, "%s: data offset (%u) inside read response header\n", + __func__, data_offset); + data_offset = server->total_read; + } else if (data_offset > MAX_CIFS_SMALL_BUFFER_SIZE) { + /* data_offset is beyond the end of smallbuf */ + cifs_dbg(FYI, "%s: data offset (%u) beyond end of smallbuf\n", + __func__, data_offset); + rdata->result = -EIO; + return cifs_readv_discard(server, mid); + } + + cifs_dbg(FYI, "%s: total_read=%u data_offset=%u\n", + __func__, server->total_read, data_offset); + + len = data_offset - server->total_read; + if (len > 0) { + /* read any junk before data into the rest of smallbuf */ + length = cifs_read_from_socket(server, + buf + server->total_read, len); + if (length < 0) + return length; + server->total_read += length; + } + + /* how much data is in the response? */ +#ifdef CONFIG_CIFS_SMB_DIRECT + use_rdma_mr = rdata->mr; +#endif + data_len = server->ops->read_data_length(buf, use_rdma_mr); + if (!use_rdma_mr && (data_offset + data_len > buflen)) { + /* data_len is corrupt -- discard frame */ + rdata->result = -EIO; + return cifs_readv_discard(server, mid); + } + + length = rdata->read_into_pages(server, rdata, data_len); + if (length < 0) + return length; + + server->total_read += length; + + cifs_dbg(FYI, "total_read=%u buflen=%u remaining=%u\n", + server->total_read, buflen, data_len); + + /* discard anything left over */ + if (server->total_read < buflen) + return cifs_readv_discard(server, mid); + + dequeue_mid(mid, false); + mid->resp_buf = server->smallbuf; + server->smallbuf = NULL; + return length; +} diff --git a/fs/cifs/xattr.c b/fs/cifs/xattr.c index 9d486fbbfbbd..998fa51f9b68 100644 --- a/fs/cifs/xattr.c +++ b/fs/cifs/xattr.c @@ -201,6 +201,7 @@ static int cifs_xattr_set(const struct xattr_handler *handler, break; } +#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY case XATTR_ACL_ACCESS: #ifdef CONFIG_CIFS_POSIX if (!value) @@ -224,6 +225,7 @@ static int cifs_xattr_set(const struct xattr_handler *handler, cifs_remap(cifs_sb)); #endif /* CONFIG_CIFS_POSIX */ break; +#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ } out: @@ -364,7 +366,7 @@ static int cifs_xattr_get(const struct xattr_handler *handler, } break; } - +#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY case XATTR_ACL_ACCESS: #ifdef CONFIG_CIFS_POSIX if (sb->s_flags & SB_POSIXACL) @@ -384,6 +386,7 @@ static int cifs_xattr_get(const struct xattr_handler *handler, cifs_remap(cifs_sb)); #endif /* CONFIG_CIFS_POSIX */ break; +#endif /* ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ } /* We could add an additional check for streams ie |
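/*
 * Illustrative sketch, not code from the patch: cifs_discard_remaining_data(),
 * added to transport.c above, drains whatever is left of the current PDU from
 * the socket in bounded chunks so the next response can be parsed from a clean
 * stream.  The standalone model below applies the same
 * "discard min(remaining, chunk)" loop to an ordinary file descriptor; the
 * chunk size, error handling and the example in main() are simplified
 * assumptions rather than the kernel helpers.
 */
#include <errno.h>
#include <stdio.h>
#include <unistd.h>

#define DISCARD_CHUNK 4096

/* Read and throw away exactly @remaining bytes from @fd. */
static int discard_remaining_data(int fd, size_t remaining)
{
	char buf[DISCARD_CHUNK];

	while (remaining > 0) {
		size_t want = remaining < sizeof(buf) ? remaining : sizeof(buf);
		ssize_t got = read(fd, buf, want);

		if (got < 0) {
			if (errno == EINTR)
				continue;	/* interrupted: retry */
			return -errno;		/* hard error: give up */
		}
		if (got == 0)
			return -EPIPE;		/* peer closed mid-PDU */
		remaining -= (size_t)got;
	}
	return 0;
}

int main(void)
{
	/* Example: skip the first 10 bytes of standard input. */
	if (discard_remaining_data(STDIN_FILENO, 10) == 0)
		puts("discarded 10 bytes");
	return 0;
}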