author     Linus Torvalds <torvalds@linux-foundation.org>   2016-07-27 14:48:37 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2016-07-27 14:48:37 -0700
commit     6a492b0f23d28e1f946cdf08e54617484400dafb
tree       58e5bb9a9c91b2e1a0726eba12835b0e631a464a /drivers/scsi/fcoe
parent     d85486d47123961bd8b08e94f6d4886c59a1fd76
parent     354a086d9369cb7471790fa047665884f2bc6d79
Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi
Pull SCSI updates from James Bottomley:
"This update includes the usual round of driver updates (fcoe, lpfc,
ufs, qla2xxx, hisi_sas). The most important other change is removing
the flag to allow non-blk_mq on a per host basis (it's unused); there
is still a global module parameter for all of SCSI just in case.
The rest are an assortment of minor fixes and typo updates"
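(The global opt-in mentioned above is the scsi_mod module parameter; a usage sketch, assuming it keeps its stock name use_blk_mq — check the exact spelling against the kernel in question:

    # kernel command line: enable blk-mq for all SCSI hosts
    scsi_mod.use_blk_mq=1

    # or toggle it at runtime via the writable module parameter
    echo 1 > /sys/module/scsi_mod/parameters/use_blk_mq
)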
* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (101 commits)
scsi:libsas: fix oops caused by assigning a freed task to ->lldd_task
fnic: pci_dma_mapping_error() doesn't return an error code
scsi: lpfc: avoid harmless comparison warning
fcoe: implement FIP VLAN responder
fcoe: Rename 'fip_frame' to 'fip_vn2vn_notify_frame'
lpfc: call lpfc_sli_validate_fcp_iocb() with the hbalock held
scsi: ufs: remove unnecessary goto label
hpsa: change hpsa_passthru_ioctl timeout
hpsa: correct skipping masked peripherals
qla2xxx: Update driver version to 8.07.00.38-k
qla2xxx: Fix BBCR offset
qla2xxx: Fix duplicate message id.
qla2xxx: Disable the adapter and skip error recovery in case of register disconnect.
qla2xxx: Separate ISP type bits out from device type.
qla2xxx: Correction to function qla26xx_dport_diagnostics().
qla2xxx: Add support to handle Loop Init error Asynchronus event.
qla2xxx: Let DPORT be enabled purely by nvram.
qla2xxx: Add bsg interface to support statistics counter reset.
qla2xxx: Add bsg interface to support D_Port Diagnostics.
qla2xxx: Check for device state before unloading the driver.
...
Diffstat (limited to 'drivers/scsi/fcoe')
-rw-r--r--  drivers/scsi/fcoe/fcoe.c           | 322
-rw-r--r--  drivers/scsi/fcoe/fcoe_ctlr.c      | 245
-rw-r--r--  drivers/scsi/fcoe/fcoe_sysfs.c     |  39
-rw-r--r--  drivers/scsi/fcoe/fcoe_transport.c |   4
4 files changed, 337 insertions(+), 273 deletions(-)
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index 0efe7112fc1f..c8a4305c7662 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -67,9 +67,6 @@ static DEFINE_MUTEX(fcoe_config_mutex); static struct workqueue_struct *fcoe_wq; -/* fcoe_percpu_clean completion. Waiter protected by fcoe_create_mutex */ -static DECLARE_COMPLETION(fcoe_flush_completion); - /* fcoe host list */ /* must only by accessed under the RTNL mutex */ static LIST_HEAD(fcoe_hostlist); @@ -80,7 +77,6 @@ static int fcoe_reset(struct Scsi_Host *); static int fcoe_xmit(struct fc_lport *, struct fc_frame *); static int fcoe_rcv(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *); -static int fcoe_percpu_receive_thread(void *); static void fcoe_percpu_clean(struct fc_lport *); static int fcoe_link_ok(struct fc_lport *); @@ -107,12 +103,11 @@ static int fcoe_ddp_setup(struct fc_lport *, u16, struct scatterlist *, static int fcoe_ddp_done(struct fc_lport *, u16); static int fcoe_ddp_target(struct fc_lport *, u16, struct scatterlist *, unsigned int); -static int fcoe_cpu_callback(struct notifier_block *, unsigned long, void *); static int fcoe_dcb_app_notification(struct notifier_block *notifier, ulong event, void *ptr); static bool fcoe_match(struct net_device *netdev); -static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode); +static int fcoe_create(struct net_device *netdev, enum fip_mode fip_mode); static int fcoe_destroy(struct net_device *netdev); static int fcoe_enable(struct net_device *netdev); static int fcoe_disable(struct net_device *netdev); @@ -120,7 +115,7 @@ static int fcoe_disable(struct net_device *netdev); /* fcoe_syfs control interface handlers */ static int fcoe_ctlr_alloc(struct net_device *netdev); static int fcoe_ctlr_enabled(struct fcoe_ctlr_device *cdev); - +static void fcoe_ctlr_mode(struct fcoe_ctlr_device *ctlr_dev); static struct fc_seq *fcoe_elsct_send(struct fc_lport *, u32 did, struct fc_frame *, @@ -136,11 +131,6 @@ static struct notifier_block fcoe_notifier = { .notifier_call = fcoe_device_notification, }; -/* notification function for CPU hotplug events */ -static struct notifier_block fcoe_cpu_notifier = { - .notifier_call = fcoe_cpu_callback, -}; - /* notification function for DCB events */ static struct notifier_block dcb_notifier = { .notifier_call = fcoe_dcb_app_notification, @@ -156,8 +146,9 @@ static void fcoe_set_vport_symbolic_name(struct fc_vport *); static void fcoe_set_port_id(struct fc_lport *, u32, struct fc_frame *); static void fcoe_fcf_get_vlan_id(struct fcoe_fcf_device *); + static struct fcoe_sysfs_function_template fcoe_sysfs_templ = { - .set_fcoe_ctlr_mode = fcoe_ctlr_set_fip_mode, + .set_fcoe_ctlr_mode = fcoe_ctlr_mode, .set_fcoe_ctlr_enabled = fcoe_ctlr_enabled, .get_fcoe_ctlr_link_fail = fcoe_ctlr_get_lesb, .get_fcoe_ctlr_vlink_fail = fcoe_ctlr_get_lesb, @@ -682,6 +673,12 @@ static int fcoe_netdev_config(struct fc_lport *lport, struct net_device *netdev) fcoe = port->priv; ctlr = fcoe_to_ctlr(fcoe); + /* Figure out the VLAN ID, if any */ + if (netdev->priv_flags & IFF_802_1Q_VLAN) + lport->vlan = vlan_dev_vlan_id(netdev); + else + lport->vlan = 0; + /* * Determine max frame size based on underlying device and optional * user-configured limit. 
If the MFS is too low, fcoe_link_ok() @@ -780,9 +777,6 @@ static void fcoe_fdmi_info(struct fc_lport *lport, struct net_device *netdev) fcoe = port->priv; realdev = fcoe->realdev; - if (!realdev) - return; - /* No FDMI state m/c for NPIV ports */ if (lport->vport) return; @@ -1245,152 +1239,21 @@ static int __exit fcoe_if_exit(void) return 0; } -/** - * fcoe_percpu_thread_create() - Create a receive thread for an online CPU - * @cpu: The CPU index of the CPU to create a receive thread for - */ -static void fcoe_percpu_thread_create(unsigned int cpu) +static void fcoe_thread_cleanup_local(unsigned int cpu) { - struct fcoe_percpu_s *p; - struct task_struct *thread; - - p = &per_cpu(fcoe_percpu, cpu); - - thread = kthread_create_on_node(fcoe_percpu_receive_thread, - (void *)p, cpu_to_node(cpu), - "fcoethread/%d", cpu); - - if (likely(!IS_ERR(thread))) { - kthread_bind(thread, cpu); - wake_up_process(thread); - - spin_lock_bh(&p->fcoe_rx_list.lock); - p->thread = thread; - spin_unlock_bh(&p->fcoe_rx_list.lock); - } -} - -/** - * fcoe_percpu_thread_destroy() - Remove the receive thread of a CPU - * @cpu: The CPU index of the CPU whose receive thread is to be destroyed - * - * Destroys a per-CPU Rx thread. Any pending skbs are moved to the - * current CPU's Rx thread. If the thread being destroyed is bound to - * the CPU processing this context the skbs will be freed. - */ -static void fcoe_percpu_thread_destroy(unsigned int cpu) -{ - struct fcoe_percpu_s *p; - struct task_struct *thread; struct page *crc_eof; - struct sk_buff *skb; -#ifdef CONFIG_SMP - struct fcoe_percpu_s *p0; - unsigned targ_cpu = get_cpu(); -#endif /* CONFIG_SMP */ - - FCOE_DBG("Destroying receive thread for CPU %d\n", cpu); + struct fcoe_percpu_s *p; - /* Prevent any new skbs from being queued for this CPU. */ - p = &per_cpu(fcoe_percpu, cpu); + p = per_cpu_ptr(&fcoe_percpu, cpu); spin_lock_bh(&p->fcoe_rx_list.lock); - thread = p->thread; - p->thread = NULL; crc_eof = p->crc_eof_page; p->crc_eof_page = NULL; p->crc_eof_offset = 0; spin_unlock_bh(&p->fcoe_rx_list.lock); -#ifdef CONFIG_SMP - /* - * Don't bother moving the skb's if this context is running - * on the same CPU that is having its thread destroyed. This - * can easily happen when the module is removed. - */ - if (cpu != targ_cpu) { - p0 = &per_cpu(fcoe_percpu, targ_cpu); - spin_lock_bh(&p0->fcoe_rx_list.lock); - if (p0->thread) { - FCOE_DBG("Moving frames from CPU %d to CPU %d\n", - cpu, targ_cpu); - - while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL) - __skb_queue_tail(&p0->fcoe_rx_list, skb); - spin_unlock_bh(&p0->fcoe_rx_list.lock); - } else { - /* - * The targeted CPU is not initialized and cannot accept - * new skbs. Unlock the targeted CPU and drop the skbs - * on the CPU that is going offline. - */ - while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL) - kfree_skb(skb); - spin_unlock_bh(&p0->fcoe_rx_list.lock); - } - } else { - /* - * This scenario occurs when the module is being removed - * and all threads are being destroyed. skbs will continue - * to be shifted from the CPU thread that is being removed - * to the CPU thread associated with the CPU that is processing - * the module removal. Once there is only one CPU Rx thread it - * will reach this case and we will drop all skbs and later - * stop the thread. 
- */ - spin_lock_bh(&p->fcoe_rx_list.lock); - while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL) - kfree_skb(skb); - spin_unlock_bh(&p->fcoe_rx_list.lock); - } - put_cpu(); -#else - /* - * This a non-SMP scenario where the singular Rx thread is - * being removed. Free all skbs and stop the thread. - */ - spin_lock_bh(&p->fcoe_rx_list.lock); - while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL) - kfree_skb(skb); - spin_unlock_bh(&p->fcoe_rx_list.lock); -#endif - - if (thread) - kthread_stop(thread); - if (crc_eof) put_page(crc_eof); -} - -/** - * fcoe_cpu_callback() - Handler for CPU hotplug events - * @nfb: The callback data block - * @action: The event triggering the callback - * @hcpu: The index of the CPU that the event is for - * - * This creates or destroys per-CPU data for fcoe - * - * Returns NOTIFY_OK always. - */ -static int fcoe_cpu_callback(struct notifier_block *nfb, - unsigned long action, void *hcpu) -{ - unsigned cpu = (unsigned long)hcpu; - - switch (action) { - case CPU_ONLINE: - case CPU_ONLINE_FROZEN: - FCOE_DBG("CPU %x online: Create Rx thread\n", cpu); - fcoe_percpu_thread_create(cpu); - break; - case CPU_DEAD: - case CPU_DEAD_FROZEN: - FCOE_DBG("CPU %x offline: Remove Rx thread\n", cpu); - fcoe_percpu_thread_destroy(cpu); - break; - default: - break; - } - return NOTIFY_OK; + flush_work(&p->work); } /** @@ -1509,26 +1372,6 @@ static int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev, fps = &per_cpu(fcoe_percpu, cpu); spin_lock(&fps->fcoe_rx_list.lock); - if (unlikely(!fps->thread)) { - /* - * The targeted CPU is not ready, let's target - * the first CPU now. For non-SMP systems this - * will check the same CPU twice. - */ - FCOE_NETDEV_DBG(netdev, "CPU is online, but no receive thread " - "ready for incoming skb- using first online " - "CPU.\n"); - - spin_unlock(&fps->fcoe_rx_list.lock); - cpu = cpumask_first(cpu_online_mask); - fps = &per_cpu(fcoe_percpu, cpu); - spin_lock(&fps->fcoe_rx_list.lock); - if (!fps->thread) { - spin_unlock(&fps->fcoe_rx_list.lock); - goto err; - } - } - /* * We now have a valid CPU that we're targeting for * this skb. We also have this receive thread locked, @@ -1543,8 +1386,7 @@ static int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev, * in softirq context. */ __skb_queue_tail(&fps->fcoe_rx_list, skb); - if (fps->thread->state == TASK_INTERRUPTIBLE) - wake_up_process(fps->thread); + schedule_work_on(cpu, &fps->work); spin_unlock(&fps->fcoe_rx_list.lock); return NET_RX_SUCCESS; @@ -1713,15 +1555,6 @@ static int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp) } /** - * fcoe_percpu_flush_done() - Indicate per-CPU queue flush completion - * @skb: The completed skb (argument required by destructor) - */ -static void fcoe_percpu_flush_done(struct sk_buff *skb) -{ - complete(&fcoe_flush_completion); -} - -/** * fcoe_filter_frames() - filter out bad fcoe frames, i.e. 
bad CRC * @lport: The local port the frame was received on * @fp: The received frame @@ -1792,8 +1625,7 @@ static void fcoe_recv_frame(struct sk_buff *skb) fr = fcoe_dev_from_skb(skb); lport = fr->fr_dev; if (unlikely(!lport)) { - if (skb->destructor != fcoe_percpu_flush_done) - FCOE_NETDEV_DBG(skb->dev, "NULL lport in skb\n"); + FCOE_NETDEV_DBG(skb->dev, "NULL lport in skb\n"); kfree_skb(skb); return; } @@ -1857,40 +1689,28 @@ drop: } /** - * fcoe_percpu_receive_thread() - The per-CPU packet receive thread - * @arg: The per-CPU context + * fcoe_receive_work() - The per-CPU worker + * @work: The work struct * - * Return: 0 for success */ -static int fcoe_percpu_receive_thread(void *arg) +static void fcoe_receive_work(struct work_struct *work) { - struct fcoe_percpu_s *p = arg; + struct fcoe_percpu_s *p; struct sk_buff *skb; struct sk_buff_head tmp; + p = container_of(work, struct fcoe_percpu_s, work); skb_queue_head_init(&tmp); - set_user_nice(current, MIN_NICE); - - while (!kthread_should_stop()) { - - spin_lock_bh(&p->fcoe_rx_list.lock); - skb_queue_splice_init(&p->fcoe_rx_list, &tmp); - - if (!skb_queue_len(&tmp)) { - set_current_state(TASK_INTERRUPTIBLE); - spin_unlock_bh(&p->fcoe_rx_list.lock); - schedule(); - continue; - } - - spin_unlock_bh(&p->fcoe_rx_list.lock); + spin_lock_bh(&p->fcoe_rx_list.lock); + skb_queue_splice_init(&p->fcoe_rx_list, &tmp); + spin_unlock_bh(&p->fcoe_rx_list.lock); - while ((skb = __skb_dequeue(&tmp)) != NULL) - fcoe_recv_frame(skb); + if (!skb_queue_len(&tmp)) + return; - } - return 0; + while ((skb = __skb_dequeue(&tmp))) + fcoe_recv_frame(skb); } /** @@ -2163,6 +1983,32 @@ static int fcoe_ctlr_enabled(struct fcoe_ctlr_device *cdev) } /** + * fcoe_ctlr_mode() - Switch FIP mode + * @cdev: The FCoE Controller that is being modified + * + * When the FIP mode has been changed we need to update + * the multicast addresses to ensure we get the correct + * frames. + */ +static void fcoe_ctlr_mode(struct fcoe_ctlr_device *ctlr_dev) +{ + struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev); + struct fcoe_interface *fcoe = fcoe_ctlr_priv(ctlr); + + if (ctlr_dev->mode == FIP_CONN_TYPE_VN2VN && + ctlr->mode != FIP_MODE_VN2VN) { + dev_mc_del(fcoe->netdev, FIP_ALL_ENODE_MACS); + dev_mc_add(fcoe->netdev, FIP_ALL_VN2VN_MACS); + dev_mc_add(fcoe->netdev, FIP_ALL_P2P_MACS); + } else if (ctlr->mode != FIP_MODE_FABRIC) { + dev_mc_del(fcoe->netdev, FIP_ALL_VN2VN_MACS); + dev_mc_del(fcoe->netdev, FIP_ALL_P2P_MACS); + dev_mc_add(fcoe->netdev, FIP_ALL_ENODE_MACS); + } + fcoe_ctlr_set_fip_mode(ctlr_dev); +} + +/** * fcoe_destroy() - Destroy a FCoE interface * @netdev : The net_device object the Ethernet interface to create on * @@ -2317,7 +2163,7 @@ enum fcoe_create_link_state { * consolidation of code can be done when that interface is * removed. */ -static int _fcoe_create(struct net_device *netdev, enum fip_state fip_mode, +static int _fcoe_create(struct net_device *netdev, enum fip_mode fip_mode, enum fcoe_create_link_state link_state) { int rc = 0; @@ -2406,7 +2252,7 @@ out: * * Returns: 0 for success */ -static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode) +static int fcoe_create(struct net_device *netdev, enum fip_mode fip_mode) { return _fcoe_create(netdev, fip_mode, FCOE_CREATE_LINK_UP); } @@ -2450,36 +2296,19 @@ static int fcoe_link_ok(struct fc_lport *lport) * * Must be called with fcoe_create_mutex held to single-thread completion. * - * This flushes the pending skbs by adding a new skb to each queue and - * waiting until they are all freed. 
This assures us that not only are - * there no packets that will be handled by the lport, but also that any - * threads already handling packet have returned. + * This flushes the pending skbs by flush the work item for each CPU. The work + * item on each possible CPU is flushed because we may have used the per-CPU + * struct of an offline CPU. */ static void fcoe_percpu_clean(struct fc_lport *lport) { struct fcoe_percpu_s *pp; - struct sk_buff *skb; unsigned int cpu; for_each_possible_cpu(cpu) { pp = &per_cpu(fcoe_percpu, cpu); - if (!pp->thread || !cpu_online(cpu)) - continue; - - skb = dev_alloc_skb(0); - if (!skb) - continue; - - skb->destructor = fcoe_percpu_flush_done; - - spin_lock_bh(&pp->fcoe_rx_list.lock); - __skb_queue_tail(&pp->fcoe_rx_list, skb); - if (pp->fcoe_rx_list.qlen == 1) - wake_up_process(pp->thread); - spin_unlock_bh(&pp->fcoe_rx_list.lock); - - wait_for_completion(&fcoe_flush_completion); + flush_work(&pp->work); } } @@ -2625,22 +2454,11 @@ static int __init fcoe_init(void) mutex_lock(&fcoe_config_mutex); for_each_possible_cpu(cpu) { - p = &per_cpu(fcoe_percpu, cpu); + p = per_cpu_ptr(&fcoe_percpu, cpu); + INIT_WORK(&p->work, fcoe_receive_work); skb_queue_head_init(&p->fcoe_rx_list); } - cpu_notifier_register_begin(); - - for_each_online_cpu(cpu) - fcoe_percpu_thread_create(cpu); - - /* Initialize per CPU interrupt thread */ - rc = __register_hotcpu_notifier(&fcoe_cpu_notifier); - if (rc) - goto out_free; - - cpu_notifier_register_done(); - /* Setup link change notification */ fcoe_dev_setup(); @@ -2652,12 +2470,6 @@ static int __init fcoe_init(void) return 0; out_free: - for_each_online_cpu(cpu) { - fcoe_percpu_thread_destroy(cpu); - } - - cpu_notifier_register_done(); - mutex_unlock(&fcoe_config_mutex); destroy_workqueue(fcoe_wq); return rc; @@ -2690,14 +2502,8 @@ static void __exit fcoe_exit(void) } rtnl_unlock(); - cpu_notifier_register_begin(); - - for_each_online_cpu(cpu) - fcoe_percpu_thread_destroy(cpu); - - __unregister_hotcpu_notifier(&fcoe_cpu_notifier); - - cpu_notifier_register_done(); + for_each_possible_cpu(cpu) + fcoe_thread_cleanup_local(cpu); mutex_unlock(&fcoe_config_mutex); diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c index 3e83d485f743..a569c65f22b1 100644 --- a/drivers/scsi/fcoe/fcoe_ctlr.c +++ b/drivers/scsi/fcoe/fcoe_ctlr.c @@ -59,6 +59,8 @@ static int fcoe_ctlr_vn_recv(struct fcoe_ctlr *, struct sk_buff *); static void fcoe_ctlr_vn_timeout(struct fcoe_ctlr *); static int fcoe_ctlr_vn_lookup(struct fcoe_ctlr *, u32, u8 *); +static int fcoe_ctlr_vlan_recv(struct fcoe_ctlr *, struct sk_buff *); + static u8 fcoe_all_fcfs[ETH_ALEN] = FIP_ALL_FCF_MACS; static u8 fcoe_all_enode[ETH_ALEN] = FIP_ALL_ENODE_MACS; static u8 fcoe_all_vn2vn[ETH_ALEN] = FIP_ALL_VN2VN_MACS; @@ -149,6 +151,7 @@ void fcoe_ctlr_init(struct fcoe_ctlr *fip, enum fip_state mode) { fcoe_ctlr_set_state(fip, FIP_ST_LINK_WAIT); fip->mode = mode; + fip->fip_resp = false; INIT_LIST_HEAD(&fip->fcfs); mutex_init(&fip->ctlr_mutex); spin_lock_init(&fip->ctlr_lock); @@ -991,7 +994,7 @@ static int fcoe_ctlr_parse_adv(struct fcoe_ctlr *fip, LIBFCOE_FIP_DBG(fip, "unexpected descriptor type %x " "in FIP adv\n", desc->fip_dtype); /* standard says ignore unknown descriptors >= 128 */ - if (desc->fip_dtype < FIP_DT_VENDOR_BASE) + if (desc->fip_dtype < FIP_DT_NON_CRITICAL) return -EINVAL; break; } @@ -1232,7 +1235,7 @@ static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb) LIBFCOE_FIP_DBG(fip, "unexpected descriptor type %x " "in FIP adv\n", 
desc->fip_dtype); /* standard says ignore unknown descriptors >= 128 */ - if (desc->fip_dtype < FIP_DT_VENDOR_BASE) + if (desc->fip_dtype < FIP_DT_NON_CRITICAL) goto drop; if (desc_cnt <= 2) { LIBFCOE_FIP_DBG(fip, "FIP descriptors " @@ -1410,7 +1413,7 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip, break; default: /* standard says ignore unknown descriptors >= 128 */ - if (desc->fip_dtype < FIP_DT_VENDOR_BASE) + if (desc->fip_dtype < FIP_DT_NON_CRITICAL) goto err; break; } @@ -1513,6 +1516,7 @@ static int fcoe_ctlr_recv_handler(struct fcoe_ctlr *fip, struct sk_buff *skb) struct fip_header *fiph; struct ethhdr *eh; enum fip_state state; + bool fip_vlan_resp = false; u16 op; u8 sub; @@ -1546,11 +1550,17 @@ static int fcoe_ctlr_recv_handler(struct fcoe_ctlr *fip, struct sk_buff *skb) state = FIP_ST_ENABLED; LIBFCOE_FIP_DBG(fip, "Using FIP mode\n"); } + fip_vlan_resp = fip->fip_resp; mutex_unlock(&fip->ctlr_mutex); if (fip->mode == FIP_MODE_VN2VN && op == FIP_OP_VN2VN) return fcoe_ctlr_vn_recv(fip, skb); + if (fip_vlan_resp && op == FIP_OP_VLAN) { + LIBFCOE_FIP_DBG(fip, "fip vlan discovery\n"); + return fcoe_ctlr_vlan_recv(fip, skb); + } + if (state != FIP_ST_ENABLED && state != FIP_ST_VNMP_UP && state != FIP_ST_VNMP_CLAIM) goto drop; @@ -1989,7 +1999,7 @@ static void fcoe_ctlr_vn_send(struct fcoe_ctlr *fip, const u8 *dest, size_t min_len) { struct sk_buff *skb; - struct fip_frame { + struct fip_vn2vn_probe_frame { struct ethhdr eth; struct fip_header fip; struct fip_mac_desc mac; @@ -2016,7 +2026,7 @@ static void fcoe_ctlr_vn_send(struct fcoe_ctlr *fip, if (!skb) return; - frame = (struct fip_frame *)skb->data; + frame = (struct fip_vn2vn_probe_frame *)skb->data; memset(frame, 0, len); memcpy(frame->eth.h_dest, dest, ETH_ALEN); @@ -2338,7 +2348,7 @@ static int fcoe_ctlr_vn_parse(struct fcoe_ctlr *fip, LIBFCOE_FIP_DBG(fip, "unexpected descriptor type %x " "in FIP probe\n", dtype); /* standard says ignore unknown descriptors >= 128 */ - if (dtype < FIP_DT_VENDOR_BASE) + if (dtype < FIP_DT_NON_CRITICAL) return -EINVAL; break; } @@ -2496,14 +2506,13 @@ static int fcoe_ctlr_vn_lookup(struct fcoe_ctlr *fip, u32 port_id, u8 *mac) struct fcoe_rport *frport; int ret = -1; - rcu_read_lock(); rdata = lport->tt.rport_lookup(lport, port_id); if (rdata) { frport = fcoe_ctlr_rport(rdata); memcpy(mac, frport->enode_mac, ETH_ALEN); ret = 0; + kref_put(&rdata->kref, lport->tt.rport_destroy); } - rcu_read_unlock(); return ret; } @@ -2585,11 +2594,7 @@ static void fcoe_ctlr_vn_beacon(struct fcoe_ctlr *fip, fcoe_ctlr_vn_send(fip, FIP_SC_VN_PROBE_REQ, fcoe_all_vn2vn, 0); return; } - mutex_lock(&lport->disc.disc_mutex); rdata = lport->tt.rport_lookup(lport, new->ids.port_id); - if (rdata) - kref_get(&rdata->kref); - mutex_unlock(&lport->disc.disc_mutex); if (rdata) { if (rdata->ids.node_name == new->ids.node_name && rdata->ids.port_name == new->ids.port_name) { @@ -2709,6 +2714,220 @@ drop: } /** + * fcoe_ctlr_vlan_parse - parse vlan discovery request or response + * @fip: The FCoE controller + * @skb: incoming packet + * @rdata: buffer for resulting parsed VLAN entry plus fcoe_rport + * + * Returns non-zero error number on error. + * Does not consume the packet. 
+ */ +static int fcoe_ctlr_vlan_parse(struct fcoe_ctlr *fip, + struct sk_buff *skb, + struct fc_rport_priv *rdata) +{ + struct fip_header *fiph; + struct fip_desc *desc = NULL; + struct fip_mac_desc *macd = NULL; + struct fip_wwn_desc *wwn = NULL; + struct fcoe_rport *frport; + size_t rlen; + size_t dlen; + u32 desc_mask = 0; + u32 dtype; + u8 sub; + + memset(rdata, 0, sizeof(*rdata) + sizeof(*frport)); + frport = fcoe_ctlr_rport(rdata); + + fiph = (struct fip_header *)skb->data; + frport->flags = ntohs(fiph->fip_flags); + + sub = fiph->fip_subcode; + switch (sub) { + case FIP_SC_VL_REQ: + desc_mask = BIT(FIP_DT_MAC) | BIT(FIP_DT_NAME); + break; + default: + LIBFCOE_FIP_DBG(fip, "vn_parse unknown subcode %u\n", sub); + return -EINVAL; + } + + rlen = ntohs(fiph->fip_dl_len) * 4; + if (rlen + sizeof(*fiph) > skb->len) + return -EINVAL; + + desc = (struct fip_desc *)(fiph + 1); + while (rlen > 0) { + dlen = desc->fip_dlen * FIP_BPW; + if (dlen < sizeof(*desc) || dlen > rlen) + return -EINVAL; + + dtype = desc->fip_dtype; + if (dtype < 32) { + if (!(desc_mask & BIT(dtype))) { + LIBFCOE_FIP_DBG(fip, + "unexpected or duplicated desc " + "desc type %u in " + "FIP VN2VN subtype %u\n", + dtype, sub); + return -EINVAL; + } + desc_mask &= ~BIT(dtype); + } + + switch (dtype) { + case FIP_DT_MAC: + if (dlen != sizeof(struct fip_mac_desc)) + goto len_err; + macd = (struct fip_mac_desc *)desc; + if (!is_valid_ether_addr(macd->fd_mac)) { + LIBFCOE_FIP_DBG(fip, + "Invalid MAC addr %pM in FIP VN2VN\n", + macd->fd_mac); + return -EINVAL; + } + memcpy(frport->enode_mac, macd->fd_mac, ETH_ALEN); + break; + case FIP_DT_NAME: + if (dlen != sizeof(struct fip_wwn_desc)) + goto len_err; + wwn = (struct fip_wwn_desc *)desc; + rdata->ids.node_name = get_unaligned_be64(&wwn->fd_wwn); + break; + default: + LIBFCOE_FIP_DBG(fip, "unexpected descriptor type %x " + "in FIP probe\n", dtype); + /* standard says ignore unknown descriptors >= 128 */ + if (dtype < FIP_DT_NON_CRITICAL) + return -EINVAL; + break; + } + desc = (struct fip_desc *)((char *)desc + dlen); + rlen -= dlen; + } + return 0; + +len_err: + LIBFCOE_FIP_DBG(fip, "FIP length error in descriptor type %x len %zu\n", + dtype, dlen); + return -EINVAL; +} + +/** + * fcoe_ctlr_vlan_send() - Send a FIP VLAN Notification + * @fip: The FCoE controller + * @sub: sub-opcode for vlan notification or vn2vn vlan notification + * @dest: The destination Ethernet MAC address + * @min_len: minimum size of the Ethernet payload to be sent + */ +static void fcoe_ctlr_vlan_send(struct fcoe_ctlr *fip, + enum fip_vlan_subcode sub, + const u8 *dest) +{ + struct sk_buff *skb; + struct fip_vlan_notify_frame { + struct ethhdr eth; + struct fip_header fip; + struct fip_mac_desc mac; + struct fip_vlan_desc vlan; + } __packed * frame; + size_t len; + size_t dlen; + + len = sizeof(*frame); + dlen = sizeof(frame->mac) + sizeof(frame->vlan); + len = max(len, sizeof(struct ethhdr)); + + skb = dev_alloc_skb(len); + if (!skb) + return; + + LIBFCOE_FIP_DBG(fip, "fip %s vlan notification, vlan %d\n", + fip->mode == FIP_MODE_VN2VN ? 
"vn2vn" : "fcf", + fip->lp->vlan); + + frame = (struct fip_vlan_notify_frame *)skb->data; + memset(frame, 0, len); + memcpy(frame->eth.h_dest, dest, ETH_ALEN); + + memcpy(frame->eth.h_source, fip->ctl_src_addr, ETH_ALEN); + frame->eth.h_proto = htons(ETH_P_FIP); + + frame->fip.fip_ver = FIP_VER_ENCAPS(FIP_VER); + frame->fip.fip_op = htons(FIP_OP_VLAN); + frame->fip.fip_subcode = sub; + frame->fip.fip_dl_len = htons(dlen / FIP_BPW); + + frame->mac.fd_desc.fip_dtype = FIP_DT_MAC; + frame->mac.fd_desc.fip_dlen = sizeof(frame->mac) / FIP_BPW; + memcpy(frame->mac.fd_mac, fip->ctl_src_addr, ETH_ALEN); + + frame->vlan.fd_desc.fip_dtype = FIP_DT_VLAN; + frame->vlan.fd_desc.fip_dlen = sizeof(frame->vlan) / FIP_BPW; + put_unaligned_be16(fip->lp->vlan, &frame->vlan.fd_vlan); + + skb_put(skb, len); + skb->protocol = htons(ETH_P_FIP); + skb->priority = fip->priority; + skb_reset_mac_header(skb); + skb_reset_network_header(skb); + + fip->send(fip, skb); +} + +/** + * fcoe_ctlr_vlan_disk_reply() - send FIP VLAN Discovery Notification. + * @fip: The FCoE controller + * + * Called with ctlr_mutex held. + */ +static void fcoe_ctlr_vlan_disc_reply(struct fcoe_ctlr *fip, + struct fc_rport_priv *rdata) +{ + struct fcoe_rport *frport = fcoe_ctlr_rport(rdata); + enum fip_vlan_subcode sub = FIP_SC_VL_NOTE; + + if (fip->mode == FIP_MODE_VN2VN) + sub = FIP_SC_VL_VN2VN_NOTE; + + fcoe_ctlr_vlan_send(fip, sub, frport->enode_mac); +} + +/** + * fcoe_ctlr_vlan_recv - vlan request receive handler for VN2VN mode. + * @lport: The local port + * @fp: The received frame + * + */ +static int fcoe_ctlr_vlan_recv(struct fcoe_ctlr *fip, struct sk_buff *skb) +{ + struct fip_header *fiph; + enum fip_vlan_subcode sub; + struct { + struct fc_rport_priv rdata; + struct fcoe_rport frport; + } buf; + int rc; + + fiph = (struct fip_header *)skb->data; + sub = fiph->fip_subcode; + rc = fcoe_ctlr_vlan_parse(fip, skb, &buf.rdata); + if (rc) { + LIBFCOE_FIP_DBG(fip, "vlan_recv vlan_parse error %d\n", rc); + goto drop; + } + mutex_lock(&fip->ctlr_mutex); + if (sub == FIP_SC_VL_REQ) + fcoe_ctlr_vlan_disc_reply(fip, &buf.rdata); + mutex_unlock(&fip->ctlr_mutex); + +drop: + kfree(skb); + return rc; +} + +/** * fcoe_ctlr_disc_recv - discovery receive handler for VN2VN mode. * @lport: The local port * @fp: The received frame @@ -2869,7 +3088,7 @@ unlock: * when nothing is happening. 
*/ static void fcoe_ctlr_mode_set(struct fc_lport *lport, struct fcoe_ctlr *fip, - enum fip_state fip_mode) + enum fip_mode fip_mode) { void *priv; diff --git a/drivers/scsi/fcoe/fcoe_sysfs.c b/drivers/scsi/fcoe/fcoe_sysfs.c index 045c4e11ee54..0675fd128734 100644 --- a/drivers/scsi/fcoe/fcoe_sysfs.c +++ b/drivers/scsi/fcoe/fcoe_sysfs.c @@ -385,6 +385,44 @@ static FCOE_DEVICE_ATTR(ctlr, enabled, S_IRUGO | S_IWUSR, show_ctlr_enabled_state, store_ctlr_enabled); +static ssize_t store_ctlr_fip_resp(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev); + struct fcoe_ctlr *fip = fcoe_ctlr_device_priv(ctlr); + + mutex_lock(&fip->ctlr_mutex); + if ((buf[1] == '\0') || ((buf[1] == '\n') && (buf[2] == '\0'))) { + if (buf[0] == '1') { + fip->fip_resp = 1; + mutex_unlock(&fip->ctlr_mutex); + return count; + } + if (buf[0] == '0') { + fip->fip_resp = 0; + mutex_unlock(&fip->ctlr_mutex); + return count; + } + } + mutex_unlock(&fip->ctlr_mutex); + return -EINVAL; +} + +static ssize_t show_ctlr_fip_resp(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev); + struct fcoe_ctlr *fip = fcoe_ctlr_device_priv(ctlr); + + return sprintf(buf, "%d\n", fip->fip_resp ? 1 : 0); +} + +static FCOE_DEVICE_ATTR(ctlr, fip_vlan_responder, S_IRUGO | S_IWUSR, + show_ctlr_fip_resp, + store_ctlr_fip_resp); + static ssize_t store_private_fcoe_ctlr_fcf_dev_loss_tmo(struct device *dev, struct device_attribute *attr, @@ -467,6 +505,7 @@ static struct attribute_group fcoe_ctlr_lesb_attr_group = { }; static struct attribute *fcoe_ctlr_attrs[] = { + &device_attr_fcoe_ctlr_fip_vlan_responder.attr, &device_attr_fcoe_ctlr_fcf_dev_loss_tmo.attr, &device_attr_fcoe_ctlr_enabled.attr, &device_attr_fcoe_ctlr_mode.attr, diff --git a/drivers/scsi/fcoe/fcoe_transport.c b/drivers/scsi/fcoe/fcoe_transport.c index 641c60e8fda3..7028dd37e5dd 100644 --- a/drivers/scsi/fcoe/fcoe_transport.c +++ b/drivers/scsi/fcoe/fcoe_transport.c @@ -133,10 +133,10 @@ int fcoe_link_speed_update(struct fc_lport *lport) case SPEED_10000: lport->link_speed = FC_PORTSPEED_10GBIT; break; - case 20000: + case SPEED_20000: lport->link_speed = FC_PORTSPEED_20GBIT; break; - case 40000: + case SPEED_40000: lport->link_speed = FC_PORTSPEED_40GBIT; break; default: |