author      Andy Yan <ayan@marvell.com>                              2009-05-08 17:46:40 -0400
committer   James Bottomley <James.Bottomley@HansenPartnership.com>  2009-05-20 17:21:12 -0500
commit      20b09c2992fefbe78f8cede7b404fb143a413c52 (patch)
tree        c7e2368e4dd3f38b66db95fa4982ef009e2df00a /drivers/scsi/mvsas/mv_sas.c
parent      dd4969a892ea522ecf9d7d826ba1531ce044d46f (diff)
[SCSI] mvsas: add support for 94xx; layout change; bug fixes
This version contains the following main changes:
- Switch to a new dispatch-table layout to support more types of ASIC
  (the idea is sketched just after the sign-offs below).
- Add SSP TMF support and enhance the related error handling.
- Handle PHY change ("flash") events with a 2*HZ delay (see the sketch
  after the diffstat below).
- Support the Marvell 94xx series ASIC for 6G SAS/SATA, which behaves
  like two 88SE64xx chips but has a different register layout.
- Support SPI flash for HBA-related configuration info.
- Other enhancements on the kernel side, such as additional PHY types.
[jejb: fold back in DMA_BIT_MASK changes]
Signed-off-by: Ying Chu <jasonchu@marvell.com>
Signed-off-by: Andy Yan <ayan@marvell.com>
Signed-off-by: Ke Wei <kewei@marvell.com>
Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
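
As a rough illustration of the layout change mentioned above: chip-specific behaviour now goes through a per-chip dispatch structure (referenced as MVS_CHIP_DISP throughout the diff below), so the common mv_sas.c core never touches 64xx- or 94xx-specific registers directly. The following standalone sketch shows only the pattern; the field names and values are illustrative, not the driver's actual dispatch definition.

```c
/*
 * Minimal, self-contained sketch (not the actual driver code) of the
 * dispatch-table idea: each chip family provides an ops structure and
 * the shared core only ever calls through it.  Fields and return
 * values here are illustrative.
 */
#include <stdio.h>

struct mvs_dispatch_sketch {
	const char *name;
	int (*prd_size)(void);          /* bytes per PRD entry            */
	int (*phy_max_link_rate)(void); /* highest supported rate in Gbps */
};

static int prd_size_64xx(void) { return 16; }
static int rate_64xx(void) { return 3; }   /* 3.0 Gbps parts */
static const struct mvs_dispatch_sketch mvs_64xx_ops = {
	"88SE64xx", prd_size_64xx, rate_64xx,
};

static int prd_size_94xx(void) { return 16; }
static int rate_94xx(void) { return 6; }   /* 6.0 Gbps parts */
static const struct mvs_dispatch_sketch mvs_94xx_ops = {
	"88SE94xx", prd_size_94xx, rate_94xx,
};

/* The common core is chip-agnostic: it only uses the dispatch pointer. */
static void core_use(const struct mvs_dispatch_sketch *disp)
{
	printf("%s: prd_size=%d, max link=%dG\n",
	       disp->name, disp->prd_size(), disp->phy_max_link_rate());
}

int main(void)
{
	core_use(&mvs_64xx_ops);
	core_use(&mvs_94xx_ops);
	return 0;
}
```

In the real driver the structure carries many more hooks (PHY reset, port config register access, PRD construction, delivery-queue kicks), but the indirection principle is the same.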
Diffstat (limited to 'drivers/scsi/mvsas/mv_sas.c')
-rw-r--r--   drivers/scsi/mvsas/mv_sas.c   2117
1 file changed, 1228 insertions(+), 889 deletions(-)
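
The "delay 2*HZ when PHY changed" item refers to deferring PHY plug-in/plug-out handling to a workqueue rather than acting in the interrupt path, so brief link flaps can settle before libsas is notified. Below is a minimal kernel-style sketch of that pattern with hypothetical names; the driver's own mvs_wq/mvs_work_queue machinery appears in the diff that follows.

```c
/*
 * Hedged sketch of deferred PHY-event handling: the interrupt path only
 * queues a small work item, and the actual re-check of the PHY runs from
 * a workqueue after a 2*HZ delay.  Names are illustrative.
 */
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/slab.h>

struct phy_event_work {
	struct delayed_work dwork;
	void *hba;            /* owning adapter instance */
	unsigned int phy_no;  /* PHY whose state changed */
};

static void phy_event_fn(struct work_struct *work)
{
	struct phy_event_work *ew =
		container_of(to_delayed_work(work), struct phy_event_work, dwork);

	/*
	 * Re-read the PHY state here; report either loss of signal or a
	 * (re)attached device to libsas, then release the work item.
	 */
	kfree(ew);
}

/* Called from interrupt context when a PHY change event is seen. */
static int queue_phy_event(void *hba, unsigned int phy_no)
{
	struct phy_event_work *ew = kmalloc(sizeof(*ew), GFP_ATOMIC);

	if (!ew)
		return -ENOMEM;
	ew->hba = hba;
	ew->phy_no = phy_no;
	INIT_DELAYED_WORK(&ew->dwork, phy_event_fn);
	schedule_delayed_work(&ew->dwork, 2 * HZ);  /* the 2*HZ debounce */
	return 0;
}
```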
diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c index 6a583c19c6e5..d79ac179eaff 100644 --- a/drivers/scsi/mvsas/mv_sas.c +++ b/drivers/scsi/mvsas/mv_sas.c @@ -1,97 +1,65 @@ /* - mv_sas.c - Marvell 88SE6440 SAS/SATA support - - Copyright 2007 Red Hat, Inc. - Copyright 2008 Marvell. <kewei@marvell.com> - - This program is free software; you can redistribute it and/or - modify it under the terms of the GNU General Public License as - published by the Free Software Foundation; either version 2, - or (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. - - You should have received a copy of the GNU General Public - License along with this program; see the file COPYING. If not, - write to the Free Software Foundation, 675 Mass Ave, Cambridge, - MA 02139, USA. - - --------------------------------------------------------------- - - Random notes: - * hardware supports controlling the endian-ness of data - structures. this permits elimination of all the le32_to_cpu() - and cpu_to_le32() conversions. - - */ + * Marvell 88SE64xx/88SE94xx main function + * + * Copyright 2007 Red Hat, Inc. + * Copyright 2008 Marvell. <kewei@marvell.com> + * + * This file is licensed under GPLv2. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; version 2 of the + * License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 + * USA +*/ #include "mv_sas.h" -#include "mv_64xx.h" -#include "mv_chips.h" - -/* offset for D2H FIS in the Received FIS List Structure */ -#define SATA_RECEIVED_D2H_FIS(reg_set) \ - ((void *) mvi->rx_fis + 0x400 + 0x100 * reg_set + 0x40) -#define SATA_RECEIVED_PIO_FIS(reg_set) \ - ((void *) mvi->rx_fis + 0x400 + 0x100 * reg_set + 0x20) -#define UNASSOC_D2H_FIS(id) \ - ((void *) mvi->rx_fis + 0x100 * id) - -struct mvs_task_exec_info { - struct sas_task *task; - struct mvs_cmd_hdr *hdr; - struct mvs_port *port; - u32 tag; - int n_elem; -}; - -static void mvs_release_task(struct mvs_info *mvi, int phy_no); -static u32 mvs_is_phy_ready(struct mvs_info *mvi, int i); -static void mvs_update_phyinfo(struct mvs_info *mvi, int i, - int get_st); -static int mvs_int_rx(struct mvs_info *mvi, bool self_clear); -static void mvs_slot_reset(struct mvs_info *mvi, struct sas_task *task, - u32 slot_idx); static int mvs_find_tag(struct mvs_info *mvi, struct sas_task *task, u32 *tag) { if (task->lldd_task) { struct mvs_slot_info *slot; slot = (struct mvs_slot_info *) task->lldd_task; - *tag = slot - mvi->slot_info; + *tag = slot->slot_tag; return 1; } return 0; } -static void mvs_tag_clear(struct mvs_info *mvi, u32 tag) +void mvs_tag_clear(struct mvs_info *mvi, u32 tag) { void *bitmap = (void *) &mvi->tags; clear_bit(tag, bitmap); } -static void mvs_tag_free(struct mvs_info *mvi, u32 tag) +void mvs_tag_free(struct mvs_info *mvi, u32 tag) { mvs_tag_clear(mvi, tag); } -static void mvs_tag_set(struct mvs_info *mvi, unsigned int tag) +void mvs_tag_set(struct mvs_info *mvi, unsigned int tag) { void *bitmap = (void *) &mvi->tags; set_bit(tag, bitmap); } -static int mvs_tag_alloc(struct mvs_info *mvi, u32 *tag_out) +inline int mvs_tag_alloc(struct mvs_info *mvi, u32 *tag_out) { unsigned int index, tag; void *bitmap = (void *) &mvi->tags; - index = find_first_zero_bit(bitmap, MVS_SLOTS); + index = find_first_zero_bit(bitmap, mvi->tags_num); tag = index; - if (tag >= MVS_SLOTS) + if (tag >= mvi->tags_num) return -SAS_QUEUE_FULL; mvs_tag_set(mvi, tag); *tag_out = tag; @@ -101,11 +69,11 @@ static int mvs_tag_alloc(struct mvs_info *mvi, u32 *tag_out) void mvs_tag_init(struct mvs_info *mvi) { int i; - for (i = 0; i < MVS_SLOTS; ++i) + for (i = 0; i < mvi->tags_num; ++i) mvs_tag_clear(mvi, i); } -static void mvs_hexdump(u32 size, u8 *data, u32 baseaddr) +void mvs_hexdump(u32 size, u8 *data, u32 baseaddr) { u32 i; u32 run; @@ -113,7 +81,7 @@ static void mvs_hexdump(u32 size, u8 *data, u32 baseaddr) offset = 0; while (size) { - printk("%08X : ", baseaddr + offset); + printk(KERN_DEBUG"%08X : ", baseaddr + offset); if (size >= 16) run = 16; else @@ -121,31 +89,31 @@ static void mvs_hexdump(u32 size, u8 *data, u32 baseaddr) size -= run; for (i = 0; i < 16; i++) { if (i < run) - printk("%02X ", (u32)data[i]); + printk(KERN_DEBUG"%02X ", (u32)data[i]); else - printk(" "); + printk(KERN_DEBUG" "); } - printk(": "); + printk(KERN_DEBUG": "); for (i = 0; i < run; i++) - printk("%c", isalnum(data[i]) ? data[i] : '.'); - printk("\n"); + printk(KERN_DEBUG"%c", + isalnum(data[i]) ? 
data[i] : '.'); + printk(KERN_DEBUG"\n"); data = &data[16]; offset += run; } - printk("\n"); + printk(KERN_DEBUG"\n"); } -#if _MV_DUMP +#if (_MV_DUMP > 1) static void mvs_hba_sb_dump(struct mvs_info *mvi, u32 tag, enum sas_protocol proto) { u32 offset; - struct pci_dev *pdev = mvi->pdev; struct mvs_slot_info *slot = &mvi->slot_info[tag]; offset = slot->cmd_size + MVS_OAF_SZ + - sizeof(struct mvs_prd) * slot->n_elem; - dev_printk(KERN_DEBUG, &pdev->dev, "+---->Status buffer[%d] :\n", + MVS_CHIP_DISP->prd_size() * slot->n_elem; + dev_printk(KERN_DEBUG, mvi->dev, "+---->Status buffer[%d] :\n", tag); mvs_hexdump(32, (u8 *) slot->response, (u32) slot->buf_dma + offset); @@ -155,47 +123,45 @@ static void mvs_hba_sb_dump(struct mvs_info *mvi, u32 tag, static void mvs_hba_memory_dump(struct mvs_info *mvi, u32 tag, enum sas_protocol proto) { -#if _MV_DUMP +#if (_MV_DUMP > 1) u32 sz, w_ptr; u64 addr; - void __iomem *regs = mvi->regs; - struct pci_dev *pdev = mvi->pdev; struct mvs_slot_info *slot = &mvi->slot_info[tag]; /*Delivery Queue */ - sz = mr32(TX_CFG) & TX_RING_SZ_MASK; + sz = MVS_CHIP_SLOT_SZ; w_ptr = slot->tx; - addr = mr32(TX_HI) << 16 << 16 | mr32(TX_LO); - dev_printk(KERN_DEBUG, &pdev->dev, + addr = mvi->tx_dma; + dev_printk(KERN_DEBUG, mvi->dev, "Delivery Queue Size=%04d , WRT_PTR=%04X\n", sz, w_ptr); - dev_printk(KERN_DEBUG, &pdev->dev, + dev_printk(KERN_DEBUG, mvi->dev, "Delivery Queue Base Address=0x%llX (PA)" "(tx_dma=0x%llX), Entry=%04d\n", - addr, mvi->tx_dma, w_ptr); + addr, (unsigned long long)mvi->tx_dma, w_ptr); mvs_hexdump(sizeof(u32), (u8 *)(&mvi->tx[mvi->tx_prod]), (u32) mvi->tx_dma + sizeof(u32) * w_ptr); /*Command List */ addr = mvi->slot_dma; - dev_printk(KERN_DEBUG, &pdev->dev, + dev_printk(KERN_DEBUG, mvi->dev, "Command List Base Address=0x%llX (PA)" "(slot_dma=0x%llX), Header=%03d\n", - addr, slot->buf_dma, tag); - dev_printk(KERN_DEBUG, &pdev->dev, "Command Header[%03d]:\n", tag); + addr, (unsigned long long)slot->buf_dma, tag); + dev_printk(KERN_DEBUG, mvi->dev, "Command Header[%03d]:\n", tag); /*mvs_cmd_hdr */ mvs_hexdump(sizeof(struct mvs_cmd_hdr), (u8 *)(&mvi->slot[tag]), (u32) mvi->slot_dma + tag * sizeof(struct mvs_cmd_hdr)); /*1.command table area */ - dev_printk(KERN_DEBUG, &pdev->dev, "+---->Command Table :\n"); + dev_printk(KERN_DEBUG, mvi->dev, "+---->Command Table :\n"); mvs_hexdump(slot->cmd_size, (u8 *) slot->buf, (u32) slot->buf_dma); /*2.open address frame area */ - dev_printk(KERN_DEBUG, &pdev->dev, "+---->Open Address Frame :\n"); + dev_printk(KERN_DEBUG, mvi->dev, "+---->Open Address Frame :\n"); mvs_hexdump(MVS_OAF_SZ, (u8 *) slot->buf + slot->cmd_size, (u32) slot->buf_dma + slot->cmd_size); /*3.status buffer */ mvs_hba_sb_dump(mvi, tag, proto); /*4.PRD table */ - dev_printk(KERN_DEBUG, &pdev->dev, "+---->PRD table :\n"); - mvs_hexdump(sizeof(struct mvs_prd) * slot->n_elem, + dev_printk(KERN_DEBUG, mvi->dev, "+---->PRD table :\n"); + mvs_hexdump(MVS_CHIP_DISP->prd_size() * slot->n_elem, (u8 *) slot->buf + slot->cmd_size + MVS_OAF_SZ, (u32) slot->buf_dma + slot->cmd_size + MVS_OAF_SZ); #endif @@ -206,15 +172,14 @@ static void mvs_hba_cq_dump(struct mvs_info *mvi) #if (_MV_DUMP > 2) u64 addr; void __iomem *regs = mvi->regs; - struct pci_dev *pdev = mvi->pdev; u32 entry = mvi->rx_cons + 1; u32 rx_desc = le32_to_cpu(mvi->rx[entry]); /*Completion Queue */ addr = mr32(RX_HI) << 16 << 16 | mr32(RX_LO); - dev_printk(KERN_DEBUG, &pdev->dev, "Completion Task = 0x%p\n", + dev_printk(KERN_DEBUG, mvi->dev, "Completion Task = 0x%p\n", mvi->slot_info[rx_desc & 
RXQ_SLOT_MASK].task); - dev_printk(KERN_DEBUG, &pdev->dev, + dev_printk(KERN_DEBUG, mvi->dev, "Completion List Base Address=0x%llX (PA), " "CQ_Entry=%04d, CQ_WP=0x%08X\n", addr, entry - 1, mvi->rx[0]); @@ -223,62 +188,174 @@ static void mvs_hba_cq_dump(struct mvs_info *mvi) #endif } -/* FIXME: locking? */ -int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func, void *funcdata) +void mvs_get_sas_addr(void *buf, u32 buflen) { - struct mvs_info *mvi = sas_phy->ha->lldd_ha; - int rc = 0, phy_id = sas_phy->id; - u32 tmp; + /*memcpy(buf, "\x50\x05\x04\x30\x11\xab\x64\x40", 8);*/ +} - tmp = mvs_read_phy_ctl(mvi, phy_id); +struct mvs_info *mvs_find_dev_mvi(struct domain_device *dev) +{ + unsigned long i = 0, j = 0, hi = 0; + struct sas_ha_struct *sha = dev->port->ha; + struct mvs_info *mvi = NULL; + struct asd_sas_phy *phy; + + while (sha->sas_port[i]) { + if (sha->sas_port[i] == dev->port) { + phy = container_of(sha->sas_port[i]->phy_list.next, + struct asd_sas_phy, port_phy_el); + j = 0; + while (sha->sas_phy[j]) { + if (sha->sas_phy[j] == phy) + break; + j++; + } + break; + } + i++; + } + hi = j/((struct mvs_prv_info *)sha->lldd_ha)->n_phy; + mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[hi]; - switch (func) { - case PHY_FUNC_SET_LINK_RATE:{ - struct sas_phy_linkrates *rates = funcdata; - u32 lrmin = 0, lrmax = 0; + return mvi; - lrmin = (rates->minimum_linkrate << 8); - lrmax = (rates->maximum_linkrate << 12); +} - if (lrmin) { - tmp &= ~(0xf << 8); - tmp |= lrmin; - } - if (lrmax) { - tmp &= ~(0xf << 12); - tmp |= lrmax; +/* FIXME */ +int mvs_find_dev_phyno(struct domain_device *dev, int *phyno) +{ + unsigned long i = 0, j = 0, n = 0, num = 0; + struct mvs_info *mvi = mvs_find_dev_mvi(dev); + struct sas_ha_struct *sha = dev->port->ha; + + while (sha->sas_port[i]) { + if (sha->sas_port[i] == dev->port) { + struct asd_sas_phy *phy; + list_for_each_entry(phy, + &sha->sas_port[i]->phy_list, port_phy_el) { + j = 0; + while (sha->sas_phy[j]) { + if (sha->sas_phy[j] == phy) + break; + j++; + } + phyno[n] = (j >= mvi->chip->n_phy) ? + (j - mvi->chip->n_phy) : j; + num++; + n++; } - mvs_write_phy_ctl(mvi, phy_id, tmp); break; } + i++; + } + return num; +} + +static inline void mvs_free_reg_set(struct mvs_info *mvi, + struct mvs_device *dev) +{ + if (!dev) { + mv_printk("device has been free.\n"); + return; + } + if (dev->runing_req != 0) + return; + if (dev->taskfileset == MVS_ID_NOT_MAPPED) + return; + MVS_CHIP_DISP->free_reg_set(mvi, &dev->taskfileset); +} + +static inline u8 mvs_assign_reg_set(struct mvs_info *mvi, + struct mvs_device *dev) +{ + if (dev->taskfileset != MVS_ID_NOT_MAPPED) + return 0; + return MVS_CHIP_DISP->assign_reg_set(mvi, &dev->taskfileset); +} + +void mvs_phys_reset(struct mvs_info *mvi, u32 phy_mask, int hard) +{ + u32 no; + for_each_phy(phy_mask, phy_mask, no) { + if (!(phy_mask & 1)) + continue; + MVS_CHIP_DISP->phy_reset(mvi, no, hard); + } +} + +/* FIXME: locking? 
*/ +int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func, + void *funcdata) +{ + int rc = 0, phy_id = sas_phy->id; + u32 tmp, i = 0, hi; + struct sas_ha_struct *sha = sas_phy->ha; + struct mvs_info *mvi = NULL; + + while (sha->sas_phy[i]) { + if (sha->sas_phy[i] == sas_phy) + break; + i++; + } + hi = i/((struct mvs_prv_info *)sha->lldd_ha)->n_phy; + mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[hi]; + + switch (func) { + case PHY_FUNC_SET_LINK_RATE: + MVS_CHIP_DISP->phy_set_link_rate(mvi, phy_id, funcdata); + break; case PHY_FUNC_HARD_RESET: + tmp = MVS_CHIP_DISP->read_phy_ctl(mvi, phy_id); if (tmp & PHY_RST_HARD) break; - mvs_write_phy_ctl(mvi, phy_id, tmp | PHY_RST_HARD); + MVS_CHIP_DISP->phy_reset(mvi, phy_id, 1); break; case PHY_FUNC_LINK_RESET: - mvs_write_phy_ctl(mvi, phy_id, tmp | PHY_RST); + MVS_CHIP_DISP->phy_enable(mvi, phy_id); + MVS_CHIP_DISP->phy_reset(mvi, phy_id, 0); break; case PHY_FUNC_DISABLE: + MVS_CHIP_DISP->phy_disable(mvi, phy_id); + break; case PHY_FUNC_RELEASE_SPINUP_HOLD: default: rc = -EOPNOTSUPP; } - + msleep(200); return rc; } +void __devinit mvs_set_sas_addr(struct mvs_info *mvi, int port_id, + u32 off_lo, u32 off_hi, u64 sas_addr) +{ + u32 lo = (u32)sas_addr; + u32 hi = (u32)(sas_addr>>32); + + MVS_CHIP_DISP->write_port_cfg_addr(mvi, port_id, off_lo); + MVS_CHIP_DISP->write_port_cfg_data(mvi, port_id, lo); + MVS_CHIP_DISP->write_port_cfg_addr(mvi, port_id, off_hi); + MVS_CHIP_DISP->write_port_cfg_data(mvi, port_id, hi); +} + static void mvs_bytes_dmaed(struct mvs_info *mvi, int i) { struct mvs_phy *phy = &mvi->phy[i]; - struct asd_sas_phy *sas_phy = mvi->sas.sas_phy[i]; - + struct asd_sas_phy *sas_phy = &phy->sas_phy; + struct sas_ha_struct *sas_ha; if (!phy->phy_attached) return; + if (!(phy->att_dev_info & PORT_DEV_TRGT_MASK) + && phy->phy_type & PORT_TYPE_SAS) { + return; + } + + sas_ha = mvi->sas; + sas_ha->notify_phy_event(sas_phy, PHYE_OOB_DONE); + if (sas_phy->phy) { struct sas_phy *sphy = sas_phy->phy; @@ -286,7 +363,7 @@ static void mvs_bytes_dmaed(struct mvs_info *mvi, int i) sphy->minimum_linkrate = phy->minimum_linkrate; sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS; sphy->maximum_linkrate = phy->maximum_linkrate; - sphy->maximum_linkrate_hw = SAS_LINK_RATE_3_0_GBPS; + sphy->maximum_linkrate_hw = MVS_CHIP_DISP->phy_max_link_rate(); } if (phy->phy_type & PORT_TYPE_SAS) { @@ -297,13 +374,31 @@ static void mvs_bytes_dmaed(struct mvs_info *mvi, int i) id->initiator_bits = SAS_PROTOCOL_ALL; id->target_bits = phy->identify.target_port_protocols; } else if (phy->phy_type & PORT_TYPE_SATA) { - /* TODO */ + /*Nothing*/ } - mvi->sas.sas_phy[i]->frame_rcvd_size = phy->frame_rcvd_size; - mvi->sas.notify_port_event(mvi->sas.sas_phy[i], + mv_dprintk("phy %d byte dmaded.\n", i + mvi->id * mvi->chip->n_phy); + + sas_phy->frame_rcvd_size = phy->frame_rcvd_size; + + mvi->sas->notify_port_event(sas_phy, PORTE_BYTES_DMAED); } +int mvs_slave_alloc(struct scsi_device *scsi_dev) +{ + struct domain_device *dev = sdev_to_domain_dev(scsi_dev); + if (dev_is_sata(dev)) { + /* We don't need to rescan targets + * if REPORT_LUNS request is failed + */ + if (scsi_dev->lun > 0) + return -ENXIO; + scsi_dev->tagged_supported = 1; + } + + return sas_slave_alloc(scsi_dev); +} + int mvs_slave_configure(struct scsi_device *sdev) { struct domain_device *dev = sdev_to_domain_dev(sdev); @@ -311,25 +406,31 @@ int mvs_slave_configure(struct scsi_device *sdev) if (ret) return ret; - if (dev_is_sata(dev)) { - /* struct ata_port *ap = dev->sata_dev.ap; */ - /* struct 
ata_device *adev = ap->link.device; */ - - /* clamp at no NCQ for the time being */ - /* adev->flags |= ATA_DFLAG_NCQ_OFF; */ + /* may set PIO mode */ + #if MV_DISABLE_NCQ + struct ata_port *ap = dev->sata_dev.ap; + struct ata_device *adev = ap->link.device; + adev->flags |= ATA_DFLAG_NCQ_OFF; scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, 1); + #endif } return 0; } void mvs_scan_start(struct Scsi_Host *shost) { - int i; - struct mvs_info *mvi = SHOST_TO_SAS_HA(shost)->lldd_ha; + int i, j; + unsigned short core_nr; + struct mvs_info *mvi; + struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); - for (i = 0; i < mvi->chip->n_phy; ++i) { - mvs_bytes_dmaed(mvi, i); + core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host; + + for (j = 0; j < core_nr; j++) { + mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[j]; + for (i = 0; i < mvi->chip->n_phy; ++i) + mvs_bytes_dmaed(mvi, i); } } @@ -350,15 +451,15 @@ static int mvs_task_prep_smp(struct mvs_info *mvi, int elem, rc, i; struct sas_task *task = tei->task; struct mvs_cmd_hdr *hdr = tei->hdr; + struct domain_device *dev = task->dev; + struct asd_sas_port *sas_port = dev->port; struct scatterlist *sg_req, *sg_resp; u32 req_len, resp_len, tag = tei->tag; void *buf_tmp; u8 *buf_oaf; dma_addr_t buf_tmp_dma; - struct mvs_prd *buf_prd; - struct scatterlist *sg; + void *buf_prd; struct mvs_slot_info *slot = &mvi->slot_info[tag]; - struct asd_sas_port *sas_port = task->dev->port; u32 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT); #if _MV_DUMP u8 *buf_cmd; @@ -368,18 +469,18 @@ static int mvs_task_prep_smp(struct mvs_info *mvi, * DMA-map SMP request, response buffers */ sg_req = &task->smp_task.smp_req; - elem = pci_map_sg(mvi->pdev, sg_req, 1, PCI_DMA_TODEVICE); + elem = dma_map_sg(mvi->dev, sg_req, 1, PCI_DMA_TODEVICE); if (!elem) return -ENOMEM; req_len = sg_dma_len(sg_req); sg_resp = &task->smp_task.smp_resp; - elem = pci_map_sg(mvi->pdev, sg_resp, 1, PCI_DMA_FROMDEVICE); + elem = dma_map_sg(mvi->dev, sg_resp, 1, PCI_DMA_FROMDEVICE); if (!elem) { rc = -ENOMEM; goto err_out; } - resp_len = sg_dma_len(sg_resp); + resp_len = SB_RFB_MAX; /* must be in dwords */ if ((req_len & 0x3) || (resp_len & 0x3)) { @@ -391,7 +492,7 @@ static int mvs_task_prep_smp(struct mvs_info *mvi, * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs */ - /* region 1: command table area (MVS_SSP_CMD_SZ bytes) ************** */ + /* region 1: command table area (MVS_SSP_CMD_SZ bytes) ***** */ buf_tmp = slot->buf; buf_tmp_dma = slot->buf_dma; @@ -412,20 +513,22 @@ static int mvs_task_prep_smp(struct mvs_info *mvi, buf_tmp += MVS_OAF_SZ; buf_tmp_dma += MVS_OAF_SZ; - /* region 3: PRD table ********************************************* */ + /* region 3: PRD table *********************************** */ buf_prd = buf_tmp; if (tei->n_elem) hdr->prd_tbl = cpu_to_le64(buf_tmp_dma); else hdr->prd_tbl = 0; - i = sizeof(struct mvs_prd) * tei->n_elem; + i = MVS_CHIP_DISP->prd_size() * tei->n_elem; buf_tmp += i; buf_tmp_dma += i; /* region 4: status buffer (larger the PRD, smaller this buf) ****** */ slot->response = buf_tmp; hdr->status_buf = cpu_to_le64(buf_tmp_dma); + if (mvi->flags & MVF_FLAG_SOC) + hdr->reserved[0] = 0; /* * Fill in TX ring and command slot header @@ -441,17 +544,14 @@ static int mvs_task_prep_smp(struct mvs_info *mvi, hdr->data_len = 0; /* generate open address frame hdr (first 12 bytes) */ - buf_oaf[0] = (1 << 7) | (0 << 4) | 0x01; /* initiator, SMP, ftype 1h */ - buf_oaf[1] = task->dev->linkrate & 0xf; + /* initiator, SMP, ftype 1h */ + buf_oaf[0] = (1 << 7) | 
(PROTOCOL_SMP << 4) | 0x01; + buf_oaf[1] = dev->linkrate & 0xf; *(u16 *)(buf_oaf + 2) = 0xFFFF; /* SAS SPEC */ - memcpy(buf_oaf + 4, task->dev->sas_addr, SAS_ADDR_SIZE); + memcpy(buf_oaf + 4, dev->sas_addr, SAS_ADDR_SIZE); /* fill in PRD (scatter/gather) table, if any */ - for_each_sg(task->scatter, sg, tei->n_elem, i) { - buf_prd->addr = cpu_to_le64(sg_dma_address(sg)); - buf_prd->len = cpu_to_le32(sg_dma_len(sg)); - buf_prd++; - } + MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd); #if _MV_DUMP /* copy cmd table */ @@ -462,10 +562,10 @@ static int mvs_task_prep_smp(struct mvs_info *mvi, return 0; err_out_2: - pci_unmap_sg(mvi->pdev, &tei->task->smp_task.smp_resp, 1, + dma_unmap_sg(mvi->dev, &tei->task->smp_task.smp_resp, 1, PCI_DMA_FROMDEVICE); err_out: - pci_unmap_sg(mvi->pdev, &tei->task->smp_task.smp_req, 1, + dma_unmap_sg(mvi->dev, &tei->task->smp_task.smp_req, 1, PCI_DMA_TODEVICE); return rc; } @@ -490,30 +590,41 @@ static int mvs_task_prep_ata(struct mvs_info *mvi, { struct sas_task *task = tei->task; struct domain_device *dev = task->dev; + struct mvs_device *mvi_dev = + (struct mvs_device *)dev->lldd_dev; struct mvs_cmd_hdr *hdr = tei->hdr; struct asd_sas_port *sas_port = dev->port; struct mvs_slot_info *slot; - struct scatterlist *sg; - struct mvs_prd *buf_prd; - struct mvs_port *port = tei->port; - u32 tag = tei->tag; - u32 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT); + void *buf_prd; + u32 tag = tei->tag, hdr_tag; + u32 flags, del_q; void *buf_tmp; u8 *buf_cmd, *buf_oaf; dma_addr_t buf_tmp_dma; u32 i, req_len, resp_len; const u32 max_resp_len = SB_RFB_MAX; - if (mvs_assign_reg_set(mvi, port) == MVS_ID_NOT_MAPPED) + if (mvs_assign_reg_set(mvi, mvi_dev) == MVS_ID_NOT_MAPPED) { + mv_dprintk("Have not enough regiset for dev %d.\n", + mvi_dev->device_id); return -EBUSY; - + } slot = &mvi->slot_info[tag]; slot->tx = mvi->tx_prod; - mvi->tx[mvi->tx_prod] = cpu_to_le32(TXQ_MODE_I | tag | - (TXQ_CMD_STP << TXQ_CMD_SHIFT) | - (sas_port->phy_mask << TXQ_PHY_SHIFT) | - (port->taskfileset << TXQ_SRS_SHIFT)); - + del_q = TXQ_MODE_I | tag | + (TXQ_CMD_STP << TXQ_CMD_SHIFT) | + (sas_port->phy_mask << TXQ_PHY_SHIFT) | + (mvi_dev->taskfileset << TXQ_SRS_SHIFT); + mvi->tx[mvi->tx_prod] = cpu_to_le32(del_q); + +#ifndef DISABLE_HOTPLUG_DMA_FIX + if (task->data_dir == DMA_FROM_DEVICE) + flags = (MVS_CHIP_DISP->prd_count() << MCH_PRD_LEN_SHIFT); + else + flags = (tei->n_elem << MCH_PRD_LEN_SHIFT); +#else + flags = (tei->n_elem << MCH_PRD_LEN_SHIFT); +#endif if (task->ata_task.use_ncq) flags |= MCH_FPDMA; if (dev->sata_dev.command_set == ATAPI_COMMAND_SET) { @@ -526,10 +637,13 @@ static int mvs_task_prep_ata(struct mvs_info *mvi, hdr->flags = cpu_to_le32(flags); /* FIXME: the low order order 5 bits for the TAG if enable NCQ */ - if (task->ata_task.use_ncq && mvs_get_ncq_tag(task, &hdr->tags)) - task->ata_task.fis.sector_count |= hdr->tags << 3; + if (task->ata_task.use_ncq && mvs_get_ncq_tag(task, &hdr_tag)) + task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3); else - hdr->tags = cpu_to_le32(tag); + hdr_tag = tag; + + hdr->tags = cpu_to_le32(hdr_tag); + hdr->data_len = cpu_to_le32(task->total_xfer_len); /* @@ -558,12 +672,13 @@ static int mvs_task_prep_ata(struct mvs_info *mvi, /* region 3: PRD table ********************************************* */ buf_prd = buf_tmp; + if (tei->n_elem) hdr->prd_tbl = cpu_to_le64(buf_tmp_dma); else hdr->prd_tbl = 0; + i = MVS_CHIP_DISP->prd_size() * MVS_CHIP_DISP->prd_count(); - i = sizeof(struct mvs_prd) * tei->n_elem; buf_tmp += i; buf_tmp_dma += i; 
@@ -573,6 +688,8 @@ static int mvs_task_prep_ata(struct mvs_info *mvi, */ slot->response = buf_tmp; hdr->status_buf = cpu_to_le64(buf_tmp_dma); + if (mvi->flags & MVF_FLAG_SOC) + hdr->reserved[0] = 0; req_len = sizeof(struct host_to_dev_fis); resp_len = MVS_SLOT_BUF_SZ - MVS_ATA_CMD_SZ - @@ -582,7 +699,8 @@ static int mvs_task_prep_ata(struct mvs_info *mvi, resp_len = min(resp_len, max_resp_len); hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4)); - task->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */ + if (likely(!task->ata_task.device_control_reg_update)) + task->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */ /* fill in command FIS and ATAPI CDB */ memcpy(buf_cmd, &task->ata_task.fis, sizeof(struct host_to_dev_fis)); if (dev->sata_dev.command_set == ATAPI_COMMAND_SET) @@ -590,30 +708,35 @@ static int mvs_task_prep_ata(struct mvs_info *mvi, task->ata_task.atapi_packet, 16); /* generate open address frame hdr (first 12 bytes) */ - buf_oaf[0] = (1 << 7) | (2 << 4) | 0x1; /* initiator, STP, ftype 1h */ - buf_oaf[1] = task->dev->linkrate & 0xf; - *(u16 *)(buf_oaf + 2) = cpu_to_be16(tag); - memcpy(buf_oaf + 4, task->dev->sas_addr, SAS_ADDR_SIZE); + /* initiator, STP, ftype 1h */ + buf_oaf[0] = (1 << 7) | (PROTOCOL_STP << 4) | 0x1; + buf_oaf[1] = dev->linkrate & 0xf; + *(u16 *)(buf_oaf + 2) = cpu_to_be16(mvi_dev->device_id + 1); + memcpy(buf_oaf + 4, dev->sas_addr, SAS_ADDR_SIZE); /* fill in PRD (scatter/gather) table, if any */ - for_each_sg(task->scatter, sg, tei->n_elem, i) { - buf_prd->addr = cpu_to_le64(sg_dma_address(sg)); - buf_prd->len = cpu_to_le32(sg_dma_len(sg)); - buf_prd++; - } - + MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd); +#ifndef DISABLE_HOTPLUG_DMA_FIX + if (task->data_dir == DMA_FROM_DEVICE) + MVS_CHIP_DISP->dma_fix(mvi->bulk_buffer_dma, + TRASH_BUCKET_SIZE, tei->n_elem, buf_prd); +#endif return 0; } static int mvs_task_prep_ssp(struct mvs_info *mvi, - struct mvs_task_exec_info *tei) + struct mvs_task_exec_info *tei, int is_tmf, + struct mvs_tmf_task *tmf) { struct sas_task *task = tei->task; struct mvs_cmd_hdr *hdr = tei->hdr; struct mvs_port *port = tei->port; + struct domain_device *dev = task->dev; + struct mvs_device *mvi_dev = + (struct mvs_device *)dev->lldd_dev; + struct asd_sas_port *sas_port = dev->port; struct mvs_slot_info *slot; - struct scatterlist *sg; - struct mvs_prd *buf_prd; + void *buf_prd; struct ssp_frame_hdr *ssp_hdr; void *buf_tmp; u8 *buf_cmd, *buf_oaf, fburst = 0; @@ -621,12 +744,13 @@ static int mvs_task_prep_ssp(struct mvs_info *mvi, u32 flags; u32 resp_len, req_len, i, tag = tei->tag; const u32 max_resp_len = SB_RFB_MAX; - u8 phy_mask; + u32 phy_mask; slot = &mvi->slot_info[tag]; - phy_mask = (port->wide_port_phymap) ? port->wide_port_phymap : - task->dev->port->phy_mask; + phy_mask = ((port->wide_port_phymap) ? 
port->wide_port_phymap : + sas_port->phy_mask) & TXQ_PHY_MASK; + slot->tx = mvi->tx_prod; mvi->tx[mvi->tx_prod] = cpu_to_le32(TXQ_MODE_I | tag | (TXQ_CMD_SSP << TXQ_CMD_SHIFT) | @@ -640,7 +764,6 @@ static int mvs_task_prep_ssp(struct mvs_info *mvi, hdr->flags = cpu_to_le32(flags | (tei->n_elem << MCH_PRD_LEN_SHIFT) | (MCH_SSP_FR_CMD << MCH_SSP_FR_TYPE_SHIFT)); - hdr->tags = cpu_to_le32(tag); hdr->data_len = cpu_to_le32(task->total_xfer_len); @@ -674,13 +797,15 @@ static int mvs_task_prep_ssp(struct mvs_info *mvi, else hdr->prd_tbl = 0; - i = sizeof(struct mvs_prd) * tei->n_elem; + i = MVS_CHIP_DISP->prd_size() * tei->n_elem; buf_tmp += i; buf_tmp_dma += i; /* region 4: status buffer (larger the PRD, smaller this buf) ****** */ slot->response = buf_tmp; hdr->status_buf = cpu_to_le64(buf_tmp_dma); + if (mvi->flags & MVF_FLAG_SOC) + hdr->reserved[0] = 0; resp_len = MVS_SLOT_BUF_SZ - MVS_SSP_CMD_SZ - MVS_OAF_SZ - sizeof(struct mvs_err_info) - i; @@ -692,57 +817,105 @@ static int mvs_task_prep_ssp(struct mvs_info *mvi, hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4)); /* generate open address frame hdr (first 12 bytes) */ - buf_oaf[0] = (1 << 7) | (1 << 4) | 0x1; /* initiator, SSP, ftype 1h */ - buf_oaf[1] = task->dev->linkrate & 0xf; - *(u16 *)(buf_oaf + 2) = cpu_to_be16(tag); - memcpy(buf_oaf + 4, task->dev->sas_addr, SAS_ADDR_SIZE); + /* initiator, SSP, ftype 1h */ + buf_oaf[0] = (1 << 7) | (PROTOCOL_SSP << 4) | 0x1; + buf_oaf[1] = dev->linkrate & 0xf; + *(u16 *)(buf_oaf + 2) = cpu_to_be16(mvi_dev->device_id + 1); + memcpy(buf_oaf + 4, dev->sas_addr, SAS_ADDR_SIZE); /* fill in SSP frame header (Command Table.SSP frame header) */ ssp_hdr = (struct ssp_frame_hdr *)buf_cmd; - ssp_hdr->frame_type = SSP_COMMAND; - memcpy(ssp_hdr->hashed_dest_addr, task->dev->hashed_sas_addr, + + if (is_tmf) + ssp_hdr->frame_type = SSP_TASK; + else + ssp_hdr->frame_type = SSP_COMMAND; + + memcpy(ssp_hdr->hashed_dest_addr, dev->hashed_sas_addr, HASHED_SAS_ADDR_SIZE); memcpy(ssp_hdr->hashed_src_addr, - task->dev->port->ha->hashed_sas_addr, HASHED_SAS_ADDR_SIZE); + dev->hashed_sas_addr, HASHED_SAS_ADDR_SIZE); ssp_hdr->tag = cpu_to_be16(tag); - /* fill in command frame IU */ + /* fill in IU for TASK and Command Frame */ buf_cmd += sizeof(*ssp_hdr); memcpy(buf_cmd, &task->ssp_task.LUN, 8); - buf_cmd[9] = fburst | task->ssp_task.task_attr | - (task->ssp_task.task_prio << 3); - memcpy(buf_cmd + 12, &task->ssp_task.cdb, 16); - /* fill in PRD (scatter/gather) table, if any */ - for_each_sg(task->scatter, sg, tei->n_elem, i) { - buf_prd->addr = cpu_to_le64(sg_dma_address(sg)); - buf_prd->len = cpu_to_le32(sg_dma_len(sg)); - buf_prd++; + if (ssp_hdr->frame_type != SSP_TASK) { + buf_cmd[9] = fburst | task->ssp_task.task_attr | + (task->ssp_task.task_prio << 3); + memcpy(buf_cmd + 12, &task->ssp_task.cdb, 16); + } else{ + buf_cmd[10] = tmf->tmf; + switch (tmf->tmf) { + case TMF_ABORT_TASK: + case TMF_QUERY_TASK: + buf_cmd[12] = + (tmf->tag_of_task_to_be_managed >> 8) & 0xff; + buf_cmd[13] = + tmf->tag_of_task_to_be_managed & 0xff; + break; + default: + break; + } } - + /* fill in PRD (scatter/gather) table, if any */ + MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd); return 0; } -int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags) +#define DEV_IS_GONE(mvi_dev) ((!mvi_dev || (mvi_dev->dev_type == NO_DEVICE))) +static int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags, + struct completion *completion, int lock, + int is_tmf, struct mvs_tmf_task *tmf) { struct 
domain_device *dev = task->dev; - struct mvs_info *mvi = dev->port->ha->lldd_ha; - struct pci_dev *pdev = mvi->pdev; - void __iomem *regs = mvi->regs; + struct mvs_info *mvi; + struct mvs_device *mvi_dev; struct mvs_task_exec_info tei; struct sas_task *t = task; struct mvs_slot_info *slot; u32 tag = 0xdeadbeef, rc, n_elem = 0; - unsigned long flags; u32 n = num, pass = 0; + unsigned long flags = 0; - spin_lock_irqsave(&mvi->lock, flags); + if (!dev->port) { + struct task_status_struct *tsm = &t->task_status; + + tsm->resp = SAS_TASK_UNDELIVERED; + tsm->stat = SAS_PHY_DOWN; + t->task_done(t); + return 0; + } + + mvi = mvs_find_dev_mvi(task->dev); + + if (lock) + spin_lock_irqsave(&mvi->lock, flags); do { dev = t->dev; - tei.port = &mvi->port[dev->port->id]; + mvi_dev = (struct mvs_device *)dev->lldd_dev; + if (DEV_IS_GONE(mvi_dev)) { + if (mvi_dev) + mv_dprintk("device %d not ready.\n", + mvi_dev->device_id); + else + mv_dprintk("device %016llx not ready.\n", + SAS_ADDR(dev->sas_addr)); + + rc = SAS_PHY_DOWN; + goto out_done; + } + + if (dev->port->id >= mvi->chip->n_phy) + tei.port = &mvi->port[dev->port->id - mvi->chip->n_phy]; + else + tei.port = &mvi->port[dev->port->id]; if (!tei.port->port_attached) { if (sas_protocol_ata(t->task_proto)) { + mv_dprintk("port %d does not" + "attached device.\n", dev->port->id); rc = SAS_PHY_DOWN; goto out_done; } else { @@ -759,7 +932,8 @@ int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags) if (!sas_protocol_ata(t->task_proto)) { if (t->num_scatter) { - n_elem = pci_map_sg(mvi->pdev, t->scatter, + n_elem = dma_map_sg(mvi->dev, + t->scatter, t->num_scatter, t->data_dir); if (!n_elem) { @@ -776,20 +950,23 @@ int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags) goto err_out; slot = &mvi->slot_info[tag]; + + t->lldd_task = NULL; slot->n_elem = n_elem; + slot->slot_tag = tag; memset(slot->buf, 0, MVS_SLOT_BUF_SZ); + tei.task = t; tei.hdr = &mvi->slot[tag]; tei.tag = tag; tei.n_elem = n_elem; - switch (t->task_proto) { case SAS_PROTOCOL_SMP: rc = mvs_task_prep_smp(mvi, &tei); break; case SAS_PROTOCOL_SSP: - rc = mvs_task_prep_ssp(mvi, &tei); + rc = mvs_task_prep_ssp(mvi, &tei, is_tmf, tmf); break; case SAS_PROTOCOL_SATA: case SAS_PROTOCOL_STP: @@ -797,52 +974,61 @@ int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags) rc = mvs_task_prep_ata(mvi, &tei); break; default: - dev_printk(KERN_ERR, &pdev->dev, + dev_printk(KERN_ERR, mvi->dev, "unknown sas_task proto: 0x%x\n", t->task_proto); rc = -EINVAL; break; } - if (rc) + if (rc) { + mv_dprintk("rc is %x\n", rc); goto err_out_tag; - + } slot->task = t; slot->port = tei.port; t->lldd_task = (void *) slot; - list_add_tail(&slot->list, &slot->port->list); + list_add_tail(&slot->entry, &tei.port->list); /* TODO: select normal or high priority */ - spin_lock(&t->task_state_lock); t->task_state_flags |= SAS_TASK_AT_INITIATOR; spin_unlock(&t->task_state_lock); mvs_hba_memory_dump(mvi, tag, t->task_proto); - + mvi_dev->runing_req++; ++pass; mvi->tx_prod = (mvi->tx_prod + 1) & (MVS_CHIP_SLOT_SZ - 1); if (n > 1) t = list_entry(t->list.next, struct sas_task, list); } while (--n); - rc = 0; goto out_done; err_out_tag: mvs_tag_free(mvi, tag); err_out: - dev_printk(KERN_ERR, &pdev->dev, "mvsas exec failed[%d]!\n", rc); + + dev_printk(KERN_ERR, mvi->dev, "mvsas exec failed[%d]!\n", rc); if (!sas_protocol_ata(t->task_proto)) if (n_elem) - pci_unmap_sg(mvi->pdev, t->scatter, n_elem, + dma_unmap_sg(mvi->dev, t->scatter, n_elem, t->data_dir); out_done: - if (pass) - 
mw32(TX_PROD_IDX, (mvi->tx_prod - 1) & (MVS_CHIP_SLOT_SZ - 1)); - spin_unlock_irqrestore(&mvi->lock, flags); + if (likely(pass)) { + MVS_CHIP_DISP->start_delivery(mvi, + (mvi->tx_prod - 1) & (MVS_CHIP_SLOT_SZ - 1)); + } + if (lock) + spin_unlock_irqrestore(&mvi->lock, flags); return rc; } +int mvs_queue_command(struct sas_task *task, const int num, + gfp_t gfp_flags) +{ + return mvs_task_exec(task, num, gfp_flags, NULL, 1, 0, NULL); +} + static void mvs_slot_free(struct mvs_info *mvi, u32 rx_desc) { u32 slot_idx = rx_desc & RXQ_SLOT_MASK; @@ -852,16 +1038,18 @@ static void mvs_slot_free(struct mvs_info *mvi, u32 rx_desc) static void mvs_slot_task_free(struct mvs_info *mvi, struct sas_task *task, struct mvs_slot_info *slot, u32 slot_idx) { + if (!slot->task) + return; if (!sas_protocol_ata(task->task_proto)) if (slot->n_elem) - pci_unmap_sg(mvi->pdev, task->scatter, + dma_unmap_sg(mvi->dev, task->scatter, slot->n_elem, task->data_dir); switch (task->task_proto) { case SAS_PROTOCOL_SMP: - pci_unmap_sg(mvi->pdev, &task->smp_task.smp_resp, 1, + dma_unmap_sg(mvi->dev, &task->smp_task.smp_resp, 1, PCI_DMA_FROMDEVICE); - pci_unmap_sg(mvi->pdev, &task->smp_task.smp_req, 1, + dma_unmap_sg(mvi->dev, &task->smp_task.smp_req, 1, PCI_DMA_TODEVICE); break; @@ -872,10 +1060,12 @@ static void mvs_slot_task_free(struct mvs_info *mvi, struct sas_task *task, /* do nothing */ break; } - list_del(&slot->list); + list_del_init(&slot->entry); task->lldd_task = NULL; slot->task = NULL; slot->port = NULL; + slot->slot_tag = 0xFFFFFFFF; + mvs_slot_free(mvi, slot_idx); } static void mvs_update_wideport(struct mvs_info *mvi, int i) @@ -884,25 +1074,28 @@ static void mvs_update_wideport(struct mvs_info *mvi, int i) struct mvs_port *port = phy->port; int j, no; - for_each_phy(port->wide_port_phymap, no, j, mvi->chip->n_phy) - if (no & 1) { - mvs_write_port_cfg_addr(mvi, no, PHYR_WIDE_PORT); - mvs_write_port_cfg_data(mvi, no, + for_each_phy(port->wide_port_phymap, j, no) { + if (j & 1) { + MVS_CHIP_DISP->write_port_cfg_addr(mvi, no, + PHYR_WIDE_PORT); + MVS_CHIP_DISP->write_port_cfg_data(mvi, no, port->wide_port_phymap); } else { - mvs_write_port_cfg_addr(mvi, no, PHYR_WIDE_PORT); - mvs_write_port_cfg_data(mvi, no, 0); + MVS_CHIP_DISP->write_port_cfg_addr(mvi, no, + PHYR_WIDE_PORT); + MVS_CHIP_DISP->write_port_cfg_data(mvi, no, + 0); } + } } static u32 mvs_is_phy_ready(struct mvs_info *mvi, int i) { u32 tmp; struct mvs_phy *phy = &mvi->phy[i]; - struct mvs_port *port = phy->port;; - - tmp = mvs_read_phy_ctl(mvi, i); + struct mvs_port *port = phy->port; + tmp = MVS_CHIP_DISP->read_phy_ctl(mvi, i); if ((tmp & PHY_READY_MASK) && !(phy->irq_status & PHYEV_POOF)) { if (!port) phy->phy_attached = 1; @@ -917,7 +1110,6 @@ static u32 mvs_is_phy_ready(struct mvs_info *mvi, int i) mvs_update_wideport(mvi, i); } else if (phy->phy_type & PORT_TYPE_SATA) port->port_attached = 0; - mvs_free_reg_set(mvi, phy->port); phy->port = NULL; phy->phy_attached = 0; phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA); @@ -932,17 +1124,21 @@ static void *mvs_get_d2h_reg(struct mvs_info *mvi, int i, void *buf) if (!s) return NULL; - mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG3); - s[3] = mvs_read_port_cfg_data(mvi, i); + MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG3); + s[3] = MVS_CHIP_DISP->read_port_cfg_data(mvi, i); + + MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG2); + s[2] = MVS_CHIP_DISP->read_port_cfg_data(mvi, i); - mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG2); - s[2] = mvs_read_port_cfg_data(mvi, i); + 
MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG1); + s[1] = MVS_CHIP_DISP->read_port_cfg_data(mvi, i); - mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG1); - s[1] = mvs_read_port_cfg_data(mvi, i); + MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG0); + s[0] = MVS_CHIP_DISP->read_port_cfg_data(mvi, i); - mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG0); - s[0] = mvs_read_port_cfg_data(mvi, i); + /* Workaround: take some ATAPI devices for ATA */ + if (((s[1] & 0x00FFFFFF) == 0x00EB1401) && (*(u8 *)&s[3] == 0x01)) + s[1] = 0x00EB1401 | (*((u8 *)&s[1] + 3) & 0x10); return (void *)s; } @@ -952,56 +1148,53 @@ static u32 mvs_is_sig_fis_received(u32 irq_status) return irq_status & PHYEV_SIG_FIS; } -static void mvs_update_phyinfo(struct mvs_info *mvi, int i, - int get_st) +void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st) { struct mvs_phy *phy = &mvi->phy[i]; - struct pci_dev *pdev = mvi->pdev; - u32 tmp; - u64 tmp64; - - mvs_write_port_cfg_addr(mvi, i, PHYR_IDENTIFY); - phy->dev_info = mvs_read_port_cfg_data(mvi, i); - - mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_HI); - phy->dev_sas_addr = (u64) mvs_read_port_cfg_data(mvi, i) << 32; + struct sas_identify_frame *id; - mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_LO); - phy->dev_sas_addr |= mvs_read_port_cfg_data(mvi, i); + id = (struct sas_identify_frame *)phy->frame_rcvd; if (get_st) { - phy->irq_status = mvs_read_port_irq_stat(mvi, i); + phy->irq_status = MVS_CHIP_DISP->read_port_irq_stat(mvi, i); phy->phy_status = mvs_is_phy_ready(mvi, i); } if (phy->phy_status) { - u32 phy_st; - struct asd_sas_phy *sas_phy = mvi->sas.sas_phy[i]; - - mvs_write_port_cfg_addr(mvi, i, PHYR_PHY_STAT); - phy_st = mvs_read_port_cfg_data(mvi, i); - - sas_phy->linkrate = - (phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >> - PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET; - phy->minimum_linkrate = - (phy->phy_status & - PHY_MIN_SPP_PHYS_LINK_RATE_MASK) >> 8; - phy->maximum_linkrate = - (phy->phy_status & - PHY_MAX_SPP_PHYS_LINK_RATE_MASK) >> 12; + int oob_done = 0; + struct asd_sas_phy *sas_phy = &mvi->phy[i].sas_phy; - if (phy->phy_type & PORT_TYPE_SAS) { - /* Updated attached_sas_addr */ - mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_ADDR_HI); - phy->att_dev_sas_addr = - (u64) mvs_read_port_cfg_data(mvi, i) << 32; - mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_ADDR_LO); - phy->att_dev_sas_addr |= mvs_read_port_cfg_data(mvi, i); - mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_DEV_INFO); - phy->att_dev_info = mvs_read_port_cfg_data(mvi, i); + oob_done = MVS_CHIP_DISP->oob_done(mvi, i); + + MVS_CHIP_DISP->fix_phy_info(mvi, i, id); + if (phy->phy_type & PORT_TYPE_SATA) { + phy->identify.target_port_protocols = SAS_PROTOCOL_STP; + if (mvs_is_sig_fis_received(phy->irq_status)) { + phy->phy_attached = 1; + phy->att_dev_sas_addr = + i + mvi->id * mvi->chip->n_phy; + if (oob_done) + sas_phy->oob_mode = SATA_OOB_MODE; + phy->frame_rcvd_size = + sizeof(struct dev_to_host_fis); + mvs_get_d2h_reg(mvi, i, (void *)id); + } else { + u32 tmp; + dev_printk(KERN_DEBUG, mvi->dev, + "Phy%d : No sig fis\n", i); + tmp = MVS_CHIP_DISP->read_port_irq_mask(mvi, i); + MVS_CHIP_DISP->write_port_irq_mask(mvi, i, + tmp | PHYEV_SIG_FIS); + phy->phy_attached = 0; + phy->phy_type &= ~PORT_TYPE_SATA; + MVS_CHIP_DISP->phy_reset(mvi, i, 0); + goto out_done; + } + } else if (phy->phy_type & PORT_TYPE_SAS + || phy->att_dev_info & PORT_SSP_INIT_MASK) { + phy->phy_attached = 1; phy->identify.device_type = - phy->att_dev_info & PORT_DEV_TYPE_MASK; + phy->att_dev_info & PORT_DEV_TYPE_MASK; if 
(phy->identify.device_type == SAS_END_DEV) phy->identify.target_port_protocols = @@ -1009,93 +1202,522 @@ static void mvs_update_phyinfo(struct mvs_info *mvi, int i, else if (phy->identify.device_type != NO_DEVICE) phy->identify.target_port_protocols = SAS_PROTOCOL_SMP; - if (phy_st & PHY_OOB_DTCTD) + if (oob_done) sas_phy->oob_mode = SAS_OOB_MODE; phy->frame_rcvd_size = sizeof(struct sas_identify_frame); - } else if (phy->phy_type & PORT_TYPE_SATA) { - phy->identify.target_port_protocols = SAS_PROTOCOL_STP; - if (mvs_is_sig_fis_received(phy->irq_status)) { - phy->att_dev_sas_addr = i; /* temp */ - if (phy_st & PHY_OOB_DTCTD) - sas_phy->oob_mode = SATA_OOB_MODE; - phy->frame_rcvd_size = - sizeof(struct dev_to_host_fis); - mvs_get_d2h_reg(mvi, i, - (void *)sas_phy->frame_rcvd); - } else { - dev_printk(KERN_DEBUG, &pdev->dev, - "No sig fis\n"); - phy->phy_type &= ~(PORT_TYPE_SATA); - goto out_done; - } } - tmp64 = cpu_to_be64(phy->att_dev_sas_addr); - memcpy(sas_phy->attached_sas_addr, &tmp64, SAS_ADDR_SIZE); - - dev_printk(KERN_DEBUG, &pdev->dev, - "phy[%d] Get Attached Address 0x%llX ," - " SAS Address 0x%llX\n", - i, - (unsigned long long)phy->att_dev_sas_addr, - (unsigned long long)phy->dev_sas_addr); - dev_printk(KERN_DEBUG, &pdev->dev, - "Rate = %x , type = %d\n", - sas_phy->linkrate, phy->phy_type); - - /* workaround for HW phy decoding error on 1.5g disk drive */ - mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE6); - tmp = mvs_read_port_vsr_data(mvi, i); - if (((phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >> - PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET) == - SAS_LINK_RATE_1_5_GBPS) - tmp &= ~PHY_MODE6_LATECLK; - else - tmp |= PHY_MODE6_LATECLK; - mvs_write_port_vsr_data(mvi, i, tmp); + memcpy(sas_phy->attached_sas_addr, + &phy->att_dev_sas_addr, SAS_ADDR_SIZE); + if (MVS_CHIP_DISP->phy_work_around) + MVS_CHIP_DISP->phy_work_around(mvi, i); } + mv_dprintk("port %d attach dev info is %x\n", + i + mvi->id * mvi->chip->n_phy, phy->att_dev_info); + mv_dprintk("port %d attach sas addr is %llx\n", + i + mvi->id * mvi->chip->n_phy, phy->att_dev_sas_addr); out_done: if (get_st) - mvs_write_port_irq_stat(mvi, i, phy->irq_status); + MVS_CHIP_DISP->write_port_irq_stat(mvi, i, phy->irq_status); } -void mvs_port_formed(struct asd_sas_phy *sas_phy) +static void mvs_port_notify_formed(struct asd_sas_phy *sas_phy, int lock) { struct sas_ha_struct *sas_ha = sas_phy->ha; - struct mvs_info *mvi = sas_ha->lldd_ha; - struct asd_sas_port *sas_port = sas_phy->port; + struct mvs_info *mvi = NULL; int i = 0, hi; struct mvs_phy *phy = sas_phy->lldd_phy; - struct mvs_port *port = &mvi->port[sas_port->id]; - unsigned long flags; + struct asd_sas_port *sas_port = sas_phy->port; + struct mvs_port *port; + unsigned long flags = 0; + if (!sas_port) + return; - spin_lock_irqsave(&mvi->lock, flags); + while (sas_ha->sas_phy[i]) { + if (sas_ha->sas_phy[i] == sas_phy) + break; + i++; + } + hi = i/((struct mvs_prv_info *)sas_ha->lldd_ha)->n_phy; + mvi = ((struct mvs_prv_info *)sas_ha->lldd_ha)->mvi[hi]; + if (sas_port->id >= mvi->chip->n_phy) + port = &mvi->port[sas_port->id - mvi->chip->n_phy]; + else + port = &mvi->port[sas_port->id]; + if (lock) + spin_lock_irqsave(&mvi->lock, flags); port->port_attached = 1; phy->port = port; - port->taskfileset = MVS_ID_NOT_MAPPED; if (phy->phy_type & PORT_TYPE_SAS) { port->wide_port_phymap = sas_port->phy_mask; + mv_printk("set wide port phy map %x\n", sas_port->phy_mask); mvs_update_wideport(mvi, sas_phy->id); } - spin_unlock_irqrestore(&mvi->lock, flags); + if (lock) + 
spin_unlock_irqrestore(&mvi->lock, flags); +} + +static void mvs_port_notify_deformed(struct asd_sas_phy *sas_phy, int lock) +{ + /*Nothing*/ +} + + +void mvs_port_formed(struct asd_sas_phy *sas_phy) +{ + mvs_port_notify_formed(sas_phy, 1); +} + +void mvs_port_deformed(struct asd_sas_phy *sas_phy) +{ + mvs_port_notify_deformed(sas_phy, 1); +} + +struct mvs_device *mvs_alloc_dev(struct mvs_info *mvi) +{ + u32 dev; + for (dev = 0; dev < MVS_MAX_DEVICES; dev++) { + if (mvi->devices[dev].dev_type == NO_DEVICE) { + mvi->devices[dev].device_id = dev; + return &mvi->devices[dev]; + } + } + + if (dev == MVS_MAX_DEVICES) + mv_printk("max support %d devices, ignore ..\n", + MVS_MAX_DEVICES); + + return NULL; +} + +void mvs_free_dev(struct mvs_device *mvi_dev) +{ + u32 id = mvi_dev->device_id; + memset(mvi_dev, 0, sizeof(*mvi_dev)); + mvi_dev->device_id = id; + mvi_dev->dev_type = NO_DEVICE; + mvi_dev->dev_status = MVS_DEV_NORMAL; + mvi_dev->taskfileset = MVS_ID_NOT_MAPPED; +} + +int mvs_dev_found_notify(struct domain_device *dev, int lock) +{ + unsigned long flags = 0; + int res = 0; + struct mvs_info *mvi = NULL; + struct domain_device *parent_dev = dev->parent; + struct mvs_device *mvi_device; + + mvi = mvs_find_dev_mvi(dev); + + if (lock) + spin_lock_irqsave(&mvi->lock, flags); + + mvi_device = mvs_alloc_dev(mvi); + if (!mvi_device) { + res = -1; + goto found_out; + } + dev->lldd_dev = (void *)mvi_device; + mvi_device->dev_type = dev->dev_type; + + if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) { + int phy_id; + u8 phy_num = parent_dev->ex_dev.num_phys; + struct ex_phy *phy; + for (phy_id = 0; phy_id < phy_num; phy_id++) { + phy = &parent_dev->ex_dev.ex_phy[phy_id]; + if (SAS_ADDR(phy->attached_sas_addr) == + SAS_ADDR(dev->sas_addr)) { + mvi_device->attached_phy = phy_id; + break; + } + } + + if (phy_id == phy_num) { + mv_printk("Error: no attached dev:%016llx" + "at ex:%016llx.\n", + SAS_ADDR(dev->sas_addr), + SAS_ADDR(parent_dev->sas_addr)); + res = -1; + } + } + +found_out: + if (lock) + spin_unlock_irqrestore(&mvi->lock, flags); + return res; +} + +int mvs_dev_found(struct domain_device *dev) +{ + return mvs_dev_found_notify(dev, 1); +} + +void mvs_dev_gone_notify(struct domain_device *dev, int lock) +{ + unsigned long flags = 0; + struct mvs_info *mvi; + struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev; + + mvi = mvs_find_dev_mvi(dev); + + if (lock) + spin_lock_irqsave(&mvi->lock, flags); + + if (mvi_dev) { + mv_dprintk("found dev[%d:%x] is gone.\n", + mvi_dev->device_id, mvi_dev->dev_type); + mvs_free_reg_set(mvi, mvi_dev); + mvs_free_dev(mvi_dev); + } else { + mv_dprintk("found dev has gone.\n"); + } + dev->lldd_dev = NULL; + + if (lock) + spin_unlock_irqrestore(&mvi->lock, flags); +} + + +void mvs_dev_gone(struct domain_device *dev) +{ + mvs_dev_gone_notify(dev, 1); +} + +static struct sas_task *mvs_alloc_task(void) +{ + struct sas_task *task = kzalloc(sizeof(struct sas_task), GFP_KERNEL); + + if (task) { + INIT_LIST_HEAD(&task->list); + spin_lock_init(&task->task_state_lock); + task->task_state_flags = SAS_TASK_STATE_PENDING; + init_timer(&task->timer); + init_completion(&task->completion); + } + return task; +} + +static void mvs_free_task(struct sas_task *task) +{ + if (task) { + BUG_ON(!list_empty(&task->list)); + kfree(task); + } +} + +static void mvs_task_done(struct sas_task *task) +{ + if (!del_timer(&task->timer)) + return; + complete(&task->completion); +} + +static void mvs_tmf_timedout(unsigned long data) +{ + struct sas_task *task = (struct sas_task 
*)data; + + task->task_state_flags |= SAS_TASK_STATE_ABORTED; + complete(&task->completion); +} + +/* XXX */ +#define MVS_TASK_TIMEOUT 20 +static int mvs_exec_internal_tmf_task(struct domain_device *dev, + void *parameter, u32 para_len, struct mvs_tmf_task *tmf) +{ + int res, retry; + struct sas_task *task = NULL; + + for (retry = 0; retry < 3; retry++) { + task = mvs_alloc_task(); + if (!task) + return -ENOMEM; + + task->dev = dev; + task->task_proto = dev->tproto; + + memcpy(&task->ssp_task, parameter, para_len); + task->task_done = mvs_task_done; + + task->timer.data = (unsigned long) task; + task->timer.function = mvs_tmf_timedout; + task->timer.expires = jiffies + MVS_TASK_TIMEOUT*HZ; + add_timer(&task->timer); + + res = mvs_task_exec(task, 1, GFP_KERNEL, NULL, 0, 1, tmf); + + if (res) { + del_timer(&task->timer); + mv_printk("executing internel task failed:%d\n", res); + goto ex_err; + } + + wait_for_completion(&task->completion); + res = -TMF_RESP_FUNC_FAILED; + /* Even TMF timed out, return direct. */ + if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) { + if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) { + mv_printk("TMF task[%x] timeout.\n", tmf->tmf); + goto ex_err; + } + } + + if (task->task_status.resp == SAS_TASK_COMPLETE && + task->task_status.stat == SAM_GOOD) { + res = TMF_RESP_FUNC_COMPLETE; + break; + } + + if (task->task_status.resp == SAS_TASK_COMPLETE && + task->task_status.stat == SAS_DATA_UNDERRUN) { + /* no error, but return the number of bytes of + * underrun */ + res = task->task_status.residual; + break; + } + + if (task->task_status.resp == SAS_TASK_COMPLETE && + task->task_status.stat == SAS_DATA_OVERRUN) { + mv_dprintk("blocked task error.\n"); + res = -EMSGSIZE; + break; + } else { + mv_dprintk(" task to dev %016llx response: 0x%x " + "status 0x%x\n", + SAS_ADDR(dev->sas_addr), + task->task_status.resp, + task->task_status.stat); + mvs_free_task(task); + task = NULL; + + } + } +ex_err: + BUG_ON(retry == 3 && task != NULL); + if (task != NULL) + mvs_free_task(task); + return res; +} + +static int mvs_debug_issue_ssp_tmf(struct domain_device *dev, + u8 *lun, struct mvs_tmf_task *tmf) +{ + struct sas_ssp_task ssp_task; + DECLARE_COMPLETION_ONSTACK(completion); + if (!(dev->tproto & SAS_PROTOCOL_SSP)) + return TMF_RESP_FUNC_ESUPP; + + strncpy((u8 *)&ssp_task.LUN, lun, 8); + + return mvs_exec_internal_tmf_task(dev, &ssp_task, + sizeof(ssp_task), tmf); +} + + +/* Standard mandates link reset for ATA (type 0) + and hard reset for SSP (type 1) , only for RECOVERY */ +static int mvs_debug_I_T_nexus_reset(struct domain_device *dev) +{ + int rc; + struct sas_phy *phy = sas_find_local_phy(dev); + int reset_type = (dev->dev_type == SATA_DEV || + (dev->tproto & SAS_PROTOCOL_STP)) ? 
0 : 1; + rc = sas_phy_reset(phy, reset_type); + msleep(2000); + return rc; +} + +/* mandatory SAM-3 */ +int mvs_lu_reset(struct domain_device *dev, u8 *lun) +{ + unsigned long flags; + int i, phyno[WIDE_PORT_MAX_PHY], num , rc = TMF_RESP_FUNC_FAILED; + struct mvs_tmf_task tmf_task; + struct mvs_info *mvi = mvs_find_dev_mvi(dev); + struct mvs_device * mvi_dev = (struct mvs_device *)dev->lldd_dev; + + tmf_task.tmf = TMF_LU_RESET; + mvi_dev->dev_status = MVS_DEV_EH; + rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task); + if (rc == TMF_RESP_FUNC_COMPLETE) { + num = mvs_find_dev_phyno(dev, phyno); + spin_lock_irqsave(&mvi->lock, flags); + for (i = 0; i < num; i++) + mvs_release_task(mvi, phyno[i], dev); + spin_unlock_irqrestore(&mvi->lock, flags); + } + /* If failed, fall-through I_T_Nexus reset */ + mv_printk("%s for device[%x]:rc= %d\n", __func__, + mvi_dev->device_id, rc); + return rc; } int mvs_I_T_nexus_reset(struct domain_device *dev) { - return TMF_RESP_FUNC_FAILED; + unsigned long flags; + int i, phyno[WIDE_PORT_MAX_PHY], num , rc = TMF_RESP_FUNC_FAILED; + struct mvs_info *mvi = mvs_find_dev_mvi(dev); + struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev; + + if (mvi_dev->dev_status != MVS_DEV_EH) + return TMF_RESP_FUNC_COMPLETE; + rc = mvs_debug_I_T_nexus_reset(dev); + mv_printk("%s for device[%x]:rc= %d\n", + __func__, mvi_dev->device_id, rc); + + /* housekeeper */ + num = mvs_find_dev_phyno(dev, phyno); + spin_lock_irqsave(&mvi->lock, flags); + for (i = 0; i < num; i++) + mvs_release_task(mvi, phyno[i], dev); + spin_unlock_irqrestore(&mvi->lock, flags); + + return rc; +} +/* optional SAM-3 */ +int mvs_query_task(struct sas_task *task) +{ + u32 tag; + struct scsi_lun lun; + struct mvs_tmf_task tmf_task; + int rc = TMF_RESP_FUNC_FAILED; + + if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) { + struct scsi_cmnd * cmnd = (struct scsi_cmnd *)task->uldd_task; + struct domain_device *dev = task->dev; + struct mvs_info *mvi = mvs_find_dev_mvi(dev); + + int_to_scsilun(cmnd->device->lun, &lun); + rc = mvs_find_tag(mvi, task, &tag); + if (rc == 0) { + rc = TMF_RESP_FUNC_FAILED; + return rc; + } + + tmf_task.tmf = TMF_QUERY_TASK; + tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag); + + rc = mvs_debug_issue_ssp_tmf(dev, lun.scsi_lun, &tmf_task); + switch (rc) { + /* The task is still in Lun, release it then */ + case TMF_RESP_FUNC_SUCC: + /* The task is not in Lun or failed, reset the phy */ + case TMF_RESP_FUNC_FAILED: + case TMF_RESP_FUNC_COMPLETE: + break; + } + } + mv_printk("%s:rc= %d\n", __func__, rc); + return rc; +} + +/* mandatory SAM-3, still need free task/slot info */ +int mvs_abort_task(struct sas_task *task) +{ + struct scsi_lun lun; + struct mvs_tmf_task tmf_task; + struct domain_device *dev = task->dev; + struct mvs_info *mvi = mvs_find_dev_mvi(dev); + int rc = TMF_RESP_FUNC_FAILED; + unsigned long flags; + u32 tag; + if (mvi->exp_req) + mvi->exp_req--; + spin_lock_irqsave(&task->task_state_lock, flags); + if (task->task_state_flags & SAS_TASK_STATE_DONE) { + spin_unlock_irqrestore(&task->task_state_lock, flags); + rc = TMF_RESP_FUNC_COMPLETE; + goto out; + } + spin_unlock_irqrestore(&task->task_state_lock, flags); + if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) { + struct scsi_cmnd * cmnd = (struct scsi_cmnd *)task->uldd_task; + + int_to_scsilun(cmnd->device->lun, &lun); + rc = mvs_find_tag(mvi, task, &tag); + if (rc == 0) { + mv_printk("No such tag in %s\n", __func__); + rc = TMF_RESP_FUNC_FAILED; + return rc; + } + + tmf_task.tmf = 
TMF_ABORT_TASK; + tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag); + + rc = mvs_debug_issue_ssp_tmf(dev, lun.scsi_lun, &tmf_task); + + /* if successful, clear the task and callback forwards.*/ + if (rc == TMF_RESP_FUNC_COMPLETE) { + u32 slot_no; + struct mvs_slot_info *slot; + struct mvs_info *mvi = mvs_find_dev_mvi(dev); + + if (task->lldd_task) { + slot = (struct mvs_slot_info *)task->lldd_task; + slot_no = (u32) (slot - mvi->slot_info); + mvs_slot_complete(mvi, slot_no, 1); + } + } + } else if (task->task_proto & SAS_PROTOCOL_SATA || + task->task_proto & SAS_PROTOCOL_STP) { + /* to do free register_set */ + } else { + /* SMP */ + + } +out: + if (rc != TMF_RESP_FUNC_COMPLETE) + mv_printk("%s:rc= %d\n", __func__, rc); + return rc; +} + +int mvs_abort_task_set(struct domain_device *dev, u8 *lun) +{ + int rc = TMF_RESP_FUNC_FAILED; + struct mvs_tmf_task tmf_task; + + tmf_task.tmf = TMF_ABORT_TASK_SET; + rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task); + + return rc; +} + +int mvs_clear_aca(struct domain_device *dev, u8 *lun) +{ + int rc = TMF_RESP_FUNC_FAILED; + struct mvs_tmf_task tmf_task; + + tmf_task.tmf = TMF_CLEAR_ACA; + rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task); + + return rc; +} + +int mvs_clear_task_set(struct domain_device *dev, u8 *lun) +{ + int rc = TMF_RESP_FUNC_FAILED; + struct mvs_tmf_task tmf_task; + + tmf_task.tmf = TMF_CLEAR_TASK_SET; + rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task); + + return rc; } static int mvs_sata_done(struct mvs_info *mvi, struct sas_task *task, u32 slot_idx, int err) { - struct mvs_port *port = mvi->slot_info[slot_idx].port; + struct mvs_device *mvi_dev = (struct mvs_device *)task->dev->lldd_dev; struct task_status_struct *tstat = &task->task_status; struct ata_task_resp *resp = (struct ata_task_resp *)tstat->buf; int stat = SAM_GOOD; + resp->frame_len = sizeof(struct dev_to_host_fis); memcpy(&resp->ending_fis[0], - SATA_RECEIVED_D2H_FIS(port->taskfileset), + SATA_RECEIVED_D2H_FIS(mvi_dev->taskfileset), sizeof(struct dev_to_host_fis)); tstat->buf_valid_size = sizeof(*resp); if (unlikely(err)) @@ -1107,75 +1729,104 @@ static int mvs_slot_err(struct mvs_info *mvi, struct sas_task *task, u32 slot_idx) { struct mvs_slot_info *slot = &mvi->slot_info[slot_idx]; + int stat; u32 err_dw0 = le32_to_cpu(*(u32 *) (slot->response)); - u32 err_dw1 = le32_to_cpu(*(u32 *) (slot->response + 4)); - int stat = SAM_CHECK_COND; + u32 tfs = 0; + enum mvs_port_type type = PORT_TYPE_SAS; - if (err_dw1 & SLOT_BSY_ERR) { - stat = SAS_QUEUE_FULL; - mvs_slot_reset(mvi, task, slot_idx); - } + if (err_dw0 & CMD_ISS_STPD) + MVS_CHIP_DISP->issue_stop(mvi, type, tfs); + + MVS_CHIP_DISP->command_active(mvi, slot_idx); + + stat = SAM_CHECK_COND; switch (task->task_proto) { case SAS_PROTOCOL_SSP: + stat = SAS_ABORTED_TASK; break; case SAS_PROTOCOL_SMP: + stat = SAM_CHECK_COND; break; + case SAS_PROTOCOL_SATA: case SAS_PROTOCOL_STP: case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: - if (err_dw0 & TFILE_ERR) - stat = mvs_sata_done(mvi, task, slot_idx, 1); + { + if (err_dw0 == 0x80400002) + mv_printk("find reserved error, why?\n"); + + task->ata_task.use_ncq = 0; + stat = SAS_PROTO_RESPONSE; + mvs_sata_done(mvi, task, slot_idx, 1); + + } break; default: break; } - mvs_hexdump(16, (u8 *) slot->response, 0); return stat; } -static int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags) +int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags) { u32 slot_idx = rx_desc & RXQ_SLOT_MASK; struct mvs_slot_info *slot = &mvi->slot_info[slot_idx]; struct 
sas_task *task = slot->task; + struct mvs_device *mvi_dev = NULL; struct task_status_struct *tstat; - struct mvs_port *port; + bool aborted; void *to; + enum exec_status sts; + if (mvi->exp_req) + mvi->exp_req--; if (unlikely(!task || !task->lldd_task)) return -1; + tstat = &task->task_status; + mvi_dev = (struct mvs_device *)task->dev->lldd_dev; + mvs_hba_cq_dump(mvi); spin_lock(&task->task_state_lock); + task->task_state_flags &= + ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR); + task->task_state_flags |= SAS_TASK_STATE_DONE; + /* race condition*/ aborted = task->task_state_flags & SAS_TASK_STATE_ABORTED; - if (!aborted) { - task->task_state_flags &= - ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR); - task->task_state_flags |= SAS_TASK_STATE_DONE; - } spin_unlock(&task->task_state_lock); - if (aborted) { + memset(tstat, 0, sizeof(*tstat)); + tstat->resp = SAS_TASK_COMPLETE; + + if (unlikely(aborted)) { + tstat->stat = SAS_ABORTED_TASK; + if (mvi_dev) + mvi_dev->runing_req--; + if (sas_protocol_ata(task->task_proto)) + mvs_free_reg_set(mvi, mvi_dev); + mvs_slot_task_free(mvi, task, slot, slot_idx); - mvs_slot_free(mvi, rx_desc); return -1; } - port = slot->port; - tstat = &task->task_status; - memset(tstat, 0, sizeof(*tstat)); - tstat->resp = SAS_TASK_COMPLETE; - - if (unlikely(!port->port_attached || flags)) { - mvs_slot_err(mvi, task, slot_idx); - if (!sas_protocol_ata(task->task_proto)) - tstat->stat = SAS_PHY_DOWN; + if (unlikely(!mvi_dev || !slot->port->port_attached || flags)) { + mv_dprintk("port has not device.\n"); + tstat->stat = SAS_PHY_DOWN; goto out; } + /* + if (unlikely((rx_desc & RXQ_ERR) || (*(u64 *) slot->response))) { + mv_dprintk("Find device[%016llx] RXQ_ERR %X, + err info:%016llx\n", + SAS_ADDR(task->dev->sas_addr), + rx_desc, (u64)(*(u64 *) slot->response)); + } + */ + /* error info record present */ if (unlikely((rx_desc & RXQ_ERR) && (*(u64 *) slot->response))) { tstat->stat = mvs_slot_err(mvi, task, slot_idx); @@ -1191,13 +1842,10 @@ static int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags) } /* response frame present */ else if (rx_desc & RXQ_RSP) { - struct ssp_response_iu *iu = - slot->response + sizeof(struct mvs_err_info); - sas_ssp_task_response(&mvi->pdev->dev, task, iu); - } - - /* should never happen? 
*/ - else + struct ssp_response_iu *iu = slot->response + + sizeof(struct mvs_err_info); + sas_ssp_task_response(mvi->dev, task, iu); + } else tstat->stat = SAM_CHECK_COND; break; @@ -1225,105 +1873,245 @@ static int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags) } out: + if (mvi_dev) + mvi_dev->runing_req--; + if (sas_protocol_ata(task->task_proto)) + mvs_free_reg_set(mvi, mvi_dev); + mvs_slot_task_free(mvi, task, slot, slot_idx); - if (unlikely(tstat->stat != SAS_QUEUE_FULL)) - mvs_slot_free(mvi, rx_desc); + sts = tstat->stat; spin_unlock(&mvi->lock); - task->task_done(task); + if (task->task_done) + task->task_done(task); + else + mv_dprintk("why has not task_done.\n"); spin_lock(&mvi->lock); - return tstat->stat; + + return sts; } -static void mvs_release_task(struct mvs_info *mvi, int phy_no) +void mvs_release_task(struct mvs_info *mvi, + int phy_no, struct domain_device *dev) { - struct list_head *pos, *n; - struct mvs_slot_info *slot; - struct mvs_phy *phy = &mvi->phy[phy_no]; - struct mvs_port *port = phy->port; - u32 rx_desc; + int i = 0; u32 slot_idx; + struct mvs_phy *phy; + struct mvs_port *port; + struct mvs_slot_info *slot, *slot2; + phy = &mvi->phy[phy_no]; + port = phy->port; if (!port) return; - list_for_each_safe(pos, n, &port->list) { - slot = container_of(pos, struct mvs_slot_info, list); - rx_desc = (u32) (slot - mvi->slot_info); - mvs_slot_complete(mvi, rx_desc, 1); + list_for_each_entry_safe(slot, slot2, &port->list, entry) { + struct sas_task *task; + slot_idx = (u32) (slot - mvi->slot_info); + task = slot->task; + + if (dev && task->dev != dev) + continue; + + mv_printk("Release slot [%x] tag[%x], task [%p]:\n", + slot_idx, slot->slot_tag, task); + + if (task->task_proto & SAS_PROTOCOL_SSP) { + mv_printk("attached with SSP task CDB["); + for (i = 0; i < 16; i++) + mv_printk(" %02x", task->ssp_task.cdb[i]); + mv_printk(" ]\n"); + } + + mvs_slot_complete(mvi, slot_idx, 1); + } +} + +static void mvs_phy_disconnected(struct mvs_phy *phy) +{ + phy->phy_attached = 0; + phy->att_dev_info = 0; + phy->att_dev_sas_addr = 0; +} + +static void mvs_work_queue(struct work_struct *work) +{ + struct delayed_work *dw = container_of(work, struct delayed_work, work); + struct mvs_wq *mwq = container_of(dw, struct mvs_wq, work_q); + struct mvs_info *mvi = mwq->mvi; + unsigned long flags; + + spin_lock_irqsave(&mvi->lock, flags); + if (mwq->handler & PHY_PLUG_EVENT) { + u32 phy_no = (unsigned long) mwq->data; + struct sas_ha_struct *sas_ha = mvi->sas; + struct mvs_phy *phy = &mvi->phy[phy_no]; + struct asd_sas_phy *sas_phy = &phy->sas_phy; + + if (phy->phy_event & PHY_PLUG_OUT) { + u32 tmp; + struct sas_identify_frame *id; + id = (struct sas_identify_frame *)phy->frame_rcvd; + tmp = MVS_CHIP_DISP->read_phy_ctl(mvi, phy_no); + phy->phy_event &= ~PHY_PLUG_OUT; + if (!(tmp & PHY_READY_MASK)) { + sas_phy_disconnected(sas_phy); + mvs_phy_disconnected(phy); + sas_ha->notify_phy_event(sas_phy, + PHYE_LOSS_OF_SIGNAL); + mv_dprintk("phy%d Removed Device\n", phy_no); + } else { + MVS_CHIP_DISP->detect_porttype(mvi, phy_no); + mvs_update_phyinfo(mvi, phy_no, 1); + mvs_bytes_dmaed(mvi, phy_no); + mvs_port_notify_formed(sas_phy, 0); + mv_dprintk("phy%d Attached Device\n", phy_no); + } + } + } + list_del(&mwq->entry); + spin_unlock_irqrestore(&mvi->lock, flags); + kfree(mwq); +} + +static int mvs_handle_event(struct mvs_info *mvi, void *data, int handler) +{ + struct mvs_wq *mwq; + int ret = 0; + + mwq = kmalloc(sizeof(struct mvs_wq), GFP_ATOMIC); + if (mwq) { + mwq->mvi = mvi; + 
mwq->data = data; + mwq->handler = handler; + MV_INIT_DELAYED_WORK(&mwq->work_q, mvs_work_queue, mwq); + list_add_tail(&mwq->entry, &mvi->wq_list); + schedule_delayed_work(&mwq->work_q, HZ * 2); + } else + ret = -ENOMEM; + + return ret; +} + +static void mvs_sig_time_out(unsigned long tphy) +{ + struct mvs_phy *phy = (struct mvs_phy *)tphy; + struct mvs_info *mvi = phy->mvi; + u8 phy_no; + + for (phy_no = 0; phy_no < mvi->chip->n_phy; phy_no++) { + if (&mvi->phy[phy_no] == phy) { + mv_dprintk("Get signature time out, reset phy %d\n", + phy_no+mvi->id*mvi->chip->n_phy); + MVS_CHIP_DISP->phy_reset(mvi, phy_no, 1); + } } } -static void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events) +static void mvs_sig_remove_timer(struct mvs_phy *phy) { - struct pci_dev *pdev = mvi->pdev; - struct sas_ha_struct *sas_ha = &mvi->sas; + if (phy->timer.function) + del_timer(&phy->timer); + phy->timer.function = NULL; +} + +void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events) +{ + u32 tmp; + struct sas_ha_struct *sas_ha = mvi->sas; struct mvs_phy *phy = &mvi->phy[phy_no]; struct asd_sas_phy *sas_phy = &phy->sas_phy; - phy->irq_status = mvs_read_port_irq_stat(mvi, phy_no); + phy->irq_status = MVS_CHIP_DISP->read_port_irq_stat(mvi, phy_no); + mv_dprintk("port %d ctrl sts=0x%X.\n", phy_no+mvi->id*mvi->chip->n_phy, + MVS_CHIP_DISP->read_phy_ctl(mvi, phy_no)); + mv_dprintk("Port %d irq sts = 0x%X\n", phy_no+mvi->id*mvi->chip->n_phy, + phy->irq_status); + /* * events is port event now , * we need check the interrupt status which belongs to per port. */ - dev_printk(KERN_DEBUG, &pdev->dev, - "Port %d Event = %X\n", - phy_no, phy->irq_status); - - if (phy->irq_status & (PHYEV_POOF | PHYEV_DEC_ERR)) { - mvs_release_task(mvi, phy_no); - if (!mvs_is_phy_ready(mvi, phy_no)) { - sas_phy_disconnected(sas_phy); - sas_ha->notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL); - dev_printk(KERN_INFO, &pdev->dev, - "Port %d Unplug Notice\n", phy_no); - } else - mvs_phy_control(sas_phy, PHY_FUNC_LINK_RESET, NULL); - } - if (!(phy->irq_status & PHYEV_DEC_ERR)) { - if (phy->irq_status & PHYEV_COMWAKE) { - u32 tmp = mvs_read_port_irq_mask(mvi, phy_no); - mvs_write_port_irq_mask(mvi, phy_no, - tmp | PHYEV_SIG_FIS); + if (phy->irq_status & PHYEV_DCDR_ERR) + mv_dprintk("port %d STP decoding error.\n", + phy_no+mvi->id*mvi->chip->n_phy); + + if (phy->irq_status & PHYEV_POOF) { + if (!(phy->phy_event & PHY_PLUG_OUT)) { + int dev_sata = phy->phy_type & PORT_TYPE_SATA; + int ready; + mvs_release_task(mvi, phy_no, NULL); + phy->phy_event |= PHY_PLUG_OUT; + mvs_handle_event(mvi, + (void *)(unsigned long)phy_no, + PHY_PLUG_EVENT); + ready = mvs_is_phy_ready(mvi, phy_no); + if (!ready) + mv_dprintk("phy%d Unplug Notice\n", + phy_no + + mvi->id * mvi->chip->n_phy); + if (ready || dev_sata) { + if (MVS_CHIP_DISP->stp_reset) + MVS_CHIP_DISP->stp_reset(mvi, + phy_no); + else + MVS_CHIP_DISP->phy_reset(mvi, + phy_no, 0); + return; + } } - if (phy->irq_status & (PHYEV_SIG_FIS | PHYEV_ID_DONE)) { - phy->phy_status = mvs_is_phy_ready(mvi, phy_no); - if (phy->phy_status) { - mvs_detect_porttype(mvi, phy_no); - - if (phy->phy_type & PORT_TYPE_SATA) { - u32 tmp = mvs_read_port_irq_mask(mvi, - phy_no); - tmp &= ~PHYEV_SIG_FIS; - mvs_write_port_irq_mask(mvi, - phy_no, tmp); - } + } - mvs_update_phyinfo(mvi, phy_no, 0); - sas_ha->notify_phy_event(sas_phy, - PHYE_OOB_DONE); - mvs_bytes_dmaed(mvi, phy_no); - } else { - dev_printk(KERN_DEBUG, &pdev->dev, - "plugin interrupt but phy is gone\n"); - mvs_phy_control(sas_phy, PHY_FUNC_LINK_RESET, - 
NULL); + if (phy->irq_status & PHYEV_COMWAKE) { + tmp = MVS_CHIP_DISP->read_port_irq_mask(mvi, phy_no); + MVS_CHIP_DISP->write_port_irq_mask(mvi, phy_no, + tmp | PHYEV_SIG_FIS); + if (phy->timer.function == NULL) { + phy->timer.data = (unsigned long)phy; + phy->timer.function = mvs_sig_time_out; + phy->timer.expires = jiffies + 10*HZ; + add_timer(&phy->timer); + } + } + if (phy->irq_status & (PHYEV_SIG_FIS | PHYEV_ID_DONE)) { + phy->phy_status = mvs_is_phy_ready(mvi, phy_no); + mvs_sig_remove_timer(phy); + mv_dprintk("notify plug in on phy[%d]\n", phy_no); + if (phy->phy_status) { + mdelay(10); + MVS_CHIP_DISP->detect_porttype(mvi, phy_no); + if (phy->phy_type & PORT_TYPE_SATA) { + tmp = MVS_CHIP_DISP->read_port_irq_mask( + mvi, phy_no); + tmp &= ~PHYEV_SIG_FIS; + MVS_CHIP_DISP->write_port_irq_mask(mvi, + phy_no, tmp); + } + mvs_update_phyinfo(mvi, phy_no, 0); + mvs_bytes_dmaed(mvi, phy_no); + /* whether driver is going to handle hot plug */ + if (phy->phy_event & PHY_PLUG_OUT) { + mvs_port_notify_formed(sas_phy, 0); + phy->phy_event &= ~PHY_PLUG_OUT; } - } else if (phy->irq_status & PHYEV_BROAD_CH) { - mvs_release_task(mvi, phy_no); - sas_ha->notify_port_event(sas_phy, - PORTE_BROADCAST_RCVD); + } else { + mv_dprintk("plugin interrupt but phy%d is gone\n", + phy_no + mvi->id*mvi->chip->n_phy); } + } else if (phy->irq_status & PHYEV_BROAD_CH) { + mv_dprintk("port %d broadcast change.\n", + phy_no + mvi->id*mvi->chip->n_phy); + /* exception for Samsung disk drive*/ + mdelay(1000); + sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD); } - mvs_write_port_irq_stat(mvi, phy_no, phy->irq_status); + MVS_CHIP_DISP->write_port_irq_stat(mvi, phy_no, phy->irq_status); } -static int mvs_int_rx(struct mvs_info *mvi, bool self_clear) +int mvs_int_rx(struct mvs_info *mvi, bool self_clear) { - void __iomem *regs = mvi->regs; u32 rx_prod_idx, rx_desc; bool attn = false; - struct pci_dev *pdev = mvi->pdev; /* the first dword in the RX ring is special: it contains * a mirror of the hardware's RX producer index, so that @@ -1339,480 +2127,31 @@ static int mvs_int_rx(struct mvs_info *mvi, bool self_clear) * note: if coalescing is enabled, * it will need to read from register every time for sure */ - if (mvi->rx_cons == rx_prod_idx) - mvi->rx_cons = mr32(RX_CONS_IDX) & RX_RING_SZ_MASK; + if (unlikely(mvi->rx_cons == rx_prod_idx)) + mvi->rx_cons = MVS_CHIP_DISP->rx_update(mvi) & RX_RING_SZ_MASK; if (mvi->rx_cons == rx_prod_idx) return 0; while (mvi->rx_cons != rx_prod_idx) { - /* increment our internal RX consumer pointer */ rx_prod_idx = (rx_prod_idx + 1) & (MVS_RX_RING_SZ - 1); - rx_desc = le32_to_cpu(mvi->rx[rx_prod_idx + 1]); if (likely(rx_desc & RXQ_DONE)) mvs_slot_complete(mvi, rx_desc, 0); if (rx_desc & RXQ_ATTN) { attn = true; - dev_printk(KERN_DEBUG, &pdev->dev, "ATTN %X\n", - rx_desc); } else if (rx_desc & RXQ_ERR) { if (!(rx_desc & RXQ_DONE)) mvs_slot_complete(mvi, rx_desc, 0); - dev_printk(KERN_DEBUG, &pdev->dev, "RXQ_ERR %X\n", - rx_desc); } else if (rx_desc & RXQ_SLOT_RESET) { - dev_printk(KERN_DEBUG, &pdev->dev, "Slot reset[%X]\n", - rx_desc); mvs_slot_free(mvi, rx_desc); } } if (attn && self_clear) - mvs_int_full(mvi); - + MVS_CHIP_DISP->int_full(mvi); return 0; } -#ifndef MVS_DISABLE_NVRAM -static int mvs_eep_read(void __iomem *regs, u32 addr, u32 *data) -{ - int timeout = 1000; - - if (addr & ~SPI_ADDR_MASK) - return -EINVAL; - - writel(addr, regs + SPI_CMD); - writel(TWSI_RD, regs + SPI_CTL); - - while (timeout-- > 0) { - if (readl(regs + SPI_CTL) & TWSI_RDY) { - *data = readl(regs + 
SPI_DATA); - return 0; - } - - udelay(10); - } - - return -EBUSY; -} - -static int mvs_eep_read_buf(void __iomem *regs, u32 addr, - void *buf, u32 buflen) -{ - u32 addr_end, tmp_addr, i, j; - u32 tmp = 0; - int rc; - u8 *tmp8, *buf8 = buf; - - addr_end = addr + buflen; - tmp_addr = ALIGN(addr, 4); - if (addr > 0xff) - return -EINVAL; - - j = addr & 0x3; - if (j) { - rc = mvs_eep_read(regs, tmp_addr, &tmp); - if (rc) - return rc; - - tmp8 = (u8 *)&tmp; - for (i = j; i < 4; i++) - *buf8++ = tmp8[i]; - - tmp_addr += 4; - } - - for (j = ALIGN(addr_end, 4); tmp_addr < j; tmp_addr += 4) { - rc = mvs_eep_read(regs, tmp_addr, &tmp); - if (rc) - return rc; - - memcpy(buf8, &tmp, 4); - buf8 += 4; - } - - if (tmp_addr < addr_end) { - rc = mvs_eep_read(regs, tmp_addr, &tmp); - if (rc) - return rc; - - tmp8 = (u8 *)&tmp; - j = addr_end - tmp_addr; - for (i = 0; i < j; i++) - *buf8++ = tmp8[i]; - - tmp_addr += 4; - } - - return 0; -} -#endif - -int mvs_nvram_read(struct mvs_info *mvi, u32 addr, void *buf, u32 buflen) -{ -#ifndef MVS_DISABLE_NVRAM - void __iomem *regs = mvi->regs; - int rc, i; - u32 sum; - u8 hdr[2], *tmp; - const char *msg; - - rc = mvs_eep_read_buf(regs, addr, &hdr, 2); - if (rc) { - msg = "nvram hdr read failed"; - goto err_out; - } - rc = mvs_eep_read_buf(regs, addr + 2, buf, buflen); - if (rc) { - msg = "nvram read failed"; - goto err_out; - } - - if (hdr[0] != 0x5A) { - /* entry id */ - msg = "invalid nvram entry id"; - rc = -ENOENT; - goto err_out; - } - - tmp = buf; - sum = ((u32)hdr[0]) + ((u32)hdr[1]); - for (i = 0; i < buflen; i++) - sum += ((u32)tmp[i]); - - if (sum) { - msg = "nvram checksum failure"; - rc = -EILSEQ; - goto err_out; - } - - return 0; - -err_out: - dev_printk(KERN_ERR, &mvi->pdev->dev, "%s", msg); - return rc; -#else - /* FIXME , For SAS target mode */ - memcpy(buf, "\x50\x05\x04\x30\x11\xab\x00\x00", 8); - return 0; -#endif -} - -static void mvs_int_sata(struct mvs_info *mvi) -{ - u32 tmp; - void __iomem *regs = mvi->regs; - tmp = mr32(INT_STAT_SRS); - mw32(INT_STAT_SRS, tmp & 0xFFFF); -} - -static void mvs_slot_reset(struct mvs_info *mvi, struct sas_task *task, - u32 slot_idx) -{ - void __iomem *regs = mvi->regs; - struct domain_device *dev = task->dev; - struct asd_sas_port *sas_port = dev->port; - struct mvs_port *port = mvi->slot_info[slot_idx].port; - u32 reg_set, phy_mask; - - if (!sas_protocol_ata(task->task_proto)) { - reg_set = 0; - phy_mask = (port->wide_port_phymap) ? 
port->wide_port_phymap : - sas_port->phy_mask; - } else { - reg_set = port->taskfileset; - phy_mask = sas_port->phy_mask; - } - mvi->tx[mvi->tx_prod] = cpu_to_le32(TXQ_MODE_I | slot_idx | - (TXQ_CMD_SLOT_RESET << TXQ_CMD_SHIFT) | - (phy_mask << TXQ_PHY_SHIFT) | - (reg_set << TXQ_SRS_SHIFT)); - - mw32(TX_PROD_IDX, mvi->tx_prod); - mvi->tx_prod = (mvi->tx_prod + 1) & (MVS_CHIP_SLOT_SZ - 1); -} - -void mvs_int_full(struct mvs_info *mvi) -{ - void __iomem *regs = mvi->regs; - u32 tmp, stat; - int i; - - stat = mr32(INT_STAT); - - mvs_int_rx(mvi, false); - - for (i = 0; i < MVS_MAX_PORTS; i++) { - tmp = (stat >> i) & (CINT_PORT | CINT_PORT_STOPPED); - if (tmp) - mvs_int_port(mvi, i, tmp); - } - - if (stat & CINT_SRS) - mvs_int_sata(mvi); - - mw32(INT_STAT, stat); -} - -#ifndef MVS_DISABLE_MSI -static irqreturn_t mvs_msi_interrupt(int irq, void *opaque) -{ - struct mvs_info *mvi = opaque; - -#ifndef MVS_USE_TASKLET - spin_lock(&mvi->lock); - - mvs_int_rx(mvi, true); - - spin_unlock(&mvi->lock); -#else - tasklet_schedule(&mvi->tasklet); -#endif - return IRQ_HANDLED; -} -#endif - -int mvs_task_abort(struct sas_task *task) -{ - int rc; - unsigned long flags; - struct mvs_info *mvi = task->dev->port->ha->lldd_ha; - struct pci_dev *pdev = mvi->pdev; - int tag; - - spin_lock_irqsave(&task->task_state_lock, flags); - if (task->task_state_flags & SAS_TASK_STATE_DONE) { - rc = TMF_RESP_FUNC_COMPLETE; - spin_unlock_irqrestore(&task->task_state_lock, flags); - goto out_done; - } - spin_unlock_irqrestore(&task->task_state_lock, flags); - - switch (task->task_proto) { - case SAS_PROTOCOL_SMP: - dev_printk(KERN_DEBUG, &pdev->dev, "SMP Abort! \n"); - break; - case SAS_PROTOCOL_SSP: - dev_printk(KERN_DEBUG, &pdev->dev, "SSP Abort! \n"); - break; - case SAS_PROTOCOL_SATA: - case SAS_PROTOCOL_STP: - case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:{ - dev_printk(KERN_DEBUG, &pdev->dev, "STP Abort! \n"); -#if _MV_DUMP - dev_printk(KERN_DEBUG, &pdev->dev, "Dump D2H FIS: \n"); - mvs_hexdump(sizeof(struct host_to_dev_fis), - (void *)&task->ata_task.fis, 0); - dev_printk(KERN_DEBUG, &pdev->dev, "Dump ATAPI Cmd : \n"); - mvs_hexdump(16, task->ata_task.atapi_packet, 0); -#endif - spin_lock_irqsave(&task->task_state_lock, flags); - if (task->task_state_flags & SAS_TASK_NEED_DEV_RESET) { - /* TODO */ - ; - } - spin_unlock_irqrestore(&task->task_state_lock, flags); - break; - } - default: - break; - } - - if (mvs_find_tag(mvi, task, &tag)) { - spin_lock_irqsave(&mvi->lock, flags); - mvs_slot_task_free(mvi, task, &mvi->slot_info[tag], tag); - spin_unlock_irqrestore(&mvi->lock, flags); - } - if (!mvs_task_exec(task, 1, GFP_ATOMIC)) - rc = TMF_RESP_FUNC_COMPLETE; - else - rc = TMF_RESP_FUNC_FAILED; -out_done: - return rc; -} - -int __devinit mvs_hw_init(struct mvs_info *mvi) -{ - void __iomem *regs = mvi->regs; - int i; - u32 tmp, cctl; - - /* make sure interrupts are masked immediately (paranoia) */ - mw32(GBL_CTL, 0); - tmp = mr32(GBL_CTL); - - /* Reset Controller */ - if (!(tmp & HBA_RST)) { - if (mvi->flags & MVF_PHY_PWR_FIX) { - pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &tmp); - tmp &= ~PCTL_PWR_ON; - tmp |= PCTL_OFF; - pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp); - - pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &tmp); - tmp &= ~PCTL_PWR_ON; - tmp |= PCTL_OFF; - pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp); - } - - /* global reset, incl. 
COMRESET/H_RESET_N (self-clearing) */ - mw32_f(GBL_CTL, HBA_RST); - } - - /* wait for reset to finish; timeout is just a guess */ - i = 1000; - while (i-- > 0) { - msleep(10); - - if (!(mr32(GBL_CTL) & HBA_RST)) - break; - } - if (mr32(GBL_CTL) & HBA_RST) { - dev_printk(KERN_ERR, &mvi->pdev->dev, "HBA reset failed\n"); - return -EBUSY; - } - - /* Init Chip */ - /* make sure RST is set; HBA_RST /should/ have done that for us */ - cctl = mr32(CTL); - if (cctl & CCTL_RST) - cctl &= ~CCTL_RST; - else - mw32_f(CTL, cctl | CCTL_RST); - - /* write to device control _AND_ device status register? - A.C. */ - pci_read_config_dword(mvi->pdev, PCR_DEV_CTRL, &tmp); - tmp &= ~PRD_REQ_MASK; - tmp |= PRD_REQ_SIZE; - pci_write_config_dword(mvi->pdev, PCR_DEV_CTRL, tmp); - - pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &tmp); - tmp |= PCTL_PWR_ON; - tmp &= ~PCTL_OFF; - pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp); - - pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &tmp); - tmp |= PCTL_PWR_ON; - tmp &= ~PCTL_OFF; - pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp); - - mw32_f(CTL, cctl); - - /* reset control */ - mw32(PCS, 0); /*MVS_PCS */ - - mvs_phy_hacks(mvi); - - mw32(CMD_LIST_LO, mvi->slot_dma); - mw32(CMD_LIST_HI, (mvi->slot_dma >> 16) >> 16); - - mw32(RX_FIS_LO, mvi->rx_fis_dma); - mw32(RX_FIS_HI, (mvi->rx_fis_dma >> 16) >> 16); - - mw32(TX_CFG, MVS_CHIP_SLOT_SZ); - mw32(TX_LO, mvi->tx_dma); - mw32(TX_HI, (mvi->tx_dma >> 16) >> 16); - - mw32(RX_CFG, MVS_RX_RING_SZ); - mw32(RX_LO, mvi->rx_dma); - mw32(RX_HI, (mvi->rx_dma >> 16) >> 16); - - /* enable auto port detection */ - mw32(GBL_PORT_TYPE, MODE_AUTO_DET_EN); - msleep(1100); - /* init and reset phys */ - for (i = 0; i < mvi->chip->n_phy; i++) { - u32 lo = be32_to_cpu(*(u32 *)&mvi->sas_addr[4]); - u32 hi = be32_to_cpu(*(u32 *)&mvi->sas_addr[0]); - - mvs_detect_porttype(mvi, i); - - /* set phy local SAS address */ - mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_LO); - mvs_write_port_cfg_data(mvi, i, lo); - mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_HI); - mvs_write_port_cfg_data(mvi, i, hi); - - /* reset phy */ - tmp = mvs_read_phy_ctl(mvi, i); - tmp |= PHY_RST; - mvs_write_phy_ctl(mvi, i, tmp); - } - - msleep(100); - - for (i = 0; i < mvi->chip->n_phy; i++) { - /* clear phy int status */ - tmp = mvs_read_port_irq_stat(mvi, i); - tmp &= ~PHYEV_SIG_FIS; - mvs_write_port_irq_stat(mvi, i, tmp); - - /* set phy int mask */ - tmp = PHYEV_RDY_CH | PHYEV_BROAD_CH | PHYEV_UNASSOC_FIS | - PHYEV_ID_DONE | PHYEV_DEC_ERR; - mvs_write_port_irq_mask(mvi, i, tmp); - - msleep(100); - mvs_update_phyinfo(mvi, i, 1); - mvs_enable_xmt(mvi, i); - } - - /* FIXME: update wide port bitmaps */ - - /* little endian for open address and command table, etc. */ - /* A.C. - * it seems that ( from the spec ) turning on big-endian won't - * do us any good on big-endian machines, need further confirmation - */ - cctl = mr32(CTL); - cctl |= CCTL_ENDIAN_CMD; - cctl |= CCTL_ENDIAN_DATA; - cctl &= ~CCTL_ENDIAN_OPEN; - cctl |= CCTL_ENDIAN_RSP; - mw32_f(CTL, cctl); - - /* reset CMD queue */ - tmp = mr32(PCS); - tmp |= PCS_CMD_RST; - mw32(PCS, tmp); - /* interrupt coalescing may cause missing HW interrput in some case, - * and the max count is 0x1ff, while our max slot is 0x200, - * it will make count 0. 
-	 */
-	tmp = 0;
-	mw32(INT_COAL, tmp);
-
-	tmp = 0x100;
-	mw32(INT_COAL_TMOUT, tmp);
-
-	/* ladies and gentlemen, start your engines */
-	mw32(TX_CFG, 0);
-	mw32(TX_CFG, MVS_CHIP_SLOT_SZ | TX_EN);
-	mw32(RX_CFG, MVS_RX_RING_SZ | RX_EN);
-	/* enable CMD/CMPL_Q/RESP mode */
-	mw32(PCS, PCS_SATA_RETRY | PCS_FIS_RX_EN | PCS_CMD_EN);
-
-	/* enable completion queue interrupt */
-	tmp = (CINT_PORT_MASK | CINT_DONE | CINT_MEM | CINT_SRS);
-	mw32(INT_MASK, tmp);
-
-	/* Enable SRS interrupt */
-	mw32(INT_MASK_SRS, 0xFF);
-	return 0;
-}
-
-void __devinit mvs_print_info(struct mvs_info *mvi)
-{
-	struct pci_dev *pdev = mvi->pdev;
-	static int printed_version;
-
-	if (!printed_version++)
-		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
-
-	dev_printk(KERN_INFO, &pdev->dev, "%u phys, addr %llx\n",
-		mvi->chip->n_phy, SAS_ADDR(mvi->sas_addr));
-}
-
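Editor's note: the SSP task-management handlers added above (mvs_lu_reset, mvs_abort_task, mvs_abort_task_set, mvs_clear_aca, mvs_clear_task_set) all share one shape: fill a struct mvs_tmf_task with the TMF opcode (plus a tag for abort/query), hand it to mvs_debug_issue_ssp_tmf(), and return the TMF response code. The standalone C sketch below models only that dispatch shape; every type and function name in it (tmf_request, issue_ssp_tmf, the *_X constants) is a made-up stand-in, not the driver's real API.

#include <stdio.h>

/* Hypothetical stand-ins for the driver's TMF plumbing (illustration only). */
enum tmf_func { TMF_LU_RESET_X = 0x08, TMF_ABORT_TASK_SET_X = 0x02, TMF_CLEAR_ACA_X = 0x40 };
enum tmf_resp { RESP_FUNC_FAILED = -1, RESP_FUNC_COMPLETE = 0 };

struct tmf_request {
	enum tmf_func tmf;	/* which task-management function to run */
	unsigned short tag;	/* only meaningful for abort/query task */
};

/* Placeholder for the single issue routine every handler funnels into. */
static enum tmf_resp issue_ssp_tmf(const unsigned char *lun, const struct tmf_request *req)
{
	printf("issuing TMF 0x%02x to LUN byte %u\n", req->tmf, (unsigned)lun[1]);
	return RESP_FUNC_COMPLETE;	/* pretend the target completed it */
}

/* Each SAM handler just picks the function code and delegates. */
static enum tmf_resp lu_reset(const unsigned char *lun)
{
	struct tmf_request req = { .tmf = TMF_LU_RESET_X };
	return issue_ssp_tmf(lun, &req);
}

int main(void)
{
	unsigned char lun[8] = { 0 };
	return lu_reset(lun) == RESP_FUNC_COMPLETE ? 0 : 1;
}

The design point the sketch keeps is that only the opcode (and, for abort/query, the tag) differs between handlers; error handling and the actual SSP frame construction live in one place.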
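Editor's note: hot-plug handling in this patch is deliberately deferred. mvs_int_port() marks PHY_PLUG_OUT and calls mvs_handle_event(), which queues a struct mvs_wq and schedules delayed work two seconds out (schedule_delayed_work(&mwq->work_q, HZ * 2)); mvs_work_queue() then re-reads the phy control register and either reports loss of signal or re-forms the port. The sketch below models that "record now, decide after a debounce window" idea in plain C with a timestamp instead of a kernel workqueue; all names are illustrative.

#include <stdio.h>
#include <time.h>

/* Illustrative model of deferring a phy plug event for ~2 s before acting. */
struct plug_event {
	int phy_no;
	time_t due;		/* when the deferred handler may run */
	int pending;
};

static void queue_plug_event(struct plug_event *ev, int phy_no)
{
	ev->phy_no = phy_no;
	ev->due = time(NULL) + 2;	/* mirrors the HZ * 2 delay in the driver */
	ev->pending = 1;
}

/* Called periodically; only acts once the debounce window has passed. */
static void service_plug_event(struct plug_event *ev, int phy_ready)
{
	if (!ev->pending || time(NULL) < ev->due)
		return;
	ev->pending = 0;
	if (phy_ready)
		printf("phy%d re-attached, re-form the port\n", ev->phy_no);
	else
		printf("phy%d removed, notify loss of signal\n", ev->phy_no);
}

int main(void)
{
	struct plug_event ev = { 0 };

	queue_plug_event(&ev, 3);
	service_plug_event(&ev, 1);	/* too early: does nothing yet */
	return 0;
}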
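Editor's note: mvs_int_rx() avoids a register read per interrupt by treating the first dword of the RX ring as a mirror of the hardware's producer index; only when that mirror shows no progress does it fall back to MVS_CHIP_DISP->rx_update(), and it then walks its consumer index up to the producer with a power-of-two mask. A minimal user-space model of that consumer walk, with the ring size and descriptor layout simplified:

#include <stdio.h>
#include <stdint.h>

#define RING_SZ 8	/* power of two so index wrap is a mask, like MVS_RX_RING_SZ */

/* entry 0 mirrors the hardware producer index; slot i's descriptor sits at i + 1 */
static uint32_t rx_ring[RING_SZ + 1];

static unsigned int consume_rx(unsigned int cons)
{
	unsigned int prod = rx_ring[0] & (RING_SZ - 1);

	while (cons != prod) {
		cons = (cons + 1) & (RING_SZ - 1);
		/* descriptor for slot 'cons' lives one dword past the mirror */
		printf("complete descriptor 0x%x from slot %u\n",
		       (unsigned)rx_ring[cons + 1], cons);
	}
	return cons;
}

int main(void)
{
	unsigned int cons = 0;

	rx_ring[2] = 0x80000001;	/* pretend hardware posted a completion in slot 1 */
	rx_ring[3] = 0x80000002;	/* ...and another in slot 2 */
	rx_ring[0] = 2;			/* producer index mirrored into entry 0 */

	cons = consume_rx(cons);
	printf("consumer now at %u\n", cons);
	return 0;
}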
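Editor's note: the NVRAM helpers removed by this patch (mvs_eep_read and friends) used a bounded busy-poll: issue the read, then spin up to 1000 iterations, with udelay(10) between them, waiting for a ready bit, and return -EBUSY on timeout. The sketch below reproduces only that polling pattern against fake registers; the register names and the instantly-ready behaviour are invented for illustration.

#include <stdio.h>
#include <stdint.h>
#include <errno.h>

#define FAKE_RDY_BIT 0x1

/* Fake "registers" standing in for the SPI/TWSI block (illustration only). */
static uint32_t fake_ctl, fake_data;

static uint32_t reg_read(volatile uint32_t *r) { return *r; }

/* Poll a ready bit a bounded number of times before failing. */
static int polled_read(uint32_t addr, uint32_t *out)
{
	int timeout = 1000;

	fake_ctl = FAKE_RDY_BIT;	/* simulate the controller finishing immediately */
	fake_data = addr ^ 0xdeadbeef;	/* arbitrary fake payload */

	while (timeout-- > 0) {
		if (reg_read(&fake_ctl) & FAKE_RDY_BIT) {
			*out = reg_read(&fake_data);
			return 0;
		}
		/* a real driver would udelay(10) here between polls */
	}
	return -EBUSY;
}

int main(void)
{
	uint32_t v;

	if (polled_read(0x40, &v) == 0)
		printf("read 0x%08x\n", (unsigned)v);
	return 0;
}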