Diffstat (limited to 'drivers')
172 files changed, 8324 insertions, 2305 deletions
diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c index 6174965d9a4d..f916ddf63938 100644 --- a/drivers/atm/solos-pci.c +++ b/drivers/atm/solos-pci.c @@ -781,7 +781,8 @@ static struct atm_vcc *find_vcc(struct atm_dev *dev, short vpi, int vci) sk_for_each(s, node, head) { vcc = atm_sk(s); if (vcc->dev == dev && vcc->vci == vci && - vcc->vpi == vpi && vcc->qos.rxtp.traffic_class != ATM_NONE) + vcc->vpi == vpi && vcc->qos.rxtp.traffic_class != ATM_NONE && + test_bit(ATM_VF_READY, &vcc->flags)) goto out; } vcc = NULL; @@ -907,6 +908,10 @@ static void pclose(struct atm_vcc *vcc) clear_bit(ATM_VF_ADDR, &vcc->flags); clear_bit(ATM_VF_READY, &vcc->flags); + /* Hold up vcc_destroy_socket() (our caller) until solos_bh() in the + tasklet has finished processing any incoming packets (and, more to + the point, using the vcc pointer). */ + tasklet_unlock_wait(&card->tlet); return; } diff --git a/drivers/base/node.c b/drivers/base/node.c index 2bdd8a94ec94..2872e86837b2 100644 --- a/drivers/base/node.c +++ b/drivers/base/node.c @@ -66,8 +66,7 @@ static ssize_t node_read_meminfo(struct sys_device * dev, struct sysinfo i; si_meminfo_node(&i, nid); - - n = sprintf(buf, "\n" + n = sprintf(buf, "Node %d MemTotal: %8lu kB\n" "Node %d MemFree: %8lu kB\n" "Node %d MemUsed: %8lu kB\n" @@ -78,13 +77,33 @@ static ssize_t node_read_meminfo(struct sys_device * dev, "Node %d Active(file): %8lu kB\n" "Node %d Inactive(file): %8lu kB\n" "Node %d Unevictable: %8lu kB\n" - "Node %d Mlocked: %8lu kB\n" + "Node %d Mlocked: %8lu kB\n", + nid, K(i.totalram), + nid, K(i.freeram), + nid, K(i.totalram - i.freeram), + nid, K(node_page_state(nid, NR_ACTIVE_ANON) + + node_page_state(nid, NR_ACTIVE_FILE)), + nid, K(node_page_state(nid, NR_INACTIVE_ANON) + + node_page_state(nid, NR_INACTIVE_FILE)), + nid, K(node_page_state(nid, NR_ACTIVE_ANON)), + nid, K(node_page_state(nid, NR_INACTIVE_ANON)), + nid, K(node_page_state(nid, NR_ACTIVE_FILE)), + nid, K(node_page_state(nid, NR_INACTIVE_FILE)), + nid, K(node_page_state(nid, NR_UNEVICTABLE)), + nid, K(node_page_state(nid, NR_MLOCK))); + #ifdef CONFIG_HIGHMEM + n += sprintf(buf + n, "Node %d HighTotal: %8lu kB\n" "Node %d HighFree: %8lu kB\n" "Node %d LowTotal: %8lu kB\n" - "Node %d LowFree: %8lu kB\n" + "Node %d LowFree: %8lu kB\n", + nid, K(i.totalhigh), + nid, K(i.freehigh), + nid, K(i.totalram - i.totalhigh), + nid, K(i.freeram - i.freehigh)); #endif + n += sprintf(buf + n, "Node %d Dirty: %8lu kB\n" "Node %d Writeback: %8lu kB\n" "Node %d FilePages: %8lu kB\n" @@ -99,25 +118,6 @@ static ssize_t node_read_meminfo(struct sys_device * dev, "Node %d Slab: %8lu kB\n" "Node %d SReclaimable: %8lu kB\n" "Node %d SUnreclaim: %8lu kB\n", - nid, K(i.totalram), - nid, K(i.freeram), - nid, K(i.totalram - i.freeram), - nid, K(node_page_state(nid, NR_ACTIVE_ANON) + - node_page_state(nid, NR_ACTIVE_FILE)), - nid, K(node_page_state(nid, NR_INACTIVE_ANON) + - node_page_state(nid, NR_INACTIVE_FILE)), - nid, K(node_page_state(nid, NR_ACTIVE_ANON)), - nid, K(node_page_state(nid, NR_INACTIVE_ANON)), - nid, K(node_page_state(nid, NR_ACTIVE_FILE)), - nid, K(node_page_state(nid, NR_INACTIVE_FILE)), - nid, K(node_page_state(nid, NR_UNEVICTABLE)), - nid, K(node_page_state(nid, NR_MLOCK)), -#ifdef CONFIG_HIGHMEM - nid, K(i.totalhigh), - nid, K(i.freehigh), - nid, K(i.totalram - i.totalhigh), - nid, K(i.freeram - i.freehigh), -#endif nid, K(node_page_state(nid, NR_FILE_DIRTY)), nid, K(node_page_state(nid, NR_WRITEBACK)), nid, K(node_page_state(nid, NR_FILE_PAGES)), diff --git 
a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c index 094bdc355b1f..ff68e7c34ce7 100644 --- a/drivers/char/ipmi/ipmi_si_intf.c +++ b/drivers/char/ipmi/ipmi_si_intf.c @@ -2176,6 +2176,14 @@ static int __devinit ipmi_pnp_probe(struct pnp_dev *dev, info->io.addr_data = res->start; info->io.regspacing = DEFAULT_REGSPACING; + res = pnp_get_resource(dev, + (info->io.addr_type == IPMI_IO_ADDR_SPACE) ? + IORESOURCE_IO : IORESOURCE_MEM, + 1); + if (res) { + if (res->start > info->io.addr_data) + info->io.regspacing = res->start - info->io.addr_data; + } info->io.regsize = DEFAULT_REGSPACING; info->io.regshift = 0; diff --git a/drivers/char/misc.c b/drivers/char/misc.c index cd650ca8c679..abdafd488980 100644 --- a/drivers/char/misc.c +++ b/drivers/char/misc.c @@ -242,7 +242,7 @@ int misc_deregister(struct miscdevice *misc) { int i = DYNAMIC_MINORS - misc->minor - 1; - if (list_empty(&misc->list)) + if (WARN_ON(list_empty(&misc->list))) return -EINVAL; mutex_lock(&misc_mtx); diff --git a/drivers/char/pcmcia/ipwireless/network.c b/drivers/char/pcmcia/ipwireless/network.c index 65920163f53d..9fe538347932 100644 --- a/drivers/char/pcmcia/ipwireless/network.c +++ b/drivers/char/pcmcia/ipwireless/network.c @@ -239,7 +239,7 @@ static int ipwireless_ppp_ioctl(struct ppp_channel *ppp_channel, return err; } -static struct ppp_channel_ops ipwireless_ppp_channel_ops = { +static const struct ppp_channel_ops ipwireless_ppp_channel_ops = { .start_xmit = ipwireless_ppp_start_xmit, .ioctl = ipwireless_ppp_ioctl }; diff --git a/drivers/char/vt.c b/drivers/char/vt.c index 4a9eb3044e52..44f03ddd8871 100644 --- a/drivers/char/vt.c +++ b/drivers/char/vt.c @@ -837,9 +837,10 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc, unsigned int cols, unsigned int lines) { unsigned long old_origin, new_origin, new_scr_end, rlth, rrem, err = 0; + unsigned long end; unsigned int old_cols, old_rows, old_row_size, old_screen_size; unsigned int new_cols, new_rows, new_row_size, new_screen_size; - unsigned int end, user; + unsigned int user; unsigned short *newscreen; WARN_CONSOLE_UNLOCKED(); @@ -3065,7 +3066,8 @@ static int bind_con_driver(const struct consw *csw, int first, int last, old_was_color = vc->vc_can_do_color; vc->vc_sw->con_deinit(vc); - vc->vc_origin = (unsigned long)vc->vc_screenbuf; + if (!vc->vc_origin) + vc->vc_origin = (unsigned long)vc->vc_screenbuf; visual_init(vc, i, 0); set_origin(vc); update_attr(vc); diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c index dbefe15bd582..a50710843378 100644 --- a/drivers/cpuidle/cpuidle.c +++ b/drivers/cpuidle/cpuidle.c @@ -74,6 +74,17 @@ static void cpuidle_idle_call(void) */ hrtimer_peek_ahead_timers(); #endif + + /* + * Call the device's prepare function before calling the + * governor's select function. ->prepare gives the device's + * cpuidle driver a chance to update any dynamic information + * of its cpuidle states for the current idle period, e.g. + * state availability, latencies, residencies, etc. + */ + if (dev->prepare) + dev->prepare(dev); + /* ask the governor for the next state */ next_state = cpuidle_curr_governor->select(dev); if (need_resched()) { @@ -282,6 +293,26 @@ static int __cpuidle_register_device(struct cpuidle_device *dev) poll_idle_init(dev); + /* + * cpuidle driver should set the dev->power_specified bit + * before registering the device if the driver provides + * power_usage numbers. 
+ * + * For those devices whose ->power_specified is not set, + * we fill in power_usage with decreasing values as the + * cpuidle code has an implicit assumption that state Cn + * uses less power than C(n-1). + * + * With CONFIG_ARCH_HAS_CPU_RELAX, C0 is already assigned + * an power value of -1. So we use -2, -3, etc, for other + * c-states. + */ + if (!dev->power_specified) { + int i; + for (i = CPUIDLE_DRIVER_STATE_START; i < dev->state_count; i++) + dev->states[i].power_usage = -1 - i; + } + per_cpu(cpuidle_devices, dev->cpu) = dev; list_add(&dev->device_list, &cpuidle_detected_devices); if ((ret = cpuidle_add_sysfs(sys_dev))) { diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c index 1b128702d300..c2408bbe9c2e 100644 --- a/drivers/cpuidle/governors/menu.c +++ b/drivers/cpuidle/governors/menu.c @@ -234,6 +234,7 @@ static int menu_select(struct cpuidle_device *dev) { struct menu_device *data = &__get_cpu_var(menu_devices); int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY); + unsigned int power_usage = -1; int i; int multiplier; @@ -278,19 +279,27 @@ static int menu_select(struct cpuidle_device *dev) if (data->expected_us > 5) data->last_state_idx = CPUIDLE_DRIVER_STATE_START; - - /* find the deepest idle state that satisfies our constraints */ + /* + * Find the idle state with the lowest power while satisfying + * our constraints. + */ for (i = CPUIDLE_DRIVER_STATE_START; i < dev->state_count; i++) { struct cpuidle_state *s = &dev->states[i]; + if (s->flags & CPUIDLE_FLAG_IGNORE) + continue; if (s->target_residency > data->predicted_us) - break; + continue; if (s->exit_latency > latency_req) - break; + continue; if (s->exit_latency * multiplier > data->predicted_us) - break; - data->exit_us = s->exit_latency; - data->last_state_idx = i; + continue; + + if (s->power_usage < power_usage) { + power_usage = s->power_usage; + data->last_state_idx = i; + data->exit_us = s->exit_latency; + } } return data->last_state_idx; diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index 9e01e96fee94..fed57634b6c1 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -33,6 +33,19 @@ if DMADEVICES comment "DMA Devices" +config INTEL_MID_DMAC + tristate "Intel MID DMA support for Peripheral DMA controllers" + depends on PCI && X86 + select DMA_ENGINE + default n + help + Enable support for the Intel(R) MID DMA engine present + in Intel MID chipsets. + + Say Y here if you have such a chipset. + + If unsure, say N. + config ASYNC_TX_DISABLE_CHANNEL_SWITCH bool @@ -175,6 +188,13 @@ config PL330_DMA You need to provide platform specific settings via platform_data for a dma-pl330 device. +config PCH_DMA + tristate "Topcliff PCH DMA support" + depends on PCI && X86 + select DMA_ENGINE + help + Enable support for the Topcliff PCH DMA engine. 
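The menu-governor hunk above changes the state walk from "stop at the first state that violates a constraint" to "examine every state and keep the lowest-power one that satisfies the latency and residency constraints". A minimal stand-alone sketch of that policy follows; the struct is a simplified stand-in for struct cpuidle_state, not the real definition, and index 0 stands in for the polling state skipped by CPUIDLE_DRIVER_STATE_START.

/* Sketch only: pick the lowest-power state that meets all constraints. */
struct sketch_state {
	unsigned int exit_latency;	/* worst-case exit latency, us */
	unsigned int target_residency;	/* minimum useful residency, us */
	unsigned int power_usage;	/* relative power cost */
	int ignore;			/* mirrors CPUIDLE_FLAG_IGNORE */
};

static int pick_state(const struct sketch_state *s, int count,
		      unsigned int predicted_us, unsigned int latency_req,
		      unsigned int multiplier)
{
	unsigned int best_power = (unsigned int)-1;
	int best = 0, i;

	for (i = 1; i < count; i++) {
		if (s[i].ignore)
			continue;
		if (s[i].target_residency > predicted_us)
			continue;
		if (s[i].exit_latency > latency_req)
			continue;
		if (s[i].exit_latency * multiplier > predicted_us)
			continue;
		if (s[i].power_usage < best_power) {
			best_power = s[i].power_usage;
			best = i;
		}
	}
	return best;
}

This is also why __cpuidle_register_device() above backfills power_usage with -1, -2, ... when a driver supplies no power numbers: the decreasing values keep deeper states comparing as cheaper, so the old "deepest eligible state" behaviour is preserved.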
+ config DMA_ENGINE bool diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile index 0fe5ebbfda5d..72bd70384d8a 100644 --- a/drivers/dma/Makefile +++ b/drivers/dma/Makefile @@ -7,6 +7,7 @@ endif obj-$(CONFIG_DMA_ENGINE) += dmaengine.o obj-$(CONFIG_NET_DMA) += iovlock.o +obj-$(CONFIG_INTEL_MID_DMAC) += intel_mid_dma.o obj-$(CONFIG_DMATEST) += dmatest.o obj-$(CONFIG_INTEL_IOATDMA) += ioat/ obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o @@ -23,3 +24,4 @@ obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/ obj-$(CONFIG_TIMB_DMA) += timb_dma.o obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o obj-$(CONFIG_PL330_DMA) += pl330.o +obj-$(CONFIG_PCH_DMA) += pch_dma.o diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c index e88076022a7a..a0f3e6a06e06 100644 --- a/drivers/dma/at_hdmac.c +++ b/drivers/dma/at_hdmac.c @@ -790,12 +790,12 @@ static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, list_splice_init(&atchan->queue, &list); list_splice_init(&atchan->active_list, &list); - spin_unlock_bh(&atchan->lock); - /* Flush all pending and queued descriptors */ list_for_each_entry_safe(desc, _desc, &list, desc_node) atc_chain_complete(atchan, desc); + spin_unlock_bh(&atchan->lock); + return 0; } diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c index a724e6be1b4d..557e2272e5b3 100644 --- a/drivers/dma/coh901318.c +++ b/drivers/dma/coh901318.c @@ -72,6 +72,9 @@ struct coh901318_chan { unsigned long nbr_active_done; unsigned long busy; + u32 runtime_addr; + u32 runtime_ctrl; + struct coh901318_base *base; }; @@ -190,6 +193,9 @@ static inline struct coh901318_chan *to_coh901318_chan(struct dma_chan *chan) static inline dma_addr_t cohc_dev_addr(struct coh901318_chan *cohc) { + /* Runtime supplied address will take precedence */ + if (cohc->runtime_addr) + return cohc->runtime_addr; return cohc->base->platform->chan_conf[cohc->id].dev_addr; } @@ -1055,6 +1061,14 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, params = cohc_chan_param(cohc); config = params->config; + /* + * Add runtime-specific control on top, make + * sure the bits you set per peripheral channel are + * cleared in the default config from the platform. + */ + ctrl_chained |= cohc->runtime_ctrl; + ctrl_last |= cohc->runtime_ctrl; + ctrl |= cohc->runtime_ctrl; if (direction == DMA_TO_DEVICE) { u32 tx_flags = COH901318_CX_CTRL_PRDD_SOURCE | @@ -1113,6 +1127,12 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, if (ret) goto err_lli_fill; + /* + * Set the default ctrl for the channel to the one from the lli, + * things may have changed due to odd buffer alignment etc. 
+ */ + coh901318_set_ctrl(cohc, lli->control); + COH_DBG(coh901318_list_print(cohc, lli)); /* Pick a descriptor to handle this transfer */ @@ -1175,6 +1195,146 @@ coh901318_issue_pending(struct dma_chan *chan) spin_unlock_irqrestore(&cohc->lock, flags); } +/* + * Here we wrap in the runtime dma control interface + */ +struct burst_table { + int burst_8bit; + int burst_16bit; + int burst_32bit; + u32 reg; +}; + +static const struct burst_table burst_sizes[] = { + { + .burst_8bit = 64, + .burst_16bit = 32, + .burst_32bit = 16, + .reg = COH901318_CX_CTRL_BURST_COUNT_64_BYTES, + }, + { + .burst_8bit = 48, + .burst_16bit = 24, + .burst_32bit = 12, + .reg = COH901318_CX_CTRL_BURST_COUNT_48_BYTES, + }, + { + .burst_8bit = 32, + .burst_16bit = 16, + .burst_32bit = 8, + .reg = COH901318_CX_CTRL_BURST_COUNT_32_BYTES, + }, + { + .burst_8bit = 16, + .burst_16bit = 8, + .burst_32bit = 4, + .reg = COH901318_CX_CTRL_BURST_COUNT_16_BYTES, + }, + { + .burst_8bit = 8, + .burst_16bit = 4, + .burst_32bit = 2, + .reg = COH901318_CX_CTRL_BURST_COUNT_8_BYTES, + }, + { + .burst_8bit = 4, + .burst_16bit = 2, + .burst_32bit = 1, + .reg = COH901318_CX_CTRL_BURST_COUNT_4_BYTES, + }, + { + .burst_8bit = 2, + .burst_16bit = 1, + .burst_32bit = 0, + .reg = COH901318_CX_CTRL_BURST_COUNT_2_BYTES, + }, + { + .burst_8bit = 1, + .burst_16bit = 0, + .burst_32bit = 0, + .reg = COH901318_CX_CTRL_BURST_COUNT_1_BYTE, + }, +}; + +static void coh901318_dma_set_runtimeconfig(struct dma_chan *chan, + struct dma_slave_config *config) +{ + struct coh901318_chan *cohc = to_coh901318_chan(chan); + dma_addr_t addr; + enum dma_slave_buswidth addr_width; + u32 maxburst; + u32 runtime_ctrl = 0; + int i = 0; + + /* We only support mem to per or per to mem transfers */ + if (config->direction == DMA_FROM_DEVICE) { + addr = config->src_addr; + addr_width = config->src_addr_width; + maxburst = config->src_maxburst; + } else if (config->direction == DMA_TO_DEVICE) { + addr = config->dst_addr; + addr_width = config->dst_addr_width; + maxburst = config->dst_maxburst; + } else { + dev_err(COHC_2_DEV(cohc), "illegal channel mode\n"); + return; + } + + dev_dbg(COHC_2_DEV(cohc), "configure channel for %d byte transfers\n", + addr_width); + switch (addr_width) { + case DMA_SLAVE_BUSWIDTH_1_BYTE: + runtime_ctrl |= + COH901318_CX_CTRL_SRC_BUS_SIZE_8_BITS | + COH901318_CX_CTRL_DST_BUS_SIZE_8_BITS; + + while (i < ARRAY_SIZE(burst_sizes)) { + if (burst_sizes[i].burst_8bit <= maxburst) + break; + i++; + } + + break; + case DMA_SLAVE_BUSWIDTH_2_BYTES: + runtime_ctrl |= + COH901318_CX_CTRL_SRC_BUS_SIZE_16_BITS | + COH901318_CX_CTRL_DST_BUS_SIZE_16_BITS; + + while (i < ARRAY_SIZE(burst_sizes)) { + if (burst_sizes[i].burst_16bit <= maxburst) + break; + i++; + } + + break; + case DMA_SLAVE_BUSWIDTH_4_BYTES: + /* Direction doesn't matter here, it's 32/32 bits */ + runtime_ctrl |= + COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | + COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS; + + while (i < ARRAY_SIZE(burst_sizes)) { + if (burst_sizes[i].burst_32bit <= maxburst) + break; + i++; + } + + break; + default: + dev_err(COHC_2_DEV(cohc), + "bad runtimeconfig: alien address width\n"); + return; + } + + runtime_ctrl |= burst_sizes[i].reg; + dev_dbg(COHC_2_DEV(cohc), + "selected burst size %d bytes for address width %d bytes, maxburst %d\n", + burst_sizes[i].burst_8bit, addr_width, maxburst); + + cohc->runtime_addr = addr; + cohc->runtime_ctrl = runtime_ctrl; +} + static int coh901318_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned long arg) @@ -1184,6 +1344,14 @@ 
coh901318_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, struct coh901318_desc *cohd; void __iomem *virtbase = cohc->base->virtbase; + if (cmd == DMA_SLAVE_CONFIG) { + struct dma_slave_config *config = + (struct dma_slave_config *) arg; + + coh901318_dma_set_runtimeconfig(chan, config); + return 0; + } + if (cmd == DMA_PAUSE) { coh901318_pause(chan); return 0; @@ -1240,6 +1408,7 @@ coh901318_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, return 0; } + void coh901318_base_init(struct dma_device *dma, const int *pick_chans, struct coh901318_base *base) { diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c index 68d58c414cf0..5589358b684d 100644 --- a/drivers/dma/dmatest.c +++ b/drivers/dma/dmatest.c @@ -540,7 +540,7 @@ static int dmatest_add_channel(struct dma_chan *chan) struct dmatest_chan *dtc; struct dma_device *dma_dev = chan->device; unsigned int thread_count = 0; - unsigned int cnt; + int cnt; dtc = kmalloc(sizeof(struct dmatest_chan), GFP_KERNEL); if (!dtc) { diff --git a/drivers/dma/intel_mid_dma.c b/drivers/dma/intel_mid_dma.c new file mode 100644 index 000000000000..c2591e8d9b6e --- /dev/null +++ b/drivers/dma/intel_mid_dma.c @@ -0,0 +1,1143 @@ +/* + * intel_mid_dma.c - Intel Langwell DMA Drivers + * + * Copyright (C) 2008-10 Intel Corp + * Author: Vinod Koul <vinod.koul@intel.com> + * The driver design is based on dw_dmac driver + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. 
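The coh901318 hunks above wire struct dma_slave_config into the driver through the DMA_SLAVE_CONFIG command of its device_control hook, which then fills runtime_addr and runtime_ctrl. A hedged sketch of how a client of this kernel generation might program an RX channel; the channel pointer and the peripheral FIFO address are assumed to come from the client driver and are not part of the patch:

#include <linux/dmaengine.h>

/* Sketch only: configure a slave channel for 32-bit, 16-beat bursts from a
 * peripheral FIFO.  'chan' was obtained via dma_request_channel(). */
static int sketch_config_rx(struct dma_chan *chan, dma_addr_t fifo_addr)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_FROM_DEVICE,
		.src_addr	= fifo_addr,
		.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst	= 16,
	};

	/* In this generation the config travels through device_control();
	 * coh901318_control() above handles DMA_SLAVE_CONFIG. */
	return chan->device->device_control(chan, DMA_SLAVE_CONFIG,
					    (unsigned long)&cfg);
}

With that in place, cohc_dev_addr() prefers the runtime-supplied address and coh901318_prep_slave_sg() ORs the runtime control bits into the per-transfer control words, as the hunks above show.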
+ * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * + */ +#include <linux/pci.h> +#include <linux/interrupt.h> +#include <linux/intel_mid_dma.h> + +#define MAX_CHAN 4 /*max ch across controllers*/ +#include "intel_mid_dma_regs.h" + +#define INTEL_MID_DMAC1_ID 0x0814 +#define INTEL_MID_DMAC2_ID 0x0813 +#define INTEL_MID_GP_DMAC2_ID 0x0827 +#define INTEL_MFLD_DMAC1_ID 0x0830 +#define LNW_PERIPHRAL_MASK_BASE 0xFFAE8008 +#define LNW_PERIPHRAL_MASK_SIZE 0x10 +#define LNW_PERIPHRAL_STATUS 0x0 +#define LNW_PERIPHRAL_MASK 0x8 + +struct intel_mid_dma_probe_info { + u8 max_chan; + u8 ch_base; + u16 block_size; + u32 pimr_mask; +}; + +#define INFO(_max_chan, _ch_base, _block_size, _pimr_mask) \ + ((kernel_ulong_t)&(struct intel_mid_dma_probe_info) { \ + .max_chan = (_max_chan), \ + .ch_base = (_ch_base), \ + .block_size = (_block_size), \ + .pimr_mask = (_pimr_mask), \ + }) + +/***************************************************************************** +Utility Functions*/ +/** + * get_ch_index - convert status to channel + * @status: status mask + * @base: dma ch base value + * + * Modify the status mask and return the channel index needing + * attention (or -1 if neither) + */ +static int get_ch_index(int *status, unsigned int base) +{ + int i; + for (i = 0; i < MAX_CHAN; i++) { + if (*status & (1 << (i + base))) { + *status = *status & ~(1 << (i + base)); + pr_debug("MDMA: index %d New status %x\n", i, *status); + return i; + } + } + return -1; +} + +/** + * get_block_ts - calculates dma transaction length + * @len: dma transfer length + * @tx_width: dma transfer src width + * @block_size: dma controller max block size + * + * Based on src width calculate the DMA trsaction length in data items + * return data items or FFFF if exceeds max length for block + */ +static int get_block_ts(int len, int tx_width, int block_size) +{ + int byte_width = 0, block_ts = 0; + + switch (tx_width) { + case LNW_DMA_WIDTH_8BIT: + byte_width = 1; + break; + case LNW_DMA_WIDTH_16BIT: + byte_width = 2; + break; + case LNW_DMA_WIDTH_32BIT: + default: + byte_width = 4; + break; + } + + block_ts = len/byte_width; + if (block_ts > block_size) + block_ts = 0xFFFF; + return block_ts; +} + +/***************************************************************************** +DMAC1 interrupt Functions*/ + +/** + * dmac1_mask_periphral_intr - mask the periphral interrupt + * @midc: dma channel for which masking is required + * + * Masks the DMA periphral interrupt + * this is valid for DMAC1 family controllers only + * This controller should have periphral mask registers already mapped + */ +static void dmac1_mask_periphral_intr(struct intel_mid_dma_chan *midc) +{ + u32 pimr; + struct middma_device *mid = to_middma_device(midc->chan.device); + + if (mid->pimr_mask) { + pimr = readl(mid->mask_reg + LNW_PERIPHRAL_MASK); + pimr |= mid->pimr_mask; + writel(pimr, mid->mask_reg + LNW_PERIPHRAL_MASK); + } + return; +} + +/** + * dmac1_unmask_periphral_intr - unmask the periphral interrupt + * @midc: dma channel for which masking is required + * + * UnMasks the DMA periphral interrupt, + * this is valid for DMAC1 family controllers only + * This controller should have periphral mask registers already mapped + */ +static void dmac1_unmask_periphral_intr(struct intel_mid_dma_chan *midc) +{ + u32 pimr; + struct middma_device *mid = to_middma_device(midc->chan.device); + + if (mid->pimr_mask) { + pimr = readl(mid->mask_reg + LNW_PERIPHRAL_MASK); + pimr &= ~mid->pimr_mask; + writel(pimr, mid->mask_reg + 
LNW_PERIPHRAL_MASK); + } + return; +} + +/** + * enable_dma_interrupt - enable the periphral interrupt + * @midc: dma channel for which enable interrupt is required + * + * Enable the DMA periphral interrupt, + * this is valid for DMAC1 family controllers only + * This controller should have periphral mask registers already mapped + */ +static void enable_dma_interrupt(struct intel_mid_dma_chan *midc) +{ + dmac1_unmask_periphral_intr(midc); + + /*en ch interrupts*/ + iowrite32(UNMASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_TFR); + iowrite32(UNMASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_ERR); + return; +} + +/** + * disable_dma_interrupt - disable the periphral interrupt + * @midc: dma channel for which disable interrupt is required + * + * Disable the DMA periphral interrupt, + * this is valid for DMAC1 family controllers only + * This controller should have periphral mask registers already mapped + */ +static void disable_dma_interrupt(struct intel_mid_dma_chan *midc) +{ + /*Check LPE PISR, make sure fwd is disabled*/ + dmac1_mask_periphral_intr(midc); + iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_BLOCK); + iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_TFR); + iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_ERR); + return; +} + +/***************************************************************************** +DMA channel helper Functions*/ +/** + * mid_desc_get - get a descriptor + * @midc: dma channel for which descriptor is required + * + * Obtain a descriptor for the channel. Returns NULL if none are free. + * Once the descriptor is returned it is private until put on another + * list or freed + */ +static struct intel_mid_dma_desc *midc_desc_get(struct intel_mid_dma_chan *midc) +{ + struct intel_mid_dma_desc *desc, *_desc; + struct intel_mid_dma_desc *ret = NULL; + + spin_lock_bh(&midc->lock); + list_for_each_entry_safe(desc, _desc, &midc->free_list, desc_node) { + if (async_tx_test_ack(&desc->txd)) { + list_del(&desc->desc_node); + ret = desc; + break; + } + } + spin_unlock_bh(&midc->lock); + return ret; +} + +/** + * mid_desc_put - put a descriptor + * @midc: dma channel for which descriptor is required + * @desc: descriptor to put + * + * Return a descriptor from lwn_desc_get back to the free pool + */ +static void midc_desc_put(struct intel_mid_dma_chan *midc, + struct intel_mid_dma_desc *desc) +{ + if (desc) { + spin_lock_bh(&midc->lock); + list_add_tail(&desc->desc_node, &midc->free_list); + spin_unlock_bh(&midc->lock); + } +} +/** + * midc_dostart - begin a DMA transaction + * @midc: channel for which txn is to be started + * @first: first descriptor of series + * + * Load a transaction into the engine. This must be called with midc->lock + * held and bh disabled. + */ +static void midc_dostart(struct intel_mid_dma_chan *midc, + struct intel_mid_dma_desc *first) +{ + struct middma_device *mid = to_middma_device(midc->chan.device); + + /* channel is idle */ + if (midc->in_use && test_ch_en(midc->dma_base, midc->ch_id)) { + /*error*/ + pr_err("ERR_MDMA: channel is busy in start\n"); + /* The tasklet will hopefully advance the queue... 
*/ + return; + } + + /*write registers and en*/ + iowrite32(first->sar, midc->ch_regs + SAR); + iowrite32(first->dar, midc->ch_regs + DAR); + iowrite32(first->cfg_hi, midc->ch_regs + CFG_HIGH); + iowrite32(first->cfg_lo, midc->ch_regs + CFG_LOW); + iowrite32(first->ctl_lo, midc->ch_regs + CTL_LOW); + iowrite32(first->ctl_hi, midc->ch_regs + CTL_HIGH); + pr_debug("MDMA:TX SAR %x,DAR %x,CFGL %x,CFGH %x,CTLH %x, CTLL %x\n", + (int)first->sar, (int)first->dar, first->cfg_hi, + first->cfg_lo, first->ctl_hi, first->ctl_lo); + + iowrite32(ENABLE_CHANNEL(midc->ch_id), mid->dma_base + DMA_CHAN_EN); + first->status = DMA_IN_PROGRESS; +} + +/** + * midc_descriptor_complete - process completed descriptor + * @midc: channel owning the descriptor + * @desc: the descriptor itself + * + * Process a completed descriptor and perform any callbacks upon + * the completion. The completion handling drops the lock during the + * callbacks but must be called with the lock held. + */ +static void midc_descriptor_complete(struct intel_mid_dma_chan *midc, + struct intel_mid_dma_desc *desc) +{ + struct dma_async_tx_descriptor *txd = &desc->txd; + dma_async_tx_callback callback_txd = NULL; + void *param_txd = NULL; + + midc->completed = txd->cookie; + callback_txd = txd->callback; + param_txd = txd->callback_param; + + list_move(&desc->desc_node, &midc->free_list); + + spin_unlock_bh(&midc->lock); + if (callback_txd) { + pr_debug("MDMA: TXD callback set ... calling\n"); + callback_txd(param_txd); + spin_lock_bh(&midc->lock); + return; + } + spin_lock_bh(&midc->lock); + +} +/** + * midc_scan_descriptors - check the descriptors in channel + * mark completed when tx is completete + * @mid: device + * @midc: channel to scan + * + * Walk the descriptor chain for the device and process any entries + * that are complete. 
+ */ +static void midc_scan_descriptors(struct middma_device *mid, + struct intel_mid_dma_chan *midc) +{ + struct intel_mid_dma_desc *desc = NULL, *_desc = NULL; + + /*tx is complete*/ + list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) { + if (desc->status == DMA_IN_PROGRESS) { + desc->status = DMA_SUCCESS; + midc_descriptor_complete(midc, desc); + } + } + return; +} + +/***************************************************************************** +DMA engine callback Functions*/ +/** + * intel_mid_dma_tx_submit - callback to submit DMA transaction + * @tx: dma engine descriptor + * + * Submit the DMA trasaction for this descriptor, start if ch idle + */ +static dma_cookie_t intel_mid_dma_tx_submit(struct dma_async_tx_descriptor *tx) +{ + struct intel_mid_dma_desc *desc = to_intel_mid_dma_desc(tx); + struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(tx->chan); + dma_cookie_t cookie; + + spin_lock_bh(&midc->lock); + cookie = midc->chan.cookie; + + if (++cookie < 0) + cookie = 1; + + midc->chan.cookie = cookie; + desc->txd.cookie = cookie; + + + if (list_empty(&midc->active_list)) { + midc_dostart(midc, desc); + list_add_tail(&desc->desc_node, &midc->active_list); + } else { + list_add_tail(&desc->desc_node, &midc->queue); + } + spin_unlock_bh(&midc->lock); + + return cookie; +} + +/** + * intel_mid_dma_issue_pending - callback to issue pending txn + * @chan: chan where pending trascation needs to be checked and submitted + * + * Call for scan to issue pending descriptors + */ +static void intel_mid_dma_issue_pending(struct dma_chan *chan) +{ + struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan); + + spin_lock_bh(&midc->lock); + if (!list_empty(&midc->queue)) + midc_scan_descriptors(to_middma_device(chan->device), midc); + spin_unlock_bh(&midc->lock); +} + +/** + * intel_mid_dma_tx_status - Return status of txn + * @chan: chan for where status needs to be checked + * @cookie: cookie for txn + * @txstate: DMA txn state + * + * Return status of DMA txn + */ +static enum dma_status intel_mid_dma_tx_status(struct dma_chan *chan, + dma_cookie_t cookie, + struct dma_tx_state *txstate) +{ + struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan); + dma_cookie_t last_used; + dma_cookie_t last_complete; + int ret; + + last_complete = midc->completed; + last_used = chan->cookie; + + ret = dma_async_is_complete(cookie, last_complete, last_used); + if (ret != DMA_SUCCESS) { + midc_scan_descriptors(to_middma_device(chan->device), midc); + + last_complete = midc->completed; + last_used = chan->cookie; + + ret = dma_async_is_complete(cookie, last_complete, last_used); + } + + if (txstate) { + txstate->last = last_complete; + txstate->used = last_used; + txstate->residue = 0; + } + return ret; +} + +/** + * intel_mid_dma_device_control - DMA device control + * @chan: chan for DMA control + * @cmd: control cmd + * @arg: cmd arg value + * + * Perform DMA control command + */ +static int intel_mid_dma_device_control(struct dma_chan *chan, + enum dma_ctrl_cmd cmd, unsigned long arg) +{ + struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan); + struct middma_device *mid = to_middma_device(chan->device); + struct intel_mid_dma_desc *desc, *_desc; + LIST_HEAD(list); + + if (cmd != DMA_TERMINATE_ALL) + return -ENXIO; + + spin_lock_bh(&midc->lock); + if (midc->in_use == false) { + spin_unlock_bh(&midc->lock); + return 0; + } + list_splice_init(&midc->free_list, &list); + midc->descs_allocated = 0; + midc->slave = NULL; + + /* Disable interrupts */ + 
disable_dma_interrupt(midc); + + spin_unlock_bh(&midc->lock); + list_for_each_entry_safe(desc, _desc, &list, desc_node) { + pr_debug("MDMA: freeing descriptor %p\n", desc); + pci_pool_free(mid->dma_pool, desc, desc->txd.phys); + } + return 0; +} + +/** + * intel_mid_dma_prep_slave_sg - Prep slave sg txn + * @chan: chan for DMA transfer + * @sgl: scatter gather list + * @sg_len: length of sg txn + * @direction: DMA transfer dirtn + * @flags: DMA flags + * + * Do DMA sg txn: NOT supported now + */ +static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg( + struct dma_chan *chan, struct scatterlist *sgl, + unsigned int sg_len, enum dma_data_direction direction, + unsigned long flags) +{ + /*not supported now*/ + return NULL; +} + +/** + * intel_mid_dma_prep_memcpy - Prep memcpy txn + * @chan: chan for DMA transfer + * @dest: destn address + * @src: src address + * @len: DMA transfer len + * @flags: DMA flags + * + * Perform a DMA memcpy. Note we support slave periphral DMA transfers only + * The periphral txn details should be filled in slave structure properly + * Returns the descriptor for this txn + */ +static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy( + struct dma_chan *chan, dma_addr_t dest, + dma_addr_t src, size_t len, unsigned long flags) +{ + struct intel_mid_dma_chan *midc; + struct intel_mid_dma_desc *desc = NULL; + struct intel_mid_dma_slave *mids; + union intel_mid_dma_ctl_lo ctl_lo; + union intel_mid_dma_ctl_hi ctl_hi; + union intel_mid_dma_cfg_lo cfg_lo; + union intel_mid_dma_cfg_hi cfg_hi; + enum intel_mid_dma_width width = 0; + + pr_debug("MDMA: Prep for memcpy\n"); + WARN_ON(!chan); + if (!len) + return NULL; + + mids = chan->private; + WARN_ON(!mids); + + midc = to_intel_mid_dma_chan(chan); + WARN_ON(!midc); + + pr_debug("MDMA:called for DMA %x CH %d Length %zu\n", + midc->dma->pci_id, midc->ch_id, len); + pr_debug("MDMA:Cfg passed Mode %x, Dirn %x, HS %x, Width %x\n", + mids->cfg_mode, mids->dirn, mids->hs_mode, mids->src_width); + + /*calculate CFG_LO*/ + if (mids->hs_mode == LNW_DMA_SW_HS) { + cfg_lo.cfg_lo = 0; + cfg_lo.cfgx.hs_sel_dst = 1; + cfg_lo.cfgx.hs_sel_src = 1; + } else if (mids->hs_mode == LNW_DMA_HW_HS) + cfg_lo.cfg_lo = 0x00000; + + /*calculate CFG_HI*/ + if (mids->cfg_mode == LNW_DMA_MEM_TO_MEM) { + /*SW HS only*/ + cfg_hi.cfg_hi = 0; + } else { + cfg_hi.cfg_hi = 0; + if (midc->dma->pimr_mask) { + cfg_hi.cfgx.protctl = 0x0; /*default value*/ + cfg_hi.cfgx.fifo_mode = 1; + if (mids->dirn == DMA_TO_DEVICE) { + cfg_hi.cfgx.src_per = 0; + if (mids->device_instance == 0) + cfg_hi.cfgx.dst_per = 3; + if (mids->device_instance == 1) + cfg_hi.cfgx.dst_per = 1; + } else if (mids->dirn == DMA_FROM_DEVICE) { + if (mids->device_instance == 0) + cfg_hi.cfgx.src_per = 2; + if (mids->device_instance == 1) + cfg_hi.cfgx.src_per = 0; + cfg_hi.cfgx.dst_per = 0; + } + } else { + cfg_hi.cfgx.protctl = 0x1; /*default value*/ + cfg_hi.cfgx.src_per = cfg_hi.cfgx.dst_per = + midc->ch_id - midc->dma->chan_base; + } + } + + /*calculate CTL_HI*/ + ctl_hi.ctlx.reser = 0; + width = mids->src_width; + + ctl_hi.ctlx.block_ts = get_block_ts(len, width, midc->dma->block_size); + pr_debug("MDMA:calc len %d for block size %d\n", + ctl_hi.ctlx.block_ts, midc->dma->block_size); + /*calculate CTL_LO*/ + ctl_lo.ctl_lo = 0; + ctl_lo.ctlx.int_en = 1; + ctl_lo.ctlx.dst_tr_width = mids->dst_width; + ctl_lo.ctlx.src_tr_width = mids->src_width; + ctl_lo.ctlx.dst_msize = mids->src_msize; + ctl_lo.ctlx.src_msize = mids->dst_msize; + + if (mids->cfg_mode == LNW_DMA_MEM_TO_MEM) 
{ + ctl_lo.ctlx.tt_fc = 0; + ctl_lo.ctlx.sinc = 0; + ctl_lo.ctlx.dinc = 0; + } else { + if (mids->dirn == DMA_TO_DEVICE) { + ctl_lo.ctlx.sinc = 0; + ctl_lo.ctlx.dinc = 2; + ctl_lo.ctlx.tt_fc = 1; + } else if (mids->dirn == DMA_FROM_DEVICE) { + ctl_lo.ctlx.sinc = 2; + ctl_lo.ctlx.dinc = 0; + ctl_lo.ctlx.tt_fc = 2; + } + } + + pr_debug("MDMA:Calc CTL LO %x, CTL HI %x, CFG LO %x, CFG HI %x\n", + ctl_lo.ctl_lo, ctl_hi.ctl_hi, cfg_lo.cfg_lo, cfg_hi.cfg_hi); + + enable_dma_interrupt(midc); + + desc = midc_desc_get(midc); + if (desc == NULL) + goto err_desc_get; + desc->sar = src; + desc->dar = dest ; + desc->len = len; + desc->cfg_hi = cfg_hi.cfg_hi; + desc->cfg_lo = cfg_lo.cfg_lo; + desc->ctl_lo = ctl_lo.ctl_lo; + desc->ctl_hi = ctl_hi.ctl_hi; + desc->width = width; + desc->dirn = mids->dirn; + return &desc->txd; + +err_desc_get: + pr_err("ERR_MDMA: Failed to get desc\n"); + midc_desc_put(midc, desc); + return NULL; +} + +/** + * intel_mid_dma_free_chan_resources - Frees dma resources + * @chan: chan requiring attention + * + * Frees the allocated resources on this DMA chan + */ +static void intel_mid_dma_free_chan_resources(struct dma_chan *chan) +{ + struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan); + struct middma_device *mid = to_middma_device(chan->device); + struct intel_mid_dma_desc *desc, *_desc; + + if (true == midc->in_use) { + /*trying to free ch in use!!!!!*/ + pr_err("ERR_MDMA: trying to free ch in use\n"); + } + + spin_lock_bh(&midc->lock); + midc->descs_allocated = 0; + list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) { + list_del(&desc->desc_node); + pci_pool_free(mid->dma_pool, desc, desc->txd.phys); + } + list_for_each_entry_safe(desc, _desc, &midc->free_list, desc_node) { + list_del(&desc->desc_node); + pci_pool_free(mid->dma_pool, desc, desc->txd.phys); + } + list_for_each_entry_safe(desc, _desc, &midc->queue, desc_node) { + list_del(&desc->desc_node); + pci_pool_free(mid->dma_pool, desc, desc->txd.phys); + } + spin_unlock_bh(&midc->lock); + midc->in_use = false; + /* Disable CH interrupts */ + iowrite32(MASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_BLOCK); + iowrite32(MASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_ERR); +} + +/** + * intel_mid_dma_alloc_chan_resources - Allocate dma resources + * @chan: chan requiring attention + * + * Allocates DMA resources on this chan + * Return the descriptors allocated + */ +static int intel_mid_dma_alloc_chan_resources(struct dma_chan *chan) +{ + struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan); + struct middma_device *mid = to_middma_device(chan->device); + struct intel_mid_dma_desc *desc; + dma_addr_t phys; + int i = 0; + + + /* ASSERT: channel is idle */ + if (test_ch_en(mid->dma_base, midc->ch_id)) { + /*ch is not idle*/ + pr_err("ERR_MDMA: ch not idle\n"); + return -EIO; + } + midc->completed = chan->cookie = 1; + + spin_lock_bh(&midc->lock); + while (midc->descs_allocated < DESCS_PER_CHANNEL) { + spin_unlock_bh(&midc->lock); + desc = pci_pool_alloc(mid->dma_pool, GFP_KERNEL, &phys); + if (!desc) { + pr_err("ERR_MDMA: desc failed\n"); + return -ENOMEM; + /*check*/ + } + dma_async_tx_descriptor_init(&desc->txd, chan); + desc->txd.tx_submit = intel_mid_dma_tx_submit; + desc->txd.flags = DMA_CTRL_ACK; + desc->txd.phys = phys; + spin_lock_bh(&midc->lock); + i = ++midc->descs_allocated; + list_add_tail(&desc->desc_node, &midc->free_list); + } + spin_unlock_bh(&midc->lock); + midc->in_use = false; + pr_debug("MID_DMA: Desc alloc done ret: %d desc\n", i); + return i; +} + +/** + * 
midc_handle_error - Handle DMA txn error + * @mid: controller where error occured + * @midc: chan where error occured + * + * Scan the descriptor for error + */ +static void midc_handle_error(struct middma_device *mid, + struct intel_mid_dma_chan *midc) +{ + midc_scan_descriptors(mid, midc); +} + +/** + * dma_tasklet - DMA interrupt tasklet + * @data: tasklet arg (the controller structure) + * + * Scan the controller for interrupts for completion/error + * Clear the interrupt and call for handling completion/error + */ +static void dma_tasklet(unsigned long data) +{ + struct middma_device *mid = NULL; + struct intel_mid_dma_chan *midc = NULL; + u32 status; + int i; + + mid = (struct middma_device *)data; + if (mid == NULL) { + pr_err("ERR_MDMA: tasklet Null param\n"); + return; + } + pr_debug("MDMA: in tasklet for device %x\n", mid->pci_id); + status = ioread32(mid->dma_base + RAW_TFR); + pr_debug("MDMA:RAW_TFR %x\n", status); + status &= mid->intr_mask; + while (status) { + /*txn interrupt*/ + i = get_ch_index(&status, mid->chan_base); + if (i < 0) { + pr_err("ERR_MDMA:Invalid ch index %x\n", i); + return; + } + midc = &mid->ch[i]; + if (midc == NULL) { + pr_err("ERR_MDMA:Null param midc\n"); + return; + } + pr_debug("MDMA:Tx complete interrupt %x, Ch No %d Index %d\n", + status, midc->ch_id, i); + /*clearing this interrupts first*/ + iowrite32((1 << midc->ch_id), mid->dma_base + CLEAR_TFR); + iowrite32((1 << midc->ch_id), mid->dma_base + CLEAR_BLOCK); + + spin_lock_bh(&midc->lock); + midc_scan_descriptors(mid, midc); + pr_debug("MDMA:Scan of desc... complete, unmasking\n"); + iowrite32(UNMASK_INTR_REG(midc->ch_id), + mid->dma_base + MASK_TFR); + spin_unlock_bh(&midc->lock); + } + + status = ioread32(mid->dma_base + RAW_ERR); + status &= mid->intr_mask; + while (status) { + /*err interrupt*/ + i = get_ch_index(&status, mid->chan_base); + if (i < 0) { + pr_err("ERR_MDMA:Invalid ch index %x\n", i); + return; + } + midc = &mid->ch[i]; + if (midc == NULL) { + pr_err("ERR_MDMA:Null param midc\n"); + return; + } + pr_debug("MDMA:Tx complete interrupt %x, Ch No %d Index %d\n", + status, midc->ch_id, i); + + iowrite32((1 << midc->ch_id), mid->dma_base + CLEAR_ERR); + spin_lock_bh(&midc->lock); + midc_handle_error(mid, midc); + iowrite32(UNMASK_INTR_REG(midc->ch_id), + mid->dma_base + MASK_ERR); + spin_unlock_bh(&midc->lock); + } + pr_debug("MDMA:Exiting takslet...\n"); + return; +} + +static void dma_tasklet1(unsigned long data) +{ + pr_debug("MDMA:in takslet1...\n"); + return dma_tasklet(data); +} + +static void dma_tasklet2(unsigned long data) +{ + pr_debug("MDMA:in takslet2...\n"); + return dma_tasklet(data); +} + +/** + * intel_mid_dma_interrupt - DMA ISR + * @irq: IRQ where interrupt occurred + * @data: ISR cllback data (the controller structure) + * + * See if this is our interrupt if so then schedule the tasklet + * otherwise ignore + */ +static irqreturn_t intel_mid_dma_interrupt(int irq, void *data) +{ + struct middma_device *mid = data; + u32 status; + int call_tasklet = 0; + + /*DMA Interrupt*/ + pr_debug("MDMA:Got an interrupt on irq %d\n", irq); + if (!mid) { + pr_err("ERR_MDMA:null pointer mid\n"); + return -EINVAL; + } + + status = ioread32(mid->dma_base + RAW_TFR); + pr_debug("MDMA: Status %x, Mask %x\n", status, mid->intr_mask); + status &= mid->intr_mask; + if (status) { + /*need to disable intr*/ + iowrite32((status << 8), mid->dma_base + MASK_TFR); + pr_debug("MDMA: Calling tasklet %x\n", status); + call_tasklet = 1; + } + status = ioread32(mid->dma_base + RAW_ERR); + status 
&= mid->intr_mask; + if (status) { + iowrite32(MASK_INTR_REG(status), mid->dma_base + MASK_ERR); + call_tasklet = 1; + } + if (call_tasklet) + tasklet_schedule(&mid->tasklet); + + return IRQ_HANDLED; +} + +static irqreturn_t intel_mid_dma_interrupt1(int irq, void *data) +{ + return intel_mid_dma_interrupt(irq, data); +} + +static irqreturn_t intel_mid_dma_interrupt2(int irq, void *data) +{ + return intel_mid_dma_interrupt(irq, data); +} + +/** + * mid_setup_dma - Setup the DMA controller + * @pdev: Controller PCI device structure + * + * Initilize the DMA controller, channels, registers with DMA engine, + * ISR. Initilize DMA controller channels. + */ +static int mid_setup_dma(struct pci_dev *pdev) +{ + struct middma_device *dma = pci_get_drvdata(pdev); + int err, i; + unsigned int irq_level; + + /* DMA coherent memory pool for DMA descriptor allocations */ + dma->dma_pool = pci_pool_create("intel_mid_dma_desc_pool", pdev, + sizeof(struct intel_mid_dma_desc), + 32, 0); + if (NULL == dma->dma_pool) { + pr_err("ERR_MDMA:pci_pool_create failed\n"); + err = -ENOMEM; + kfree(dma); + goto err_dma_pool; + } + + INIT_LIST_HEAD(&dma->common.channels); + dma->pci_id = pdev->device; + if (dma->pimr_mask) { + dma->mask_reg = ioremap(LNW_PERIPHRAL_MASK_BASE, + LNW_PERIPHRAL_MASK_SIZE); + if (dma->mask_reg == NULL) { + pr_err("ERR_MDMA:Cant map periphral intr space !!\n"); + return -ENOMEM; + } + } else + dma->mask_reg = NULL; + + pr_debug("MDMA:Adding %d channel for this controller\n", dma->max_chan); + /*init CH structures*/ + dma->intr_mask = 0; + for (i = 0; i < dma->max_chan; i++) { + struct intel_mid_dma_chan *midch = &dma->ch[i]; + + midch->chan.device = &dma->common; + midch->chan.cookie = 1; + midch->chan.chan_id = i; + midch->ch_id = dma->chan_base + i; + pr_debug("MDMA:Init CH %d, ID %d\n", i, midch->ch_id); + + midch->dma_base = dma->dma_base; + midch->ch_regs = dma->dma_base + DMA_CH_SIZE * midch->ch_id; + midch->dma = dma; + dma->intr_mask |= 1 << (dma->chan_base + i); + spin_lock_init(&midch->lock); + + INIT_LIST_HEAD(&midch->active_list); + INIT_LIST_HEAD(&midch->queue); + INIT_LIST_HEAD(&midch->free_list); + /*mask interrupts*/ + iowrite32(MASK_INTR_REG(midch->ch_id), + dma->dma_base + MASK_BLOCK); + iowrite32(MASK_INTR_REG(midch->ch_id), + dma->dma_base + MASK_SRC_TRAN); + iowrite32(MASK_INTR_REG(midch->ch_id), + dma->dma_base + MASK_DST_TRAN); + iowrite32(MASK_INTR_REG(midch->ch_id), + dma->dma_base + MASK_ERR); + iowrite32(MASK_INTR_REG(midch->ch_id), + dma->dma_base + MASK_TFR); + + disable_dma_interrupt(midch); + list_add_tail(&midch->chan.device_node, &dma->common.channels); + } + pr_debug("MDMA: Calc Mask as %x for this controller\n", dma->intr_mask); + + /*init dma structure*/ + dma_cap_zero(dma->common.cap_mask); + dma_cap_set(DMA_MEMCPY, dma->common.cap_mask); + dma_cap_set(DMA_SLAVE, dma->common.cap_mask); + dma_cap_set(DMA_PRIVATE, dma->common.cap_mask); + dma->common.dev = &pdev->dev; + dma->common.chancnt = dma->max_chan; + + dma->common.device_alloc_chan_resources = + intel_mid_dma_alloc_chan_resources; + dma->common.device_free_chan_resources = + intel_mid_dma_free_chan_resources; + + dma->common.device_tx_status = intel_mid_dma_tx_status; + dma->common.device_prep_dma_memcpy = intel_mid_dma_prep_memcpy; + dma->common.device_issue_pending = intel_mid_dma_issue_pending; + dma->common.device_prep_slave_sg = intel_mid_dma_prep_slave_sg; + dma->common.device_control = intel_mid_dma_device_control; + + /*enable dma cntrl*/ + iowrite32(REG_BIT0, dma->dma_base + DMA_CFG); + + 
/*register irq */ + if (dma->pimr_mask) { + irq_level = IRQF_SHARED; + pr_debug("MDMA:Requesting irq shared for DMAC1\n"); + err = request_irq(pdev->irq, intel_mid_dma_interrupt1, + IRQF_SHARED, "INTEL_MID_DMAC1", dma); + if (0 != err) + goto err_irq; + } else { + dma->intr_mask = 0x03; + irq_level = 0; + pr_debug("MDMA:Requesting irq for DMAC2\n"); + err = request_irq(pdev->irq, intel_mid_dma_interrupt2, + 0, "INTEL_MID_DMAC2", dma); + if (0 != err) + goto err_irq; + } + /*register device w/ engine*/ + err = dma_async_device_register(&dma->common); + if (0 != err) { + pr_err("ERR_MDMA:device_register failed: %d\n", err); + goto err_engine; + } + if (dma->pimr_mask) { + pr_debug("setting up tasklet1 for DMAC1\n"); + tasklet_init(&dma->tasklet, dma_tasklet1, (unsigned long)dma); + } else { + pr_debug("setting up tasklet2 for DMAC2\n"); + tasklet_init(&dma->tasklet, dma_tasklet2, (unsigned long)dma); + } + return 0; + +err_engine: + free_irq(pdev->irq, dma); +err_irq: + pci_pool_destroy(dma->dma_pool); + kfree(dma); +err_dma_pool: + pr_err("ERR_MDMA:setup_dma failed: %d\n", err); + return err; + +} + +/** + * middma_shutdown - Shutdown the DMA controller + * @pdev: Controller PCI device structure + * + * Called by remove + * Unregister DMa controller, clear all structures and free interrupt + */ +static void middma_shutdown(struct pci_dev *pdev) +{ + struct middma_device *device = pci_get_drvdata(pdev); + + dma_async_device_unregister(&device->common); + pci_pool_destroy(device->dma_pool); + if (device->mask_reg) + iounmap(device->mask_reg); + if (device->dma_base) + iounmap(device->dma_base); + free_irq(pdev->irq, device); + return; +} + +/** + * intel_mid_dma_probe - PCI Probe + * @pdev: Controller PCI device structure + * @id: pci device id structure + * + * Initilize the PCI device, map BARs, query driver data. 
+ * Call setup_dma to complete contoller and chan initilzation + */ +static int __devinit intel_mid_dma_probe(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + struct middma_device *device; + u32 base_addr, bar_size; + struct intel_mid_dma_probe_info *info; + int err; + + pr_debug("MDMA: probe for %x\n", pdev->device); + info = (void *)id->driver_data; + pr_debug("MDMA: CH %d, base %d, block len %d, Periphral mask %x\n", + info->max_chan, info->ch_base, + info->block_size, info->pimr_mask); + + err = pci_enable_device(pdev); + if (err) + goto err_enable_device; + + err = pci_request_regions(pdev, "intel_mid_dmac"); + if (err) + goto err_request_regions; + + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (err) + goto err_set_dma_mask; + + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); + if (err) + goto err_set_dma_mask; + + device = kzalloc(sizeof(*device), GFP_KERNEL); + if (!device) { + pr_err("ERR_MDMA:kzalloc failed probe\n"); + err = -ENOMEM; + goto err_kzalloc; + } + device->pdev = pci_dev_get(pdev); + + base_addr = pci_resource_start(pdev, 0); + bar_size = pci_resource_len(pdev, 0); + device->dma_base = ioremap_nocache(base_addr, DMA_REG_SIZE); + if (!device->dma_base) { + pr_err("ERR_MDMA:ioremap failed\n"); + err = -ENOMEM; + goto err_ioremap; + } + pci_set_drvdata(pdev, device); + pci_set_master(pdev); + device->max_chan = info->max_chan; + device->chan_base = info->ch_base; + device->block_size = info->block_size; + device->pimr_mask = info->pimr_mask; + + err = mid_setup_dma(pdev); + if (err) + goto err_dma; + + return 0; + +err_dma: + iounmap(device->dma_base); +err_ioremap: + pci_dev_put(pdev); + kfree(device); +err_kzalloc: +err_set_dma_mask: + pci_release_regions(pdev); + pci_disable_device(pdev); +err_request_regions: +err_enable_device: + pr_err("ERR_MDMA:Probe failed %d\n", err); + return err; +} + +/** + * intel_mid_dma_remove - PCI remove + * @pdev: Controller PCI device structure + * + * Free up all resources and data + * Call shutdown_dma to complete contoller and chan cleanup + */ +static void __devexit intel_mid_dma_remove(struct pci_dev *pdev) +{ + struct middma_device *device = pci_get_drvdata(pdev); + middma_shutdown(pdev); + pci_dev_put(pdev); + kfree(device); + pci_release_regions(pdev); + pci_disable_device(pdev); +} + +/****************************************************************************** +* PCI stuff +*/ +static struct pci_device_id intel_mid_dma_ids[] = { + { PCI_VDEVICE(INTEL, INTEL_MID_DMAC1_ID), INFO(2, 6, 4095, 0x200020)}, + { PCI_VDEVICE(INTEL, INTEL_MID_DMAC2_ID), INFO(2, 0, 2047, 0)}, + { PCI_VDEVICE(INTEL, INTEL_MID_GP_DMAC2_ID), INFO(2, 0, 2047, 0)}, + { PCI_VDEVICE(INTEL, INTEL_MFLD_DMAC1_ID), INFO(4, 0, 4095, 0x400040)}, + { 0, } +}; +MODULE_DEVICE_TABLE(pci, intel_mid_dma_ids); + +static struct pci_driver intel_mid_dma_pci = { + .name = "Intel MID DMA", + .id_table = intel_mid_dma_ids, + .probe = intel_mid_dma_probe, + .remove = __devexit_p(intel_mid_dma_remove), +}; + +static int __init intel_mid_dma_init(void) +{ + pr_debug("INFO_MDMA: LNW DMA Driver Version %s\n", + INTEL_MID_DMA_DRIVER_VERSION); + return pci_register_driver(&intel_mid_dma_pci); +} +fs_initcall(intel_mid_dma_init); + +static void __exit intel_mid_dma_exit(void) +{ + pci_unregister_driver(&intel_mid_dma_pci); +} +module_exit(intel_mid_dma_exit); + +MODULE_AUTHOR("Vinod Koul <vinod.koul@intel.com>"); +MODULE_DESCRIPTION("Intel (R) MID DMAC Driver"); +MODULE_LICENSE("GPL v2"); +MODULE_VERSION(INTEL_MID_DMA_DRIVER_VERSION); diff --git 
a/drivers/dma/intel_mid_dma_regs.h b/drivers/dma/intel_mid_dma_regs.h new file mode 100644 index 000000000000..d81aa658ab09 --- /dev/null +++ b/drivers/dma/intel_mid_dma_regs.h @@ -0,0 +1,260 @@ +/* + * intel_mid_dma_regs.h - Intel MID DMA Drivers + * + * Copyright (C) 2008-10 Intel Corp + * Author: Vinod Koul <vinod.koul@intel.com> + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * + */ +#ifndef __INTEL_MID_DMAC_REGS_H__ +#define __INTEL_MID_DMAC_REGS_H__ + +#include <linux/dmaengine.h> +#include <linux/dmapool.h> +#include <linux/pci_ids.h> + +#define INTEL_MID_DMA_DRIVER_VERSION "1.0.5" + +#define REG_BIT0 0x00000001 +#define REG_BIT8 0x00000100 + +#define UNMASK_INTR_REG(chan_num) \ + ((REG_BIT0 << chan_num) | (REG_BIT8 << chan_num)) +#define MASK_INTR_REG(chan_num) (REG_BIT8 << chan_num) + +#define ENABLE_CHANNEL(chan_num) \ + ((REG_BIT0 << chan_num) | (REG_BIT8 << chan_num)) + +#define DESCS_PER_CHANNEL 16 +/*DMA Registers*/ +/*registers associated with channel programming*/ +#define DMA_REG_SIZE 0x400 +#define DMA_CH_SIZE 0x58 + +/*CH X REG = (DMA_CH_SIZE)*CH_NO + REG*/ +#define SAR 0x00 /* Source Address Register*/ +#define DAR 0x08 /* Destination Address Register*/ +#define CTL_LOW 0x18 /* Control Register*/ +#define CTL_HIGH 0x1C /* Control Register*/ +#define CFG_LOW 0x40 /* Configuration Register Low*/ +#define CFG_HIGH 0x44 /* Configuration Register high*/ + +#define STATUS_TFR 0x2E8 +#define STATUS_BLOCK 0x2F0 +#define STATUS_ERR 0x308 + +#define RAW_TFR 0x2C0 +#define RAW_BLOCK 0x2C8 +#define RAW_ERR 0x2E0 + +#define MASK_TFR 0x310 +#define MASK_BLOCK 0x318 +#define MASK_SRC_TRAN 0x320 +#define MASK_DST_TRAN 0x328 +#define MASK_ERR 0x330 + +#define CLEAR_TFR 0x338 +#define CLEAR_BLOCK 0x340 +#define CLEAR_SRC_TRAN 0x348 +#define CLEAR_DST_TRAN 0x350 +#define CLEAR_ERR 0x358 + +#define INTR_STATUS 0x360 +#define DMA_CFG 0x398 +#define DMA_CHAN_EN 0x3A0 + +/*DMA channel control registers*/ +union intel_mid_dma_ctl_lo { + struct { + u32 int_en:1; /*enable or disable interrupts*/ + /*should be 0*/ + u32 dst_tr_width:3; /*destination transfer width*/ + /*usually 32 bits = 010*/ + u32 src_tr_width:3; /*source transfer width*/ + /*usually 32 bits = 010*/ + u32 dinc:2; /*destination address inc/dec*/ + /*For mem:INC=00, Periphral NoINC=11*/ + u32 sinc:2; /*source address inc or dec, as above*/ + u32 dst_msize:3; /*destination burst transaction length*/ + /*always = 16 ie 011*/ + u32 src_msize:3; /*source burst transaction length*/ + /*always = 16 ie 011*/ + u32 reser1:3; + u32 tt_fc:3; /*transfer type and flow controller*/ + /*M-M = 000 + P-M = 010 + M-P = 001*/ + u32 dms:2; /*destination master select = 0*/ + u32 sms:2; /*source master select = 0*/ + u32 llp_dst_en:1; /*enable/disable destination LLP 
= 0*/ + u32 llp_src_en:1; /*enable/disable source LLP = 0*/ + u32 reser2:3; + } ctlx; + u32 ctl_lo; +}; + +union intel_mid_dma_ctl_hi { + struct { + u32 block_ts:12; /*block transfer size*/ + /*configured by DMAC*/ + u32 reser:20; + } ctlx; + u32 ctl_hi; + +}; + +/*DMA channel configuration registers*/ +union intel_mid_dma_cfg_lo { + struct { + u32 reser1:5; + u32 ch_prior:3; /*channel priority = 0*/ + u32 ch_susp:1; /*channel suspend = 0*/ + u32 fifo_empty:1; /*FIFO empty or not R bit = 0*/ + u32 hs_sel_dst:1; /*select HW/SW destn handshaking*/ + /*HW = 0, SW = 1*/ + u32 hs_sel_src:1; /*select HW/SW src handshaking*/ + u32 reser2:6; + u32 dst_hs_pol:1; /*dest HS interface polarity*/ + u32 src_hs_pol:1; /*src HS interface polarity*/ + u32 max_abrst:10; /*max AMBA burst len = 0 (no sw limit*/ + u32 reload_src:1; /*auto reload src addr =1 if src is P*/ + u32 reload_dst:1; /*AR destn addr =1 if dstn is P*/ + } cfgx; + u32 cfg_lo; +}; + +union intel_mid_dma_cfg_hi { + struct { + u32 fcmode:1; /*flow control mode = 1*/ + u32 fifo_mode:1; /*FIFO mode select = 1*/ + u32 protctl:3; /*protection control = 0*/ + u32 rsvd:2; + u32 src_per:4; /*src hw HS interface*/ + u32 dst_per:4; /*dstn hw HS interface*/ + u32 reser2:17; + } cfgx; + u32 cfg_hi; +}; + +/** + * struct intel_mid_dma_chan - internal mid representation of a DMA channel + * @chan: dma_chan strcture represetation for mid chan + * @ch_regs: MMIO register space pointer to channel register + * @dma_base: MMIO register space DMA engine base pointer + * @ch_id: DMA channel id + * @lock: channel spinlock + * @completed: DMA cookie + * @active_list: current active descriptors + * @queue: current queued up descriptors + * @free_list: current free descriptors + * @slave: dma slave struture + * @descs_allocated: total number of decsiptors allocated + * @dma: dma device struture pointer + * @in_use: bool representing if ch is in use or not + */ +struct intel_mid_dma_chan { + struct dma_chan chan; + void __iomem *ch_regs; + void __iomem *dma_base; + int ch_id; + spinlock_t lock; + dma_cookie_t completed; + struct list_head active_list; + struct list_head queue; + struct list_head free_list; + struct intel_mid_dma_slave *slave; + unsigned int descs_allocated; + struct middma_device *dma; + bool in_use; +}; + +static inline struct intel_mid_dma_chan *to_intel_mid_dma_chan( + struct dma_chan *chan) +{ + return container_of(chan, struct intel_mid_dma_chan, chan); +} + +/** + * struct middma_device - internal representation of a DMA device + * @pdev: PCI device + * @dma_base: MMIO register space pointer of DMA + * @dma_pool: for allocating DMA descriptors + * @common: embedded struct dma_device + * @tasklet: dma tasklet for processing interrupts + * @ch: per channel data + * @pci_id: DMA device PCI ID + * @intr_mask: Interrupt mask to be used + * @mask_reg: MMIO register for periphral mask + * @chan_base: Base ch index (read from driver data) + * @max_chan: max number of chs supported (from drv_data) + * @block_size: Block size of DMA transfer supported (from drv_data) + * @pimr_mask: MMIO register addr for periphral interrupt (from drv_data) + */ +struct middma_device { + struct pci_dev *pdev; + void __iomem *dma_base; + struct pci_pool *dma_pool; + struct dma_device common; + struct tasklet_struct tasklet; + struct intel_mid_dma_chan ch[MAX_CHAN]; + unsigned int pci_id; + unsigned int intr_mask; + void __iomem *mask_reg; + int chan_base; + int max_chan; + int block_size; + unsigned int pimr_mask; +}; + +static inline struct middma_device 
*to_middma_device(struct dma_device *common) +{ + return container_of(common, struct middma_device, common); +} + +struct intel_mid_dma_desc { + void __iomem *block; /*ch ptr*/ + struct list_head desc_node; + struct dma_async_tx_descriptor txd; + size_t len; + dma_addr_t sar; + dma_addr_t dar; + u32 cfg_hi; + u32 cfg_lo; + u32 ctl_lo; + u32 ctl_hi; + dma_addr_t next; + enum dma_data_direction dirn; + enum dma_status status; + enum intel_mid_dma_width width; /*width of DMA txn*/ + enum intel_mid_dma_mode cfg_mode; /*mode configuration*/ + +}; + +static inline int test_ch_en(void __iomem *dma, u32 ch_no) +{ + u32 en_reg = ioread32(dma + DMA_CHAN_EN); + return (en_reg >> ch_no) & 0x1; +} + +static inline struct intel_mid_dma_desc *to_intel_mid_dma_desc + (struct dma_async_tx_descriptor *txd) +{ + return container_of(txd, struct intel_mid_dma_desc, txd); +} +#endif /*__INTEL_MID_DMAC_REGS_H__*/ diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h index 6d3a73b57e54..5216c8a92a21 100644 --- a/drivers/dma/ioat/dma.h +++ b/drivers/dma/ioat/dma.h @@ -97,6 +97,7 @@ struct ioat_chan_common { #define IOAT_RESET_PENDING 2 #define IOAT_KOBJ_INIT_FAIL 3 #define IOAT_RESHAPE_PENDING 4 + #define IOAT_RUN 5 struct timer_list timer; #define COMPLETION_TIMEOUT msecs_to_jiffies(100) #define IDLE_TIMEOUT msecs_to_jiffies(2000) diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c index 3c8b32a83794..216f9d383b5b 100644 --- a/drivers/dma/ioat/dma_v2.c +++ b/drivers/dma/ioat/dma_v2.c @@ -287,7 +287,10 @@ void ioat2_timer_event(unsigned long data) chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET); dev_err(to_dev(chan), "%s: Channel halted (%x)\n", __func__, chanerr); - BUG_ON(is_ioat_bug(chanerr)); + if (test_bit(IOAT_RUN, &chan->state)) + BUG_ON(is_ioat_bug(chanerr)); + else /* we never got off the ground */ + return; } /* if we haven't made progress and we have already @@ -492,6 +495,8 @@ static struct ioat_ring_ent **ioat2_alloc_ring(struct dma_chan *c, int order, gf return ring; } +void ioat2_free_chan_resources(struct dma_chan *c); + /* ioat2_alloc_chan_resources - allocate/initialize ioat2 descriptor ring * @chan: channel to be initialized */ @@ -500,6 +505,7 @@ int ioat2_alloc_chan_resources(struct dma_chan *c) struct ioat2_dma_chan *ioat = to_ioat2_chan(c); struct ioat_chan_common *chan = &ioat->base; struct ioat_ring_ent **ring; + u64 status; int order; /* have we already been set up? 
*/ @@ -540,7 +546,20 @@ int ioat2_alloc_chan_resources(struct dma_chan *c) tasklet_enable(&chan->cleanup_task); ioat2_start_null_desc(ioat); - return 1 << ioat->alloc_order; + /* check that we got off the ground */ + udelay(5); + status = ioat_chansts(chan); + if (is_ioat_active(status) || is_ioat_idle(status)) { + set_bit(IOAT_RUN, &chan->state); + return 1 << ioat->alloc_order; + } else { + u32 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET); + + dev_WARN(to_dev(chan), + "failed to start channel chanerr: %#x\n", chanerr); + ioat2_free_chan_resources(c); + return -EFAULT; + } } bool reshape_ring(struct ioat2_dma_chan *ioat, int order) @@ -778,6 +797,7 @@ void ioat2_free_chan_resources(struct dma_chan *c) del_timer_sync(&chan->timer); device->cleanup_fn((unsigned long) c); device->reset_hw(chan); + clear_bit(IOAT_RUN, &chan->state); spin_lock_bh(&chan->cleanup_lock); spin_lock_bh(&ioat->prep_lock); diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c index 1cdd22e1051b..d0f499098479 100644 --- a/drivers/dma/ioat/dma_v3.c +++ b/drivers/dma/ioat/dma_v3.c @@ -361,7 +361,10 @@ static void ioat3_timer_event(unsigned long data) chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET); dev_err(to_dev(chan), "%s: Channel halted (%x)\n", __func__, chanerr); - BUG_ON(is_ioat_bug(chanerr)); + if (test_bit(IOAT_RUN, &chan->state)) + BUG_ON(is_ioat_bug(chanerr)); + else /* we never got off the ground */ + return; } /* if we haven't made progress and we have already diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c new file mode 100644 index 000000000000..3533948b88ba --- /dev/null +++ b/drivers/dma/pch_dma.c @@ -0,0 +1,957 @@ +/* + * Topcliff PCH DMA controller driver + * Copyright (c) 2010 Intel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ */ + +#include <linux/dmaengine.h> +#include <linux/dma-mapping.h> +#include <linux/init.h> +#include <linux/pci.h> +#include <linux/interrupt.h> +#include <linux/module.h> +#include <linux/pch_dma.h> + +#define DRV_NAME "pch-dma" + +#define DMA_CTL0_DISABLE 0x0 +#define DMA_CTL0_SG 0x1 +#define DMA_CTL0_ONESHOT 0x2 +#define DMA_CTL0_MODE_MASK_BITS 0x3 +#define DMA_CTL0_DIR_SHIFT_BITS 2 +#define DMA_CTL0_BITS_PER_CH 4 + +#define DMA_CTL2_START_SHIFT_BITS 8 +#define DMA_CTL2_IRQ_ENABLE_MASK ((1UL << DMA_CTL2_START_SHIFT_BITS) - 1) + +#define DMA_STATUS_IDLE 0x0 +#define DMA_STATUS_DESC_READ 0x1 +#define DMA_STATUS_WAIT 0x2 +#define DMA_STATUS_ACCESS 0x3 +#define DMA_STATUS_BITS_PER_CH 2 +#define DMA_STATUS_MASK_BITS 0x3 +#define DMA_STATUS_SHIFT_BITS 16 +#define DMA_STATUS_IRQ(x) (0x1 << (x)) +#define DMA_STATUS_ERR(x) (0x1 << ((x) + 8)) + +#define DMA_DESC_WIDTH_SHIFT_BITS 12 +#define DMA_DESC_WIDTH_1_BYTE (0x3 << DMA_DESC_WIDTH_SHIFT_BITS) +#define DMA_DESC_WIDTH_2_BYTES (0x2 << DMA_DESC_WIDTH_SHIFT_BITS) +#define DMA_DESC_WIDTH_4_BYTES (0x0 << DMA_DESC_WIDTH_SHIFT_BITS) +#define DMA_DESC_MAX_COUNT_1_BYTE 0x3FF +#define DMA_DESC_MAX_COUNT_2_BYTES 0x3FF +#define DMA_DESC_MAX_COUNT_4_BYTES 0x7FF +#define DMA_DESC_END_WITHOUT_IRQ 0x0 +#define DMA_DESC_END_WITH_IRQ 0x1 +#define DMA_DESC_FOLLOW_WITHOUT_IRQ 0x2 +#define DMA_DESC_FOLLOW_WITH_IRQ 0x3 + +#define MAX_CHAN_NR 8 + +static unsigned int init_nr_desc_per_channel = 64; +module_param(init_nr_desc_per_channel, uint, 0644); +MODULE_PARM_DESC(init_nr_desc_per_channel, + "initial descriptors per channel (default: 64)"); + +struct pch_dma_desc_regs { + u32 dev_addr; + u32 mem_addr; + u32 size; + u32 next; +}; + +struct pch_dma_regs { + u32 dma_ctl0; + u32 dma_ctl1; + u32 dma_ctl2; + u32 reserved1; + u32 dma_sts0; + u32 dma_sts1; + u32 reserved2; + u32 reserved3; + struct pch_dma_desc_regs desc[0]; +}; + +struct pch_dma_desc { + struct pch_dma_desc_regs regs; + struct dma_async_tx_descriptor txd; + struct list_head desc_node; + struct list_head tx_list; +}; + +struct pch_dma_chan { + struct dma_chan chan; + void __iomem *membase; + enum dma_data_direction dir; + struct tasklet_struct tasklet; + unsigned long err_status; + + spinlock_t lock; + + dma_cookie_t completed_cookie; + struct list_head active_list; + struct list_head queue; + struct list_head free_list; + unsigned int descs_allocated; +}; + +#define PDC_DEV_ADDR 0x00 +#define PDC_MEM_ADDR 0x04 +#define PDC_SIZE 0x08 +#define PDC_NEXT 0x0C + +#define channel_readl(pdc, name) \ + readl((pdc)->membase + PDC_##name) +#define channel_writel(pdc, name, val) \ + writel((val), (pdc)->membase + PDC_##name) + +struct pch_dma { + struct dma_device dma; + void __iomem *membase; + struct pci_pool *pool; + struct pch_dma_regs regs; + struct pch_dma_desc_regs ch_regs[MAX_CHAN_NR]; + struct pch_dma_chan channels[0]; +}; + +#define PCH_DMA_CTL0 0x00 +#define PCH_DMA_CTL1 0x04 +#define PCH_DMA_CTL2 0x08 +#define PCH_DMA_STS0 0x10 +#define PCH_DMA_STS1 0x14 + +#define dma_readl(pd, name) \ + readl((pd)->membase + PCH_DMA_##name) +#define dma_writel(pd, name, val) \ + writel((val), (pd)->membase + PCH_DMA_##name) + +static inline struct pch_dma_desc *to_pd_desc(struct dma_async_tx_descriptor *txd) +{ + return container_of(txd, struct pch_dma_desc, txd); +} + +static inline struct pch_dma_chan *to_pd_chan(struct dma_chan *chan) +{ + return container_of(chan, struct pch_dma_chan, chan); +} + +static inline struct pch_dma *to_pd(struct dma_device *ddev) +{ + return container_of(ddev, struct pch_dma, dma); 
+} + +static inline struct device *chan2dev(struct dma_chan *chan) +{ + return &chan->dev->device; +} + +static inline struct device *chan2parent(struct dma_chan *chan) +{ + return chan->dev->device.parent; +} + +static inline struct pch_dma_desc *pdc_first_active(struct pch_dma_chan *pd_chan) +{ + return list_first_entry(&pd_chan->active_list, + struct pch_dma_desc, desc_node); +} + +static inline struct pch_dma_desc *pdc_first_queued(struct pch_dma_chan *pd_chan) +{ + return list_first_entry(&pd_chan->queue, + struct pch_dma_desc, desc_node); +} + +static void pdc_enable_irq(struct dma_chan *chan, int enable) +{ + struct pch_dma *pd = to_pd(chan->device); + u32 val; + + val = dma_readl(pd, CTL2); + + if (enable) + val |= 0x1 << chan->chan_id; + else + val &= ~(0x1 << chan->chan_id); + + dma_writel(pd, CTL2, val); + + dev_dbg(chan2dev(chan), "pdc_enable_irq: chan %d -> %x\n", + chan->chan_id, val); +} + +static void pdc_set_dir(struct dma_chan *chan) +{ + struct pch_dma_chan *pd_chan = to_pd_chan(chan); + struct pch_dma *pd = to_pd(chan->device); + u32 val; + + val = dma_readl(pd, CTL0); + + if (pd_chan->dir == DMA_TO_DEVICE) + val |= 0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id + + DMA_CTL0_DIR_SHIFT_BITS); + else + val &= ~(0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id + + DMA_CTL0_DIR_SHIFT_BITS)); + + dma_writel(pd, CTL0, val); + + dev_dbg(chan2dev(chan), "pdc_set_dir: chan %d -> %x\n", + chan->chan_id, val); +} + +static void pdc_set_mode(struct dma_chan *chan, u32 mode) +{ + struct pch_dma *pd = to_pd(chan->device); + u32 val; + + val = dma_readl(pd, CTL0); + + val &= ~(DMA_CTL0_MODE_MASK_BITS << + (DMA_CTL0_BITS_PER_CH * chan->chan_id)); + val |= mode << (DMA_CTL0_BITS_PER_CH * chan->chan_id); + + dma_writel(pd, CTL0, val); + + dev_dbg(chan2dev(chan), "pdc_set_mode: chan %d -> %x\n", + chan->chan_id, val); +} + +static u32 pdc_get_status(struct pch_dma_chan *pd_chan) +{ + struct pch_dma *pd = to_pd(pd_chan->chan.device); + u32 val; + + val = dma_readl(pd, STS0); + return DMA_STATUS_MASK_BITS & (val >> (DMA_STATUS_SHIFT_BITS + + DMA_STATUS_BITS_PER_CH * pd_chan->chan.chan_id)); +} + +static bool pdc_is_idle(struct pch_dma_chan *pd_chan) +{ + if (pdc_get_status(pd_chan) == DMA_STATUS_IDLE) + return true; + else + return false; +} + +static void pdc_dostart(struct pch_dma_chan *pd_chan, struct pch_dma_desc* desc) +{ + struct pch_dma *pd = to_pd(pd_chan->chan.device); + u32 val; + + if (!pdc_is_idle(pd_chan)) { + dev_err(chan2dev(&pd_chan->chan), + "BUG: Attempt to start non-idle channel\n"); + return; + } + + channel_writel(pd_chan, DEV_ADDR, desc->regs.dev_addr); + channel_writel(pd_chan, MEM_ADDR, desc->regs.mem_addr); + channel_writel(pd_chan, SIZE, desc->regs.size); + channel_writel(pd_chan, NEXT, desc->regs.next); + + dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> dev_addr: %x\n", + pd_chan->chan.chan_id, desc->regs.dev_addr); + dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> mem_addr: %x\n", + pd_chan->chan.chan_id, desc->regs.mem_addr); + dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> size: %x\n", + pd_chan->chan.chan_id, desc->regs.size); + dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> next: %x\n", + pd_chan->chan.chan_id, desc->regs.next); + + if (list_empty(&desc->tx_list)) + pdc_set_mode(&pd_chan->chan, DMA_CTL0_ONESHOT); + else + pdc_set_mode(&pd_chan->chan, DMA_CTL0_SG); + + val = dma_readl(pd, CTL2); + val |= 1 << (DMA_CTL2_START_SHIFT_BITS + pd_chan->chan.chan_id); + dma_writel(pd, CTL2, val); +} + +static void pdc_chain_complete(struct pch_dma_chan *pd_chan, + struct 
pch_dma_desc *desc) +{ + struct dma_async_tx_descriptor *txd = &desc->txd; + dma_async_tx_callback callback = txd->callback; + void *param = txd->callback_param; + + list_splice_init(&desc->tx_list, &pd_chan->free_list); + list_move(&desc->desc_node, &pd_chan->free_list); + + if (callback) + callback(param); +} + +static void pdc_complete_all(struct pch_dma_chan *pd_chan) +{ + struct pch_dma_desc *desc, *_d; + LIST_HEAD(list); + + BUG_ON(!pdc_is_idle(pd_chan)); + + if (!list_empty(&pd_chan->queue)) + pdc_dostart(pd_chan, pdc_first_queued(pd_chan)); + + list_splice_init(&pd_chan->active_list, &list); + list_splice_init(&pd_chan->queue, &pd_chan->active_list); + + list_for_each_entry_safe(desc, _d, &list, desc_node) + pdc_chain_complete(pd_chan, desc); +} + +static void pdc_handle_error(struct pch_dma_chan *pd_chan) +{ + struct pch_dma_desc *bad_desc; + + bad_desc = pdc_first_active(pd_chan); + list_del(&bad_desc->desc_node); + + list_splice_init(&pd_chan->queue, pd_chan->active_list.prev); + + if (!list_empty(&pd_chan->active_list)) + pdc_dostart(pd_chan, pdc_first_active(pd_chan)); + + dev_crit(chan2dev(&pd_chan->chan), "Bad descriptor submitted\n"); + dev_crit(chan2dev(&pd_chan->chan), "descriptor cookie: %d\n", + bad_desc->txd.cookie); + + pdc_chain_complete(pd_chan, bad_desc); +} + +static void pdc_advance_work(struct pch_dma_chan *pd_chan) +{ + if (list_empty(&pd_chan->active_list) || + list_is_singular(&pd_chan->active_list)) { + pdc_complete_all(pd_chan); + } else { + pdc_chain_complete(pd_chan, pdc_first_active(pd_chan)); + pdc_dostart(pd_chan, pdc_first_active(pd_chan)); + } +} + +static dma_cookie_t pdc_assign_cookie(struct pch_dma_chan *pd_chan, + struct pch_dma_desc *desc) +{ + dma_cookie_t cookie = pd_chan->chan.cookie; + + if (++cookie < 0) + cookie = 1; + + pd_chan->chan.cookie = cookie; + desc->txd.cookie = cookie; + + return cookie; +} + +static dma_cookie_t pd_tx_submit(struct dma_async_tx_descriptor *txd) +{ + struct pch_dma_desc *desc = to_pd_desc(txd); + struct pch_dma_chan *pd_chan = to_pd_chan(txd->chan); + dma_cookie_t cookie; + + spin_lock_bh(&pd_chan->lock); + cookie = pdc_assign_cookie(pd_chan, desc); + + if (list_empty(&pd_chan->active_list)) { + list_add_tail(&desc->desc_node, &pd_chan->active_list); + pdc_dostart(pd_chan, desc); + } else { + list_add_tail(&desc->desc_node, &pd_chan->queue); + } + + spin_unlock_bh(&pd_chan->lock); + return 0; +} + +static struct pch_dma_desc *pdc_alloc_desc(struct dma_chan *chan, gfp_t flags) +{ + struct pch_dma_desc *desc = NULL; + struct pch_dma *pd = to_pd(chan->device); + dma_addr_t addr; + + desc = pci_pool_alloc(pd->pool, GFP_KERNEL, &addr); + if (desc) { + memset(desc, 0, sizeof(struct pch_dma_desc)); + INIT_LIST_HEAD(&desc->tx_list); + dma_async_tx_descriptor_init(&desc->txd, chan); + desc->txd.tx_submit = pd_tx_submit; + desc->txd.flags = DMA_CTRL_ACK; + desc->txd.phys = addr; + } + + return desc; +} + +static struct pch_dma_desc *pdc_desc_get(struct pch_dma_chan *pd_chan) +{ + struct pch_dma_desc *desc, *_d; + struct pch_dma_desc *ret = NULL; + int i; + + spin_lock_bh(&pd_chan->lock); + list_for_each_entry_safe(desc, _d, &pd_chan->free_list, desc_node) { + i++; + if (async_tx_test_ack(&desc->txd)) { + list_del(&desc->desc_node); + ret = desc; + break; + } + dev_dbg(chan2dev(&pd_chan->chan), "desc %p not ACKed\n", desc); + } + spin_unlock_bh(&pd_chan->lock); + dev_dbg(chan2dev(&pd_chan->chan), "scanned %d descriptors\n", i); + + if (!ret) { + ret = pdc_alloc_desc(&pd_chan->chan, GFP_NOIO); + if (ret) { + 
spin_lock_bh(&pd_chan->lock); + pd_chan->descs_allocated++; + spin_unlock_bh(&pd_chan->lock); + } else { + dev_err(chan2dev(&pd_chan->chan), + "failed to alloc desc\n"); + } + } + + return ret; +} + +static void pdc_desc_put(struct pch_dma_chan *pd_chan, + struct pch_dma_desc *desc) +{ + if (desc) { + spin_lock_bh(&pd_chan->lock); + list_splice_init(&desc->tx_list, &pd_chan->free_list); + list_add(&desc->desc_node, &pd_chan->free_list); + spin_unlock_bh(&pd_chan->lock); + } +} + +static int pd_alloc_chan_resources(struct dma_chan *chan) +{ + struct pch_dma_chan *pd_chan = to_pd_chan(chan); + struct pch_dma_desc *desc; + LIST_HEAD(tmp_list); + int i; + + if (!pdc_is_idle(pd_chan)) { + dev_dbg(chan2dev(chan), "DMA channel not idle ?\n"); + return -EIO; + } + + if (!list_empty(&pd_chan->free_list)) + return pd_chan->descs_allocated; + + for (i = 0; i < init_nr_desc_per_channel; i++) { + desc = pdc_alloc_desc(chan, GFP_KERNEL); + + if (!desc) { + dev_warn(chan2dev(chan), + "Only allocated %d initial descriptors\n", i); + break; + } + + list_add_tail(&desc->desc_node, &tmp_list); + } + + spin_lock_bh(&pd_chan->lock); + list_splice(&tmp_list, &pd_chan->free_list); + pd_chan->descs_allocated = i; + pd_chan->completed_cookie = chan->cookie = 1; + spin_unlock_bh(&pd_chan->lock); + + pdc_enable_irq(chan, 1); + pdc_set_dir(chan); + + return pd_chan->descs_allocated; +} + +static void pd_free_chan_resources(struct dma_chan *chan) +{ + struct pch_dma_chan *pd_chan = to_pd_chan(chan); + struct pch_dma *pd = to_pd(chan->device); + struct pch_dma_desc *desc, *_d; + LIST_HEAD(tmp_list); + + BUG_ON(!pdc_is_idle(pd_chan)); + BUG_ON(!list_empty(&pd_chan->active_list)); + BUG_ON(!list_empty(&pd_chan->queue)); + + spin_lock_bh(&pd_chan->lock); + list_splice_init(&pd_chan->free_list, &tmp_list); + pd_chan->descs_allocated = 0; + spin_unlock_bh(&pd_chan->lock); + + list_for_each_entry_safe(desc, _d, &tmp_list, desc_node) + pci_pool_free(pd->pool, desc, desc->txd.phys); + + pdc_enable_irq(chan, 0); +} + +static enum dma_status pd_tx_status(struct dma_chan *chan, dma_cookie_t cookie, + struct dma_tx_state *txstate) +{ + struct pch_dma_chan *pd_chan = to_pd_chan(chan); + dma_cookie_t last_used; + dma_cookie_t last_completed; + int ret; + + spin_lock_bh(&pd_chan->lock); + last_completed = pd_chan->completed_cookie; + last_used = chan->cookie; + spin_unlock_bh(&pd_chan->lock); + + ret = dma_async_is_complete(cookie, last_completed, last_used); + + dma_set_tx_state(txstate, last_completed, last_used, 0); + + return ret; +} + +static void pd_issue_pending(struct dma_chan *chan) +{ + struct pch_dma_chan *pd_chan = to_pd_chan(chan); + + if (pdc_is_idle(pd_chan)) { + spin_lock_bh(&pd_chan->lock); + pdc_advance_work(pd_chan); + spin_unlock_bh(&pd_chan->lock); + } +} + +static struct dma_async_tx_descriptor *pd_prep_slave_sg(struct dma_chan *chan, + struct scatterlist *sgl, unsigned int sg_len, + enum dma_data_direction direction, unsigned long flags) +{ + struct pch_dma_chan *pd_chan = to_pd_chan(chan); + struct pch_dma_slave *pd_slave = chan->private; + struct pch_dma_desc *first = NULL; + struct pch_dma_desc *prev = NULL; + struct pch_dma_desc *desc = NULL; + struct scatterlist *sg; + dma_addr_t reg; + int i; + + if (unlikely(!sg_len)) { + dev_info(chan2dev(chan), "prep_slave_sg: length is zero!\n"); + return NULL; + } + + if (direction == DMA_FROM_DEVICE) + reg = pd_slave->rx_reg; + else if (direction == DMA_TO_DEVICE) + reg = pd_slave->tx_reg; + else + return NULL; + + for_each_sg(sgl, sg, sg_len, i) { + desc = 
pdc_desc_get(pd_chan); + + if (!desc) + goto err_desc_get; + + desc->regs.dev_addr = reg; + desc->regs.mem_addr = sg_phys(sg); + desc->regs.size = sg_dma_len(sg); + desc->regs.next = DMA_DESC_FOLLOW_WITHOUT_IRQ; + + switch (pd_slave->width) { + case PCH_DMA_WIDTH_1_BYTE: + if (desc->regs.size > DMA_DESC_MAX_COUNT_1_BYTE) + goto err_desc_get; + desc->regs.size |= DMA_DESC_WIDTH_1_BYTE; + break; + case PCH_DMA_WIDTH_2_BYTES: + if (desc->regs.size > DMA_DESC_MAX_COUNT_2_BYTES) + goto err_desc_get; + desc->regs.size |= DMA_DESC_WIDTH_2_BYTES; + break; + case PCH_DMA_WIDTH_4_BYTES: + if (desc->regs.size > DMA_DESC_MAX_COUNT_4_BYTES) + goto err_desc_get; + desc->regs.size |= DMA_DESC_WIDTH_4_BYTES; + break; + default: + goto err_desc_get; + } + + + if (!first) { + first = desc; + } else { + prev->regs.next |= desc->txd.phys; + list_add_tail(&desc->desc_node, &first->tx_list); + } + + prev = desc; + } + + if (flags & DMA_PREP_INTERRUPT) + desc->regs.next = DMA_DESC_END_WITH_IRQ; + else + desc->regs.next = DMA_DESC_END_WITHOUT_IRQ; + + first->txd.cookie = -EBUSY; + desc->txd.flags = flags; + + return &first->txd; + +err_desc_get: + dev_err(chan2dev(chan), "failed to get desc or wrong parameters\n"); + pdc_desc_put(pd_chan, first); + return NULL; +} + +static int pd_device_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, + unsigned long arg) +{ + struct pch_dma_chan *pd_chan = to_pd_chan(chan); + struct pch_dma_desc *desc, *_d; + LIST_HEAD(list); + + if (cmd != DMA_TERMINATE_ALL) + return -ENXIO; + + spin_lock_bh(&pd_chan->lock); + + pdc_set_mode(&pd_chan->chan, DMA_CTL0_DISABLE); + + list_splice_init(&pd_chan->active_list, &list); + list_splice_init(&pd_chan->queue, &list); + + list_for_each_entry_safe(desc, _d, &list, desc_node) + pdc_chain_complete(pd_chan, desc); + + spin_unlock_bh(&pd_chan->lock); + + + return 0; +} + +static void pdc_tasklet(unsigned long data) +{ + struct pch_dma_chan *pd_chan = (struct pch_dma_chan *)data; + + if (!pdc_is_idle(pd_chan)) { + dev_err(chan2dev(&pd_chan->chan), + "BUG: handle non-idle channel in tasklet\n"); + return; + } + + spin_lock_bh(&pd_chan->lock); + if (test_and_clear_bit(0, &pd_chan->err_status)) + pdc_handle_error(pd_chan); + else + pdc_advance_work(pd_chan); + spin_unlock_bh(&pd_chan->lock); +} + +static irqreturn_t pd_irq(int irq, void *devid) +{ + struct pch_dma *pd = (struct pch_dma *)devid; + struct pch_dma_chan *pd_chan; + u32 sts0; + int i; + int ret = IRQ_NONE; + + sts0 = dma_readl(pd, STS0); + + dev_dbg(pd->dma.dev, "pd_irq sts0: %x\n", sts0); + + for (i = 0; i < pd->dma.chancnt; i++) { + pd_chan = &pd->channels[i]; + + if (sts0 & DMA_STATUS_IRQ(i)) { + if (sts0 & DMA_STATUS_ERR(i)) + set_bit(0, &pd_chan->err_status); + + tasklet_schedule(&pd_chan->tasklet); + ret = IRQ_HANDLED; + } + + } + + /* clear interrupt bits in status register */ + dma_writel(pd, STS0, sts0); + + return ret; +} + +static void pch_dma_save_regs(struct pch_dma *pd) +{ + struct pch_dma_chan *pd_chan; + struct dma_chan *chan, *_c; + int i = 0; + + pd->regs.dma_ctl0 = dma_readl(pd, CTL0); + pd->regs.dma_ctl1 = dma_readl(pd, CTL1); + pd->regs.dma_ctl2 = dma_readl(pd, CTL2); + + list_for_each_entry_safe(chan, _c, &pd->dma.channels, device_node) { + pd_chan = to_pd_chan(chan); + + pd->ch_regs[i].dev_addr = channel_readl(pd_chan, DEV_ADDR); + pd->ch_regs[i].mem_addr = channel_readl(pd_chan, MEM_ADDR); + pd->ch_regs[i].size = channel_readl(pd_chan, SIZE); + pd->ch_regs[i].next = channel_readl(pd_chan, NEXT); + + i++; + } +} + +static void pch_dma_restore_regs(struct 
pch_dma *pd) +{ + struct pch_dma_chan *pd_chan; + struct dma_chan *chan, *_c; + int i = 0; + + dma_writel(pd, CTL0, pd->regs.dma_ctl0); + dma_writel(pd, CTL1, pd->regs.dma_ctl1); + dma_writel(pd, CTL2, pd->regs.dma_ctl2); + + list_for_each_entry_safe(chan, _c, &pd->dma.channels, device_node) { + pd_chan = to_pd_chan(chan); + + channel_writel(pd_chan, DEV_ADDR, pd->ch_regs[i].dev_addr); + channel_writel(pd_chan, MEM_ADDR, pd->ch_regs[i].mem_addr); + channel_writel(pd_chan, SIZE, pd->ch_regs[i].size); + channel_writel(pd_chan, NEXT, pd->ch_regs[i].next); + + i++; + } +} + +static int pch_dma_suspend(struct pci_dev *pdev, pm_message_t state) +{ + struct pch_dma *pd = pci_get_drvdata(pdev); + + if (pd) + pch_dma_save_regs(pd); + + pci_save_state(pdev); + pci_disable_device(pdev); + pci_set_power_state(pdev, pci_choose_state(pdev, state)); + + return 0; +} + +static int pch_dma_resume(struct pci_dev *pdev) +{ + struct pch_dma *pd = pci_get_drvdata(pdev); + int err; + + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + + err = pci_enable_device(pdev); + if (err) { + dev_dbg(&pdev->dev, "failed to enable device\n"); + return err; + } + + if (pd) + pch_dma_restore_regs(pd); + + return 0; +} + +static int __devinit pch_dma_probe(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + struct pch_dma *pd; + struct pch_dma_regs *regs; + unsigned int nr_channels; + int err; + int i; + + nr_channels = id->driver_data; + pd = kzalloc(sizeof(struct pch_dma)+ + sizeof(struct pch_dma_chan) * nr_channels, GFP_KERNEL); + if (!pd) + return -ENOMEM; + + pci_set_drvdata(pdev, pd); + + err = pci_enable_device(pdev); + if (err) { + dev_err(&pdev->dev, "Cannot enable PCI device\n"); + goto err_free_mem; + } + + if (!(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) { + dev_err(&pdev->dev, "Cannot find proper base address\n"); + goto err_disable_pdev; + } + + err = pci_request_regions(pdev, DRV_NAME); + if (err) { + dev_err(&pdev->dev, "Cannot obtain PCI resources\n"); + goto err_disable_pdev; + } + + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (err) { + dev_err(&pdev->dev, "Cannot set proper DMA config\n"); + goto err_free_res; + } + + regs = pd->membase = pci_iomap(pdev, 1, 0); + if (!pd->membase) { + dev_err(&pdev->dev, "Cannot map MMIO registers\n"); + err = -ENOMEM; + goto err_free_res; + } + + pci_set_master(pdev); + + err = request_irq(pdev->irq, pd_irq, IRQF_SHARED, DRV_NAME, pd); + if (err) { + dev_err(&pdev->dev, "Failed to request IRQ\n"); + goto err_iounmap; + } + + pd->pool = pci_pool_create("pch_dma_desc_pool", pdev, + sizeof(struct pch_dma_desc), 4, 0); + if (!pd->pool) { + dev_err(&pdev->dev, "Failed to alloc DMA descriptors\n"); + err = -ENOMEM; + goto err_free_irq; + } + + pd->dma.dev = &pdev->dev; + pd->dma.chancnt = nr_channels; + + INIT_LIST_HEAD(&pd->dma.channels); + + for (i = 0; i < nr_channels; i++) { + struct pch_dma_chan *pd_chan = &pd->channels[i]; + + pd_chan->chan.device = &pd->dma; + pd_chan->chan.cookie = 1; + pd_chan->chan.chan_id = i; + + pd_chan->membase = ®s->desc[i]; + + pd_chan->dir = (i % 2) ? 
DMA_FROM_DEVICE : DMA_TO_DEVICE; + + spin_lock_init(&pd_chan->lock); + + INIT_LIST_HEAD(&pd_chan->active_list); + INIT_LIST_HEAD(&pd_chan->queue); + INIT_LIST_HEAD(&pd_chan->free_list); + + tasklet_init(&pd_chan->tasklet, pdc_tasklet, + (unsigned long)pd_chan); + list_add_tail(&pd_chan->chan.device_node, &pd->dma.channels); + } + + dma_cap_zero(pd->dma.cap_mask); + dma_cap_set(DMA_PRIVATE, pd->dma.cap_mask); + dma_cap_set(DMA_SLAVE, pd->dma.cap_mask); + + pd->dma.device_alloc_chan_resources = pd_alloc_chan_resources; + pd->dma.device_free_chan_resources = pd_free_chan_resources; + pd->dma.device_tx_status = pd_tx_status; + pd->dma.device_issue_pending = pd_issue_pending; + pd->dma.device_prep_slave_sg = pd_prep_slave_sg; + pd->dma.device_control = pd_device_control; + + err = dma_async_device_register(&pd->dma); + if (err) { + dev_err(&pdev->dev, "Failed to register DMA device\n"); + goto err_free_pool; + } + + return 0; + +err_free_pool: + pci_pool_destroy(pd->pool); +err_free_irq: + free_irq(pdev->irq, pd); +err_iounmap: + pci_iounmap(pdev, pd->membase); +err_free_res: + pci_release_regions(pdev); +err_disable_pdev: + pci_disable_device(pdev); +err_free_mem: + return err; +} + +static void __devexit pch_dma_remove(struct pci_dev *pdev) +{ + struct pch_dma *pd = pci_get_drvdata(pdev); + struct pch_dma_chan *pd_chan; + struct dma_chan *chan, *_c; + + if (pd) { + dma_async_device_unregister(&pd->dma); + + list_for_each_entry_safe(chan, _c, &pd->dma.channels, + device_node) { + pd_chan = to_pd_chan(chan); + + tasklet_disable(&pd_chan->tasklet); + tasklet_kill(&pd_chan->tasklet); + } + + pci_pool_destroy(pd->pool); + free_irq(pdev->irq, pd); + pci_iounmap(pdev, pd->membase); + pci_release_regions(pdev); + pci_disable_device(pdev); + kfree(pd); + } +} + +/* PCI Device ID of DMA device */ +#define PCI_DEVICE_ID_PCH_DMA_8CH 0x8810 +#define PCI_DEVICE_ID_PCH_DMA_4CH 0x8815 + +static const struct pci_device_id pch_dma_id_table[] = { + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_PCH_DMA_8CH), 8 }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_PCH_DMA_4CH), 4 }, +}; + +static struct pci_driver pch_dma_driver = { + .name = DRV_NAME, + .id_table = pch_dma_id_table, + .probe = pch_dma_probe, + .remove = __devexit_p(pch_dma_remove), +#ifdef CONFIG_PM + .suspend = pch_dma_suspend, + .resume = pch_dma_resume, +#endif +}; + +static int __init pch_dma_init(void) +{ + return pci_register_driver(&pch_dma_driver); +} + +static void __exit pch_dma_exit(void) +{ + pci_unregister_driver(&pch_dma_driver); +} + +module_init(pch_dma_init); +module_exit(pch_dma_exit); + +MODULE_DESCRIPTION("Topcliff PCH DMA controller driver"); +MODULE_AUTHOR("Yong Wang <yong.y.wang@intel.com>"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index c426829f6ab8..17e2600a00cf 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -30,14 +30,16 @@ /* Maximum iterations taken before giving up suspending a channel */ #define D40_SUSPEND_MAX_IT 500 +/* Hardware requirement on LCLA alignment */ +#define LCLA_ALIGNMENT 0x40000 +/* Attempts before giving up to trying to get pages that are aligned */ +#define MAX_LCLA_ALLOC_ATTEMPTS 256 + +/* Bit markings for allocation map */ #define D40_ALLOC_FREE (1 << 31) #define D40_ALLOC_PHY (1 << 30) #define D40_ALLOC_LOG_FREE 0 -/* The number of free d40_desc to keep in memory before starting - * to kfree() them */ -#define D40_DESC_CACHE_SIZE 50 - /* Hardware designer of the block */ #define D40_PERIPHID2_DESIGNER 0x8 @@ -68,9 +70,9 @@ enum d40_command { */ 
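
(For reference on the Topcliff PCH driver added above: pdc_set_mode() and pdc_set_dir() program one shared CTL0 register, giving every channel a 4-bit field, two mode bits plus a direction bit at DMA_CTL0_DIR_SHIFT_BITS. Below is a minimal user-space sketch of that field arithmetic, assuming only the layout implied by the DMA_CTL0_* constants; the ctl0_set_mode()/ctl0_set_dir() helper names and the sample values are illustrative and not taken from the driver.)

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define DMA_CTL0_DISABLE         0x0
#define DMA_CTL0_SG              0x1
#define DMA_CTL0_ONESHOT         0x2
#define DMA_CTL0_MODE_MASK_BITS  0x3
#define DMA_CTL0_DIR_SHIFT_BITS  2
#define DMA_CTL0_BITS_PER_CH     4

/* Replace the 2-bit mode field of one channel, leaving the other channels alone. */
static uint32_t ctl0_set_mode(uint32_t ctl0, unsigned int ch, uint32_t mode)
{
	ctl0 &= ~(DMA_CTL0_MODE_MASK_BITS << (DMA_CTL0_BITS_PER_CH * ch));
	ctl0 |= mode << (DMA_CTL0_BITS_PER_CH * ch);
	return ctl0;
}

/* Set or clear the per-channel direction bit (1 = memory to device). */
static uint32_t ctl0_set_dir(uint32_t ctl0, unsigned int ch, bool to_device)
{
	uint32_t bit = 1u << (DMA_CTL0_BITS_PER_CH * ch + DMA_CTL0_DIR_SHIFT_BITS);

	return to_device ? (ctl0 | bit) : (ctl0 & ~bit);
}

int main(void)
{
	uint32_t ctl0 = 0;

	ctl0 = ctl0_set_mode(ctl0, 3, DMA_CTL0_ONESHOT); /* channel 3: one-shot */
	ctl0 = ctl0_set_dir(ctl0, 3, true);              /* channel 3: mem -> device */
	ctl0 = ctl0_set_mode(ctl0, 0, DMA_CTL0_SG);      /* channel 0: scatter-gather */

	printf("CTL0 = 0x%08x\n", ctl0); /* expect 0x00006001 */
	return 0;
}

(The driver itself performs the same read-modify-write on the live register via dma_readl()/dma_writel(); the sketch only models the bit manipulation.)
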
struct d40_lli_pool { void *base; - int size; + int size; /* Space for dst and src, plus an extra for padding */ - u8 pre_alloc_lli[3 * sizeof(struct d40_phy_lli)]; + u8 pre_alloc_lli[3 * sizeof(struct d40_phy_lli)]; }; /** @@ -81,9 +83,10 @@ struct d40_lli_pool { * lli_len equals one. * @lli_log: Same as above but for logical channels. * @lli_pool: The pool with two entries pre-allocated. - * @lli_len: Number of LLI's in lli_pool - * @lli_tcount: Number of LLIs processed in the transfer. When equals lli_len - * then this transfer job is done. + * @lli_len: Number of llis of current descriptor. + * @lli_count: Number of transfered llis. + * @lli_tx_len: Max number of LLIs per transfer, there can be + * many transfer for one descriptor. * @txd: DMA engine struct. Used for among other things for communication * during a transfer. * @node: List entry. @@ -100,8 +103,9 @@ struct d40_desc { struct d40_log_lli_bidir lli_log; struct d40_lli_pool lli_pool; - u32 lli_len; - u32 lli_tcount; + int lli_len; + int lli_count; + u32 lli_tx_len; struct dma_async_tx_descriptor txd; struct list_head node; @@ -113,18 +117,20 @@ struct d40_desc { /** * struct d40_lcla_pool - LCLA pool settings and data. * - * @base: The virtual address of LCLA. - * @phy: Physical base address of LCLA. - * @base_size: size of lcla. + * @base: The virtual address of LCLA. 18 bit aligned. + * @base_unaligned: The orignal kmalloc pointer, if kmalloc is used. + * This pointer is only there for clean-up on error. + * @pages: The number of pages needed for all physical channels. + * Only used later for clean-up on error * @lock: Lock to protect the content in this struct. - * @alloc_map: Mapping between physical channel and LCLA entries. + * @alloc_map: Bitmap mapping between physical channel and LCLA entries. * @num_blocks: The number of entries of alloc_map. Equals to the * number of physical channels. */ struct d40_lcla_pool { void *base; - dma_addr_t phy; - resource_size_t base_size; + void *base_unaligned; + int pages; spinlock_t lock; u32 *alloc_map; int num_blocks; @@ -163,15 +169,14 @@ struct d40_base; * @pending_tx: The number of pending transfers. Used between interrupt handler * and tasklet. * @busy: Set to true when transfer is ongoing on this channel. - * @phy_chan: Pointer to physical channel which this instance runs on. + * @phy_chan: Pointer to physical channel which this instance runs on. If this + * point is NULL, then the channel is not allocated. * @chan: DMA engine handle. * @tasklet: Tasklet that gets scheduled from interrupt context to complete a * transfer and call client callback. * @client: Cliented owned descriptor list. * @active: Active descriptor. * @queue: Queued jobs. - * @free: List of free descripts, ready to be reused. - * @free_len: Number of descriptors in the free list. * @dma_cfg: The client configuration of this dma channel. * @base: Pointer to the device instance struct. * @src_def_cfg: Default cfg register setting for src. @@ -195,8 +200,6 @@ struct d40_chan { struct list_head client; struct list_head active; struct list_head queue; - struct list_head free; - int free_len; struct stedma40_chan_cfg dma_cfg; struct d40_base *base; /* Default register configurations */ @@ -205,6 +208,9 @@ struct d40_chan { struct d40_def_lcsp log_def; struct d40_lcla_elem lcla; struct d40_log_lli_full *lcpa; + /* Runtime reconfiguration */ + dma_addr_t runtime_addr; + enum dma_data_direction runtime_direction; }; /** @@ -215,6 +221,7 @@ struct d40_chan { * the same physical register. 
* @dev: The device structure. * @virtbase: The virtual base address of the DMA's register. + * @rev: silicon revision detected. * @clk: Pointer to the DMA clock structure. * @phy_start: Physical memory start of the DMA registers. * @phy_size: Size of the DMA register map. @@ -240,12 +247,14 @@ struct d40_chan { * @lcpa_base: The virtual mapped address of LCPA. * @phy_lcpa: The physical address of the LCPA. * @lcpa_size: The size of the LCPA area. + * @desc_slab: cache for descriptors. */ struct d40_base { spinlock_t interrupt_lock; spinlock_t execmd_lock; struct device *dev; void __iomem *virtbase; + u8 rev:4; struct clk *clk; phys_addr_t phy_start; resource_size_t phy_size; @@ -266,6 +275,7 @@ struct d40_base { void *lcpa_base; dma_addr_t phy_lcpa; resource_size_t lcpa_size; + struct kmem_cache *desc_slab; }; /** @@ -365,11 +375,6 @@ static dma_cookie_t d40_assign_cookie(struct d40_chan *d40c, return cookie; } -static void d40_desc_reset(struct d40_desc *d40d) -{ - d40d->lli_tcount = 0; -} - static void d40_desc_remove(struct d40_desc *d40d) { list_del(&d40d->node); @@ -377,7 +382,6 @@ static void d40_desc_remove(struct d40_desc *d40d) static struct d40_desc *d40_desc_get(struct d40_chan *d40c) { - struct d40_desc *desc; struct d40_desc *d; struct d40_desc *_d; @@ -386,36 +390,21 @@ static struct d40_desc *d40_desc_get(struct d40_chan *d40c) if (async_tx_test_ack(&d->txd)) { d40_pool_lli_free(d); d40_desc_remove(d); - desc = d; - goto out; + break; } - } - - if (list_empty(&d40c->free)) { - /* Alloc new desc because we're out of used ones */ - desc = kzalloc(sizeof(struct d40_desc), GFP_NOWAIT); - if (desc == NULL) - goto out; - INIT_LIST_HEAD(&desc->node); } else { - /* Reuse an old desc. */ - desc = list_first_entry(&d40c->free, - struct d40_desc, - node); - list_del(&desc->node); - d40c->free_len--; + d = kmem_cache_alloc(d40c->base->desc_slab, GFP_NOWAIT); + if (d != NULL) { + memset(d, 0, sizeof(struct d40_desc)); + INIT_LIST_HEAD(&d->node); + } } -out: - return desc; + return d; } static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d) { - if (d40c->free_len < D40_DESC_CACHE_SIZE) { - list_add_tail(&d40d->node, &d40c->free); - d40c->free_len++; - } else - kfree(d40d); + kmem_cache_free(d40c->base->desc_slab, d40d); } static void d40_desc_submit(struct d40_chan *d40c, struct d40_desc *desc) @@ -456,37 +445,41 @@ static struct d40_desc *d40_first_queued(struct d40_chan *d40c) /* Support functions for logical channels */ -static int d40_lcla_id_get(struct d40_chan *d40c, - struct d40_lcla_pool *pool) +static int d40_lcla_id_get(struct d40_chan *d40c) { int src_id = 0; int dst_id = 0; struct d40_log_lli *lcla_lidx_base = - pool->base + d40c->phy_chan->num * 1024; + d40c->base->lcla_pool.base + d40c->phy_chan->num * 1024; int i; int lli_per_log = d40c->base->plat_data->llis_per_log; + unsigned long flags; if (d40c->lcla.src_id >= 0 && d40c->lcla.dst_id >= 0) return 0; - if (pool->num_blocks > 32) + if (d40c->base->lcla_pool.num_blocks > 32) return -EINVAL; - spin_lock(&pool->lock); + spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags); - for (i = 0; i < pool->num_blocks; i++) { - if (!(pool->alloc_map[d40c->phy_chan->num] & (0x1 << i))) { - pool->alloc_map[d40c->phy_chan->num] |= (0x1 << i); + for (i = 0; i < d40c->base->lcla_pool.num_blocks; i++) { + if (!(d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] & + (0x1 << i))) { + d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] |= + (0x1 << i); break; } } src_id = i; - if (src_id >= pool->num_blocks) + if (src_id >= 
d40c->base->lcla_pool.num_blocks) goto err; - for (; i < pool->num_blocks; i++) { - if (!(pool->alloc_map[d40c->phy_chan->num] & (0x1 << i))) { - pool->alloc_map[d40c->phy_chan->num] |= (0x1 << i); + for (; i < d40c->base->lcla_pool.num_blocks; i++) { + if (!(d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] & + (0x1 << i))) { + d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] |= + (0x1 << i); break; } } @@ -500,28 +493,13 @@ static int d40_lcla_id_get(struct d40_chan *d40c, d40c->lcla.dst = lcla_lidx_base + dst_id * lli_per_log + 1; d40c->lcla.src = lcla_lidx_base + src_id * lli_per_log + 1; - - spin_unlock(&pool->lock); + spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags); return 0; err: - spin_unlock(&pool->lock); + spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags); return -EINVAL; } -static void d40_lcla_id_put(struct d40_chan *d40c, - struct d40_lcla_pool *pool, - int id) -{ - if (id < 0) - return; - - d40c->lcla.src_id = -1; - d40c->lcla.dst_id = -1; - - spin_lock(&pool->lock); - pool->alloc_map[d40c->phy_chan->num] &= (~(0x1 << id)); - spin_unlock(&pool->lock); -} static int d40_channel_execute_command(struct d40_chan *d40c, enum d40_command command) @@ -530,6 +508,7 @@ static int d40_channel_execute_command(struct d40_chan *d40c, void __iomem *active_reg; int ret = 0; unsigned long flags; + u32 wmask; spin_lock_irqsave(&d40c->base->execmd_lock, flags); @@ -547,7 +526,9 @@ static int d40_channel_execute_command(struct d40_chan *d40c, goto done; } - writel(command << D40_CHAN_POS(d40c->phy_chan->num), active_reg); + wmask = 0xffffffff & ~(D40_CHAN_POS_MASK(d40c->phy_chan->num)); + writel(wmask | (command << D40_CHAN_POS(d40c->phy_chan->num)), + active_reg); if (command == D40_DMA_SUSPEND_REQ) { @@ -586,8 +567,7 @@ done: static void d40_term_all(struct d40_chan *d40c) { struct d40_desc *d40d; - struct d40_desc *d; - struct d40_desc *_d; + unsigned long flags; /* Release active descriptors */ while ((d40d = d40_first_active_get(d40c))) { @@ -605,19 +585,17 @@ static void d40_term_all(struct d40_chan *d40c) d40_desc_free(d40c, d40d); } - /* Release client owned descriptors */ - if (!list_empty(&d40c->client)) - list_for_each_entry_safe(d, _d, &d40c->client, node) { - d40_pool_lli_free(d); - d40_desc_remove(d); - /* Return desc to free-list */ - d40_desc_free(d40c, d40d); - } + spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags); + + d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] &= + (~(0x1 << d40c->lcla.dst_id)); + d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] &= + (~(0x1 << d40c->lcla.src_id)); + + d40c->lcla.src_id = -1; + d40c->lcla.dst_id = -1; - d40_lcla_id_put(d40c, &d40c->base->lcla_pool, - d40c->lcla.src_id); - d40_lcla_id_put(d40c, &d40c->base->lcla_pool, - d40c->lcla.dst_id); + spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags); d40c->pending_tx = 0; d40c->busy = false; @@ -628,6 +606,7 @@ static void d40_config_set_event(struct d40_chan *d40c, bool do_enable) u32 val; unsigned long flags; + /* Notice, that disable requires the physical channel to be stopped */ if (do_enable) val = D40_ACTIVATE_EVENTLINE; else @@ -732,31 +711,34 @@ static int d40_config_write(struct d40_chan *d40c) static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d) { - if (d40d->lli_phy.dst && d40d->lli_phy.src) { d40_phy_lli_write(d40c->base->virtbase, d40c->phy_chan->num, d40d->lli_phy.dst, d40d->lli_phy.src); - d40d->lli_tcount = d40d->lli_len; } else if (d40d->lli_log.dst && d40d->lli_log.src) { - u32 lli_len; struct d40_log_lli *src = 
d40d->lli_log.src; struct d40_log_lli *dst = d40d->lli_log.dst; - - src += d40d->lli_tcount; - dst += d40d->lli_tcount; - - if (d40d->lli_len <= d40c->base->plat_data->llis_per_log) - lli_len = d40d->lli_len; - else - lli_len = d40c->base->plat_data->llis_per_log; - d40d->lli_tcount += lli_len; - d40_log_lli_write(d40c->lcpa, d40c->lcla.src, - d40c->lcla.dst, - dst, src, - d40c->base->plat_data->llis_per_log); + int s; + + src += d40d->lli_count; + dst += d40d->lli_count; + s = d40_log_lli_write(d40c->lcpa, + d40c->lcla.src, d40c->lcla.dst, + dst, src, + d40c->base->plat_data->llis_per_log); + + /* If s equals to zero, the job is not linked */ + if (s > 0) { + (void) dma_map_single(d40c->base->dev, d40c->lcla.src, + s * sizeof(struct d40_log_lli), + DMA_TO_DEVICE); + (void) dma_map_single(d40c->base->dev, d40c->lcla.dst, + s * sizeof(struct d40_log_lli), + DMA_TO_DEVICE); + } } + d40d->lli_count += d40d->lli_tx_len; } static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx) @@ -780,18 +762,21 @@ static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx) static int d40_start(struct d40_chan *d40c) { - int err; + if (d40c->base->rev == 0) { + int err; - if (d40c->log_num != D40_PHY_CHAN) { - err = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ); - if (err) - return err; - d40_config_set_event(d40c, true); + if (d40c->log_num != D40_PHY_CHAN) { + err = d40_channel_execute_command(d40c, + D40_DMA_SUSPEND_REQ); + if (err) + return err; + } } - err = d40_channel_execute_command(d40c, D40_DMA_RUN); + if (d40c->log_num != D40_PHY_CHAN) + d40_config_set_event(d40c, true); - return err; + return d40_channel_execute_command(d40c, D40_DMA_RUN); } static struct d40_desc *d40_queue_start(struct d40_chan *d40c) @@ -838,7 +823,7 @@ static void dma_tc_handle(struct d40_chan *d40c) if (d40d == NULL) return; - if (d40d->lli_tcount < d40d->lli_len) { + if (d40d->lli_count < d40d->lli_len) { d40_desc_load(d40c, d40d); /* Start dma job */ @@ -891,7 +876,6 @@ static void dma_tasklet(unsigned long data) /* Return desc to free-list */ d40_desc_free(d40c, d40d_fin); } else { - d40_desc_reset(d40d_fin); if (!d40d_fin->is_in_client_list) { d40_desc_remove(d40d_fin); list_add_tail(&d40d_fin->node, &d40c->client); @@ -975,7 +959,8 @@ static irqreturn_t d40_handle_interrupt(int irq, void *data) if (!il[row].is_error) dma_tc_handle(d40c); else - dev_err(base->dev, "[%s] IRQ chan: %ld offset %d idx %d\n", + dev_err(base->dev, + "[%s] IRQ chan: %ld offset %d idx %d\n", __func__, chan, il[row].offset, idx); spin_unlock(&d40c->lock); @@ -1134,7 +1119,8 @@ static int d40_allocate_channel(struct d40_chan *d40c) int j; int log_num; bool is_src; - bool is_log = (d40c->dma_cfg.channel_type & STEDMA40_CHANNEL_IN_OPER_MODE) + bool is_log = (d40c->dma_cfg.channel_type & + STEDMA40_CHANNEL_IN_OPER_MODE) == STEDMA40_CHANNEL_IN_LOG_MODE; @@ -1169,8 +1155,10 @@ static int d40_allocate_channel(struct d40_chan *d40c) for (j = 0; j < d40c->base->num_phy_chans; j += 8) { int phy_num = j + event_group * 2; for (i = phy_num; i < phy_num + 2; i++) { - if (d40_alloc_mask_set(&phys[i], is_src, - 0, is_log)) + if (d40_alloc_mask_set(&phys[i], + is_src, + 0, + is_log)) goto found_phy; } } @@ -1221,30 +1209,6 @@ out: } -static int d40_config_chan(struct d40_chan *d40c, - struct stedma40_chan_cfg *info) -{ - - /* Fill in basic CFG register values */ - d40_phy_cfg(&d40c->dma_cfg, &d40c->src_def_cfg, - &d40c->dst_def_cfg, d40c->log_num != D40_PHY_CHAN); - - if (d40c->log_num != D40_PHY_CHAN) { - 
d40_log_cfg(&d40c->dma_cfg, - &d40c->log_def.lcsp1, &d40c->log_def.lcsp3); - - if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) - d40c->lcpa = d40c->base->lcpa_base + - d40c->dma_cfg.src_dev_type * 32; - else - d40c->lcpa = d40c->base->lcpa_base + - d40c->dma_cfg.dst_dev_type * 32 + 16; - } - - /* Write channel configuration to the DMA */ - return d40_config_write(d40c); -} - static int d40_config_memcpy(struct d40_chan *d40c) { dma_cap_mask_t cap = d40c->chan.device->cap_mask; @@ -1272,13 +1236,25 @@ static int d40_free_dma(struct d40_chan *d40c) { int res = 0; - u32 event, dir; + u32 event; struct d40_phy_res *phy = d40c->phy_chan; bool is_src; + struct d40_desc *d; + struct d40_desc *_d; + /* Terminate all queued and active transfers */ d40_term_all(d40c); + /* Release client owned descriptors */ + if (!list_empty(&d40c->client)) + list_for_each_entry_safe(d, _d, &d40c->client, node) { + d40_pool_lli_free(d); + d40_desc_remove(d); + /* Return desc to free-list */ + d40_desc_free(d40c, d); + } + if (phy == NULL) { dev_err(&d40c->chan.dev->device, "[%s] phy == null\n", __func__); @@ -1292,22 +1268,12 @@ static int d40_free_dma(struct d40_chan *d40c) return -EINVAL; } - - res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ); - if (res) { - dev_err(&d40c->chan.dev->device, "[%s] suspend\n", - __func__); - return res; - } - if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH || d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) { event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type); - dir = D40_CHAN_REG_SDLNK; is_src = false; } else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) { event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type); - dir = D40_CHAN_REG_SSLNK; is_src = true; } else { dev_err(&d40c->chan.dev->device, @@ -1315,16 +1281,17 @@ static int d40_free_dma(struct d40_chan *d40c) return -EINVAL; } + res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ); + if (res) { + dev_err(&d40c->chan.dev->device, "[%s] suspend failed\n", + __func__); + return res; + } + if (d40c->log_num != D40_PHY_CHAN) { - /* - * Release logical channel, deactivate the event line during - * the time physical res is suspended. 
- */ - writel((D40_DEACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event)) & - D40_EVENTLINE_MASK(event), - d40c->base->virtbase + D40_DREG_PCBASE + - phy->num * D40_DREG_PCDELTA + dir); + /* Release logical channel, deactivate the event line */ + d40_config_set_event(d40c, false); d40c->base->lookup_log_chans[d40c->log_num] = NULL; /* @@ -1345,8 +1312,9 @@ static int d40_free_dma(struct d40_chan *d40c) } return 0; } - } else - d40_alloc_mask_free(phy, is_src, 0); + } else { + (void) d40_alloc_mask_free(phy, is_src, 0); + } /* Release physical channel */ res = d40_channel_execute_command(d40c, D40_DMA_STOP); @@ -1361,8 +1329,6 @@ static int d40_free_dma(struct d40_chan *d40c) d40c->base->lookup_phy_chans[phy->num] = NULL; return 0; - - } static int d40_pause(struct dma_chan *chan) @@ -1370,7 +1336,6 @@ static int d40_pause(struct dma_chan *chan) struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); int res; - unsigned long flags; spin_lock_irqsave(&d40c->lock, flags); @@ -1397,7 +1362,6 @@ static bool d40_is_paused(struct d40_chan *d40c) void __iomem *active_reg; u32 status; u32 event; - int res; spin_lock_irqsave(&d40c->lock, flags); @@ -1416,10 +1380,6 @@ static bool d40_is_paused(struct d40_chan *d40c) goto _exit; } - res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ); - if (res != 0) - goto _exit; - if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH || d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type); @@ -1436,12 +1396,6 @@ static bool d40_is_paused(struct d40_chan *d40c) if (status != D40_DMA_RUN) is_paused = true; - - /* Resume the other logical channels if any */ - if (d40_chan_has_events(d40c)) - res = d40_channel_execute_command(d40c, - D40_DMA_RUN); - _exit: spin_unlock_irqrestore(&d40c->lock, flags); return is_paused; @@ -1468,13 +1422,14 @@ static u32 d40_residue(struct d40_chan *d40c) u32 num_elt; if (d40c->log_num != D40_PHY_CHAN) - num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK) + num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK) >> D40_MEM_LCSP2_ECNT_POS; else num_elt = (readl(d40c->base->virtbase + D40_DREG_PCBASE + d40c->phy_chan->num * D40_DREG_PCDELTA + D40_CHAN_REG_SDELT) & - D40_SREG_ELEM_PHY_ECNT_MASK) >> D40_SREG_ELEM_PHY_ECNT_POS; + D40_SREG_ELEM_PHY_ECNT_MASK) >> + D40_SREG_ELEM_PHY_ECNT_POS; return num_elt * (1 << d40c->dma_cfg.dst_info.data_width); } @@ -1487,20 +1442,21 @@ static int d40_resume(struct dma_chan *chan) spin_lock_irqsave(&d40c->lock, flags); - if (d40c->log_num != D40_PHY_CHAN) { - res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ); - if (res) - goto out; + if (d40c->base->rev == 0) + if (d40c->log_num != D40_PHY_CHAN) { + res = d40_channel_execute_command(d40c, + D40_DMA_SUSPEND_REQ); + goto no_suspend; + } - /* If bytes left to transfer or linked tx resume job */ - if (d40_residue(d40c) || d40_tx_is_linked(d40c)) { + /* If bytes left to transfer or linked tx resume job */ + if (d40_residue(d40c) || d40_tx_is_linked(d40c)) { + if (d40c->log_num != D40_PHY_CHAN) d40_config_set_event(d40c, true); - res = d40_channel_execute_command(d40c, D40_DMA_RUN); - } - } else if (d40_residue(d40c) || d40_tx_is_linked(d40c)) res = d40_channel_execute_command(d40c, D40_DMA_RUN); + } -out: +no_suspend: spin_unlock_irqrestore(&d40c->lock, flags); return res; } @@ -1534,8 +1490,10 @@ int stedma40_set_psize(struct dma_chan *chan, if (d40c->log_num != D40_PHY_CHAN) { d40c->log_def.lcsp1 &= ~D40_MEM_LCSP1_SCFG_PSIZE_MASK; d40c->log_def.lcsp3 &= 
~D40_MEM_LCSP1_SCFG_PSIZE_MASK; - d40c->log_def.lcsp1 |= src_psize << D40_MEM_LCSP1_SCFG_PSIZE_POS; - d40c->log_def.lcsp3 |= dst_psize << D40_MEM_LCSP1_SCFG_PSIZE_POS; + d40c->log_def.lcsp1 |= src_psize << + D40_MEM_LCSP1_SCFG_PSIZE_POS; + d40c->log_def.lcsp3 |= dst_psize << + D40_MEM_LCSP1_SCFG_PSIZE_POS; goto out; } @@ -1566,37 +1524,42 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan, struct scatterlist *sgl_dst, struct scatterlist *sgl_src, unsigned int sgl_len, - unsigned long flags) + unsigned long dma_flags) { int res; struct d40_desc *d40d; struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); - unsigned long flg; - int lli_max = d40c->base->plat_data->llis_per_log; + unsigned long flags; + if (d40c->phy_chan == NULL) { + dev_err(&d40c->chan.dev->device, + "[%s] Unallocated channel.\n", __func__); + return ERR_PTR(-EINVAL); + } - spin_lock_irqsave(&d40c->lock, flg); + spin_lock_irqsave(&d40c->lock, flags); d40d = d40_desc_get(d40c); if (d40d == NULL) goto err; - memset(d40d, 0, sizeof(struct d40_desc)); d40d->lli_len = sgl_len; - - d40d->txd.flags = flags; + d40d->lli_tx_len = d40d->lli_len; + d40d->txd.flags = dma_flags; if (d40c->log_num != D40_PHY_CHAN) { + if (d40d->lli_len > d40c->base->plat_data->llis_per_log) + d40d->lli_tx_len = d40c->base->plat_data->llis_per_log; + if (sgl_len > 1) /* * Check if there is space available in lcla. If not, * split list into 1-length and run only in lcpa * space. */ - if (d40_lcla_id_get(d40c, - &d40c->base->lcla_pool) != 0) - lli_max = 1; + if (d40_lcla_id_get(d40c) != 0) + d40d->lli_tx_len = 1; if (d40_pool_lli_alloc(d40d, sgl_len, true) < 0) { dev_err(&d40c->chan.dev->device, @@ -1610,7 +1573,8 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan, d40d->lli_log.src, d40c->log_def.lcsp1, d40c->dma_cfg.src_info.data_width, - flags & DMA_PREP_INTERRUPT, lli_max, + dma_flags & DMA_PREP_INTERRUPT, + d40d->lli_tx_len, d40c->base->plat_data->llis_per_log); (void) d40_log_sg_to_lli(d40c->lcla.dst_id, @@ -1619,7 +1583,8 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan, d40d->lli_log.dst, d40c->log_def.lcsp3, d40c->dma_cfg.dst_info.data_width, - flags & DMA_PREP_INTERRUPT, lli_max, + dma_flags & DMA_PREP_INTERRUPT, + d40d->lli_tx_len, d40c->base->plat_data->llis_per_log); @@ -1664,11 +1629,11 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan, d40d->txd.tx_submit = d40_tx_submit; - spin_unlock_irqrestore(&d40c->lock, flg); + spin_unlock_irqrestore(&d40c->lock, flags); return &d40d->txd; err: - spin_unlock_irqrestore(&d40c->lock, flg); + spin_unlock_irqrestore(&d40c->lock, flags); return NULL; } EXPORT_SYMBOL(stedma40_memcpy_sg); @@ -1698,46 +1663,66 @@ static int d40_alloc_chan_resources(struct dma_chan *chan) unsigned long flags; struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); - + bool is_free_phy; spin_lock_irqsave(&d40c->lock, flags); d40c->completed = chan->cookie = 1; /* * If no dma configuration is set (channel_type == 0) - * use default configuration + * use default configuration (memcpy) */ if (d40c->dma_cfg.channel_type == 0) { err = d40_config_memcpy(d40c); - if (err) - goto err_alloc; + if (err) { + dev_err(&d40c->chan.dev->device, + "[%s] Failed to configure memcpy channel\n", + __func__); + goto fail; + } } + is_free_phy = (d40c->phy_chan == NULL); err = d40_allocate_channel(d40c); if (err) { dev_err(&d40c->chan.dev->device, "[%s] Failed to allocate channel\n", __func__); - goto err_alloc; + goto fail; } 
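
(A note on the LCLA handling used by stedma40_memcpy_sg() above: when d40_lcla_id_get() cannot claim entries, lli_tx_len is forced to 1 and the job runs from LCPA only. The pool itself is a per-physical-channel bitmap in which the source and the destination each occupy one bit. The user-space sketch below models that claim/release under the assumption of a 32-entry map; lcla_claim(), lcla_release() and the rollback on a failed destination claim are illustrative simplifications, not code from the driver.)

#include <stdio.h>
#include <stdint.h>

#define NUM_BLOCKS 32	/* entries per physical channel, as in the 32-bit alloc_map */

/* Claim two free entries, one for src and one for dst. Returns 0 on success. */
static int lcla_claim(uint32_t *map, int *src_id, int *dst_id)
{
	int i;

	for (i = 0; i < NUM_BLOCKS; i++)
		if (!(*map & (1u << i)))
			break;
	if (i >= NUM_BLOCKS)
		return -1;
	*map |= 1u << i;
	*src_id = i;

	/* Continue from the src slot; its bit is now set, so the scan skips it. */
	for (; i < NUM_BLOCKS; i++)
		if (!(*map & (1u << i)))
			break;
	if (i >= NUM_BLOCKS) {
		*map &= ~(1u << *src_id);	/* roll back the src claim (simplification) */
		return -1;
	}
	*map |= 1u << i;
	*dst_id = i;
	return 0;
}

static void lcla_release(uint32_t *map, int src_id, int dst_id)
{
	*map &= ~(1u << src_id);
	*map &= ~(1u << dst_id);
}

int main(void)
{
	uint32_t alloc_map = 0x0000000f;	/* pretend entries 0-3 are already taken */
	int src, dst;

	if (lcla_claim(&alloc_map, &src, &dst) == 0) {
		printf("claimed src=%d dst=%d map=0x%08x\n", src, dst, alloc_map);
		lcla_release(&alloc_map, src, dst);
	}
	printf("after release, map=0x%08x\n", alloc_map);	/* 0x0000000f again */
	return 0;
}

(In the driver the same bits are cleared again in d40_term_all(), under lcla_pool.lock, when a channel is torn down.)
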
- err = d40_config_chan(d40c, &d40c->dma_cfg); - if (err) { - dev_err(&d40c->chan.dev->device, - "[%s] Failed to configure channel\n", - __func__); - goto err_config; - } + /* Fill in basic CFG register values */ + d40_phy_cfg(&d40c->dma_cfg, &d40c->src_def_cfg, + &d40c->dst_def_cfg, d40c->log_num != D40_PHY_CHAN); - spin_unlock_irqrestore(&d40c->lock, flags); - return 0; + if (d40c->log_num != D40_PHY_CHAN) { + d40_log_cfg(&d40c->dma_cfg, + &d40c->log_def.lcsp1, &d40c->log_def.lcsp3); - err_config: - (void) d40_free_dma(d40c); - err_alloc: + if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) + d40c->lcpa = d40c->base->lcpa_base + + d40c->dma_cfg.src_dev_type * D40_LCPA_CHAN_SIZE; + else + d40c->lcpa = d40c->base->lcpa_base + + d40c->dma_cfg.dst_dev_type * + D40_LCPA_CHAN_SIZE + D40_LCPA_CHAN_DST_DELTA; + } + + /* + * Only write channel configuration to the DMA if the physical + * resource is free. In case of multiple logical channels + * on the same physical resource, only the first write is necessary. + */ + if (is_free_phy) { + err = d40_config_write(d40c); + if (err) { + dev_err(&d40c->chan.dev->device, + "[%s] Failed to configure channel\n", + __func__); + } + } +fail: spin_unlock_irqrestore(&d40c->lock, flags); - dev_err(&d40c->chan.dev->device, - "[%s] Channel allocation failed\n", __func__); - return -EINVAL; + return err; } static void d40_free_chan_resources(struct dma_chan *chan) @@ -1747,6 +1732,13 @@ static void d40_free_chan_resources(struct dma_chan *chan) int err; unsigned long flags; + if (d40c->phy_chan == NULL) { + dev_err(&d40c->chan.dev->device, + "[%s] Cannot free unallocated channel\n", __func__); + return; + } + + spin_lock_irqsave(&d40c->lock, flags); err = d40_free_dma(d40c); @@ -1761,15 +1753,21 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src, size_t size, - unsigned long flags) + unsigned long dma_flags) { struct d40_desc *d40d; struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); - unsigned long flg; + unsigned long flags; int err = 0; - spin_lock_irqsave(&d40c->lock, flg); + if (d40c->phy_chan == NULL) { + dev_err(&d40c->chan.dev->device, + "[%s] Channel is not allocated.\n", __func__); + return ERR_PTR(-EINVAL); + } + + spin_lock_irqsave(&d40c->lock, flags); d40d = d40_desc_get(d40c); if (d40d == NULL) { @@ -1778,9 +1776,7 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan, goto err; } - memset(d40d, 0, sizeof(struct d40_desc)); - - d40d->txd.flags = flags; + d40d->txd.flags = dma_flags; dma_async_tx_descriptor_init(&d40d->txd, chan); @@ -1794,6 +1790,7 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan, goto err; } d40d->lli_len = 1; + d40d->lli_tx_len = 1; d40_log_fill_lli(d40d->lli_log.src, src, @@ -1801,7 +1798,7 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan, 0, d40c->log_def.lcsp1, d40c->dma_cfg.src_info.data_width, - true, true); + false, true); d40_log_fill_lli(d40d->lli_log.dst, dst, @@ -1848,7 +1845,7 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan, d40d->lli_pool.size, DMA_TO_DEVICE); } - spin_unlock_irqrestore(&d40c->lock, flg); + spin_unlock_irqrestore(&d40c->lock, flags); return &d40d->txd; err_fill_lli: @@ -1856,7 +1853,7 @@ err_fill_lli: "[%s] Failed filling in PHY LLI\n", __func__); d40_pool_lli_free(d40d); err: - spin_unlock_irqrestore(&d40c->lock, flg); + spin_unlock_irqrestore(&d40c->lock, flags); return NULL; } @@ -1865,11 +1862,10 @@ static 
int d40_prep_slave_sg_log(struct d40_desc *d40d, struct scatterlist *sgl, unsigned int sg_len, enum dma_data_direction direction, - unsigned long flags) + unsigned long dma_flags) { dma_addr_t dev_addr = 0; int total_size; - int lli_max = d40c->base->plat_data->llis_per_log; if (d40_pool_lli_alloc(d40d, sg_len, true) < 0) { dev_err(&d40c->chan.dev->device, @@ -1878,7 +1874,10 @@ static int d40_prep_slave_sg_log(struct d40_desc *d40d, } d40d->lli_len = sg_len; - d40d->lli_tcount = 0; + if (d40d->lli_len <= d40c->base->plat_data->llis_per_log) + d40d->lli_tx_len = d40d->lli_len; + else + d40d->lli_tx_len = d40c->base->plat_data->llis_per_log; if (sg_len > 1) /* @@ -1886,35 +1885,34 @@ static int d40_prep_slave_sg_log(struct d40_desc *d40d, * If not, split list into 1-length and run only * in lcpa space. */ - if (d40_lcla_id_get(d40c, &d40c->base->lcla_pool) != 0) - lli_max = 1; + if (d40_lcla_id_get(d40c) != 0) + d40d->lli_tx_len = 1; - if (direction == DMA_FROM_DEVICE) { - dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type]; - total_size = d40_log_sg_to_dev(&d40c->lcla, - sgl, sg_len, - &d40d->lli_log, - &d40c->log_def, - d40c->dma_cfg.src_info.data_width, - d40c->dma_cfg.dst_info.data_width, - direction, - flags & DMA_PREP_INTERRUPT, - dev_addr, lli_max, - d40c->base->plat_data->llis_per_log); - } else if (direction == DMA_TO_DEVICE) { - dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type]; - total_size = d40_log_sg_to_dev(&d40c->lcla, - sgl, sg_len, - &d40d->lli_log, - &d40c->log_def, - d40c->dma_cfg.src_info.data_width, - d40c->dma_cfg.dst_info.data_width, - direction, - flags & DMA_PREP_INTERRUPT, - dev_addr, lli_max, - d40c->base->plat_data->llis_per_log); - } else + if (direction == DMA_FROM_DEVICE) + if (d40c->runtime_addr) + dev_addr = d40c->runtime_addr; + else + dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type]; + else if (direction == DMA_TO_DEVICE) + if (d40c->runtime_addr) + dev_addr = d40c->runtime_addr; + else + dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type]; + + else return -EINVAL; + + total_size = d40_log_sg_to_dev(&d40c->lcla, + sgl, sg_len, + &d40d->lli_log, + &d40c->log_def, + d40c->dma_cfg.src_info.data_width, + d40c->dma_cfg.dst_info.data_width, + direction, + dma_flags & DMA_PREP_INTERRUPT, + dev_addr, d40d->lli_tx_len, + d40c->base->plat_data->llis_per_log); + if (total_size < 0) return -EINVAL; @@ -1926,7 +1924,7 @@ static int d40_prep_slave_sg_phy(struct d40_desc *d40d, struct scatterlist *sgl, unsigned int sgl_len, enum dma_data_direction direction, - unsigned long flags) + unsigned long dma_flags) { dma_addr_t src_dev_addr; dma_addr_t dst_dev_addr; @@ -1939,13 +1937,19 @@ static int d40_prep_slave_sg_phy(struct d40_desc *d40d, } d40d->lli_len = sgl_len; - d40d->lli_tcount = 0; + d40d->lli_tx_len = sgl_len; if (direction == DMA_FROM_DEVICE) { dst_dev_addr = 0; - src_dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type]; + if (d40c->runtime_addr) + src_dev_addr = d40c->runtime_addr; + else + src_dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type]; } else if (direction == DMA_TO_DEVICE) { - dst_dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type]; + if (d40c->runtime_addr) + dst_dev_addr = d40c->runtime_addr; + else + dst_dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type]; src_dev_addr = 0; } else return -EINVAL; @@ -1983,34 +1987,38 @@ static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan, struct 
scatterlist *sgl, unsigned int sg_len, enum dma_data_direction direction, - unsigned long flags) + unsigned long dma_flags) { struct d40_desc *d40d; struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); - unsigned long flg; + unsigned long flags; int err; + if (d40c->phy_chan == NULL) { + dev_err(&d40c->chan.dev->device, + "[%s] Cannot prepare unallocated channel\n", __func__); + return ERR_PTR(-EINVAL); + } + if (d40c->dma_cfg.pre_transfer) d40c->dma_cfg.pre_transfer(chan, d40c->dma_cfg.pre_transfer_data, sg_dma_len(sgl)); - spin_lock_irqsave(&d40c->lock, flg); + spin_lock_irqsave(&d40c->lock, flags); d40d = d40_desc_get(d40c); - spin_unlock_irqrestore(&d40c->lock, flg); + spin_unlock_irqrestore(&d40c->lock, flags); if (d40d == NULL) return NULL; - memset(d40d, 0, sizeof(struct d40_desc)); - if (d40c->log_num != D40_PHY_CHAN) err = d40_prep_slave_sg_log(d40d, d40c, sgl, sg_len, - direction, flags); + direction, dma_flags); else err = d40_prep_slave_sg_phy(d40d, d40c, sgl, sg_len, - direction, flags); + direction, dma_flags); if (err) { dev_err(&d40c->chan.dev->device, "[%s] Failed to prepare %s slave sg job: %d\n", @@ -2019,7 +2027,7 @@ static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan, return NULL; } - d40d->txd.flags = flags; + d40d->txd.flags = dma_flags; dma_async_tx_descriptor_init(&d40d->txd, chan); @@ -2037,6 +2045,13 @@ static enum dma_status d40_tx_status(struct dma_chan *chan, dma_cookie_t last_complete; int ret; + if (d40c->phy_chan == NULL) { + dev_err(&d40c->chan.dev->device, + "[%s] Cannot read status of unallocated channel\n", + __func__); + return -EINVAL; + } + last_complete = d40c->completed; last_used = chan->cookie; @@ -2056,6 +2071,12 @@ static void d40_issue_pending(struct dma_chan *chan) struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); unsigned long flags; + if (d40c->phy_chan == NULL) { + dev_err(&d40c->chan.dev->device, + "[%s] Channel is not allocated!\n", __func__); + return; + } + spin_lock_irqsave(&d40c->lock, flags); /* Busy means that pending jobs are already being processed */ @@ -2065,12 +2086,129 @@ static void d40_issue_pending(struct dma_chan *chan) spin_unlock_irqrestore(&d40c->lock, flags); } +/* Runtime reconfiguration extension */ +static void d40_set_runtime_config(struct dma_chan *chan, + struct dma_slave_config *config) +{ + struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); + struct stedma40_chan_cfg *cfg = &d40c->dma_cfg; + enum dma_slave_buswidth config_addr_width; + dma_addr_t config_addr; + u32 config_maxburst; + enum stedma40_periph_data_width addr_width; + int psize; + + if (config->direction == DMA_FROM_DEVICE) { + dma_addr_t dev_addr_rx = + d40c->base->plat_data->dev_rx[cfg->src_dev_type]; + + config_addr = config->src_addr; + if (dev_addr_rx) + dev_dbg(d40c->base->dev, + "channel has a pre-wired RX address %08x " + "overriding with %08x\n", + dev_addr_rx, config_addr); + if (cfg->dir != STEDMA40_PERIPH_TO_MEM) + dev_dbg(d40c->base->dev, + "channel was not configured for peripheral " + "to memory transfer (%d) overriding\n", + cfg->dir); + cfg->dir = STEDMA40_PERIPH_TO_MEM; + + config_addr_width = config->src_addr_width; + config_maxburst = config->src_maxburst; + + } else if (config->direction == DMA_TO_DEVICE) { + dma_addr_t dev_addr_tx = + d40c->base->plat_data->dev_tx[cfg->dst_dev_type]; + + config_addr = config->dst_addr; + if (dev_addr_tx) + dev_dbg(d40c->base->dev, + "channel has a pre-wired TX address %08x " + "overriding with %08x\n", + dev_addr_tx, 
config_addr); + if (cfg->dir != STEDMA40_MEM_TO_PERIPH) + dev_dbg(d40c->base->dev, + "channel was not configured for memory " + "to peripheral transfer (%d) overriding\n", + cfg->dir); + cfg->dir = STEDMA40_MEM_TO_PERIPH; + + config_addr_width = config->dst_addr_width; + config_maxburst = config->dst_maxburst; + + } else { + dev_err(d40c->base->dev, + "unrecognized channel direction %d\n", + config->direction); + return; + } + + switch (config_addr_width) { + case DMA_SLAVE_BUSWIDTH_1_BYTE: + addr_width = STEDMA40_BYTE_WIDTH; + break; + case DMA_SLAVE_BUSWIDTH_2_BYTES: + addr_width = STEDMA40_HALFWORD_WIDTH; + break; + case DMA_SLAVE_BUSWIDTH_4_BYTES: + addr_width = STEDMA40_WORD_WIDTH; + break; + case DMA_SLAVE_BUSWIDTH_8_BYTES: + addr_width = STEDMA40_DOUBLEWORD_WIDTH; + break; + default: + dev_err(d40c->base->dev, + "illegal peripheral address width " + "requested (%d)\n", + config->src_addr_width); + return; + } + + if (config_maxburst >= 16) + psize = STEDMA40_PSIZE_LOG_16; + else if (config_maxburst >= 8) + psize = STEDMA40_PSIZE_LOG_8; + else if (config_maxburst >= 4) + psize = STEDMA40_PSIZE_LOG_4; + else + psize = STEDMA40_PSIZE_LOG_1; + + /* Set up all the endpoint configs */ + cfg->src_info.data_width = addr_width; + cfg->src_info.psize = psize; + cfg->src_info.endianess = STEDMA40_LITTLE_ENDIAN; + cfg->src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL; + cfg->dst_info.data_width = addr_width; + cfg->dst_info.psize = psize; + cfg->dst_info.endianess = STEDMA40_LITTLE_ENDIAN; + cfg->dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL; + + /* These settings will take precedence later */ + d40c->runtime_addr = config_addr; + d40c->runtime_direction = config->direction; + dev_dbg(d40c->base->dev, + "configured channel %s for %s, data width %d, " + "maxburst %d bytes, LE, no flow control\n", + dma_chan_name(chan), + (config->direction == DMA_FROM_DEVICE) ? 
"RX" : "TX", + config_addr_width, + config_maxburst); +} + static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned long arg) { unsigned long flags; struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); + if (d40c->phy_chan == NULL) { + dev_err(&d40c->chan.dev->device, + "[%s] Channel is not allocated!\n", __func__); + return -EINVAL; + } + switch (cmd) { case DMA_TERMINATE_ALL: spin_lock_irqsave(&d40c->lock, flags); @@ -2081,6 +2219,12 @@ static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, return d40_pause(chan); case DMA_RESUME: return d40_resume(chan); + case DMA_SLAVE_CONFIG: + d40_set_runtime_config(chan, + (struct dma_slave_config *) arg); + return 0; + default: + break; } /* Other commands are unimplemented */ @@ -2111,13 +2255,10 @@ static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma, d40c->log_num = D40_PHY_CHAN; - INIT_LIST_HEAD(&d40c->free); INIT_LIST_HEAD(&d40c->active); INIT_LIST_HEAD(&d40c->queue); INIT_LIST_HEAD(&d40c->client); - d40c->free_len = 0; - tasklet_init(&d40c->tasklet, dma_tasklet, (unsigned long) d40c); @@ -2243,6 +2384,14 @@ static int __init d40_phy_res_init(struct d40_base *base) } spin_lock_init(&base->phy_res[i].lock); } + + /* Mark disabled channels as occupied */ + for (i = 0; base->plat_data->disabled_channels[i] != -1; i++) { + base->phy_res[i].allocated_src = D40_ALLOC_PHY; + base->phy_res[i].allocated_dst = D40_ALLOC_PHY; + num_phy_chans_avail--; + } + dev_info(base->dev, "%d of %d physical DMA channels available\n", num_phy_chans_avail, base->num_phy_chans); @@ -2291,6 +2440,7 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) int num_log_chans = 0; int num_phy_chans; int i; + u32 val; clk = clk_get(&pdev->dev, NULL); @@ -2329,12 +2479,13 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) } } - i = readl(virtbase + D40_DREG_PERIPHID2); + /* Get silicon revision */ + val = readl(virtbase + D40_DREG_PERIPHID2); - if ((i & 0xf) != D40_PERIPHID2_DESIGNER) { + if ((val & 0xf) != D40_PERIPHID2_DESIGNER) { dev_err(&pdev->dev, "[%s] Unknown designer! Got %x wanted %x\n", - __func__, i & 0xf, D40_PERIPHID2_DESIGNER); + __func__, val & 0xf, D40_PERIPHID2_DESIGNER); goto failure; } @@ -2342,7 +2493,7 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4; dev_info(&pdev->dev, "hardware revision: %d @ 0x%x\n", - (i >> 4) & 0xf, res->start); + (val >> 4) & 0xf, res->start); plat_data = pdev->dev.platform_data; @@ -2364,6 +2515,7 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) goto failure; } + base->rev = (val >> 4) & 0xf; base->clk = clk; base->num_phy_chans = num_phy_chans; base->num_log_chans = num_log_chans; @@ -2402,6 +2554,12 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) if (!base->lcla_pool.alloc_map) goto failure; + base->desc_slab = kmem_cache_create(D40_NAME, sizeof(struct d40_desc), + 0, SLAB_HWCACHE_ALIGN, + NULL); + if (base->desc_slab == NULL) + goto failure; + return base; failure: @@ -2495,6 +2653,78 @@ static void __init d40_hw_init(struct d40_base *base) } +static int __init d40_lcla_allocate(struct d40_base *base) +{ + unsigned long *page_list; + int i, j; + int ret = 0; + + /* + * This is somewhat ugly. 
We need 8192 bytes that are 18 bit aligned. + * To fulfill this hardware requirement without wasting 256 KB, + * we allocate pages until we get an aligned one. + */ + page_list = kmalloc(sizeof(unsigned long) * MAX_LCLA_ALLOC_ATTEMPTS, + GFP_KERNEL); + + if (!page_list) { + ret = -ENOMEM; + goto failure; + } + + /* Calculate how many pages are required */ + base->lcla_pool.pages = SZ_1K * base->num_phy_chans / PAGE_SIZE; + + for (i = 0; i < MAX_LCLA_ALLOC_ATTEMPTS; i++) { + page_list[i] = __get_free_pages(GFP_KERNEL, + base->lcla_pool.pages); + if (!page_list[i]) { + + dev_err(base->dev, + "[%s] Failed to allocate %d pages.\n", + __func__, base->lcla_pool.pages); + + for (j = 0; j < i; j++) + free_pages(page_list[j], base->lcla_pool.pages); + goto failure; + } + + if ((virt_to_phys((void *)page_list[i]) & + (LCLA_ALIGNMENT - 1)) == 0) + break; + } + + for (j = 0; j < i; j++) + free_pages(page_list[j], base->lcla_pool.pages); + + if (i < MAX_LCLA_ALLOC_ATTEMPTS) { + base->lcla_pool.base = (void *)page_list[i]; + } else { + /* After many attempts, no success finding the correct + * alignment, try allocating a big buffer */ + dev_warn(base->dev, + "[%s] Failed to get %d pages @ 18 bit align.\n", + __func__, base->lcla_pool.pages); + base->lcla_pool.base_unaligned = kmalloc(SZ_1K * + base->num_phy_chans + + LCLA_ALIGNMENT, + GFP_KERNEL); + if (!base->lcla_pool.base_unaligned) { + ret = -ENOMEM; + goto failure; + } + + base->lcla_pool.base = PTR_ALIGN(base->lcla_pool.base_unaligned, + LCLA_ALIGNMENT); + } + + writel(virt_to_phys(base->lcla_pool.base), + base->virtbase + D40_DREG_LCLA); +failure: + kfree(page_list); + return ret; +} + static int __init d40_probe(struct platform_device *pdev) { int err; @@ -2554,41 +2784,11 @@ static int __init d40_probe(struct platform_device *pdev) __func__); goto failure; } - /* Get IO for logical channel link address */ - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcla"); - if (!res) { - ret = -ENOENT; - dev_err(&pdev->dev, - "[%s] No \"lcla\" resource defined\n", - __func__); - goto failure; - } - base->lcla_pool.base_size = resource_size(res); - base->lcla_pool.phy = res->start; - - if (request_mem_region(res->start, resource_size(res), - D40_NAME " I/O lcla") == NULL) { - ret = -EBUSY; - dev_err(&pdev->dev, - "[%s] Failed to request LCLA region 0x%x-0x%x\n", - __func__, res->start, res->end); - goto failure; - } - val = readl(base->virtbase + D40_DREG_LCLA); - if (res->start != val && val != 0) { - dev_warn(&pdev->dev, - "[%s] Mismatch LCLA dma 0x%x, def 0x%x\n", - __func__, val, res->start); - } else - writel(res->start, base->virtbase + D40_DREG_LCLA); - - base->lcla_pool.base = ioremap(res->start, resource_size(res)); - if (!base->lcla_pool.base) { - ret = -ENOMEM; - dev_err(&pdev->dev, - "[%s] Failed to ioremap LCLA 0x%x-0x%x\n", - __func__, res->start, res->end); + ret = d40_lcla_allocate(base); + if (ret) { + dev_err(&pdev->dev, "[%s] Failed to allocate LCLA area\n", + __func__); goto failure; } @@ -2616,11 +2816,15 @@ static int __init d40_probe(struct platform_device *pdev) failure: if (base) { + if (base->desc_slab) + kmem_cache_destroy(base->desc_slab); if (base->virtbase) iounmap(base->virtbase); - if (base->lcla_pool.phy) - release_mem_region(base->lcla_pool.phy, - base->lcla_pool.base_size); + if (!base->lcla_pool.base_unaligned && base->lcla_pool.base) + free_pages((unsigned long)base->lcla_pool.base, + base->lcla_pool.pages); + if (base->lcla_pool.base_unaligned) + kfree(base->lcla_pool.base_unaligned); if
(base->phy_lcpa) release_mem_region(base->phy_lcpa, base->lcpa_size); diff --git a/drivers/dma/ste_dma40_ll.c b/drivers/dma/ste_dma40_ll.c index 561fdd8a80c1..d937f76d6e2e 100644 --- a/drivers/dma/ste_dma40_ll.c +++ b/drivers/dma/ste_dma40_ll.c @@ -315,11 +315,8 @@ int d40_log_sg_to_dev(struct d40_lcla_elem *lcla, int total_size = 0; struct scatterlist *current_sg = sg; int i; - u32 next_lli_off_dst; - u32 next_lli_off_src; - - next_lli_off_src = 0; - next_lli_off_dst = 0; + u32 next_lli_off_dst = 0; + u32 next_lli_off_src = 0; for_each_sg(sg, current_sg, sg_len, i) { total_size += sg_dma_len(current_sg); @@ -351,7 +348,7 @@ int d40_log_sg_to_dev(struct d40_lcla_elem *lcla, sg_dma_len(current_sg), next_lli_off_src, lcsp->lcsp1, src_data_width, - term_int && !next_lli_off_src, + false, true); d40_log_fill_lli(&lli->dst[i], dev_addr, @@ -375,7 +372,7 @@ int d40_log_sg_to_dev(struct d40_lcla_elem *lcla, sg_dma_len(current_sg), next_lli_off_src, lcsp->lcsp1, src_data_width, - term_int && !next_lli_off_src, + false, false); } } @@ -423,32 +420,35 @@ int d40_log_sg_to_lli(int lcla_id, return total_size; } -void d40_log_lli_write(struct d40_log_lli_full *lcpa, +int d40_log_lli_write(struct d40_log_lli_full *lcpa, struct d40_log_lli *lcla_src, struct d40_log_lli *lcla_dst, struct d40_log_lli *lli_dst, struct d40_log_lli *lli_src, int llis_per_log) { - u32 slos = 0; - u32 dlos = 0; + u32 slos; + u32 dlos; int i; - lcpa->lcsp0 = lli_src->lcsp02; - lcpa->lcsp1 = lli_src->lcsp13; - lcpa->lcsp2 = lli_dst->lcsp02; - lcpa->lcsp3 = lli_dst->lcsp13; + writel(lli_src->lcsp02, &lcpa->lcsp0); + writel(lli_src->lcsp13, &lcpa->lcsp1); + writel(lli_dst->lcsp02, &lcpa->lcsp2); + writel(lli_dst->lcsp13, &lcpa->lcsp3); slos = lli_src->lcsp13 & D40_MEM_LCSP1_SLOS_MASK; dlos = lli_dst->lcsp13 & D40_MEM_LCSP3_DLOS_MASK; for (i = 0; (i < llis_per_log) && slos && dlos; i++) { - writel(lli_src[i+1].lcsp02, &lcla_src[i].lcsp02); - writel(lli_src[i+1].lcsp13, &lcla_src[i].lcsp13); - writel(lli_dst[i+1].lcsp02, &lcla_dst[i].lcsp02); - writel(lli_dst[i+1].lcsp13, &lcla_dst[i].lcsp13); + writel(lli_src[i + 1].lcsp02, &lcla_src[i].lcsp02); + writel(lli_src[i + 1].lcsp13, &lcla_src[i].lcsp13); + writel(lli_dst[i + 1].lcsp02, &lcla_dst[i].lcsp02); + writel(lli_dst[i + 1].lcsp13, &lcla_dst[i].lcsp13); - slos = lli_src[i+1].lcsp13 & D40_MEM_LCSP1_SLOS_MASK; - dlos = lli_dst[i+1].lcsp13 & D40_MEM_LCSP3_DLOS_MASK; + slos = lli_src[i + 1].lcsp13 & D40_MEM_LCSP1_SLOS_MASK; + dlos = lli_dst[i + 1].lcsp13 & D40_MEM_LCSP3_DLOS_MASK; } + + return i; + } diff --git a/drivers/dma/ste_dma40_ll.h b/drivers/dma/ste_dma40_ll.h index 2029280cb332..9c0fa2f5fe57 100644 --- a/drivers/dma/ste_dma40_ll.h +++ b/drivers/dma/ste_dma40_ll.h @@ -13,6 +13,9 @@ #define D40_DREG_PCDELTA (8 * 4) #define D40_LLI_ALIGN 16 /* LLI alignment must be 16 bytes. 
*/ +#define D40_LCPA_CHAN_SIZE 32 +#define D40_LCPA_CHAN_DST_DELTA 16 + #define D40_TYPE_TO_GROUP(type) (type / 16) #define D40_TYPE_TO_EVENT(type) (type % 16) @@ -336,12 +339,12 @@ int d40_log_sg_to_dev(struct d40_lcla_elem *lcla, bool term_int, dma_addr_t dev_addr, int max_len, int llis_per_log); -void d40_log_lli_write(struct d40_log_lli_full *lcpa, - struct d40_log_lli *lcla_src, - struct d40_log_lli *lcla_dst, - struct d40_log_lli *lli_dst, - struct d40_log_lli *lli_src, - int llis_per_log); +int d40_log_lli_write(struct d40_log_lli_full *lcpa, + struct d40_log_lli *lcla_src, + struct d40_log_lli *lcla_dst, + struct d40_log_lli *lli_dst, + struct d40_log_lli *lli_src, + int llis_per_log); int d40_log_sg_to_lli(int lcla_id, struct scatterlist *sg, diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c index a1bf77c1993f..2ec1ed56f204 100644 --- a/drivers/dma/timb_dma.c +++ b/drivers/dma/timb_dma.c @@ -200,8 +200,8 @@ static int td_fill_desc(struct timb_dma_chan *td_chan, u8 *dma_desc, return -EINVAL; } - dev_dbg(chan2dev(&td_chan->chan), "desc: %p, addr: %p\n", - dma_desc, (void *)sg_dma_address(sg)); + dev_dbg(chan2dev(&td_chan->chan), "desc: %p, addr: 0x%llx\n", + dma_desc, (unsigned long long)sg_dma_address(sg)); dma_desc[7] = (sg_dma_address(sg) >> 24) & 0xff; dma_desc[6] = (sg_dma_address(sg) >> 16) & 0xff; @@ -382,7 +382,7 @@ static struct timb_dma_desc *td_alloc_init_desc(struct timb_dma_chan *td_chan) td_desc = kzalloc(sizeof(struct timb_dma_desc), GFP_KERNEL); if (!td_desc) { dev_err(chan2dev(chan), "Failed to alloc descriptor\n"); - goto err; + goto out; } td_desc->desc_list_len = td_chan->desc_elems * TIMB_DMA_DESC_SIZE; @@ -410,7 +410,7 @@ static struct timb_dma_desc *td_alloc_init_desc(struct timb_dma_chan *td_chan) err: kfree(td_desc->desc_list); kfree(td_desc); - +out: return NULL; } diff --git a/drivers/firmware/edd.c b/drivers/firmware/edd.c index 110e24e50883..f287fe79edc4 100644 --- a/drivers/firmware/edd.c +++ b/drivers/firmware/edd.c @@ -744,7 +744,7 @@ static inline int edd_num_devices(void) static int __init edd_init(void) { - unsigned int i; + int i; int rc=0; struct edd_device *edev; @@ -760,21 +760,27 @@ edd_init(void) if (!edd_kset) return -ENOMEM; - for (i = 0; i < edd_num_devices() && !rc; i++) { + for (i = 0; i < edd_num_devices(); i++) { edev = kzalloc(sizeof (*edev), GFP_KERNEL); - if (!edev) - return -ENOMEM; + if (!edev) { + rc = -ENOMEM; + goto out; + } rc = edd_device_register(edev, i); if (rc) { kfree(edev); - break; + goto out; } edd_devices[i] = edev; } - if (rc) - kset_unregister(edd_kset); + return 0; + +out: + while (--i >= 0) + edd_device_unregister(edd_devices[i]); + kset_unregister(edd_kset); return rc; } diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c index 45981304feb8..b9e4dbfa0533 100644 --- a/drivers/gpu/drm/drm_crtc_helper.c +++ b/drivers/gpu/drm/drm_crtc_helper.c @@ -839,7 +839,6 @@ static void output_poll_execute(struct work_struct *work) struct drm_connector *connector; enum drm_connector_status old_status, status; bool repoll = false, changed = false; - int ret; mutex_lock(&dev->mode_config.mutex); list_for_each_entry(connector, &dev->mode_config.connector_list, head) { @@ -874,11 +873,8 @@ static void output_poll_execute(struct work_struct *work) dev->mode_config.funcs->output_poll_changed(dev); } - if (repoll) { - ret = queue_delayed_work(system_nrt_wq, delayed_work, DRM_OUTPUT_POLL_PERIOD); - if (ret) - DRM_ERROR("delayed enqueue failed %d\n", ret); - } + if (repoll) + 
queue_delayed_work(system_nrt_wq, delayed_work, DRM_OUTPUT_POLL_PERIOD); } void drm_kms_helper_poll_disable(struct drm_device *dev) @@ -893,18 +889,14 @@ void drm_kms_helper_poll_enable(struct drm_device *dev) { bool poll = false; struct drm_connector *connector; - int ret; list_for_each_entry(connector, &dev->mode_config.connector_list, head) { if (connector->polled) poll = true; } - if (poll) { - ret = queue_delayed_work(system_nrt_wq, &dev->mode_config.output_poll_work, DRM_OUTPUT_POLL_PERIOD); - if (ret) - DRM_ERROR("delayed enqueue failed %d\n", ret); - } + if (poll) + queue_delayed_work(system_nrt_wq, &dev->mode_config.output_poll_work, DRM_OUTPUT_POLL_PERIOD); } EXPORT_SYMBOL(drm_kms_helper_poll_enable); diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig index 4d382ae53092..f3adf18bfa05 100644 --- a/drivers/hwmon/Kconfig +++ b/drivers/hwmon/Kconfig @@ -332,11 +332,11 @@ config SENSORS_F71805F will be called f71805f. config SENSORS_F71882FG - tristate "Fintek F71858FG, F71862FG, F71882FG, F71889FG and F8000" + tristate "Fintek F71808E, F71858FG, F71862FG, F71882FG, F71889FG and F8000" depends on EXPERIMENTAL help - If you say yes here you get support for hardware monitoring - features of the Fintek F71858FG, F71862FG/71863FG, F71882FG/F71883FG, + If you say yes here you get support for hardware monitoring features + of the Fintek F71808E, F71858FG, F71862FG/71863FG, F71882FG/F71883FG, F71889FG and F8000 Super-I/O chips. This driver can also be built as a module. If so, the module @@ -405,7 +405,7 @@ config SENSORS_CORETEMP help If you say yes here you get support for the temperature sensor inside your CPU. Most of the family 6 CPUs - are supported. Check documentation/driver for details. + are supported. Check Documentation/hwmon/coretemp for details. config SENSORS_PKGTEMP tristate "Intel processor package temperature sensor" @@ -463,6 +463,17 @@ config SENSORS_JZ4740 This driver can also be built as a module. If so, the module will be called jz4740-hwmon. +config SENSORS_JC42 + tristate "JEDEC JC42.4 compliant temperature sensors" + help + If you say yes here you get support for JEDEC JC42.4 compliant + temperature sensors. Support will include, but not be limited to, + ADT7408, CAT34TS02, CAT6095, MAX6604, MCP9805, MCP98242, MCP98243, + MCP9843, SE97, SE98, STTS424, TSE2002B3, and TS3000B3. + + This driver can also be built as a module. If so, the module + will be called jc42. + config SENSORS_LM63 tristate "National Semiconductor LM63 and LM64" depends on I2C @@ -756,6 +767,21 @@ config SENSORS_SIS5595 This driver can also be built as a module. If so, the module will be called sis5595. +config SENSORS_SMM665 + tristate "Summit Microelectronics SMM665" + depends on I2C && EXPERIMENTAL + default n + help + If you say yes here you get support for the hardware monitoring + features of the Summit Microelectronics SMM665/SMM665B Six-Channel + Active DC Output Controller / Monitor. + + Other supported chips are SMM465, SMM665C, SMM764, and SMM766. + Support for those chips is untested. + + This driver can also be built as a module. If so, the module will + be called smm665.
+ config SENSORS_DME1737 tristate "SMSC DME1737, SCH311x and compatibles" depends on I2C && EXPERIMENTAL diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile index 9103bd6ea73a..13d913e34dbf 100644 --- a/drivers/hwmon/Makefile +++ b/drivers/hwmon/Makefile @@ -56,6 +56,7 @@ obj-$(CONFIG_SENSORS_I5K_AMB) += i5k_amb.o obj-$(CONFIG_SENSORS_IBMAEM) += ibmaem.o obj-$(CONFIG_SENSORS_IBMPEX) += ibmpex.o obj-$(CONFIG_SENSORS_IT87) += it87.o +obj-$(CONFIG_SENSORS_JC42) += jc42.o obj-$(CONFIG_SENSORS_JZ4740) += jz4740-hwmon.o obj-$(CONFIG_SENSORS_K8TEMP) += k8temp.o obj-$(CONFIG_SENSORS_K10TEMP) += k10temp.o @@ -88,6 +89,7 @@ obj-$(CONFIG_SENSORS_PCF8591) += pcf8591.o obj-$(CONFIG_SENSORS_S3C) += s3c-hwmon.o obj-$(CONFIG_SENSORS_SHT15) += sht15.o obj-$(CONFIG_SENSORS_SIS5595) += sis5595.o +obj-$(CONFIG_SENSORS_SMM665) += smm665.o obj-$(CONFIG_SENSORS_SMSC47B397)+= smsc47b397.o obj-$(CONFIG_SENSORS_SMSC47M1) += smsc47m1.o obj-$(CONFIG_SENSORS_SMSC47M192)+= smsc47m192.o diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c index 05344af50734..a92e28a35767 100644 --- a/drivers/hwmon/coretemp.c +++ b/drivers/hwmon/coretemp.c @@ -480,7 +480,6 @@ exit: return err; } -#ifdef CONFIG_HOTPLUG_CPU static void coretemp_device_remove(unsigned int cpu) { struct pdev_entry *p, *n; @@ -502,10 +501,13 @@ static int __cpuinit coretemp_cpu_callback(struct notifier_block *nfb, switch (action) { case CPU_ONLINE: + case CPU_ONLINE_FROZEN: case CPU_DOWN_FAILED: + case CPU_DOWN_FAILED_FROZEN: coretemp_device_add(cpu); break; case CPU_DOWN_PREPARE: + case CPU_DOWN_PREPARE_FROZEN: coretemp_device_remove(cpu); break; } @@ -515,7 +517,6 @@ static int __cpuinit coretemp_cpu_callback(struct notifier_block *nfb, static struct notifier_block coretemp_cpu_notifier __refdata = { .notifier_call = coretemp_cpu_callback, }; -#endif /* !CONFIG_HOTPLUG_CPU */ static int __init coretemp_init(void) { @@ -537,12 +538,9 @@ static int __init coretemp_init(void) * sensors. We check this bit only, all the early CPUs * without thermal sensors will be filtered out. 
*/ - if (c->cpuid_level >= 6 && (cpuid_eax(0x06) & 0x01)) { - err = coretemp_device_add(i); - if (err) - goto exit_devices_unreg; - - } else { + if (c->cpuid_level >= 6 && (cpuid_eax(0x06) & 0x01)) + coretemp_device_add(i); + else { printk(KERN_INFO DRVNAME ": CPU (model=0x%x)" " has no thermal sensor.\n", c->x86_model); } @@ -552,21 +550,13 @@ static int __init coretemp_init(void) goto exit_driver_unreg; } -#ifdef CONFIG_HOTPLUG_CPU register_hotcpu_notifier(&coretemp_cpu_notifier); -#endif return 0; -exit_devices_unreg: - mutex_lock(&pdev_list_mutex); - list_for_each_entry_safe(p, n, &pdev_list, list) { - platform_device_unregister(p->pdev); - list_del(&p->list); - kfree(p); - } - mutex_unlock(&pdev_list_mutex); exit_driver_unreg: +#ifndef CONFIG_HOTPLUG_CPU platform_driver_unregister(&coretemp_driver); +#endif exit: return err; } diff --git a/drivers/hwmon/f71882fg.c b/drivers/hwmon/f71882fg.c index 537841ef44b9..6207120dcd4d 100644 --- a/drivers/hwmon/f71882fg.c +++ b/drivers/hwmon/f71882fg.c @@ -45,6 +45,7 @@ #define SIO_REG_ADDR 0x60 /* Logical device address (2 bytes) */ #define SIO_FINTEK_ID 0x1934 /* Manufacturers ID */ +#define SIO_F71808_ID 0x0901 /* Chipset ID */ #define SIO_F71858_ID 0x0507 /* Chipset ID */ #define SIO_F71862_ID 0x0601 /* Chipset ID */ #define SIO_F71882_ID 0x0541 /* Chipset ID */ @@ -96,9 +97,10 @@ static unsigned short force_id; module_param(force_id, ushort, 0); MODULE_PARM_DESC(force_id, "Override the detected device ID"); -enum chips { f71858fg, f71862fg, f71882fg, f71889fg, f8000 }; +enum chips { f71808fg, f71858fg, f71862fg, f71882fg, f71889fg, f8000 }; static const char *f71882fg_names[] = { + "f71808fg", "f71858fg", "f71862fg", "f71882fg", @@ -306,8 +308,8 @@ static struct sensor_device_attribute_2 f71858fg_in_temp_attr[] = { SENSOR_ATTR_2(temp3_fault, S_IRUGO, show_temp_fault, NULL, 0, 2), }; -/* Temp and in attr common to the f71862fg, f71882fg and f71889fg */ -static struct sensor_device_attribute_2 fxxxx_in_temp_attr[] = { +/* In attr common to the f71862fg, f71882fg and f71889fg */ +static struct sensor_device_attribute_2 fxxxx_in_attr[] = { SENSOR_ATTR_2(in0_input, S_IRUGO, show_in, NULL, 0, 0), SENSOR_ATTR_2(in1_input, S_IRUGO, show_in, NULL, 0, 1), SENSOR_ATTR_2(in2_input, S_IRUGO, show_in, NULL, 0, 2), @@ -317,6 +319,22 @@ static struct sensor_device_attribute_2 fxxxx_in_temp_attr[] = { SENSOR_ATTR_2(in6_input, S_IRUGO, show_in, NULL, 0, 6), SENSOR_ATTR_2(in7_input, S_IRUGO, show_in, NULL, 0, 7), SENSOR_ATTR_2(in8_input, S_IRUGO, show_in, NULL, 0, 8), +}; + +/* In attr for the f71808fg */ +static struct sensor_device_attribute_2 f71808_in_attr[] = { + SENSOR_ATTR_2(in0_input, S_IRUGO, show_in, NULL, 0, 0), + SENSOR_ATTR_2(in1_input, S_IRUGO, show_in, NULL, 0, 1), + SENSOR_ATTR_2(in2_input, S_IRUGO, show_in, NULL, 0, 2), + SENSOR_ATTR_2(in3_input, S_IRUGO, show_in, NULL, 0, 3), + SENSOR_ATTR_2(in4_input, S_IRUGO, show_in, NULL, 0, 4), + SENSOR_ATTR_2(in5_input, S_IRUGO, show_in, NULL, 0, 5), + SENSOR_ATTR_2(in6_input, S_IRUGO, show_in, NULL, 0, 7), + SENSOR_ATTR_2(in7_input, S_IRUGO, show_in, NULL, 0, 8), +}; + +/* Temp attr common to the f71808fg, f71862fg, f71882fg and f71889fg */ +static struct sensor_device_attribute_2 fxxxx_temp_attr[] = { SENSOR_ATTR_2(temp1_input, S_IRUGO, show_temp, NULL, 0, 1), SENSOR_ATTR_2(temp1_max, S_IRUGO|S_IWUSR, show_temp_max, store_temp_max, 0, 1), @@ -355,6 +373,10 @@ static struct sensor_device_attribute_2 fxxxx_in_temp_attr[] = { store_temp_beep, 0, 6), SENSOR_ATTR_2(temp2_type, S_IRUGO, show_temp_type, 
NULL, 0, 2), SENSOR_ATTR_2(temp2_fault, S_IRUGO, show_temp_fault, NULL, 0, 2), +}; + +/* Temp and in attr common to the f71862fg, f71882fg and f71889fg */ +static struct sensor_device_attribute_2 f71862_temp_attr[] = { SENSOR_ATTR_2(temp3_input, S_IRUGO, show_temp, NULL, 0, 3), SENSOR_ATTR_2(temp3_max, S_IRUGO|S_IWUSR, show_temp_max, store_temp_max, 0, 3), @@ -989,6 +1011,11 @@ static struct f71882fg_data *f71882fg_update_device(struct device *dev) data->temp_type[1] = 6; break; } + } else if (data->type == f71808fg) { + reg = f71882fg_read8(data, F71882FG_REG_TEMP_TYPE); + data->temp_type[1] = (reg & 0x02) ? 2 : 4; + data->temp_type[2] = (reg & 0x04) ? 2 : 4; + } else { reg2 = f71882fg_read8(data, F71882FG_REG_PECI); if ((reg2 & 0x03) == 0x01) @@ -1871,7 +1898,8 @@ static ssize_t store_pwm_auto_point_temp(struct device *dev, val /= 1000; - if (data->type == f71889fg) + if (data->type == f71889fg + || data->type == f71808fg) val = SENSORS_LIMIT(val, -128, 127); else val = SENSORS_LIMIT(val, 0, 127); @@ -1974,8 +2002,28 @@ static int __devinit f71882fg_probe(struct platform_device *pdev) /* fall through! */ case f71862fg: err = f71882fg_create_sysfs_files(pdev, - fxxxx_in_temp_attr, - ARRAY_SIZE(fxxxx_in_temp_attr)); + f71862_temp_attr, + ARRAY_SIZE(f71862_temp_attr)); + if (err) + goto exit_unregister_sysfs; + err = f71882fg_create_sysfs_files(pdev, + fxxxx_in_attr, + ARRAY_SIZE(fxxxx_in_attr)); + if (err) + goto exit_unregister_sysfs; + err = f71882fg_create_sysfs_files(pdev, + fxxxx_temp_attr, + ARRAY_SIZE(fxxxx_temp_attr)); + break; + case f71808fg: + err = f71882fg_create_sysfs_files(pdev, + f71808_in_attr, + ARRAY_SIZE(f71808_in_attr)); + if (err) + goto exit_unregister_sysfs; + err = f71882fg_create_sysfs_files(pdev, + fxxxx_temp_attr, + ARRAY_SIZE(fxxxx_temp_attr)); break; case f8000: err = f71882fg_create_sysfs_files(pdev, @@ -2002,6 +2050,7 @@ static int __devinit f71882fg_probe(struct platform_device *pdev) case f71862fg: err = (data->pwm_enable & 0x15) != 0x15; break; + case f71808fg: case f71882fg: case f71889fg: err = 0; @@ -2047,6 +2096,7 @@ static int __devinit f71882fg_probe(struct platform_device *pdev) f8000_auto_pwm_attr, ARRAY_SIZE(f8000_auto_pwm_attr)); break; + case f71808fg: case f71889fg: for (i = 0; i < nr_fans; i++) { data->pwm_auto_point_mapping[i] = @@ -2126,8 +2176,22 @@ static int f71882fg_remove(struct platform_device *pdev) /* fall through! */ case f71862fg: f71882fg_remove_sysfs_files(pdev, - fxxxx_in_temp_attr, - ARRAY_SIZE(fxxxx_in_temp_attr)); + f71862_temp_attr, + ARRAY_SIZE(f71862_temp_attr)); + f71882fg_remove_sysfs_files(pdev, + fxxxx_in_attr, + ARRAY_SIZE(fxxxx_in_attr)); + f71882fg_remove_sysfs_files(pdev, + fxxxx_temp_attr, + ARRAY_SIZE(fxxxx_temp_attr)); + break; + case f71808fg: + f71882fg_remove_sysfs_files(pdev, + f71808_in_attr, + ARRAY_SIZE(f71808_in_attr)); + f71882fg_remove_sysfs_files(pdev, + fxxxx_temp_attr, + ARRAY_SIZE(fxxxx_temp_attr)); break; case f8000: f71882fg_remove_sysfs_files(pdev, @@ -2195,6 +2259,9 @@ static int __init f71882fg_find(int sioaddr, unsigned short *address, devid = force_id ? 
force_id : superio_inw(sioaddr, SIO_REG_DEVID); switch (devid) { + case SIO_F71808_ID: + sio_data->type = f71808fg; + break; case SIO_F71858_ID: sio_data->type = f71858fg; break; diff --git a/drivers/hwmon/hdaps.c b/drivers/hwmon/hdaps.c index be2d131e405c..bfd42f18924b 100644 --- a/drivers/hwmon/hdaps.c +++ b/drivers/hwmon/hdaps.c @@ -522,6 +522,7 @@ static struct dmi_system_id __initdata hdaps_whitelist[] = { HDAPS_DMI_MATCH_INVERT("IBM", "ThinkPad T42p", HDAPS_BOTH_AXES), HDAPS_DMI_MATCH_NORMAL("IBM", "ThinkPad T42"), HDAPS_DMI_MATCH_NORMAL("IBM", "ThinkPad T43"), + HDAPS_DMI_MATCH_INVERT("LENOVO", "ThinkPad T400", HDAPS_BOTH_AXES), HDAPS_DMI_MATCH_INVERT("LENOVO", "ThinkPad T60", HDAPS_BOTH_AXES), HDAPS_DMI_MATCH_INVERT("LENOVO", "ThinkPad T61p", HDAPS_BOTH_AXES), HDAPS_DMI_MATCH_INVERT("LENOVO", "ThinkPad T61", HDAPS_BOTH_AXES), diff --git a/drivers/hwmon/jc42.c b/drivers/hwmon/jc42.c new file mode 100644 index 000000000000..340fc78c8dde --- /dev/null +++ b/drivers/hwmon/jc42.c @@ -0,0 +1,593 @@ +/* + * jc42.c - driver for Jedec JC42.4 compliant temperature sensors + * + * Copyright (c) 2010 Ericsson AB. + * + * Derived from lm77.c by Andras BALI <drewie@freemail.hu>. + * + * JC42.4 compliant temperature sensors are typically used on memory modules. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/slab.h> +#include <linux/jiffies.h> +#include <linux/i2c.h> +#include <linux/hwmon.h> +#include <linux/hwmon-sysfs.h> +#include <linux/err.h> +#include <linux/mutex.h> + +/* Addresses to scan */ +static const unsigned short normal_i2c[] = { + 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, I2C_CLIENT_END }; + +/* JC42 registers. All registers are 16 bit. 
*/ +#define JC42_REG_CAP 0x00 +#define JC42_REG_CONFIG 0x01 +#define JC42_REG_TEMP_UPPER 0x02 +#define JC42_REG_TEMP_LOWER 0x03 +#define JC42_REG_TEMP_CRITICAL 0x04 +#define JC42_REG_TEMP 0x05 +#define JC42_REG_MANID 0x06 +#define JC42_REG_DEVICEID 0x07 + +/* Status bits in temperature register */ +#define JC42_ALARM_CRIT_BIT 15 +#define JC42_ALARM_MAX_BIT 14 +#define JC42_ALARM_MIN_BIT 13 + +/* Configuration register defines */ +#define JC42_CFG_CRIT_ONLY (1 << 2) +#define JC42_CFG_SHUTDOWN (1 << 8) +#define JC42_CFG_HYST_SHIFT 9 +#define JC42_CFG_HYST_MASK 0x03 + +/* Capabilities */ +#define JC42_CAP_RANGE (1 << 2) + +/* Manufacturer IDs */ +#define ADT_MANID 0x11d4 /* Analog Devices */ +#define MAX_MANID 0x004d /* Maxim */ +#define IDT_MANID 0x00b3 /* IDT */ +#define MCP_MANID 0x0054 /* Microchip */ +#define NXP_MANID 0x1131 /* NXP Semiconductors */ +#define ONS_MANID 0x1b09 /* ON Semiconductor */ +#define STM_MANID 0x104a /* ST Microelectronics */ + +/* Supported chips */ + +/* Analog Devices */ +#define ADT7408_DEVID 0x0801 +#define ADT7408_DEVID_MASK 0xffff + +/* IDT */ +#define TS3000B3_DEVID 0x2903 /* Also matches TSE2002B3 */ +#define TS3000B3_DEVID_MASK 0xffff + +/* Maxim */ +#define MAX6604_DEVID 0x3e00 +#define MAX6604_DEVID_MASK 0xffff + +/* Microchip */ +#define MCP98242_DEVID 0x2000 +#define MCP98242_DEVID_MASK 0xfffc + +#define MCP98243_DEVID 0x2100 +#define MCP98243_DEVID_MASK 0xfffc + +#define MCP9843_DEVID 0x0000 /* Also matches mcp9805 */ +#define MCP9843_DEVID_MASK 0xfffe + +/* NXP */ +#define SE97_DEVID 0xa200 +#define SE97_DEVID_MASK 0xfffc + +#define SE98_DEVID 0xa100 +#define SE98_DEVID_MASK 0xfffc + +/* ON Semiconductor */ +#define CAT6095_DEVID 0x0800 /* Also matches CAT34TS02 */ +#define CAT6095_DEVID_MASK 0xffe0 + +/* ST Microelectronics */ +#define STTS424_DEVID 0x0101 +#define STTS424_DEVID_MASK 0xffff + +#define STTS424E_DEVID 0x0000 +#define STTS424E_DEVID_MASK 0xfffe + +static u16 jc42_hysteresis[] = { 0, 1500, 3000, 6000 }; + +struct jc42_chips { + u16 manid; + u16 devid; + u16 devid_mask; +}; + +static struct jc42_chips jc42_chips[] = { + { ADT_MANID, ADT7408_DEVID, ADT7408_DEVID_MASK }, + { IDT_MANID, TS3000B3_DEVID, TS3000B3_DEVID_MASK }, + { MAX_MANID, MAX6604_DEVID, MAX6604_DEVID_MASK }, + { MCP_MANID, MCP98242_DEVID, MCP98242_DEVID_MASK }, + { MCP_MANID, MCP98243_DEVID, MCP98243_DEVID_MASK }, + { MCP_MANID, MCP9843_DEVID, MCP9843_DEVID_MASK }, + { NXP_MANID, SE97_DEVID, SE97_DEVID_MASK }, + { ONS_MANID, CAT6095_DEVID, CAT6095_DEVID_MASK }, + { NXP_MANID, SE98_DEVID, SE98_DEVID_MASK }, + { STM_MANID, STTS424_DEVID, STTS424_DEVID_MASK }, + { STM_MANID, STTS424E_DEVID, STTS424E_DEVID_MASK }, +}; + +/* Each client has this additional data */ +struct jc42_data { + struct device *hwmon_dev; + struct mutex update_lock; /* protect register access */ + bool extended; /* true if extended range supported */ + bool valid; + unsigned long last_updated; /* In jiffies */ + u16 orig_config; /* original configuration */ + u16 config; /* current configuration */ + u16 temp_input; /* Temperatures */ + u16 temp_crit; + u16 temp_min; + u16 temp_max; +}; + +static int jc42_probe(struct i2c_client *client, + const struct i2c_device_id *id); +static int jc42_detect(struct i2c_client *client, struct i2c_board_info *info); +static int jc42_remove(struct i2c_client *client); +static int jc42_read_value(struct i2c_client *client, u8 reg); +static int jc42_write_value(struct i2c_client *client, u8 reg, u16 value); + +static struct jc42_data *jc42_update_device(struct device 
*dev); + +static const struct i2c_device_id jc42_id[] = { + { "adt7408", 0 }, + { "cat94ts02", 0 }, + { "cat6095", 0 }, + { "jc42", 0 }, + { "max6604", 0 }, + { "mcp9805", 0 }, + { "mcp98242", 0 }, + { "mcp98243", 0 }, + { "mcp9843", 0 }, + { "se97", 0 }, + { "se97b", 0 }, + { "se98", 0 }, + { "stts424", 0 }, + { "tse2002b3", 0 }, + { "ts3000b3", 0 }, + { } +}; +MODULE_DEVICE_TABLE(i2c, jc42_id); + +#ifdef CONFIG_PM + +static int jc42_suspend(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct jc42_data *data = i2c_get_clientdata(client); + + data->config |= JC42_CFG_SHUTDOWN; + jc42_write_value(client, JC42_REG_CONFIG, data->config); + return 0; +} + +static int jc42_resume(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct jc42_data *data = i2c_get_clientdata(client); + + data->config &= ~JC42_CFG_SHUTDOWN; + jc42_write_value(client, JC42_REG_CONFIG, data->config); + return 0; +} + +static const struct dev_pm_ops jc42_dev_pm_ops = { + .suspend = jc42_suspend, + .resume = jc42_resume, +}; + +#define JC42_DEV_PM_OPS (&jc42_dev_pm_ops) +#else +#define JC42_DEV_PM_OPS NULL +#endif /* CONFIG_PM */ + +/* This is the driver that will be inserted */ +static struct i2c_driver jc42_driver = { + .class = I2C_CLASS_HWMON, + .driver = { + .name = "jc42", + .pm = JC42_DEV_PM_OPS, + }, + .probe = jc42_probe, + .remove = jc42_remove, + .id_table = jc42_id, + .detect = jc42_detect, + .address_list = normal_i2c, +}; + +#define JC42_TEMP_MIN_EXTENDED (-40000) +#define JC42_TEMP_MIN 0 +#define JC42_TEMP_MAX 125000 + +static u16 jc42_temp_to_reg(int temp, bool extended) +{ + int ntemp = SENSORS_LIMIT(temp, + extended ? JC42_TEMP_MIN_EXTENDED : + JC42_TEMP_MIN, JC42_TEMP_MAX); + + /* convert from 0.001 to 0.0625 resolution */ + return (ntemp * 2 / 125) & 0x1fff; +} + +static int jc42_temp_from_reg(s16 reg) +{ + reg &= 0x1fff; + + /* sign extend register */ + if (reg & 0x1000) + reg |= 0xf000; + + /* convert from 0.0625 to 0.001 resolution */ + return reg * 125 / 2; +} + +/* sysfs stuff */ + +/* read routines for temperature limits */ +#define show(value) \ +static ssize_t show_##value(struct device *dev, \ + struct device_attribute *attr, \ + char *buf) \ +{ \ + struct jc42_data *data = jc42_update_device(dev); \ + if (IS_ERR(data)) \ + return PTR_ERR(data); \ + return sprintf(buf, "%d\n", jc42_temp_from_reg(data->value)); \ +} + +show(temp_input); +show(temp_crit); +show(temp_min); +show(temp_max); + +/* read routines for hysteresis values */ +static ssize_t show_temp_crit_hyst(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct jc42_data *data = jc42_update_device(dev); + int temp, hyst; + + if (IS_ERR(data)) + return PTR_ERR(data); + + temp = jc42_temp_from_reg(data->temp_crit); + hyst = jc42_hysteresis[(data->config >> JC42_CFG_HYST_SHIFT) + & JC42_CFG_HYST_MASK]; + return sprintf(buf, "%d\n", temp - hyst); +} + +static ssize_t show_temp_max_hyst(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct jc42_data *data = jc42_update_device(dev); + int temp, hyst; + + if (IS_ERR(data)) + return PTR_ERR(data); + + temp = jc42_temp_from_reg(data->temp_max); + hyst = jc42_hysteresis[(data->config >> JC42_CFG_HYST_SHIFT) + & JC42_CFG_HYST_MASK]; + return sprintf(buf, "%d\n", temp - hyst); +} + +/* write routines */ +#define set(value, reg) \ +static ssize_t set_##value(struct device *dev, \ + struct device_attribute *attr, \ + const char *buf, size_t count) \ +{ \ + struct i2c_client *client = 
to_i2c_client(dev); \ + struct jc42_data *data = i2c_get_clientdata(client); \ + int err, ret = count; \ + long val; \ + if (strict_strtol(buf, 10, &val) < 0) \ + return -EINVAL; \ + mutex_lock(&data->update_lock); \ + data->value = jc42_temp_to_reg(val, data->extended); \ + err = jc42_write_value(client, reg, data->value); \ + if (err < 0) \ + ret = err; \ + mutex_unlock(&data->update_lock); \ + return ret; \ +} + +set(temp_min, JC42_REG_TEMP_LOWER); +set(temp_max, JC42_REG_TEMP_UPPER); +set(temp_crit, JC42_REG_TEMP_CRITICAL); + +/* JC42.4 compliant chips only support four hysteresis values. + * Pick best choice and go from there. */ +static ssize_t set_temp_crit_hyst(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct i2c_client *client = to_i2c_client(dev); + struct jc42_data *data = i2c_get_clientdata(client); + long val; + int diff, hyst; + int err; + int ret = count; + + if (strict_strtoul(buf, 10, &val) < 0) + return -EINVAL; + + diff = jc42_temp_from_reg(data->temp_crit) - val; + hyst = 0; + if (diff > 0) { + if (diff < 2250) + hyst = 1; /* 1.5 degrees C */ + else if (diff < 4500) + hyst = 2; /* 3.0 degrees C */ + else + hyst = 3; /* 6.0 degrees C */ + } + + mutex_lock(&data->update_lock); + data->config = (data->config + & ~(JC42_CFG_HYST_MASK << JC42_CFG_HYST_SHIFT)) + | (hyst << JC42_CFG_HYST_SHIFT); + err = jc42_write_value(client, JC42_REG_CONFIG, data->config); + if (err < 0) + ret = err; + mutex_unlock(&data->update_lock); + return ret; +} + +static ssize_t show_alarm(struct device *dev, + struct device_attribute *attr, char *buf) +{ + u16 bit = to_sensor_dev_attr(attr)->index; + struct jc42_data *data = jc42_update_device(dev); + u16 val; + + if (IS_ERR(data)) + return PTR_ERR(data); + + val = data->temp_input; + if (bit != JC42_ALARM_CRIT_BIT && (data->config & JC42_CFG_CRIT_ONLY)) + val = 0; + return sprintf(buf, "%u\n", (val >> bit) & 1); +} + +static DEVICE_ATTR(temp1_input, S_IRUGO, + show_temp_input, NULL); +static DEVICE_ATTR(temp1_crit, S_IWUSR | S_IRUGO, + show_temp_crit, set_temp_crit); +static DEVICE_ATTR(temp1_min, S_IWUSR | S_IRUGO, + show_temp_min, set_temp_min); +static DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO, + show_temp_max, set_temp_max); + +static DEVICE_ATTR(temp1_crit_hyst, S_IWUSR | S_IRUGO, + show_temp_crit_hyst, set_temp_crit_hyst); +static DEVICE_ATTR(temp1_max_hyst, S_IRUGO, + show_temp_max_hyst, NULL); + +static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO, show_alarm, NULL, + JC42_ALARM_CRIT_BIT); +static SENSOR_DEVICE_ATTR(temp1_min_alarm, S_IRUGO, show_alarm, NULL, + JC42_ALARM_MIN_BIT); +static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, show_alarm, NULL, + JC42_ALARM_MAX_BIT); + +static struct attribute *jc42_attributes[] = { + &dev_attr_temp1_input.attr, + &dev_attr_temp1_crit.attr, + &dev_attr_temp1_min.attr, + &dev_attr_temp1_max.attr, + &dev_attr_temp1_crit_hyst.attr, + &dev_attr_temp1_max_hyst.attr, + &sensor_dev_attr_temp1_crit_alarm.dev_attr.attr, + &sensor_dev_attr_temp1_min_alarm.dev_attr.attr, + &sensor_dev_attr_temp1_max_alarm.dev_attr.attr, + NULL +}; + +static const struct attribute_group jc42_group = { + .attrs = jc42_attributes, +}; + +/* Return 0 if detection is successful, -ENODEV otherwise */ +static int jc42_detect(struct i2c_client *new_client, + struct i2c_board_info *info) +{ + struct i2c_adapter *adapter = new_client->adapter; + int i, config, cap, manid, devid; + + if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA | + I2C_FUNC_SMBUS_WORD_DATA)) + return 
-ENODEV; + + cap = jc42_read_value(new_client, JC42_REG_CAP); + config = jc42_read_value(new_client, JC42_REG_CONFIG); + manid = jc42_read_value(new_client, JC42_REG_MANID); + devid = jc42_read_value(new_client, JC42_REG_DEVICEID); + + if (cap < 0 || config < 0 || manid < 0 || devid < 0) + return -ENODEV; + + if ((cap & 0xff00) || (config & 0xf800)) + return -ENODEV; + + for (i = 0; i < ARRAY_SIZE(jc42_chips); i++) { + struct jc42_chips *chip = &jc42_chips[i]; + if (manid == chip->manid && + (devid & chip->devid_mask) == chip->devid) { + strlcpy(info->type, "jc42", I2C_NAME_SIZE); + return 0; + } + } + return -ENODEV; +} + +static int jc42_probe(struct i2c_client *new_client, + const struct i2c_device_id *id) +{ + struct jc42_data *data; + int config, cap, err; + + data = kzalloc(sizeof(struct jc42_data), GFP_KERNEL); + if (!data) { + err = -ENOMEM; + goto exit; + } + + i2c_set_clientdata(new_client, data); + mutex_init(&data->update_lock); + + cap = jc42_read_value(new_client, JC42_REG_CAP); + if (cap < 0) { + err = -EINVAL; + goto exit_free; + } + data->extended = !!(cap & JC42_CAP_RANGE); + + config = jc42_read_value(new_client, JC42_REG_CONFIG); + if (config < 0) { + err = -EINVAL; + goto exit_free; + } + data->orig_config = config; + if (config & JC42_CFG_SHUTDOWN) { + config &= ~JC42_CFG_SHUTDOWN; + jc42_write_value(new_client, JC42_REG_CONFIG, config); + } + data->config = config; + + /* Register sysfs hooks */ + err = sysfs_create_group(&new_client->dev.kobj, &jc42_group); + if (err) + goto exit_free; + + data->hwmon_dev = hwmon_device_register(&new_client->dev); + if (IS_ERR(data->hwmon_dev)) { + err = PTR_ERR(data->hwmon_dev); + goto exit_remove; + } + + return 0; + +exit_remove: + sysfs_remove_group(&new_client->dev.kobj, &jc42_group); +exit_free: + kfree(data); +exit: + return err; +} + +static int jc42_remove(struct i2c_client *client) +{ + struct jc42_data *data = i2c_get_clientdata(client); + hwmon_device_unregister(data->hwmon_dev); + sysfs_remove_group(&client->dev.kobj, &jc42_group); + if (data->config != data->orig_config) + jc42_write_value(client, JC42_REG_CONFIG, data->orig_config); + kfree(data); + return 0; +} + +/* All registers are word-sized. 
*/ +static int jc42_read_value(struct i2c_client *client, u8 reg) +{ + int ret = i2c_smbus_read_word_data(client, reg); + if (ret < 0) + return ret; + return swab16(ret); +} + +static int jc42_write_value(struct i2c_client *client, u8 reg, u16 value) +{ + return i2c_smbus_write_word_data(client, reg, swab16(value)); +} + +static struct jc42_data *jc42_update_device(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct jc42_data *data = i2c_get_clientdata(client); + struct jc42_data *ret = data; + int val; + + mutex_lock(&data->update_lock); + + if (time_after(jiffies, data->last_updated + HZ) || !data->valid) { + val = jc42_read_value(client, JC42_REG_TEMP); + if (val < 0) { + ret = ERR_PTR(val); + goto abort; + } + data->temp_input = val; + + val = jc42_read_value(client, JC42_REG_TEMP_CRITICAL); + if (val < 0) { + ret = ERR_PTR(val); + goto abort; + } + data->temp_crit = val; + + val = jc42_read_value(client, JC42_REG_TEMP_LOWER); + if (val < 0) { + ret = ERR_PTR(val); + goto abort; + } + data->temp_min = val; + + val = jc42_read_value(client, JC42_REG_TEMP_UPPER); + if (val < 0) { + ret = ERR_PTR(val); + goto abort; + } + data->temp_max = val; + + data->last_updated = jiffies; + data->valid = true; + } +abort: + mutex_unlock(&data->update_lock); + return ret; +} + +static int __init sensors_jc42_init(void) +{ + return i2c_add_driver(&jc42_driver); +} + +static void __exit sensors_jc42_exit(void) +{ + i2c_del_driver(&jc42_driver); +} + +MODULE_AUTHOR("Guenter Roeck <guenter.roeck@ericsson.com>"); +MODULE_DESCRIPTION("JC42 driver"); +MODULE_LICENSE("GPL"); + +module_init(sensors_jc42_init); +module_exit(sensors_jc42_exit); diff --git a/drivers/hwmon/smm665.c b/drivers/hwmon/smm665.c new file mode 100644 index 000000000000..425df5bccd45 --- /dev/null +++ b/drivers/hwmon/smm665.c @@ -0,0 +1,743 @@ +/* + * Driver for SMM665 Power Controller / Monitor + * + * Copyright (C) 2010 Ericsson AB. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This driver should also work for SMM465, SMM764, and SMM766, but is untested + * for those chips. Only monitoring functionality is implemented. 
+ * + * Datasheets: + * http://www.summitmicro.com/prod_select/summary/SMM665/SMM665B_2089_20.pdf + * http://www.summitmicro.com/prod_select/summary/SMM766B/SMM766B_2122.pdf + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/err.h> +#include <linux/slab.h> +#include <linux/i2c.h> +#include <linux/hwmon.h> +#include <linux/hwmon-sysfs.h> +#include <linux/delay.h> + +/* Internal reference voltage (VREF, x 1000 */ +#define SMM665_VREF_ADC_X1000 1250 + +/* module parameters */ +static int vref = SMM665_VREF_ADC_X1000; +module_param(vref, int, 0); +MODULE_PARM_DESC(vref, "Reference voltage in mV"); + +enum chips { smm465, smm665, smm665c, smm764, smm766 }; + +/* + * ADC channel addresses + */ +#define SMM665_MISC16_ADC_DATA_A 0x00 +#define SMM665_MISC16_ADC_DATA_B 0x01 +#define SMM665_MISC16_ADC_DATA_C 0x02 +#define SMM665_MISC16_ADC_DATA_D 0x03 +#define SMM665_MISC16_ADC_DATA_E 0x04 +#define SMM665_MISC16_ADC_DATA_F 0x05 +#define SMM665_MISC16_ADC_DATA_VDD 0x06 +#define SMM665_MISC16_ADC_DATA_12V 0x07 +#define SMM665_MISC16_ADC_DATA_INT_TEMP 0x08 +#define SMM665_MISC16_ADC_DATA_AIN1 0x09 +#define SMM665_MISC16_ADC_DATA_AIN2 0x0a + +/* + * Command registers + */ +#define SMM665_MISC8_CMD_STS 0x80 +#define SMM665_MISC8_STATUS1 0x81 +#define SMM665_MISC8_STATUSS2 0x82 +#define SMM665_MISC8_IO_POLARITY 0x83 +#define SMM665_MISC8_PUP_POLARITY 0x84 +#define SMM665_MISC8_ADOC_STATUS1 0x85 +#define SMM665_MISC8_ADOC_STATUS2 0x86 +#define SMM665_MISC8_WRITE_PROT 0x87 +#define SMM665_MISC8_STS_TRACK 0x88 + +/* + * Configuration registers and register groups + */ +#define SMM665_ADOC_ENABLE 0x0d +#define SMM665_LIMIT_BASE 0x80 /* First limit register */ + +/* + * Limit register bit masks + */ +#define SMM665_TRIGGER_RST 0x8000 +#define SMM665_TRIGGER_HEALTHY 0x4000 +#define SMM665_TRIGGER_POWEROFF 0x2000 +#define SMM665_TRIGGER_SHUTDOWN 0x1000 +#define SMM665_ADC_MASK 0x03ff + +#define smm665_is_critical(lim) ((lim) & (SMM665_TRIGGER_RST \ + | SMM665_TRIGGER_POWEROFF \ + | SMM665_TRIGGER_SHUTDOWN)) +/* + * Fault register bit definitions + * Values are merged from status registers 1/2, + * with status register 1 providing the upper 8 bits. + */ +#define SMM665_FAULT_A 0x0001 +#define SMM665_FAULT_B 0x0002 +#define SMM665_FAULT_C 0x0004 +#define SMM665_FAULT_D 0x0008 +#define SMM665_FAULT_E 0x0010 +#define SMM665_FAULT_F 0x0020 +#define SMM665_FAULT_VDD 0x0040 +#define SMM665_FAULT_12V 0x0080 +#define SMM665_FAULT_TEMP 0x0100 +#define SMM665_FAULT_AIN1 0x0200 +#define SMM665_FAULT_AIN2 0x0400 + +/* + * I2C Register addresses + * + * The configuration register needs to be the configured base register. + * The command/status register address is derived from it. + */ +#define SMM665_REGMASK 0x78 +#define SMM665_CMDREG_BASE 0x48 +#define SMM665_CONFREG_BASE 0x50 + +/* + * Equations given by chip manufacturer to calculate voltage/temperature values + * vref = Reference voltage on VREF_ADC pin (module parameter) + * adc = 10bit ADC value read back from registers + */ + +/* Voltage A-F and VDD */ +#define SMM665_VMON_ADC_TO_VOLTS(adc) ((adc) * vref / 256) + +/* Voltage 12VIN */ +#define SMM665_12VIN_ADC_TO_VOLTS(adc) ((adc) * vref * 3 / 256) + +/* Voltage AIN1, AIN2 */ +#define SMM665_AIN_ADC_TO_VOLTS(adc) ((adc) * vref / 512) + +/* Temp Sensor */ +#define SMM665_TEMP_ADC_TO_CELSIUS(adc) ((adc) <= 511) ? 
\ + ((int)(adc) * 1000 / 4) : \ + (((int)(adc) - 0x400) * 1000 / 4) + +#define SMM665_NUM_ADC 11 + +/* + * Chip dependent ADC conversion time, in uS + */ +#define SMM665_ADC_WAIT_SMM665 70 +#define SMM665_ADC_WAIT_SMM766 185 + +struct smm665_data { + enum chips type; + int conversion_time; /* ADC conversion time */ + struct device *hwmon_dev; + struct mutex update_lock; + bool valid; + unsigned long last_updated; /* in jiffies */ + u16 adc[SMM665_NUM_ADC]; /* adc values (raw) */ + u16 faults; /* fault status */ + /* The following values are in mV */ + int critical_min_limit[SMM665_NUM_ADC]; + int alarm_min_limit[SMM665_NUM_ADC]; + int critical_max_limit[SMM665_NUM_ADC]; + int alarm_max_limit[SMM665_NUM_ADC]; + struct i2c_client *cmdreg; +}; + +/* + * smm665_read16() + * + * Read 16 bit value from <reg>, <reg+1>. Upper 8 bits are in <reg>. + */ +static int smm665_read16(struct i2c_client *client, int reg) +{ + int rv, val; + + rv = i2c_smbus_read_byte_data(client, reg); + if (rv < 0) + return rv; + val = rv << 8; + rv = i2c_smbus_read_byte_data(client, reg + 1); + if (rv < 0) + return rv; + val |= rv; + return val; +} + +/* + * Read adc value. + */ +static int smm665_read_adc(struct smm665_data *data, int adc) +{ + struct i2c_client *client = data->cmdreg; + int rv; + int radc; + + /* + * Algorithm for reading ADC, per SMM665 datasheet + * + * {[S][addr][W][Ack]} {[offset][Ack]} {[S][addr][R][Nack]} + * [wait conversion time] + * {[S][addr][R][Ack]} {[datahi][Ack]} {[datalo][Ack][P]} + * + * To implement the first part of this exchange, + * do a full read transaction and expect a failure/Nack. + * This sets up the address pointer on the SMM665 + * and starts the ADC conversion. + * Then do a two-byte read transaction. + */ + rv = i2c_smbus_read_byte_data(client, adc << 3); + if (rv != -ENXIO) { + /* + * We expect ENXIO to reflect NACK + * (per Documentation/i2c/fault-codes). + * Everything else is an error. + */ + dev_dbg(&client->dev, + "Unexpected return code %d when setting ADC index", rv); + return (rv < 0) ? rv : -EIO; + } + + udelay(data->conversion_time); + + /* + * Now read two bytes. + * + * Neither i2c_smbus_read_byte() nor + * i2c_smbus_read_block_data() worked here, + * so use i2c_smbus_read_word_data() instead. + * We could also try to use i2c_master_recv(), + * but that is not always supported. + */ + rv = i2c_smbus_read_word_data(client, 0); + if (rv < 0) { + dev_dbg(&client->dev, "Failed to read ADC value: error %d", rv); + return -1; + } + /* + * Validate/verify readback adc channel (in bit 11..14). + * High byte is in lower 8 bit of rv, so only shift by 3. + */ + radc = (rv >> 3) & 0x0f; + if (radc != adc) { + dev_dbg(&client->dev, "Unexpected RADC: Expected %d got %d", + adc, radc); + return -EIO; + } + /* + * Chip replies with H/L, while SMBus expects L/H. + * Thus, byte order is reversed, and we have to swap + * the result. 
+ */ + rv = swab16(rv) & SMM665_ADC_MASK; + + return rv; +} + +static struct smm665_data *smm665_update_device(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct smm665_data *data = i2c_get_clientdata(client); + struct smm665_data *ret = data; + + mutex_lock(&data->update_lock); + + if (time_after(jiffies, data->last_updated + HZ) || !data->valid) { + int i, val; + + /* + * read status registers + */ + val = smm665_read16(client, SMM665_MISC8_STATUS1); + if (unlikely(val < 0)) { + ret = ERR_PTR(val); + goto abort; + } + data->faults = val; + + /* Read adc registers */ + for (i = 0; i < SMM665_NUM_ADC; i++) { + val = smm665_read_adc(data, i); + if (unlikely(val < 0)) { + ret = ERR_PTR(val); + goto abort; + } + data->adc[i] = val; + } + data->last_updated = jiffies; + data->valid = 1; + } +abort: + mutex_unlock(&data->update_lock); + return ret; +} + +/* Return converted value from given adc */ +static int smm665_convert(u16 adcval, int index) +{ + int val = 0; + + switch (index) { + case SMM665_MISC16_ADC_DATA_12V: + val = SMM665_12VIN_ADC_TO_VOLTS(adcval & SMM665_ADC_MASK); + break; + + case SMM665_MISC16_ADC_DATA_VDD: + case SMM665_MISC16_ADC_DATA_A: + case SMM665_MISC16_ADC_DATA_B: + case SMM665_MISC16_ADC_DATA_C: + case SMM665_MISC16_ADC_DATA_D: + case SMM665_MISC16_ADC_DATA_E: + case SMM665_MISC16_ADC_DATA_F: + val = SMM665_VMON_ADC_TO_VOLTS(adcval & SMM665_ADC_MASK); + break; + + case SMM665_MISC16_ADC_DATA_AIN1: + case SMM665_MISC16_ADC_DATA_AIN2: + val = SMM665_AIN_ADC_TO_VOLTS(adcval & SMM665_ADC_MASK); + break; + + case SMM665_MISC16_ADC_DATA_INT_TEMP: + val = SMM665_TEMP_ADC_TO_CELSIUS(adcval & SMM665_ADC_MASK); + break; + + default: + /* If we get here, the developer messed up */ + WARN_ON_ONCE(1); + break; + } + + return val; +} + +static int smm665_get_min(struct device *dev, int index) +{ + struct i2c_client *client = to_i2c_client(dev); + struct smm665_data *data = i2c_get_clientdata(client); + + return data->alarm_min_limit[index]; +} + +static int smm665_get_max(struct device *dev, int index) +{ + struct i2c_client *client = to_i2c_client(dev); + struct smm665_data *data = i2c_get_clientdata(client); + + return data->alarm_max_limit[index]; +} + +static int smm665_get_lcrit(struct device *dev, int index) +{ + struct i2c_client *client = to_i2c_client(dev); + struct smm665_data *data = i2c_get_clientdata(client); + + return data->critical_min_limit[index]; +} + +static int smm665_get_crit(struct device *dev, int index) +{ + struct i2c_client *client = to_i2c_client(dev); + struct smm665_data *data = i2c_get_clientdata(client); + + return data->critical_max_limit[index]; +} + +static ssize_t smm665_show_crit_alarm(struct device *dev, + struct device_attribute *da, char *buf) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + struct smm665_data *data = smm665_update_device(dev); + int val = 0; + + if (IS_ERR(data)) + return PTR_ERR(data); + + if (data->faults & (1 << attr->index)) + val = 1; + + return snprintf(buf, PAGE_SIZE, "%d\n", val); +} + +static ssize_t smm665_show_input(struct device *dev, + struct device_attribute *da, char *buf) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + struct smm665_data *data = smm665_update_device(dev); + int adc = attr->index; + int val; + + if (IS_ERR(data)) + return PTR_ERR(data); + + val = smm665_convert(data->adc[adc], adc); + return snprintf(buf, PAGE_SIZE, "%d\n", val); +} + +#define SMM665_SHOW(what) \ + static ssize_t smm665_show_##what(struct device *dev, \ + 
struct device_attribute *da, char *buf) \ +{ \ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); \ + const int val = smm665_get_##what(dev, attr->index); \ + return snprintf(buf, PAGE_SIZE, "%d\n", val); \ +} + +SMM665_SHOW(min); +SMM665_SHOW(max); +SMM665_SHOW(lcrit); +SMM665_SHOW(crit); + +/* These macros are used below in constructing device attribute objects + * for use with sysfs_create_group() to make a sysfs device file + * for each register. + */ + +#define SMM665_ATTR(name, type, cmd_idx) \ + static SENSOR_DEVICE_ATTR(name##_##type, S_IRUGO, \ + smm665_show_##type, NULL, cmd_idx) + +/* Construct a sensor_device_attribute structure for each register */ + +/* Input voltages */ +SMM665_ATTR(in1, input, SMM665_MISC16_ADC_DATA_12V); +SMM665_ATTR(in2, input, SMM665_MISC16_ADC_DATA_VDD); +SMM665_ATTR(in3, input, SMM665_MISC16_ADC_DATA_A); +SMM665_ATTR(in4, input, SMM665_MISC16_ADC_DATA_B); +SMM665_ATTR(in5, input, SMM665_MISC16_ADC_DATA_C); +SMM665_ATTR(in6, input, SMM665_MISC16_ADC_DATA_D); +SMM665_ATTR(in7, input, SMM665_MISC16_ADC_DATA_E); +SMM665_ATTR(in8, input, SMM665_MISC16_ADC_DATA_F); +SMM665_ATTR(in9, input, SMM665_MISC16_ADC_DATA_AIN1); +SMM665_ATTR(in10, input, SMM665_MISC16_ADC_DATA_AIN2); + +/* Input voltages min */ +SMM665_ATTR(in1, min, SMM665_MISC16_ADC_DATA_12V); +SMM665_ATTR(in2, min, SMM665_MISC16_ADC_DATA_VDD); +SMM665_ATTR(in3, min, SMM665_MISC16_ADC_DATA_A); +SMM665_ATTR(in4, min, SMM665_MISC16_ADC_DATA_B); +SMM665_ATTR(in5, min, SMM665_MISC16_ADC_DATA_C); +SMM665_ATTR(in6, min, SMM665_MISC16_ADC_DATA_D); +SMM665_ATTR(in7, min, SMM665_MISC16_ADC_DATA_E); +SMM665_ATTR(in8, min, SMM665_MISC16_ADC_DATA_F); +SMM665_ATTR(in9, min, SMM665_MISC16_ADC_DATA_AIN1); +SMM665_ATTR(in10, min, SMM665_MISC16_ADC_DATA_AIN2); + +/* Input voltages max */ +SMM665_ATTR(in1, max, SMM665_MISC16_ADC_DATA_12V); +SMM665_ATTR(in2, max, SMM665_MISC16_ADC_DATA_VDD); +SMM665_ATTR(in3, max, SMM665_MISC16_ADC_DATA_A); +SMM665_ATTR(in4, max, SMM665_MISC16_ADC_DATA_B); +SMM665_ATTR(in5, max, SMM665_MISC16_ADC_DATA_C); +SMM665_ATTR(in6, max, SMM665_MISC16_ADC_DATA_D); +SMM665_ATTR(in7, max, SMM665_MISC16_ADC_DATA_E); +SMM665_ATTR(in8, max, SMM665_MISC16_ADC_DATA_F); +SMM665_ATTR(in9, max, SMM665_MISC16_ADC_DATA_AIN1); +SMM665_ATTR(in10, max, SMM665_MISC16_ADC_DATA_AIN2); + +/* Input voltages lcrit */ +SMM665_ATTR(in1, lcrit, SMM665_MISC16_ADC_DATA_12V); +SMM665_ATTR(in2, lcrit, SMM665_MISC16_ADC_DATA_VDD); +SMM665_ATTR(in3, lcrit, SMM665_MISC16_ADC_DATA_A); +SMM665_ATTR(in4, lcrit, SMM665_MISC16_ADC_DATA_B); +SMM665_ATTR(in5, lcrit, SMM665_MISC16_ADC_DATA_C); +SMM665_ATTR(in6, lcrit, SMM665_MISC16_ADC_DATA_D); +SMM665_ATTR(in7, lcrit, SMM665_MISC16_ADC_DATA_E); +SMM665_ATTR(in8, lcrit, SMM665_MISC16_ADC_DATA_F); +SMM665_ATTR(in9, lcrit, SMM665_MISC16_ADC_DATA_AIN1); +SMM665_ATTR(in10, lcrit, SMM665_MISC16_ADC_DATA_AIN2); + +/* Input voltages crit */ +SMM665_ATTR(in1, crit, SMM665_MISC16_ADC_DATA_12V); +SMM665_ATTR(in2, crit, SMM665_MISC16_ADC_DATA_VDD); +SMM665_ATTR(in3, crit, SMM665_MISC16_ADC_DATA_A); +SMM665_ATTR(in4, crit, SMM665_MISC16_ADC_DATA_B); +SMM665_ATTR(in5, crit, SMM665_MISC16_ADC_DATA_C); +SMM665_ATTR(in6, crit, SMM665_MISC16_ADC_DATA_D); +SMM665_ATTR(in7, crit, SMM665_MISC16_ADC_DATA_E); +SMM665_ATTR(in8, crit, SMM665_MISC16_ADC_DATA_F); +SMM665_ATTR(in9, crit, SMM665_MISC16_ADC_DATA_AIN1); +SMM665_ATTR(in10, crit, SMM665_MISC16_ADC_DATA_AIN2); + +/* critical alarms */ +SMM665_ATTR(in1, crit_alarm, SMM665_FAULT_12V); +SMM665_ATTR(in2, crit_alarm, SMM665_FAULT_VDD); 
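+/* each crit_alarm index is a fault bit, tested against data->faults in smm665_show_crit_alarm() */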
+SMM665_ATTR(in3, crit_alarm, SMM665_FAULT_A); +SMM665_ATTR(in4, crit_alarm, SMM665_FAULT_B); +SMM665_ATTR(in5, crit_alarm, SMM665_FAULT_C); +SMM665_ATTR(in6, crit_alarm, SMM665_FAULT_D); +SMM665_ATTR(in7, crit_alarm, SMM665_FAULT_E); +SMM665_ATTR(in8, crit_alarm, SMM665_FAULT_F); +SMM665_ATTR(in9, crit_alarm, SMM665_FAULT_AIN1); +SMM665_ATTR(in10, crit_alarm, SMM665_FAULT_AIN2); + +/* Temperature */ +SMM665_ATTR(temp1, input, SMM665_MISC16_ADC_DATA_INT_TEMP); +SMM665_ATTR(temp1, min, SMM665_MISC16_ADC_DATA_INT_TEMP); +SMM665_ATTR(temp1, max, SMM665_MISC16_ADC_DATA_INT_TEMP); +SMM665_ATTR(temp1, lcrit, SMM665_MISC16_ADC_DATA_INT_TEMP); +SMM665_ATTR(temp1, crit, SMM665_MISC16_ADC_DATA_INT_TEMP); +SMM665_ATTR(temp1, crit_alarm, SMM665_FAULT_TEMP); + +/* + * Finally, construct an array of pointers to members of the above objects, + * as required for sysfs_create_group() + */ +static struct attribute *smm665_attributes[] = { + &sensor_dev_attr_in1_input.dev_attr.attr, + &sensor_dev_attr_in1_min.dev_attr.attr, + &sensor_dev_attr_in1_max.dev_attr.attr, + &sensor_dev_attr_in1_lcrit.dev_attr.attr, + &sensor_dev_attr_in1_crit.dev_attr.attr, + &sensor_dev_attr_in1_crit_alarm.dev_attr.attr, + + &sensor_dev_attr_in2_input.dev_attr.attr, + &sensor_dev_attr_in2_min.dev_attr.attr, + &sensor_dev_attr_in2_max.dev_attr.attr, + &sensor_dev_attr_in2_lcrit.dev_attr.attr, + &sensor_dev_attr_in2_crit.dev_attr.attr, + &sensor_dev_attr_in2_crit_alarm.dev_attr.attr, + + &sensor_dev_attr_in3_input.dev_attr.attr, + &sensor_dev_attr_in3_min.dev_attr.attr, + &sensor_dev_attr_in3_max.dev_attr.attr, + &sensor_dev_attr_in3_lcrit.dev_attr.attr, + &sensor_dev_attr_in3_crit.dev_attr.attr, + &sensor_dev_attr_in3_crit_alarm.dev_attr.attr, + + &sensor_dev_attr_in4_input.dev_attr.attr, + &sensor_dev_attr_in4_min.dev_attr.attr, + &sensor_dev_attr_in4_max.dev_attr.attr, + &sensor_dev_attr_in4_lcrit.dev_attr.attr, + &sensor_dev_attr_in4_crit.dev_attr.attr, + &sensor_dev_attr_in4_crit_alarm.dev_attr.attr, + + &sensor_dev_attr_in5_input.dev_attr.attr, + &sensor_dev_attr_in5_min.dev_attr.attr, + &sensor_dev_attr_in5_max.dev_attr.attr, + &sensor_dev_attr_in5_lcrit.dev_attr.attr, + &sensor_dev_attr_in5_crit.dev_attr.attr, + &sensor_dev_attr_in5_crit_alarm.dev_attr.attr, + + &sensor_dev_attr_in6_input.dev_attr.attr, + &sensor_dev_attr_in6_min.dev_attr.attr, + &sensor_dev_attr_in6_max.dev_attr.attr, + &sensor_dev_attr_in6_lcrit.dev_attr.attr, + &sensor_dev_attr_in6_crit.dev_attr.attr, + &sensor_dev_attr_in6_crit_alarm.dev_attr.attr, + + &sensor_dev_attr_in7_input.dev_attr.attr, + &sensor_dev_attr_in7_min.dev_attr.attr, + &sensor_dev_attr_in7_max.dev_attr.attr, + &sensor_dev_attr_in7_lcrit.dev_attr.attr, + &sensor_dev_attr_in7_crit.dev_attr.attr, + &sensor_dev_attr_in7_crit_alarm.dev_attr.attr, + + &sensor_dev_attr_in8_input.dev_attr.attr, + &sensor_dev_attr_in8_min.dev_attr.attr, + &sensor_dev_attr_in8_max.dev_attr.attr, + &sensor_dev_attr_in8_lcrit.dev_attr.attr, + &sensor_dev_attr_in8_crit.dev_attr.attr, + &sensor_dev_attr_in8_crit_alarm.dev_attr.attr, + + &sensor_dev_attr_in9_input.dev_attr.attr, + &sensor_dev_attr_in9_min.dev_attr.attr, + &sensor_dev_attr_in9_max.dev_attr.attr, + &sensor_dev_attr_in9_lcrit.dev_attr.attr, + &sensor_dev_attr_in9_crit.dev_attr.attr, + &sensor_dev_attr_in9_crit_alarm.dev_attr.attr, + + &sensor_dev_attr_in10_input.dev_attr.attr, + &sensor_dev_attr_in10_min.dev_attr.attr, + &sensor_dev_attr_in10_max.dev_attr.attr, + &sensor_dev_attr_in10_lcrit.dev_attr.attr, + &sensor_dev_attr_in10_crit.dev_attr.attr, + 
&sensor_dev_attr_in10_crit_alarm.dev_attr.attr, + + &sensor_dev_attr_temp1_input.dev_attr.attr, + &sensor_dev_attr_temp1_min.dev_attr.attr, + &sensor_dev_attr_temp1_max.dev_attr.attr, + &sensor_dev_attr_temp1_lcrit.dev_attr.attr, + &sensor_dev_attr_temp1_crit.dev_attr.attr, + &sensor_dev_attr_temp1_crit_alarm.dev_attr.attr, + + NULL, +}; + +static const struct attribute_group smm665_group = { + .attrs = smm665_attributes, +}; + +static int smm665_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct i2c_adapter *adapter = client->adapter; + struct smm665_data *data; + int i, ret; + + if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA + | I2C_FUNC_SMBUS_WORD_DATA)) + return -ENODEV; + + if (i2c_smbus_read_byte_data(client, SMM665_ADOC_ENABLE) < 0) + return -ENODEV; + + ret = -ENOMEM; + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + goto out_return; + + i2c_set_clientdata(client, data); + mutex_init(&data->update_lock); + + data->type = id->driver_data; + data->cmdreg = i2c_new_dummy(adapter, (client->addr & ~SMM665_REGMASK) + | SMM665_CMDREG_BASE); + if (!data->cmdreg) + goto out_kfree; + + switch (data->type) { + case smm465: + case smm665: + data->conversion_time = SMM665_ADC_WAIT_SMM665; + break; + case smm665c: + case smm764: + case smm766: + data->conversion_time = SMM665_ADC_WAIT_SMM766; + break; + } + + ret = -ENODEV; + if (i2c_smbus_read_byte_data(data->cmdreg, SMM665_MISC8_CMD_STS) < 0) + goto out_unregister; + + /* + * Read limits. + * + * Limit registers start with register SMM665_LIMIT_BASE. + * Each channel uses 8 registers, providing four limit values + * per channel. Each limit value requires two registers, with the + * high byte in the first register and the low byte in the second + * register. The first two limits are under limit values, followed + * by two over limit values. + * + * Limit register order matches the ADC register order, so we use + * ADC register defines throughout the code to index limit registers. + * + * We save the first retrieved value both as "critical" and "alarm" + * value. The second value overwrites either the critical or the + * alarm value, depending on its configuration. This ensures that both + * critical and alarm values are initialized, even if both registers are + * configured as critical or non-critical. 
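+ *
+ * Concretely, for ADC channel i the four limit words are read from
+ * SMM665_LIMIT_BASE + 8 * i + {0, 2, 4, 6}: the first two words hold
+ * the two under limits, the last two the two over limits.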
+ */ + for (i = 0; i < SMM665_NUM_ADC; i++) { + int val; + + val = smm665_read16(client, SMM665_LIMIT_BASE + i * 8); + if (unlikely(val < 0)) + goto out_unregister; + data->critical_min_limit[i] = data->alarm_min_limit[i] + = smm665_convert(val, i); + val = smm665_read16(client, SMM665_LIMIT_BASE + i * 8 + 2); + if (unlikely(val < 0)) + goto out_unregister; + if (smm665_is_critical(val)) + data->critical_min_limit[i] = smm665_convert(val, i); + else + data->alarm_min_limit[i] = smm665_convert(val, i); + val = smm665_read16(client, SMM665_LIMIT_BASE + i * 8 + 4); + if (unlikely(val < 0)) + goto out_unregister; + data->critical_max_limit[i] = data->alarm_max_limit[i] + = smm665_convert(val, i); + val = smm665_read16(client, SMM665_LIMIT_BASE + i * 8 + 6); + if (unlikely(val < 0)) + goto out_unregister; + if (smm665_is_critical(val)) + data->critical_max_limit[i] = smm665_convert(val, i); + else + data->alarm_max_limit[i] = smm665_convert(val, i); + } + + /* Register sysfs hooks */ + ret = sysfs_create_group(&client->dev.kobj, &smm665_group); + if (ret) + goto out_unregister; + + data->hwmon_dev = hwmon_device_register(&client->dev); + if (IS_ERR(data->hwmon_dev)) { + ret = PTR_ERR(data->hwmon_dev); + goto out_remove_group; + } + + return 0; + +out_remove_group: + sysfs_remove_group(&client->dev.kobj, &smm665_group); +out_unregister: + i2c_unregister_device(data->cmdreg); +out_kfree: + kfree(data); +out_return: + return ret; +} + +static int smm665_remove(struct i2c_client *client) +{ + struct smm665_data *data = i2c_get_clientdata(client); + + i2c_unregister_device(data->cmdreg); + hwmon_device_unregister(data->hwmon_dev); + sysfs_remove_group(&client->dev.kobj, &smm665_group); + + kfree(data); + + return 0; +} + +static const struct i2c_device_id smm665_id[] = { + {"smm465", smm465}, + {"smm665", smm665}, + {"smm665c", smm665c}, + {"smm764", smm764}, + {"smm766", smm766}, + {} +}; + +MODULE_DEVICE_TABLE(i2c, smm665_id); + +/* This is the driver that will be inserted */ +static struct i2c_driver smm665_driver = { + .driver = { + .name = "smm665", + }, + .probe = smm665_probe, + .remove = smm665_remove, + .id_table = smm665_id, +}; + +static int __init smm665_init(void) +{ + return i2c_add_driver(&smm665_driver); +} + +static void __exit smm665_exit(void) +{ + i2c_del_driver(&smm665_driver); +} + +MODULE_AUTHOR("Guenter Roeck"); +MODULE_DESCRIPTION("SMM665 driver"); +MODULE_LICENSE("GPL"); + +module_init(smm665_init); +module_exit(smm665_exit); diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c index 64207df8da82..2de76cc08f61 100644 --- a/drivers/ide/ide-cd.c +++ b/drivers/ide/ide-cd.c @@ -506,15 +506,22 @@ int ide_cd_queue_pc(ide_drive_t *drive, const unsigned char *cmd, return (flags & REQ_FAILED) ? 
-EIO : 0; } -static void ide_cd_error_cmd(ide_drive_t *drive, struct ide_cmd *cmd) +/* + * returns true if rq has been completed + */ +static bool ide_cd_error_cmd(ide_drive_t *drive, struct ide_cmd *cmd) { unsigned int nr_bytes = cmd->nbytes - cmd->nleft; if (cmd->tf_flags & IDE_TFLAG_WRITE) nr_bytes -= cmd->last_xfer_len; - if (nr_bytes > 0) + if (nr_bytes > 0) { ide_complete_rq(drive, 0, nr_bytes); + return true; + } + + return false; } static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive) @@ -679,7 +686,8 @@ out_end: } if (uptodate == 0 && rq->bio) - ide_cd_error_cmd(drive, cmd); + if (ide_cd_error_cmd(drive, cmd)) + return ide_stopped; /* make sure it's fully ended */ if (blk_fs_request(rq) == 0) { diff --git a/drivers/ide/ide-taskfile.c b/drivers/ide/ide-taskfile.c index 67fb73559fd5..34b9872f35d1 100644 --- a/drivers/ide/ide-taskfile.c +++ b/drivers/ide/ide-taskfile.c @@ -480,13 +480,9 @@ int ide_taskfile_ioctl(ide_drive_t *drive, unsigned long arg) u16 nsect = 0; char __user *buf = (char __user *)arg; - req_task = kzalloc(tasksize, GFP_KERNEL); - if (req_task == NULL) - return -ENOMEM; - if (copy_from_user(req_task, buf, tasksize)) { - kfree(req_task); - return -EFAULT; - } + req_task = memdup_user(buf, tasksize); + if (IS_ERR(req_task)) + return PTR_ERR(req_task); taskout = req_task->out_size; taskin = req_task->in_size; diff --git a/drivers/ide/tx4938ide.c b/drivers/ide/tx4938ide.c index 1d80f1fdbc97..7002765b593c 100644 --- a/drivers/ide/tx4938ide.c +++ b/drivers/ide/tx4938ide.c @@ -64,7 +64,7 @@ static void tx4938ide_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive) pair = ide_get_pair_dev(drive); if (pair) - safe = min(safe, pair->pio_mode - XFER_PIO_0); + safe = min_t(u8, safe, pair->pio_mode - XFER_PIO_0); tx4938ide_tune_ebusc(pdata->ebus_ch, pdata->gbus_clock, safe); } diff --git a/drivers/ide/tx4939ide.c b/drivers/ide/tx4939ide.c index 3c7367751873..bed3e39aac96 100644 --- a/drivers/ide/tx4939ide.c +++ b/drivers/ide/tx4939ide.c @@ -114,7 +114,7 @@ static void tx4939ide_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive) pair = ide_get_pair_dev(drive); if (pair) - safe = min(safe, pair->pio_mode - XFER_PIO_0); + safe = min_t(u8, safe, pair->pio_mode - XFER_PIO_0); /* * Update Command Transfer Mode for master/slave and Data * Transfer Mode for this drive. 
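A note on the two min_t(u8, ...) conversions above: safe is a u8, while the subtraction pair->pio_mode - XFER_PIO_0 is promoted to int, and the kernel's type-checked min() warns about comparing distinct types; min_t() casts both operands to u8 before comparing. A minimal user-space sketch of the same cast, with made-up values standing in for the XFER_PIO_* constants (illustrative only, not taken from the drivers):

/* sketch of the min_t(u8, ...) cast; __a/__b and the values below are illustrative */
#include <stdio.h>

#define min_t(type, x, y) ({ type __a = (x); type __b = (y); __a < __b ? __a : __b; })

int main(void)
{
	unsigned char safe = 4;              /* assumed current safe PIO mode       */
	unsigned char pair_pio_mode = 0x0a;  /* assumed: XFER_PIO_0 + 2             */
	int xfer_pio_0 = 0x08;               /* assumed value of XFER_PIO_0         */

	/* the subtraction yields an int; casting both operands back to
	 * unsigned char mirrors min_t(u8, ...) in the hunks above */
	safe = min_t(unsigned char, safe, pair_pio_mode - xfer_pio_0);
	printf("safe = %d\n", safe);         /* prints "safe = 2" */
	return 0;
}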
diff --git a/drivers/ide/via82cxxx.c b/drivers/ide/via82cxxx.c index 101f40022386..d2a0997b78f8 100644 --- a/drivers/ide/via82cxxx.c +++ b/drivers/ide/via82cxxx.c @@ -79,7 +79,7 @@ static struct via_isa_bridge { { "vt8261", PCI_DEVICE_ID_VIA_8261, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST }, { "vt8237s", PCI_DEVICE_ID_VIA_8237S, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST }, { "vt6410", PCI_DEVICE_ID_VIA_6410, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST }, - { "vt6415", PCI_DEVICE_ID_VIA_6410, 0x00, 0xff, ATA_UDMA6, VIA_BAD_AST }, + { "vt6415", PCI_DEVICE_ID_VIA_6415, 0x00, 0xff, ATA_UDMA6, VIA_BAD_AST }, { "vt8251", PCI_DEVICE_ID_VIA_8251, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST }, { "vt8237", PCI_DEVICE_ID_VIA_8237, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST }, { "vt8237a", PCI_DEVICE_ID_VIA_8237A, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST }, diff --git a/drivers/input/mouse/appletouch.c b/drivers/input/mouse/appletouch.c index 05edd75abca0..a9cf76831634 100644 --- a/drivers/input/mouse/appletouch.c +++ b/drivers/input/mouse/appletouch.c @@ -205,8 +205,8 @@ struct atp { bool overflow_warned; int x_old; /* last reported x/y, */ int y_old; /* used for smoothing */ - u8 xy_cur[ATP_XSENSORS + ATP_YSENSORS]; - u8 xy_old[ATP_XSENSORS + ATP_YSENSORS]; + signed char xy_cur[ATP_XSENSORS + ATP_YSENSORS]; + signed char xy_old[ATP_XSENSORS + ATP_YSENSORS]; int xy_acc[ATP_XSENSORS + ATP_YSENSORS]; int idlecount; /* number of empty packets */ struct work_struct work; @@ -531,7 +531,7 @@ static void atp_complete_geyser_1_2(struct urb *urb) for (i = 0; i < ATP_XSENSORS + ATP_YSENSORS; i++) { /* accumulate the change */ - int change = dev->xy_old[i] - dev->xy_cur[i]; + signed char change = dev->xy_old[i] - dev->xy_cur[i]; dev->xy_acc[i] -= change; /* prevent down drifting */ diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c index 0ded3640b926..707d9c94cf9e 100644 --- a/drivers/isdn/gigaset/bas-gigaset.c +++ b/drivers/isdn/gigaset/bas-gigaset.c @@ -1914,11 +1914,13 @@ static int gigaset_write_cmd(struct cardstate *cs, struct cmdbuf_t *cb) * The next command will reopen the AT channel automatically. */ if (cb->len == 3 && !memcmp(cb->buf, "+++", 3)) { - kfree(cb); rc = req_submit(cs->bcs, HD_CLOSE_ATCHANNEL, 0, BAS_TIMEOUT); if (cb->wake_tasklet) tasklet_schedule(cb->wake_tasklet); - return rc < 0 ? 
rc : cb->len; + if (!rc) + rc = cb->len; + kfree(cb); + return rc; } spin_lock_irqsave(&cs->cmdlock, flags); diff --git a/drivers/isdn/gigaset/capi.c b/drivers/isdn/gigaset/capi.c index e5ea344a551a..bcc174e4f3b1 100644 --- a/drivers/isdn/gigaset/capi.c +++ b/drivers/isdn/gigaset/capi.c @@ -1052,6 +1052,7 @@ static inline void remove_appl_from_channel(struct bc_state *bcs, do { if (bcap->bcnext == ap) { bcap->bcnext = bcap->bcnext->bcnext; + spin_unlock_irqrestore(&bcs->aplock, flags); return; } bcap = bcap->bcnext; diff --git a/drivers/isdn/sc/ioctl.c b/drivers/isdn/sc/ioctl.c index 43c5dc3516e5..4cfdbe08ffd1 100644 --- a/drivers/isdn/sc/ioctl.c +++ b/drivers/isdn/sc/ioctl.c @@ -174,7 +174,7 @@ int sc_ioctl(int card, scs_ioctl *data) pr_debug("%s: SCIOGETSPID: ioctl received\n", sc_adapter[card]->devicename); - spid = kmalloc(SCIOC_SPIDSIZE, GFP_KERNEL); + spid = kzalloc(SCIOC_SPIDSIZE, GFP_KERNEL); if (!spid) { kfree(rcvmsg); return -ENOMEM; @@ -194,7 +194,7 @@ int sc_ioctl(int card, scs_ioctl *data) kfree(rcvmsg); return status; } - strcpy(spid, rcvmsg->msg_data.byte_array); + strlcpy(spid, rcvmsg->msg_data.byte_array, SCIOC_SPIDSIZE); /* * Package the switch type and send to user space @@ -266,12 +266,12 @@ int sc_ioctl(int card, scs_ioctl *data) return status; } - dn = kmalloc(SCIOC_DNSIZE, GFP_KERNEL); + dn = kzalloc(SCIOC_DNSIZE, GFP_KERNEL); if (!dn) { kfree(rcvmsg); return -ENOMEM; } - strcpy(dn, rcvmsg->msg_data.byte_array); + strlcpy(dn, rcvmsg->msg_data.byte_array, SCIOC_DNSIZE); kfree(rcvmsg); /* @@ -337,7 +337,7 @@ int sc_ioctl(int card, scs_ioctl *data) pr_debug("%s: SCIOSTAT: ioctl received\n", sc_adapter[card]->devicename); - bi = kmalloc (sizeof(boardInfo), GFP_KERNEL); + bi = kzalloc(sizeof(boardInfo), GFP_KERNEL); if (!bi) { kfree(rcvmsg); return -ENOMEM; diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig index 488f25472291..0b591b658243 100644 --- a/drivers/misc/Kconfig +++ b/drivers/misc/Kconfig @@ -304,6 +304,23 @@ config SENSORS_TSL2550 This driver can also be built as a module. If so, the module will be called tsl2550. +config SENSORS_BH1780 + tristate "ROHM BH1780GLI ambient light sensor" + depends on I2C && SYSFS + help + If you say yes here you get support for the ROHM BH1780GLI + ambient light sensor. + + This driver can also be built as a module. If so, the module + will be called bh1780gli. + +config HMC6352 + tristate "Honeywell HMC6352 compass" + depends on I2C + help + This driver provides support for the Honeywell HMC6352 compass, + providing configuration and heading data via sysfs. + config EP93XX_PWM tristate "EP93xx PWM support" depends on ARCH_EP93XX @@ -363,6 +380,16 @@ config ARM_CHARLCD line and the Linux version on the second line, but that's still useful. +config BMP085 + tristate "BMP085 digital pressure sensor" + depends on I2C && SYSFS + help + If you say yes here you get support for the Bosch Sensortec + BMP086 digital pressure sensor. + + To compile this driver as a module, choose M here: the + module will be called bmp085. 
+ source "drivers/misc/c2port/Kconfig" source "drivers/misc/eeprom/Kconfig" source "drivers/misc/cb710/Kconfig" diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile index 67552d6e9327..255a80dc9d73 100644 --- a/drivers/misc/Makefile +++ b/drivers/misc/Makefile @@ -9,11 +9,13 @@ obj-$(CONFIG_AD525X_DPOT_SPI) += ad525x_dpot-spi.o obj-$(CONFIG_ATMEL_PWM) += atmel_pwm.o obj-$(CONFIG_ATMEL_SSC) += atmel-ssc.o obj-$(CONFIG_ATMEL_TCLIB) += atmel_tclib.o +obj-$(CONFIG_BMP085) += bmp085.o obj-$(CONFIG_ICS932S401) += ics932s401.o obj-$(CONFIG_LKDTM) += lkdtm.o obj-$(CONFIG_TIFM_CORE) += tifm_core.o obj-$(CONFIG_TIFM_7XX1) += tifm_7xx1.o obj-$(CONFIG_PHANTOM) += phantom.o +obj-$(CONFIG_SENSORS_BH1780) += bh1780gli.o obj-$(CONFIG_SGI_IOC4) += ioc4.o obj-$(CONFIG_ENCLOSURE_SERVICES) += enclosure.o obj-$(CONFIG_KGDB_TESTS) += kgdbts.o @@ -28,6 +30,7 @@ obj-$(CONFIG_DS1682) += ds1682.o obj-$(CONFIG_TI_DAC7512) += ti_dac7512.o obj-$(CONFIG_C2PORT) += c2port/ obj-$(CONFIG_IWMC3200TOP) += iwmc3200top/ +obj-$(CONFIG_HMC6352) += hmc6352.o obj-y += eeprom/ obj-y += cb710/ obj-$(CONFIG_VMWARE_BALLOON) += vmware_balloon.o diff --git a/drivers/misc/bh1780gli.c b/drivers/misc/bh1780gli.c new file mode 100644 index 000000000000..714c6b487313 --- /dev/null +++ b/drivers/misc/bh1780gli.c @@ -0,0 +1,273 @@ +/* + * bh1780gli.c + * ROHM Ambient Light Sensor Driver + * + * Copyright (C) 2010 Texas Instruments + * Author: Hemanth V <hemanthv@ti.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. 
+ */ +#include <linux/i2c.h> +#include <linux/slab.h> +#include <linux/mutex.h> +#include <linux/platform_device.h> +#include <linux/delay.h> + +#define BH1780_REG_CONTROL 0x80 +#define BH1780_REG_PARTID 0x8A +#define BH1780_REG_MANFID 0x8B +#define BH1780_REG_DLOW 0x8C +#define BH1780_REG_DHIGH 0x8D + +#define BH1780_REVMASK (0xf) +#define BH1780_POWMASK (0x3) +#define BH1780_POFF (0x0) +#define BH1780_PON (0x3) + +/* power on settling time in ms */ +#define BH1780_PON_DELAY 2 + +struct bh1780_data { + struct i2c_client *client; + int power_state; + /* lock for sysfs operations */ + struct mutex lock; +}; + +static int bh1780_write(struct bh1780_data *ddata, u8 reg, u8 val, char *msg) +{ + int ret = i2c_smbus_write_byte_data(ddata->client, reg, val); + if (ret < 0) + dev_err(&ddata->client->dev, + "i2c_smbus_write_byte_data failed error %d\ + Register (%s)\n", ret, msg); + return ret; +} + +static int bh1780_read(struct bh1780_data *ddata, u8 reg, char *msg) +{ + int ret = i2c_smbus_read_byte_data(ddata->client, reg); + if (ret < 0) + dev_err(&ddata->client->dev, + "i2c_smbus_read_byte_data failed error %d\ + Register (%s)\n", ret, msg); + return ret; +} + +static ssize_t bh1780_show_lux(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct platform_device *pdev = to_platform_device(dev); + struct bh1780_data *ddata = platform_get_drvdata(pdev); + int lsb, msb; + + lsb = bh1780_read(ddata, BH1780_REG_DLOW, "DLOW"); + if (lsb < 0) + return lsb; + + msb = bh1780_read(ddata, BH1780_REG_DHIGH, "DHIGH"); + if (msb < 0) + return msb; + + return sprintf(buf, "%d\n", (msb << 8) | lsb); +} + +static ssize_t bh1780_show_power_state(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct platform_device *pdev = to_platform_device(dev); + struct bh1780_data *ddata = platform_get_drvdata(pdev); + int state; + + state = bh1780_read(ddata, BH1780_REG_CONTROL, "CONTROL"); + if (state < 0) + return state; + + return sprintf(buf, "%d\n", state & BH1780_POWMASK); +} + +static ssize_t bh1780_store_power_state(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct platform_device *pdev = to_platform_device(dev); + struct bh1780_data *ddata = platform_get_drvdata(pdev); + unsigned long val; + int error; + + error = strict_strtoul(buf, 0, &val); + if (error) + return error; + + if (val < BH1780_POFF || val > BH1780_PON) + return -EINVAL; + + mutex_lock(&ddata->lock); + + error = bh1780_write(ddata, BH1780_REG_CONTROL, val, "CONTROL"); + if (error < 0) { + mutex_unlock(&ddata->lock); + return error; + } + + msleep(BH1780_PON_DELAY); + ddata->power_state = val; + mutex_unlock(&ddata->lock); + + return count; +} + +static DEVICE_ATTR(lux, S_IRUGO, bh1780_show_lux, NULL); + +static DEVICE_ATTR(power_state, S_IWUSR | S_IRUGO, + bh1780_show_power_state, bh1780_store_power_state); + +static struct attribute *bh1780_attributes[] = { + &dev_attr_power_state.attr, + &dev_attr_lux.attr, + NULL +}; + +static const struct attribute_group bh1780_attr_group = { + .attrs = bh1780_attributes, +}; + +static int __devinit bh1780_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + int ret; + struct bh1780_data *ddata = NULL; + struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent); + + if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE)) { + ret = -EIO; + goto err_op_failed; + } + + ddata = kzalloc(sizeof(struct bh1780_data), GFP_KERNEL); + if (ddata == NULL) { + ret = -ENOMEM; + goto err_op_failed; + } + + 
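+	/* keep the client pointer; bh1780_read()/bh1780_write() use it for
+	 * all register accesses, starting with the part ID check below */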
ddata->client = client; + i2c_set_clientdata(client, ddata); + + ret = bh1780_read(ddata, BH1780_REG_PARTID, "PART ID"); + if (ret < 0) + goto err_op_failed; + + dev_info(&client->dev, "Ambient Light Sensor, Rev : %d\n", + (ret & BH1780_REVMASK)); + + mutex_init(&ddata->lock); + + ret = sysfs_create_group(&client->dev.kobj, &bh1780_attr_group); + if (ret) + goto err_op_failed; + + return 0; + +err_op_failed: + kfree(ddata); + return ret; +} + +static int __devexit bh1780_remove(struct i2c_client *client) +{ + struct bh1780_data *ddata; + + ddata = i2c_get_clientdata(client); + sysfs_remove_group(&client->dev.kobj, &bh1780_attr_group); + i2c_set_clientdata(client, NULL); + kfree(ddata); + + return 0; +} + +#ifdef CONFIG_PM +static int bh1780_suspend(struct i2c_client *client, pm_message_t mesg) +{ + struct bh1780_data *ddata; + int state, ret; + + ddata = i2c_get_clientdata(client); + state = bh1780_read(ddata, BH1780_REG_CONTROL, "CONTROL"); + if (state < 0) + return state; + + ddata->power_state = state & BH1780_POWMASK; + + ret = bh1780_write(ddata, BH1780_REG_CONTROL, BH1780_POFF, + "CONTROL"); + + if (ret < 0) + return ret; + + return 0; +} + +static int bh1780_resume(struct i2c_client *client) +{ + struct bh1780_data *ddata; + int state, ret; + + ddata = i2c_get_clientdata(client); + state = ddata->power_state; + + ret = bh1780_write(ddata, BH1780_REG_CONTROL, state, + "CONTROL"); + + if (ret < 0) + return ret; + + return 0; +} +#else +#define bh1780_suspend NULL +#define bh1780_resume NULL +#endif /* CONFIG_PM */ + +static const struct i2c_device_id bh1780_id[] = { + { "bh1780", 0 }, + { }, +}; + +static struct i2c_driver bh1780_driver = { + .probe = bh1780_probe, + .remove = bh1780_remove, + .id_table = bh1780_id, + .suspend = bh1780_suspend, + .resume = bh1780_resume, + .driver = { + .name = "bh1780" + }, +}; + +static int __init bh1780_init(void) +{ + return i2c_add_driver(&bh1780_driver); +} + +static void __exit bh1780_exit(void) +{ + i2c_del_driver(&bh1780_driver); +} + +module_init(bh1780_init) +module_exit(bh1780_exit) + +MODULE_DESCRIPTION("BH1780GLI Ambient Light Sensor Driver"); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Hemanth V <hemanthv@ti.com>"); diff --git a/drivers/misc/bmp085.c b/drivers/misc/bmp085.c new file mode 100644 index 000000000000..63ee4c1a5315 --- /dev/null +++ b/drivers/misc/bmp085.c @@ -0,0 +1,482 @@ +/* Copyright (c) 2010 Christoph Mair <christoph.mair@gmail.com> + + This driver supports the bmp085 digital barometric pressure + and temperature sensor from Bosch Sensortec. The datasheet + is avaliable from their website: + http://www.bosch-sensortec.com/content/language1/downloads/BST-BMP085-DS000-05.pdf + + A pressure measurement is issued by reading from pressure0_input. + The return value ranges from 30000 to 110000 pascal with a resulution + of 1 pascal (0.01 millibar) which enables measurements from 9000m above + to 500m below sea level. + + The temperature can be read from temp0_input. Values range from + -400 to 850 representing the ambient temperature in degree celsius + multiplied by 10.The resolution is 0.1 celsius. + + Because ambient pressure is temperature dependent, a temperature + measurement will be executed automatically even if the user is reading + from pressure0_input. This happens if the last temperature measurement + has been executed more then one second ago. + + To decrease RMS noise from pressure measurements, the bmp085 can + autonomously calculate the average of up to eight samples. 
This is + set up by writing to the oversampling sysfs file. Accepted values + are 0, 1, 2 and 3. 2^x when x is the value written to this file + specifies the number of samples used to calculate the ambient pressure. + RMS noise is specified with six pascal (without averaging) and decreases + down to 3 pascal when using an oversampling setting of 3. + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. +*/ + + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/i2c.h> +#include <linux/slab.h> +#include <linux/delay.h> + + +#define BMP085_I2C_ADDRESS 0x77 +#define BMP085_CHIP_ID 0x55 + +#define BMP085_CALIBRATION_DATA_START 0xAA +#define BMP085_CALIBRATION_DATA_LENGTH 11 /* 16 bit values */ +#define BMP085_CHIP_ID_REG 0xD0 +#define BMP085_VERSION_REG 0xD1 +#define BMP085_CTRL_REG 0xF4 +#define BMP085_TEMP_MEASUREMENT 0x2E +#define BMP085_PRESSURE_MEASUREMENT 0x34 +#define BMP085_CONVERSION_REGISTER_MSB 0xF6 +#define BMP085_CONVERSION_REGISTER_LSB 0xF7 +#define BMP085_CONVERSION_REGISTER_XLSB 0xF8 +#define BMP085_TEMP_CONVERSION_TIME 5 + +#define BMP085_CLIENT_NAME "bmp085" + + +static const unsigned short normal_i2c[] = { BMP085_I2C_ADDRESS, + I2C_CLIENT_END }; + +struct bmp085_calibration_data { + s16 AC1, AC2, AC3; + u16 AC4, AC5, AC6; + s16 B1, B2; + s16 MB, MC, MD; +}; + + +/* Each client has this additional data */ +struct bmp085_data { + struct i2c_client *client; + struct mutex lock; + struct bmp085_calibration_data calibration; + u32 raw_temperature; + u32 raw_pressure; + unsigned char oversampling_setting; + u32 last_temp_measurement; + s32 b6; /* calculated temperature correction coefficient */ +}; + + +static s32 bmp085_read_calibration_data(struct i2c_client *client) +{ + u16 tmp[BMP085_CALIBRATION_DATA_LENGTH]; + struct bmp085_data *data = i2c_get_clientdata(client); + struct bmp085_calibration_data *cali = &(data->calibration); + s32 status = i2c_smbus_read_i2c_block_data(client, + BMP085_CALIBRATION_DATA_START, + BMP085_CALIBRATION_DATA_LENGTH*sizeof(u16), + (u8 *)tmp); + if (status < 0) + return status; + + if (status != BMP085_CALIBRATION_DATA_LENGTH*sizeof(u16)) + return -EIO; + + cali->AC1 = be16_to_cpu(tmp[0]); + cali->AC2 = be16_to_cpu(tmp[1]); + cali->AC3 = be16_to_cpu(tmp[2]); + cali->AC4 = be16_to_cpu(tmp[3]); + cali->AC5 = be16_to_cpu(tmp[4]); + cali->AC6 = be16_to_cpu(tmp[5]); + cali->B1 = be16_to_cpu(tmp[6]); + cali->B2 = be16_to_cpu(tmp[7]); + cali->MB = be16_to_cpu(tmp[8]); + cali->MC = be16_to_cpu(tmp[9]); + cali->MD = be16_to_cpu(tmp[10]); + return 0; +} + + +static s32 bmp085_update_raw_temperature(struct bmp085_data *data) +{ + u16 tmp; + s32 status; + + mutex_lock(&data->lock); + status = i2c_smbus_write_byte_data(data->client, BMP085_CTRL_REG, + BMP085_TEMP_MEASUREMENT); + if (status != 0) { + dev_err(&data->client->dev, + "Error while requesting temperature measurement.\n"); + goto exit; + } + 
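+	/*
+	 * Wait out the chip's worst-case temperature conversion time
+	 * (about 4.5 ms per the datasheet) before reading the result.
+	 */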
msleep(BMP085_TEMP_CONVERSION_TIME); + + status = i2c_smbus_read_i2c_block_data(data->client, + BMP085_CONVERSION_REGISTER_MSB, sizeof(tmp), (u8 *)&tmp); + if (status < 0) + goto exit; + if (status != sizeof(tmp)) { + dev_err(&data->client->dev, + "Error while reading temperature measurement result\n"); + status = -EIO; + goto exit; + } + data->raw_temperature = be16_to_cpu(tmp); + data->last_temp_measurement = jiffies; + status = 0; /* everything ok, return 0 */ + +exit: + mutex_unlock(&data->lock); + return status; +} + +static s32 bmp085_update_raw_pressure(struct bmp085_data *data) +{ + u32 tmp = 0; + s32 status; + + mutex_lock(&data->lock); + status = i2c_smbus_write_byte_data(data->client, BMP085_CTRL_REG, + BMP085_PRESSURE_MEASUREMENT + (data->oversampling_setting<<6)); + if (status != 0) { + dev_err(&data->client->dev, + "Error while requesting pressure measurement.\n"); + goto exit; + } + + /* wait for the end of conversion */ + msleep(2+(3 << data->oversampling_setting)); + + /* copy data into a u32 (4 bytes), but skip the first byte. */ + status = i2c_smbus_read_i2c_block_data(data->client, + BMP085_CONVERSION_REGISTER_MSB, 3, ((u8 *)&tmp)+1); + if (status < 0) + goto exit; + if (status != 3) { + dev_err(&data->client->dev, + "Error while reading pressure measurement results\n"); + status = -EIO; + goto exit; + } + data->raw_pressure = be32_to_cpu((tmp)); + data->raw_pressure >>= (8-data->oversampling_setting); + status = 0; /* everything ok, return 0 */ + +exit: + mutex_unlock(&data->lock); + return status; +} + + +/* + * This function starts the temperature measurement and returns the value + * in tenth of a degree celsius. + */ +static s32 bmp085_get_temperature(struct bmp085_data *data, int *temperature) +{ + struct bmp085_calibration_data *cali = &data->calibration; + long x1, x2; + int status; + + status = bmp085_update_raw_temperature(data); + if (status != 0) + goto exit; + + x1 = ((data->raw_temperature - cali->AC6) * cali->AC5) >> 15; + x2 = (cali->MC << 11) / (x1 + cali->MD); + data->b6 = x1 + x2 - 4000; + /* if NULL just update b6. Used for pressure only measurements */ + if (temperature != NULL) + *temperature = (x1+x2+8) >> 4; + +exit: + return status;; +} + +/* + * This function starts the pressure measurement and returns the value + * in millibar. Since the pressure depends on the ambient temperature, + * a temperature measurement is executed if the last known value is older + * than one second. + */ +static s32 bmp085_get_pressure(struct bmp085_data *data, int *pressure) +{ + struct bmp085_calibration_data *cali = &data->calibration; + s32 x1, x2, x3, b3; + u32 b4, b7; + s32 p; + int status; + + /* alt least every second force an update of the ambient temperature */ + if (data->last_temp_measurement + 1*HZ < jiffies) { + status = bmp085_get_temperature(data, NULL); + if (status != 0) + goto exit; + } + + status = bmp085_update_raw_pressure(data); + if (status != 0) + goto exit; + + x1 = (data->b6 * data->b6) >> 12; + x1 *= cali->B2; + x1 >>= 11; + + x2 = cali->AC2 * data->b6; + x2 >>= 11; + + x3 = x1 + x2; + + b3 = (((((s32)cali->AC1) * 4 + x3) << data->oversampling_setting) + 2); + b3 >>= 2; + + x1 = (cali->AC3 * data->b6) >> 13; + x2 = (cali->B1 * ((data->b6 * data->b6) >> 12)) >> 16; + x3 = (x1 + x2 + 2) >> 2; + b4 = (cali->AC4 * (u32)(x3 + 32768)) >> 15; + + b7 = ((u32)data->raw_pressure - b3) * + (50000 >> data->oversampling_setting); + p = ((b7 < 0x80000000) ? 
((b7 << 1) / b4) : ((b7 / b4) * 2)); + + x1 = p >> 8; + x1 *= x1; + x1 = (x1 * 3038) >> 16; + x2 = (-7357 * p) >> 16; + p += (x1 + x2 + 3791) >> 4; + + *pressure = p; + +exit: + return status; +} + +/* + * This function sets the chip-internal oversampling. Valid values are 0..3. + * The chip will use 2^oversampling samples for internal averaging. + * This influences the measurement time and the accuracy; larger values + * increase both. The datasheet gives on overview on how measurement time, + * accuracy and noise correlate. + */ +static void bmp085_set_oversampling(struct bmp085_data *data, + unsigned char oversampling) +{ + if (oversampling > 3) + oversampling = 3; + data->oversampling_setting = oversampling; +} + +/* + * Returns the currently selected oversampling. Range: 0..3 + */ +static unsigned char bmp085_get_oversampling(struct bmp085_data *data) +{ + return data->oversampling_setting; +} + +/* sysfs callbacks */ +static ssize_t set_oversampling(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct i2c_client *client = to_i2c_client(dev); + struct bmp085_data *data = i2c_get_clientdata(client); + unsigned long oversampling; + int success = strict_strtoul(buf, 10, &oversampling); + if (success == 0) { + bmp085_set_oversampling(data, oversampling); + return count; + } + return success; +} + +static ssize_t show_oversampling(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct i2c_client *client = to_i2c_client(dev); + struct bmp085_data *data = i2c_get_clientdata(client); + return sprintf(buf, "%u\n", bmp085_get_oversampling(data)); +} +static DEVICE_ATTR(oversampling, S_IWUSR | S_IRUGO, + show_oversampling, set_oversampling); + + +static ssize_t show_temperature(struct device *dev, + struct device_attribute *attr, char *buf) +{ + int temperature; + int status; + struct i2c_client *client = to_i2c_client(dev); + struct bmp085_data *data = i2c_get_clientdata(client); + + status = bmp085_get_temperature(data, &temperature); + if (status != 0) + return status; + else + return sprintf(buf, "%d\n", temperature); +} +static DEVICE_ATTR(temp0_input, S_IRUGO, show_temperature, NULL); + + +static ssize_t show_pressure(struct device *dev, + struct device_attribute *attr, char *buf) +{ + int pressure; + int status; + struct i2c_client *client = to_i2c_client(dev); + struct bmp085_data *data = i2c_get_clientdata(client); + + status = bmp085_get_pressure(data, &pressure); + if (status != 0) + return status; + else + return sprintf(buf, "%d\n", pressure); +} +static DEVICE_ATTR(pressure0_input, S_IRUGO, show_pressure, NULL); + + +static struct attribute *bmp085_attributes[] = { + &dev_attr_temp0_input.attr, + &dev_attr_pressure0_input.attr, + &dev_attr_oversampling.attr, + NULL +}; + +static const struct attribute_group bmp085_attr_group = { + .attrs = bmp085_attributes, +}; + +static int bmp085_detect(struct i2c_client *client, struct i2c_board_info *info) +{ + if (client->addr != BMP085_I2C_ADDRESS) + return -ENODEV; + + if (i2c_smbus_read_byte_data(client, BMP085_CHIP_ID_REG) != BMP085_CHIP_ID) + return -ENODEV; + + return 0; +} + +static int bmp085_init_client(struct i2c_client *client) +{ + unsigned char version; + int status; + struct bmp085_data *data = i2c_get_clientdata(client); + data->client = client; + status = bmp085_read_calibration_data(client); + if (status != 0) + goto exit; + version = i2c_smbus_read_byte_data(client, BMP085_VERSION_REG); + data->last_temp_measurement = 0; + data->oversampling_setting = 3; + 
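+	/*
+	 * oversampling_setting == 3: 2^3 = 8 samples are averaged for each
+	 * pressure conversion; it can be changed later (0..3) through the
+	 * oversampling sysfs attribute.
+	 */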
mutex_init(&data->lock); + dev_info(&data->client->dev, "BMP085 ver. %d.%d found.\n", + (version & 0x0F), (version & 0xF0) >> 4); +exit: + return status; +} + +static int bmp085_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct bmp085_data *data; + int err = 0; + + data = kzalloc(sizeof(struct bmp085_data), GFP_KERNEL); + if (!data) { + err = -ENOMEM; + goto exit; + } + + /* default settings after POR */ + data->oversampling_setting = 0x00; + + i2c_set_clientdata(client, data); + + /* Initialize the BMP085 chip */ + err = bmp085_init_client(client); + if (err != 0) + goto exit_free; + + /* Register sysfs hooks */ + err = sysfs_create_group(&client->dev.kobj, &bmp085_attr_group); + if (err) + goto exit_free; + + dev_info(&data->client->dev, "Succesfully initialized bmp085!\n"); + goto exit; + +exit_free: + kfree(data); +exit: + return err; +} + +static int bmp085_remove(struct i2c_client *client) +{ + sysfs_remove_group(&client->dev.kobj, &bmp085_attr_group); + kfree(i2c_get_clientdata(client)); + return 0; +} + +static const struct i2c_device_id bmp085_id[] = { + { "bmp085", 0 }, + { } +}; + +static struct i2c_driver bmp085_driver = { + .driver = { + .owner = THIS_MODULE, + .name = "bmp085" + }, + .id_table = bmp085_id, + .probe = bmp085_probe, + .remove = bmp085_remove, + + .detect = bmp085_detect, + .address_list = normal_i2c +}; + +static int __init bmp085_init(void) +{ + return i2c_add_driver(&bmp085_driver); +} + +static void __exit bmp085_exit(void) +{ + i2c_del_driver(&bmp085_driver); +} + + +MODULE_AUTHOR("Christoph Mair <christoph.mair@gmail.com"); +MODULE_DESCRIPTION("BMP085 driver"); +MODULE_LICENSE("GPL"); + +module_init(bmp085_init); +module_exit(bmp085_exit); diff --git a/drivers/misc/hmc6352.c b/drivers/misc/hmc6352.c new file mode 100644 index 000000000000..234bfcaf2099 --- /dev/null +++ b/drivers/misc/hmc6352.c @@ -0,0 +1,166 @@ +/* + * hmc6352.c - Honeywell Compass Driver + * + * Copyright (C) 2009 Intel Corp + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. 
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + */ + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/slab.h> +#include <linux/i2c.h> +#include <linux/err.h> +#include <linux/delay.h> +#include <linux/sysfs.h> + +static DEFINE_MUTEX(compass_mutex); + +static int compass_command(struct i2c_client *c, u8 cmd) +{ + int ret = i2c_master_send(c, &cmd, 1); + if (ret < 0) + dev_warn(&c->dev, "command '%c' failed.\n", cmd); + return ret; +} + +static int compass_store(struct device *dev, const char *buf, size_t count, + const char *map) +{ + struct i2c_client *c = to_i2c_client(dev); + int ret; + unsigned long val; + + if (strict_strtoul(buf, 10, &val)) + return -EINVAL; + if (val >= strlen(map)) + return -EINVAL; + mutex_lock(&compass_mutex); + ret = compass_command(c, map[val]); + mutex_unlock(&compass_mutex); + if (ret < 0) + return ret; + return count; +} + +static ssize_t compass_calibration_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + return compass_store(dev, buf, count, "EC"); +} + +static ssize_t compass_power_mode_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + return compass_store(dev, buf, count, "SW"); +} + +static ssize_t compass_heading_data_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct i2c_client *client = to_i2c_client(dev); + unsigned char i2c_data[2]; + unsigned int ret; + + mutex_lock(&compass_mutex); + ret = compass_command(client, 'A'); + if (ret != 1) { + mutex_unlock(&compass_mutex); + return ret; + } + msleep(10); /* sending 'A' cmd we need to wait for 7-10 millisecs */ + ret = i2c_master_recv(client, i2c_data, 2); + mutex_unlock(&compass_mutex); + if (ret != 1) { + dev_warn(dev, "i2c read data cmd failed\n"); + return ret; + } + ret = (i2c_data[0] << 8) | i2c_data[1]; + return sprintf(buf, "%d.%d\n", ret/10, ret%10); +} + + +static DEVICE_ATTR(heading0_input, S_IRUGO, compass_heading_data_show, NULL); +static DEVICE_ATTR(calibration, S_IWUSR, NULL, compass_calibration_store); +static DEVICE_ATTR(power_state, S_IWUSR, NULL, compass_power_mode_store); + +static struct attribute *mid_att_compass[] = { + &dev_attr_heading0_input.attr, + &dev_attr_calibration.attr, + &dev_attr_power_state.attr, + NULL +}; + +static const struct attribute_group m_compass_gr = { + .name = "hmc6352", + .attrs = mid_att_compass +}; + +static int hmc6352_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + int res; + + res = sysfs_create_group(&client->dev.kobj, &m_compass_gr); + if (res) { + dev_err(&client->dev, "device_create_file failed\n"); + return res; + } + dev_info(&client->dev, "%s HMC6352 compass chip found\n", + client->name); + return 0; +} + +static int hmc6352_remove(struct i2c_client *client) +{ + sysfs_remove_group(&client->dev.kobj, &m_compass_gr); + return 0; +} + +static struct i2c_device_id hmc6352_id[] = { + { "hmc6352", 0 }, + { } +}; + +MODULE_DEVICE_TABLE(i2c, hmc6352_id); + +static struct i2c_driver hmc6352_driver = { + .driver = { + .name = "hmc6352", + }, + .probe = hmc6352_probe, + .remove = hmc6352_remove, + .id_table = hmc6352_id, +}; + +static int __init sensor_hmc6352_init(void) +{ + return i2c_add_driver(&hmc6352_driver); +} + +static void __exit sensor_hmc6352_exit(void) +{ + i2c_del_driver(&hmc6352_driver); +} + +module_init(sensor_hmc6352_init); +module_exit(sensor_hmc6352_exit); + +MODULE_AUTHOR("Kalhan Trisal <kalhan.trisal@intel.com"); 
+MODULE_DESCRIPTION("hmc6352 Compass Driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/misc/hpilo.c b/drivers/misc/hpilo.c index 98ad0120aa9b..557a8c2a7336 100644 --- a/drivers/misc/hpilo.c +++ b/drivers/misc/hpilo.c @@ -256,7 +256,8 @@ static void ilo_ccb_close(struct pci_dev *pdev, struct ccb_data *data) static int ilo_ccb_setup(struct ilo_hwinfo *hw, struct ccb_data *data, int slot) { - char *dma_va, *dma_pa; + char *dma_va; + dma_addr_t dma_pa; struct ccb *driver_ccb, *ilo_ccb; driver_ccb = &data->driver_ccb; @@ -272,12 +273,12 @@ static int ilo_ccb_setup(struct ilo_hwinfo *hw, struct ccb_data *data, int slot) return -ENOMEM; dma_va = (char *)data->dma_va; - dma_pa = (char *)data->dma_pa; + dma_pa = data->dma_pa; memset(dma_va, 0, data->dma_size); dma_va = (char *)roundup((unsigned long)dma_va, ILO_START_ALIGN); - dma_pa = (char *)roundup((unsigned long)dma_pa, ILO_START_ALIGN); + dma_pa = roundup(dma_pa, ILO_START_ALIGN); /* * Create two ccb's, one with virt addrs, one with phys addrs. @@ -288,26 +289,26 @@ static int ilo_ccb_setup(struct ilo_hwinfo *hw, struct ccb_data *data, int slot) fifo_setup(dma_va, NR_QENTRY); driver_ccb->ccb_u1.send_fifobar = dma_va + FIFOHANDLESIZE; - ilo_ccb->ccb_u1.send_fifobar = dma_pa + FIFOHANDLESIZE; + ilo_ccb->ccb_u1.send_fifobar_pa = dma_pa + FIFOHANDLESIZE; dma_va += fifo_sz(NR_QENTRY); dma_pa += fifo_sz(NR_QENTRY); dma_va = (char *)roundup((unsigned long)dma_va, ILO_CACHE_SZ); - dma_pa = (char *)roundup((unsigned long)dma_pa, ILO_CACHE_SZ); + dma_pa = roundup(dma_pa, ILO_CACHE_SZ); fifo_setup(dma_va, NR_QENTRY); driver_ccb->ccb_u3.recv_fifobar = dma_va + FIFOHANDLESIZE; - ilo_ccb->ccb_u3.recv_fifobar = dma_pa + FIFOHANDLESIZE; + ilo_ccb->ccb_u3.recv_fifobar_pa = dma_pa + FIFOHANDLESIZE; dma_va += fifo_sz(NR_QENTRY); dma_pa += fifo_sz(NR_QENTRY); driver_ccb->ccb_u2.send_desc = dma_va; - ilo_ccb->ccb_u2.send_desc = dma_pa; + ilo_ccb->ccb_u2.send_desc_pa = dma_pa; dma_pa += desc_mem_sz(NR_QENTRY); dma_va += desc_mem_sz(NR_QENTRY); driver_ccb->ccb_u4.recv_desc = dma_va; - ilo_ccb->ccb_u4.recv_desc = dma_pa; + ilo_ccb->ccb_u4.recv_desc_pa = dma_pa; driver_ccb->channel = slot; ilo_ccb->channel = slot; diff --git a/drivers/misc/hpilo.h b/drivers/misc/hpilo.h index 247eb386a973..54e43adbdea1 100644 --- a/drivers/misc/hpilo.h +++ b/drivers/misc/hpilo.h @@ -79,21 +79,21 @@ struct ilo_hwinfo { struct ccb { union { char *send_fifobar; - u64 padding1; + u64 send_fifobar_pa; } ccb_u1; union { char *send_desc; - u64 padding2; + u64 send_desc_pa; } ccb_u2; u64 send_ctrl; union { char *recv_fifobar; - u64 padding3; + u64 recv_fifobar_pa; } ccb_u3; union { char *recv_desc; - u64 padding4; + u64 recv_desc_pa; } ccb_u4; u64 recv_ctrl; diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig index f8210bf2d241..1e2cbf5d9aa1 100644 --- a/drivers/mtd/Kconfig +++ b/drivers/mtd/Kconfig @@ -311,15 +311,17 @@ config SM_FTL select MTD_BLKDEVS select MTD_NAND_ECC help - This enables new and very EXPERMENTAL support for SmartMedia/xD + This enables EXPERIMENTAL R/W support for SmartMedia/xD FTL (Flash translation layer). - Write support isn't yet well tested, therefore this code IS likely to - eat your card, so please don't use it together with valuable data. - Use readonly driver (CONFIG_SSFDC) instead. + Write support is only lightly tested, therefore this driver + isn't recommended to use with valuable data (anyway if you have + valuable data, do backups regardless of software/hardware you + use, because you never know what will eat your data...) 
+ If you only need R/O access, you can use older R/O driver + (CONFIG_SSFDC) config MTD_OOPS tristate "Log panic/oops to an MTD buffer" - depends on MTD help This enables panic and oops messages to be logged to a circular buffer in a flash partition where it can be read back at some diff --git a/drivers/mtd/afs.c b/drivers/mtd/afs.c index cec7ab98b2a9..302372c08b56 100644 --- a/drivers/mtd/afs.c +++ b/drivers/mtd/afs.c @@ -2,7 +2,7 @@ drivers/mtd/afs.c: ARM Flash Layout/Partitioning - Copyright (C) 2000 ARM Limited + Copyright © 2000 ARM Limited This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c index 62f3ea9de848..9e2b7e9e0ad9 100644 --- a/drivers/mtd/chips/cfi_cmdset_0001.c +++ b/drivers/mtd/chips/cfi_cmdset_0001.c @@ -34,7 +34,6 @@ #include <linux/mtd/xip.h> #include <linux/mtd/map.h> #include <linux/mtd/mtd.h> -#include <linux/mtd/compatmac.h> #include <linux/mtd/cfi.h> /* #define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE */ @@ -63,6 +62,8 @@ static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *); static void cfi_intelext_sync (struct mtd_info *); static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len); static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len); +static int cfi_intelext_is_locked(struct mtd_info *mtd, loff_t ofs, + uint64_t len); #ifdef CONFIG_MTD_OTP static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *); static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *); @@ -448,6 +449,7 @@ struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary) mtd->sync = cfi_intelext_sync; mtd->lock = cfi_intelext_lock; mtd->unlock = cfi_intelext_unlock; + mtd->is_locked = cfi_intelext_is_locked; mtd->suspend = cfi_intelext_suspend; mtd->resume = cfi_intelext_resume; mtd->flags = MTD_CAP_NORFLASH; @@ -717,7 +719,7 @@ static int cfi_intelext_partition_fixup(struct mtd_info *mtd, chip = &newcfi->chips[0]; for (i = 0; i < cfi->numchips; i++) { shared[i].writing = shared[i].erasing = NULL; - spin_lock_init(&shared[i].lock); + mutex_init(&shared[i].lock); for (j = 0; j < numparts; j++) { *chip = cfi->chips[i]; chip->start += j << partshift; @@ -886,7 +888,7 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr */ struct flchip_shared *shared = chip->priv; struct flchip *contender; - spin_lock(&shared->lock); + mutex_lock(&shared->lock); contender = shared->writing; if (contender && contender != chip) { /* @@ -899,7 +901,7 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr * get_chip returns success we're clear to go ahead. */ ret = mutex_trylock(&contender->mutex); - spin_unlock(&shared->lock); + mutex_unlock(&shared->lock); if (!ret) goto retry; mutex_unlock(&chip->mutex); @@ -914,7 +916,7 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr mutex_unlock(&contender->mutex); return ret; } - spin_lock(&shared->lock); + mutex_lock(&shared->lock); /* We should not own chip if it is already * in FL_SYNCING state. Put contender and retry. */ @@ -930,7 +932,7 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr * on this chip. Sleep. 
*/ if (mode == FL_ERASING && shared->erasing && shared->erasing->oldstate == FL_ERASING) { - spin_unlock(&shared->lock); + mutex_unlock(&shared->lock); set_current_state(TASK_UNINTERRUPTIBLE); add_wait_queue(&chip->wq, &wait); mutex_unlock(&chip->mutex); @@ -944,7 +946,7 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr shared->writing = chip; if (mode == FL_ERASING) shared->erasing = chip; - spin_unlock(&shared->lock); + mutex_unlock(&shared->lock); } ret = chip_ready(map, chip, adr, mode); if (ret == -EAGAIN) @@ -959,7 +961,7 @@ static void put_chip(struct map_info *map, struct flchip *chip, unsigned long ad if (chip->priv) { struct flchip_shared *shared = chip->priv; - spin_lock(&shared->lock); + mutex_lock(&shared->lock); if (shared->writing == chip && chip->oldstate == FL_READY) { /* We own the ability to write, but we're done */ shared->writing = shared->erasing; @@ -967,7 +969,7 @@ static void put_chip(struct map_info *map, struct flchip *chip, unsigned long ad /* give back ownership to who we loaned it from */ struct flchip *loaner = shared->writing; mutex_lock(&loaner->mutex); - spin_unlock(&shared->lock); + mutex_unlock(&shared->lock); mutex_unlock(&chip->mutex); put_chip(map, loaner, loaner->start); mutex_lock(&chip->mutex); @@ -985,11 +987,11 @@ static void put_chip(struct map_info *map, struct flchip *chip, unsigned long ad * Don't let the switch below mess things up since * we don't have ownership to resume anything. */ - spin_unlock(&shared->lock); + mutex_unlock(&shared->lock); wake_up(&chip->wq); return; } - spin_unlock(&shared->lock); + mutex_unlock(&shared->lock); } switch(chip->oldstate) { @@ -2139,6 +2141,13 @@ static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len) return ret; } +static int cfi_intelext_is_locked(struct mtd_info *mtd, loff_t ofs, + uint64_t len) +{ + return cfi_varsize_frob(mtd, do_getlockstatus_oneblock, + ofs, len, NULL) ? 
1 : 0; +} + #ifdef CONFIG_MTD_OTP typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip, diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c index d81079ef91a5..3e6c47bdce53 100644 --- a/drivers/mtd/chips/cfi_cmdset_0002.c +++ b/drivers/mtd/chips/cfi_cmdset_0002.c @@ -33,7 +33,6 @@ #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/reboot.h> -#include <linux/mtd/compatmac.h> #include <linux/mtd/map.h> #include <linux/mtd/mtd.h> #include <linux/mtd/cfi.h> @@ -417,16 +416,26 @@ struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary) */ cfi_fixup_major_minor(cfi, extp); + /* + * Valid primary extension versions are: 1.0, 1.1, 1.2, 1.3, 1.4 + * see: http://www.amd.com/us-en/assets/content_type/DownloadableAssets/cfi_r20.pdf, page 19 + * http://www.amd.com/us-en/assets/content_type/DownloadableAssets/cfi_100_20011201.pdf + * http://www.spansion.com/Support/Datasheets/s29ws-p_00_a12_e.pdf + */ if (extp->MajorVersion != '1' || - (extp->MinorVersion < '0' || extp->MinorVersion > '4')) { + (extp->MajorVersion == '1' && (extp->MinorVersion < '0' || extp->MinorVersion > '4'))) { printk(KERN_ERR " Unknown Amd/Fujitsu Extended Query " - "version %c.%c.\n", extp->MajorVersion, - extp->MinorVersion); + "version %c.%c (%#02x/%#02x).\n", + extp->MajorVersion, extp->MinorVersion, + extp->MajorVersion, extp->MinorVersion); kfree(extp); kfree(mtd); return NULL; } + printk(KERN_INFO " Amd/Fujitsu Extended Query version %c.%c.\n", + extp->MajorVersion, extp->MinorVersion); + /* Install our own private info structure */ cfi->cmdset_priv = extp; diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c index e54e8c169d76..314af1f5a370 100644 --- a/drivers/mtd/chips/cfi_cmdset_0020.c +++ b/drivers/mtd/chips/cfi_cmdset_0020.c @@ -33,7 +33,6 @@ #include <linux/mtd/map.h> #include <linux/mtd/cfi.h> #include <linux/mtd/mtd.h> -#include <linux/mtd/compatmac.h> static int cfi_staa_read(struct mtd_info *, loff_t, size_t, size_t *, u_char *); diff --git a/drivers/mtd/chips/cfi_probe.c b/drivers/mtd/chips/cfi_probe.c index b2acd32f4fbf..8f5b96aa87a0 100644 --- a/drivers/mtd/chips/cfi_probe.c +++ b/drivers/mtd/chips/cfi_probe.c @@ -235,9 +235,9 @@ static int __xipram cfi_chip_setup(struct map_info *map, cfi_qry_mode_off(base, map, cfi); xip_allowed(base, map); - printk(KERN_INFO "%s: Found %d x%d devices at 0x%x in %d-bit bank\n", + printk(KERN_INFO "%s: Found %d x%d devices at 0x%x in %d-bit bank. 
Manufacturer ID %#08x Chip ID %#08x\n", map->name, cfi->interleave, cfi->device_type*8, base, - map->bankwidth*8); + map->bankwidth*8, cfi->mfr, cfi->id); return 1; } diff --git a/drivers/mtd/chips/cfi_util.c b/drivers/mtd/chips/cfi_util.c index d7c2c672757e..e503b2ca894d 100644 --- a/drivers/mtd/chips/cfi_util.c +++ b/drivers/mtd/chips/cfi_util.c @@ -22,7 +22,6 @@ #include <linux/mtd/mtd.h> #include <linux/mtd/map.h> #include <linux/mtd/cfi.h> -#include <linux/mtd/compatmac.h> int __xipram cfi_qry_present(struct map_info *map, __u32 base, struct cfi_private *cfi) diff --git a/drivers/mtd/chips/chipreg.c b/drivers/mtd/chips/chipreg.c index c85760968227..da1f96f385c7 100644 --- a/drivers/mtd/chips/chipreg.c +++ b/drivers/mtd/chips/chipreg.c @@ -10,7 +10,6 @@ #include <linux/slab.h> #include <linux/mtd/map.h> #include <linux/mtd/mtd.h> -#include <linux/mtd/compatmac.h> static DEFINE_SPINLOCK(chip_drvs_lock); static LIST_HEAD(chip_drvs_list); diff --git a/drivers/mtd/chips/map_absent.c b/drivers/mtd/chips/map_absent.c index 494d30d0631a..f2b872946871 100644 --- a/drivers/mtd/chips/map_absent.c +++ b/drivers/mtd/chips/map_absent.c @@ -25,7 +25,6 @@ #include <linux/init.h> #include <linux/mtd/mtd.h> #include <linux/mtd/map.h> -#include <linux/mtd/compatmac.h> static int map_absent_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *); static int map_absent_write (struct mtd_info *, loff_t, size_t, size_t *, const u_char *); diff --git a/drivers/mtd/chips/map_ram.c b/drivers/mtd/chips/map_ram.c index 6bdc50c727e7..67640ccb2d41 100644 --- a/drivers/mtd/chips/map_ram.c +++ b/drivers/mtd/chips/map_ram.c @@ -13,7 +13,6 @@ #include <linux/init.h> #include <linux/mtd/mtd.h> #include <linux/mtd/map.h> -#include <linux/mtd/compatmac.h> static int mapram_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *); diff --git a/drivers/mtd/chips/map_rom.c b/drivers/mtd/chips/map_rom.c index 076090a67b90..593f73d480d2 100644 --- a/drivers/mtd/chips/map_rom.c +++ b/drivers/mtd/chips/map_rom.c @@ -13,7 +13,6 @@ #include <linux/init.h> #include <linux/mtd/mtd.h> #include <linux/mtd/map.h> -#include <linux/mtd/compatmac.h> static int maprom_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *); static int maprom_write (struct mtd_info *, loff_t, size_t, size_t *, const u_char *); diff --git a/drivers/mtd/cmdlinepart.c b/drivers/mtd/cmdlinepart.c index 1479da6d3aa6..e790f38893b0 100644 --- a/drivers/mtd/cmdlinepart.c +++ b/drivers/mtd/cmdlinepart.c @@ -1,7 +1,22 @@ /* * Read flash partition table from command line * - * Copyright 2002 SYSGO Real-Time Solutions GmbH + * Copyright © 2002 SYSGO Real-Time Solutions GmbH + * Copyright © 2002-2010 David Woodhouse <dwmw2@infradead.org> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * * The format for the command line is as follows: * diff --git a/drivers/mtd/devices/docecc.c b/drivers/mtd/devices/docecc.c index a19cda52da5c..a99838bb2dc0 100644 --- a/drivers/mtd/devices/docecc.c +++ b/drivers/mtd/devices/docecc.c @@ -31,7 +31,6 @@ #include <linux/init.h> #include <linux/types.h> -#include <linux/mtd/compatmac.h> /* for min() in older kernels */ #include <linux/mtd/mtd.h> #include <linux/mtd/doc2000.h> diff --git a/drivers/mtd/devices/docprobe.c b/drivers/mtd/devices/docprobe.c index 6e62922942b1..d374603493a7 100644 --- a/drivers/mtd/devices/docprobe.c +++ b/drivers/mtd/devices/docprobe.c @@ -49,7 +49,6 @@ #include <linux/mtd/mtd.h> #include <linux/mtd/nand.h> #include <linux/mtd/doc2000.h> -#include <linux/mtd/compatmac.h> /* Where to look for the devices? */ #ifndef CONFIG_MTD_DOCPROBE_ADDRESS diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c index 81e49a9b017e..f90941a785e4 100644 --- a/drivers/mtd/devices/m25p80.c +++ b/drivers/mtd/devices/m25p80.c @@ -16,6 +16,8 @@ */ #include <linux/init.h> +#include <linux/err.h> +#include <linux/errno.h> #include <linux/module.h> #include <linux/device.h> #include <linux/interrupt.h> @@ -639,8 +641,18 @@ static const struct spi_device_id m25p_ids[] = { { "at26df161a", INFO(0x1f4601, 0, 64 * 1024, 32, SECT_4K) }, { "at26df321", INFO(0x1f4701, 0, 64 * 1024, 64, SECT_4K) }, + /* EON -- en25pxx */ + { "en25p32", INFO(0x1c2016, 0, 64 * 1024, 64, 0) }, + { "en25p64", INFO(0x1c2017, 0, 64 * 1024, 128, 0) }, + + /* Intel/Numonyx -- xxxs33b */ + { "160s33b", INFO(0x898911, 0, 64 * 1024, 32, 0) }, + { "320s33b", INFO(0x898912, 0, 64 * 1024, 64, 0) }, + { "640s33b", INFO(0x898913, 0, 64 * 1024, 128, 0) }, + /* Macronix */ { "mx25l4005a", INFO(0xc22013, 0, 64 * 1024, 8, SECT_4K) }, + { "mx25l8005", INFO(0xc22014, 0, 64 * 1024, 16, 0) }, { "mx25l3205d", INFO(0xc22016, 0, 64 * 1024, 64, 0) }, { "mx25l6405d", INFO(0xc22017, 0, 64 * 1024, 128, 0) }, { "mx25l12805d", INFO(0xc22018, 0, 64 * 1024, 256, 0) }, @@ -680,6 +692,16 @@ static const struct spi_device_id m25p_ids[] = { { "m25p64", INFO(0x202017, 0, 64 * 1024, 128, 0) }, { "m25p128", INFO(0x202018, 0, 256 * 1024, 64, 0) }, + { "m25p05-nonjedec", INFO(0, 0, 32 * 1024, 2, 0) }, + { "m25p10-nonjedec", INFO(0, 0, 32 * 1024, 4, 0) }, + { "m25p20-nonjedec", INFO(0, 0, 64 * 1024, 4, 0) }, + { "m25p40-nonjedec", INFO(0, 0, 64 * 1024, 8, 0) }, + { "m25p80-nonjedec", INFO(0, 0, 64 * 1024, 16, 0) }, + { "m25p16-nonjedec", INFO(0, 0, 64 * 1024, 32, 0) }, + { "m25p32-nonjedec", INFO(0, 0, 64 * 1024, 64, 0) }, + { "m25p64-nonjedec", INFO(0, 0, 64 * 1024, 128, 0) }, + { "m25p128-nonjedec", INFO(0, 0, 256 * 1024, 64, 0) }, + { "m45pe10", INFO(0x204011, 0, 64 * 1024, 2, 0) }, { "m45pe80", INFO(0x204014, 0, 64 * 1024, 16, 0) }, { "m45pe16", INFO(0x204015, 0, 64 * 1024, 32, 0) }, @@ -694,6 +716,7 @@ static const struct spi_device_id m25p_ids[] = { { "w25x80", INFO(0xef3014, 0, 64 * 1024, 16, SECT_4K) }, { "w25x16", INFO(0xef3015, 0, 64 * 1024, 32, SECT_4K) }, { "w25x32", INFO(0xef3016, 0, 64 * 1024, 64, SECT_4K) }, + { "w25q32", INFO(0xef4016, 0, 64 * 1024, 64, SECT_4K) }, { "w25x64", INFO(0xef3017, 0, 64 * 1024, 128, SECT_4K) }, /* Catalyst / On Semiconductor -- non-JEDEC */ @@ -723,7 +746,7 @@ static const struct spi_device_id *__devinit jedec_probe(struct spi_device 
*spi) if (tmp < 0) { DEBUG(MTD_DEBUG_LEVEL0, "%s: error %d reading JEDEC ID\n", dev_name(&spi->dev), tmp); - return NULL; + return ERR_PTR(tmp); } jedec = id[0]; jedec = jedec << 8; @@ -731,14 +754,6 @@ static const struct spi_device_id *__devinit jedec_probe(struct spi_device *spi) jedec = jedec << 8; jedec |= id[2]; - /* - * Some chips (like Numonyx M25P80) have JEDEC and non-JEDEC variants, - * which depend on technology process. Officially RDID command doesn't - * exist for non-JEDEC chips, but for compatibility they return ID 0. - */ - if (jedec == 0) - return NULL; - ext_jedec = id[3] << 8 | id[4]; for (tmp = 0; tmp < ARRAY_SIZE(m25p_ids) - 1; tmp++) { @@ -749,7 +764,7 @@ static const struct spi_device_id *__devinit jedec_probe(struct spi_device *spi) return &m25p_ids[tmp]; } } - return NULL; + return ERR_PTR(-ENODEV); } @@ -794,9 +809,8 @@ static int __devinit m25p_probe(struct spi_device *spi) const struct spi_device_id *jid; jid = jedec_probe(spi); - if (!jid) { - dev_info(&spi->dev, "non-JEDEC variant of %s\n", - id->name); + if (IS_ERR(jid)) { + return PTR_ERR(jid); } else if (jid != id) { /* * JEDEC knows better, so overwrite platform ID. We @@ -826,11 +840,12 @@ static int __devinit m25p_probe(struct spi_device *spi) dev_set_drvdata(&spi->dev, flash); /* - * Atmel and SST serial flash tend to power + * Atmel, SST and Intel/Numonyx serial flash tend to power * up with the software protection bits set */ if (info->jedec_id >> 16 == 0x1f || + info->jedec_id >> 16 == 0x89 || info->jedec_id >> 16 == 0xbf) { write_enable(flash); write_sr(flash, 0); diff --git a/drivers/mtd/devices/mtd_dataflash.c b/drivers/mtd/devices/mtd_dataflash.c index 19817404ce7d..c5015cc721d5 100644 --- a/drivers/mtd/devices/mtd_dataflash.c +++ b/drivers/mtd/devices/mtd_dataflash.c @@ -141,7 +141,7 @@ static int dataflash_waitready(struct spi_device *spi) */ static int dataflash_erase(struct mtd_info *mtd, struct erase_info *instr) { - struct dataflash *priv = (struct dataflash *)mtd->priv; + struct dataflash *priv = mtd->priv; struct spi_device *spi = priv->spi; struct spi_transfer x = { .tx_dma = 0, }; struct spi_message msg; @@ -231,7 +231,7 @@ static int dataflash_erase(struct mtd_info *mtd, struct erase_info *instr) static int dataflash_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf) { - struct dataflash *priv = (struct dataflash *)mtd->priv; + struct dataflash *priv = mtd->priv; struct spi_transfer x[2] = { { .tx_dma = 0, }, }; struct spi_message msg; unsigned int addr; @@ -304,7 +304,7 @@ static int dataflash_read(struct mtd_info *mtd, loff_t from, size_t len, static int dataflash_write(struct mtd_info *mtd, loff_t to, size_t len, size_t * retlen, const u_char * buf) { - struct dataflash *priv = (struct dataflash *)mtd->priv; + struct dataflash *priv = mtd->priv; struct spi_device *spi = priv->spi; struct spi_transfer x[2] = { { .tx_dma = 0, }, }; struct spi_message msg; @@ -515,7 +515,7 @@ static ssize_t otp_read(struct spi_device *spi, unsigned base, static int dataflash_read_fact_otp(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf) { - struct dataflash *priv = (struct dataflash *)mtd->priv; + struct dataflash *priv = mtd->priv; int status; /* 64 bytes, from 0..63 ... 
start at 64 on-chip */ @@ -532,7 +532,7 @@ static int dataflash_read_fact_otp(struct mtd_info *mtd, static int dataflash_read_user_otp(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf) { - struct dataflash *priv = (struct dataflash *)mtd->priv; + struct dataflash *priv = mtd->priv; int status; /* 64 bytes, from 0..63 ... start at 0 on-chip */ @@ -553,7 +553,7 @@ static int dataflash_write_user_otp(struct mtd_info *mtd, const size_t l = 4 + 64; uint8_t *scratch; struct spi_transfer t; - struct dataflash *priv = (struct dataflash *)mtd->priv; + struct dataflash *priv = mtd->priv; int status; if (len > 64) diff --git a/drivers/mtd/devices/mtdram.c b/drivers/mtd/devices/mtdram.c index fce5ff7589aa..26a6e809013d 100644 --- a/drivers/mtd/devices/mtdram.c +++ b/drivers/mtd/devices/mtdram.c @@ -14,7 +14,6 @@ #include <linux/ioport.h> #include <linux/vmalloc.h> #include <linux/init.h> -#include <linux/mtd/compatmac.h> #include <linux/mtd/mtd.h> #include <linux/mtd/mtdram.h> diff --git a/drivers/mtd/devices/pmc551.c b/drivers/mtd/devices/pmc551.c index fc8ea0a57ac2..ef0aba0ce58f 100644 --- a/drivers/mtd/devices/pmc551.c +++ b/drivers/mtd/devices/pmc551.c @@ -98,7 +98,6 @@ #include <linux/mtd/mtd.h> #include <linux/mtd/pmc551.h> -#include <linux/mtd/compatmac.h> static struct mtd_info *pmc551list; diff --git a/drivers/mtd/devices/sst25l.c b/drivers/mtd/devices/sst25l.c index ab5d8cd02a15..684247a8a5ed 100644 --- a/drivers/mtd/devices/sst25l.c +++ b/drivers/mtd/devices/sst25l.c @@ -454,7 +454,7 @@ static int __init sst25l_probe(struct spi_device *spi) parts, nr_parts); } - } else if (data->nr_parts) { + } else if (data && data->nr_parts) { dev_warn(&spi->dev, "ignoring %d default partitions on %s\n", data->nr_parts, data->name); } diff --git a/drivers/mtd/ftl.c b/drivers/mtd/ftl.c index 62da9eb7032b..4d6a64c387ec 100644 --- a/drivers/mtd/ftl.c +++ b/drivers/mtd/ftl.c @@ -26,7 +26,7 @@ The initial developer of the original code is David A. Hinds <dahinds@users.sourceforge.net>. Portions created by David A. Hinds - are Copyright (C) 1999 David A. Hinds. All Rights Reserved. + are Copyright © 1999 David A. Hinds. All Rights Reserved. Alternatively, the contents of this file may be used under the terms of the GNU General Public License version 2 (the "GPL"), in diff --git a/drivers/mtd/inftlcore.c b/drivers/mtd/inftlcore.c index 015a7fe1b6ee..d7592e67d048 100644 --- a/drivers/mtd/inftlcore.c +++ b/drivers/mtd/inftlcore.c @@ -1,11 +1,11 @@ /* * inftlcore.c -- Linux driver for Inverse Flash Translation Layer (INFTL) * - * (C) Copyright 2002, Greg Ungerer (gerg@snapgear.com) + * Copyright © 2002, Greg Ungerer (gerg@snapgear.com) * * Based heavily on the nftlcore.c code which is: - * (c) 1999 Machine Vision Holdings, Inc. - * Author: David Woodhouse <dwmw2@infradead.org> + * Copyright © 1999 Machine Vision Holdings, Inc. + * Copyright © 1999 David Woodhouse <dwmw2@infradead.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by diff --git a/drivers/mtd/inftlmount.c b/drivers/mtd/inftlmount.c index 8f988d7d3c5c..104052e774b0 100644 --- a/drivers/mtd/inftlmount.c +++ b/drivers/mtd/inftlmount.c @@ -2,11 +2,11 @@ * inftlmount.c -- INFTL mount code with extensive checks. 
* * Author: Greg Ungerer (gerg@snapgear.com) - * (C) Copyright 2002-2003, Greg Ungerer (gerg@snapgear.com) + * Copyright © 2002-2003, Greg Ungerer (gerg@snapgear.com) * * Based heavily on the nftlmount.c code which is: * Author: Fabrice Bellard (fabrice.bellard@netgem.com) - * Copyright (C) 2000 Netgem S.A. + * Copyright © 2000 Netgem S.A. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -34,7 +34,6 @@ #include <linux/mtd/mtd.h> #include <linux/mtd/nftl.h> #include <linux/mtd/inftl.h> -#include <linux/mtd/compatmac.h> /* * find_boot_record: Find the INFTL Media Header and its Spare copy which diff --git a/drivers/mtd/lpddr/lpddr_cmds.c b/drivers/mtd/lpddr/lpddr_cmds.c index fece5be58715..04fdfcca93f7 100644 --- a/drivers/mtd/lpddr/lpddr_cmds.c +++ b/drivers/mtd/lpddr/lpddr_cmds.c @@ -98,7 +98,7 @@ struct mtd_info *lpddr_cmdset(struct map_info *map) numchips = lpddr->numchips / lpddr->qinfo->HWPartsNum; for (i = 0; i < numchips; i++) { shared[i].writing = shared[i].erasing = NULL; - spin_lock_init(&shared[i].lock); + mutex_init(&shared[i].lock); for (j = 0; j < lpddr->qinfo->HWPartsNum; j++) { *chip = lpddr->chips[i]; chip->start += j << lpddr->chipshift; @@ -217,7 +217,7 @@ static int get_chip(struct map_info *map, struct flchip *chip, int mode) */ struct flchip_shared *shared = chip->priv; struct flchip *contender; - spin_lock(&shared->lock); + mutex_lock(&shared->lock); contender = shared->writing; if (contender && contender != chip) { /* @@ -230,7 +230,7 @@ static int get_chip(struct map_info *map, struct flchip *chip, int mode) * get_chip returns success we're clear to go ahead. */ ret = mutex_trylock(&contender->mutex); - spin_unlock(&shared->lock); + mutex_unlock(&shared->lock); if (!ret) goto retry; mutex_unlock(&chip->mutex); @@ -245,7 +245,7 @@ static int get_chip(struct map_info *map, struct flchip *chip, int mode) mutex_unlock(&contender->mutex); return ret; } - spin_lock(&shared->lock); + mutex_lock(&shared->lock); /* We should not own chip if it is already in FL_SYNCING * state. Put contender and retry. */ @@ -261,7 +261,7 @@ static int get_chip(struct map_info *map, struct flchip *chip, int mode) Must sleep in such a case. 
*/ if (mode == FL_ERASING && shared->erasing && shared->erasing->oldstate == FL_ERASING) { - spin_unlock(&shared->lock); + mutex_unlock(&shared->lock); set_current_state(TASK_UNINTERRUPTIBLE); add_wait_queue(&chip->wq, &wait); mutex_unlock(&chip->mutex); @@ -275,7 +275,7 @@ static int get_chip(struct map_info *map, struct flchip *chip, int mode) shared->writing = chip; if (mode == FL_ERASING) shared->erasing = chip; - spin_unlock(&shared->lock); + mutex_unlock(&shared->lock); } ret = chip_ready(map, chip, mode); @@ -348,7 +348,7 @@ static void put_chip(struct map_info *map, struct flchip *chip) { if (chip->priv) { struct flchip_shared *shared = chip->priv; - spin_lock(&shared->lock); + mutex_lock(&shared->lock); if (shared->writing == chip && chip->oldstate == FL_READY) { /* We own the ability to write, but we're done */ shared->writing = shared->erasing; @@ -356,7 +356,7 @@ static void put_chip(struct map_info *map, struct flchip *chip) /* give back the ownership */ struct flchip *loaner = shared->writing; mutex_lock(&loaner->mutex); - spin_unlock(&shared->lock); + mutex_unlock(&shared->lock); mutex_unlock(&chip->mutex); put_chip(map, loaner); mutex_lock(&chip->mutex); @@ -374,11 +374,11 @@ static void put_chip(struct map_info *map, struct flchip *chip) * Don't let the switch below mess things up since * we don't have ownership to resume anything. */ - spin_unlock(&shared->lock); + mutex_unlock(&shared->lock); wake_up(&chip->wq); return; } - spin_unlock(&shared->lock); + mutex_unlock(&shared->lock); } switch (chip->oldstate) { diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig index 6629d09f3b38..701d942c6795 100644 --- a/drivers/mtd/maps/Kconfig +++ b/drivers/mtd/maps/Kconfig @@ -319,14 +319,6 @@ config MTD_CFI_FLAGADM Mapping for the Flaga digital module. If you don't have one, ignore this setting. -config MTD_REDWOOD - tristate "CFI Flash devices mapped on IBM Redwood" - depends on MTD_CFI - help - This enables access routines for the flash chips on the IBM - Redwood board. If you have one of these boards and would like to - use the flash chips on it, say 'Y'. 
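[Illustrative aside, not part of the patch] The cfi_cmdset_0001.c and lpddr_cmds.c hunks above convert the flchip_shared lock from a spinlock to a mutex because the region it protects can now sleep: get_chip() may block on another chip's mutex or park on a wait queue while still holding shared->lock. A minimal sketch of that pattern follows, using stand-in types and a hypothetical function name rather than the driver's real ones:

	#include <linux/mutex.h>

	/* stand-ins for struct flchip / struct flchip_shared */
	struct ex_chip {
		struct mutex mutex;
	};

	struct ex_shared {
		struct mutex lock;	/* was spinlock_t before this change */
		struct ex_chip *writing;
	};

	static void ex_claim(struct ex_shared *shared, struct ex_chip *chip)
	{
		struct ex_chip *contender;

		mutex_lock(&shared->lock);
		contender = shared->writing;
		if (contender && contender != chip) {
			/*
			 * May sleep until the contender drops its mutex.  Under a
			 * spinlock this would be a sleep-in-atomic bug; with
			 * shared->lock as a mutex it is legal.  (The real driver
			 * uses mutex_trylock() and retries, which this sketch
			 * deliberately simplifies.)
			 */
			mutex_lock(&contender->mutex);
			mutex_unlock(&contender->mutex);
		}
		shared->writing = chip;
		mutex_unlock(&shared->lock);
	}
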
- config MTD_SOLUTIONENGINE tristate "CFI Flash device mapped on Hitachi SolutionEngine" depends on SUPERH && SOLUTION_ENGINE && MTD_CFI && MTD_REDBOOT_PARTS diff --git a/drivers/mtd/maps/Makefile b/drivers/mtd/maps/Makefile index bb035cd54c72..f216bb573713 100644 --- a/drivers/mtd/maps/Makefile +++ b/drivers/mtd/maps/Makefile @@ -44,7 +44,6 @@ obj-$(CONFIG_MTD_AUTCPU12) += autcpu12-nvram.o obj-$(CONFIG_MTD_EDB7312) += edb7312.o obj-$(CONFIG_MTD_IMPA7) += impa7.o obj-$(CONFIG_MTD_FORTUNET) += fortunet.o -obj-$(CONFIG_MTD_REDWOOD) += redwood.o obj-$(CONFIG_MTD_UCLINUX) += uclinux.o obj-$(CONFIG_MTD_NETtel) += nettel.o obj-$(CONFIG_MTD_SCB2_FLASH) += scb2_flash.o diff --git a/drivers/mtd/maps/ixp4xx.c b/drivers/mtd/maps/ixp4xx.c index e0a5e0426ead..1f9fde0dad35 100644 --- a/drivers/mtd/maps/ixp4xx.c +++ b/drivers/mtd/maps/ixp4xx.c @@ -118,7 +118,7 @@ static void ixp4xx_copy_from(struct map_info *map, void *to, *dest++ = BYTE1(data); src += 2; len -= 2; - } + } if (len > 0) *dest++ = BYTE0(flash_read16(src)); @@ -185,6 +185,8 @@ static int ixp4xx_flash_probe(struct platform_device *dev) { struct flash_platform_data *plat = dev->dev.platform_data; struct ixp4xx_flash_info *info; + const char *part_type = NULL; + int nr_parts = 0; int err = -1; if (!plat) @@ -218,9 +220,9 @@ static int ixp4xx_flash_probe(struct platform_device *dev) */ info->map.bankwidth = 2; info->map.name = dev_name(&dev->dev); - info->map.read = ixp4xx_read16, - info->map.write = ixp4xx_probe_write16, - info->map.copy_from = ixp4xx_copy_from, + info->map.read = ixp4xx_read16; + info->map.write = ixp4xx_probe_write16; + info->map.copy_from = ixp4xx_copy_from; info->res = request_mem_region(dev->resource->start, resource_size(dev->resource), @@ -248,11 +250,28 @@ static int ixp4xx_flash_probe(struct platform_device *dev) info->mtd->owner = THIS_MODULE; /* Use the fast version */ - info->map.write = ixp4xx_write16, + info->map.write = ixp4xx_write16; + +#ifdef CONFIG_MTD_PARTITIONS + nr_parts = parse_mtd_partitions(info->mtd, probes, &info->partitions, + dev->resource->start); +#endif + if (nr_parts > 0) { + part_type = "dynamic"; + } else { + info->partitions = plat->parts; + nr_parts = plat->nr_parts; + part_type = "static"; + } + if (nr_parts == 0) { + printk(KERN_NOTICE "IXP4xx flash: no partition info " + "available, registering whole flash\n"); + err = add_mtd_device(info->mtd); + } else { + printk(KERN_NOTICE "IXP4xx flash: using %s partition " + "definition\n", part_type); + err = add_mtd_partitions(info->mtd, info->partitions, nr_parts); - err = parse_mtd_partitions(info->mtd, probes, &info->partitions, dev->resource->start); - if (err > 0) { - err = add_mtd_partitions(info->mtd, info->partitions, err); if(err) printk(KERN_ERR "Could not parse partitions\n"); } diff --git a/drivers/mtd/maps/physmap.c b/drivers/mtd/maps/physmap.c index 426461a5f0d4..4c18b98a3110 100644 --- a/drivers/mtd/maps/physmap.c +++ b/drivers/mtd/maps/physmap.c @@ -106,12 +106,12 @@ static int physmap_flash_probe(struct platform_device *dev) for (i = 0; i < dev->num_resources; i++) { printk(KERN_NOTICE "physmap platform flash device: %.8llx at %.8llx\n", - (unsigned long long)(dev->resource[i].end - dev->resource[i].start + 1), + (unsigned long long)resource_size(&dev->resource[i]), (unsigned long long)dev->resource[i].start); if (!devm_request_mem_region(&dev->dev, dev->resource[i].start, - dev->resource[i].end - dev->resource[i].start + 1, + resource_size(&dev->resource[i]), dev_name(&dev->dev))) { dev_err(&dev->dev, "Could not reserve memory 
region\n"); err = -ENOMEM; @@ -120,7 +120,7 @@ static int physmap_flash_probe(struct platform_device *dev) info->map[i].name = dev_name(&dev->dev); info->map[i].phys = dev->resource[i].start; - info->map[i].size = dev->resource[i].end - dev->resource[i].start + 1; + info->map[i].size = resource_size(&dev->resource[i]); info->map[i].bankwidth = physmap_data->width; info->map[i].set_vpp = physmap_data->set_vpp; info->map[i].pfow_base = physmap_data->pfow_base; @@ -136,8 +136,12 @@ static int physmap_flash_probe(struct platform_device *dev) simple_map_init(&info->map[i]); probe_type = rom_probe_types; - for (; info->mtd[i] == NULL && *probe_type != NULL; probe_type++) - info->mtd[i] = do_map_probe(*probe_type, &info->map[i]); + if (physmap_data->probe_type == NULL) { + for (; info->mtd[i] == NULL && *probe_type != NULL; probe_type++) + info->mtd[i] = do_map_probe(*probe_type, &info->map[i]); + } else + info->mtd[i] = do_map_probe(physmap_data->probe_type, &info->map[i]); + if (info->mtd[i] == NULL) { dev_err(&dev->dev, "map_probe failed\n"); err = -ENXIO; diff --git a/drivers/mtd/maps/physmap_of.c b/drivers/mtd/maps/physmap_of.c index ba124baa646d..6ac5f9f28ac3 100644 --- a/drivers/mtd/maps/physmap_of.c +++ b/drivers/mtd/maps/physmap_of.c @@ -353,7 +353,7 @@ static int __devinit of_flash_probe(struct of_device *dev, &info->parts, 0); if (err < 0) { of_free_probes(part_probe_types); - return err; + goto err_out; } of_free_probes(part_probe_types); @@ -361,14 +361,14 @@ static int __devinit of_flash_probe(struct of_device *dev, if (err == 0) { err = of_mtd_parse_partitions(&dev->dev, dp, &info->parts); if (err < 0) - return err; + goto err_out; } #endif if (err == 0) { err = parse_obsolete_partitions(dev, info, dp); if (err < 0) - return err; + goto err_out; } if (err > 0) diff --git a/drivers/mtd/maps/redwood.c b/drivers/mtd/maps/redwood.c deleted file mode 100644 index d2c9db00db0c..000000000000 --- a/drivers/mtd/maps/redwood.c +++ /dev/null @@ -1,131 +0,0 @@ -/* - * drivers/mtd/maps/redwood.c - * - * FLASH map for the IBM Redwood 4/5/6 boards. - * - * Author: MontaVista Software, Inc. <source@mvista.com> - * - * 2001-2003 (c) MontaVista, Software, Inc. This file is licensed under - * the terms of the GNU General Public License version 2. This program - * is licensed "as is" without any warranty of any kind, whether express - * or implied. 
- */ - -#include <linux/module.h> -#include <linux/types.h> -#include <linux/kernel.h> -#include <linux/init.h> - -#include <linux/mtd/mtd.h> -#include <linux/mtd/map.h> -#include <linux/mtd/partitions.h> - -#include <asm/io.h> - -#define WINDOW_ADDR 0xffc00000 -#define WINDOW_SIZE 0x00400000 - -#define RW_PART0_OF 0 -#define RW_PART0_SZ 0x10000 -#define RW_PART1_OF RW_PART0_SZ -#define RW_PART1_SZ 0x200000 - 0x10000 -#define RW_PART2_OF 0x200000 -#define RW_PART2_SZ 0x10000 -#define RW_PART3_OF 0x210000 -#define RW_PART3_SZ 0x200000 - (0x10000 + 0x20000) -#define RW_PART4_OF 0x3e0000 -#define RW_PART4_SZ 0x20000 - -static struct mtd_partition redwood_flash_partitions[] = { - { - .name = "Redwood OpenBIOS Vital Product Data", - .offset = RW_PART0_OF, - .size = RW_PART0_SZ, - .mask_flags = MTD_WRITEABLE /* force read-only */ - }, - { - .name = "Redwood kernel", - .offset = RW_PART1_OF, - .size = RW_PART1_SZ - }, - { - .name = "Redwood OpenBIOS non-volatile storage", - .offset = RW_PART2_OF, - .size = RW_PART2_SZ, - .mask_flags = MTD_WRITEABLE /* force read-only */ - }, - { - .name = "Redwood filesystem", - .offset = RW_PART3_OF, - .size = RW_PART3_SZ - }, - { - .name = "Redwood OpenBIOS", - .offset = RW_PART4_OF, - .size = RW_PART4_SZ, - .mask_flags = MTD_WRITEABLE /* force read-only */ - } -}; - -struct map_info redwood_flash_map = { - .name = "IBM Redwood", - .size = WINDOW_SIZE, - .bankwidth = 2, - .phys = WINDOW_ADDR, -}; - - -#define NUM_REDWOOD_FLASH_PARTITIONS ARRAY_SIZE(redwood_flash_partitions) - -static struct mtd_info *redwood_mtd; - -static int __init init_redwood_flash(void) -{ - int err; - - printk(KERN_NOTICE "redwood: flash mapping: %x at %x\n", - WINDOW_SIZE, WINDOW_ADDR); - - redwood_flash_map.virt = ioremap(WINDOW_ADDR, WINDOW_SIZE); - - if (!redwood_flash_map.virt) { - printk("init_redwood_flash: failed to ioremap\n"); - return -EIO; - } - simple_map_init(&redwood_flash_map); - - redwood_mtd = do_map_probe("cfi_probe",&redwood_flash_map); - - if (redwood_mtd) { - redwood_mtd->owner = THIS_MODULE; - err = add_mtd_partitions(redwood_mtd, - redwood_flash_partitions, - NUM_REDWOOD_FLASH_PARTITIONS); - if (err) { - printk("init_redwood_flash: add_mtd_partitions failed\n"); - iounmap(redwood_flash_map.virt); - } - return err; - - } - - iounmap(redwood_flash_map.virt); - return -ENXIO; -} - -static void __exit cleanup_redwood_flash(void) -{ - if (redwood_mtd) { - del_mtd_partitions(redwood_mtd); - /* moved iounmap after map_destroy - armin */ - map_destroy(redwood_mtd); - iounmap((void *)redwood_flash_map.virt); - } -} - -module_init(init_redwood_flash); -module_exit(cleanup_redwood_flash); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("MontaVista Software <source@mvista.com>"); -MODULE_DESCRIPTION("MTD map driver for the IBM Redwood reference boards"); diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c index 03e19c1965cc..1d2144d77470 100644 --- a/drivers/mtd/mtd_blkdevs.c +++ b/drivers/mtd/mtd_blkdevs.c @@ -1,7 +1,21 @@ /* - * (C) 2003 David Woodhouse <dwmw2@infradead.org> + * Interface to Linux block layer for MTD 'translation layers'. * - * Interface to Linux 2.5 block layer for MTD 'translation layers'. + * Copyright © 2003-2010 David Woodhouse <dwmw2@infradead.org> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * */ @@ -245,6 +259,7 @@ static int blktrans_ioctl(struct block_device *bdev, fmode_t mode, switch (cmd) { case BLKFLSBUF: ret = dev->tr->flush ? dev->tr->flush(dev) : 0; + break; default: ret = -ENOTTY; } @@ -409,13 +424,14 @@ int del_mtd_blktrans_dev(struct mtd_blktrans_dev *old) BUG(); } - /* Stop new requests to arrive */ - del_gendisk(old->disk); - if (old->disk_attributes) sysfs_remove_group(&disk_to_dev(old->disk)->kobj, old->disk_attributes); + /* Stop new requests to arrive */ + del_gendisk(old->disk); + + /* Stop the thread */ kthread_stop(old->thread); diff --git a/drivers/mtd/mtdblock.c b/drivers/mtd/mtdblock.c index e6edbec609fd..1e74ad961040 100644 --- a/drivers/mtd/mtdblock.c +++ b/drivers/mtd/mtdblock.c @@ -1,8 +1,23 @@ /* * Direct MTD block device access * - * (C) 2000-2003 Nicolas Pitre <nico@fluxnic.net> - * (C) 1999-2003 David Woodhouse <dwmw2@infradead.org> + * Copyright © 1999-2010 David Woodhouse <dwmw2@infradead.org> + * Copyright © 2000-2003 Nicolas Pitre <nico@fluxnic.net> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * */ #include <linux/fs.h> diff --git a/drivers/mtd/mtdblock_ro.c b/drivers/mtd/mtdblock_ro.c index d0d3f79f9d03..795a8c0a05b8 100644 --- a/drivers/mtd/mtdblock_ro.c +++ b/drivers/mtd/mtdblock_ro.c @@ -1,7 +1,22 @@ /* - * (C) 2003 David Woodhouse <dwmw2@infradead.org> - * * Simple read-only (writable only for RAM) mtdblock driver + * + * Copyright © 2001-2010 David Woodhouse <dwmw2@infradead.org> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * */ #include <linux/init.h> diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c index 91c8013cf0d9..a825002123c8 100644 --- a/drivers/mtd/mtdchar.c +++ b/drivers/mtd/mtdchar.c @@ -1,5 +1,19 @@ /* - * Character-device access to raw MTD devices. + * Copyright © 1999-2010 David Woodhouse <dwmw2@infradead.org> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * */ @@ -18,7 +32,7 @@ #include <linux/mount.h> #include <linux/mtd/mtd.h> -#include <linux/mtd/compatmac.h> +#include <linux/mtd/map.h> #include <asm/uaccess.h> @@ -675,6 +689,20 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg) break; } + case MEMISLOCKED: + { + struct erase_info_user einfo; + + if (copy_from_user(&einfo, argp, sizeof(einfo))) + return -EFAULT; + + if (!mtd->is_locked) + ret = -EOPNOTSUPP; + else + ret = mtd->is_locked(mtd, einfo.start, einfo.length); + break; + } + /* Legacy interface */ case MEMGETOOBSEL: { @@ -950,9 +978,34 @@ static int mtd_mmap(struct file *file, struct vm_area_struct *vma) #ifdef CONFIG_MMU struct mtd_file_info *mfi = file->private_data; struct mtd_info *mtd = mfi->mtd; + struct map_info *map = mtd->priv; + unsigned long start; + unsigned long off; + u32 len; + + if (mtd->type == MTD_RAM || mtd->type == MTD_ROM) { + off = vma->vm_pgoff << PAGE_SHIFT; + start = map->phys; + len = PAGE_ALIGN((start & ~PAGE_MASK) + map->size); + start &= PAGE_MASK; + if ((vma->vm_end - vma->vm_start + off) > len) + return -EINVAL; + + off += start; + vma->vm_pgoff = off >> PAGE_SHIFT; + vma->vm_flags |= VM_IO | VM_RESERVED; + +#ifdef pgprot_noncached + if (file->f_flags & O_DSYNC || off >= __pa(high_memory)) + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); +#endif + if (io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT, + vma->vm_end - vma->vm_start, + vma->vm_page_prot)) + return -EAGAIN; - if (mtd->type == MTD_RAM || mtd->type == MTD_ROM) return 0; + } return -ENOSYS; #else return vma->vm_flags & VM_SHARED ? 
0 : -ENOSYS; diff --git a/drivers/mtd/mtdconcat.c b/drivers/mtd/mtdconcat.c index 7e075621bbf4..bf8de0943103 100644 --- a/drivers/mtd/mtdconcat.c +++ b/drivers/mtd/mtdconcat.c @@ -1,11 +1,25 @@ /* * MTD device concatenation layer * - * (C) 2002 Robert Kaiser <rkaiser@sysgo.de> + * Copyright © 2002 Robert Kaiser <rkaiser@sysgo.de> + * Copyright © 2002-2010 David Woodhouse <dwmw2@infradead.org> * * NAND support by Christian Gan <cgan@iders.ca> * - * This code is GPL + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * */ #include <linux/kernel.h> @@ -540,10 +554,12 @@ static int concat_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len) else size = len; - err = subdev->lock(subdev, ofs, size); - - if (err) - break; + if (subdev->lock) { + err = subdev->lock(subdev, ofs, size); + if (err) + break; + } else + err = -EOPNOTSUPP; len -= size; if (len == 0) @@ -578,10 +594,12 @@ static int concat_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len) else size = len; - err = subdev->unlock(subdev, ofs, size); - - if (err) - break; + if (subdev->unlock) { + err = subdev->unlock(subdev, ofs, size); + if (err) + break; + } else + err = -EOPNOTSUPP; len -= size; if (len == 0) diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c index a1b8b70d2d0a..527cebf58da4 100644 --- a/drivers/mtd/mtdcore.c +++ b/drivers/mtd/mtdcore.c @@ -2,9 +2,23 @@ * Core registration and callback routines for MTD * drivers and users. * - * bdi bits are: - * Copyright © 2006 Red Hat, Inc. All Rights Reserved. - * Written by David Howells (dhowells@redhat.com) + * Copyright © 1999-2010 David Woodhouse <dwmw2@infradead.org> + * Copyright © 2006 Red Hat UK Limited + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * */ #include <linux/module.h> @@ -17,7 +31,6 @@ #include <linux/err.h> #include <linux/ioctl.h> #include <linux/init.h> -#include <linux/mtd/compatmac.h> #include <linux/proc_fs.h> #include <linux/idr.h> #include <linux/backing-dev.h> diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c index 328313c3dccb..1ee72f3f0512 100644 --- a/drivers/mtd/mtdoops.c +++ b/drivers/mtd/mtdoops.c @@ -1,7 +1,7 @@ /* * MTD Oops/Panic logger * - * Copyright (C) 2007 Nokia Corporation. 
All rights reserved. + * Copyright © 2007 Nokia Corporation. All rights reserved. * * Author: Richard Purdie <rpurdie@openedhand.com> * diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c index b8043a9ba32d..dc6558568876 100644 --- a/drivers/mtd/mtdpart.c +++ b/drivers/mtd/mtdpart.c @@ -1,12 +1,24 @@ /* * Simple MTD partitioning layer * - * (C) 2000 Nicolas Pitre <nico@fluxnic.net> + * Copyright © 2000 Nicolas Pitre <nico@fluxnic.net> + * Copyright © 2002 Thomas Gleixner <gleixner@linutronix.de> + * Copyright © 2000-2010 David Woodhouse <dwmw2@infradead.org> * - * This code is GPL + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * - * 02-21-2002 Thomas Gleixner <gleixner@autronix.de> - * added support for read_oob, write_oob */ #include <linux/module.h> @@ -17,7 +29,6 @@ #include <linux/kmod.h> #include <linux/mtd/mtd.h> #include <linux/mtd/partitions.h> -#include <linux/mtd/compatmac.h> /* Our partition linked list */ static LIST_HEAD(mtd_partitions); @@ -264,6 +275,14 @@ static int part_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len) return part->master->unlock(part->master, ofs + part->offset, len); } +static int part_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len) +{ + struct mtd_part *part = PART(mtd); + if ((len + ofs) > mtd->size) + return -EINVAL; + return part->master->is_locked(part->master, ofs + part->offset, len); +} + static void part_sync(struct mtd_info *mtd) { struct mtd_part *part = PART(mtd); @@ -402,6 +421,8 @@ static struct mtd_part *add_one_partition(struct mtd_info *master, slave->mtd.lock = part_lock; if (master->unlock) slave->mtd.unlock = part_unlock; + if (master->is_locked) + slave->mtd.is_locked = part_is_locked; if (master->block_isbad) slave->mtd.block_isbad = part_block_isbad; if (master->block_markbad) diff --git a/drivers/mtd/mtdsuper.c b/drivers/mtd/mtdsuper.c index bd9a443ccf69..38e2ab07e7a3 100644 --- a/drivers/mtd/mtdsuper.c +++ b/drivers/mtd/mtdsuper.c @@ -1,6 +1,8 @@ /* MTD-based superblock management * * Copyright © 2001-2007 Red Hat, Inc. All Rights Reserved. 
+ * Copyright © 2001-2010 David Woodhouse <dwmw2@infradead.org> + * * Written by: David Howells <dhowells@redhat.com> * David Woodhouse <dwmw2@infradead.org> * diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig index 362d177efe1b..8b4b67c8a391 100644 --- a/drivers/mtd/nand/Kconfig +++ b/drivers/mtd/nand/Kconfig @@ -37,7 +37,6 @@ config MTD_SM_COMMON config MTD_NAND_MUSEUM_IDS bool "Enable chip ids for obsolete ancient NAND devices" - depends on MTD_NAND default n help Enable this option only when your board has first generation @@ -61,6 +60,7 @@ config MTD_NAND_DENALI config MTD_NAND_DENALI_SCRATCH_REG_ADDR hex "Denali NAND size scratch register address" default "0xFF108018" + depends on MTD_NAND_DENALI help Some platforms place the NAND chip size in a scratch register because (some versions of) the driver aren't able to automatically @@ -101,13 +101,13 @@ config MTD_NAND_AMS_DELTA config MTD_NAND_OMAP2 tristate "NAND Flash device on OMAP2 and OMAP3" - depends on ARM && MTD_NAND && (ARCH_OMAP2 || ARCH_OMAP3) + depends on ARM && (ARCH_OMAP2 || ARCH_OMAP3) help Support for NAND flash on Texas Instruments OMAP2 and OMAP3 platforms. config MTD_NAND_OMAP_PREFETCH bool "GPMC prefetch support for NAND Flash device" - depends on MTD_NAND && MTD_NAND_OMAP2 + depends on MTD_NAND_OMAP2 default y help The NAND device can be accessed for Read/Write using GPMC PREFETCH engine @@ -146,7 +146,7 @@ config MTD_NAND_AU1550 config MTD_NAND_BF5XX tristate "Blackfin on-chip NAND Flash Controller driver" - depends on (BF54x || BF52x) && MTD_NAND + depends on BF54x || BF52x help This enables the Blackfin on-chip NAND flash controller @@ -236,7 +236,7 @@ config MTD_NAND_S3C2410_CLKSTOP config MTD_NAND_BCM_UMI tristate "NAND Flash support for BCM Reference Boards" - depends on ARCH_BCMRING && MTD_NAND + depends on ARCH_BCMRING help This enables the NAND flash controller on the BCM UMI block. @@ -395,7 +395,7 @@ endchoice config MTD_NAND_PXA3xx tristate "Support for NAND flash devices on PXA3xx" - depends on MTD_NAND && (PXA3xx || ARCH_MMP) + depends on PXA3xx || ARCH_MMP help This enables the driver for the NAND flash device found on PXA3xx processors @@ -409,18 +409,18 @@ config MTD_NAND_PXA3xx_BUILTIN config MTD_NAND_CM_X270 tristate "Support for NAND Flash on CM-X270 modules" - depends on MTD_NAND && MACH_ARMCORE + depends on MACH_ARMCORE config MTD_NAND_PASEMI tristate "NAND support for PA Semi PWRficient" - depends on MTD_NAND && PPC_PASEMI + depends on PPC_PASEMI help Enables support for NAND Flash interface on PA Semi PWRficient based boards config MTD_NAND_TMIO tristate "NAND Flash device on Toshiba Mobile IO Controller" - depends on MTD_NAND && MFD_TMIO + depends on MFD_TMIO help Support for NAND flash connected to a Toshiba Mobile IO Controller in some PDAs, including the Sharp SL6000x. @@ -434,7 +434,6 @@ config MTD_NAND_NANDSIM config MTD_NAND_PLATFORM tristate "Support for generic platform NAND driver" - depends on MTD_NAND help This implements a generic NAND driver for on-SOC platform devices. You will need to provide platform-specific functions @@ -442,14 +441,14 @@ config MTD_NAND_PLATFORM config MTD_ALAUDA tristate "MTD driver for Olympus MAUSB-10 and Fujifilm DPC-R1" - depends on MTD_NAND && USB + depends on USB help These two (and possibly other) Alauda-based cardreaders for SmartMedia and xD allow raw flash access. 
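[Illustrative aside, not part of the patch] The MEMISLOCKED ioctl and the mtd->is_locked hook wired up in the mtdchar.c, mtdpart.c and cfi_cmdset_0001.c hunks above can be exercised from user space roughly as follows. The device node and region length are examples only; a driver that does not provide is_locked makes the ioctl fail with EOPNOTSUPP, matching the new ioctl case, and the positive return convention here follows cfi_intelext_is_locked(), which reports 1 or 0.

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <mtd/mtd-user.h>	/* struct erase_info_user, MEMISLOCKED */

	int main(void)
	{
		/* query the first 128 KiB of an example device */
		struct erase_info_user ei = { .start = 0, .length = 128 * 1024 };
		int fd = open("/dev/mtd0", O_RDONLY);
		int ret;

		if (fd < 0)
			return 1;
		ret = ioctl(fd, MEMISLOCKED, &ei);
		if (ret < 0)
			perror("MEMISLOCKED");	/* EOPNOTSUPP if the driver lacks is_locked */
		else
			/* nonzero: some block in the range is locked; 0: none locked */
			printf("region is %slocked\n", ret ? "" : "not ");
		close(fd);
		return 0;
	}
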
config MTD_NAND_ORION tristate "NAND Flash support for Marvell Orion SoC" - depends on PLAT_ORION && MTD_NAND + depends on PLAT_ORION help This enables the NAND flash controller on Orion machines. @@ -458,7 +457,7 @@ config MTD_NAND_ORION config MTD_NAND_FSL_ELBC tristate "NAND support for Freescale eLBC controllers" - depends on MTD_NAND && PPC_OF + depends on PPC_OF help Various Freescale chips, including the 8313, include a NAND Flash Controller Module with built-in hardware ECC capabilities. @@ -467,7 +466,7 @@ config MTD_NAND_FSL_ELBC config MTD_NAND_FSL_UPM tristate "Support for NAND on Freescale UPM" - depends on MTD_NAND && (PPC_83xx || PPC_85xx) + depends on PPC_83xx || PPC_85xx select FSL_LBC help Enables support for NAND Flash chips wired onto Freescale PowerPC @@ -482,7 +481,7 @@ config MTD_NAND_MPC5121_NFC config MTD_NAND_MXC tristate "MXC NAND support" - depends on ARCH_MX2 || ARCH_MX25 || ARCH_MX3 + depends on ARCH_MX2 || ARCH_MX25 || ARCH_MX3 || ARCH_MX51 help This enables the driver for the NAND flash controller on the MXC processors. @@ -495,7 +494,7 @@ config MTD_NAND_NOMADIK config MTD_NAND_SH_FLCTL tristate "Support for NAND on Renesas SuperH FLCTL" - depends on MTD_NAND && (SUPERH || ARCH_SHMOBILE) + depends on SUPERH || ARCH_SHMOBILE help Several Renesas SuperH CPU has FLCTL. This option enables support for NAND Flash using FLCTL. @@ -515,7 +514,7 @@ config MTD_NAND_TXX9NDFMC config MTD_NAND_SOCRATES tristate "Support for NAND on Socrates board" - depends on MTD_NAND && SOCRATES + depends on SOCRATES help Enables support for NAND Flash chips wired onto Socrates board. diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c index 04d30887ca7f..ccce0f03b5dc 100644 --- a/drivers/mtd/nand/atmel_nand.c +++ b/drivers/mtd/nand/atmel_nand.c @@ -364,7 +364,7 @@ static void atmel_nand_hwctl(struct mtd_info *mtd, int mode) } } -#ifdef CONFIG_MTD_PARTITIONS +#ifdef CONFIG_MTD_CMDLINE_PARTS static const char *part_probes[] = { "cmdlinepart", NULL }; #endif diff --git a/drivers/mtd/nand/bf5xx_nand.c b/drivers/mtd/nand/bf5xx_nand.c index 2974995e194d..a382e3dd0a5d 100644 --- a/drivers/mtd/nand/bf5xx_nand.c +++ b/drivers/mtd/nand/bf5xx_nand.c @@ -20,9 +20,6 @@ * - DMA supported in ECC_HW * - YAFFS tested as rootfs in both ECC_HW and ECC_SW * - * TODO: - * Enable JFFS2 over NAND as rootfs - * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or @@ -206,7 +203,7 @@ static void bf5xx_nand_hwcontrol(struct mtd_info *mtd, int cmd, if (ctrl & NAND_CLE) bfin_write_NFC_CMD(cmd); - else + else if (ctrl & NAND_ALE) bfin_write_NFC_ADDR(cmd); SSYNC(); } @@ -218,9 +215,9 @@ static void bf5xx_nand_hwcontrol(struct mtd_info *mtd, int cmd, */ static int bf5xx_nand_devready(struct mtd_info *mtd) { - unsigned short val = bfin_read_NFC_IRQSTAT(); + unsigned short val = bfin_read_NFC_STAT(); - if ((val & NBUSYIRQ) == NBUSYIRQ) + if ((val & NBUSY) == NBUSY) return 1; else return 0; @@ -317,18 +314,16 @@ static int bf5xx_nand_correct_data_256(struct mtd_info *mtd, u_char *dat, static int bf5xx_nand_correct_data(struct mtd_info *mtd, u_char *dat, u_char *read_ecc, u_char *calc_ecc) { - struct bf5xx_nand_info *info = mtd_to_nand_info(mtd); - struct bf5xx_nand_platform *plat = info->platform; - unsigned short page_size = (plat->page_size ? 
512 : 256); + struct nand_chip *chip = mtd->priv; int ret; ret = bf5xx_nand_correct_data_256(mtd, dat, read_ecc, calc_ecc); - /* If page size is 512, correct second 256 bytes */ - if (page_size == 512) { + /* If ecc size is 512, correct second 256 bytes */ + if (chip->ecc.size == 512) { dat += 256; - read_ecc += 8; - calc_ecc += 8; + read_ecc += 3; + calc_ecc += 3; ret |= bf5xx_nand_correct_data_256(mtd, dat, read_ecc, calc_ecc); } @@ -344,13 +339,12 @@ static int bf5xx_nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat, u_char *ecc_code) { struct bf5xx_nand_info *info = mtd_to_nand_info(mtd); - struct bf5xx_nand_platform *plat = info->platform; - u16 page_size = (plat->page_size ? 512 : 256); + struct nand_chip *chip = mtd->priv; u16 ecc0, ecc1; u32 code[2]; u8 *p; - /* first 4 bytes ECC code for 256 page size */ + /* first 3 bytes ECC code for 256 page size */ ecc0 = bfin_read_NFC_ECC0(); ecc1 = bfin_read_NFC_ECC1(); @@ -358,12 +352,11 @@ static int bf5xx_nand_calculate_ecc(struct mtd_info *mtd, dev_dbg(info->device, "returning ecc 0x%08x\n", code[0]); - /* first 3 bytes in ecc_code for 256 page size */ p = (u8 *) code; memcpy(ecc_code, p, 3); - /* second 4 bytes ECC code for 512 page size */ - if (page_size == 512) { + /* second 3 bytes ECC code for 512 ecc size */ + if (chip->ecc.size == 512) { ecc0 = bfin_read_NFC_ECC2(); ecc1 = bfin_read_NFC_ECC3(); code[1] = (ecc0 & 0x7ff) | ((ecc1 & 0x7ff) << 11); @@ -483,8 +476,7 @@ static void bf5xx_nand_dma_rw(struct mtd_info *mtd, uint8_t *buf, int is_read) { struct bf5xx_nand_info *info = mtd_to_nand_info(mtd); - struct bf5xx_nand_platform *plat = info->platform; - unsigned short page_size = (plat->page_size ? 512 : 256); + struct nand_chip *chip = mtd->priv; unsigned short val; dev_dbg(info->device, " mtd->%p, buf->%p, is_read %d\n", @@ -498,10 +490,10 @@ static void bf5xx_nand_dma_rw(struct mtd_info *mtd, */ if (is_read) invalidate_dcache_range((unsigned int)buf, - (unsigned int)(buf + page_size)); + (unsigned int)(buf + chip->ecc.size)); else flush_dcache_range((unsigned int)buf, - (unsigned int)(buf + page_size)); + (unsigned int)(buf + chip->ecc.size)); /* * This register must be written before each page is @@ -510,6 +502,8 @@ static void bf5xx_nand_dma_rw(struct mtd_info *mtd, */ bfin_write_NFC_RST(ECC_RST); SSYNC(); + while (bfin_read_NFC_RST() & ECC_RST) + cpu_relax(); disable_dma(CH_NFC); clear_dma_irqstat(CH_NFC); @@ -520,13 +514,13 @@ static void bf5xx_nand_dma_rw(struct mtd_info *mtd, /* The DMAs have different size on BF52x and BF54x */ #ifdef CONFIG_BF52x - set_dma_x_count(CH_NFC, (page_size >> 1)); + set_dma_x_count(CH_NFC, (chip->ecc.size >> 1)); set_dma_x_modify(CH_NFC, 2); val = DI_EN | WDSIZE_16; #endif #ifdef CONFIG_BF54x - set_dma_x_count(CH_NFC, (page_size >> 2)); + set_dma_x_count(CH_NFC, (chip->ecc.size >> 2)); set_dma_x_modify(CH_NFC, 4); val = DI_EN | WDSIZE_32; #endif @@ -548,12 +542,11 @@ static void bf5xx_nand_dma_read_buf(struct mtd_info *mtd, uint8_t *buf, int len) { struct bf5xx_nand_info *info = mtd_to_nand_info(mtd); - struct bf5xx_nand_platform *plat = info->platform; - unsigned short page_size = (plat->page_size ? 
512 : 256); + struct nand_chip *chip = mtd->priv; dev_dbg(info->device, "mtd->%p, buf->%p, int %d\n", mtd, buf, len); - if (len == page_size) + if (len == chip->ecc.size) bf5xx_nand_dma_rw(mtd, buf, 1); else bf5xx_nand_read_buf(mtd, buf, len); @@ -563,17 +556,32 @@ static void bf5xx_nand_dma_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len) { struct bf5xx_nand_info *info = mtd_to_nand_info(mtd); - struct bf5xx_nand_platform *plat = info->platform; - unsigned short page_size = (plat->page_size ? 512 : 256); + struct nand_chip *chip = mtd->priv; dev_dbg(info->device, "mtd->%p, buf->%p, len %d\n", mtd, buf, len); - if (len == page_size) + if (len == chip->ecc.size) bf5xx_nand_dma_rw(mtd, (uint8_t *)buf, 0); else bf5xx_nand_write_buf(mtd, buf, len); } +static int bf5xx_nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip, + uint8_t *buf, int page) +{ + bf5xx_nand_read_buf(mtd, buf, mtd->writesize); + bf5xx_nand_read_buf(mtd, chip->oob_poi, mtd->oobsize); + + return 0; +} + +static void bf5xx_nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip, + const uint8_t *buf) +{ + bf5xx_nand_write_buf(mtd, buf, mtd->writesize); + bf5xx_nand_write_buf(mtd, chip->oob_poi, mtd->oobsize); +} + /* * System initialization functions */ @@ -627,15 +635,14 @@ static int bf5xx_nand_hw_init(struct bf5xx_nand_info *info) /* setup NFC_CTL register */ dev_info(info->device, - "page_size=%d, data_width=%d, wr_dly=%d, rd_dly=%d\n", - (plat->page_size ? 512 : 256), + "data_width=%d, wr_dly=%d, rd_dly=%d\n", (plat->data_width ? 16 : 8), plat->wr_dly, plat->rd_dly); - val = (plat->page_size << NFC_PG_SIZE_OFFSET) | + val = (1 << NFC_PG_SIZE_OFFSET) | (plat->data_width << NFC_NWIDTH_OFFSET) | (plat->rd_dly << NFC_RDDLY_OFFSET) | - (plat->rd_dly << NFC_WRDLY_OFFSET); + (plat->wr_dly << NFC_WRDLY_OFFSET); dev_dbg(info->device, "NFC_CTL is 0x%04x\n", val); bfin_write_NFC_CTL(val); @@ -698,6 +705,33 @@ static int __devexit bf5xx_nand_remove(struct platform_device *pdev) return 0; } +static int bf5xx_nand_scan(struct mtd_info *mtd) +{ + struct nand_chip *chip = mtd->priv; + int ret; + + ret = nand_scan_ident(mtd, 1); + if (ret) + return ret; + + if (hardware_ecc) { + /* + * for nand with page size > 512B, think it as several sections with 512B + */ + if (likely(mtd->writesize >= 512)) { + chip->ecc.size = 512; + chip->ecc.bytes = 6; + } else { + chip->ecc.size = 256; + chip->ecc.bytes = 3; + bfin_write_NFC_CTL(bfin_read_NFC_CTL() & ~(1 << NFC_PG_SIZE_OFFSET)); + SSYNC(); + } + } + + return nand_scan_tail(mtd); +} + /* * bf5xx_nand_probe * @@ -783,27 +817,20 @@ static int __devinit bf5xx_nand_probe(struct platform_device *pdev) chip->badblock_pattern = &bootrom_bbt; chip->ecc.layout = &bootrom_ecclayout; #endif - - if (plat->page_size == NFC_PG_SIZE_256) { - chip->ecc.bytes = 3; - chip->ecc.size = 256; - } else if (plat->page_size == NFC_PG_SIZE_512) { - chip->ecc.bytes = 6; - chip->ecc.size = 512; - } - chip->read_buf = bf5xx_nand_dma_read_buf; chip->write_buf = bf5xx_nand_dma_write_buf; chip->ecc.calculate = bf5xx_nand_calculate_ecc; chip->ecc.correct = bf5xx_nand_correct_data; chip->ecc.mode = NAND_ECC_HW; chip->ecc.hwctl = bf5xx_nand_enable_hwecc; + chip->ecc.read_page_raw = bf5xx_nand_read_page_raw; + chip->ecc.write_page_raw = bf5xx_nand_write_page_raw; } else { chip->ecc.mode = NAND_ECC_SOFT; } /* scan hardware nand chip and setup mtd info data struct */ - if (nand_scan(mtd, 1)) { + if (bf5xx_nand_scan(mtd)) { err = -ENXIO; goto out_err_nand_scan; } diff --git 
a/drivers/mtd/nand/davinci_nand.c b/drivers/mtd/nand/davinci_nand.c index 9c9d893affeb..2ac7367afe77 100644 --- a/drivers/mtd/nand/davinci_nand.c +++ b/drivers/mtd/nand/davinci_nand.c @@ -311,7 +311,9 @@ static int nand_davinci_correct_4bit(struct mtd_info *mtd, unsigned short ecc10[8]; unsigned short *ecc16; u32 syndrome[4]; + u32 ecc_state; unsigned num_errors, corrected; + unsigned long timeo = jiffies + msecs_to_jiffies(100); /* All bytes 0xff? It's an erased page; ignore its ECC. */ for (i = 0; i < 10; i++) { @@ -361,6 +363,21 @@ compare: */ davinci_nand_writel(info, NANDFCR_OFFSET, davinci_nand_readl(info, NANDFCR_OFFSET) | BIT(13)); + + /* + * ECC_STATE field reads 0x3 (Error correction complete) immediately + * after setting the 4BITECC_ADD_CALC_START bit. So if you immediately + * begin trying to poll for the state, you may fall right out of your + * loop without any of the correction calculations having taken place. + * The recommendation from the hardware team is to wait till ECC_STATE + * reads less than 4, which means ECC HW has entered correction state. + */ + do { + ecc_state = (davinci_nand_readl(info, + NANDFSR_OFFSET) >> 8) & 0x0f; + cpu_relax(); + } while ((ecc_state < 4) && time_before(jiffies, timeo)); + for (;;) { u32 fsr = davinci_nand_readl(info, NANDFSR_OFFSET); diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c index 3dfda9cc677d..618fb42b86b0 100644 --- a/drivers/mtd/nand/denali.c +++ b/drivers/mtd/nand/denali.c @@ -21,6 +21,7 @@ #include <linux/delay.h> #include <linux/wait.h> #include <linux/mutex.h> +#include <linux/slab.h> #include <linux/pci.h> #include <linux/mtd/mtd.h> #include <linux/module.h> @@ -29,15 +30,15 @@ MODULE_LICENSE("GPL"); -/* We define a module parameter that allows the user to override +/* We define a module parameter that allows the user to override * the hardware and decide what timing mode should be used. */ #define NAND_DEFAULT_TIMINGS -1 static int onfi_timing_mode = NAND_DEFAULT_TIMINGS; module_param(onfi_timing_mode, int, S_IRUGO); -MODULE_PARM_DESC(onfi_timing_mode, "Overrides default ONFI setting. -1 indicates" - " use default timings"); +MODULE_PARM_DESC(onfi_timing_mode, "Overrides default ONFI setting." + " -1 indicates use default timings"); #define DENALI_NAND_NAME "denali-nand" @@ -54,13 +55,13 @@ MODULE_PARM_DESC(onfi_timing_mode, "Overrides default ONFI setting. -1 indicates INTR_STATUS0__RST_COMP | \ INTR_STATUS0__ERASE_COMP) -/* indicates whether or not the internal value for the flash bank is +/* indicates whether or not the internal value for the flash bank is valid or not */ -#define CHIP_SELECT_INVALID -1 +#define CHIP_SELECT_INVALID -1 #define SUPPORT_8BITECC 1 -/* This macro divides two integers and rounds fractional values up +/* This macro divides two integers and rounds fractional values up * to the nearest integer value. */ #define CEIL_DIV(X, Y) (((X)%(Y)) ? ((X)/(Y)+1) : ((X)/(Y))) @@ -83,7 +84,7 @@ MODULE_PARM_DESC(onfi_timing_mode, "Overrides default ONFI setting. -1 indicates #define ADDR_CYCLE 1 #define STATUS_CYCLE 2 -/* this is a helper macro that allows us to +/* this is a helper macro that allows us to * format the bank into the proper bits for the controller */ #define BANK(x) ((x) << 24) @@ -95,59 +96,64 @@ static const struct pci_device_id denali_pci_ids[] = { }; -/* these are static lookup tables that give us easy access to - registers in the NAND controller. +/* these are static lookup tables that give us easy access to + registers in the NAND controller. 
*/ -static const uint32_t intr_status_addresses[4] = {INTR_STATUS0, - INTR_STATUS1, - INTR_STATUS2, +static const uint32_t intr_status_addresses[4] = {INTR_STATUS0, + INTR_STATUS1, + INTR_STATUS2, INTR_STATUS3}; static const uint32_t device_reset_banks[4] = {DEVICE_RESET__BANK0, - DEVICE_RESET__BANK1, - DEVICE_RESET__BANK2, - DEVICE_RESET__BANK3}; + DEVICE_RESET__BANK1, + DEVICE_RESET__BANK2, + DEVICE_RESET__BANK3}; static const uint32_t operation_timeout[4] = {INTR_STATUS0__TIME_OUT, - INTR_STATUS1__TIME_OUT, - INTR_STATUS2__TIME_OUT, - INTR_STATUS3__TIME_OUT}; + INTR_STATUS1__TIME_OUT, + INTR_STATUS2__TIME_OUT, + INTR_STATUS3__TIME_OUT}; static const uint32_t reset_complete[4] = {INTR_STATUS0__RST_COMP, - INTR_STATUS1__RST_COMP, - INTR_STATUS2__RST_COMP, - INTR_STATUS3__RST_COMP}; + INTR_STATUS1__RST_COMP, + INTR_STATUS2__RST_COMP, + INTR_STATUS3__RST_COMP}; /* specifies the debug level of the driver */ -static int nand_debug_level = 0; +static int nand_debug_level; /* forward declarations */ static void clear_interrupts(struct denali_nand_info *denali); -static uint32_t wait_for_irq(struct denali_nand_info *denali, uint32_t irq_mask); -static void denali_irq_enable(struct denali_nand_info *denali, uint32_t int_mask); +static uint32_t wait_for_irq(struct denali_nand_info *denali, + uint32_t irq_mask); +static void denali_irq_enable(struct denali_nand_info *denali, + uint32_t int_mask); static uint32_t read_interrupt_status(struct denali_nand_info *denali); #define DEBUG_DENALI 0 /* This is a wrapper for writing to the denali registers. * this allows us to create debug information so we can - * observe how the driver is programming the device. + * observe how the driver is programming the device. * it uses standard linux convention for (val, addr) */ static void denali_write32(uint32_t value, void *addr) { - iowrite32(value, addr); + iowrite32(value, addr); #if DEBUG_DENALI - printk(KERN_ERR "wrote: 0x%x -> 0x%x\n", value, (uint32_t)((uint32_t)addr & 0x1fff)); + printk(KERN_INFO "wrote: 0x%x -> 0x%x\n", value, + (uint32_t)((uint32_t)addr & 0x1fff)); #endif -} +} -/* Certain operations for the denali NAND controller use an indexed mode to read/write - data. The operation is performed by writing the address value of the command to - the device memory followed by the data. This function abstracts this common - operation. +/* Certain operations for the denali NAND controller use + * an indexed mode to read/write data. The operation is + * performed by writing the address value of the command + * to the device memory followed by the data. This function + * abstracts this common operation. */ -static void index_addr(struct denali_nand_info *denali, uint32_t address, uint32_t data) +static void index_addr(struct denali_nand_info *denali, + uint32_t address, uint32_t data) { denali_write32(address, denali->flash_mem); denali_write32(data, denali->flash_mem + 0x10); @@ -161,7 +167,7 @@ static void index_addr_read_data(struct denali_nand_info *denali, *pdata = ioread32(denali->flash_mem + 0x10); } -/* We need to buffer some data for some of the NAND core routines. +/* We need to buffer some data for some of the NAND core routines. * The operations manage buffering that data. 
*/ static void reset_buf(struct denali_nand_info *denali) { @@ -183,7 +189,7 @@ static void read_status(struct denali_nand_info *denali) reset_buf(denali); /* initiate a device status read */ - cmd = MODE_11 | BANK(denali->flash_bank); + cmd = MODE_11 | BANK(denali->flash_bank); index_addr(denali, cmd | COMMAND_CYCLE, 0x70); denali_write32(cmd | STATUS_CYCLE, denali->flash_mem); @@ -191,7 +197,8 @@ static void read_status(struct denali_nand_info *denali) write_byte_to_buf(denali, ioread32(denali->flash_mem + 0x10)); #if DEBUG_DENALI - printk("device reporting status value of 0x%2x\n", denali->buf.buf[0]); + printk(KERN_INFO "device reporting status value of 0x%2x\n", + denali->buf.buf[0]); #endif } @@ -199,7 +206,7 @@ static void read_status(struct denali_nand_info *denali) static void reset_bank(struct denali_nand_info *denali) { uint32_t irq_status = 0; - uint32_t irq_mask = reset_complete[denali->flash_bank] | + uint32_t irq_mask = reset_complete[denali->flash_bank] | operation_timeout[denali->flash_bank]; int bank = 0; @@ -209,15 +216,13 @@ static void reset_bank(struct denali_nand_info *denali) denali_write32(bank, denali->flash_reg + DEVICE_RESET); irq_status = wait_for_irq(denali, irq_mask); - + if (irq_status & operation_timeout[denali->flash_bank]) - { printk(KERN_ERR "reset bank failed.\n"); - } } /* Reset the flash controller */ -static uint16_t NAND_Flash_Reset(struct denali_nand_info *denali) +static uint16_t denali_nand_reset(struct denali_nand_info *denali) { uint32_t i; @@ -229,8 +234,10 @@ static uint16_t NAND_Flash_Reset(struct denali_nand_info *denali) denali->flash_reg + intr_status_addresses[i]); for (i = 0 ; i < LLD_MAX_FLASH_BANKS; i++) { - denali_write32(device_reset_banks[i], denali->flash_reg + DEVICE_RESET); - while (!(ioread32(denali->flash_reg + intr_status_addresses[i]) & + denali_write32(device_reset_banks[i], + denali->flash_reg + DEVICE_RESET); + while (!(ioread32(denali->flash_reg + + intr_status_addresses[i]) & (reset_complete[i] | operation_timeout[i]))) ; if (ioread32(denali->flash_reg + intr_status_addresses[i]) & @@ -246,11 +253,12 @@ static uint16_t NAND_Flash_Reset(struct denali_nand_info *denali) return PASS; } -/* this routine calculates the ONFI timing values for a given mode and programs - * the clocking register accordingly. The mode is determined by the get_onfi_nand_para - routine. +/* this routine calculates the ONFI timing values for a given mode and + * programs the clocking register accordingly. The mode is determined by + * the get_onfi_nand_para routine. 
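reset_bank() above builds its wake-up condition from the per-bank lookup tables (reset_complete[bank] | operation_timeout[bank]) and then checks which of the two bits actually fired. A small standalone illustration of that compose-wait-inspect bookkeeping, with invented bit values in place of the real INTR_STATUSn definitions:

#include <stdint.h>
#include <stdio.h>

/* Invented stand-ins for the INTR_STATUSn__RST_COMP / __TIME_OUT bits. */
static const uint32_t reset_complete[4]    = { 1 << 0, 1 << 1, 1 << 2, 1 << 3 };
static const uint32_t operation_timeout[4] = { 1 << 4, 1 << 5, 1 << 6, 1 << 7 };

int main(void)
{
        int bank = 2;
        uint32_t irq_mask = reset_complete[bank] | operation_timeout[bank];

        /* pretend the controller reported a timeout for this bank */
        uint32_t irq_status = operation_timeout[bank];

        printf("waiting on mask 0x%02x, got status 0x%02x\n", irq_mask, irq_status);
        if (irq_status & operation_timeout[bank])
                printf("reset bank failed (timeout bit set)\n");
        else if (irq_status & reset_complete[bank])
                printf("reset complete\n");
        return 0;
}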
*/ -static void NAND_ONFi_Timing_Mode(struct denali_nand_info *denali, uint16_t mode) +static void nand_onfi_timing_set(struct denali_nand_info *denali, + uint16_t mode) { uint16_t Trea[6] = {40, 30, 25, 20, 20, 16}; uint16_t Trp[6] = {50, 25, 17, 15, 12, 10}; @@ -347,136 +355,24 @@ static void NAND_ONFi_Timing_Mode(struct denali_nand_info *denali, uint16_t mode denali_write32(cs_cnt, denali->flash_reg + CS_SETUP_CNT); } -/* configures the initial ECC settings for the controller */ -static void set_ecc_config(struct denali_nand_info *denali) -{ -#if SUPPORT_8BITECC - if ((ioread32(denali->flash_reg + DEVICE_MAIN_AREA_SIZE) < 4096) || - (ioread32(denali->flash_reg + DEVICE_SPARE_AREA_SIZE) <= 128)) - denali_write32(8, denali->flash_reg + ECC_CORRECTION); -#endif - - if ((ioread32(denali->flash_reg + ECC_CORRECTION) & ECC_CORRECTION__VALUE) - == 1) { - denali->dev_info.wECCBytesPerSector = 4; - denali->dev_info.wECCBytesPerSector *= denali->dev_info.wDevicesConnected; - denali->dev_info.wNumPageSpareFlag = - denali->dev_info.wPageSpareSize - - denali->dev_info.wPageDataSize / - (ECC_SECTOR_SIZE * denali->dev_info.wDevicesConnected) * - denali->dev_info.wECCBytesPerSector - - denali->dev_info.wSpareSkipBytes; - } else { - denali->dev_info.wECCBytesPerSector = - (ioread32(denali->flash_reg + ECC_CORRECTION) & - ECC_CORRECTION__VALUE) * 13 / 8; - if ((denali->dev_info.wECCBytesPerSector) % 2 == 0) - denali->dev_info.wECCBytesPerSector += 2; - else - denali->dev_info.wECCBytesPerSector += 1; - - denali->dev_info.wECCBytesPerSector *= denali->dev_info.wDevicesConnected; - denali->dev_info.wNumPageSpareFlag = denali->dev_info.wPageSpareSize - - denali->dev_info.wPageDataSize / - (ECC_SECTOR_SIZE * denali->dev_info.wDevicesConnected) * - denali->dev_info.wECCBytesPerSector - - denali->dev_info.wSpareSkipBytes; - } -} - /* queries the NAND device to see what ONFI modes it supports. 
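nand_onfi_timing_set() above treats the ONFI mode as an index into per-parameter nanosecond tables (Trea[], Trp[], ...), and the get_onfi_nand_para() hunk that follows picks the highest mode advertised in ONFI_TIMING_MODE by scanning from bit 5 downward. A runnable sketch of that pairing; the bitmask value is made up for illustration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* per-mode timing tables, as in the driver (values in ns) */
        const uint16_t Trea[6] = { 40, 30, 25, 20, 20, 16 };
        const uint16_t Trp[6]  = { 50, 25, 17, 15, 12, 10 };

        /* pretend ONFI_TIMING_MODE reported support for modes 0..3 */
        uint32_t onfi_timing_mode = 0x0f;
        int mode = 0, i;

        for (i = 5; i > 0; i--) {        /* highest supported mode wins */
                if (onfi_timing_mode & (0x01 << i)) {
                        mode = i;
                        break;
                }
        }

        printf("selected ONFI mode %d: Trea=%uns Trp=%uns\n", mode, Trea[mode], Trp[mode]);
        return 0;
}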
*/ static uint16_t get_onfi_nand_para(struct denali_nand_info *denali) { int i; - uint16_t blks_lun_l, blks_lun_h, n_of_luns; - uint32_t blockperlun, id; - - denali_write32(DEVICE_RESET__BANK0, denali->flash_reg + DEVICE_RESET); - - while (!((ioread32(denali->flash_reg + INTR_STATUS0) & - INTR_STATUS0__RST_COMP) | - (ioread32(denali->flash_reg + INTR_STATUS0) & - INTR_STATUS0__TIME_OUT))) - ; - - if (ioread32(denali->flash_reg + INTR_STATUS0) & INTR_STATUS0__RST_COMP) { - denali_write32(DEVICE_RESET__BANK1, denali->flash_reg + DEVICE_RESET); - while (!((ioread32(denali->flash_reg + INTR_STATUS1) & - INTR_STATUS1__RST_COMP) | - (ioread32(denali->flash_reg + INTR_STATUS1) & - INTR_STATUS1__TIME_OUT))) - ; - - if (ioread32(denali->flash_reg + INTR_STATUS1) & - INTR_STATUS1__RST_COMP) { - denali_write32(DEVICE_RESET__BANK2, - denali->flash_reg + DEVICE_RESET); - while (!((ioread32(denali->flash_reg + INTR_STATUS2) & - INTR_STATUS2__RST_COMP) | - (ioread32(denali->flash_reg + INTR_STATUS2) & - INTR_STATUS2__TIME_OUT))) - ; - - if (ioread32(denali->flash_reg + INTR_STATUS2) & - INTR_STATUS2__RST_COMP) { - denali_write32(DEVICE_RESET__BANK3, - denali->flash_reg + DEVICE_RESET); - while (!((ioread32(denali->flash_reg + INTR_STATUS3) & - INTR_STATUS3__RST_COMP) | - (ioread32(denali->flash_reg + INTR_STATUS3) & - INTR_STATUS3__TIME_OUT))) - ; - } else { - printk(KERN_ERR "Getting a time out for bank 2!\n"); - } - } else { - printk(KERN_ERR "Getting a time out for bank 1!\n"); - } - } - - denali_write32(INTR_STATUS0__TIME_OUT, denali->flash_reg + INTR_STATUS0); - denali_write32(INTR_STATUS1__TIME_OUT, denali->flash_reg + INTR_STATUS1); - denali_write32(INTR_STATUS2__TIME_OUT, denali->flash_reg + INTR_STATUS2); - denali_write32(INTR_STATUS3__TIME_OUT, denali->flash_reg + INTR_STATUS3); - - denali->dev_info.wONFIDevFeatures = - ioread32(denali->flash_reg + ONFI_DEVICE_FEATURES); - denali->dev_info.wONFIOptCommands = - ioread32(denali->flash_reg + ONFI_OPTIONAL_COMMANDS); - denali->dev_info.wONFITimingMode = - ioread32(denali->flash_reg + ONFI_TIMING_MODE); - denali->dev_info.wONFIPgmCacheTimingMode = - ioread32(denali->flash_reg + ONFI_PGM_CACHE_TIMING_MODE); - - n_of_luns = ioread32(denali->flash_reg + ONFI_DEVICE_NO_OF_LUNS) & - ONFI_DEVICE_NO_OF_LUNS__NO_OF_LUNS; - blks_lun_l = ioread32(denali->flash_reg + ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_L); - blks_lun_h = ioread32(denali->flash_reg + ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_U); - - blockperlun = (blks_lun_h << 16) | blks_lun_l; - - denali->dev_info.wTotalBlocks = n_of_luns * blockperlun; - + /* we needn't to do a reset here because driver has already + * reset all the banks before + * */ if (!(ioread32(denali->flash_reg + ONFI_TIMING_MODE) & ONFI_TIMING_MODE__VALUE)) return FAIL; for (i = 5; i > 0; i--) { - if (ioread32(denali->flash_reg + ONFI_TIMING_MODE) & (0x01 << i)) + if (ioread32(denali->flash_reg + ONFI_TIMING_MODE) & + (0x01 << i)) break; } - NAND_ONFi_Timing_Mode(denali, i); - - index_addr(denali, MODE_11 | 0, 0x90); - index_addr(denali, MODE_11 | 1, 0); - - for (i = 0; i < 3; i++) - index_addr_read_data(denali, MODE_11 | 2, &id); - - nand_dbg_print(NAND_DBG_DEBUG, "3rd ID: 0x%x\n", id); - - denali->dev_info.MLCDevice = id & 0x0C; + nand_onfi_timing_set(denali, i); /* By now, all the ONFI devices we know support the page cache */ /* rw feature. 
So here we enable the pipeline_rw_ahead feature */ @@ -486,25 +382,10 @@ static uint16_t get_onfi_nand_para(struct denali_nand_info *denali) return PASS; } -static void get_samsung_nand_para(struct denali_nand_info *denali) +static void get_samsung_nand_para(struct denali_nand_info *denali, + uint8_t device_id) { - uint8_t no_of_planes; - uint32_t blk_size; - uint64_t plane_size, capacity; - uint32_t id_bytes[5]; - int i; - - index_addr(denali, (uint32_t)(MODE_11 | 0), 0x90); - index_addr(denali, (uint32_t)(MODE_11 | 1), 0); - for (i = 0; i < 5; i++) - index_addr_read_data(denali, (uint32_t)(MODE_11 | 2), &id_bytes[i]); - - nand_dbg_print(NAND_DBG_DEBUG, - "ID bytes: 0x%x, 0x%x, 0x%x, 0x%x, 0x%x\n", - id_bytes[0], id_bytes[1], id_bytes[2], - id_bytes[3], id_bytes[4]); - - if ((id_bytes[1] & 0xff) == 0xd3) { /* Samsung K9WAG08U1A */ + if (device_id == 0xd3) { /* Samsung K9WAG08U1A */ /* Set timing register values according to datasheet */ denali_write32(5, denali->flash_reg + ACC_CLKS); denali_write32(20, denali->flash_reg + RE_2_WE); @@ -514,19 +395,10 @@ static void get_samsung_nand_para(struct denali_nand_info *denali) denali_write32(2, denali->flash_reg + RDWR_EN_HI_CNT); denali_write32(2, denali->flash_reg + CS_SETUP_CNT); } - - no_of_planes = 1 << ((id_bytes[4] & 0x0c) >> 2); - plane_size = (uint64_t)64 << ((id_bytes[4] & 0x70) >> 4); - blk_size = 64 << ((ioread32(denali->flash_reg + DEVICE_PARAM_1) & 0x30) >> 4); - capacity = (uint64_t)128 * plane_size * no_of_planes; - - do_div(capacity, blk_size); - denali->dev_info.wTotalBlocks = capacity; } static void get_toshiba_nand_para(struct denali_nand_info *denali) { - void __iomem *scratch_reg; uint32_t tmp; /* Workaround to fix a controller bug which reports a wrong */ @@ -536,81 +408,52 @@ static void get_toshiba_nand_para(struct denali_nand_info *denali) denali_write32(216, denali->flash_reg + DEVICE_SPARE_AREA_SIZE); tmp = ioread32(denali->flash_reg + DEVICES_CONNECTED) * ioread32(denali->flash_reg + DEVICE_SPARE_AREA_SIZE); - denali_write32(tmp, denali->flash_reg + LOGICAL_PAGE_SPARE_SIZE); + denali_write32(tmp, + denali->flash_reg + LOGICAL_PAGE_SPARE_SIZE); #if SUPPORT_15BITECC denali_write32(15, denali->flash_reg + ECC_CORRECTION); #elif SUPPORT_8BITECC denali_write32(8, denali->flash_reg + ECC_CORRECTION); #endif } - - /* As Toshiba NAND can not provide it's block number, */ - /* so here we need user to provide the correct block */ - /* number in a scratch register before the Linux NAND */ - /* driver is loaded. 
If no valid value found in the scratch */ - /* register, then we use default block number value */ - scratch_reg = ioremap_nocache(SCRATCH_REG_ADDR, SCRATCH_REG_SIZE); - if (!scratch_reg) { - printk(KERN_ERR "Spectra: ioremap failed in %s, Line %d", - __FILE__, __LINE__); - denali->dev_info.wTotalBlocks = GLOB_HWCTL_DEFAULT_BLKS; - } else { - nand_dbg_print(NAND_DBG_WARN, - "Spectra: ioremap reg address: 0x%p\n", scratch_reg); - denali->dev_info.wTotalBlocks = 1 << ioread8(scratch_reg); - if (denali->dev_info.wTotalBlocks < 512) - denali->dev_info.wTotalBlocks = GLOB_HWCTL_DEFAULT_BLKS; - iounmap(scratch_reg); - } } -static void get_hynix_nand_para(struct denali_nand_info *denali) +static void get_hynix_nand_para(struct denali_nand_info *denali, + uint8_t device_id) { - void __iomem *scratch_reg; uint32_t main_size, spare_size; - switch (denali->dev_info.wDeviceID) { + switch (device_id) { case 0xD5: /* Hynix H27UAG8T2A, H27UBG8U5A or H27UCG8VFA */ case 0xD7: /* Hynix H27UDG8VEM, H27UCG8UDM or H27UCG8V5A */ denali_write32(128, denali->flash_reg + PAGES_PER_BLOCK); denali_write32(4096, denali->flash_reg + DEVICE_MAIN_AREA_SIZE); denali_write32(224, denali->flash_reg + DEVICE_SPARE_AREA_SIZE); - main_size = 4096 * ioread32(denali->flash_reg + DEVICES_CONNECTED); - spare_size = 224 * ioread32(denali->flash_reg + DEVICES_CONNECTED); - denali_write32(main_size, denali->flash_reg + LOGICAL_PAGE_DATA_SIZE); - denali_write32(spare_size, denali->flash_reg + LOGICAL_PAGE_SPARE_SIZE); + main_size = 4096 * + ioread32(denali->flash_reg + DEVICES_CONNECTED); + spare_size = 224 * + ioread32(denali->flash_reg + DEVICES_CONNECTED); + denali_write32(main_size, + denali->flash_reg + LOGICAL_PAGE_DATA_SIZE); + denali_write32(spare_size, + denali->flash_reg + LOGICAL_PAGE_SPARE_SIZE); denali_write32(0, denali->flash_reg + DEVICE_WIDTH); #if SUPPORT_15BITECC denali_write32(15, denali->flash_reg + ECC_CORRECTION); #elif SUPPORT_8BITECC denali_write32(8, denali->flash_reg + ECC_CORRECTION); #endif - denali->dev_info.MLCDevice = 1; break; default: nand_dbg_print(NAND_DBG_WARN, "Spectra: Unknown Hynix NAND (Device ID: 0x%x)." "Will use default parameter values instead.\n", - denali->dev_info.wDeviceID); - } - - scratch_reg = ioremap_nocache(SCRATCH_REG_ADDR, SCRATCH_REG_SIZE); - if (!scratch_reg) { - printk(KERN_ERR "Spectra: ioremap failed in %s, Line %d", - __FILE__, __LINE__); - denali->dev_info.wTotalBlocks = GLOB_HWCTL_DEFAULT_BLKS; - } else { - nand_dbg_print(NAND_DBG_WARN, - "Spectra: ioremap reg address: 0x%p\n", scratch_reg); - denali->dev_info.wTotalBlocks = 1 << ioread8(scratch_reg); - if (denali->dev_info.wTotalBlocks < 512) - denali->dev_info.wTotalBlocks = GLOB_HWCTL_DEFAULT_BLKS; - iounmap(scratch_reg); + device_id); } } /* determines how many NAND chips are connected to the controller. Note for - Intel CE4100 devices we don't support more than one device. + Intel CE4100 devices we don't support more than one device. 
*/ static void find_valid_banks(struct denali_nand_info *denali) { @@ -621,7 +464,8 @@ static void find_valid_banks(struct denali_nand_info *denali) for (i = 0; i < LLD_MAX_FLASH_BANKS; i++) { index_addr(denali, (uint32_t)(MODE_11 | (i << 24) | 0), 0x90); index_addr(denali, (uint32_t)(MODE_11 | (i << 24) | 1), 0); - index_addr_read_data(denali, (uint32_t)(MODE_11 | (i << 24) | 2), &id[i]); + index_addr_read_data(denali, + (uint32_t)(MODE_11 | (i << 24) | 2), &id[i]); nand_dbg_print(NAND_DBG_DEBUG, "Return 1st ID for bank[%d]: %x\n", i, id[i]); @@ -637,14 +481,12 @@ static void find_valid_banks(struct denali_nand_info *denali) } } - if (denali->platform == INTEL_CE4100) - { + if (denali->platform == INTEL_CE4100) { /* Platform limitations of the CE4100 device limit * users to a single chip solution for NAND. - * Multichip support is not enabled. - */ - if (denali->total_used_banks != 1) - { + * Multichip support is not enabled. + */ + if (denali->total_used_banks != 1) { printk(KERN_ERR "Sorry, Intel CE4100 only supports " "a single NAND device.\n"); BUG(); @@ -656,150 +498,60 @@ static void find_valid_banks(struct denali_nand_info *denali) static void detect_partition_feature(struct denali_nand_info *denali) { + /* For MRST platform, denali->fwblks represent the + * number of blocks firmware is taken, + * FW is in protect partition and MTD driver has no + * permission to access it. So let driver know how many + * blocks it can't touch. + * */ if (ioread32(denali->flash_reg + FEATURES) & FEATURES__PARTITION) { if ((ioread32(denali->flash_reg + PERM_SRC_ID_1) & PERM_SRC_ID_1__SRCID) == SPECTRA_PARTITION_ID) { - denali->dev_info.wSpectraStartBlock = + denali->fwblks = ((ioread32(denali->flash_reg + MIN_MAX_BANK_1) & MIN_MAX_BANK_1__MIN_VALUE) * - denali->dev_info.wTotalBlocks) + denali->blksperchip) + (ioread32(denali->flash_reg + MIN_BLK_ADDR_1) & MIN_BLK_ADDR_1__VALUE); - - denali->dev_info.wSpectraEndBlock = - (((ioread32(denali->flash_reg + MIN_MAX_BANK_1) & - MIN_MAX_BANK_1__MAX_VALUE) >> 2) * - denali->dev_info.wTotalBlocks) - + - (ioread32(denali->flash_reg + MAX_BLK_ADDR_1) & - MAX_BLK_ADDR_1__VALUE); - - denali->dev_info.wTotalBlocks *= denali->total_used_banks; - - if (denali->dev_info.wSpectraEndBlock >= - denali->dev_info.wTotalBlocks) { - denali->dev_info.wSpectraEndBlock = - denali->dev_info.wTotalBlocks - 1; - } - - denali->dev_info.wDataBlockNum = - denali->dev_info.wSpectraEndBlock - - denali->dev_info.wSpectraStartBlock + 1; - } else { - denali->dev_info.wTotalBlocks *= denali->total_used_banks; - denali->dev_info.wSpectraStartBlock = SPECTRA_START_BLOCK; - denali->dev_info.wSpectraEndBlock = - denali->dev_info.wTotalBlocks - 1; - denali->dev_info.wDataBlockNum = - denali->dev_info.wSpectraEndBlock - - denali->dev_info.wSpectraStartBlock + 1; - } - } else { - denali->dev_info.wTotalBlocks *= denali->total_used_banks; - denali->dev_info.wSpectraStartBlock = SPECTRA_START_BLOCK; - denali->dev_info.wSpectraEndBlock = denali->dev_info.wTotalBlocks - 1; - denali->dev_info.wDataBlockNum = - denali->dev_info.wSpectraEndBlock - - denali->dev_info.wSpectraStartBlock + 1; - } + } else + denali->fwblks = SPECTRA_START_BLOCK; + } else + denali->fwblks = SPECTRA_START_BLOCK; } -static void dump_device_info(struct denali_nand_info *denali) -{ - nand_dbg_print(NAND_DBG_DEBUG, "denali->dev_info:\n"); - nand_dbg_print(NAND_DBG_DEBUG, "DeviceMaker: 0x%x\n", - denali->dev_info.wDeviceMaker); - nand_dbg_print(NAND_DBG_DEBUG, "DeviceID: 0x%x\n", - denali->dev_info.wDeviceID); - 
nand_dbg_print(NAND_DBG_DEBUG, "DeviceType: 0x%x\n", - denali->dev_info.wDeviceType); - nand_dbg_print(NAND_DBG_DEBUG, "SpectraStartBlock: %d\n", - denali->dev_info.wSpectraStartBlock); - nand_dbg_print(NAND_DBG_DEBUG, "SpectraEndBlock: %d\n", - denali->dev_info.wSpectraEndBlock); - nand_dbg_print(NAND_DBG_DEBUG, "TotalBlocks: %d\n", - denali->dev_info.wTotalBlocks); - nand_dbg_print(NAND_DBG_DEBUG, "PagesPerBlock: %d\n", - denali->dev_info.wPagesPerBlock); - nand_dbg_print(NAND_DBG_DEBUG, "PageSize: %d\n", - denali->dev_info.wPageSize); - nand_dbg_print(NAND_DBG_DEBUG, "PageDataSize: %d\n", - denali->dev_info.wPageDataSize); - nand_dbg_print(NAND_DBG_DEBUG, "PageSpareSize: %d\n", - denali->dev_info.wPageSpareSize); - nand_dbg_print(NAND_DBG_DEBUG, "NumPageSpareFlag: %d\n", - denali->dev_info.wNumPageSpareFlag); - nand_dbg_print(NAND_DBG_DEBUG, "ECCBytesPerSector: %d\n", - denali->dev_info.wECCBytesPerSector); - nand_dbg_print(NAND_DBG_DEBUG, "BlockSize: %d\n", - denali->dev_info.wBlockSize); - nand_dbg_print(NAND_DBG_DEBUG, "BlockDataSize: %d\n", - denali->dev_info.wBlockDataSize); - nand_dbg_print(NAND_DBG_DEBUG, "DataBlockNum: %d\n", - denali->dev_info.wDataBlockNum); - nand_dbg_print(NAND_DBG_DEBUG, "PlaneNum: %d\n", - denali->dev_info.bPlaneNum); - nand_dbg_print(NAND_DBG_DEBUG, "DeviceMainAreaSize: %d\n", - denali->dev_info.wDeviceMainAreaSize); - nand_dbg_print(NAND_DBG_DEBUG, "DeviceSpareAreaSize: %d\n", - denali->dev_info.wDeviceSpareAreaSize); - nand_dbg_print(NAND_DBG_DEBUG, "DevicesConnected: %d\n", - denali->dev_info.wDevicesConnected); - nand_dbg_print(NAND_DBG_DEBUG, "DeviceWidth: %d\n", - denali->dev_info.wDeviceWidth); - nand_dbg_print(NAND_DBG_DEBUG, "HWRevision: 0x%x\n", - denali->dev_info.wHWRevision); - nand_dbg_print(NAND_DBG_DEBUG, "HWFeatures: 0x%x\n", - denali->dev_info.wHWFeatures); - nand_dbg_print(NAND_DBG_DEBUG, "ONFIDevFeatures: 0x%x\n", - denali->dev_info.wONFIDevFeatures); - nand_dbg_print(NAND_DBG_DEBUG, "ONFIOptCommands: 0x%x\n", - denali->dev_info.wONFIOptCommands); - nand_dbg_print(NAND_DBG_DEBUG, "ONFITimingMode: 0x%x\n", - denali->dev_info.wONFITimingMode); - nand_dbg_print(NAND_DBG_DEBUG, "ONFIPgmCacheTimingMode: 0x%x\n", - denali->dev_info.wONFIPgmCacheTimingMode); - nand_dbg_print(NAND_DBG_DEBUG, "MLCDevice: %s\n", - denali->dev_info.MLCDevice ? 
"Yes" : "No"); - nand_dbg_print(NAND_DBG_DEBUG, "SpareSkipBytes: %d\n", - denali->dev_info.wSpareSkipBytes); - nand_dbg_print(NAND_DBG_DEBUG, "BitsInPageNumber: %d\n", - denali->dev_info.nBitsInPageNumber); - nand_dbg_print(NAND_DBG_DEBUG, "BitsInPageDataSize: %d\n", - denali->dev_info.nBitsInPageDataSize); - nand_dbg_print(NAND_DBG_DEBUG, "BitsInBlockDataSize: %d\n", - denali->dev_info.nBitsInBlockDataSize); -} - -static uint16_t NAND_Read_Device_ID(struct denali_nand_info *denali) +static uint16_t denali_nand_timing_set(struct denali_nand_info *denali) { uint16_t status = PASS; - uint8_t no_of_planes; + uint32_t id_bytes[5], addr; + uint8_t i, maf_id, device_id; nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n", __FILE__, __LINE__, __func__); - denali->dev_info.wDeviceMaker = ioread32(denali->flash_reg + MANUFACTURER_ID); - denali->dev_info.wDeviceID = ioread32(denali->flash_reg + DEVICE_ID); - denali->dev_info.bDeviceParam0 = ioread32(denali->flash_reg + DEVICE_PARAM_0); - denali->dev_info.bDeviceParam1 = ioread32(denali->flash_reg + DEVICE_PARAM_1); - denali->dev_info.bDeviceParam2 = ioread32(denali->flash_reg + DEVICE_PARAM_2); - - denali->dev_info.MLCDevice = ioread32(denali->flash_reg + DEVICE_PARAM_0) & 0x0c; + /* Use read id method to get device ID and other + * params. For some NAND chips, controller can't + * report the correct device ID by reading from + * DEVICE_ID register + * */ + addr = (uint32_t)MODE_11 | BANK(denali->flash_bank); + index_addr(denali, (uint32_t)addr | 0, 0x90); + index_addr(denali, (uint32_t)addr | 1, 0); + for (i = 0; i < 5; i++) + index_addr_read_data(denali, addr | 2, &id_bytes[i]); + maf_id = id_bytes[0]; + device_id = id_bytes[1]; if (ioread32(denali->flash_reg + ONFI_DEVICE_NO_OF_LUNS) & ONFI_DEVICE_NO_OF_LUNS__ONFI_DEVICE) { /* ONFI 1.0 NAND */ if (FAIL == get_onfi_nand_para(denali)) return FAIL; - } else if (denali->dev_info.wDeviceMaker == 0xEC) { /* Samsung NAND */ - get_samsung_nand_para(denali); - } else if (denali->dev_info.wDeviceMaker == 0x98) { /* Toshiba NAND */ + } else if (maf_id == 0xEC) { /* Samsung NAND */ + get_samsung_nand_para(denali, device_id); + } else if (maf_id == 0x98) { /* Toshiba NAND */ get_toshiba_nand_para(denali); - } else if (denali->dev_info.wDeviceMaker == 0xAD) { /* Hynix NAND */ - get_hynix_nand_para(denali); - } else { - denali->dev_info.wTotalBlocks = GLOB_HWCTL_DEFAULT_BLKS; + } else if (maf_id == 0xAD) { /* Hynix NAND */ + get_hynix_nand_para(denali, device_id); } nand_dbg_print(NAND_DBG_DEBUG, "Dump timing register values:" @@ -814,88 +566,20 @@ static uint16_t NAND_Read_Device_ID(struct denali_nand_info *denali) ioread32(denali->flash_reg + RDWR_EN_HI_CNT), ioread32(denali->flash_reg + CS_SETUP_CNT)); - denali->dev_info.wHWRevision = ioread32(denali->flash_reg + REVISION); - denali->dev_info.wHWFeatures = ioread32(denali->flash_reg + FEATURES); - - denali->dev_info.wDeviceMainAreaSize = - ioread32(denali->flash_reg + DEVICE_MAIN_AREA_SIZE); - denali->dev_info.wDeviceSpareAreaSize = - ioread32(denali->flash_reg + DEVICE_SPARE_AREA_SIZE); - - denali->dev_info.wPageDataSize = - ioread32(denali->flash_reg + LOGICAL_PAGE_DATA_SIZE); - - /* Note: When using the Micon 4K NAND device, the controller will report - * Page Spare Size as 216 bytes. But Micron's Spec say it's 218 bytes. - * And if force set it to 218 bytes, the controller can not work - * correctly. So just let it be. But keep in mind that this bug may - * cause - * other problems in future. 
- Yunpeng 2008-10-10 - */ - denali->dev_info.wPageSpareSize = - ioread32(denali->flash_reg + LOGICAL_PAGE_SPARE_SIZE); - - denali->dev_info.wPagesPerBlock = ioread32(denali->flash_reg + PAGES_PER_BLOCK); - - denali->dev_info.wPageSize = - denali->dev_info.wPageDataSize + denali->dev_info.wPageSpareSize; - denali->dev_info.wBlockSize = - denali->dev_info.wPageSize * denali->dev_info.wPagesPerBlock; - denali->dev_info.wBlockDataSize = - denali->dev_info.wPagesPerBlock * denali->dev_info.wPageDataSize; - - denali->dev_info.wDeviceWidth = ioread32(denali->flash_reg + DEVICE_WIDTH); - denali->dev_info.wDeviceType = - ((ioread32(denali->flash_reg + DEVICE_WIDTH) > 0) ? 16 : 8); - - denali->dev_info.wDevicesConnected = ioread32(denali->flash_reg + DEVICES_CONNECTED); - - denali->dev_info.wSpareSkipBytes = - ioread32(denali->flash_reg + SPARE_AREA_SKIP_BYTES) * - denali->dev_info.wDevicesConnected; - - denali->dev_info.nBitsInPageNumber = - ilog2(denali->dev_info.wPagesPerBlock); - denali->dev_info.nBitsInPageDataSize = - ilog2(denali->dev_info.wPageDataSize); - denali->dev_info.nBitsInBlockDataSize = - ilog2(denali->dev_info.wBlockDataSize); - - set_ecc_config(denali); - - no_of_planes = ioread32(denali->flash_reg + NUMBER_OF_PLANES) & - NUMBER_OF_PLANES__VALUE; - - switch (no_of_planes) { - case 0: - case 1: - case 3: - case 7: - denali->dev_info.bPlaneNum = no_of_planes + 1; - break; - default: - status = FAIL; - break; - } - find_valid_banks(denali); detect_partition_feature(denali); - dump_device_info(denali); - /* If the user specified to override the default timings - * with a specific ONFI mode, we apply those changes here. + * with a specific ONFI mode, we apply those changes here. */ if (onfi_timing_mode != NAND_DEFAULT_TIMINGS) - { - NAND_ONFi_Timing_Mode(denali, onfi_timing_mode); - } + nand_onfi_timing_set(denali, onfi_timing_mode); return status; } -static void NAND_LLD_Enable_Disable_Interrupts(struct denali_nand_info *denali, +static void denali_set_intr_modes(struct denali_nand_info *denali, uint16_t INT_ENABLE) { nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n", @@ -912,7 +596,7 @@ static void NAND_LLD_Enable_Disable_Interrupts(struct denali_nand_info *denali, */ static inline bool is_flash_bank_valid(int flash_bank) { - return (flash_bank >= 0 && flash_bank < 4); + return (flash_bank >= 0 && flash_bank < 4); } static void denali_irq_init(struct denali_nand_info *denali) @@ -920,7 +604,7 @@ static void denali_irq_init(struct denali_nand_info *denali) uint32_t int_mask = 0; /* Disable global interrupts */ - NAND_LLD_Enable_Disable_Interrupts(denali, false); + denali_set_intr_modes(denali, false); int_mask = DENALI_IRQ_ALL; @@ -935,11 +619,12 @@ static void denali_irq_init(struct denali_nand_info *denali) static void denali_irq_cleanup(int irqnum, struct denali_nand_info *denali) { - NAND_LLD_Enable_Disable_Interrupts(denali, false); + denali_set_intr_modes(denali, false); free_irq(irqnum, denali); } -static void denali_irq_enable(struct denali_nand_info *denali, uint32_t int_mask) +static void denali_irq_enable(struct denali_nand_info *denali, + uint32_t int_mask) { denali_write32(int_mask, denali->flash_reg + INTR_EN0); denali_write32(int_mask, denali->flash_reg + INTR_EN1); @@ -948,15 +633,16 @@ static void denali_irq_enable(struct denali_nand_info *denali, uint32_t int_mask } /* This function only returns when an interrupt that this driver cares about - * occurs. This is to reduce the overhead of servicing interrupts + * occurs. 
This is to reduce the overhead of servicing interrupts */ static inline uint32_t denali_irq_detected(struct denali_nand_info *denali) { - return (read_interrupt_status(denali) & DENALI_IRQ_ALL); + return read_interrupt_status(denali) & DENALI_IRQ_ALL; } /* Interrupts are cleared by writing a 1 to the appropriate status bit */ -static inline void clear_interrupt(struct denali_nand_info *denali, uint32_t irq_mask) +static inline void clear_interrupt(struct denali_nand_info *denali, + uint32_t irq_mask) { uint32_t intr_status_reg = 0; @@ -995,17 +681,15 @@ static void print_irq_log(struct denali_nand_info *denali) { int i = 0; - printk("ISR debug log index = %X\n", denali->idx); + printk(KERN_INFO "ISR debug log index = %X\n", denali->idx); for (i = 0; i < 32; i++) - { - printk("%08X: %08X\n", i, denali->irq_debug_array[i]); - } + printk(KERN_INFO "%08X: %08X\n", i, denali->irq_debug_array[i]); } #endif -/* This is the interrupt service routine. It handles all interrupts - * sent to this device. Note that on CE4100, this is a shared - * interrupt. +/* This is the interrupt service routine. It handles all interrupts + * sent to this device. Note that on CE4100, this is a shared + * interrupt. */ static irqreturn_t denali_isr(int irq, void *dev_id) { @@ -1015,20 +699,20 @@ static irqreturn_t denali_isr(int irq, void *dev_id) spin_lock(&denali->irq_lock); - /* check to see if a valid NAND chip has - * been selected. + /* check to see if a valid NAND chip has + * been selected. */ - if (is_flash_bank_valid(denali->flash_bank)) - { - /* check to see if controller generated + if (is_flash_bank_valid(denali->flash_bank)) { + /* check to see if controller generated * the interrupt, since this is a shared interrupt */ - if ((irq_status = denali_irq_detected(denali)) != 0) - { + irq_status = denali_irq_detected(denali); + if (irq_status != 0) { #if DEBUG_DENALI - denali->irq_debug_array[denali->idx++] = 0x10000000 | irq_status; + denali->irq_debug_array[denali->idx++] = + 0x10000000 | irq_status; denali->idx %= 32; - printk("IRQ status = 0x%04x\n", irq_status); + printk(KERN_INFO "IRQ status = 0x%04x\n", irq_status); #endif /* handle interrupt */ /* first acknowledge it */ @@ -1054,61 +738,62 @@ static uint32_t wait_for_irq(struct denali_nand_info *denali, uint32_t irq_mask) bool retry = false; unsigned long timeout = msecs_to_jiffies(1000); - do - { + do { #if DEBUG_DENALI - printk("waiting for 0x%x\n", irq_mask); + printk(KERN_INFO "waiting for 0x%x\n", irq_mask); #endif - comp_res = wait_for_completion_timeout(&denali->complete, timeout); + comp_res = + wait_for_completion_timeout(&denali->complete, timeout); spin_lock_irq(&denali->irq_lock); intr_status = denali->irq_status; #if DEBUG_DENALI - denali->irq_debug_array[denali->idx++] = 0x20000000 | (irq_mask << 16) | intr_status; + denali->irq_debug_array[denali->idx++] = + 0x20000000 | (irq_mask << 16) | intr_status; denali->idx %= 32; #endif - if (intr_status & irq_mask) - { + if (intr_status & irq_mask) { denali->irq_status &= ~irq_mask; spin_unlock_irq(&denali->irq_lock); #if DEBUG_DENALI - if (retry) printk("status on retry = 0x%x\n", intr_status); + if (retry) + printk(KERN_INFO "status on retry = 0x%x\n", + intr_status); #endif /* our interrupt was detected */ break; - } - else - { - /* these are not the interrupts you are looking for - - need to wait again */ + } else { + /* these are not the interrupts you are looking for - + * need to wait again */ spin_unlock_irq(&denali->irq_lock); #if DEBUG_DENALI print_irq_log(denali); - 
printk("received irq nobody cared: irq_status = 0x%x," - " irq_mask = 0x%x, timeout = %ld\n", intr_status, irq_mask, comp_res); + printk(KERN_INFO "received irq nobody cared:" + " irq_status = 0x%x, irq_mask = 0x%x," + " timeout = %ld\n", intr_status, + irq_mask, comp_res); #endif retry = true; } } while (comp_res != 0); - if (comp_res == 0) - { + if (comp_res == 0) { /* timeout */ - printk(KERN_ERR "timeout occurred, status = 0x%x, mask = 0x%x\n", - intr_status, irq_mask); + printk(KERN_ERR "timeout occurred, status = 0x%x, mask = 0x%x\n", + intr_status, irq_mask); intr_status = 0; } return intr_status; } -/* This helper function setups the registers for ECC and whether or not +/* This helper function setups the registers for ECC and whether or not the spare area will be transfered. */ -static void setup_ecc_for_xfer(struct denali_nand_info *denali, bool ecc_en, +static void setup_ecc_for_xfer(struct denali_nand_info *denali, bool ecc_en, bool transfer_spare) { - int ecc_en_flag = 0, transfer_spare_flag = 0; + int ecc_en_flag = 0, transfer_spare_flag = 0; /* set ECC, transfer spare bits if needed */ ecc_en_flag = ecc_en ? ECC_ENABLE__FLAG : 0; @@ -1116,85 +801,85 @@ static void setup_ecc_for_xfer(struct denali_nand_info *denali, bool ecc_en, /* Enable spare area/ECC per user's request. */ denali_write32(ecc_en_flag, denali->flash_reg + ECC_ENABLE); - denali_write32(transfer_spare_flag, denali->flash_reg + TRANSFER_SPARE_REG); + denali_write32(transfer_spare_flag, + denali->flash_reg + TRANSFER_SPARE_REG); } -/* sends a pipeline command operation to the controller. See the Denali NAND - controller's user guide for more information (section 4.2.3.6). +/* sends a pipeline command operation to the controller. See the Denali NAND + controller's user guide for more information (section 4.2.3.6). 
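wait_for_irq() above keeps a cached irq_status that the ISR fills in; when woken, it consumes only the bits it was asked about (denali->irq_status &= ~irq_mask) and treats anything else as a spurious wake-up to retry. Setting the threading and locking aside, the bit bookkeeping can be shown with a small single-threaded sketch; pending_status and the bit positions are invented for illustration:

#include <stdint.h>
#include <stdio.h>

static uint32_t pending_status;  /* stands in for denali->irq_status, filled by the ISR */

/* consume the requested bits if present; report whether this wake-up was ours */
static int consume_irq(uint32_t irq_mask, uint32_t *got)
{
        if (pending_status & irq_mask) {
                *got = pending_status & irq_mask;
                pending_status &= ~irq_mask;  /* leave unrelated bits for other waiters */
                return 1;
        }
        return 0;  /* "not the interrupts you are looking for" - retry */
}

int main(void)
{
        uint32_t LOAD_COMP = 1 << 0, PROGRAM_COMP = 1 << 1;  /* invented bit positions */
        uint32_t got = 0;
        int hit;

        pending_status = PROGRAM_COMP;            /* unrelated event arrives first */
        hit = consume_irq(LOAD_COMP, &got);
        printf("want LOAD_COMP: hit=%d\n", hit);

        pending_status |= LOAD_COMP;              /* now our event shows up too */
        hit = consume_irq(LOAD_COMP, &got);
        printf("want LOAD_COMP: hit=%d got=0x%x leftover=0x%x\n", hit, got, pending_status);
        return 0;
}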
*/ -static int denali_send_pipeline_cmd(struct denali_nand_info *denali, bool ecc_en, - bool transfer_spare, int access_type, - int op) +static int denali_send_pipeline_cmd(struct denali_nand_info *denali, + bool ecc_en, + bool transfer_spare, + int access_type, + int op) { int status = PASS; - uint32_t addr = 0x0, cmd = 0x0, page_count = 1, irq_status = 0, + uint32_t addr = 0x0, cmd = 0x0, page_count = 1, irq_status = 0, irq_mask = 0; - if (op == DENALI_READ) irq_mask = INTR_STATUS0__LOAD_COMP; - else if (op == DENALI_WRITE) irq_mask = 0; - else BUG(); + if (op == DENALI_READ) + irq_mask = INTR_STATUS0__LOAD_COMP; + else if (op == DENALI_WRITE) + irq_mask = 0; + else + BUG(); setup_ecc_for_xfer(denali, ecc_en, transfer_spare); #if DEBUG_DENALI spin_lock_irq(&denali->irq_lock); - denali->irq_debug_array[denali->idx++] = 0x40000000 | ioread32(denali->flash_reg + ECC_ENABLE) | (access_type << 4); + denali->irq_debug_array[denali->idx++] = + 0x40000000 | ioread32(denali->flash_reg + ECC_ENABLE) | + (access_type << 4); denali->idx %= 32; spin_unlock_irq(&denali->irq_lock); #endif /* clear interrupts */ - clear_interrupts(denali); + clear_interrupts(denali); addr = BANK(denali->flash_bank) | denali->page; - if (op == DENALI_WRITE && access_type != SPARE_ACCESS) - { - cmd = MODE_01 | addr; + if (op == DENALI_WRITE && access_type != SPARE_ACCESS) { + cmd = MODE_01 | addr; denali_write32(cmd, denali->flash_mem); - } - else if (op == DENALI_WRITE && access_type == SPARE_ACCESS) - { + } else if (op == DENALI_WRITE && access_type == SPARE_ACCESS) { /* read spare area */ - cmd = MODE_10 | addr; + cmd = MODE_10 | addr; index_addr(denali, (uint32_t)cmd, access_type); - cmd = MODE_01 | addr; + cmd = MODE_01 | addr; denali_write32(cmd, denali->flash_mem); - } - else if (op == DENALI_READ) - { + } else if (op == DENALI_READ) { /* setup page read request for access type */ - cmd = MODE_10 | addr; + cmd = MODE_10 | addr; index_addr(denali, (uint32_t)cmd, access_type); /* page 33 of the NAND controller spec indicates we should not - use the pipeline commands in Spare area only mode. So we + use the pipeline commands in Spare area only mode. So we don't. */ - if (access_type == SPARE_ACCESS) - { + if (access_type == SPARE_ACCESS) { cmd = MODE_01 | addr; denali_write32(cmd, denali->flash_mem); - } - else - { - index_addr(denali, (uint32_t)cmd, 0x2000 | op | page_count); - - /* wait for command to be accepted - * can always use status0 bit as the mask is identical for each + } else { + index_addr(denali, (uint32_t)cmd, + 0x2000 | op | page_count); + + /* wait for command to be accepted + * can always use status0 bit as the + * mask is identical for each * bank. */ irq_status = wait_for_irq(denali, irq_mask); - if (irq_status == 0) - { + if (irq_status == 0) { printk(KERN_ERR "cmd, page, addr on timeout " - "(0x%x, 0x%x, 0x%x)\n", cmd, denali->page, addr); + "(0x%x, 0x%x, 0x%x)\n", cmd, + denali->page, addr); status = FAIL; - } - else - { + } else { cmd = MODE_01 | addr; denali_write32(cmd, denali->flash_mem); } @@ -1204,36 +889,35 @@ static int denali_send_pipeline_cmd(struct denali_nand_info *denali, bool ecc_en } /* helper function that simply writes a buffer to the flash */ -static int write_data_to_flash_mem(struct denali_nand_info *denali, const uint8_t *buf, - int len) +static int write_data_to_flash_mem(struct denali_nand_info *denali, + const uint8_t *buf, + int len) { uint32_t i = 0, *buf32; - /* verify that the len is a multiple of 4. 
see comment in - * read_data_from_flash_mem() */ + /* verify that the len is a multiple of 4. see comment in + * read_data_from_flash_mem() */ BUG_ON((len % 4) != 0); /* write the data to the flash memory */ buf32 = (uint32_t *)buf; for (i = 0; i < len / 4; i++) - { denali_write32(*buf32++, denali->flash_mem + 0x10); - } - return i*4; /* intent is to return the number of bytes read */ + return i*4; /* intent is to return the number of bytes read */ } /* helper function that simply reads a buffer from the flash */ -static int read_data_from_flash_mem(struct denali_nand_info *denali, uint8_t *buf, - int len) +static int read_data_from_flash_mem(struct denali_nand_info *denali, + uint8_t *buf, + int len) { uint32_t i = 0, *buf32; /* we assume that len will be a multiple of 4, if not * it would be nice to know about it ASAP rather than - * have random failures... - * - * This assumption is based on the fact that this - * function is designed to be used to read flash pages, + * have random failures... + * This assumption is based on the fact that this + * function is designed to be used to read flash pages, * which are typically multiples of 4... */ @@ -1242,10 +926,8 @@ static int read_data_from_flash_mem(struct denali_nand_info *denali, uint8_t *bu /* transfer the data from the flash */ buf32 = (uint32_t *)buf; for (i = 0; i < len / 4; i++) - { *buf32++ = ioread32(denali->flash_mem + 0x10); - } - return i*4; /* intent is to return the number of bytes read */ + return i*4; /* intent is to return the number of bytes read */ } /* writes OOB data to the device */ @@ -1253,38 +935,35 @@ static int write_oob_data(struct mtd_info *mtd, uint8_t *buf, int page) { struct denali_nand_info *denali = mtd_to_denali(mtd); uint32_t irq_status = 0; - uint32_t irq_mask = INTR_STATUS0__PROGRAM_COMP | + uint32_t irq_mask = INTR_STATUS0__PROGRAM_COMP | INTR_STATUS0__PROGRAM_FAIL; int status = 0; denali->page = page; - if (denali_send_pipeline_cmd(denali, false, false, SPARE_ACCESS, - DENALI_WRITE) == PASS) - { + if (denali_send_pipeline_cmd(denali, false, false, SPARE_ACCESS, + DENALI_WRITE) == PASS) { write_data_to_flash_mem(denali, buf, mtd->oobsize); #if DEBUG_DENALI spin_lock_irq(&denali->irq_lock); - denali->irq_debug_array[denali->idx++] = 0x80000000 | mtd->oobsize; + denali->irq_debug_array[denali->idx++] = + 0x80000000 | mtd->oobsize; denali->idx %= 32; spin_unlock_irq(&denali->irq_lock); #endif - + /* wait for operation to complete */ irq_status = wait_for_irq(denali, irq_mask); - if (irq_status == 0) - { + if (irq_status == 0) { printk(KERN_ERR "OOB write failed\n"); status = -EIO; } - } - else - { + } else { printk(KERN_ERR "unable to send pipeline command\n"); - status = -EIO; + status = -EIO; } return status; } @@ -1293,60 +972,56 @@ static int write_oob_data(struct mtd_info *mtd, uint8_t *buf, int page) static void read_oob_data(struct mtd_info *mtd, uint8_t *buf, int page) { struct denali_nand_info *denali = mtd_to_denali(mtd); - uint32_t irq_mask = INTR_STATUS0__LOAD_COMP, irq_status = 0, addr = 0x0, cmd = 0x0; + uint32_t irq_mask = INTR_STATUS0__LOAD_COMP, + irq_status = 0, addr = 0x0, cmd = 0x0; denali->page = page; #if DEBUG_DENALI - printk("read_oob %d\n", page); + printk(KERN_INFO "read_oob %d\n", page); #endif - if (denali_send_pipeline_cmd(denali, false, true, SPARE_ACCESS, - DENALI_READ) == PASS) - { - read_data_from_flash_mem(denali, buf, mtd->oobsize); + if (denali_send_pipeline_cmd(denali, false, true, SPARE_ACCESS, + DENALI_READ) == PASS) { + read_data_from_flash_mem(denali, buf, 
mtd->oobsize); - /* wait for command to be accepted + /* wait for command to be accepted * can always use status0 bit as the mask is identical for each * bank. */ irq_status = wait_for_irq(denali, irq_mask); if (irq_status == 0) - { - printk(KERN_ERR "page on OOB timeout %d\n", denali->page); - } + printk(KERN_ERR "page on OOB timeout %d\n", + denali->page); /* We set the device back to MAIN_ACCESS here as I observed * instability with the controller if you do a block erase * and the last transaction was a SPARE_ACCESS. Block erase * is reliable (according to the MTD test infrastructure) - * if you are in MAIN_ACCESS. + * if you are in MAIN_ACCESS. */ addr = BANK(denali->flash_bank) | denali->page; - cmd = MODE_10 | addr; + cmd = MODE_10 | addr; index_addr(denali, (uint32_t)cmd, MAIN_ACCESS); #if DEBUG_DENALI spin_lock_irq(&denali->irq_lock); - denali->irq_debug_array[denali->idx++] = 0x60000000 | mtd->oobsize; + denali->irq_debug_array[denali->idx++] = + 0x60000000 | mtd->oobsize; denali->idx %= 32; spin_unlock_irq(&denali->irq_lock); #endif } } -/* this function examines buffers to see if they contain data that +/* this function examines buffers to see if they contain data that * indicate that the buffer is part of an erased region of flash. */ bool is_erased(uint8_t *buf, int len) { int i = 0; for (i = 0; i < len; i++) - { if (buf[i] != 0xFF) - { return false; - } - } return true; } #define ECC_SECTOR_SIZE 512 @@ -1358,65 +1033,59 @@ bool is_erased(uint8_t *buf, int len) #define ECC_ERR_DEVICE(x) ((x) & ERR_CORRECTION_INFO__DEVICE_NR >> 8) #define ECC_LAST_ERR(x) ((x) & ERR_CORRECTION_INFO__LAST_ERR_INFO) -static bool handle_ecc(struct denali_nand_info *denali, uint8_t *buf, +static bool handle_ecc(struct denali_nand_info *denali, uint8_t *buf, uint8_t *oobbuf, uint32_t irq_status) { bool check_erased_page = false; - if (irq_status & INTR_STATUS0__ECC_ERR) - { + if (irq_status & INTR_STATUS0__ECC_ERR) { /* read the ECC errors. we'll ignore them for now */ uint32_t err_address = 0, err_correction_info = 0; uint32_t err_byte = 0, err_sector = 0, err_device = 0; uint32_t err_correction_value = 0; - do - { - err_address = ioread32(denali->flash_reg + + do { + err_address = ioread32(denali->flash_reg + ECC_ERROR_ADDRESS); err_sector = ECC_SECTOR(err_address); err_byte = ECC_BYTE(err_address); - err_correction_info = ioread32(denali->flash_reg + + err_correction_info = ioread32(denali->flash_reg + ERR_CORRECTION_INFO); - err_correction_value = + err_correction_value = ECC_CORRECTION_VALUE(err_correction_info); err_device = ECC_ERR_DEVICE(err_correction_info); - if (ECC_ERROR_CORRECTABLE(err_correction_info)) - { + if (ECC_ERROR_CORRECTABLE(err_correction_info)) { /* offset in our buffer is computed as: - sector number * sector size + offset in + sector number * sector size + offset in sector */ - int offset = err_sector * ECC_SECTOR_SIZE + + int offset = err_sector * ECC_SECTOR_SIZE + err_byte; - if (offset < denali->mtd.writesize) - { + if (offset < denali->mtd.writesize) { /* correct the ECC error */ buf[offset] ^= err_correction_value; denali->mtd.ecc_stats.corrected++; - } - else - { + } else { /* bummer, couldn't correct the error */ printk(KERN_ERR "ECC offset invalid\n"); denali->mtd.ecc_stats.failed++; } - } - else - { - /* if the error is not correctable, need to - * look at the page to see if it is an erased page. - * if so, then it's not a real ECC error */ + } else { + /* if the error is not correctable, need to + * look at the page to see if it is an erased + * page. 
if so, then it's not a real ECC error + * */ check_erased_page = true; } -#if DEBUG_DENALI - printk("Detected ECC error in page %d: err_addr = 0x%08x," - " info to fix is 0x%08x\n", denali->page, err_address, - err_correction_info); +#if DEBUG_DENALI + printk(KERN_INFO "Detected ECC error in page %d:" + " err_addr = 0x%08x, info to fix is" + " 0x%08x\n", denali->page, err_address, + err_correction_info); #endif } while (!ECC_LAST_ERR(err_correction_info)); } @@ -1428,7 +1097,8 @@ static void denali_enable_dma(struct denali_nand_info *denali, bool en) { uint32_t reg_val = 0x0; - if (en) reg_val = DMA_ENABLE__FLAG; + if (en) + reg_val = DMA_ENABLE__FLAG; denali_write32(reg_val, denali->flash_reg + DMA_ENABLE); ioread32(denali->flash_reg + DMA_ENABLE); @@ -1458,9 +1128,9 @@ static void denali_setup_dma(struct denali_nand_info *denali, int op) index_addr(denali, mode | 0x14000, 0x2400); } -/* writes a page. user specifies type, and this function handles the +/* writes a page. user specifies type, and this function handles the configuration details. */ -static void write_page(struct mtd_info *mtd, struct nand_chip *chip, +static void write_page(struct mtd_info *mtd, struct nand_chip *chip, const uint8_t *buf, bool raw_xfer) { struct denali_nand_info *denali = mtd_to_denali(mtd); @@ -1470,7 +1140,7 @@ static void write_page(struct mtd_info *mtd, struct nand_chip *chip, size_t size = denali->mtd.writesize + denali->mtd.oobsize; uint32_t irq_status = 0; - uint32_t irq_mask = INTR_STATUS0__DMA_CMD_COMP | + uint32_t irq_mask = INTR_STATUS0__DMA_CMD_COMP | INTR_STATUS0__PROGRAM_FAIL; /* if it is a raw xfer, we want to disable ecc, and send @@ -1483,74 +1153,73 @@ static void write_page(struct mtd_info *mtd, struct nand_chip *chip, /* copy buffer into DMA buffer */ memcpy(denali->buf.buf, buf, mtd->writesize); - if (raw_xfer) - { + if (raw_xfer) { /* transfer the data to the spare area */ - memcpy(denali->buf.buf + mtd->writesize, - chip->oob_poi, - mtd->oobsize); + memcpy(denali->buf.buf + mtd->writesize, + chip->oob_poi, + mtd->oobsize); } pci_dma_sync_single_for_device(pci_dev, addr, size, PCI_DMA_TODEVICE); clear_interrupts(denali); - denali_enable_dma(denali, true); + denali_enable_dma(denali, true); denali_setup_dma(denali, DENALI_WRITE); /* wait for operation to complete */ irq_status = wait_for_irq(denali, irq_mask); - if (irq_status == 0) - { - printk(KERN_ERR "timeout on write_page (type = %d)\n", raw_xfer); - denali->status = - (irq_status & INTR_STATUS0__PROGRAM_FAIL) ? NAND_STATUS_FAIL : - PASS; + if (irq_status == 0) { + printk(KERN_ERR "timeout on write_page" + " (type = %d)\n", raw_xfer); + denali->status = + (irq_status & INTR_STATUS0__PROGRAM_FAIL) ? + NAND_STATUS_FAIL : PASS; } - denali_enable_dma(denali, false); + denali_enable_dma(denali, false); pci_dma_sync_single_for_cpu(pci_dev, addr, size, PCI_DMA_TODEVICE); } /* NAND core entry points */ -/* this is the callback that the NAND core calls to write a page. Since - writing a page with ECC or without is similar, all the work is done +/* this is the callback that the NAND core calls to write a page. Since + writing a page with ECC or without is similar, all the work is done by write_page above. */ -static void denali_write_page(struct mtd_info *mtd, struct nand_chip *chip, +static void denali_write_page(struct mtd_info *mtd, struct nand_chip *chip, const uint8_t *buf) { /* for regular page writes, we let HW handle all the ECC - * data written to the device. */ + * data written to the device. 
*/ write_page(mtd, chip, buf, false); } -/* This is the callback that the NAND core calls to write a page without ECC. +/* This is the callback that the NAND core calls to write a page without ECC. raw access is similiar to ECC page writes, so all the work is done in the - write_page() function above. + write_page() function above. */ -static void denali_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip, +static void denali_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip, const uint8_t *buf) { - /* for raw page writes, we want to disable ECC and simply write + /* for raw page writes, we want to disable ECC and simply write whatever data is in the buffer. */ write_page(mtd, chip, buf, true); } -static int denali_write_oob(struct mtd_info *mtd, struct nand_chip *chip, +static int denali_write_oob(struct mtd_info *mtd, struct nand_chip *chip, int page) { - return write_oob_data(mtd, chip->oob_poi, page); + return write_oob_data(mtd, chip->oob_poi, page); } -static int denali_read_oob(struct mtd_info *mtd, struct nand_chip *chip, +static int denali_read_oob(struct mtd_info *mtd, struct nand_chip *chip, int page, int sndcmd) { read_oob_data(mtd, chip->oob_poi, page); - return 0; /* notify NAND core to send command to - * NAND device. */ + return 0; /* notify NAND core to send command to + NAND device. */ } static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip, @@ -1563,7 +1232,7 @@ static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip, size_t size = denali->mtd.writesize + denali->mtd.oobsize; uint32_t irq_status = 0; - uint32_t irq_mask = INTR_STATUS0__ECC_TRANSACTION_DONE | + uint32_t irq_mask = INTR_STATUS0__ECC_TRANSACTION_DONE | INTR_STATUS0__ECC_ERR; bool check_erased_page = false; @@ -1581,26 +1250,20 @@ static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip, pci_dma_sync_single_for_cpu(pci_dev, addr, size, PCI_DMA_FROMDEVICE); memcpy(buf, denali->buf.buf, mtd->writesize); - + check_erased_page = handle_ecc(denali, buf, chip->oob_poi, irq_status); denali_enable_dma(denali, false); - if (check_erased_page) - { + if (check_erased_page) { read_oob_data(&denali->mtd, chip->oob_poi, denali->page); /* check ECC failures that may have occurred on erased pages */ - if (check_erased_page) - { + if (check_erased_page) { if (!is_erased(buf, denali->mtd.writesize)) - { denali->mtd.ecc_stats.failed++; - } if (!is_erased(buf, denali->mtd.oobsize)) - { denali->mtd.ecc_stats.failed++; - } - } + } } return 0; } @@ -1616,7 +1279,7 @@ static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip, uint32_t irq_status = 0; uint32_t irq_mask = INTR_STATUS0__DMA_CMD_COMP; - + setup_ecc_for_xfer(denali, false, true); denali_enable_dma(denali, true); @@ -1644,12 +1307,10 @@ static uint8_t denali_read_byte(struct mtd_info *mtd) uint8_t result = 0xff; if (denali->buf.head < denali->buf.tail) - { result = denali->buf.buf[denali->buf.head++]; - } #if DEBUG_DENALI - printk("read byte -> 0x%02x\n", result); + printk(KERN_INFO "read byte -> 0x%02x\n", result); #endif return result; } @@ -1658,7 +1319,7 @@ static void denali_select_chip(struct mtd_info *mtd, int chip) { struct denali_nand_info *denali = mtd_to_denali(mtd); #if DEBUG_DENALI - printk("denali select chip %d\n", chip); + printk(KERN_INFO "denali select chip %d\n", chip); #endif spin_lock_irq(&denali->irq_lock); denali->flash_bank = chip; @@ -1672,7 +1333,7 @@ static int denali_waitfunc(struct mtd_info *mtd, struct nand_chip *chip) denali->status = 0; #if 
DEBUG_DENALI - printk("waitfunc %d\n", status); + printk(KERN_INFO "waitfunc %d\n", status); #endif return status; } @@ -1684,76 +1345,74 @@ static void denali_erase(struct mtd_info *mtd, int page) uint32_t cmd = 0x0, irq_status = 0; #if DEBUG_DENALI - printk("erase page: %d\n", page); + printk(KERN_INFO "erase page: %d\n", page); #endif /* clear interrupts */ - clear_interrupts(denali); + clear_interrupts(denali); /* setup page read request for access type */ cmd = MODE_10 | BANK(denali->flash_bank) | page; index_addr(denali, (uint32_t)cmd, 0x1); /* wait for erase to complete or failure to occur */ - irq_status = wait_for_irq(denali, INTR_STATUS0__ERASE_COMP | + irq_status = wait_for_irq(denali, INTR_STATUS0__ERASE_COMP | INTR_STATUS0__ERASE_FAIL); - denali->status = (irq_status & INTR_STATUS0__ERASE_FAIL) ? NAND_STATUS_FAIL : - PASS; + denali->status = (irq_status & INTR_STATUS0__ERASE_FAIL) ? + NAND_STATUS_FAIL : PASS; } -static void denali_cmdfunc(struct mtd_info *mtd, unsigned int cmd, int col, +static void denali_cmdfunc(struct mtd_info *mtd, unsigned int cmd, int col, int page) { struct denali_nand_info *denali = mtd_to_denali(mtd); + uint32_t addr, id; + int i; #if DEBUG_DENALI - printk("cmdfunc: 0x%x %d %d\n", cmd, col, page); + printk(KERN_INFO "cmdfunc: 0x%x %d %d\n", cmd, col, page); #endif - switch (cmd) - { - case NAND_CMD_PAGEPROG: - break; - case NAND_CMD_STATUS: - read_status(denali); - break; - case NAND_CMD_READID: - reset_buf(denali); - if (denali->flash_bank < denali->total_used_banks) - { - /* write manufacturer information into nand - buffer for NAND subsystem to fetch. - */ - write_byte_to_buf(denali, denali->dev_info.wDeviceMaker); - write_byte_to_buf(denali, denali->dev_info.wDeviceID); - write_byte_to_buf(denali, denali->dev_info.bDeviceParam0); - write_byte_to_buf(denali, denali->dev_info.bDeviceParam1); - write_byte_to_buf(denali, denali->dev_info.bDeviceParam2); - } - else - { - int i; - for (i = 0; i < 5; i++) - write_byte_to_buf(denali, 0xff); - } - break; - case NAND_CMD_READ0: - case NAND_CMD_SEQIN: - denali->page = page; - break; - case NAND_CMD_RESET: - reset_bank(denali); - break; - case NAND_CMD_READOOB: - /* TODO: Read OOB data */ - break; - default: - printk(KERN_ERR ": unsupported command received 0x%x\n", cmd); - break; + switch (cmd) { + case NAND_CMD_PAGEPROG: + break; + case NAND_CMD_STATUS: + read_status(denali); + break; + case NAND_CMD_READID: + reset_buf(denali); + /*sometimes ManufactureId read from register is not right + * e.g. 
some of Micron MT29F32G08QAA MLC NAND chips + * So here we send READID cmd to NAND insteand + * */ + addr = (uint32_t)MODE_11 | BANK(denali->flash_bank); + index_addr(denali, (uint32_t)addr | 0, 0x90); + index_addr(denali, (uint32_t)addr | 1, 0); + for (i = 0; i < 5; i++) { + index_addr_read_data(denali, + (uint32_t)addr | 2, + &id); + write_byte_to_buf(denali, id); + } + break; + case NAND_CMD_READ0: + case NAND_CMD_SEQIN: + denali->page = page; + break; + case NAND_CMD_RESET: + reset_bank(denali); + break; + case NAND_CMD_READOOB: + /* TODO: Read OOB data */ + break; + default: + printk(KERN_ERR ": unsupported command" + " received 0x%x\n", cmd); + break; } } /* stubs for ECC functions not used by the NAND core */ -static int denali_ecc_calculate(struct mtd_info *mtd, const uint8_t *data, +static int denali_ecc_calculate(struct mtd_info *mtd, const uint8_t *data, uint8_t *ecc_code) { printk(KERN_ERR "denali_ecc_calculate called unexpectedly\n"); @@ -1761,7 +1420,7 @@ static int denali_ecc_calculate(struct mtd_info *mtd, const uint8_t *data, return -EIO; } -static int denali_ecc_correct(struct mtd_info *mtd, uint8_t *data, +static int denali_ecc_correct(struct mtd_info *mtd, uint8_t *data, uint8_t *read_ecc, uint8_t *calc_ecc) { printk(KERN_ERR "denali_ecc_correct called unexpectedly\n"); @@ -1779,10 +1438,18 @@ static void denali_ecc_hwctl(struct mtd_info *mtd, int mode) /* Initialization code to bring the device up to a known good state */ static void denali_hw_init(struct denali_nand_info *denali) { + /* tell driver how many bit controller will skip before + * writing ECC code in OOB, this register may be already + * set by firmware. So we read this value out. + * if this value is 0, just let it be. + * */ + denali->bbtskipbytes = ioread32(denali->flash_reg + + SPARE_AREA_SKIP_BYTES); denali_irq_init(denali); - NAND_Flash_Reset(denali); + denali_nand_reset(denali); denali_write32(0x0F, denali->flash_reg + RB_PIN_ENABLED); - denali_write32(CHIP_EN_DONT_CARE__FLAG, denali->flash_reg + CHIP_ENABLE_DONT_CARE); + denali_write32(CHIP_EN_DONT_CARE__FLAG, + denali->flash_reg + CHIP_ENABLE_DONT_CARE); denali_write32(0x0, denali->flash_reg + SPARE_AREA_SKIP_BYTES); denali_write32(0xffff, denali->flash_reg + SPARE_AREA_MARKER); @@ -1792,25 +1459,18 @@ static void denali_hw_init(struct denali_nand_info *denali) denali_write32(1, denali->flash_reg + ECC_ENABLE); } -/* ECC layout for SLC devices. 
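The READID handling above stages the five ID bytes in the driver's small head/tail buffer via write_byte_to_buf(), and the NAND core then pulls them back one at a time through denali_read_byte(). A self-contained sketch of that staging buffer; the struct layout is simplified and the ID bytes are example values:

#include <stdint.h>
#include <stdio.h>

struct byte_buf {
        uint8_t buf[16];
        int head, tail;
};

static void reset_buf(struct byte_buf *b)             { b->head = b->tail = 0; }
static void write_byte(struct byte_buf *b, uint8_t v) { b->buf[b->tail++] = v; }

static uint8_t read_byte(struct byte_buf *b)
{
        uint8_t result = 0xff;  /* same default as denali_read_byte() */

        if (b->head < b->tail)
                result = b->buf[b->head++];
        return result;
}

int main(void)
{
        /* example ID bytes: 0xEC would dispatch to the Samsung path above */
        const uint8_t id[5] = { 0xEC, 0xd3, 0x00, 0x00, 0x00 };
        struct byte_buf b;
        int i;

        reset_buf(&b);
        for (i = 0; i < 5; i++)
                write_byte(&b, id[i]);

        for (i = 0; i < 6; i++)  /* one extra read shows the 0xff default */
                printf("read_byte -> 0x%02x\n", read_byte(&b));
        return 0;
}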
Denali spec indicates SLC fixed at 4 bytes */ -#define ECC_BYTES_SLC 4 * (2048 / ECC_SECTOR_SIZE) -static struct nand_ecclayout nand_oob_slc = { - .eccbytes = 4, - .eccpos = { 0, 1, 2, 3 }, /* not used */ - .oobfree = {{ - .offset = ECC_BYTES_SLC, - .length = 64 - ECC_BYTES_SLC - }} +/* Althogh controller spec said SLC ECC is forceb to be 4bit, + * but denali controller in MRST only support 15bit and 8bit ECC + * correction + * */ +#define ECC_8BITS 14 +static struct nand_ecclayout nand_8bit_oob = { + .eccbytes = 14, }; -#define ECC_BYTES_MLC 14 * (2048 / ECC_SECTOR_SIZE) -static struct nand_ecclayout nand_oob_mlc_14bit = { - .eccbytes = 14, - .eccpos = { 0, 1, 2, 3, 5, 6, 7, 8, 9, 10, 11, 12, 13 }, /* not used */ - .oobfree = {{ - .offset = ECC_BYTES_MLC, - .length = 64 - ECC_BYTES_MLC - }} +#define ECC_15BITS 26 +static struct nand_ecclayout nand_15bit_oob = { + .eccbytes = 26, }; static uint8_t bbt_pattern[] = {'B', 'b', 't', '0' }; @@ -1842,12 +1502,12 @@ void denali_drv_init(struct denali_nand_info *denali) denali->idx = 0; /* setup interrupt handler */ - /* the completion object will be used to notify + /* the completion object will be used to notify * the callee that the interrupt is done */ init_completion(&denali->complete); /* the spinlock will be used to synchronize the ISR - * with any element that might be access shared + * with any element that might be access shared * data (interrupt status) */ spin_lock_init(&denali->irq_lock); @@ -1880,13 +1540,12 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) } if (id->driver_data == INTEL_CE4100) { - /* Due to a silicon limitation, we can only support - * ONFI timing mode 1 and below. - */ - if (onfi_timing_mode < -1 || onfi_timing_mode > 1) - { - printk("Intel CE4100 only supports ONFI timing mode 1 " - "or below\n"); + /* Due to a silicon limitation, we can only support + * ONFI timing mode 1 and below. + */ + if (onfi_timing_mode < -1 || onfi_timing_mode > 1) { + printk(KERN_ERR "Intel CE4100 only supports" + " ONFI timing mode 1 or below\n"); ret = -EINVAL; goto failed_enable; } @@ -1905,7 +1564,9 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) mem_base = csr_base + csr_len; mem_len = csr_len; nand_dbg_print(NAND_DBG_WARN, - "Spectra: No second BAR for PCI device; assuming %08Lx\n", + "Spectra: No second" + " BAR for PCI device;" + " assuming %08Lx\n", (uint64_t)csr_base); } } @@ -1913,16 +1574,16 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) /* Is 32-bit DMA supported? */ ret = pci_set_dma_mask(dev, DMA_BIT_MASK(32)); - if (ret) - { + if (ret) { printk(KERN_ERR "Spectra: no usable DMA configuration\n"); goto failed_enable; } - denali->buf.dma_buf = pci_map_single(dev, denali->buf.buf, DENALI_BUF_SIZE, - PCI_DMA_BIDIRECTIONAL); + denali->buf.dma_buf = + pci_map_single(dev, denali->buf.buf, + DENALI_BUF_SIZE, + PCI_DMA_BIDIRECTIONAL); - if (pci_dma_mapping_error(dev, denali->buf.dma_buf)) - { + if (pci_dma_mapping_error(dev, denali->buf.dma_buf)) { printk(KERN_ERR "Spectra: failed to map DMA buffer\n"); goto failed_enable; } @@ -1970,22 +1631,11 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) } /* now that our ISR is registered, we can enable interrupts */ - NAND_LLD_Enable_Disable_Interrupts(denali, true); + denali_set_intr_modes(denali, true); pci_set_drvdata(dev, denali); - NAND_Read_Device_ID(denali); - - /* MTD supported page sizes vary by kernel. 
We validate our - kernel supports the device here. - */ - if (denali->dev_info.wPageSize > NAND_MAX_PAGESIZE + NAND_MAX_OOBSIZE) - { - ret = -ENODEV; - printk(KERN_ERR "Spectra: device size not supported by this " - "version of MTD."); - goto failed_nand; - } + denali_nand_timing_set(denali); nand_dbg_print(NAND_DBG_DEBUG, "Dump timing register values:" "acc_clks: %d, re_2_we: %d, we_2_re: %d," @@ -2009,18 +1659,46 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) denali->nand.read_byte = denali_read_byte; denali->nand.waitfunc = denali_waitfunc; - /* scan for NAND devices attached to the controller + /* scan for NAND devices attached to the controller * this is the first stage in a two step process to register - * with the nand subsystem */ - if (nand_scan_ident(&denali->mtd, LLD_MAX_FLASH_BANKS, NULL)) - { + * with the nand subsystem */ + if (nand_scan_ident(&denali->mtd, LLD_MAX_FLASH_BANKS, NULL)) { ret = -ENXIO; goto failed_nand; } - - /* second stage of the NAND scan - * this stage requires information regarding ECC and - * bad block management. */ + + /* MTD supported page sizes vary by kernel. We validate our + * kernel supports the device here. + */ + if (denali->mtd.writesize > NAND_MAX_PAGESIZE + NAND_MAX_OOBSIZE) { + ret = -ENODEV; + printk(KERN_ERR "Spectra: device size not supported by this " + "version of MTD."); + goto failed_nand; + } + + /* support for multi nand + * MTD knows nothing about multi nand, + * so we should tell it the real pagesize + * and anything else necessary + */ + denali->devnum = ioread32(denali->flash_reg + DEVICES_CONNECTED); + denali->nand.chipsize <<= (denali->devnum - 1); + denali->nand.page_shift += (denali->devnum - 1); + denali->nand.pagemask = (denali->nand.chipsize >> + denali->nand.page_shift) - 1; + denali->nand.bbt_erase_shift += (denali->devnum - 1); + denali->nand.phys_erase_shift = denali->nand.bbt_erase_shift; + denali->nand.chip_shift += (denali->devnum - 1); + denali->mtd.writesize <<= (denali->devnum - 1); + denali->mtd.oobsize <<= (denali->devnum - 1); + denali->mtd.erasesize <<= (denali->devnum - 1); + denali->mtd.size = denali->nand.numchips * denali->nand.chipsize; + denali->bbtskipbytes *= denali->devnum; + + /* second stage of the NAND scan + * this stage requires information regarding ECC and + * bad block management. */ /* Bad block management */ denali->nand.bbt_td = &bbt_main_descr; @@ -2030,26 +1708,57 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) denali->nand.options |= NAND_USE_FLASH_BBT | NAND_SKIP_BBTSCAN; denali->nand.ecc.mode = NAND_ECC_HW_SYNDROME; - if (denali->dev_info.MLCDevice) - { - denali->nand.ecc.layout = &nand_oob_mlc_14bit; - denali->nand.ecc.bytes = ECC_BYTES_MLC; - } - else /* SLC */ - { - denali->nand.ecc.layout = &nand_oob_slc; - denali->nand.ecc.bytes = ECC_BYTES_SLC; + /* The Denali controller only supports 15-bit and 8-bit ECC in MRST, + * so just let the controller do 15-bit ECC for MLC and 8-bit ECC for + * SLC if possible.
+ * */ + if (denali->nand.cellinfo & 0xc && + (denali->mtd.oobsize > (denali->bbtskipbytes + + ECC_15BITS * (denali->mtd.writesize / + ECC_SECTOR_SIZE)))) { + /* if MLC OOB size is large enough, use 15bit ECC*/ + denali->nand.ecc.layout = &nand_15bit_oob; + denali->nand.ecc.bytes = ECC_15BITS; + denali_write32(15, denali->flash_reg + ECC_CORRECTION); + } else if (denali->mtd.oobsize < (denali->bbtskipbytes + + ECC_8BITS * (denali->mtd.writesize / + ECC_SECTOR_SIZE))) { + printk(KERN_ERR "Your NAND chip OOB is not large enough to" + " contain 8bit ECC correction codes"); + goto failed_nand; + } else { + denali->nand.ecc.layout = &nand_8bit_oob; + denali->nand.ecc.bytes = ECC_8BITS; + denali_write32(8, denali->flash_reg + ECC_CORRECTION); } - /* These functions are required by the NAND core framework, otherwise, - the NAND core will assert. However, we don't need them, so we'll stub - them out. */ + denali->nand.ecc.bytes *= denali->devnum; + denali->nand.ecc.layout->eccbytes *= + denali->mtd.writesize / ECC_SECTOR_SIZE; + denali->nand.ecc.layout->oobfree[0].offset = + denali->bbtskipbytes + denali->nand.ecc.layout->eccbytes; + denali->nand.ecc.layout->oobfree[0].length = + denali->mtd.oobsize - denali->nand.ecc.layout->eccbytes - + denali->bbtskipbytes; + + /* Let the driver know the total number of blocks and + * how many blocks are contained in each NAND chip. + * blksperchip will help the driver know how many + * blocks are taken by the FW. + * */ + denali->totalblks = denali->mtd.size >> + denali->nand.phys_erase_shift; + denali->blksperchip = denali->totalblks / denali->nand.numchips; + + /* These functions are required by the NAND core framework, otherwise, + * the NAND core will assert. However, we don't need them, so we'll stub + * them out. */ denali->nand.ecc.calculate = denali_ecc_calculate; denali->nand.ecc.correct = denali_ecc_correct; denali->nand.ecc.hwctl = denali_ecc_hwctl; /* override the default read operations */ - denali->nand.ecc.size = denali->mtd.writesize; + denali->nand.ecc.size = ECC_SECTOR_SIZE * denali->devnum; denali->nand.ecc.read_page = denali_read_page; denali->nand.ecc.read_page_raw = denali_read_page_raw; denali->nand.ecc.write_page = denali_write_page; @@ -2058,15 +1767,15 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) denali->nand.ecc.write_oob = denali_write_oob; denali->nand.erase_cmd = denali_erase; - if (nand_scan_tail(&denali->mtd)) - { + if (nand_scan_tail(&denali->mtd)) { ret = -ENXIO; goto failed_nand; } ret = add_mtd_device(&denali->mtd); if (ret) { - printk(KERN_ERR "Spectra: Failed to register MTD device: %d\n", ret); + printk(KERN_ERR "Spectra: Failed to register" + " MTD device: %d\n", ret); goto failed_nand; } return 0; @@ -2079,7 +1788,7 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) failed_remap_csr: pci_release_regions(dev); failed_req_csr: - pci_unmap_single(dev, denali->buf.dma_buf, DENALI_BUF_SIZE, + pci_unmap_single(dev, denali->buf.dma_buf, DENALI_BUF_SIZE, PCI_DMA_BIDIRECTIONAL); failed_enable: kfree(denali); @@ -2103,7 +1812,7 @@ static void denali_pci_remove(struct pci_dev *dev) iounmap(denali->flash_mem); pci_release_regions(dev); pci_disable_device(dev); - pci_unmap_single(dev, denali->buf.dma_buf, DENALI_BUF_SIZE, + pci_unmap_single(dev, denali->buf.dma_buf, DENALI_BUF_SIZE, PCI_DMA_BIDIRECTIONAL); pci_set_drvdata(dev, NULL); kfree(denali); @@ -2120,7 +1829,8 @@ static struct pci_driver denali_pci_driver = { static int __devinit denali_init(void) { - printk(KERN_INFO
"Spectra MTD driver built on %s @ %s\n", __DATE__, __TIME__); + printk(KERN_INFO "Spectra MTD driver built on %s @ %s\n", + __DATE__, __TIME__); return pci_register_driver(&denali_pci_driver); } diff --git a/drivers/mtd/nand/denali.h b/drivers/mtd/nand/denali.h index 422a29ab2f60..b680474e6333 100644 --- a/drivers/mtd/nand/denali.h +++ b/drivers/mtd/nand/denali.h @@ -17,7 +17,7 @@ * */ -#include <linux/mtd/nand.h> +#include <linux/mtd/nand.h> #define DEVICE_RESET 0x0 #define DEVICE_RESET__BANK0 0x0001 @@ -29,7 +29,7 @@ #define TRANSFER_SPARE_REG__FLAG 0x0001 #define LOAD_WAIT_CNT 0x20 -#define LOAD_WAIT_CNT__VALUE 0xffff +#define LOAD_WAIT_CNT__VALUE 0xffff #define PROGRAM_WAIT_CNT 0x30 #define PROGRAM_WAIT_CNT__VALUE 0xffff @@ -83,7 +83,7 @@ #define RE_2_WE 0x120 #define RE_2_WE__VALUE 0x003f -#define ACC_CLKS 0x130 +#define ACC_CLKS 0x130 #define ACC_CLKS__VALUE 0x000f #define NUMBER_OF_PLANES 0x140 @@ -140,7 +140,7 @@ #define DEVICES_CONNECTED 0x250 #define DEVICES_CONNECTED__VALUE 0x0007 -#define DIE_MASK 0x260 +#define DIE_MASK 0x260 #define DIE_MASK__VALUE 0x00ff #define FIRST_BLOCK_OF_NEXT_PLANE 0x270 @@ -152,7 +152,7 @@ #define RE_2_RE 0x290 #define RE_2_RE__VALUE 0x003f -#define MANUFACTURER_ID 0x300 +#define MANUFACTURER_ID 0x300 #define MANUFACTURER_ID__VALUE 0x00ff #define DEVICE_ID 0x310 @@ -173,13 +173,13 @@ #define LOGICAL_PAGE_SPARE_SIZE 0x360 #define LOGICAL_PAGE_SPARE_SIZE__VALUE 0xffff -#define REVISION 0x370 +#define REVISION 0x370 #define REVISION__VALUE 0xffff #define ONFI_DEVICE_FEATURES 0x380 #define ONFI_DEVICE_FEATURES__VALUE 0x003f -#define ONFI_OPTIONAL_COMMANDS 0x390 +#define ONFI_OPTIONAL_COMMANDS 0x390 #define ONFI_OPTIONAL_COMMANDS__VALUE 0x003f #define ONFI_TIMING_MODE 0x3a0 @@ -201,12 +201,12 @@ #define FEATURES 0x3f0 #define FEATURES__N_BANKS 0x0003 #define FEATURES__ECC_MAX_ERR 0x003c -#define FEATURES__DMA 0x0040 +#define FEATURES__DMA 0x0040 #define FEATURES__CMD_DMA 0x0080 #define FEATURES__PARTITION 0x0100 #define FEATURES__XDMA_SIDEBAND 0x0200 #define FEATURES__GPREG 0x0400 -#define FEATURES__INDEX_ADDR 0x0800 +#define FEATURES__INDEX_ADDR 0x0800 #define TRANSFER_MODE 0x400 #define TRANSFER_MODE__VALUE 0x0003 @@ -235,12 +235,12 @@ #define INTR_EN0__DMA_CMD_COMP 0x0004 #define INTR_EN0__TIME_OUT 0x0008 #define INTR_EN0__PROGRAM_FAIL 0x0010 -#define INTR_EN0__ERASE_FAIL 0x0020 +#define INTR_EN0__ERASE_FAIL 0x0020 #define INTR_EN0__LOAD_COMP 0x0040 #define INTR_EN0__PROGRAM_COMP 0x0080 -#define INTR_EN0__ERASE_COMP 0x0100 +#define INTR_EN0__ERASE_COMP 0x0100 #define INTR_EN0__PIPE_CPYBCK_CMD_COMP 0x0200 -#define INTR_EN0__LOCKED_BLK 0x0400 +#define INTR_EN0__LOCKED_BLK 0x0400 #define INTR_EN0__UNSUP_CMD 0x0800 #define INTR_EN0__INT_ACT 0x1000 #define INTR_EN0__RST_COMP 0x2000 @@ -253,7 +253,7 @@ #define ERR_PAGE_ADDR0 0x440 #define ERR_PAGE_ADDR0__VALUE 0xffff -#define ERR_BLOCK_ADDR0 0x450 +#define ERR_BLOCK_ADDR0 0x450 #define ERR_BLOCK_ADDR0__VALUE 0xffff #define INTR_STATUS1 0x460 @@ -280,12 +280,12 @@ #define INTR_EN1__DMA_CMD_COMP 0x0004 #define INTR_EN1__TIME_OUT 0x0008 #define INTR_EN1__PROGRAM_FAIL 0x0010 -#define INTR_EN1__ERASE_FAIL 0x0020 +#define INTR_EN1__ERASE_FAIL 0x0020 #define INTR_EN1__LOAD_COMP 0x0040 #define INTR_EN1__PROGRAM_COMP 0x0080 -#define INTR_EN1__ERASE_COMP 0x0100 +#define INTR_EN1__ERASE_COMP 0x0100 #define INTR_EN1__PIPE_CPYBCK_CMD_COMP 0x0200 -#define INTR_EN1__LOCKED_BLK 0x0400 +#define INTR_EN1__LOCKED_BLK 0x0400 #define INTR_EN1__UNSUP_CMD 0x0800 #define INTR_EN1__INT_ACT 0x1000 #define INTR_EN1__RST_COMP 0x2000 @@ 
-298,7 +298,7 @@ #define ERR_PAGE_ADDR1 0x490 #define ERR_PAGE_ADDR1__VALUE 0xffff -#define ERR_BLOCK_ADDR1 0x4a0 +#define ERR_BLOCK_ADDR1 0x4a0 #define ERR_BLOCK_ADDR1__VALUE 0xffff #define INTR_STATUS2 0x4b0 @@ -325,12 +325,12 @@ #define INTR_EN2__DMA_CMD_COMP 0x0004 #define INTR_EN2__TIME_OUT 0x0008 #define INTR_EN2__PROGRAM_FAIL 0x0010 -#define INTR_EN2__ERASE_FAIL 0x0020 +#define INTR_EN2__ERASE_FAIL 0x0020 #define INTR_EN2__LOAD_COMP 0x0040 #define INTR_EN2__PROGRAM_COMP 0x0080 -#define INTR_EN2__ERASE_COMP 0x0100 +#define INTR_EN2__ERASE_COMP 0x0100 #define INTR_EN2__PIPE_CPYBCK_CMD_COMP 0x0200 -#define INTR_EN2__LOCKED_BLK 0x0400 +#define INTR_EN2__LOCKED_BLK 0x0400 #define INTR_EN2__UNSUP_CMD 0x0800 #define INTR_EN2__INT_ACT 0x1000 #define INTR_EN2__RST_COMP 0x2000 @@ -343,7 +343,7 @@ #define ERR_PAGE_ADDR2 0x4e0 #define ERR_PAGE_ADDR2__VALUE 0xffff -#define ERR_BLOCK_ADDR2 0x4f0 +#define ERR_BLOCK_ADDR2 0x4f0 #define ERR_BLOCK_ADDR2__VALUE 0xffff #define INTR_STATUS3 0x500 @@ -370,12 +370,12 @@ #define INTR_EN3__DMA_CMD_COMP 0x0004 #define INTR_EN3__TIME_OUT 0x0008 #define INTR_EN3__PROGRAM_FAIL 0x0010 -#define INTR_EN3__ERASE_FAIL 0x0020 +#define INTR_EN3__ERASE_FAIL 0x0020 #define INTR_EN3__LOAD_COMP 0x0040 #define INTR_EN3__PROGRAM_COMP 0x0080 -#define INTR_EN3__ERASE_COMP 0x0100 +#define INTR_EN3__ERASE_COMP 0x0100 #define INTR_EN3__PIPE_CPYBCK_CMD_COMP 0x0200 -#define INTR_EN3__LOCKED_BLK 0x0400 +#define INTR_EN3__LOCKED_BLK 0x0400 #define INTR_EN3__UNSUP_CMD 0x0800 #define INTR_EN3__INT_ACT 0x1000 #define INTR_EN3__RST_COMP 0x2000 @@ -388,7 +388,7 @@ #define ERR_PAGE_ADDR3 0x530 #define ERR_PAGE_ADDR3__VALUE 0xffff -#define ERR_BLOCK_ADDR3 0x540 +#define ERR_BLOCK_ADDR3 0x540 #define ERR_BLOCK_ADDR3__VALUE 0xffff #define DATA_INTR 0x550 @@ -412,9 +412,9 @@ #define GPREG_3__VALUE 0xffff #define ECC_THRESHOLD 0x600 -#define ECC_THRESHOLD__VALUE 0x03ff +#define ECC_THRESHOLD__VALUE 0x03ff -#define ECC_ERROR_BLOCK_ADDRESS 0x610 +#define ECC_ERROR_BLOCK_ADDRESS 0x610 #define ECC_ERROR_BLOCK_ADDRESS__VALUE 0xffff #define ECC_ERROR_PAGE_ADDRESS 0x620 @@ -466,7 +466,7 @@ #define CHNL_ACTIVE__CHANNEL3 0x0008 #define ACTIVE_SRC_ID 0x800 -#define ACTIVE_SRC_ID__VALUE 0x00ff +#define ACTIVE_SRC_ID__VALUE 0x00ff #define PTN_INTR 0x810 #define PTN_INTR__CONFIG_ERROR 0x0001 @@ -485,7 +485,7 @@ #define PTN_INTR_EN__REG_ACCESS_ERROR 0x0020 #define PERM_SRC_ID_0 0x830 -#define PERM_SRC_ID_0__SRCID 0x00ff +#define PERM_SRC_ID_0__SRCID 0x00ff #define PERM_SRC_ID_0__DIRECT_ACCESS_ACTIVE 0x0800 #define PERM_SRC_ID_0__WRITE_ACTIVE 0x2000 #define PERM_SRC_ID_0__READ_ACTIVE 0x4000 @@ -502,7 +502,7 @@ #define MIN_MAX_BANK_0__MAX_VALUE 0x000c #define PERM_SRC_ID_1 0x870 -#define PERM_SRC_ID_1__SRCID 0x00ff +#define PERM_SRC_ID_1__SRCID 0x00ff #define PERM_SRC_ID_1__DIRECT_ACCESS_ACTIVE 0x0800 #define PERM_SRC_ID_1__WRITE_ACTIVE 0x2000 #define PERM_SRC_ID_1__READ_ACTIVE 0x4000 @@ -519,7 +519,7 @@ #define MIN_MAX_BANK_1__MAX_VALUE 0x000c #define PERM_SRC_ID_2 0x8b0 -#define PERM_SRC_ID_2__SRCID 0x00ff +#define PERM_SRC_ID_2__SRCID 0x00ff #define PERM_SRC_ID_2__DIRECT_ACCESS_ACTIVE 0x0800 #define PERM_SRC_ID_2__WRITE_ACTIVE 0x2000 #define PERM_SRC_ID_2__READ_ACTIVE 0x4000 @@ -536,7 +536,7 @@ #define MIN_MAX_BANK_2__MAX_VALUE 0x000c #define PERM_SRC_ID_3 0x8f0 -#define PERM_SRC_ID_3__SRCID 0x00ff +#define PERM_SRC_ID_3__SRCID 0x00ff #define PERM_SRC_ID_3__DIRECT_ACCESS_ACTIVE 0x0800 #define PERM_SRC_ID_3__WRITE_ACTIVE 0x2000 #define PERM_SRC_ID_3__READ_ACTIVE 0x4000 @@ -553,7 +553,7 @@ #define 
MIN_MAX_BANK_3__MAX_VALUE 0x000c #define PERM_SRC_ID_4 0x930 -#define PERM_SRC_ID_4__SRCID 0x00ff +#define PERM_SRC_ID_4__SRCID 0x00ff #define PERM_SRC_ID_4__DIRECT_ACCESS_ACTIVE 0x0800 #define PERM_SRC_ID_4__WRITE_ACTIVE 0x2000 #define PERM_SRC_ID_4__READ_ACTIVE 0x4000 @@ -570,7 +570,7 @@ #define MIN_MAX_BANK_4__MAX_VALUE 0x000c #define PERM_SRC_ID_5 0x970 -#define PERM_SRC_ID_5__SRCID 0x00ff +#define PERM_SRC_ID_5__SRCID 0x00ff #define PERM_SRC_ID_5__DIRECT_ACCESS_ACTIVE 0x0800 #define PERM_SRC_ID_5__WRITE_ACTIVE 0x2000 #define PERM_SRC_ID_5__READ_ACTIVE 0x4000 @@ -587,7 +587,7 @@ #define MIN_MAX_BANK_5__MAX_VALUE 0x000c #define PERM_SRC_ID_6 0x9b0 -#define PERM_SRC_ID_6__SRCID 0x00ff +#define PERM_SRC_ID_6__SRCID 0x00ff #define PERM_SRC_ID_6__DIRECT_ACCESS_ACTIVE 0x0800 #define PERM_SRC_ID_6__WRITE_ACTIVE 0x2000 #define PERM_SRC_ID_6__READ_ACTIVE 0x4000 @@ -604,7 +604,7 @@ #define MIN_MAX_BANK_6__MAX_VALUE 0x000c #define PERM_SRC_ID_7 0x9f0 -#define PERM_SRC_ID_7__SRCID 0x00ff +#define PERM_SRC_ID_7__SRCID 0x00ff #define PERM_SRC_ID_7__DIRECT_ACCESS_ACTIVE 0x0800 #define PERM_SRC_ID_7__WRITE_ACTIVE 0x2000 #define PERM_SRC_ID_7__READ_ACTIVE 0x4000 @@ -620,47 +620,6 @@ #define MIN_MAX_BANK_7__MIN_VALUE 0x0003 #define MIN_MAX_BANK_7__MAX_VALUE 0x000c -/* flash.h */ -struct device_info_tag { - uint16_t wDeviceMaker; - uint16_t wDeviceID; - uint8_t bDeviceParam0; - uint8_t bDeviceParam1; - uint8_t bDeviceParam2; - uint32_t wDeviceType; - uint32_t wSpectraStartBlock; - uint32_t wSpectraEndBlock; - uint32_t wTotalBlocks; - uint16_t wPagesPerBlock; - uint16_t wPageSize; - uint16_t wPageDataSize; - uint16_t wPageSpareSize; - uint16_t wNumPageSpareFlag; - uint16_t wECCBytesPerSector; - uint32_t wBlockSize; - uint32_t wBlockDataSize; - uint32_t wDataBlockNum; - uint8_t bPlaneNum; - uint16_t wDeviceMainAreaSize; - uint16_t wDeviceSpareAreaSize; - uint16_t wDevicesConnected; - uint16_t wDeviceWidth; - uint16_t wHWRevision; - uint16_t wHWFeatures; - - uint16_t wONFIDevFeatures; - uint16_t wONFIOptCommands; - uint16_t wONFITimingMode; - uint16_t wONFIPgmCacheTimingMode; - - uint16_t MLCDevice; - uint16_t wSpareSkipBytes; - - uint8_t nBitsInPageNumber; - uint8_t nBitsInPageDataSize; - uint8_t nBitsInBlockDataSize; -}; - /* ffsdefs.h */ #define CLEAR 0 /*use this to clear a field instead of "fail"*/ #define SET 1 /*use this to set a field instead of "pass"*/ @@ -684,11 +643,11 @@ struct device_info_tag { #define NAND_DBG_TRACE 3 #ifdef VERBOSE -#define nand_dbg_print(level, args...) \ - do { \ - if (level <= nand_debug_level) \ - printk(KERN_ALERT args); \ - } while (0) +#define nand_dbg_print(level, args...) \ + do { \ + if (level <= nand_debug_level) \ + printk(KERN_ALERT args); \ + } while (0) #else #define nand_dbg_print(level, args...) 
#endif @@ -772,10 +731,9 @@ struct device_info_tag { #define ECC_SECTOR_SIZE 512 #define LLD_MAX_FLASH_BANKS 4 -#define DENALI_BUF_SIZE NAND_MAX_PAGESIZE + NAND_MAX_OOBSIZE +#define DENALI_BUF_SIZE (NAND_MAX_PAGESIZE + NAND_MAX_OOBSIZE) -struct nand_buf -{ +struct nand_buf { int head; int tail; uint8_t buf[DENALI_BUF_SIZE]; @@ -788,7 +746,6 @@ struct nand_buf struct denali_nand_info { struct mtd_info mtd; struct nand_chip nand; - struct device_info_tag dev_info; int flash_bank; /* currently selected chip */ int status; int platform; @@ -806,11 +763,12 @@ struct denali_nand_info { uint32_t irq_status; int irq_debug_array[32]; int idx; -}; -static uint16_t NAND_Flash_Reset(struct denali_nand_info *denali); -static uint16_t NAND_Read_Device_ID(struct denali_nand_info *denali); -static void NAND_LLD_Enable_Disable_Interrupts(struct denali_nand_info *denali, uint16_t INT_ENABLE); + uint32_t devnum; /* represent how many nands connected */ + uint32_t fwblks; /* represent how many blocks FW used */ + uint32_t totalblks; + uint32_t blksperchip; + uint32_t bbtskipbytes; +}; #endif /*_LLD_NAND_*/ - diff --git a/drivers/mtd/nand/diskonchip.c b/drivers/mtd/nand/diskonchip.c index 47067bc98248..b7f8de7b2780 100644 --- a/drivers/mtd/nand/diskonchip.c +++ b/drivers/mtd/nand/diskonchip.c @@ -29,7 +29,6 @@ #include <linux/mtd/mtd.h> #include <linux/mtd/nand.h> #include <linux/mtd/doc2000.h> -#include <linux/mtd/compatmac.h> #include <linux/mtd/partitions.h> #include <linux/mtd/inftl.h> @@ -146,6 +145,7 @@ static int doc_ecc_decode(struct rs_control *rs, uint8_t *data, uint8_t *ecc) uint8_t parity; uint16_t ds[4], s[5], tmp, errval[8], syn[4]; + memset(syn, 0, sizeof(syn)); /* Convert the ecc bytes into words */ ds[0] = ((ecc[4] & 0xff) >> 0) | ((ecc[5] & 0x03) << 8); ds[1] = ((ecc[5] & 0xfc) >> 2) | ((ecc[2] & 0x0f) << 6); @@ -169,9 +169,9 @@ static int doc_ecc_decode(struct rs_control *rs, uint8_t *data, uint8_t *ecc) s[i] ^= rs->alpha_to[rs_modnn(rs, tmp + (FCR + i) * j)]; } - /* Calc s[i] = s[i] / alpha^(v + i) */ + /* Calc syn[i] = s[i] / alpha^(v + i) */ for (i = 0; i < NROOTS; i++) { - if (syn[i]) + if (s[i]) syn[i] = rs_modnn(rs, rs->index_of[s[i]] + (NN - FCR - i)); } /* Call the decoder library */ diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c index 0d76b169482f..fcf8ceb277d4 100644 --- a/drivers/mtd/nand/mxc_nand.c +++ b/drivers/mtd/nand/mxc_nand.c @@ -39,60 +39,96 @@ #define nfc_is_v21() (cpu_is_mx25() || cpu_is_mx35()) #define nfc_is_v1() (cpu_is_mx31() || cpu_is_mx27() || cpu_is_mx21()) +#define nfc_is_v3_2() cpu_is_mx51() +#define nfc_is_v3() nfc_is_v3_2() /* Addresses for NFC registers */ -#define NFC_BUF_SIZE 0xE00 -#define NFC_BUF_ADDR 0xE04 -#define NFC_FLASH_ADDR 0xE06 -#define NFC_FLASH_CMD 0xE08 -#define NFC_CONFIG 0xE0A -#define NFC_ECC_STATUS_RESULT 0xE0C -#define NFC_RSLTMAIN_AREA 0xE0E -#define NFC_RSLTSPARE_AREA 0xE10 -#define NFC_WRPROT 0xE12 -#define NFC_V1_UNLOCKSTART_BLKADDR 0xe14 -#define NFC_V1_UNLOCKEND_BLKADDR 0xe16 -#define NFC_V21_UNLOCKSTART_BLKADDR 0xe20 -#define NFC_V21_UNLOCKEND_BLKADDR 0xe22 -#define NFC_NF_WRPRST 0xE18 -#define NFC_CONFIG1 0xE1A -#define NFC_CONFIG2 0xE1C - -/* Set INT to 0, FCMD to 1, rest to 0 in NFC_CONFIG2 Register - * for Command operation */ -#define NFC_CMD 0x1 - -/* Set INT to 0, FADD to 1, rest to 0 in NFC_CONFIG2 Register - * for Address operation */ -#define NFC_ADDR 0x2 - -/* Set INT to 0, FDI to 1, rest to 0 in NFC_CONFIG2 Register - * for Input operation */ -#define NFC_INPUT 0x4 - -/* Set INT to 0, FDO to 001, 
rest to 0 in NFC_CONFIG2 Register - * for Data Output operation */ -#define NFC_OUTPUT 0x8 - -/* Set INT to 0, FD0 to 010, rest to 0 in NFC_CONFIG2 Register - * for Read ID operation */ -#define NFC_ID 0x10 - -/* Set INT to 0, FDO to 100, rest to 0 in NFC_CONFIG2 Register - * for Read Status operation */ -#define NFC_STATUS 0x20 - -/* Set INT to 1, rest to 0 in NFC_CONFIG2 Register for Read - * Status operation */ -#define NFC_INT 0x8000 - -#define NFC_SP_EN (1 << 2) -#define NFC_ECC_EN (1 << 3) -#define NFC_INT_MSK (1 << 4) -#define NFC_BIG (1 << 5) -#define NFC_RST (1 << 6) -#define NFC_CE (1 << 7) -#define NFC_ONE_CYCLE (1 << 8) +#define NFC_V1_V2_BUF_SIZE (host->regs + 0x00) +#define NFC_V1_V2_BUF_ADDR (host->regs + 0x04) +#define NFC_V1_V2_FLASH_ADDR (host->regs + 0x06) +#define NFC_V1_V2_FLASH_CMD (host->regs + 0x08) +#define NFC_V1_V2_CONFIG (host->regs + 0x0a) +#define NFC_V1_V2_ECC_STATUS_RESULT (host->regs + 0x0c) +#define NFC_V1_V2_RSLTMAIN_AREA (host->regs + 0x0e) +#define NFC_V1_V2_RSLTSPARE_AREA (host->regs + 0x10) +#define NFC_V1_V2_WRPROT (host->regs + 0x12) +#define NFC_V1_UNLOCKSTART_BLKADDR (host->regs + 0x14) +#define NFC_V1_UNLOCKEND_BLKADDR (host->regs + 0x16) +#define NFC_V21_UNLOCKSTART_BLKADDR (host->regs + 0x20) +#define NFC_V21_UNLOCKEND_BLKADDR (host->regs + 0x22) +#define NFC_V1_V2_NF_WRPRST (host->regs + 0x18) +#define NFC_V1_V2_CONFIG1 (host->regs + 0x1a) +#define NFC_V1_V2_CONFIG2 (host->regs + 0x1c) + +#define NFC_V2_CONFIG1_ECC_MODE_4 (1 << 0) +#define NFC_V1_V2_CONFIG1_SP_EN (1 << 2) +#define NFC_V1_V2_CONFIG1_ECC_EN (1 << 3) +#define NFC_V1_V2_CONFIG1_INT_MSK (1 << 4) +#define NFC_V1_V2_CONFIG1_BIG (1 << 5) +#define NFC_V1_V2_CONFIG1_RST (1 << 6) +#define NFC_V1_V2_CONFIG1_CE (1 << 7) +#define NFC_V1_V2_CONFIG1_ONE_CYCLE (1 << 8) + +#define NFC_V1_V2_CONFIG2_INT (1 << 15) + +/* + * Operation modes for the NFC. Valid for v1, v2 and v3 + * type controllers. 
+ */ +#define NFC_CMD (1 << 0) +#define NFC_ADDR (1 << 1) +#define NFC_INPUT (1 << 2) +#define NFC_OUTPUT (1 << 3) +#define NFC_ID (1 << 4) +#define NFC_STATUS (1 << 5) + +#define NFC_V3_FLASH_CMD (host->regs_axi + 0x00) +#define NFC_V3_FLASH_ADDR0 (host->regs_axi + 0x04) + +#define NFC_V3_CONFIG1 (host->regs_axi + 0x34) +#define NFC_V3_CONFIG1_SP_EN (1 << 0) +#define NFC_V3_CONFIG1_RBA(x) (((x) & 0x7 ) << 4) + +#define NFC_V3_ECC_STATUS_RESULT (host->regs_axi + 0x38) + +#define NFC_V3_LAUNCH (host->regs_axi + 0x40) + +#define NFC_V3_WRPROT (host->regs_ip + 0x0) +#define NFC_V3_WRPROT_LOCK_TIGHT (1 << 0) +#define NFC_V3_WRPROT_LOCK (1 << 1) +#define NFC_V3_WRPROT_UNLOCK (1 << 2) +#define NFC_V3_WRPROT_BLS_UNLOCK (2 << 6) + +#define NFC_V3_WRPROT_UNLOCK_BLK_ADD0 (host->regs_ip + 0x04) + +#define NFC_V3_CONFIG2 (host->regs_ip + 0x24) +#define NFC_V3_CONFIG2_PS_512 (0 << 0) +#define NFC_V3_CONFIG2_PS_2048 (1 << 0) +#define NFC_V3_CONFIG2_PS_4096 (2 << 0) +#define NFC_V3_CONFIG2_ONE_CYCLE (1 << 2) +#define NFC_V3_CONFIG2_ECC_EN (1 << 3) +#define NFC_V3_CONFIG2_2CMD_PHASES (1 << 4) +#define NFC_V3_CONFIG2_NUM_ADDR_PHASE0 (1 << 5) +#define NFC_V3_CONFIG2_ECC_MODE_8 (1 << 6) +#define NFC_V3_CONFIG2_PPB(x) (((x) & 0x3) << 7) +#define NFC_V3_CONFIG2_NUM_ADDR_PHASE1(x) (((x) & 0x3) << 12) +#define NFC_V3_CONFIG2_INT_MSK (1 << 15) +#define NFC_V3_CONFIG2_ST_CMD(x) (((x) & 0xff) << 24) +#define NFC_V3_CONFIG2_SPAS(x) (((x) & 0xff) << 16) + +#define NFC_V3_CONFIG3 (host->regs_ip + 0x28) +#define NFC_V3_CONFIG3_ADD_OP(x) (((x) & 0x3) << 0) +#define NFC_V3_CONFIG3_FW8 (1 << 3) +#define NFC_V3_CONFIG3_SBB(x) (((x) & 0x7) << 8) +#define NFC_V3_CONFIG3_NUM_OF_DEVICES(x) (((x) & 0x7) << 12) +#define NFC_V3_CONFIG3_RBB_MODE (1 << 15) +#define NFC_V3_CONFIG3_NO_SDMA (1 << 20) + +#define NFC_V3_IPC (host->regs_ip + 0x2C) +#define NFC_V3_IPC_CREQ (1 << 0) +#define NFC_V3_IPC_INT (1 << 31) + +#define NFC_V3_DELAY_LINE (host->regs_ip + 0x34) struct mxc_nand_host { struct mtd_info mtd; @@ -102,20 +138,30 @@ struct mxc_nand_host { void *spare0; void *main_area0; - void *main_area1; void __iomem *base; void __iomem *regs; + void __iomem *regs_axi; + void __iomem *regs_ip; int status_request; struct clk *clk; int clk_act; int irq; + int eccsize; wait_queue_head_t irq_waitq; uint8_t *data_buf; unsigned int buf_start; int spare_len; + + void (*preset)(struct mtd_info *); + void (*send_cmd)(struct mxc_nand_host *, uint16_t, int); + void (*send_addr)(struct mxc_nand_host *, uint16_t, int); + void (*send_page)(struct mtd_info *, unsigned int); + void (*send_read_id)(struct mxc_nand_host *); + uint16_t (*get_dev_status)(struct mxc_nand_host *); + int (*check_int)(struct mxc_nand_host *); }; /* OOB placement block for use with hardware ecc generation */ @@ -175,34 +221,52 @@ static irqreturn_t mxc_nfc_irq(int irq, void *dev_id) return IRQ_HANDLED; } +static int check_int_v3(struct mxc_nand_host *host) +{ + uint32_t tmp; + + tmp = readl(NFC_V3_IPC); + if (!(tmp & NFC_V3_IPC_INT)) + return 0; + + tmp &= ~NFC_V3_IPC_INT; + writel(tmp, NFC_V3_IPC); + + return 1; +} + +static int check_int_v1_v2(struct mxc_nand_host *host) +{ + uint32_t tmp; + + tmp = readw(NFC_V1_V2_CONFIG2); + if (!(tmp & NFC_V1_V2_CONFIG2_INT)) + return 0; + + writew(tmp & ~NFC_V1_V2_CONFIG2_INT, NFC_V1_V2_CONFIG2); + + return 1; +} + /* This function polls the NANDFC to wait for the basic operation to * complete by checking the INT bit of config2 register. 
*/ static void wait_op_done(struct mxc_nand_host *host, int useirq) { - uint16_t tmp; int max_retries = 8000; if (useirq) { - if ((readw(host->regs + NFC_CONFIG2) & NFC_INT) == 0) { + if (!host->check_int(host)) { enable_irq(host->irq); - wait_event(host->irq_waitq, - readw(host->regs + NFC_CONFIG2) & NFC_INT); - - tmp = readw(host->regs + NFC_CONFIG2); - tmp &= ~NFC_INT; - writew(tmp, host->regs + NFC_CONFIG2); + wait_event(host->irq_waitq, host->check_int(host)); } } else { while (max_retries-- > 0) { - if (readw(host->regs + NFC_CONFIG2) & NFC_INT) { - tmp = readw(host->regs + NFC_CONFIG2); - tmp &= ~NFC_INT; - writew(tmp, host->regs + NFC_CONFIG2); + if (host->check_int(host)) break; - } + udelay(1); } if (max_retries < 0) @@ -211,21 +275,33 @@ static void wait_op_done(struct mxc_nand_host *host, int useirq) } } +static void send_cmd_v3(struct mxc_nand_host *host, uint16_t cmd, int useirq) +{ + /* fill command */ + writel(cmd, NFC_V3_FLASH_CMD); + + /* send out command */ + writel(NFC_CMD, NFC_V3_LAUNCH); + + /* Wait for operation to complete */ + wait_op_done(host, useirq); +} + /* This function issues the specified command to the NAND device and * waits for completion. */ -static void send_cmd(struct mxc_nand_host *host, uint16_t cmd, int useirq) +static void send_cmd_v1_v2(struct mxc_nand_host *host, uint16_t cmd, int useirq) { DEBUG(MTD_DEBUG_LEVEL3, "send_cmd(host, 0x%x, %d)\n", cmd, useirq); - writew(cmd, host->regs + NFC_FLASH_CMD); - writew(NFC_CMD, host->regs + NFC_CONFIG2); + writew(cmd, NFC_V1_V2_FLASH_CMD); + writew(NFC_CMD, NFC_V1_V2_CONFIG2); if (cpu_is_mx21() && (cmd == NAND_CMD_RESET)) { int max_retries = 100; /* Reset completion is indicated by NFC_CONFIG2 */ /* being set to 0 */ while (max_retries-- > 0) { - if (readw(host->regs + NFC_CONFIG2) == 0) { + if (readw(NFC_V1_V2_CONFIG2) == 0) { break; } udelay(1); @@ -239,21 +315,48 @@ static void send_cmd(struct mxc_nand_host *host, uint16_t cmd, int useirq) } } +static void send_addr_v3(struct mxc_nand_host *host, uint16_t addr, int islast) +{ + /* fill address */ + writel(addr, NFC_V3_FLASH_ADDR0); + + /* send out address */ + writel(NFC_ADDR, NFC_V3_LAUNCH); + + wait_op_done(host, 0); +} + /* This function sends an address (or partial address) to the * NAND device. The address is used to select the source/destination for * a NAND command. 
*/ -static void send_addr(struct mxc_nand_host *host, uint16_t addr, int islast) +static void send_addr_v1_v2(struct mxc_nand_host *host, uint16_t addr, int islast) { DEBUG(MTD_DEBUG_LEVEL3, "send_addr(host, 0x%x %d)\n", addr, islast); - writew(addr, host->regs + NFC_FLASH_ADDR); - writew(NFC_ADDR, host->regs + NFC_CONFIG2); + writew(addr, NFC_V1_V2_FLASH_ADDR); + writew(NFC_ADDR, NFC_V1_V2_CONFIG2); /* Wait for operation to complete */ wait_op_done(host, islast); } -static void send_page(struct mtd_info *mtd, unsigned int ops) +static void send_page_v3(struct mtd_info *mtd, unsigned int ops) +{ + struct nand_chip *nand_chip = mtd->priv; + struct mxc_nand_host *host = nand_chip->priv; + uint32_t tmp; + + tmp = readl(NFC_V3_CONFIG1); + tmp &= ~(7 << 4); + writel(tmp, NFC_V3_CONFIG1); + + /* transfer data from NFC ram to nand */ + writel(ops, NFC_V3_LAUNCH); + + wait_op_done(host, false); +} + +static void send_page_v1_v2(struct mtd_info *mtd, unsigned int ops) { struct nand_chip *nand_chip = mtd->priv; struct mxc_nand_host *host = nand_chip->priv; @@ -267,24 +370,34 @@ static void send_page(struct mtd_info *mtd, unsigned int ops) for (i = 0; i < bufs; i++) { /* NANDFC buffer 0 is used for page read/write */ - writew(i, host->regs + NFC_BUF_ADDR); + writew(i, NFC_V1_V2_BUF_ADDR); - writew(ops, host->regs + NFC_CONFIG2); + writew(ops, NFC_V1_V2_CONFIG2); /* Wait for operation to complete */ wait_op_done(host, true); } } +static void send_read_id_v3(struct mxc_nand_host *host) +{ + /* Read ID into main buffer */ + writel(NFC_ID, NFC_V3_LAUNCH); + + wait_op_done(host, true); + + memcpy(host->data_buf, host->main_area0, 16); +} + /* Request the NANDFC to perform a read of the NAND device ID. */ -static void send_read_id(struct mxc_nand_host *host) +static void send_read_id_v1_v2(struct mxc_nand_host *host) { struct nand_chip *this = &host->nand; /* NANDFC buffer 0 is used for device ID output */ - writew(0x0, host->regs + NFC_BUF_ADDR); + writew(0x0, NFC_V1_V2_BUF_ADDR); - writew(NFC_ID, host->regs + NFC_CONFIG2); + writew(NFC_ID, NFC_V1_V2_CONFIG2); /* Wait for operation to complete */ wait_op_done(host, true); @@ -301,29 +414,36 @@ static void send_read_id(struct mxc_nand_host *host) memcpy(host->data_buf, host->main_area0, 16); } +static uint16_t get_dev_status_v3(struct mxc_nand_host *host) +{ + writew(NFC_STATUS, NFC_V3_LAUNCH); + wait_op_done(host, true); + + return readl(NFC_V3_CONFIG1) >> 16; +} + /* This function requests the NANDFC to perform a read of the * NAND device status and returns the current status. */ -static uint16_t get_dev_status(struct mxc_nand_host *host) +static uint16_t get_dev_status_v1_v2(struct mxc_nand_host *host) { - void __iomem *main_buf = host->main_area1; + void __iomem *main_buf = host->main_area0; uint32_t store; uint16_t ret; - /* Issue status request to NAND device */ - /* store the main area1 first word, later do recovery */ - store = readl(main_buf); - /* NANDFC buffer 1 is used for device status to prevent - * corruption of read/write buffer on status requests. */ - writew(1, host->regs + NFC_BUF_ADDR); + writew(0x0, NFC_V1_V2_BUF_ADDR); - writew(NFC_STATUS, host->regs + NFC_CONFIG2); + /* + * The device status is stored in main_area0. To + * prevent corruption of the buffer save the value + * and restore it afterwards. 
+ */ + store = readl(main_buf); - /* Wait for operation to complete */ + writew(NFC_STATUS, NFC_V1_V2_CONFIG2); wait_op_done(host, true); - /* Status is placed in first word of main buffer */ - /* get status, then recovery area 1 data */ ret = readw(main_buf); + writel(store, main_buf); return ret; @@ -347,7 +467,7 @@ static void mxc_nand_enable_hwecc(struct mtd_info *mtd, int mode) */ } -static int mxc_nand_correct_data(struct mtd_info *mtd, u_char *dat, +static int mxc_nand_correct_data_v1(struct mtd_info *mtd, u_char *dat, u_char *read_ecc, u_char *calc_ecc) { struct nand_chip *nand_chip = mtd->priv; @@ -358,7 +478,7 @@ static int mxc_nand_correct_data(struct mtd_info *mtd, u_char *dat, * additional correction. 2-Bit errors cannot be corrected by * HW ECC, so we need to return failure */ - uint16_t ecc_status = readw(host->regs + NFC_ECC_STATUS_RESULT); + uint16_t ecc_status = readw(NFC_V1_V2_ECC_STATUS_RESULT); if (((ecc_status & 0x3) == 2) || ((ecc_status >> 2) == 2)) { DEBUG(MTD_DEBUG_LEVEL0, @@ -369,6 +489,43 @@ static int mxc_nand_correct_data(struct mtd_info *mtd, u_char *dat, return 0; } +static int mxc_nand_correct_data_v2_v3(struct mtd_info *mtd, u_char *dat, + u_char *read_ecc, u_char *calc_ecc) +{ + struct nand_chip *nand_chip = mtd->priv; + struct mxc_nand_host *host = nand_chip->priv; + u32 ecc_stat, err; + int no_subpages = 1; + int ret = 0; + u8 ecc_bit_mask, err_limit; + + ecc_bit_mask = (host->eccsize == 4) ? 0x7 : 0xf; + err_limit = (host->eccsize == 4) ? 0x4 : 0x8; + + no_subpages = mtd->writesize >> 9; + + if (nfc_is_v21()) + ecc_stat = readl(NFC_V1_V2_ECC_STATUS_RESULT); + else + ecc_stat = readl(NFC_V3_ECC_STATUS_RESULT); + + do { + err = ecc_stat & ecc_bit_mask; + if (err > err_limit) { + printk(KERN_WARNING "UnCorrectable RS-ECC Error\n"); + return -1; + } else { + ret += err; + } + ecc_stat >>= 4; + } while (--no_subpages); + + mtd->ecc_stats.corrected += ret; + pr_debug("%d Symbol Correctable RS-ECC Error\n", ret); + + return ret; +} + static int mxc_nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat, u_char *ecc_code) { @@ -383,7 +540,7 @@ static u_char mxc_nand_read_byte(struct mtd_info *mtd) /* Check for status request */ if (host->status_request) - return get_dev_status(host) & 0xFF; + return host->get_dev_status(host) & 0xFF; ret = *(uint8_t *)(host->data_buf + host->buf_start); host->buf_start++; @@ -519,71 +676,163 @@ static void mxc_do_addr_cycle(struct mtd_info *mtd, int column, int page_addr) * we will used the saved column address to index into * the full page. 
*/ - send_addr(host, 0, page_addr == -1); + host->send_addr(host, 0, page_addr == -1); if (mtd->writesize > 512) /* another col addr cycle for 2k page */ - send_addr(host, 0, false); + host->send_addr(host, 0, false); } /* Write out page address, if necessary */ if (page_addr != -1) { /* paddr_0 - p_addr_7 */ - send_addr(host, (page_addr & 0xff), false); + host->send_addr(host, (page_addr & 0xff), false); if (mtd->writesize > 512) { if (mtd->size >= 0x10000000) { /* paddr_8 - paddr_15 */ - send_addr(host, (page_addr >> 8) & 0xff, false); - send_addr(host, (page_addr >> 16) & 0xff, true); + host->send_addr(host, (page_addr >> 8) & 0xff, false); + host->send_addr(host, (page_addr >> 16) & 0xff, true); } else /* paddr_8 - paddr_15 */ - send_addr(host, (page_addr >> 8) & 0xff, true); + host->send_addr(host, (page_addr >> 8) & 0xff, true); } else { /* One more address cycle for higher density devices */ if (mtd->size >= 0x4000000) { /* paddr_8 - paddr_15 */ - send_addr(host, (page_addr >> 8) & 0xff, false); - send_addr(host, (page_addr >> 16) & 0xff, true); + host->send_addr(host, (page_addr >> 8) & 0xff, false); + host->send_addr(host, (page_addr >> 16) & 0xff, true); } else /* paddr_8 - paddr_15 */ - send_addr(host, (page_addr >> 8) & 0xff, true); + host->send_addr(host, (page_addr >> 8) & 0xff, true); } } } -static void preset(struct mtd_info *mtd) +/* + * v2 and v3 type controllers can do 4bit or 8bit ecc depending + * on how much oob the nand chip has. For 8bit ecc we need at least + * 26 bytes of oob data per 512 byte block. + */ +static int get_eccsize(struct mtd_info *mtd) +{ + int oobbytes_per_512 = 0; + + oobbytes_per_512 = mtd->oobsize * 512 / mtd->writesize; + + if (oobbytes_per_512 < 26) + return 4; + else + return 8; +} + +static void preset_v1_v2(struct mtd_info *mtd) { struct nand_chip *nand_chip = mtd->priv; struct mxc_nand_host *host = nand_chip->priv; uint16_t tmp; /* enable interrupt, disable spare enable */ - tmp = readw(host->regs + NFC_CONFIG1); - tmp &= ~NFC_INT_MSK; - tmp &= ~NFC_SP_EN; + tmp = readw(NFC_V1_V2_CONFIG1); + tmp &= ~NFC_V1_V2_CONFIG1_INT_MSK; + tmp &= ~NFC_V1_V2_CONFIG1_SP_EN; if (nand_chip->ecc.mode == NAND_ECC_HW) { - tmp |= NFC_ECC_EN; + tmp |= NFC_V1_V2_CONFIG1_ECC_EN; + } else { + tmp &= ~NFC_V1_V2_CONFIG1_ECC_EN; + } + + if (nfc_is_v21() && mtd->writesize) { + host->eccsize = get_eccsize(mtd); + if (host->eccsize == 4) + tmp |= NFC_V2_CONFIG1_ECC_MODE_4; } else { - tmp &= ~NFC_ECC_EN; + host->eccsize = 1; } - writew(tmp, host->regs + NFC_CONFIG1); + + writew(tmp, NFC_V1_V2_CONFIG1); /* preset operation */ /* Unlock the internal RAM Buffer */ - writew(0x2, host->regs + NFC_CONFIG); + writew(0x2, NFC_V1_V2_CONFIG); /* Blocks to be unlocked */ if (nfc_is_v21()) { - writew(0x0, host->regs + NFC_V21_UNLOCKSTART_BLKADDR); - writew(0xffff, host->regs + NFC_V21_UNLOCKEND_BLKADDR); + writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR); + writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR); } else if (nfc_is_v1()) { - writew(0x0, host->regs + NFC_V1_UNLOCKSTART_BLKADDR); - writew(0x4000, host->regs + NFC_V1_UNLOCKEND_BLKADDR); + writew(0x0, NFC_V1_UNLOCKSTART_BLKADDR); + writew(0x4000, NFC_V1_UNLOCKEND_BLKADDR); } else BUG(); /* Unlock Block Command for given address range */ - writew(0x4, host->regs + NFC_WRPROT); + writew(0x4, NFC_V1_V2_WRPROT); +} + +static void preset_v3(struct mtd_info *mtd) +{ + struct nand_chip *chip = mtd->priv; + struct mxc_nand_host *host = chip->priv; + uint32_t config2, config3; + int i, addr_phases; + + writel(NFC_V3_CONFIG1_RBA(0), NFC_V3_CONFIG1); + 
writel(NFC_V3_IPC_CREQ, NFC_V3_IPC); + + /* Unlock the internal RAM Buffer */ + writel(NFC_V3_WRPROT_BLS_UNLOCK | NFC_V3_WRPROT_UNLOCK, + NFC_V3_WRPROT); + + /* Blocks to be unlocked */ + for (i = 0; i < NAND_MAX_CHIPS; i++) + writel(0x0 | (0xffff << 16), + NFC_V3_WRPROT_UNLOCK_BLK_ADD0 + (i << 2)); + + writel(0, NFC_V3_IPC); + + config2 = NFC_V3_CONFIG2_ONE_CYCLE | + NFC_V3_CONFIG2_2CMD_PHASES | + NFC_V3_CONFIG2_SPAS(mtd->oobsize >> 1) | + NFC_V3_CONFIG2_ST_CMD(0x70) | + NFC_V3_CONFIG2_NUM_ADDR_PHASE0; + + if (chip->ecc.mode == NAND_ECC_HW) + config2 |= NFC_V3_CONFIG2_ECC_EN; + + addr_phases = fls(chip->pagemask) >> 3; + + if (mtd->writesize == 2048) { + config2 |= NFC_V3_CONFIG2_PS_2048; + config2 |= NFC_V3_CONFIG2_NUM_ADDR_PHASE1(addr_phases); + } else if (mtd->writesize == 4096) { + config2 |= NFC_V3_CONFIG2_PS_4096; + config2 |= NFC_V3_CONFIG2_NUM_ADDR_PHASE1(addr_phases); + } else { + config2 |= NFC_V3_CONFIG2_PS_512; + config2 |= NFC_V3_CONFIG2_NUM_ADDR_PHASE1(addr_phases - 1); + } + + if (mtd->writesize) { + config2 |= NFC_V3_CONFIG2_PPB(ffs(mtd->erasesize / mtd->writesize) - 6); + host->eccsize = get_eccsize(mtd); + if (host->eccsize == 8) + config2 |= NFC_V3_CONFIG2_ECC_MODE_8; + } + + writel(config2, NFC_V3_CONFIG2); + + config3 = NFC_V3_CONFIG3_NUM_OF_DEVICES(0) | + NFC_V3_CONFIG3_NO_SDMA | + NFC_V3_CONFIG3_RBB_MODE | + NFC_V3_CONFIG3_SBB(6) | /* Reset default */ + NFC_V3_CONFIG3_ADD_OP(0); + + if (!(chip->options & NAND_BUSWIDTH_16)) + config3 |= NFC_V3_CONFIG3_FW8; + + writel(config3, NFC_V3_CONFIG3); + + writel(0, NFC_V3_DELAY_LINE); } /* Used by the upper layer to write command to NAND Flash for @@ -604,15 +853,15 @@ static void mxc_nand_command(struct mtd_info *mtd, unsigned command, /* Command pre-processing step */ switch (command) { case NAND_CMD_RESET: - send_cmd(host, command, false); - preset(mtd); + host->preset(mtd); + host->send_cmd(host, command, false); break; case NAND_CMD_STATUS: host->buf_start = 0; host->status_request = true; - send_cmd(host, command, true); + host->send_cmd(host, command, true); mxc_do_addr_cycle(mtd, column, page_addr); break; @@ -625,13 +874,13 @@ static void mxc_nand_command(struct mtd_info *mtd, unsigned command, command = NAND_CMD_READ0; /* only READ0 is valid */ - send_cmd(host, command, false); + host->send_cmd(host, command, false); mxc_do_addr_cycle(mtd, column, page_addr); if (mtd->writesize > 512) - send_cmd(host, NAND_CMD_READSTART, true); + host->send_cmd(host, NAND_CMD_READSTART, true); - send_page(mtd, NFC_OUTPUT); + host->send_page(mtd, NFC_OUTPUT); memcpy(host->data_buf, host->main_area0, mtd->writesize); copy_spare(mtd, true); @@ -644,28 +893,28 @@ static void mxc_nand_command(struct mtd_info *mtd, unsigned command, host->buf_start = column; - send_cmd(host, command, false); + host->send_cmd(host, command, false); mxc_do_addr_cycle(mtd, column, page_addr); break; case NAND_CMD_PAGEPROG: memcpy(host->main_area0, host->data_buf, mtd->writesize); copy_spare(mtd, false); - send_page(mtd, NFC_INPUT); - send_cmd(host, command, true); + host->send_page(mtd, NFC_INPUT); + host->send_cmd(host, command, true); mxc_do_addr_cycle(mtd, column, page_addr); break; case NAND_CMD_READID: - send_cmd(host, command, true); + host->send_cmd(host, command, true); mxc_do_addr_cycle(mtd, column, page_addr); - send_read_id(host); + host->send_read_id(host); host->buf_start = column; break; case NAND_CMD_ERASE1: case NAND_CMD_ERASE2: - send_cmd(host, command, false); + host->send_cmd(host, command, false); mxc_do_addr_cycle(mtd, column, 
page_addr); break; @@ -761,22 +1010,55 @@ static int __init mxcnd_probe(struct platform_device *pdev) } host->main_area0 = host->base; - host->main_area1 = host->base + 0x200; + + if (nfc_is_v1() || nfc_is_v21()) { + host->preset = preset_v1_v2; + host->send_cmd = send_cmd_v1_v2; + host->send_addr = send_addr_v1_v2; + host->send_page = send_page_v1_v2; + host->send_read_id = send_read_id_v1_v2; + host->get_dev_status = get_dev_status_v1_v2; + host->check_int = check_int_v1_v2; + } if (nfc_is_v21()) { - host->regs = host->base + 0x1000; + host->regs = host->base + 0x1e00; host->spare0 = host->base + 0x1000; host->spare_len = 64; oob_smallpage = &nandv2_hw_eccoob_smallpage; oob_largepage = &nandv2_hw_eccoob_largepage; this->ecc.bytes = 9; } else if (nfc_is_v1()) { - host->regs = host->base; + host->regs = host->base + 0xe00; host->spare0 = host->base + 0x800; host->spare_len = 16; oob_smallpage = &nandv1_hw_eccoob_smallpage; oob_largepage = &nandv1_hw_eccoob_largepage; this->ecc.bytes = 3; + host->eccsize = 1; + } else if (nfc_is_v3_2()) { + res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + if (!res) { + err = -ENODEV; + goto eirq; + } + host->regs_ip = ioremap(res->start, resource_size(res)); + if (!host->regs_ip) { + err = -ENOMEM; + goto eirq; + } + host->regs_axi = host->base + 0x1e00; + host->spare0 = host->base + 0x1000; + host->spare_len = 64; + host->preset = preset_v3; + host->send_cmd = send_cmd_v3; + host->send_addr = send_addr_v3; + host->send_page = send_page_v3; + host->send_read_id = send_read_id_v3; + host->check_int = check_int_v3; + host->get_dev_status = get_dev_status_v3; + oob_smallpage = &nandv2_hw_eccoob_smallpage; + oob_largepage = &nandv2_hw_eccoob_largepage; } else BUG(); @@ -786,7 +1068,10 @@ static int __init mxcnd_probe(struct platform_device *pdev) if (pdata->hw_ecc) { this->ecc.calculate = mxc_nand_calculate_ecc; this->ecc.hwctl = mxc_nand_enable_hwecc; - this->ecc.correct = mxc_nand_correct_data; + if (nfc_is_v1()) + this->ecc.correct = mxc_nand_correct_data_v1; + else + this->ecc.correct = mxc_nand_correct_data_v2_v3; this->ecc.mode = NAND_ECC_HW; } else { this->ecc.mode = NAND_ECC_SOFT; @@ -817,6 +1102,9 @@ static int __init mxcnd_probe(struct platform_device *pdev) goto escan; } + /* Call preset again, with correct writesize this time */ + host->preset(mtd); + if (mtd->writesize == 2048) this->ecc.layout = oob_largepage; @@ -848,6 +1136,8 @@ static int __init mxcnd_probe(struct platform_device *pdev) escan: free_irq(host->irq, host); eirq: + if (host->regs_ip) + iounmap(host->regs_ip); iounmap(host->base); eres: clk_put(host->clk); @@ -867,59 +1157,19 @@ static int __devexit mxcnd_remove(struct platform_device *pdev) nand_release(&host->mtd); free_irq(host->irq, host); + if (host->regs_ip) + iounmap(host->regs_ip); iounmap(host->base); kfree(host); return 0; } -#ifdef CONFIG_PM -static int mxcnd_suspend(struct platform_device *pdev, pm_message_t state) -{ - struct mtd_info *mtd = platform_get_drvdata(pdev); - struct nand_chip *nand_chip = mtd->priv; - struct mxc_nand_host *host = nand_chip->priv; - int ret = 0; - - DEBUG(MTD_DEBUG_LEVEL0, "MXC_ND : NAND suspend\n"); - - ret = mtd->suspend(mtd); - - /* - * nand_suspend locks the device for exclusive access, so - * the clock must already be off. 
- */ - BUG_ON(!ret && host->clk_act); - - return ret; -} - -static int mxcnd_resume(struct platform_device *pdev) -{ - struct mtd_info *mtd = platform_get_drvdata(pdev); - struct nand_chip *nand_chip = mtd->priv; - struct mxc_nand_host *host = nand_chip->priv; - int ret = 0; - - DEBUG(MTD_DEBUG_LEVEL0, "MXC_ND : NAND resume\n"); - - mtd->resume(mtd); - - return ret; -} - -#else -# define mxcnd_suspend NULL -# define mxcnd_resume NULL -#endif /* CONFIG_PM */ - static struct platform_driver mxcnd_driver = { .driver = { .name = DRIVER_NAME, - }, + }, .remove = __devexit_p(mxcnd_remove), - .suspend = mxcnd_suspend, - .resume = mxcnd_resume, }; static int __init mxc_nd_init(void) diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c index 4a7b86423ee9..16a1714df008 100644 --- a/drivers/mtd/nand/nand_base.c +++ b/drivers/mtd/nand/nand_base.c @@ -42,7 +42,6 @@ #include <linux/mtd/mtd.h> #include <linux/mtd/nand.h> #include <linux/mtd/nand_ecc.h> -#include <linux/mtd/compatmac.h> #include <linux/interrupt.h> #include <linux/bitops.h> #include <linux/leds.h> @@ -347,7 +346,7 @@ static int nand_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip) struct nand_chip *chip = mtd->priv; u16 bad; - if (chip->options & NAND_BB_LAST_PAGE) + if (chip->options & NAND_BBT_SCANLASTPAGE) ofs += mtd->erasesize - mtd->writesize; page = (int)(ofs >> chip->page_shift) & chip->pagemask; @@ -397,9 +396,9 @@ static int nand_default_block_markbad(struct mtd_info *mtd, loff_t ofs) { struct nand_chip *chip = mtd->priv; uint8_t buf[2] = { 0, 0 }; - int block, ret; + int block, ret, i = 0; - if (chip->options & NAND_BB_LAST_PAGE) + if (chip->options & NAND_BBT_SCANLASTPAGE) ofs += mtd->erasesize - mtd->writesize; /* Get block number */ @@ -411,17 +410,31 @@ static int nand_default_block_markbad(struct mtd_info *mtd, loff_t ofs) if (chip->options & NAND_USE_FLASH_BBT) ret = nand_update_bbt(mtd, ofs); else { - /* We write two bytes, so we dont have to mess with 16 bit - * access - */ nand_get_device(chip, mtd, FL_WRITING); - ofs += mtd->oobsize; - chip->ops.len = chip->ops.ooblen = 2; - chip->ops.datbuf = NULL; - chip->ops.oobbuf = buf; - chip->ops.ooboffs = chip->badblockpos & ~0x01; - ret = nand_do_write_oob(mtd, ofs, &chip->ops); + /* Write to first two pages and to byte 1 and 6 if necessary. + * If we write to more than one location, the first error + * encountered quits the procedure. We write two bytes per + * location, so we dont have to mess with 16 bit access. + */ + do { + chip->ops.len = chip->ops.ooblen = 2; + chip->ops.datbuf = NULL; + chip->ops.oobbuf = buf; + chip->ops.ooboffs = chip->badblockpos & ~0x01; + + ret = nand_do_write_oob(mtd, ofs, &chip->ops); + + if (!ret && (chip->options & NAND_BBT_SCANBYTE1AND6)) { + chip->ops.ooboffs = NAND_SMALL_BADBLOCK_POS + & ~0x01; + ret = nand_do_write_oob(mtd, ofs, &chip->ops); + } + i++; + ofs += mtd->writesize; + } while (!ret && (chip->options & NAND_BBT_SCAN2NDPAGE) && + i < 2); + nand_release_device(mtd); } if (!ret) @@ -2920,9 +2933,14 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd, chip->chip_shift = ffs((unsigned)(chip->chipsize >> 32)) + 32 - 1; /* Set the bad block position */ - chip->badblockpos = mtd->writesize > 512 ? 
- NAND_LARGE_BADBLOCK_POS : NAND_SMALL_BADBLOCK_POS; - chip->badblockbits = 8; + if (!(busw & NAND_BUSWIDTH_16) && (*maf_id == NAND_MFR_STMICRO || + (*maf_id == NAND_MFR_SAMSUNG && + mtd->writesize == 512) || + *maf_id == NAND_MFR_AMD)) + chip->badblockpos = NAND_SMALL_BADBLOCK_POS; + else + chip->badblockpos = NAND_LARGE_BADBLOCK_POS; + /* Get chip options, preserve non chip based options */ chip->options &= ~NAND_CHIPOPTIONS_MSK; @@ -2941,12 +2959,32 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd, /* * Bad block marker is stored in the last page of each block - * on Samsung and Hynix MLC devices + * on Samsung and Hynix MLC devices; stored in first two pages + * of each block on Micron devices with 2KiB pages and on + * SLC Samsung, Hynix, and AMD/Spansion. All others scan only + * the first page. */ if ((chip->cellinfo & NAND_CI_CELLTYPE_MSK) && (*maf_id == NAND_MFR_SAMSUNG || *maf_id == NAND_MFR_HYNIX)) - chip->options |= NAND_BB_LAST_PAGE; + chip->options |= NAND_BBT_SCANLASTPAGE; + else if ((!(chip->cellinfo & NAND_CI_CELLTYPE_MSK) && + (*maf_id == NAND_MFR_SAMSUNG || + *maf_id == NAND_MFR_HYNIX || + *maf_id == NAND_MFR_AMD)) || + (mtd->writesize == 2048 && + *maf_id == NAND_MFR_MICRON)) + chip->options |= NAND_BBT_SCAN2NDPAGE; + + /* + * Numonyx/ST 2K pages, x8 bus use BOTH byte 1 and 6 + */ + if (!(busw & NAND_BUSWIDTH_16) && + *maf_id == NAND_MFR_STMICRO && + mtd->writesize == 2048) { + chip->options |= NAND_BBT_SCANBYTE1AND6; + chip->badblockpos = 0; + } /* Check for AND chips with 4 page planes */ if (chip->options & NAND_4PAGE_ARRAY) @@ -3306,6 +3344,11 @@ void nand_release(struct mtd_info *mtd) kfree(chip->bbt); if (!(chip->options & NAND_OWN_BUFFERS)) kfree(chip->buffers); + + /* Free bad block descriptor memory */ + if (chip->badblock_pattern && chip->badblock_pattern->options + & NAND_BBT_DYNAMICSTRUCT) + kfree(chip->badblock_pattern); } EXPORT_SYMBOL_GPL(nand_lock); diff --git a/drivers/mtd/nand/nand_bbt.c b/drivers/mtd/nand/nand_bbt.c index ad97c0ce73b2..5fedf4a74f16 100644 --- a/drivers/mtd/nand/nand_bbt.c +++ b/drivers/mtd/nand/nand_bbt.c @@ -55,7 +55,6 @@ #include <linux/mtd/mtd.h> #include <linux/mtd/nand.h> #include <linux/mtd/nand_ecc.h> -#include <linux/mtd/compatmac.h> #include <linux/bitops.h> #include <linux/delay.h> #include <linux/vmalloc.h> @@ -93,6 +92,28 @@ static int check_pattern(uint8_t *buf, int len, int paglen, struct nand_bbt_desc return -1; } + /* Check both positions 1 and 6 for pattern? */ + if (td->options & NAND_BBT_SCANBYTE1AND6) { + if (td->options & NAND_BBT_SCANEMPTY) { + p += td->len; + end += NAND_SMALL_BADBLOCK_POS - td->offs; + /* Check region between positions 1 and 6 */ + for (i = 0; i < NAND_SMALL_BADBLOCK_POS - td->offs - td->len; + i++) { + if (*p++ != 0xff) + return -1; + } + } + else { + p += NAND_SMALL_BADBLOCK_POS - td->offs; + } + /* Compare the pattern */ + for (i = 0; i < td->len; i++) { + if (p[i] != td->pattern[i]) + return -1; + } + } + if (td->options & NAND_BBT_SCANEMPTY) { p += td->len; end += td->len; @@ -124,6 +145,13 @@ static int check_short_pattern(uint8_t *buf, struct nand_bbt_descr *td) if (p[td->offs + i] != td->pattern[i]) return -1; } + /* Need to check location 1 AND 6? 
*/ + if (td->options & NAND_BBT_SCANBYTE1AND6) { + for (i = 0; i < td->len; i++) { + if (p[NAND_SMALL_BADBLOCK_POS + i] != td->pattern[i]) + return -1; + } + } return 0; } @@ -397,12 +425,10 @@ static int create_bbt(struct mtd_info *mtd, uint8_t *buf, if (bd->options & NAND_BBT_SCANALLPAGES) len = 1 << (this->bbt_erase_shift - this->page_shift); - else { - if (bd->options & NAND_BBT_SCAN2NDPAGE) - len = 2; - else - len = 1; - } + else if (bd->options & NAND_BBT_SCAN2NDPAGE) + len = 2; + else + len = 1; if (!(bd->options & NAND_BBT_SCANEMPTY)) { /* We need only read few bytes from the OOB area */ @@ -432,7 +458,7 @@ static int create_bbt(struct mtd_info *mtd, uint8_t *buf, from = (loff_t)startblock << (this->bbt_erase_shift - 1); } - if (this->options & NAND_BB_LAST_PAGE) + if (this->options & NAND_BBT_SCANLASTPAGE) from += mtd->erasesize - (mtd->writesize * len); for (i = startblock; i < numblocks;) { @@ -1092,30 +1118,16 @@ int nand_update_bbt(struct mtd_info *mtd, loff_t offs) * while scanning a device for factory marked good / bad blocks. */ static uint8_t scan_ff_pattern[] = { 0xff, 0xff }; -static struct nand_bbt_descr smallpage_memorybased = { - .options = NAND_BBT_SCAN2NDPAGE, - .offs = 5, - .len = 1, - .pattern = scan_ff_pattern -}; - -static struct nand_bbt_descr largepage_memorybased = { - .options = 0, - .offs = 0, - .len = 2, - .pattern = scan_ff_pattern -}; - static struct nand_bbt_descr smallpage_flashbased = { .options = NAND_BBT_SCAN2NDPAGE, - .offs = 5, + .offs = NAND_SMALL_BADBLOCK_POS, .len = 1, .pattern = scan_ff_pattern }; static struct nand_bbt_descr largepage_flashbased = { .options = NAND_BBT_SCAN2NDPAGE, - .offs = 0, + .offs = NAND_LARGE_BADBLOCK_POS, .len = 2, .pattern = scan_ff_pattern }; @@ -1154,6 +1166,43 @@ static struct nand_bbt_descr bbt_mirror_descr = { .pattern = mirror_pattern }; +#define BBT_SCAN_OPTIONS (NAND_BBT_SCANLASTPAGE | NAND_BBT_SCAN2NDPAGE | \ + NAND_BBT_SCANBYTE1AND6) +/** + * nand_create_default_bbt_descr - [Internal] Creates a BBT descriptor structure + * @this: NAND chip to create descriptor for + * + * This function allocates and initializes a nand_bbt_descr for BBM detection + * based on the properties of "this". The new descriptor is stored in + * this->badblock_pattern. Thus, this->badblock_pattern should be NULL when + * passed to this function. + * + * TODO: Handle other flags, replace other static structs + * (e.g. handle NAND_BBT_FLASH for flash-based BBT, + * replace smallpage_flashbased) + * + */ +static int nand_create_default_bbt_descr(struct nand_chip *this) +{ + struct nand_bbt_descr *bd; + if (this->badblock_pattern) { + printk(KERN_WARNING "BBT descr already allocated; not replacing.\n"); + return -EINVAL; + } + bd = kzalloc(sizeof(*bd), GFP_KERNEL); + if (!bd) { + printk(KERN_ERR "nand_create_default_bbt_descr: Out of memory\n"); + return -ENOMEM; + } + bd->options = this->options & BBT_SCAN_OPTIONS; + bd->offs = this->badblockpos; + bd->len = (this->options & NAND_BUSWIDTH_16) ? 2 : 1; + bd->pattern = scan_ff_pattern; + bd->options |= NAND_BBT_DYNAMICSTRUCT; + this->badblock_pattern = bd; + return 0; +} + /** * nand_default_bbt - [NAND Interface] Select a default bad block table for the device * @mtd: MTD device structure @@ -1196,10 +1245,8 @@ int nand_default_bbt(struct mtd_info *mtd) } else { this->bbt_td = NULL; this->bbt_md = NULL; - if (!this->badblock_pattern) { - this->badblock_pattern = (mtd->writesize > 512) ? 
- &largepage_memorybased : &smallpage_memorybased; - } + if (!this->badblock_pattern) + nand_create_default_bbt_descr(this); } return nand_scan_bbt(mtd, this->badblock_pattern); } diff --git a/drivers/mtd/nand/nand_ids.c b/drivers/mtd/nand/nand_ids.c index 89907ed99009..a04b89105b65 100644 --- a/drivers/mtd/nand/nand_ids.c +++ b/drivers/mtd/nand/nand_ids.c @@ -85,6 +85,7 @@ struct nand_flash_dev nand_flash_ids[] = { {"NAND 128MiB 3,3V 8-bit", 0xD1, 0, 128, 0, LP_OPTIONS}, {"NAND 128MiB 1,8V 16-bit", 0xB1, 0, 128, 0, LP_OPTIONS16}, {"NAND 128MiB 3,3V 16-bit", 0xC1, 0, 128, 0, LP_OPTIONS16}, + {"NAND 128MiB 1,8V 16-bit", 0xAD, 0, 128, 0, LP_OPTIONS16}, /* 2 Gigabit */ {"NAND 256MiB 1,8V 8-bit", 0xAA, 0, 256, 0, LP_OPTIONS}, @@ -110,6 +111,9 @@ struct nand_flash_dev nand_flash_ids[] = { {"NAND 2GiB 1,8V 16-bit", 0xB5, 0, 2048, 0, LP_OPTIONS16}, {"NAND 2GiB 3,3V 16-bit", 0xC5, 0, 2048, 0, LP_OPTIONS16}, + /* 32 Gigabit */ + {"NAND 4GiB 3,3V 8-bit", 0xD7, 0, 4096, 0, LP_OPTIONS16}, + /* * Renesas AND 1 Gigabit. Those chips do not support extended id and * have a strange page/block layout ! The chosen minimum erasesize is diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c index 261337efe0ee..c25648bb5793 100644 --- a/drivers/mtd/nand/nandsim.c +++ b/drivers/mtd/nand/nandsim.c @@ -553,8 +553,8 @@ static uint64_t divide(uint64_t n, uint32_t d) */ static int init_nandsim(struct mtd_info *mtd) { - struct nand_chip *chip = (struct nand_chip *)mtd->priv; - struct nandsim *ns = (struct nandsim *)(chip->priv); + struct nand_chip *chip = mtd->priv; + struct nandsim *ns = chip->priv; int i, ret = 0; uint64_t remains; uint64_t next_offset; @@ -1877,7 +1877,7 @@ static void switch_state(struct nandsim *ns) static u_char ns_nand_read_byte(struct mtd_info *mtd) { - struct nandsim *ns = (struct nandsim *)((struct nand_chip *)mtd->priv)->priv; + struct nandsim *ns = ((struct nand_chip *)mtd->priv)->priv; u_char outb = 0x00; /* Sanity and correctness checks */ @@ -1950,7 +1950,7 @@ static u_char ns_nand_read_byte(struct mtd_info *mtd) static void ns_nand_write_byte(struct mtd_info *mtd, u_char byte) { - struct nandsim *ns = (struct nandsim *)((struct nand_chip *)mtd->priv)->priv; + struct nandsim *ns = ((struct nand_chip *)mtd->priv)->priv; /* Sanity and correctness checks */ if (!ns->lines.ce) { @@ -2132,7 +2132,7 @@ static uint16_t ns_nand_read_word(struct mtd_info *mtd) static void ns_nand_write_buf(struct mtd_info *mtd, const u_char *buf, int len) { - struct nandsim *ns = (struct nandsim *)((struct nand_chip *)mtd->priv)->priv; + struct nandsim *ns = ((struct nand_chip *)mtd->priv)->priv; /* Check that chip is expecting data input */ if (!(ns->state & STATE_DATAIN_MASK)) { @@ -2159,7 +2159,7 @@ static void ns_nand_write_buf(struct mtd_info *mtd, const u_char *buf, int len) static void ns_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len) { - struct nandsim *ns = (struct nandsim *)((struct nand_chip *)mtd->priv)->priv; + struct nandsim *ns = ((struct nand_chip *)mtd->priv)->priv; /* Sanity and correctness checks */ if (!ns->lines.ce) { @@ -2352,7 +2352,7 @@ module_init(ns_init_module); */ static void __exit ns_cleanup_module(void) { - struct nandsim *ns = (struct nandsim *)(((struct nand_chip *)nsmtd->priv)->priv); + struct nandsim *ns = ((struct nand_chip *)nsmtd->priv)->priv; int i; free_nandsim(ns); /* Free nandsim private resources */ diff --git a/drivers/mtd/nand/plat_nand.c b/drivers/mtd/nand/plat_nand.c index 8d467315f02b..90e143e5ad3e 100644 --- a/drivers/mtd/nand/plat_nand.c 
+++ b/drivers/mtd/nand/plat_nand.c @@ -91,7 +91,7 @@ static int __devinit plat_nand_probe(struct platform_device *pdev) } /* Scan to find existance of the device */ - if (nand_scan(&data->mtd, 1)) { + if (nand_scan(&data->mtd, pdata->chip.nr_chips)) { err = -ENXIO; goto out; } diff --git a/drivers/mtd/nand/r852.c b/drivers/mtd/nand/r852.c index bcfc851fe550..5169ca6a66bc 100644 --- a/drivers/mtd/nand/r852.c +++ b/drivers/mtd/nand/r852.c @@ -64,8 +64,8 @@ static inline void r852_write_reg_dword(struct r852_device *dev, /* returns pointer to our private structure */ static inline struct r852_device *r852_get_dev(struct mtd_info *mtd) { - struct nand_chip *chip = (struct nand_chip *)mtd->priv; - return (struct r852_device *)chip->priv; + struct nand_chip *chip = mtd->priv; + return chip->priv; } @@ -380,7 +380,7 @@ void r852_cmdctl(struct mtd_info *mtd, int dat, unsigned int ctrl) */ int r852_wait(struct mtd_info *mtd, struct nand_chip *chip) { - struct r852_device *dev = (struct r852_device *)chip->priv; + struct r852_device *dev = chip->priv; unsigned long timeout; int status; diff --git a/drivers/mtd/nand/rtc_from4.c b/drivers/mtd/nand/rtc_from4.c index a033c4cd8e16..67440b5beef8 100644 --- a/drivers/mtd/nand/rtc_from4.c +++ b/drivers/mtd/nand/rtc_from4.c @@ -24,7 +24,6 @@ #include <linux/rslib.h> #include <linux/bitrev.h> #include <linux/module.h> -#include <linux/mtd/compatmac.h> #include <linux/mtd/mtd.h> #include <linux/mtd/nand.h> #include <linux/mtd/partitions.h> diff --git a/drivers/mtd/nand/s3c2410.c b/drivers/mtd/nand/s3c2410.c index 239aadfd01b0..33d832dddfdd 100644 --- a/drivers/mtd/nand/s3c2410.c +++ b/drivers/mtd/nand/s3c2410.c @@ -727,15 +727,12 @@ static int s3c2410_nand_add_partition(struct s3c2410_nand_info *info, if (set == NULL) return add_mtd_device(&mtd->mtd); - if (set->nr_partitions == 0) { - mtd->mtd.name = set->name; - nr_part = parse_mtd_partitions(&mtd->mtd, part_probes, - &part_info, 0); - } else { - if (set->nr_partitions > 0 && set->partitions != NULL) { - nr_part = set->nr_partitions; - part_info = set->partitions; - } + mtd->mtd.name = set->name; + nr_part = parse_mtd_partitions(&mtd->mtd, part_probes, &part_info, 0); + + if (nr_part <= 0 && set->nr_partitions > 0) { + nr_part = set->nr_partitions; + part_info = set->partitions; } if (nr_part > 0 && part_info) diff --git a/drivers/mtd/nand/sm_common.c b/drivers/mtd/nand/sm_common.c index ac80fb362e63..4a8f367c295c 100644 --- a/drivers/mtd/nand/sm_common.c +++ b/drivers/mtd/nand/sm_common.c @@ -109,7 +109,7 @@ static struct nand_flash_dev nand_xd_flash_ids[] = { int sm_register_device(struct mtd_info *mtd, int smartmedia) { - struct nand_chip *chip = (struct nand_chip *)mtd->priv; + struct nand_chip *chip = mtd->priv; int ret; chip->options |= NAND_SKIP_BBTSCAN; diff --git a/drivers/mtd/nftlcore.c b/drivers/mtd/nftlcore.c index a4578bf903aa..b155666acfbe 100644 --- a/drivers/mtd/nftlcore.c +++ b/drivers/mtd/nftlcore.c @@ -1,11 +1,22 @@ -/* Linux driver for NAND Flash Translation Layer */ -/* (c) 1999 Machine Vision Holdings, Inc. */ -/* Author: David Woodhouse <dwmw2@infradead.org> */ - /* - The contents of this file are distributed under the GNU General - Public License version 2. The author places no additional - restrictions of any kind on it. + * Linux driver for NAND Flash Translation Layer + * + * Copyright © 1999 Machine Vision Holdings, Inc. 
+ * Copyright © 1999-2010 David Woodhouse <dwmw2@infradead.org> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #define PRERELEASE diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c index 8b22b1836e9f..e3cd1ffad2f6 100644 --- a/drivers/mtd/nftlmount.c +++ b/drivers/mtd/nftlmount.c @@ -2,7 +2,8 @@ * NFTL mount code with extensive checks * * Author: Fabrice Bellard (fabrice.bellard@netgem.com) - * Copyright (C) 2000 Netgem S.A. + * Copyright © 2000 Netgem S.A. + * Copyright © 1999-2010 David Woodhouse <dwmw2@infradead.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by diff --git a/drivers/mtd/ofpart.c b/drivers/mtd/ofpart.c index 4f0d635674f3..8bf7dc6d1ce6 100644 --- a/drivers/mtd/ofpart.c +++ b/drivers/mtd/ofpart.c @@ -1,11 +1,11 @@ /* * Flash partitions described by the OF (or flattened) device tree * - * Copyright (C) 2006 MontaVista Software Inc. + * Copyright © 2006 MontaVista Software Inc. * Author: Vitaly Wool <vwool@ru.mvista.com> * * Revised to handle newer style flash binding by: - * Copyright (C) 2007 David Gibson, IBM Corporation. + * Copyright © 2007 David Gibson, IBM Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the diff --git a/drivers/mtd/onenand/Kconfig b/drivers/mtd/onenand/Kconfig index 9a49d68ba5f9..3f32289fdbb5 100644 --- a/drivers/mtd/onenand/Kconfig +++ b/drivers/mtd/onenand/Kconfig @@ -25,14 +25,14 @@ config MTD_ONENAND_GENERIC config MTD_ONENAND_OMAP2 tristate "OneNAND on OMAP2/OMAP3 support" - depends on MTD_ONENAND && (ARCH_OMAP2 || ARCH_OMAP3) + depends on ARCH_OMAP2 || ARCH_OMAP3 help Support for a OneNAND flash device connected to an OMAP2/OMAP3 CPU via the GPMC memory controller. config MTD_ONENAND_SAMSUNG tristate "OneNAND on Samsung SOC controller support" - depends on MTD_ONENAND && (ARCH_S3C64XX || ARCH_S5PC100 || ARCH_S5PV210) + depends on ARCH_S3C64XX || ARCH_S5PC100 || ARCH_S5PV210 help Support for a OneNAND flash device connected to an Samsung SOC S3C64XX/S5PC1XX controller. 
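The s3c2410_nand_add_partition() hunk above drops the either/or branching: the MTD partition parsers are now always consulted first, and the static table from the board's platform data is used only when the parsers return nothing. A minimal sketch of that fallback policy in plain C — parse_partitions(), struct part and the sample table are hypothetical stand-ins for parse_mtd_partitions() and struct mtd_partition, not driver code:

#include <stdio.h>

struct part { const char *name; };

/* Stand-in for parse_mtd_partitions(): returns the number of parsed
 * partitions, or <= 0 when no parser produced a table. */
static int parse_partitions(struct part **out)
{
        (void)out;
        return 0;            /* pretend no command-line/probed table exists */
}

int main(void)
{
        static struct part board_parts[] = { { "boot" }, { "rootfs" } };
        struct part *part_info = NULL;
        int nr_part;

        nr_part = parse_partitions(&part_info);   /* parsers get first shot */
        if (nr_part <= 0) {                       /* fall back to platform data */
                nr_part = 2;
                part_info = board_parts;
        }

        if (nr_part > 0 && part_info)
                printf("registering %d partitions, first is \"%s\"\n",
                       nr_part, part_info[0].name);
        return 0;
}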
diff --git a/drivers/mtd/onenand/onenand_base.c b/drivers/mtd/onenand/onenand_base.c index 26caf2590dae..a2bb520286f8 100644 --- a/drivers/mtd/onenand/onenand_base.c +++ b/drivers/mtd/onenand/onenand_base.c @@ -377,8 +377,11 @@ static int onenand_command(struct mtd_info *mtd, int cmd, loff_t addr, size_t le default: block = onenand_block(this, addr); - page = (int) (addr - onenand_addr(this, block)) >> this->page_shift; - + if (FLEXONENAND(this)) + page = (int) (addr - onenand_addr(this, block))>>\ + this->page_shift; + else + page = (int) (addr >> this->page_shift); if (ONENAND_IS_2PLANE(this)) { /* Make the even block number */ block &= ~1; @@ -3730,17 +3733,16 @@ out: } /** - * onenand_probe - [OneNAND Interface] Probe the OneNAND device + * onenand_chip_probe - [OneNAND Interface] The generic chip probe * @param mtd MTD device structure * * OneNAND detection method: * Compare the values from command with ones from register */ -static int onenand_probe(struct mtd_info *mtd) +static int onenand_chip_probe(struct mtd_info *mtd) { struct onenand_chip *this = mtd->priv; - int bram_maf_id, bram_dev_id, maf_id, dev_id, ver_id; - int density; + int bram_maf_id, bram_dev_id, maf_id, dev_id; int syscfg; /* Save system configuration 1 */ @@ -3763,12 +3765,6 @@ static int onenand_probe(struct mtd_info *mtd) /* Restore system configuration 1 */ this->write_word(syscfg, this->base + ONENAND_REG_SYS_CFG1); - /* Workaround */ - if (syscfg & ONENAND_SYS_CFG1_SYNC_WRITE) { - bram_maf_id = this->read_word(this->base + ONENAND_REG_MANUFACTURER_ID); - bram_dev_id = this->read_word(this->base + ONENAND_REG_DEVICE_ID); - } - /* Check manufacturer ID */ if (onenand_check_maf(bram_maf_id)) return -ENXIO; @@ -3776,13 +3772,35 @@ static int onenand_probe(struct mtd_info *mtd) /* Read manufacturer and device IDs from Register */ maf_id = this->read_word(this->base + ONENAND_REG_MANUFACTURER_ID); dev_id = this->read_word(this->base + ONENAND_REG_DEVICE_ID); - ver_id = this->read_word(this->base + ONENAND_REG_VERSION_ID); - this->technology = this->read_word(this->base + ONENAND_REG_TECHNOLOGY); /* Check OneNAND device */ if (maf_id != bram_maf_id || dev_id != bram_dev_id) return -ENXIO; + return 0; +} + +/** + * onenand_probe - [OneNAND Interface] Probe the OneNAND device + * @param mtd MTD device structure + */ +static int onenand_probe(struct mtd_info *mtd) +{ + struct onenand_chip *this = mtd->priv; + int maf_id, dev_id, ver_id; + int density; + int ret; + + ret = this->chip_probe(mtd); + if (ret) + return ret; + + /* Read manufacturer and device IDs from Register */ + maf_id = this->read_word(this->base + ONENAND_REG_MANUFACTURER_ID); + dev_id = this->read_word(this->base + ONENAND_REG_DEVICE_ID); + ver_id = this->read_word(this->base + ONENAND_REG_VERSION_ID); + this->technology = this->read_word(this->base + ONENAND_REG_TECHNOLOGY); + /* Flash device information */ onenand_print_device_info(dev_id, ver_id); this->device_id = dev_id; @@ -3909,6 +3927,9 @@ int onenand_scan(struct mtd_info *mtd, int maxchips) if (!this->unlock_all) this->unlock_all = onenand_unlock_all; + if (!this->chip_probe) + this->chip_probe = onenand_chip_probe; + if (!this->read_bufferram) this->read_bufferram = onenand_read_bufferram; if (!this->write_bufferram) diff --git a/drivers/mtd/onenand/onenand_bbt.c b/drivers/mtd/onenand/onenand_bbt.c index a91fcac1af01..01ab5b3c453b 100644 --- a/drivers/mtd/onenand/onenand_bbt.c +++ b/drivers/mtd/onenand/onenand_bbt.c @@ -15,7 +15,6 @@ #include <linux/slab.h> #include <linux/mtd/mtd.h> 
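The onenand_base.c hunks above split the BootRAM/register ID comparison out of onenand_probe() into a this->chip_probe hook, and onenand_scan() installs the generic onenand_chip_probe() only when a controller driver (such as the S5PC110 code) has not already supplied its own. A small self-contained sketch of that "optional hook, filled with a default at scan time" pattern — struct chip, scan() and the probe callbacks below are hypothetical stand-ins, not the OneNAND API:

#include <stdio.h>

struct chip {
        int (*chip_probe)(struct chip *c);   /* optional, may be preset */
};

static int generic_chip_probe(struct chip *c)
{
        (void)c;
        printf("generic probe\n");
        return 0;
}

static int board_chip_probe(struct chip *c)
{
        (void)c;
        printf("board-specific probe (e.g. skip the BootRAM check)\n");
        return 0;
}

static int scan(struct chip *c)
{
        if (!c->chip_probe)                  /* mirrors onenand_scan() */
                c->chip_probe = generic_chip_probe;
        return c->chip_probe(c);
}

int main(void)
{
        struct chip plain = { 0 };
        struct chip s5pc110 = { .chip_probe = board_chip_probe };

        scan(&plain);      /* falls back to the generic routine */
        scan(&s5pc110);    /* keeps the controller-specific override */
        return 0;
}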
#include <linux/mtd/onenand.h> -#include <linux/mtd/compatmac.h> /** * check_short_pattern - [GENERIC] check if a pattern is in the buffer diff --git a/drivers/mtd/onenand/samsung.c b/drivers/mtd/onenand/samsung.c index 2750317cb58f..cb443af3d45f 100644 --- a/drivers/mtd/onenand/samsung.c +++ b/drivers/mtd/onenand/samsung.c @@ -630,6 +630,12 @@ normal: return 0; } +static int s5pc110_chip_probe(struct mtd_info *mtd) +{ + /* Now just return 0 */ + return 0; +} + static int s3c_onenand_bbt_wait(struct mtd_info *mtd, int state) { unsigned int flags = INT_ACT | LOAD_CMP; @@ -757,6 +763,7 @@ static void s3c_onenand_setup(struct mtd_info *mtd) /* Use generic onenand functions */ onenand->cmd_map = s5pc1xx_cmd_map; this->read_bufferram = s5pc110_read_bufferram; + this->chip_probe = s5pc110_chip_probe; return; } else { BUG(); @@ -781,7 +788,6 @@ static int s3c_onenand_probe(struct platform_device *pdev) struct mtd_info *mtd; struct resource *r; int size, err; - unsigned long onenand_ctrl_cfg = 0; pdata = pdev->dev.platform_data; /* No need to check pdata. the platform data is optional */ @@ -900,14 +906,6 @@ static int s3c_onenand_probe(struct platform_device *pdev) } onenand->phys_base = onenand->base_res->start; - - onenand_ctrl_cfg = readl(onenand->dma_addr + 0x100); - if ((onenand_ctrl_cfg & ONENAND_SYS_CFG1_SYNC_WRITE) && - onenand->dma_addr) - writel(onenand_ctrl_cfg & ~ONENAND_SYS_CFG1_SYNC_WRITE, - onenand->dma_addr + 0x100); - else - onenand_ctrl_cfg = 0; } if (onenand_scan(mtd, 1)) { @@ -915,10 +913,7 @@ static int s3c_onenand_probe(struct platform_device *pdev) goto scan_failed; } - if (onenand->type == TYPE_S5PC110) { - if (onenand_ctrl_cfg && onenand->dma_addr) - writel(onenand_ctrl_cfg, onenand->dma_addr + 0x100); - } else { + if (onenand->type != TYPE_S5PC110) { /* S3C doesn't handle subpage write */ mtd->subpage_sft = 0; this->subpagesize = mtd->writesize; diff --git a/drivers/mtd/redboot.c b/drivers/mtd/redboot.c index 2d600a1bf2aa..7a87d07cd79f 100644 --- a/drivers/mtd/redboot.c +++ b/drivers/mtd/redboot.c @@ -1,6 +1,24 @@ /* * Parse RedBoot-style Flash Image System (FIS) tables and * produce a Linux partition array to match. + * + * Copyright © 2001 Red Hat UK Limited + * Copyright © 2001-2010 David Woodhouse <dwmw2@infradead.org> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * */ #include <linux/kernel.h> diff --git a/drivers/mtd/rfd_ftl.c b/drivers/mtd/rfd_ftl.c index 63b83c0d9a13..cc4d1805b864 100644 --- a/drivers/mtd/rfd_ftl.c +++ b/drivers/mtd/rfd_ftl.c @@ -1,7 +1,7 @@ /* * rfd_ftl.c -- resident flash disk (flash translation layer) * - * Copyright (C) 2005 Sean Young <sean@mess.org> + * Copyright © 2005 Sean Young <sean@mess.org> * * This type of flash translation layer (FTL) is used by the Embedded BIOS * by General Software. 
It is known as the Resident Flash Disk (RFD), see: diff --git a/drivers/mtd/ssfdc.c b/drivers/mtd/ssfdc.c index 81c4ecdc11f5..5cd189793332 100644 --- a/drivers/mtd/ssfdc.c +++ b/drivers/mtd/ssfdc.c @@ -1,6 +1,6 @@ /* * Linux driver for SSFDC Flash Translation Layer (Read only) - * (c) 2005 Eptar srl + * © 2005 Eptar srl * Author: Claudio Lanconelli <lanconelli.claudio@eptar.com> * * Based on NTFL and MTDBLOCK_RO drivers diff --git a/drivers/mtd/tests/mtd_pagetest.c b/drivers/mtd/tests/mtd_pagetest.c index 6bc1b8276c62..00b937e38c1d 100644 --- a/drivers/mtd/tests/mtd_pagetest.c +++ b/drivers/mtd/tests/mtd_pagetest.c @@ -310,7 +310,7 @@ static int crosstest(void) static int erasecrosstest(void) { size_t read = 0, written = 0; - int err = 0, i, ebnum, ok = 1, ebnum2; + int err = 0, i, ebnum, ebnum2; loff_t addr0; char *readbuf = twopages; @@ -357,8 +357,7 @@ static int erasecrosstest(void) if (memcmp(writebuf, readbuf, pgsize)) { printk(PRINT_PREF "verify failed!\n"); errcnt += 1; - ok = 0; - return err; + return -1; } printk(PRINT_PREF "erasing block %d\n", ebnum); @@ -396,10 +395,10 @@ static int erasecrosstest(void) if (memcmp(writebuf, readbuf, pgsize)) { printk(PRINT_PREF "verify failed!\n"); errcnt += 1; - ok = 0; + return -1; } - if (ok && !err) + if (!err) printk(PRINT_PREF "erasecrosstest ok\n"); return err; } diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c index 066fd5b09fda..ad19585d960b 100644 --- a/drivers/net/cxgb3/cxgb3_main.c +++ b/drivers/net/cxgb3/cxgb3_main.c @@ -3198,17 +3198,17 @@ static int __devinit init_one(struct pci_dev *pdev, } } - err = pci_request_regions(pdev, DRV_NAME); + err = pci_enable_device(pdev); if (err) { - /* Just info, some other driver may have claimed the device. */ - dev_info(&pdev->dev, "cannot obtain PCI resources\n"); - return err; + dev_err(&pdev->dev, "cannot enable PCI device\n"); + goto out; } - err = pci_enable_device(pdev); + err = pci_request_regions(pdev, DRV_NAME); if (err) { - dev_err(&pdev->dev, "cannot enable PCI device\n"); - goto out_release_regions; + /* Just info, some other driver may have claimed the device. 
*/ + dev_info(&pdev->dev, "cannot obtain PCI resources\n"); + goto out_disable_device; } if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { @@ -3217,11 +3217,11 @@ static int __devinit init_one(struct pci_dev *pdev, if (err) { dev_err(&pdev->dev, "unable to obtain 64-bit DMA for " "coherent allocations\n"); - goto out_disable_device; + goto out_release_regions; } } else if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) { dev_err(&pdev->dev, "no usable DMA configuration\n"); - goto out_disable_device; + goto out_release_regions; } pci_set_master(pdev); @@ -3234,7 +3234,7 @@ static int __devinit init_one(struct pci_dev *pdev, adapter = kzalloc(sizeof(*adapter), GFP_KERNEL); if (!adapter) { err = -ENOMEM; - goto out_disable_device; + goto out_release_regions; } adapter->nofail_skb = @@ -3370,11 +3370,12 @@ out_free_dev: out_free_adapter: kfree(adapter); -out_disable_device: - pci_disable_device(pdev); out_release_regions: pci_release_regions(pdev); +out_disable_device: + pci_disable_device(pdev); pci_set_drvdata(pdev, NULL); +out: return err; } diff --git a/drivers/net/cxgb4vf/cxgb4vf_main.c b/drivers/net/cxgb4vf/cxgb4vf_main.c index a16563219ac9..7b6d07f50c71 100644 --- a/drivers/net/cxgb4vf/cxgb4vf_main.c +++ b/drivers/net/cxgb4vf/cxgb4vf_main.c @@ -2462,23 +2462,24 @@ static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev, version_printed = 1; } + /* - * Reserve PCI resources for the device. If we can't get them some - * other driver may have already claimed the device ... + * Initialize generic PCI device state. */ - err = pci_request_regions(pdev, KBUILD_MODNAME); + err = pci_enable_device(pdev); if (err) { - dev_err(&pdev->dev, "cannot obtain PCI resources\n"); + dev_err(&pdev->dev, "cannot enable PCI device\n"); return err; } /* - * Initialize generic PCI device state. + * Reserve PCI resources for the device. If we can't get them some + * other driver may have already claimed the device ... 
*/ - err = pci_enable_device(pdev); + err = pci_request_regions(pdev, KBUILD_MODNAME); if (err) { - dev_err(&pdev->dev, "cannot enable PCI device\n"); - goto err_release_regions; + dev_err(&pdev->dev, "cannot obtain PCI resources\n"); + goto err_disable_device; } /* @@ -2491,14 +2492,14 @@ static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev, if (err) { dev_err(&pdev->dev, "unable to obtain 64-bit DMA for" " coherent allocations\n"); - goto err_disable_device; + goto err_release_regions; } pci_using_dac = 1; } else { err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); if (err != 0) { dev_err(&pdev->dev, "no usable DMA configuration\n"); - goto err_disable_device; + goto err_release_regions; } pci_using_dac = 0; } @@ -2514,7 +2515,7 @@ static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev, adapter = kzalloc(sizeof(*adapter), GFP_KERNEL); if (!adapter) { err = -ENOMEM; - goto err_disable_device; + goto err_release_regions; } pci_set_drvdata(pdev, adapter); adapter->pdev = pdev; @@ -2750,13 +2751,13 @@ err_free_adapter: kfree(adapter); pci_set_drvdata(pdev, NULL); -err_disable_device: - pci_disable_device(pdev); - pci_clear_master(pdev); - err_release_regions: pci_release_regions(pdev); pci_set_drvdata(pdev, NULL); + pci_clear_master(pdev); + +err_disable_device: + pci_disable_device(pdev); err_out: return err; diff --git a/drivers/net/davinci_emac.c b/drivers/net/davinci_emac.c index d0824e322068..7fbd052ddb0a 100644 --- a/drivers/net/davinci_emac.c +++ b/drivers/net/davinci_emac.c @@ -2944,8 +2944,8 @@ static int __devexit davinci_emac_remove(struct platform_device *pdev) release_mem_region(res->start, res->end - res->start + 1); unregister_netdev(ndev); - free_netdev(ndev); iounmap(priv->remap_addr); + free_netdev(ndev); clk_disable(emac_clk); clk_put(emac_clk); diff --git a/drivers/net/e100.c b/drivers/net/e100.c index b194bad29ace..8e2eab4e7c75 100644 --- a/drivers/net/e100.c +++ b/drivers/net/e100.c @@ -1779,6 +1779,7 @@ static int e100_tx_clean(struct nic *nic) for (cb = nic->cb_to_clean; cb->status & cpu_to_le16(cb_complete); cb = nic->cb_to_clean = cb->next) { + rmb(); /* read skb after status */ netif_printk(nic, tx_done, KERN_DEBUG, nic->netdev, "cb[%d]->status = 0x%04X\n", (int)(((void*)cb - (void*)nic->cbs)/sizeof(struct cb)), @@ -1927,6 +1928,7 @@ static int e100_rx_indicate(struct nic *nic, struct rx *rx, netif_printk(nic, rx_status, KERN_DEBUG, nic->netdev, "status=0x%04X\n", rfd_status); + rmb(); /* read size after status bit */ /* If data isn't ready, nothing to indicate */ if (unlikely(!(rfd_status & cb_complete))) { diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c index 02833af8a0b1..5cc39ed289c6 100644 --- a/drivers/net/e1000/e1000_main.c +++ b/drivers/net/e1000/e1000_main.c @@ -3454,6 +3454,7 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter, while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) && (count < tx_ring->count)) { bool cleaned = false; + rmb(); /* read buffer_info after eop_desc */ for ( ; !cleaned; count++) { tx_desc = E1000_TX_DESC(*tx_ring, i); buffer_info = &tx_ring->buffer_info[i]; @@ -3643,6 +3644,7 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter, if (*work_done >= work_to_do) break; (*work_done)++; + rmb(); /* read descriptor and rx_buffer_info after status DD */ status = rx_desc->status; skb = buffer_info->skb; @@ -3849,6 +3851,7 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, if (*work_done >= work_to_do) break; (*work_done)++; + rmb(); /* read 
descriptor and rx_buffer_info after status DD */ status = rx_desc->status; skb = buffer_info->skb; diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c index 521c6ee1f32a..2b8ef44bd2b1 100644 --- a/drivers/net/e1000e/netdev.c +++ b/drivers/net/e1000e/netdev.c @@ -781,6 +781,7 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, if (*work_done >= work_to_do) break; (*work_done)++; + rmb(); /* read descriptor and rx_buffer_info after status DD */ status = rx_desc->status; skb = buffer_info->skb; @@ -991,6 +992,7 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter) while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) && (count < tx_ring->count)) { bool cleaned = false; + rmb(); /* read buffer_info after eop_desc */ for (; !cleaned; count++) { tx_desc = E1000_TX_DESC(*tx_ring, i); buffer_info = &tx_ring->buffer_info[i]; @@ -1087,6 +1089,7 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, break; (*work_done)++; skb = buffer_info->skb; + rmb(); /* read descriptor and rx_buffer_info after status DD */ /* in the packet split case this is header only */ prefetch(skb->data - NET_IP_ALIGN); @@ -1286,6 +1289,7 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter, if (*work_done >= work_to_do) break; (*work_done)++; + rmb(); /* read descriptor and rx_buffer_info after status DD */ status = rx_desc->status; skb = buffer_info->skb; diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c index 77a7f87d498e..9aab85366d21 100644 --- a/drivers/net/enic/enic_main.c +++ b/drivers/net/enic/enic_main.c @@ -1087,10 +1087,7 @@ static int enic_set_port_profile(struct enic *enic, u8 *mac) { struct vic_provinfo *vp; u8 oui[3] = VIC_PROVINFO_CISCO_OUI; - u8 *uuid; char uuid_str[38]; - static char *uuid_fmt = "%02X%02X%02X%02X-%02X%02X-%02X%02X-" - "%02X%02X-%02X%02X%02X%02X%0X%02X"; int err; err = enic_vnic_dev_deinit(enic); @@ -1121,24 +1118,14 @@ static int enic_set_port_profile(struct enic *enic, u8 *mac) ETH_ALEN, mac); if (enic->pp.set & ENIC_SET_INSTANCE) { - uuid = enic->pp.instance_uuid; - sprintf(uuid_str, uuid_fmt, - uuid[0], uuid[1], uuid[2], uuid[3], - uuid[4], uuid[5], uuid[6], uuid[7], - uuid[8], uuid[9], uuid[10], uuid[11], - uuid[12], uuid[13], uuid[14], uuid[15]); + sprintf(uuid_str, "%pUB", enic->pp.instance_uuid); vic_provinfo_add_tlv(vp, VIC_LINUX_PROV_TLV_CLIENT_UUID_STR, sizeof(uuid_str), uuid_str); } if (enic->pp.set & ENIC_SET_HOST) { - uuid = enic->pp.host_uuid; - sprintf(uuid_str, uuid_fmt, - uuid[0], uuid[1], uuid[2], uuid[3], - uuid[4], uuid[5], uuid[6], uuid[7], - uuid[8], uuid[9], uuid[10], uuid[11], - uuid[12], uuid[13], uuid[14], uuid[15]); + sprintf(uuid_str, "%pUB", enic->pp.host_uuid); vic_provinfo_add_tlv(vp, VIC_LINUX_PROV_TLV_HOST_UUID_STR, sizeof(uuid_str), uuid_str); diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c index df5dcd23e4fc..9b4e5895f5f9 100644 --- a/drivers/net/igb/igb_main.c +++ b/drivers/net/igb/igb_main.c @@ -5353,6 +5353,7 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector) while ((eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)) && (count < tx_ring->count)) { + rmb(); /* read buffer_info after eop_desc status */ for (cleaned = false; !cleaned; count++) { tx_desc = E1000_TX_DESC_ADV(*tx_ring, i); buffer_info = &tx_ring->buffer_info[i]; @@ -5558,6 +5559,7 @@ static bool igb_clean_rx_irq_adv(struct igb_q_vector *q_vector, if (*work_done >= budget) break; (*work_done)++; + rmb(); /* read descriptor and rx_buffer_info after status 
DD */ skb = buffer_info->skb; prefetch(skb->data - NET_IP_ALIGN); diff --git a/drivers/net/igbvf/netdev.c b/drivers/net/igbvf/netdev.c index ec808fa8dc21..c539f7c9c3e0 100644 --- a/drivers/net/igbvf/netdev.c +++ b/drivers/net/igbvf/netdev.c @@ -248,6 +248,7 @@ static bool igbvf_clean_rx_irq(struct igbvf_adapter *adapter, if (*work_done >= work_to_do) break; (*work_done)++; + rmb(); /* read descriptor and rx_buffer_info after status DD */ buffer_info = &rx_ring->buffer_info[i]; @@ -780,6 +781,7 @@ static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring) while ((eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)) && (count < tx_ring->count)) { + rmb(); /* read buffer_info after eop_desc status */ for (cleaned = false; !cleaned; count++) { tx_desc = IGBVF_TX_DESC_ADV(*tx_ring, i); buffer_info = &tx_ring->buffer_info[i]; diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c index c6b75c83100c..45fc89b9ba64 100644 --- a/drivers/net/ixgb/ixgb_main.c +++ b/drivers/net/ixgb/ixgb_main.c @@ -1816,6 +1816,7 @@ ixgb_clean_tx_irq(struct ixgb_adapter *adapter) while (eop_desc->status & IXGB_TX_DESC_STATUS_DD) { + rmb(); /* read buffer_info after eop_desc */ for (cleaned = false; !cleaned; ) { tx_desc = IXGB_TX_DESC(*tx_ring, i); buffer_info = &tx_ring->buffer_info[i]; @@ -1976,6 +1977,7 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter, int *work_done, int work_to_do) break; (*work_done)++; + rmb(); /* read descriptor and rx_buffer_info after status DD */ status = rx_desc->status; skb = buffer_info->skb; buffer_info->skb = NULL; diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index 7d6a415bcf88..e32af434cc9d 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -748,6 +748,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) && (count < tx_ring->work_limit)) { bool cleaned = false; + rmb(); /* read buffer_info after eop_desc */ for ( ; !cleaned; count++) { struct sk_buff *skb; tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i); @@ -6155,9 +6156,11 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb) txq &= (adapter->ring_feature[RING_F_FCOE].indices - 1); txq += adapter->ring_feature[RING_F_FCOE].mask; return txq; +#ifdef CONFIG_IXGBE_DCB } else if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { txq = adapter->fcoe.up; return txq; +#endif } } #endif @@ -6216,10 +6219,14 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb, if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED && (skb->protocol == htons(ETH_P_FCOE) || skb->protocol == htons(ETH_P_FIP))) { - tx_flags &= ~(IXGBE_TX_FLAGS_VLAN_PRIO_MASK - << IXGBE_TX_FLAGS_VLAN_SHIFT); - tx_flags |= ((adapter->fcoe.up << 13) - << IXGBE_TX_FLAGS_VLAN_SHIFT); +#ifdef CONFIG_IXGBE_DCB + if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { + tx_flags &= ~(IXGBE_TX_FLAGS_VLAN_PRIO_MASK + << IXGBE_TX_FLAGS_VLAN_SHIFT); + tx_flags |= ((adapter->fcoe.up << 13) + << IXGBE_TX_FLAGS_VLAN_SHIFT); + } +#endif /* flag for FCoE offloads */ if (skb->protocol == htons(ETH_P_FCOE)) tx_flags |= IXGBE_TX_FLAGS_FCOE; diff --git a/drivers/net/ixgbevf/ixgbevf_main.c b/drivers/net/ixgbevf/ixgbevf_main.c index 3e291ccc629d..918c00359b0a 100644 --- a/drivers/net/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ixgbevf/ixgbevf_main.c @@ -231,6 +231,7 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_adapter *adapter, while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) && (count < tx_ring->work_limit)) { bool cleaned = 
false; + rmb(); /* read buffer_info after eop_desc */ for ( ; !cleaned; count++) { struct sk_buff *skb; tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i); @@ -518,6 +519,7 @@ static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector, break; (*work_done)++; + rmb(); /* read descriptor and rx_buffer_info after status DD */ if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) { hdr_info = le16_to_cpu(ixgbevf_get_hdr_info(rx_desc)); len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >> diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c index 6ce6ce1df6d2..fd86e18604e6 100644 --- a/drivers/net/netxen/netxen_nic_main.c +++ b/drivers/net/netxen/netxen_nic_main.c @@ -2001,27 +2001,26 @@ static void netxen_tx_timeout_task(struct work_struct *work) if (++adapter->tx_timeo_cnt >= NX_MAX_TX_TIMEOUTS) goto request_reset; + rtnl_lock(); if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { /* try to scrub interrupt */ netxen_napi_disable(adapter); - adapter->netdev->trans_start = jiffies; - netxen_napi_enable(adapter); netif_wake_queue(adapter->netdev); clear_bit(__NX_RESETTING, &adapter->state); - return; } else { clear_bit(__NX_RESETTING, &adapter->state); - if (!netxen_nic_reset_context(adapter)) { - adapter->netdev->trans_start = jiffies; - return; + if (netxen_nic_reset_context(adapter)) { + rtnl_unlock(); + goto request_reset; } - - /* context reset failed, fall through for fw reset */ } + adapter->netdev->trans_start = jiffies; + rtnl_unlock(); + return; request_reset: adapter->need_fw_reset = 1; diff --git a/drivers/net/ppp_async.c b/drivers/net/ppp_async.c index 6c2e8fa0ca31..af50a530daee 100644 --- a/drivers/net/ppp_async.c +++ b/drivers/net/ppp_async.c @@ -108,9 +108,9 @@ static void ppp_async_process(unsigned long arg); static void async_lcp_peek(struct asyncppp *ap, unsigned char *data, int len, int inbound); -static struct ppp_channel_ops async_ops = { - ppp_async_send, - ppp_async_ioctl +static const struct ppp_channel_ops async_ops = { + .start_xmit = ppp_async_send, + .ioctl = ppp_async_ioctl, }; /* diff --git a/drivers/net/ppp_synctty.c b/drivers/net/ppp_synctty.c index 52938da1e542..4c95ec3fb8d4 100644 --- a/drivers/net/ppp_synctty.c +++ b/drivers/net/ppp_synctty.c @@ -97,9 +97,9 @@ static void ppp_sync_flush_output(struct syncppp *ap); static void ppp_sync_input(struct syncppp *ap, const unsigned char *buf, char *flags, int count); -static struct ppp_channel_ops sync_ops = { - ppp_sync_send, - ppp_sync_ioctl +static const struct ppp_channel_ops sync_ops = { + .start_xmit = ppp_sync_send, + .ioctl = ppp_sync_ioctl, }; /* diff --git a/drivers/net/pppoe.c b/drivers/net/pppoe.c index 344ef330e123..c07de359dc07 100644 --- a/drivers/net/pppoe.c +++ b/drivers/net/pppoe.c @@ -92,7 +92,7 @@ static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb); static const struct proto_ops pppoe_ops; -static struct ppp_channel_ops pppoe_chan_ops; +static const struct ppp_channel_ops pppoe_chan_ops; /* per-net private data for this module */ static int pppoe_net_id __read_mostly; @@ -963,7 +963,7 @@ static int pppoe_xmit(struct ppp_channel *chan, struct sk_buff *skb) return __pppoe_xmit(sk, skb); } -static struct ppp_channel_ops pppoe_chan_ops = { +static const struct ppp_channel_ops pppoe_chan_ops = { .start_xmit = pppoe_xmit, }; diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index 3b03794ac3f5..7f62e2dea28f 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c @@ -1457,7 +1457,6 @@ int usbnet_resume (struct usb_interface *intf) 
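The rmb() insertions repeated across e100, e1000/e1000e, igb/igbvf, ixgb and ixgbe/ixgbevf above all apply the same rule: test the descriptor status (the DD/complete bit the NIC writes back via DMA) first, issue a read barrier, and only then read the remaining descriptor fields and the matching buffer_info entry, so those later loads cannot be satisfied ahead of the status check. A simplified, self-contained sketch of the ordering — read_barrier() stands in for the kernel's rmb(), and none of the types below are the real driver structures:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct rx_desc {                 /* written back by the device via DMA */
        uint16_t status;         /* bit 0: descriptor done (DD)        */
        uint16_t length;
};

#define DD_BIT 0x0001

/* Stand-in for the kernel's rmb(): order the status load before the
 * loads of the other descriptor fields / buffer_info. */
static inline void read_barrier(void)
{
        atomic_thread_fence(memory_order_acquire);
}

static int clean_one(volatile struct rx_desc *desc)
{
        if (!(desc->status & DD_BIT))
                return 0;            /* hardware has not finished this one */

        read_barrier();              /* read length/buffer only after DD   */

        printf("packet of %u bytes ready\n", (unsigned)desc->length);
        return 1;
}

int main(void)
{
        struct rx_desc d = { .status = DD_BIT, .length = 64 };
        clean_one(&d);
        return 0;
}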
spin_lock_irq(&dev->txq.lock); while ((res = usb_get_from_anchor(&dev->deferred))) { - printk(KERN_INFO"%s has delayed data\n", __func__); skb = (struct sk_buff *)res->context; retval = usb_submit_urb(res, GFP_ATOMIC); if (retval < 0) { diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c index 42dffd3e5795..fd69095ef6e3 100644 --- a/drivers/net/via-velocity.c +++ b/drivers/net/via-velocity.c @@ -2763,12 +2763,12 @@ static int __devinit velocity_found1(struct pci_dev *pdev, const struct pci_devi vptr->dev = dev; - dev->irq = pdev->irq; - ret = pci_enable_device(pdev); if (ret < 0) goto err_free_dev; + dev->irq = pdev->irq; + ret = velocity_get_pci_info(vptr, pdev); if (ret < 0) { /* error message already printed */ diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index bb6b67f6b0cc..4598e9d2608f 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -705,6 +705,19 @@ static int virtnet_close(struct net_device *dev) return 0; } +static void virtnet_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *drvinfo) +{ + struct virtnet_info *vi = netdev_priv(dev); + struct virtio_device *vdev = vi->vdev; + + strncpy(drvinfo->driver, KBUILD_MODNAME, ARRAY_SIZE(drvinfo->driver)); + strncpy(drvinfo->version, "N/A", ARRAY_SIZE(drvinfo->version)); + strncpy(drvinfo->fw_version, "N/A", ARRAY_SIZE(drvinfo->fw_version)); + strncpy(drvinfo->bus_info, dev_name(&vdev->dev), + ARRAY_SIZE(drvinfo->bus_info)); +} + static int virtnet_set_tx_csum(struct net_device *dev, u32 data) { struct virtnet_info *vi = netdev_priv(dev); @@ -817,6 +830,7 @@ static void virtnet_vlan_rx_kill_vid(struct net_device *dev, u16 vid) } static const struct ethtool_ops virtnet_ethtool_ops = { + .get_drvinfo = virtnet_get_drvinfo, .set_tx_csum = virtnet_set_tx_csum, .set_sg = ethtool_op_set_sg, .set_tso = ethtool_op_set_tso, diff --git a/drivers/net/wireless/ath/ath9k/ar9002_calib.c b/drivers/net/wireless/ath/ath9k/ar9002_calib.c index dabafb874c36..fe7418aefc4a 100644 --- a/drivers/net/wireless/ath/ath9k/ar9002_calib.c +++ b/drivers/net/wireless/ath/ath9k/ar9002_calib.c @@ -63,6 +63,7 @@ static bool ar9002_hw_per_calibration(struct ath_hw *ah, u8 rxchainmask, struct ath9k_cal_list *currCal) { + struct ath9k_hw_cal_data *caldata = ah->caldata; bool iscaldone = false; if (currCal->calState == CAL_RUNNING) { @@ -81,14 +82,14 @@ static bool ar9002_hw_per_calibration(struct ath_hw *ah, } currCal->calData->calPostProc(ah, numChains); - ichan->CalValid |= currCal->calData->calType; + caldata->CalValid |= currCal->calData->calType; currCal->calState = CAL_DONE; iscaldone = true; } else { ar9002_hw_setup_calibration(ah, currCal); } } - } else if (!(ichan->CalValid & currCal->calData->calType)) { + } else if (!(caldata->CalValid & currCal->calData->calType)) { ath9k_hw_reset_calibration(ah, currCal); } @@ -686,8 +687,13 @@ static bool ar9002_hw_calibrate(struct ath_hw *ah, { bool iscaldone = true; struct ath9k_cal_list *currCal = ah->cal_list_curr; + bool nfcal, nfcal_pending = false; - if (currCal && + nfcal = !!(REG_READ(ah, AR_PHY_AGC_CONTROL) & AR_PHY_AGC_CONTROL_NF); + if (ah->caldata) + nfcal_pending = ah->caldata->nfcal_pending; + + if (currCal && !nfcal && (currCal->calState == CAL_RUNNING || currCal->calState == CAL_WAITING)) { iscaldone = ar9002_hw_per_calibration(ah, chan, @@ -703,7 +709,7 @@ static bool ar9002_hw_calibrate(struct ath_hw *ah, } /* Do NF cal only at longer intervals */ - if (longcal) { + if (longcal || nfcal_pending) { /* Do periodic PAOffset Cal */ 
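The cxgb3 init_one() and cxgb4vf_pci_probe() reordering further above, and the via-velocity change that reads pdev->irq only after pci_enable_device(), follow the usual probe discipline: enable the device before claiming its regions or touching IRQ/DMA state, and unwind through goto labels in exactly the reverse order of acquisition. A hedged, kernel-style sketch of that shape — it uses the real PCI helpers, but example_probe() and the bare 64-byte allocation are illustrative only, not a complete driver:

#include <linux/pci.h>
#include <linux/slab.h>

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        void *adapter;
        int err;

        err = pci_enable_device(pdev);         /* must precede everything else */
        if (err)
                return err;

        err = pci_request_regions(pdev, "example");
        if (err)
                goto err_disable_device;

        /* Only after enabling is it safe to read pdev->irq, set the DMA
         * mask, start bus mastering, and so on. */

        adapter = kzalloc(64, GFP_KERNEL);
        if (!adapter) {
                err = -ENOMEM;
                goto err_release_regions;
        }

        pci_set_drvdata(pdev, adapter);
        return 0;

err_release_regions:
        pci_release_regions(pdev);             /* undo in reverse order */
err_disable_device:
        pci_disable_device(pdev);
        return err;
}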
ar9002_hw_pa_cal(ah, false); ar9002_hw_olc_temp_compensation(ah); @@ -712,16 +718,18 @@ static bool ar9002_hw_calibrate(struct ath_hw *ah, * Get the value from the previous NF cal and update * history buffer. */ - ath9k_hw_getnf(ah, chan); - - /* - * Load the NF from history buffer of the current channel. - * NF is slow time-variant, so it is OK to use a historical - * value. - */ - ath9k_hw_loadnf(ah, ah->curchan); + if (ath9k_hw_getnf(ah, chan)) { + /* + * Load the NF from history buffer of the current + * channel. + * NF is slow time-variant, so it is OK to use a + * historical value. + */ + ath9k_hw_loadnf(ah, ah->curchan); + } - ath9k_hw_start_nfcal(ah); + if (longcal) + ath9k_hw_start_nfcal(ah, false); } return iscaldone; @@ -869,8 +877,10 @@ static bool ar9002_hw_init_cal(struct ath_hw *ah, struct ath9k_channel *chan) ar9002_hw_pa_cal(ah, true); /* Do NF Calibration after DC offset and other calibrations */ - REG_WRITE(ah, AR_PHY_AGC_CONTROL, - REG_READ(ah, AR_PHY_AGC_CONTROL) | AR_PHY_AGC_CONTROL_NF); + ath9k_hw_start_nfcal(ah, true); + + if (ah->caldata) + ah->caldata->nfcal_pending = true; ah->cal_list = ah->cal_list_last = ah->cal_list_curr = NULL; @@ -901,7 +911,8 @@ static bool ar9002_hw_init_cal(struct ath_hw *ah, struct ath9k_channel *chan) ath9k_hw_reset_calibration(ah, ah->cal_list_curr); } - chan->CalValid = 0; + if (ah->caldata) + ah->caldata->CalValid = 0; return true; } diff --git a/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/drivers/net/wireless/ath/ath9k/ar9003_calib.c index 5a0650399136..4674ea8c9c99 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_calib.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_calib.c @@ -68,6 +68,7 @@ static bool ar9003_hw_per_calibration(struct ath_hw *ah, u8 rxchainmask, struct ath9k_cal_list *currCal) { + struct ath9k_hw_cal_data *caldata = ah->caldata; /* Cal is assumed not done until explicitly set below */ bool iscaldone = false; @@ -95,7 +96,7 @@ static bool ar9003_hw_per_calibration(struct ath_hw *ah, currCal->calData->calPostProc(ah, numChains); /* Calibration has finished. */ - ichan->CalValid |= currCal->calData->calType; + caldata->CalValid |= currCal->calData->calType; currCal->calState = CAL_DONE; iscaldone = true; } else { @@ -106,7 +107,7 @@ static bool ar9003_hw_per_calibration(struct ath_hw *ah, ar9003_hw_setup_calibration(ah, currCal); } } - } else if (!(ichan->CalValid & currCal->calData->calType)) { + } else if (!(caldata->CalValid & currCal->calData->calType)) { /* If current cal is marked invalid in channel, kick it off */ ath9k_hw_reset_calibration(ah, currCal); } @@ -149,6 +150,12 @@ static bool ar9003_hw_calibrate(struct ath_hw *ah, /* Do NF cal only at longer intervals */ if (longcal) { /* + * Get the value from the previous NF cal and update + * history buffer. + */ + ath9k_hw_getnf(ah, chan); + + /* * Load the NF from history buffer of the current channel. * NF is slow time-variant, so it is OK to use a historical * value. 
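The ar9002/ar9003 calibrate changes above make ath9k_hw_getnf() report whether a fresh noise-floor reading was actually captured, and the caller reloads the per-chain history into the hardware only in that case; otherwise the previously programmed or default value stays in place. A toy illustration of that control-flow change — get_noise_floor(), load_noise_floor() and the numbers are hypothetical stand-ins, not the ath9k API:

#include <stdbool.h>
#include <stdio.h>

static bool get_noise_floor(int *nf_hist, bool cal_done)
{
        if (!cal_done)
                return false;        /* measurement still running: keep old data */
        *nf_hist = -110;             /* fold the new reading into the history    */
        return true;
}

static void load_noise_floor(int nf_hist)
{
        printf("programming NF %d dBm from history\n", nf_hist);
}

int main(void)
{
        int nf_hist = -115;          /* nominal/default noise floor */

        if (get_noise_floor(&nf_hist, true))   /* mirrors the new calibrate path */
                load_noise_floor(nf_hist);
        return 0;
}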
@@ -156,7 +163,7 @@ static bool ar9003_hw_calibrate(struct ath_hw *ah, ath9k_hw_loadnf(ah, ah->curchan); /* start NF calibration, without updating BB NF register */ - ath9k_hw_start_nfcal(ah); + ath9k_hw_start_nfcal(ah, false); } return iscaldone; @@ -762,6 +769,8 @@ static bool ar9003_hw_init_cal(struct ath_hw *ah, /* Revert chainmasks to their original values before NF cal */ ar9003_hw_set_chain_masks(ah, ah->rxchainmask, ah->txchainmask); + ath9k_hw_start_nfcal(ah, true); + /* Initialize list pointers */ ah->cal_list = ah->cal_list_last = ah->cal_list_curr = NULL; @@ -785,7 +794,8 @@ static bool ar9003_hw_init_cal(struct ath_hw *ah, if (ah->cal_list_curr) ath9k_hw_reset_calibration(ah, ah->cal_list_curr); - chan->CalValid = 0; + if (ah->caldata) + ah->caldata->CalValid = 0; return true; } diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c index ace8d2678b18..b883b174385b 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c @@ -41,6 +41,20 @@ #define LE16(x) __constant_cpu_to_le16(x) #define LE32(x) __constant_cpu_to_le32(x) +/* Local defines to distinguish between extension and control CTL's */ +#define EXT_ADDITIVE (0x8000) +#define CTL_11A_EXT (CTL_11A | EXT_ADDITIVE) +#define CTL_11G_EXT (CTL_11G | EXT_ADDITIVE) +#define CTL_11B_EXT (CTL_11B | EXT_ADDITIVE) +#define REDUCE_SCALED_POWER_BY_TWO_CHAIN 6 /* 10*log10(2)*2 */ +#define REDUCE_SCALED_POWER_BY_THREE_CHAIN 9 /* 10*log10(3)*2 */ +#define PWRINCR_3_TO_1_CHAIN 9 /* 10*log(3)*2 */ +#define PWRINCR_3_TO_2_CHAIN 3 /* floor(10*log(3/2)*2) */ +#define PWRINCR_2_TO_1_CHAIN 6 /* 10*log(2)*2 */ + +#define SUB_NUM_CTL_MODES_AT_5G_40 2 /* excluding HT40, EXT-OFDM */ +#define SUB_NUM_CTL_MODES_AT_2G_40 3 /* excluding HT40, EXT-OFDM, EXT-CCK */ + static const struct ar9300_eeprom ar9300_default = { .eepromVersion = 2, .templateVersion = 2, @@ -609,6 +623,14 @@ static const struct ar9300_eeprom ar9300_default = { } }; +static u16 ath9k_hw_fbin2freq(u8 fbin, bool is2GHz) +{ + if (fbin == AR9300_BCHAN_UNUSED) + return fbin; + + return (u16) ((is2GHz) ? 
(2300 + fbin) : (4800 + 5 * fbin)); +} + static int ath9k_hw_ar9300_check_eeprom(struct ath_hw *ah) { return 0; @@ -1417,9 +1439,9 @@ static int ar9003_hw_tx_power_regwrite(struct ath_hw *ah, u8 * pPwrArray) #undef POW_SM } -static void ar9003_hw_set_target_power_eeprom(struct ath_hw *ah, u16 freq) +static void ar9003_hw_set_target_power_eeprom(struct ath_hw *ah, u16 freq, + u8 *targetPowerValT2) { - u8 targetPowerValT2[ar9300RateSize]; /* XXX: hard code for now, need to get from eeprom struct */ u8 ht40PowerIncForPdadc = 0; bool is2GHz = false; @@ -1553,9 +1575,6 @@ static void ar9003_hw_set_target_power_eeprom(struct ath_hw *ah, u16 freq) "TPC[%02d] 0x%08x\n", i, targetPowerValT2[i]); i++; } - - /* Write target power array to registers */ - ar9003_hw_tx_power_regwrite(ah, targetPowerValT2); } static int ar9003_hw_cal_pier_get(struct ath_hw *ah, @@ -1799,14 +1818,369 @@ static int ar9003_hw_calibration_apply(struct ath_hw *ah, int frequency) return 0; } +static u16 ar9003_hw_get_direct_edge_power(struct ar9300_eeprom *eep, + int idx, + int edge, + bool is2GHz) +{ + struct cal_ctl_data_2g *ctl_2g = eep->ctlPowerData_2G; + struct cal_ctl_data_5g *ctl_5g = eep->ctlPowerData_5G; + + if (is2GHz) + return ctl_2g[idx].ctlEdges[edge].tPower; + else + return ctl_5g[idx].ctlEdges[edge].tPower; +} + +static u16 ar9003_hw_get_indirect_edge_power(struct ar9300_eeprom *eep, + int idx, + unsigned int edge, + u16 freq, + bool is2GHz) +{ + struct cal_ctl_data_2g *ctl_2g = eep->ctlPowerData_2G; + struct cal_ctl_data_5g *ctl_5g = eep->ctlPowerData_5G; + + u8 *ctl_freqbin = is2GHz ? + &eep->ctl_freqbin_2G[idx][0] : + &eep->ctl_freqbin_5G[idx][0]; + + if (is2GHz) { + if (ath9k_hw_fbin2freq(ctl_freqbin[edge - 1], 1) < freq && + ctl_2g[idx].ctlEdges[edge - 1].flag) + return ctl_2g[idx].ctlEdges[edge - 1].tPower; + } else { + if (ath9k_hw_fbin2freq(ctl_freqbin[edge - 1], 0) < freq && + ctl_5g[idx].ctlEdges[edge - 1].flag) + return ctl_5g[idx].ctlEdges[edge - 1].tPower; + } + + return AR9300_MAX_RATE_POWER; +} + +/* + * Find the maximum conformance test limit for the given channel and CTL info + */ +static u16 ar9003_hw_get_max_edge_power(struct ar9300_eeprom *eep, + u16 freq, int idx, bool is2GHz) +{ + u16 twiceMaxEdgePower = AR9300_MAX_RATE_POWER; + u8 *ctl_freqbin = is2GHz ? + &eep->ctl_freqbin_2G[idx][0] : + &eep->ctl_freqbin_5G[idx][0]; + u16 num_edges = is2GHz ? 
+ AR9300_NUM_BAND_EDGES_2G : AR9300_NUM_BAND_EDGES_5G; + unsigned int edge; + + /* Get the edge power */ + for (edge = 0; + (edge < num_edges) && (ctl_freqbin[edge] != AR9300_BCHAN_UNUSED); + edge++) { + /* + * If there's an exact channel match or an inband flag set + * on the lower channel use the given rdEdgePower + */ + if (freq == ath9k_hw_fbin2freq(ctl_freqbin[edge], is2GHz)) { + twiceMaxEdgePower = + ar9003_hw_get_direct_edge_power(eep, idx, + edge, is2GHz); + break; + } else if ((edge > 0) && + (freq < ath9k_hw_fbin2freq(ctl_freqbin[edge], + is2GHz))) { + twiceMaxEdgePower = + ar9003_hw_get_indirect_edge_power(eep, idx, + edge, freq, + is2GHz); + /* + * Leave loop - no more affecting edges possible in + * this monotonic increasing list + */ + break; + } + } + return twiceMaxEdgePower; +} + +static void ar9003_hw_set_power_per_rate_table(struct ath_hw *ah, + struct ath9k_channel *chan, + u8 *pPwrArray, u16 cfgCtl, + u8 twiceAntennaReduction, + u8 twiceMaxRegulatoryPower, + u16 powerLimit) +{ + struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah); + struct ath_common *common = ath9k_hw_common(ah); + struct ar9300_eeprom *pEepData = &ah->eeprom.ar9300_eep; + u16 twiceMaxEdgePower = AR9300_MAX_RATE_POWER; + static const u16 tpScaleReductionTable[5] = { + 0, 3, 6, 9, AR9300_MAX_RATE_POWER + }; + int i; + int16_t twiceLargestAntenna; + u16 scaledPower = 0, minCtlPower, maxRegAllowedPower; + u16 ctlModesFor11a[] = { + CTL_11A, CTL_5GHT20, CTL_11A_EXT, CTL_5GHT40 + }; + u16 ctlModesFor11g[] = { + CTL_11B, CTL_11G, CTL_2GHT20, CTL_11B_EXT, + CTL_11G_EXT, CTL_2GHT40 + }; + u16 numCtlModes, *pCtlMode, ctlMode, freq; + struct chan_centers centers; + u8 *ctlIndex; + u8 ctlNum; + u16 twiceMinEdgePower; + bool is2ghz = IS_CHAN_2GHZ(chan); + + ath9k_hw_get_channel_centers(ah, chan, ¢ers); + + /* Compute TxPower reduction due to Antenna Gain */ + if (is2ghz) + twiceLargestAntenna = pEepData->modalHeader2G.antennaGain; + else + twiceLargestAntenna = pEepData->modalHeader5G.antennaGain; + + twiceLargestAntenna = (int16_t)min((twiceAntennaReduction) - + twiceLargestAntenna, 0); + + /* + * scaledPower is the minimum of the user input power level + * and the regulatory allowed power level + */ + maxRegAllowedPower = twiceMaxRegulatoryPower + twiceLargestAntenna; + + if (regulatory->tp_scale != ATH9K_TP_SCALE_MAX) { + maxRegAllowedPower -= + (tpScaleReductionTable[(regulatory->tp_scale)] * 2); + } + + scaledPower = min(powerLimit, maxRegAllowedPower); + + /* + * Reduce scaled Power by number of chains active to get + * to per chain tx power level + */ + switch (ar5416_get_ntxchains(ah->txchainmask)) { + case 1: + break; + case 2: + scaledPower -= REDUCE_SCALED_POWER_BY_TWO_CHAIN; + break; + case 3: + scaledPower -= REDUCE_SCALED_POWER_BY_THREE_CHAIN; + break; + } + + scaledPower = max((u16)0, scaledPower); + + /* + * Get target powers from EEPROM - our baseline for TX Power + */ + if (is2ghz) { + /* Setup for CTL modes */ + /* CTL_11B, CTL_11G, CTL_2GHT20 */ + numCtlModes = + ARRAY_SIZE(ctlModesFor11g) - + SUB_NUM_CTL_MODES_AT_2G_40; + pCtlMode = ctlModesFor11g; + if (IS_CHAN_HT40(chan)) + /* All 2G CTL's */ + numCtlModes = ARRAY_SIZE(ctlModesFor11g); + } else { + /* Setup for CTL modes */ + /* CTL_11A, CTL_5GHT20 */ + numCtlModes = ARRAY_SIZE(ctlModesFor11a) - + SUB_NUM_CTL_MODES_AT_5G_40; + pCtlMode = ctlModesFor11a; + if (IS_CHAN_HT40(chan)) + /* All 5G CTL's */ + numCtlModes = ARRAY_SIZE(ctlModesFor11a); + } + + /* + * For MIMO, need to apply regulatory caps individually across + * dynamically 
running modes: CCK, OFDM, HT20, HT40 + * + * The outer loop walks through each possible applicable runtime mode. + * The inner loop walks through each ctlIndex entry in EEPROM. + * The ctl value is encoded as [7:4] == test group, [3:0] == test mode. + */ + for (ctlMode = 0; ctlMode < numCtlModes; ctlMode++) { + bool isHt40CtlMode = (pCtlMode[ctlMode] == CTL_5GHT40) || + (pCtlMode[ctlMode] == CTL_2GHT40); + if (isHt40CtlMode) + freq = centers.synth_center; + else if (pCtlMode[ctlMode] & EXT_ADDITIVE) + freq = centers.ext_center; + else + freq = centers.ctl_center; + + ath_print(common, ATH_DBG_REGULATORY, + "LOOP-Mode ctlMode %d < %d, isHt40CtlMode %d, " + "EXT_ADDITIVE %d\n", + ctlMode, numCtlModes, isHt40CtlMode, + (pCtlMode[ctlMode] & EXT_ADDITIVE)); + + /* walk through each CTL index stored in EEPROM */ + if (is2ghz) { + ctlIndex = pEepData->ctlIndex_2G; + ctlNum = AR9300_NUM_CTLS_2G; + } else { + ctlIndex = pEepData->ctlIndex_5G; + ctlNum = AR9300_NUM_CTLS_5G; + } + + for (i = 0; (i < ctlNum) && ctlIndex[i]; i++) { + ath_print(common, ATH_DBG_REGULATORY, + "LOOP-Ctlidx %d: cfgCtl 0x%2.2x " + "pCtlMode 0x%2.2x ctlIndex 0x%2.2x " + "chan %dn", + i, cfgCtl, pCtlMode[ctlMode], ctlIndex[i], + chan->channel); + + /* + * compare test group from regulatory + * channel list with test mode from pCtlMode + * list + */ + if ((((cfgCtl & ~CTL_MODE_M) | + (pCtlMode[ctlMode] & CTL_MODE_M)) == + ctlIndex[i]) || + (((cfgCtl & ~CTL_MODE_M) | + (pCtlMode[ctlMode] & CTL_MODE_M)) == + ((ctlIndex[i] & CTL_MODE_M) | + SD_NO_CTL))) { + twiceMinEdgePower = + ar9003_hw_get_max_edge_power(pEepData, + freq, i, + is2ghz); + + if ((cfgCtl & ~CTL_MODE_M) == SD_NO_CTL) + /* + * Find the minimum of all CTL + * edge powers that apply to + * this channel + */ + twiceMaxEdgePower = + min(twiceMaxEdgePower, + twiceMinEdgePower); + else { + /* specific */ + twiceMaxEdgePower = + twiceMinEdgePower; + break; + } + } + } + + minCtlPower = (u8)min(twiceMaxEdgePower, scaledPower); + + ath_print(common, ATH_DBG_REGULATORY, + "SEL-Min ctlMode %d pCtlMode %d 2xMaxEdge %d " + "sP %d minCtlPwr %d\n", + ctlMode, pCtlMode[ctlMode], twiceMaxEdgePower, + scaledPower, minCtlPower); + + /* Apply ctl mode to correct target power set */ + switch (pCtlMode[ctlMode]) { + case CTL_11B: + for (i = ALL_TARGET_LEGACY_1L_5L; + i <= ALL_TARGET_LEGACY_11S; i++) + pPwrArray[i] = + (u8)min((u16)pPwrArray[i], + minCtlPower); + break; + case CTL_11A: + case CTL_11G: + for (i = ALL_TARGET_LEGACY_6_24; + i <= ALL_TARGET_LEGACY_54; i++) + pPwrArray[i] = + (u8)min((u16)pPwrArray[i], + minCtlPower); + break; + case CTL_5GHT20: + case CTL_2GHT20: + for (i = ALL_TARGET_HT20_0_8_16; + i <= ALL_TARGET_HT20_21; i++) + pPwrArray[i] = + (u8)min((u16)pPwrArray[i], + minCtlPower); + pPwrArray[ALL_TARGET_HT20_22] = + (u8)min((u16)pPwrArray[ALL_TARGET_HT20_22], + minCtlPower); + pPwrArray[ALL_TARGET_HT20_23] = + (u8)min((u16)pPwrArray[ALL_TARGET_HT20_23], + minCtlPower); + break; + case CTL_5GHT40: + case CTL_2GHT40: + for (i = ALL_TARGET_HT40_0_8_16; + i <= ALL_TARGET_HT40_23; i++) + pPwrArray[i] = + (u8)min((u16)pPwrArray[i], + minCtlPower); + break; + default: + break; + } + } /* end ctl mode checking */ +} + static void ath9k_hw_ar9300_set_txpower(struct ath_hw *ah, struct ath9k_channel *chan, u16 cfgCtl, u8 twiceAntennaReduction, u8 twiceMaxRegulatoryPower, u8 powerLimit) { - ah->txpower_limit = powerLimit; - ar9003_hw_set_target_power_eeprom(ah, chan->channel); + struct ath_common *common = ath9k_hw_common(ah); + u8 targetPowerValT2[ar9300RateSize]; + unsigned 
int i = 0; + + ar9003_hw_set_target_power_eeprom(ah, chan->channel, targetPowerValT2); + ar9003_hw_set_power_per_rate_table(ah, chan, + targetPowerValT2, cfgCtl, + twiceAntennaReduction, + twiceMaxRegulatoryPower, + powerLimit); + + while (i < ar9300RateSize) { + ath_print(common, ATH_DBG_EEPROM, + "TPC[%02d] 0x%08x ", i, targetPowerValT2[i]); + i++; + ath_print(common, ATH_DBG_EEPROM, + "TPC[%02d] 0x%08x ", i, targetPowerValT2[i]); + i++; + ath_print(common, ATH_DBG_EEPROM, + "TPC[%02d] 0x%08x ", i, targetPowerValT2[i]); + i++; + ath_print(common, ATH_DBG_EEPROM, + "TPC[%02d] 0x%08x\n\n", i, targetPowerValT2[i]); + i++; + } + + /* Write target power array to registers */ + ar9003_hw_tx_power_regwrite(ah, targetPowerValT2); + + /* + * This is the TX power we send back to driver core, + * and it can use to pass to userspace to display our + * currently configured TX power setting. + * + * Since power is rate dependent, use one of the indices + * from the AR9300_Rates enum to select an entry from + * targetPowerValT2[] to report. Currently returns the + * power for HT40 MCS 0, HT20 MCS 0, or OFDM 6 Mbps + * as CCK power is less interesting (?). + */ + i = ALL_TARGET_LEGACY_6_24; /* legacy */ + if (IS_CHAN_HT40(chan)) + i = ALL_TARGET_HT40_0_8_16; /* ht40 */ + else if (IS_CHAN_HT20(chan)) + i = ALL_TARGET_HT20_0_8_16; /* ht20 */ + + ah->txpower_limit = targetPowerValT2[i]; + ar9003_hw_calibration_apply(ah, chan->channel); } diff --git a/drivers/net/wireless/ath/ath9k/ar9003_paprd.c b/drivers/net/wireless/ath/ath9k/ar9003_paprd.c index 49e0c865ce5c..7c38229ba670 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_paprd.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_paprd.c @@ -577,10 +577,11 @@ static bool create_pa_curve(u32 *data_L, u32 *data_U, u32 *pa_table, u16 *gain) } void ar9003_paprd_populate_single_table(struct ath_hw *ah, - struct ath9k_channel *chan, int chain) + struct ath9k_hw_cal_data *caldata, + int chain) { - u32 *paprd_table_val = chan->pa_table[chain]; - u32 small_signal_gain = chan->small_signal_gain[chain]; + u32 *paprd_table_val = caldata->pa_table[chain]; + u32 small_signal_gain = caldata->small_signal_gain[chain]; u32 training_power; u32 reg = 0; int i; @@ -654,17 +655,17 @@ int ar9003_paprd_setup_gain_table(struct ath_hw *ah, int chain) } EXPORT_SYMBOL(ar9003_paprd_setup_gain_table); -int ar9003_paprd_create_curve(struct ath_hw *ah, struct ath9k_channel *chan, - int chain) +int ar9003_paprd_create_curve(struct ath_hw *ah, + struct ath9k_hw_cal_data *caldata, int chain) { - u16 *small_signal_gain = &chan->small_signal_gain[chain]; - u32 *pa_table = chan->pa_table[chain]; + u16 *small_signal_gain = &caldata->small_signal_gain[chain]; + u32 *pa_table = caldata->pa_table[chain]; u32 *data_L, *data_U; int i, status = 0; u32 *buf; u32 reg; - memset(chan->pa_table[chain], 0, sizeof(chan->pa_table[chain])); + memset(caldata->pa_table[chain], 0, sizeof(caldata->pa_table[chain])); buf = kmalloc(2 * 48 * sizeof(u32), GFP_ATOMIC); if (!buf) diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c index a753a431bb13..a491854fa38a 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c @@ -542,7 +542,11 @@ static void ar9003_hw_prog_ini(struct ath_hw *ah, u32 reg = INI_RA(iniArr, i, 0); u32 val = INI_RA(iniArr, i, column); - REG_WRITE(ah, reg, val); + if (reg >= 0x16000 && reg < 0x17000) + ath9k_hw_analog_shift_regwrite(ah, reg, val); + else + REG_WRITE(ah, reg, val); + 
DO_DELAY(regWrites); } } diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h index 998ae2c49ed2..07f26ee7a723 100644 --- a/drivers/net/wireless/ath/ath9k/ath9k.h +++ b/drivers/net/wireless/ath/ath9k/ath9k.h @@ -510,7 +510,7 @@ void ath_deinit_leds(struct ath_softc *sc); #define SC_OP_BEACONS BIT(1) #define SC_OP_RXAGGR BIT(2) #define SC_OP_TXAGGR BIT(3) -#define SC_OP_FULL_RESET BIT(4) +#define SC_OP_OFFCHANNEL BIT(4) #define SC_OP_PREAMBLE_SHORT BIT(5) #define SC_OP_PROTECT_ENABLE BIT(6) #define SC_OP_RXFLUSH BIT(7) @@ -609,6 +609,7 @@ struct ath_softc { struct ath_wiphy { struct ath_softc *sc; /* shared for all virtual wiphys */ struct ieee80211_hw *hw; + struct ath9k_hw_cal_data caldata; enum ath_wiphy_state { ATH_WIPHY_INACTIVE, ATH_WIPHY_ACTIVE, diff --git a/drivers/net/wireless/ath/ath9k/calib.c b/drivers/net/wireless/ath/ath9k/calib.c index 139289e4e933..45208690c0ec 100644 --- a/drivers/net/wireless/ath/ath9k/calib.c +++ b/drivers/net/wireless/ath/ath9k/calib.c @@ -22,23 +22,6 @@ /* We can tune this as we go by monitoring really low values */ #define ATH9K_NF_TOO_LOW -60 -/* AR5416 may return very high value (like -31 dBm), in those cases the nf - * is incorrect and we should use the static NF value. Later we can try to - * find out why they are reporting these values */ - -static bool ath9k_hw_nf_in_range(struct ath_hw *ah, s16 nf) -{ - if (nf > ATH9K_NF_TOO_LOW) { - ath_print(ath9k_hw_common(ah), ATH_DBG_CALIBRATE, - "noise floor value detected (%d) is " - "lower than what we think is a " - "reasonable value (%d)\n", - nf, ATH9K_NF_TOO_LOW); - return false; - } - return true; -} - static int16_t ath9k_hw_get_nf_hist_mid(int16_t *nfCalBuffer) { int16_t nfval; @@ -121,6 +104,19 @@ void ath9k_hw_reset_calibration(struct ath_hw *ah, ah->cal_samples = 0; } +static s16 ath9k_hw_get_default_nf(struct ath_hw *ah, + struct ath9k_channel *chan) +{ + struct ath_nf_limits *limit; + + if (!chan || IS_CHAN_2GHZ(chan)) + limit = &ah->nf_2g; + else + limit = &ah->nf_5g; + + return limit->nominal; +} + /* This is done for the currently configured channel */ bool ath9k_hw_reset_calvalid(struct ath_hw *ah) { @@ -128,7 +124,7 @@ bool ath9k_hw_reset_calvalid(struct ath_hw *ah) struct ieee80211_conf *conf = &common->hw->conf; struct ath9k_cal_list *currCal = ah->cal_list_curr; - if (!ah->curchan) + if (!ah->caldata) return true; if (!AR_SREV_9100(ah) && !AR_SREV_9160_10_OR_LATER(ah)) @@ -151,37 +147,55 @@ bool ath9k_hw_reset_calvalid(struct ath_hw *ah) "Resetting Cal %d state for channel %u\n", currCal->calData->calType, conf->channel->center_freq); - ah->curchan->CalValid &= ~currCal->calData->calType; + ah->caldata->CalValid &= ~currCal->calData->calType; currCal->calState = CAL_WAITING; return false; } EXPORT_SYMBOL(ath9k_hw_reset_calvalid); -void ath9k_hw_start_nfcal(struct ath_hw *ah) +void ath9k_hw_start_nfcal(struct ath_hw *ah, bool update) { + if (ah->caldata) + ah->caldata->nfcal_pending = true; + REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_ENABLE_NF); - REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, + + if (update) + REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL, + AR_PHY_AGC_CONTROL_NO_UPDATE_NF); + else + REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_NO_UPDATE_NF); + REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_NF); } void ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan) { - struct ath9k_nfcal_hist *h; + struct ath9k_nfcal_hist *h = NULL; unsigned i, j; int32_t val; u8 chainmask = (ah->rxchainmask << 3) | ah->rxchainmask; 
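For the ar9003_eeprom.c additions above, the arithmetic behind ath9k_hw_fbin2freq() and the per-chain power back-off constants can be checked directly. The short program below is an illustration only (link with -lm); the fbin values 112 and 68 are arbitrary examples, not entries taken from a real EEPROM:

#include <assert.h>
#include <math.h>
#include <stdio.h>

/* Frequency bins are an offset from 2300 MHz on 2 GHz, and 5 MHz steps
 * from 4800 MHz on 5 GHz, as in ath9k_hw_fbin2freq(). */
static unsigned fbin2freq(unsigned char fbin, int is2ghz)
{
        return is2ghz ? 2300 + fbin : 4800 + 5 * fbin;
}

int main(void)
{
        assert(fbin2freq(112, 1) == 2412);   /* 2.4 GHz channel 1      */
        assert(fbin2freq(68, 0) == 5140);    /* arbitrary 5 GHz pier   */

        /* Power tables are kept in half-dB steps, so the per-chain
         * back-off constants are twice the dB reduction:
         *   2 chains: 2 * 10*log10(2) ~= 6.0  -> REDUCE_..._TWO_CHAIN   = 6
         *   3 chains: 2 * 10*log10(3) ~= 9.5  -> REDUCE_..._THREE_CHAIN = 9
         */
        printf("2 chains: %.1f half-dB\n", 2 * 10 * log10(2.0));
        printf("3 chains: %.1f half-dB\n", 2 * 10 * log10(3.0));
        return 0;
}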
struct ath_common *common = ath9k_hw_common(ah); + s16 default_nf = ath9k_hw_get_default_nf(ah, chan); - h = ah->nfCalHist; + if (ah->caldata) + h = ah->caldata->nfCalHist; for (i = 0; i < NUM_NF_READINGS; i++) { if (chainmask & (1 << i)) { + s16 nfval; + + if (h) + nfval = h[i].privNF; + else + nfval = default_nf; + val = REG_READ(ah, ah->nf_regs[i]); val &= 0xFFFFFE00; - val |= (((u32) (h[i].privNF) << 1) & 0x1ff); + val |= (((u32) nfval << 1) & 0x1ff); REG_WRITE(ah, ah->nf_regs[i], val); } } @@ -277,22 +291,25 @@ static void ath9k_hw_nf_sanitize(struct ath_hw *ah, s16 *nf) } } -int16_t ath9k_hw_getnf(struct ath_hw *ah, - struct ath9k_channel *chan) +bool ath9k_hw_getnf(struct ath_hw *ah, struct ath9k_channel *chan) { struct ath_common *common = ath9k_hw_common(ah); int16_t nf, nfThresh; int16_t nfarray[NUM_NF_READINGS] = { 0 }; struct ath9k_nfcal_hist *h; struct ieee80211_channel *c = chan->chan; + struct ath9k_hw_cal_data *caldata = ah->caldata; + + if (!caldata) + return false; chan->channelFlags &= (~CHANNEL_CW_INT); if (REG_READ(ah, AR_PHY_AGC_CONTROL) & AR_PHY_AGC_CONTROL_NF) { ath_print(common, ATH_DBG_CALIBRATE, "NF did not complete in calibration window\n"); nf = 0; - chan->rawNoiseFloor = nf; - return chan->rawNoiseFloor; + caldata->rawNoiseFloor = nf; + return false; } else { ath9k_hw_do_getnf(ah, nfarray); ath9k_hw_nf_sanitize(ah, nfarray); @@ -307,47 +324,40 @@ int16_t ath9k_hw_getnf(struct ath_hw *ah, } } - h = ah->nfCalHist; - + h = caldata->nfCalHist; + caldata->nfcal_pending = false; ath9k_hw_update_nfcal_hist_buffer(h, nfarray); - chan->rawNoiseFloor = h[0].privNF; - - return chan->rawNoiseFloor; + caldata->rawNoiseFloor = h[0].privNF; + return true; } -void ath9k_init_nfcal_hist_buffer(struct ath_hw *ah) +void ath9k_init_nfcal_hist_buffer(struct ath_hw *ah, + struct ath9k_channel *chan) { - struct ath_nf_limits *limit; + struct ath9k_nfcal_hist *h; + s16 default_nf; int i, j; - if (!ah->curchan || IS_CHAN_2GHZ(ah->curchan)) - limit = &ah->nf_2g; - else - limit = &ah->nf_5g; + if (!ah->caldata) + return; + h = ah->caldata->nfCalHist; + default_nf = ath9k_hw_get_default_nf(ah, chan); for (i = 0; i < NUM_NF_READINGS; i++) { - ah->nfCalHist[i].currIndex = 0; - ah->nfCalHist[i].privNF = limit->nominal; - ah->nfCalHist[i].invalidNFcount = - AR_PHY_CCA_FILTERWINDOW_LENGTH; + h[i].currIndex = 0; + h[i].privNF = default_nf; + h[i].invalidNFcount = AR_PHY_CCA_FILTERWINDOW_LENGTH; for (j = 0; j < ATH9K_NF_CAL_HIST_MAX; j++) { - ah->nfCalHist[i].nfCalBuffer[j] = limit->nominal; + h[i].nfCalBuffer[j] = default_nf; } } } s16 ath9k_hw_getchan_noise(struct ath_hw *ah, struct ath9k_channel *chan) { - s16 nf; - - if (chan->rawNoiseFloor == 0) - nf = -96; - else - nf = chan->rawNoiseFloor; - - if (!ath9k_hw_nf_in_range(ah, nf)) - nf = ATH_DEFAULT_NOISE_FLOOR; + if (!ah->caldata || !ah->caldata->rawNoiseFloor) + return ath9k_hw_get_default_nf(ah, chan); - return nf; + return ah->caldata->rawNoiseFloor; } EXPORT_SYMBOL(ath9k_hw_getchan_noise); diff --git a/drivers/net/wireless/ath/ath9k/calib.h b/drivers/net/wireless/ath/ath9k/calib.h index cd60d09cdda7..0a304b3eeeb6 100644 --- a/drivers/net/wireless/ath/ath9k/calib.h +++ b/drivers/net/wireless/ath/ath9k/calib.h @@ -108,11 +108,11 @@ struct ath9k_pacal_info{ }; bool ath9k_hw_reset_calvalid(struct ath_hw *ah); -void ath9k_hw_start_nfcal(struct ath_hw *ah); +void ath9k_hw_start_nfcal(struct ath_hw *ah, bool update); void ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan); -int16_t ath9k_hw_getnf(struct ath_hw *ah, - struct 
ath9k_channel *chan); -void ath9k_init_nfcal_hist_buffer(struct ath_hw *ah); +bool ath9k_hw_getnf(struct ath_hw *ah, struct ath9k_channel *chan); +void ath9k_init_nfcal_hist_buffer(struct ath_hw *ah, + struct ath9k_channel *chan); s16 ath9k_hw_getchan_noise(struct ath_hw *ah, struct ath9k_channel *chan); void ath9k_hw_reset_calibration(struct ath_hw *ah, struct ath9k_cal_list *currCal); diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h index 3756400e6bf9..43b9e21bc562 100644 --- a/drivers/net/wireless/ath/ath9k/htc.h +++ b/drivers/net/wireless/ath/ath9k/htc.h @@ -353,6 +353,8 @@ struct ath9k_htc_priv { u16 seq_no; u32 bmiss_cnt; + struct ath9k_hw_cal_data caldata[38]; + spinlock_t beacon_lock; bool tx_queues_stop; diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c index cf9bcc67ade2..ebed9d1691a5 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c @@ -125,6 +125,7 @@ static int ath9k_htc_set_channel(struct ath9k_htc_priv *priv, struct ieee80211_conf *conf = &common->hw->conf; bool fastcc = true; struct ieee80211_channel *channel = hw->conf.channel; + struct ath9k_hw_cal_data *caldata; enum htc_phymode mode; __be16 htc_mode; u8 cmd_rsp; @@ -149,7 +150,8 @@ static int ath9k_htc_set_channel(struct ath9k_htc_priv *priv, priv->ah->curchan->channel, channel->center_freq, conf_is_ht(conf), conf_is_ht40(conf)); - ret = ath9k_hw_reset(ah, hchan, fastcc); + caldata = &priv->caldata[channel->hw_value]; + ret = ath9k_hw_reset(ah, hchan, caldata, fastcc); if (ret) { ath_print(common, ATH_DBG_FATAL, "Unable to reset channel (%u Mhz) " @@ -1028,7 +1030,7 @@ static void ath9k_htc_radio_enable(struct ieee80211_hw *hw) ah->curchan = ath9k_cmn_get_curchannel(hw, ah); /* Reset the HW */ - ret = ath9k_hw_reset(ah, ah->curchan, false); + ret = ath9k_hw_reset(ah, ah->curchan, ah->caldata, false); if (ret) { ath_print(common, ATH_DBG_FATAL, "Unable to reset hardware; reset status %d " @@ -1091,7 +1093,7 @@ static void ath9k_htc_radio_disable(struct ieee80211_hw *hw) ah->curchan = ath9k_cmn_get_curchannel(hw, ah); /* Reset the HW */ - ret = ath9k_hw_reset(ah, ah->curchan, false); + ret = ath9k_hw_reset(ah, ah->curchan, ah->caldata, false); if (ret) { ath_print(common, ATH_DBG_FATAL, "Unable to reset hardware; reset status %d " @@ -1179,7 +1181,7 @@ static int ath9k_htc_start(struct ieee80211_hw *hw) ath9k_hw_configpcipowersave(ah, 0, 0); ath9k_hw_htc_resetinit(ah); - ret = ath9k_hw_reset(ah, init_channel, false); + ret = ath9k_hw_reset(ah, init_channel, ah->caldata, false); if (ret) { ath_print(common, ATH_DBG_FATAL, "Unable to reset hardware; reset status %d " diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c index 8d291ccf5c88..3384ca164562 100644 --- a/drivers/net/wireless/ath/ath9k/hw.c +++ b/drivers/net/wireless/ath/ath9k/hw.c @@ -610,7 +610,6 @@ static int __ath9k_hw_init(struct ath_hw *ah) else ah->tx_trig_level = (AR_FTRIG_512B >> AR_FTRIG_S); - ath9k_init_nfcal_hist_buffer(ah); ah->bb_watchdog_timeout_ms = 25; common->state = ATH_HW_INITIALIZED; @@ -1183,9 +1182,6 @@ static bool ath9k_hw_channel_change(struct ath_hw *ah, ath9k_hw_spur_mitigate_freq(ah, chan); - if (!chan->oneTimeCalsDone) - chan->oneTimeCalsDone = true; - return true; } @@ -1218,7 +1214,7 @@ bool ath9k_hw_check_alive(struct ath_hw *ah) EXPORT_SYMBOL(ath9k_hw_check_alive); int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan, - bool 
bChannelChange) + struct ath9k_hw_cal_data *caldata, bool bChannelChange) { struct ath_common *common = ath9k_hw_common(ah); u32 saveLedState; @@ -1243,9 +1239,19 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan, if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE)) return -EIO; - if (curchan && !ah->chip_fullsleep) + if (curchan && !ah->chip_fullsleep && ah->caldata) ath9k_hw_getnf(ah, curchan); + ah->caldata = caldata; + if (caldata && + (chan->channel != caldata->channel || + (chan->channelFlags & ~CHANNEL_CW_INT) != + (caldata->channelFlags & ~CHANNEL_CW_INT))) { + /* Operating channel changed, reset channel calibration data */ + memset(caldata, 0, sizeof(*caldata)); + ath9k_init_nfcal_hist_buffer(ah, chan); + } + if (bChannelChange && (ah->chip_fullsleep != true) && (ah->curchan != NULL) && @@ -1256,7 +1262,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan, if (ath9k_hw_channel_change(ah, chan)) { ath9k_hw_loadnf(ah, ah->curchan); - ath9k_hw_start_nfcal(ah); + ath9k_hw_start_nfcal(ah, true); return 0; } } @@ -1461,11 +1467,8 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan, if (ah->btcoex_hw.enabled) ath9k_hw_btcoex_enable(ah); - if (AR_SREV_9300_20_OR_LATER(ah)) { - ath9k_hw_loadnf(ah, curchan); - ath9k_hw_start_nfcal(ah); + if (AR_SREV_9300_20_OR_LATER(ah)) ar9003_hw_bb_watchdog_config(ah); - } return 0; } diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h index 2d30efc0b94f..399f7c1283cd 100644 --- a/drivers/net/wireless/ath/ath9k/hw.h +++ b/drivers/net/wireless/ath/ath9k/hw.h @@ -346,19 +346,25 @@ enum ath9k_int { CHANNEL_HT40PLUS | \ CHANNEL_HT40MINUS) -struct ath9k_channel { - struct ieee80211_channel *chan; +struct ath9k_hw_cal_data { u16 channel; u32 channelFlags; - u32 chanmode; int32_t CalValid; - bool oneTimeCalsDone; int8_t iCoff; int8_t qCoff; int16_t rawNoiseFloor; bool paprd_done; + bool nfcal_pending; u16 small_signal_gain[AR9300_MAX_CHAINS]; u32 pa_table[AR9300_MAX_CHAINS][PAPRD_TABLE_SZ]; + struct ath9k_nfcal_hist nfCalHist[NUM_NF_READINGS]; +}; + +struct ath9k_channel { + struct ieee80211_channel *chan; + u16 channel; + u32 channelFlags; + u32 chanmode; }; #define IS_CHAN_G(_c) ((((_c)->channelFlags & (CHANNEL_G)) == CHANNEL_G) || \ @@ -669,7 +675,7 @@ struct ath_hw { enum nl80211_iftype opmode; enum ath9k_power_mode power_mode; - struct ath9k_nfcal_hist nfCalHist[NUM_NF_READINGS]; + struct ath9k_hw_cal_data *caldata; struct ath9k_pacal_info pacal_info; struct ar5416Stats stats; struct ath9k_tx_queue_info txq[ATH9K_NUM_TX_QUEUES]; @@ -863,7 +869,7 @@ const char *ath9k_hw_probe(u16 vendorid, u16 devid); void ath9k_hw_deinit(struct ath_hw *ah); int ath9k_hw_init(struct ath_hw *ah); int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan, - bool bChannelChange); + struct ath9k_hw_cal_data *caldata, bool bChannelChange); int ath9k_hw_fill_cap_info(struct ath_hw *ah); u32 ath9k_regd_get_ctl(struct ath_regulatory *reg, struct ath9k_channel *chan); @@ -958,9 +964,10 @@ void ar9003_hw_bb_watchdog_read(struct ath_hw *ah); void ar9003_hw_bb_watchdog_dbg_info(struct ath_hw *ah); void ar9003_paprd_enable(struct ath_hw *ah, bool val); void ar9003_paprd_populate_single_table(struct ath_hw *ah, - struct ath9k_channel *chan, int chain); -int ar9003_paprd_create_curve(struct ath_hw *ah, struct ath9k_channel *chan, - int chain); + struct ath9k_hw_cal_data *caldata, + int chain); +int ar9003_paprd_create_curve(struct ath_hw *ah, + struct ath9k_hw_cal_data *caldata, int chain); int 
ar9003_paprd_setup_gain_table(struct ath_hw *ah, int chain); int ar9003_paprd_init_table(struct ath_hw *ah); bool ar9003_paprd_is_done(struct ath_hw *ah); diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index 0429dda0961f..3caa32316e7b 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c @@ -154,6 +154,27 @@ void ath9k_ps_restore(struct ath_softc *sc) spin_unlock_irqrestore(&sc->sc_pm_lock, flags); } +static void ath_start_ani(struct ath_common *common) +{ + struct ath_hw *ah = common->ah; + unsigned long timestamp = jiffies_to_msecs(jiffies); + struct ath_softc *sc = (struct ath_softc *) common->priv; + + if (!(sc->sc_flags & SC_OP_ANI_RUN)) + return; + + if (sc->sc_flags & SC_OP_OFFCHANNEL) + return; + + common->ani.longcal_timer = timestamp; + common->ani.shortcal_timer = timestamp; + common->ani.checkani_timer = timestamp; + + mod_timer(&common->ani.timer, + jiffies + + msecs_to_jiffies((u32)ah->config.ani_poll_interval)); +} + /* * Set/change channels. If the channel is really being changed, it's done * by reseting the chip. To accomplish this we must first cleanup any pending @@ -162,16 +183,23 @@ void ath9k_ps_restore(struct ath_softc *sc) int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw, struct ath9k_channel *hchan) { + struct ath_wiphy *aphy = hw->priv; struct ath_hw *ah = sc->sc_ah; struct ath_common *common = ath9k_hw_common(ah); struct ieee80211_conf *conf = &common->hw->conf; bool fastcc = true, stopped; struct ieee80211_channel *channel = hw->conf.channel; + struct ath9k_hw_cal_data *caldata = NULL; int r; if (sc->sc_flags & SC_OP_INVALID) return -EIO; + del_timer_sync(&common->ani.timer); + cancel_work_sync(&sc->paprd_work); + cancel_work_sync(&sc->hw_check_work); + cancel_delayed_work_sync(&sc->tx_complete_work); + ath9k_ps_wakeup(sc); /* @@ -191,9 +219,12 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw, * to flush data frames already in queue because of * changing channel. 
*/ - if (!stopped || (sc->sc_flags & SC_OP_FULL_RESET)) + if (!stopped || !(sc->sc_flags & SC_OP_OFFCHANNEL)) fastcc = false; + if (!(sc->sc_flags & SC_OP_OFFCHANNEL)) + caldata = &aphy->caldata; + ath_print(common, ATH_DBG_CONFIG, "(%u MHz) -> (%u MHz), conf_is_ht40: %d\n", sc->sc_ah->curchan->channel, @@ -201,7 +232,7 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw, spin_lock_bh(&sc->sc_resetlock); - r = ath9k_hw_reset(ah, hchan, fastcc); + r = ath9k_hw_reset(ah, hchan, caldata, fastcc); if (r) { ath_print(common, ATH_DBG_FATAL, "Unable to reset channel (%u MHz), " @@ -212,8 +243,6 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw, } spin_unlock_bh(&sc->sc_resetlock); - sc->sc_flags &= ~SC_OP_FULL_RESET; - if (ath_startrecv(sc) != 0) { ath_print(common, ATH_DBG_FATAL, "Unable to restart recv logic\n"); @@ -225,6 +254,12 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw, ath_update_txpow(sc); ath9k_hw_set_interrupts(ah, ah->imask); + if (!(sc->sc_flags & (SC_OP_OFFCHANNEL | SC_OP_SCANNING))) { + ath_start_ani(common); + ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, 0); + ath_beacon_config(sc, NULL); + } + ps_restore: ath9k_ps_restore(sc); return r; @@ -233,17 +268,19 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw, static void ath_paprd_activate(struct ath_softc *sc) { struct ath_hw *ah = sc->sc_ah; + struct ath9k_hw_cal_data *caldata = ah->caldata; int chain; - if (!ah->curchan->paprd_done) + if (!caldata || !caldata->paprd_done) return; ath9k_ps_wakeup(sc); + ar9003_paprd_enable(ah, false); for (chain = 0; chain < AR9300_MAX_CHAINS; chain++) { if (!(ah->caps.tx_chainmask & BIT(chain))) continue; - ar9003_paprd_populate_single_table(ah, ah->curchan, chain); + ar9003_paprd_populate_single_table(ah, caldata, chain); } ar9003_paprd_enable(ah, true); @@ -261,6 +298,7 @@ void ath_paprd_calibrate(struct work_struct *work) int band = hw->conf.channel->band; struct ieee80211_supported_band *sband = &sc->sbands[band]; struct ath_tx_control txctl; + struct ath9k_hw_cal_data *caldata = ah->caldata; int qnum, ftype; int chain_ok = 0; int chain; @@ -268,6 +306,9 @@ void ath_paprd_calibrate(struct work_struct *work) int time_left; int i; + if (!caldata) + return; + skb = alloc_skb(len, GFP_KERNEL); if (!skb) return; @@ -322,7 +363,7 @@ void ath_paprd_calibrate(struct work_struct *work) if (!ar9003_paprd_is_done(ah)) break; - if (ar9003_paprd_create_curve(ah, ah->curchan, chain) != 0) + if (ar9003_paprd_create_curve(ah, caldata, chain) != 0) break; chain_ok = 1; @@ -330,7 +371,7 @@ void ath_paprd_calibrate(struct work_struct *work) kfree_skb(skb); if (chain_ok) { - ah->curchan->paprd_done = true; + caldata->paprd_done = true; ath_paprd_activate(sc); } @@ -439,33 +480,14 @@ set_timer: cal_interval = min(cal_interval, (u32)short_cal_interval); mod_timer(&common->ani.timer, jiffies + msecs_to_jiffies(cal_interval)); - if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_PAPRD) && - !(sc->sc_flags & SC_OP_SCANNING)) { - if (!sc->sc_ah->curchan->paprd_done) + if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_PAPRD) && ah->caldata) { + if (!ah->caldata->paprd_done) ieee80211_queue_work(sc->hw, &sc->paprd_work); else ath_paprd_activate(sc); } } -static void ath_start_ani(struct ath_common *common) -{ - struct ath_hw *ah = common->ah; - unsigned long timestamp = jiffies_to_msecs(jiffies); - struct ath_softc *sc = (struct ath_softc *) common->priv; - - if (!(sc->sc_flags & SC_OP_ANI_RUN)) - return; - - common->ani.longcal_timer = 
timestamp; - common->ani.shortcal_timer = timestamp; - common->ani.checkani_timer = timestamp; - - mod_timer(&common->ani.timer, - jiffies + - msecs_to_jiffies((u32)ah->config.ani_poll_interval)); -} - /* * Update tx/rx chainmask. For legacy association, * hard code chainmask to 1x1, for 11n association, use @@ -477,7 +499,7 @@ void ath_update_chainmask(struct ath_softc *sc, int is_ht) struct ath_hw *ah = sc->sc_ah; struct ath_common *common = ath9k_hw_common(ah); - if ((sc->sc_flags & SC_OP_SCANNING) || is_ht || + if ((sc->sc_flags & SC_OP_OFFCHANNEL) || is_ht || (ah->btcoex_hw.scheme != ATH_BTCOEX_CFG_NONE)) { common->tx_chainmask = ah->caps.tx_chainmask; common->rx_chainmask = ah->caps.rx_chainmask; @@ -817,7 +839,7 @@ void ath_radio_enable(struct ath_softc *sc, struct ieee80211_hw *hw) ah->curchan = ath_get_curchannel(sc, sc->hw); spin_lock_bh(&sc->sc_resetlock); - r = ath9k_hw_reset(ah, ah->curchan, false); + r = ath9k_hw_reset(ah, ah->curchan, ah->caldata, false); if (r) { ath_print(common, ATH_DBG_FATAL, "Unable to reset channel (%u MHz), " @@ -877,7 +899,7 @@ void ath_radio_disable(struct ath_softc *sc, struct ieee80211_hw *hw) ah->curchan = ath_get_curchannel(sc, hw); spin_lock_bh(&sc->sc_resetlock); - r = ath9k_hw_reset(ah, ah->curchan, false); + r = ath9k_hw_reset(ah, ah->curchan, ah->caldata, false); if (r) { ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL, "Unable to reset channel (%u MHz), " @@ -910,7 +932,7 @@ int ath_reset(struct ath_softc *sc, bool retry_tx) ath_flushrecv(sc); spin_lock_bh(&sc->sc_resetlock); - r = ath9k_hw_reset(ah, sc->sc_ah->curchan, false); + r = ath9k_hw_reset(ah, sc->sc_ah->curchan, ah->caldata, false); if (r) ath_print(common, ATH_DBG_FATAL, "Unable to reset hardware; reset status %d\n", r); @@ -1085,7 +1107,7 @@ static int ath9k_start(struct ieee80211_hw *hw) * and then setup of the interrupt mask. 
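Taken together, the ath9k hunks above move calibration state out of struct ath9k_channel into a caller-owned struct ath9k_hw_cal_data that is passed to ath9k_hw_reset(); the off-channel/scan path hands in NULL, so the hardware falls back to the band's nominal noise floor and skips PAPRD. A minimal hedged sketch of the new calling convention follows; example_set_channel() is hypothetical and assumes the ath9k types and prototype shown in the hunks above are available.

/* Hypothetical helper, not from the driver: shows how a caller is
 * expected to use the reworked ath9k_hw_reset() signature. */
static int example_set_channel(struct ath_hw *ah, struct ath9k_channel *hchan,
			       struct ath9k_hw_cal_data *chan_caldata,
			       bool offchannel, bool fastcc)
{
	/* Off-channel (scan) resets run without a calibration cache; the
	 * hardware then uses the nominal noise-floor defaults. */
	struct ath9k_hw_cal_data *caldata = offchannel ? NULL : chan_caldata;

	return ath9k_hw_reset(ah, hchan, caldata, fastcc);
}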
*/ spin_lock_bh(&sc->sc_resetlock); - r = ath9k_hw_reset(ah, init_channel, false); + r = ath9k_hw_reset(ah, init_channel, ah->caldata, false); if (r) { ath_print(common, ATH_DBG_FATAL, "Unable to reset hardware; reset status %d " @@ -1579,6 +1601,10 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed) aphy->chan_idx = pos; aphy->chan_is_ht = conf_is_ht(conf); + if (hw->conf.flags & IEEE80211_CONF_OFFCHANNEL) + sc->sc_flags |= SC_OP_OFFCHANNEL; + else + sc->sc_flags &= ~SC_OP_OFFCHANNEL; if (aphy->state == ATH_WIPHY_SCAN || aphy->state == ATH_WIPHY_ACTIVE) @@ -1990,7 +2016,6 @@ static void ath9k_sw_scan_start(struct ieee80211_hw *hw) { struct ath_wiphy *aphy = hw->priv; struct ath_softc *sc = aphy->sc; - struct ath_common *common = ath9k_hw_common(sc->sc_ah); mutex_lock(&sc->mutex); if (ath9k_wiphy_scanning(sc)) { @@ -2008,10 +2033,6 @@ static void ath9k_sw_scan_start(struct ieee80211_hw *hw) aphy->state = ATH_WIPHY_SCAN; ath9k_wiphy_pause_all_forced(sc, aphy); sc->sc_flags |= SC_OP_SCANNING; - del_timer_sync(&common->ani.timer); - cancel_work_sync(&sc->paprd_work); - cancel_work_sync(&sc->hw_check_work); - cancel_delayed_work_sync(&sc->tx_complete_work); mutex_unlock(&sc->mutex); } @@ -2023,15 +2044,10 @@ static void ath9k_sw_scan_complete(struct ieee80211_hw *hw) { struct ath_wiphy *aphy = hw->priv; struct ath_softc *sc = aphy->sc; - struct ath_common *common = ath9k_hw_common(sc->sc_ah); mutex_lock(&sc->mutex); aphy->state = ATH_WIPHY_ACTIVE; sc->sc_flags &= ~SC_OP_SCANNING; - sc->sc_flags |= SC_OP_FULL_RESET; - ath_start_ani(common); - ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, 0); - ath_beacon_config(sc, NULL); mutex_unlock(&sc->mutex); } diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c index da0cfe90c38a..a3fc987ebab0 100644 --- a/drivers/net/wireless/ath/ath9k/recv.c +++ b/drivers/net/wireless/ath/ath9k/recv.c @@ -1140,6 +1140,11 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp) if (flush) goto requeue; + retval = ath9k_rx_skb_preprocess(common, hw, hdr, &rs, + rxs, &decrypt_error); + if (retval) + goto requeue; + rxs->mactime = (tsf & ~0xffffffffULL) | rs.rs_tstamp; if (rs.rs_tstamp > tsf_lower && unlikely(rs.rs_tstamp - tsf_lower > 0x10000000)) @@ -1149,11 +1154,6 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp) unlikely(tsf_lower - rs.rs_tstamp > 0x10000000)) rxs->mactime += 0x100000000ULL; - retval = ath9k_rx_skb_preprocess(common, hw, hdr, &rs, - rxs, &decrypt_error); - if (retval) - goto requeue; - /* Ensure we always have an skb to requeue once we are done * processing the current buffer's skb */ requeue_skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_ATOMIC); diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c index 501b72821b4d..4dda14e36227 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c @@ -120,26 +120,14 @@ static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid) list_add_tail(&ac->list, &txq->axq_acq); } -static void ath_tx_pause_tid(struct ath_softc *sc, struct ath_atx_tid *tid) -{ - struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum]; - - spin_lock_bh(&txq->axq_lock); - tid->paused++; - spin_unlock_bh(&txq->axq_lock); -} - static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid) { struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum]; - BUG_ON(tid->paused <= 0); - spin_lock_bh(&txq->axq_lock); - - tid->paused--; + WARN_ON(!tid->paused); - if 
(tid->paused > 0) - goto unlock; + spin_lock_bh(&txq->axq_lock); + tid->paused = false; if (list_empty(&tid->buf_q)) goto unlock; @@ -157,15 +145,10 @@ static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid) struct list_head bf_head; INIT_LIST_HEAD(&bf_head); - BUG_ON(tid->paused <= 0); - spin_lock_bh(&txq->axq_lock); + WARN_ON(!tid->paused); - tid->paused--; - - if (tid->paused > 0) { - spin_unlock_bh(&txq->axq_lock); - return; - } + spin_lock_bh(&txq->axq_lock); + tid->paused = false; while (!list_empty(&tid->buf_q)) { bf = list_first_entry(&tid->buf_q, struct ath_buf, list); @@ -811,7 +794,7 @@ void ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta, an = (struct ath_node *)sta->drv_priv; txtid = ATH_AN_2_TID(an, tid); txtid->state |= AGGR_ADDBA_PROGRESS; - ath_tx_pause_tid(sc, txtid); + txtid->paused = true; *ssn = txtid->seq_start; } @@ -835,10 +818,9 @@ void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid) return; } - ath_tx_pause_tid(sc, txtid); - /* drop all software retried frames and mark this TID */ spin_lock_bh(&txq->axq_lock); + txtid->paused = true; while (!list_empty(&txtid->buf_q)) { bf = list_first_entry(&txtid->buf_q, struct ath_buf, list); if (!bf_isretried(bf)) { @@ -1181,7 +1163,7 @@ void ath_drain_all_txq(struct ath_softc *sc, bool retry_tx) "Failed to stop TX DMA. Resetting hardware!\n"); spin_lock_bh(&sc->sc_resetlock); - r = ath9k_hw_reset(ah, sc->sc_ah->curchan, false); + r = ath9k_hw_reset(ah, sc->sc_ah->curchan, ah->caldata, false); if (r) ath_print(common, ATH_DBG_FATAL, "Unable to reset hardware; reset status %d\n", diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c index c24c5efeae1f..16bbfa3189a5 100644 --- a/drivers/net/wireless/ipw2x00/ipw2100.c +++ b/drivers/net/wireless/ipw2x00/ipw2100.c @@ -1924,6 +1924,10 @@ static int ipw2100_net_init(struct net_device *dev) bg_band->channels = kzalloc(geo->bg_channels * sizeof(struct ieee80211_channel), GFP_KERNEL); + if (!bg_band->channels) { + ipw2100_down(priv); + return -ENOMEM; + } /* translate geo->bg to bg_band.channels */ for (i = 0; i < geo->bg_channels; i++) { bg_band->channels[i].band = IEEE80211_BAND_2GHZ; diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-agn-debugfs.c index f052c6d09b37..d706b8afbe5a 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn-debugfs.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn-debugfs.c @@ -980,7 +980,7 @@ ssize_t iwl_ucode_bt_stats_read(struct file *file, le32_to_cpu(bt->lo_priority_tx_req_cnt), accum_bt->lo_priority_tx_req_cnt); pos += scnprintf(buf + pos, bufsz - pos, - "lo_priority_rx_denied_cnt:\t%u\t\t\t%u\n", + "lo_priority_tx_denied_cnt:\t%u\t\t\t%u\n", le32_to_cpu(bt->lo_priority_tx_denied_cnt), accum_bt->lo_priority_tx_denied_cnt); pos += scnprintf(buf + pos, bufsz - pos, diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c index a1b6d202d57c..9dd9e64c2b0b 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c @@ -1429,7 +1429,7 @@ int iwlagn_manage_ibss_station(struct iwl_priv *priv, void iwl_free_tfds_in_queue(struct iwl_priv *priv, int sta_id, int tid, int freed) { - WARN_ON(!spin_is_locked(&priv->sta_lock)); + lockdep_assert_held(&priv->sta_lock); if (priv->stations[sta_id].tid[tid].tfds_in_queue >= freed) priv->stations[sta_id].tid[tid].tfds_in_queue -= freed; diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c 
b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c index 35c86d22b14b..23e5c42e7d7e 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c @@ -300,8 +300,9 @@ static int rs_tl_turn_on_agg_for_tid(struct iwl_priv *priv, struct ieee80211_sta *sta) { int ret = -EAGAIN; + u32 load = rs_tl_get_load(lq_data, tid); - if (rs_tl_get_load(lq_data, tid) > IWL_AGG_LOAD_THRESHOLD) { + if (load > IWL_AGG_LOAD_THRESHOLD) { IWL_DEBUG_HT(priv, "Starting Tx agg: STA: %pM tid: %d\n", sta->addr, tid); ret = ieee80211_start_tx_ba_session(sta, tid); @@ -311,12 +312,14 @@ static int rs_tl_turn_on_agg_for_tid(struct iwl_priv *priv, * this might be cause by reloading firmware * stop the tx ba session here */ - IWL_DEBUG_HT(priv, "Fail start Tx agg on tid: %d\n", + IWL_ERR(priv, "Fail start Tx agg on tid: %d\n", tid); ieee80211_stop_tx_ba_session(sta, tid); } - } else - IWL_ERR(priv, "Fail finding valid aggregation tid: %d\n", tid); + } else { + IWL_ERR(priv, "Aggregation not enabled for tid %d " + "because load = %u\n", tid, load); + } return ret; } diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c index 55a1b31fd09a..d04502d54df3 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c @@ -1117,7 +1117,7 @@ int iwlagn_txq_check_empty(struct iwl_priv *priv, u8 *addr = priv->stations[sta_id].sta.sta.addr; struct iwl_tid_data *tid_data = &priv->stations[sta_id].tid[tid]; - WARN_ON(!spin_is_locked(&priv->sta_lock)); + lockdep_assert_held(&priv->sta_lock); switch (priv->stations[sta_id].tid[tid].agg.state) { case IWL_EMPTYING_HW_QUEUE_DELBA: @@ -1331,7 +1331,14 @@ void iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv, tid = ba_resp->tid; agg = &priv->stations[sta_id].tid[tid].agg; if (unlikely(agg->txq_id != scd_flow)) { - IWL_ERR(priv, "BA scd_flow %d does not match txq_id %d\n", + /* + * FIXME: this is a uCode bug which need to be addressed, + * log the information and return for now! + * since it is possible happen very often and in order + * not to fill the syslog, don't enable the logging by default + */ + IWL_DEBUG_TX_REPLY(priv, + "BA scd_flow %d does not match txq_id %d\n", scd_flow, agg->txq_id); return; } diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c index 8024d44ce4bb..8ccb6d205b6d 100644 --- a/drivers/net/wireless/iwlwifi/iwl-core.c +++ b/drivers/net/wireless/iwlwifi/iwl-core.c @@ -2000,6 +2000,7 @@ void iwl_mac_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct iwl_priv *priv = hw->priv; + bool scan_completed = false; IWL_DEBUG_MAC80211(priv, "enter\n"); @@ -2013,7 +2014,7 @@ void iwl_mac_remove_interface(struct ieee80211_hw *hw, if (priv->vif == vif) { priv->vif = NULL; if (priv->scan_vif == vif) { - ieee80211_scan_completed(priv->hw, true); + scan_completed = true; priv->scan_vif = NULL; priv->scan_request = NULL; } @@ -2021,6 +2022,9 @@ void iwl_mac_remove_interface(struct ieee80211_hw *hw, } mutex_unlock(&priv->mutex); + if (scan_completed) + ieee80211_scan_completed(priv->hw, true); + IWL_DEBUG_MAC80211(priv, "leave\n"); } diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h index 5c2bcef5df0c..0b961a353ff6 100644 --- a/drivers/net/wireless/iwlwifi/iwl-debug.h +++ b/drivers/net/wireless/iwlwifi/iwl-debug.h @@ -71,7 +71,7 @@ do { \ #define IWL_DEBUG(__priv, level, fmt, args...) #define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) 
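Several iwlwifi hunks in this series (iwl-agn-lib.c and iwl-agn-tx.c above, iwl-scan.c and iwl-sta.c below) replace WARN_ON(!spin_is_locked(...)) and WARN_ON(!mutex_is_locked(...)) with lockdep_assert_held(). The old checks only ask whether someone holds the lock, and spin_is_locked() is always false on uniprocessor builds without spinlock debugging, so the WARN fires spuriously; lockdep_assert_held() verifies that the current context holds the lock and compiles away when lockdep is disabled. A minimal hedged sketch of the pattern with a hypothetical structure:

#include <linux/spinlock.h>
#include <linux/lockdep.h>

struct foo_dev {
	spinlock_t lock;
	int count;
};

/* Callers must hold foo->lock; lockdep verifies this in debug builds
 * and the check vanishes entirely without CONFIG_LOCKDEP. */
static void foo_account(struct foo_dev *foo, int n)
{
	lockdep_assert_held(&foo->lock);
	foo->count += n;
}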
static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level, - void *p, u32 len) + const void *p, u32 len) {} #endif /* CONFIG_IWLWIFI_DEBUG */ diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.h b/drivers/net/wireless/iwlwifi/iwl-devtrace.h index ae7319bb3a99..4cf864c664ee 100644 --- a/drivers/net/wireless/iwlwifi/iwl-devtrace.h +++ b/drivers/net/wireless/iwlwifi/iwl-devtrace.h @@ -193,7 +193,7 @@ TRACE_EVENT(iwlwifi_dev_tx, __entry->framelen = buf0_len + buf1_len; memcpy(__get_dynamic_array(tfd), tfd, tfdlen); memcpy(__get_dynamic_array(buf0), buf0, buf0_len); - memcpy(__get_dynamic_array(buf1), buf1, buf0_len); + memcpy(__get_dynamic_array(buf1), buf1, buf1_len); ), TP_printk("[%p] TX %.2x (%zu bytes)", __entry->priv, diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c index b0c6b0473901..a4b3663a262f 100644 --- a/drivers/net/wireless/iwlwifi/iwl-scan.c +++ b/drivers/net/wireless/iwlwifi/iwl-scan.c @@ -298,7 +298,7 @@ EXPORT_SYMBOL(iwl_init_scan_params); static int iwl_scan_initiate(struct iwl_priv *priv, struct ieee80211_vif *vif) { - WARN_ON(!mutex_is_locked(&priv->mutex)); + lockdep_assert_held(&priv->mutex); IWL_DEBUG_INFO(priv, "Starting scan...\n"); set_bit(STATUS_SCANNING, &priv->status); diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.c b/drivers/net/wireless/iwlwifi/iwl-sta.c index 9511f03f07e0..7e0829be5e78 100644 --- a/drivers/net/wireless/iwlwifi/iwl-sta.c +++ b/drivers/net/wireless/iwlwifi/iwl-sta.c @@ -773,7 +773,7 @@ static int iwl_send_static_wepkey_cmd(struct iwl_priv *priv, u8 send_if_empty) int iwl_restore_default_wep_keys(struct iwl_priv *priv) { - WARN_ON(!mutex_is_locked(&priv->mutex)); + lockdep_assert_held(&priv->mutex); return iwl_send_static_wepkey_cmd(priv, 0); } @@ -784,7 +784,7 @@ int iwl_remove_default_wep_key(struct iwl_priv *priv, { int ret; - WARN_ON(!mutex_is_locked(&priv->mutex)); + lockdep_assert_held(&priv->mutex); IWL_DEBUG_WEP(priv, "Removing default WEP key: idx=%d\n", keyconf->keyidx); @@ -808,7 +808,7 @@ int iwl_set_default_wep_key(struct iwl_priv *priv, { int ret; - WARN_ON(!mutex_is_locked(&priv->mutex)); + lockdep_assert_held(&priv->mutex); if (keyconf->keylen != WEP_KEY_LEN_128 && keyconf->keylen != WEP_KEY_LEN_64) { diff --git a/drivers/net/wireless/libertas/cfg.c b/drivers/net/wireless/libertas/cfg.c index 25f902760980..8e9fbfd804b6 100644 --- a/drivers/net/wireless/libertas/cfg.c +++ b/drivers/net/wireless/libertas/cfg.c @@ -257,6 +257,29 @@ static int lbs_add_supported_rates_tlv(u8 *tlv) return sizeof(rate_tlv->header) + i; } +/* Add common rates from a TLV and return the new end of the TLV */ +static u8 * +add_ie_rates(u8 *tlv, const u8 *ie, int *nrates) +{ + int hw, ap, ap_max = ie[1]; + u8 hw_rate; + + /* Advance past IE header */ + ie += 2; + + lbs_deb_hex(LBS_DEB_ASSOC, "AP IE Rates", (u8 *) ie, ap_max); + + for (hw = 0; hw < ARRAY_SIZE(lbs_rates); hw++) { + hw_rate = lbs_rates[hw].bitrate / 5; + for (ap = 0; ap < ap_max; ap++) { + if (hw_rate == (ie[ap] & 0x7f)) { + *tlv++ = ie[ap]; + *nrates = *nrates + 1; + } + } + } + return tlv; +} /* * Adds a TLV with all rates the hardware *and* BSS supports. 
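The add_ie_rates() helper added above walks an 802.11 Supported Rates information element: byte 0 is the element ID, byte 1 the payload length, and each following byte encodes one rate in 500 kb/s units with bit 0x80 flagging a basic rate, which is why the driver masks with 0x7f before comparing against its hardware rate table. A small standalone sketch of that layout (the sample IE contents are illustrative):

#include <stdio.h>
#include <stdint.h>

/* Dump the rates carried in an 802.11 Supported Rates element:
 * ie[0] = element ID (1), ie[1] = length, ie[2..] = rates in
 * 500 kb/s units, bit 0x80 = basic rate. */
static void print_rates_ie(const uint8_t *ie)
{
	int i;

	for (i = 0; i < ie[1]; i++) {
		int r = ie[2 + i] & 0x7f;

		printf("%d%s Mb/s%s\n", r / 2, (r & 1) ? ".5" : "",
		       (ie[2 + i] & 0x80) ? " (basic)" : "");
	}
}

int main(void)
{
	/* 1, 2, 5.5 and 11 Mb/s, all flagged as basic rates */
	const uint8_t supp_rates[] = { 0x01, 0x04, 0x82, 0x84, 0x8b, 0x96 };

	print_rates_ie(supp_rates);
	return 0;
}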
@@ -264,8 +287,11 @@ static int lbs_add_supported_rates_tlv(u8 *tlv) static int lbs_add_common_rates_tlv(u8 *tlv, struct cfg80211_bss *bss) { struct mrvl_ie_rates_param_set *rate_tlv = (void *)tlv; - const u8 *rates_eid = ieee80211_bss_get_ie(bss, WLAN_EID_SUPP_RATES); - int n; + const u8 *rates_eid, *ext_rates_eid; + int n = 0; + + rates_eid = ieee80211_bss_get_ie(bss, WLAN_EID_SUPP_RATES); + ext_rates_eid = ieee80211_bss_get_ie(bss, WLAN_EID_EXT_SUPP_RATES); /* * 01 00 TLV_TYPE_RATES @@ -275,26 +301,21 @@ static int lbs_add_common_rates_tlv(u8 *tlv, struct cfg80211_bss *bss) rate_tlv->header.type = cpu_to_le16(TLV_TYPE_RATES); tlv += sizeof(rate_tlv->header); - if (!rates_eid) { + /* Add basic rates */ + if (rates_eid) { + tlv = add_ie_rates(tlv, rates_eid, &n); + + /* Add extended rates, if any */ + if (ext_rates_eid) + tlv = add_ie_rates(tlv, ext_rates_eid, &n); + } else { + lbs_deb_assoc("assoc: bss had no basic rate IE\n"); /* Fallback: add basic 802.11b rates */ *tlv++ = 0x82; *tlv++ = 0x84; *tlv++ = 0x8b; *tlv++ = 0x96; n = 4; - } else { - int hw, ap; - u8 ap_max = rates_eid[1]; - n = 0; - for (hw = 0; hw < ARRAY_SIZE(lbs_rates); hw++) { - u8 hw_rate = lbs_rates[hw].bitrate / 5; - for (ap = 0; ap < ap_max; ap++) { - if (hw_rate == (rates_eid[ap+2] & 0x7f)) { - *tlv++ = rates_eid[ap+2]; - n++; - } - } - } } rate_tlv->header.len = cpu_to_le16(n); @@ -465,7 +486,15 @@ static int lbs_ret_scan(struct lbs_private *priv, unsigned long dummy, lbs_deb_enter(LBS_DEB_CFG80211); bsssize = get_unaligned_le16(&scanresp->bssdescriptsize); - nr_sets = le16_to_cpu(resp->size); + nr_sets = le16_to_cpu(scanresp->nr_sets); + + lbs_deb_scan("scan response: %d BSSs (%d bytes); resp size %d bytes\n", + nr_sets, bsssize, le16_to_cpu(resp->size)); + + if (nr_sets == 0) { + ret = 0; + goto done; + } /* * The general layout of the scan response is described in chapter @@ -670,8 +699,13 @@ static void lbs_scan_worker(struct work_struct *work) if (priv->scan_channel >= priv->scan_req->n_channels) { /* Mark scan done */ - cfg80211_scan_done(priv->scan_req, false); + if (priv->internal_scan) + kfree(priv->scan_req); + else + cfg80211_scan_done(priv->scan_req, false); + priv->scan_req = NULL; + priv->last_scan = jiffies; } /* Restart network */ @@ -682,10 +716,33 @@ static void lbs_scan_worker(struct work_struct *work) kfree(scan_cmd); + /* Wake up anything waiting on scan completion */ + if (priv->scan_req == NULL) { + lbs_deb_scan("scan: waking up waiters\n"); + wake_up_all(&priv->scan_q); + } + out_no_scan_cmd: lbs_deb_leave(LBS_DEB_SCAN); } +static void _internal_start_scan(struct lbs_private *priv, bool internal, + struct cfg80211_scan_request *request) +{ + lbs_deb_enter(LBS_DEB_CFG80211); + + lbs_deb_scan("scan: ssids %d, channels %d, ie_len %zd\n", + request->n_ssids, request->n_channels, request->ie_len); + + priv->scan_channel = 0; + queue_delayed_work(priv->work_thread, &priv->scan_work, + msecs_to_jiffies(50)); + + priv->scan_req = request; + priv->internal_scan = internal; + + lbs_deb_leave(LBS_DEB_CFG80211); +} static int lbs_cfg_scan(struct wiphy *wiphy, struct net_device *dev, @@ -702,18 +759,11 @@ static int lbs_cfg_scan(struct wiphy *wiphy, goto out; } - lbs_deb_scan("scan: ssids %d, channels %d, ie_len %zd\n", - request->n_ssids, request->n_channels, request->ie_len); - - priv->scan_channel = 0; - queue_delayed_work(priv->work_thread, &priv->scan_work, - msecs_to_jiffies(50)); + _internal_start_scan(priv, false, request); if (priv->surpriseremoved) ret = -EIO; - priv->scan_req = request; - out: 
lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret); return ret; @@ -1000,6 +1050,7 @@ static int lbs_associate(struct lbs_private *priv, int status; int ret; u8 *pos = &(cmd->iebuf[0]); + u8 *tmp; lbs_deb_enter(LBS_DEB_CFG80211); @@ -1044,7 +1095,9 @@ static int lbs_associate(struct lbs_private *priv, pos += lbs_add_cf_param_tlv(pos); /* add rates TLV */ + tmp = pos + 4; /* skip Marvell IE header */ pos += lbs_add_common_rates_tlv(pos, bss); + lbs_deb_hex(LBS_DEB_ASSOC, "Common Rates", tmp, pos - tmp); /* add auth type TLV */ if (priv->fwrelease >= 0x09000000) @@ -1124,7 +1177,62 @@ done: return ret; } +static struct cfg80211_scan_request * +_new_connect_scan_req(struct wiphy *wiphy, struct cfg80211_connect_params *sme) +{ + struct cfg80211_scan_request *creq = NULL; + int i, n_channels = 0; + enum ieee80211_band band; + + for (band = 0; band < IEEE80211_NUM_BANDS; band++) { + if (wiphy->bands[band]) + n_channels += wiphy->bands[band]->n_channels; + } + + creq = kzalloc(sizeof(*creq) + sizeof(struct cfg80211_ssid) + + n_channels * sizeof(void *), + GFP_ATOMIC); + if (!creq) + return NULL; + + /* SSIDs come after channels */ + creq->ssids = (void *)&creq->channels[n_channels]; + creq->n_channels = n_channels; + creq->n_ssids = 1; + + /* Scan all available channels */ + i = 0; + for (band = 0; band < IEEE80211_NUM_BANDS; band++) { + int j; + + if (!wiphy->bands[band]) + continue; + + for (j = 0; j < wiphy->bands[band]->n_channels; j++) { + /* ignore disabled channels */ + if (wiphy->bands[band]->channels[j].flags & + IEEE80211_CHAN_DISABLED) + continue; + + creq->channels[i] = &wiphy->bands[band]->channels[j]; + i++; + } + } + if (i) { + /* Set real number of channels specified in creq->channels[] */ + creq->n_channels = i; + + /* Scan for the SSID we're going to connect to */ + memcpy(creq->ssids[0].ssid, sme->ssid, sme->ssid_len); + creq->ssids[0].ssid_len = sme->ssid_len; + } else { + /* No channels found... */ + kfree(creq); + creq = NULL; + } + return creq; +} static int lbs_cfg_connect(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_connect_params *sme) @@ -1136,37 +1244,43 @@ static int lbs_cfg_connect(struct wiphy *wiphy, struct net_device *dev, lbs_deb_enter(LBS_DEB_CFG80211); - if (sme->bssid) { - bss = cfg80211_get_bss(wiphy, sme->channel, sme->bssid, - sme->ssid, sme->ssid_len, - WLAN_CAPABILITY_ESS, WLAN_CAPABILITY_ESS); - } else { - /* - * Here we have an impedance mismatch. The firmware command - * CMD_802_11_ASSOCIATE always needs a BSSID, it cannot - * connect otherwise. However, for the connect-API of - * cfg80211 the bssid is purely optional. We don't get one, - * except the user specifies one on the "iw" command line. - * - * If we don't got one, we could initiate a scan and look - * for the best matching cfg80211_bss entry. - * - * Or, better yet, net/wireless/sme.c get's rewritten into - * something more generally useful. + if (!sme->bssid) { + /* Run a scan if one isn't in-progress already and if the last + * scan was done more than 2 seconds ago. 
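The libertas changes pair the new scan_q waitqueue with the scan worker: the worker clears priv->scan_req and calls wake_up_all() once the scan has completed, and the connect path (just below) blocks on wait_event_interruptible_timeout() until that happens. A minimal hedged sketch of the same wake/wait pattern, with hypothetical names standing in for the driver state:

#include <linux/wait.h>
#include <linux/jiffies.h>

static DECLARE_WAIT_QUEUE_HEAD(scan_q);
static void *scan_req;			/* non-NULL while a scan is running */

/* Scan-worker side: mark the scan finished and wake any waiters. */
static void scan_finished(void)
{
	scan_req = NULL;
	wake_up_all(&scan_q);
}

/* Connect side: wait up to 15 seconds for an in-progress scan to end. */
static void wait_for_scan(void)
{
	wait_event_interruptible_timeout(scan_q, scan_req == NULL, 15 * HZ);
}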
*/ - lbs_pr_err("TODO: no BSS specified\n"); - ret = -ENOTSUPP; - goto done; - } + if (priv->scan_req == NULL && + time_after(jiffies, priv->last_scan + (2 * HZ))) { + struct cfg80211_scan_request *creq; + creq = _new_connect_scan_req(wiphy, sme); + if (!creq) { + ret = -EINVAL; + goto done; + } + + lbs_deb_assoc("assoc: scanning for compatible AP\n"); + _internal_start_scan(priv, true, creq); + } + + /* Wait for any in-progress scan to complete */ + lbs_deb_assoc("assoc: waiting for scan to complete\n"); + wait_event_interruptible_timeout(priv->scan_q, + (priv->scan_req == NULL), + (15 * HZ)); + lbs_deb_assoc("assoc: scanning competed\n"); + } + /* Find the BSS we want using available scan results */ + bss = cfg80211_get_bss(wiphy, sme->channel, sme->bssid, + sme->ssid, sme->ssid_len, + WLAN_CAPABILITY_ESS, WLAN_CAPABILITY_ESS); if (!bss) { - lbs_pr_err("assicate: bss %pM not in scan results\n", + lbs_pr_err("assoc: bss %pM not in scan results\n", sme->bssid); ret = -ENOENT; goto done; } - lbs_deb_assoc("trying %pM", sme->bssid); + lbs_deb_assoc("trying %pM\n", bss->bssid); lbs_deb_assoc("cipher 0x%x, key index %d, key len %d\n", sme->crypto.cipher_group, sme->key_idx, sme->key_len); @@ -1229,7 +1343,7 @@ static int lbs_cfg_connect(struct wiphy *wiphy, struct net_device *dev, lbs_set_radio(priv, preamble, 1); /* Do the actual association */ - lbs_associate(priv, bss, sme); + ret = lbs_associate(priv, bss, sme); done: if (bss) diff --git a/drivers/net/wireless/libertas/dev.h b/drivers/net/wireless/libertas/dev.h index 3c7e255e18c7..f062ed583901 100644 --- a/drivers/net/wireless/libertas/dev.h +++ b/drivers/net/wireless/libertas/dev.h @@ -161,6 +161,11 @@ struct lbs_private { /** Scanning */ struct delayed_work scan_work; int scan_channel; + /* Queue of things waiting for scan completion */ + wait_queue_head_t scan_q; + /* Whether the scan was initiated internally and not by cfg80211 */ + bool internal_scan; + unsigned long last_scan; }; extern struct cmd_confirm_sleep confirm_sleep; diff --git a/drivers/net/wireless/libertas/main.c b/drivers/net/wireless/libertas/main.c index 258967144b96..24958a86747b 100644 --- a/drivers/net/wireless/libertas/main.c +++ b/drivers/net/wireless/libertas/main.c @@ -719,6 +719,7 @@ static int lbs_init_adapter(struct lbs_private *priv) priv->deep_sleep_required = 0; priv->wakeup_dev_required = 0; init_waitqueue_head(&priv->ds_awake_q); + init_waitqueue_head(&priv->scan_q); priv->authtype_auto = 1; priv->is_host_sleep_configured = 0; priv->is_host_sleep_activated = 0; diff --git a/drivers/net/wireless/p54/p54pci.c b/drivers/net/wireless/p54/p54pci.c index 822f8dc26e9c..71a101fb2e4e 100644 --- a/drivers/net/wireless/p54/p54pci.c +++ b/drivers/net/wireless/p54/p54pci.c @@ -43,6 +43,8 @@ static DEFINE_PCI_DEVICE_TABLE(p54p_table) = { { PCI_DEVICE(0x1260, 0x3886) }, /* Intersil PRISM Xbow Wireless LAN adapter (Symbol AP-300) */ { PCI_DEVICE(0x1260, 0xffff) }, + /* Standard Microsystems Corp SMC2802W Wireless PCI */ + { PCI_DEVICE(0x10b8, 0x2802) }, { }, }; diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.c b/drivers/net/wireless/rt2x00/rt2x00pci.c index 19b262e1ddbe..63c2cc408e15 100644 --- a/drivers/net/wireless/rt2x00/rt2x00pci.c +++ b/drivers/net/wireless/rt2x00/rt2x00pci.c @@ -240,16 +240,16 @@ int rt2x00pci_probe(struct pci_dev *pci_dev, const struct pci_device_id *id) struct rt2x00_dev *rt2x00dev; int retval; - retval = pci_request_regions(pci_dev, pci_name(pci_dev)); + retval = pci_enable_device(pci_dev); if (retval) { - ERROR_PROBE("PCI request regions 
failed.\n"); + ERROR_PROBE("Enable device failed.\n"); return retval; } - retval = pci_enable_device(pci_dev); + retval = pci_request_regions(pci_dev, pci_name(pci_dev)); if (retval) { - ERROR_PROBE("Enable device failed.\n"); - goto exit_release_regions; + ERROR_PROBE("PCI request regions failed.\n"); + goto exit_disable_device; } pci_set_master(pci_dev); @@ -260,14 +260,14 @@ int rt2x00pci_probe(struct pci_dev *pci_dev, const struct pci_device_id *id) if (dma_set_mask(&pci_dev->dev, DMA_BIT_MASK(32))) { ERROR_PROBE("PCI DMA not supported.\n"); retval = -EIO; - goto exit_disable_device; + goto exit_release_regions; } hw = ieee80211_alloc_hw(sizeof(struct rt2x00_dev), ops->hw); if (!hw) { ERROR_PROBE("Failed to allocate hardware.\n"); retval = -ENOMEM; - goto exit_disable_device; + goto exit_release_regions; } pci_set_drvdata(pci_dev, hw); @@ -300,13 +300,12 @@ exit_free_reg: exit_free_device: ieee80211_free_hw(hw); -exit_disable_device: - if (retval != -EBUSY) - pci_disable_device(pci_dev); - exit_release_regions: pci_release_regions(pci_dev); +exit_disable_device: + pci_disable_device(pci_dev); + pci_set_drvdata(pci_dev, NULL); return retval; diff --git a/drivers/net/wireless/rtl818x/rtl8180_dev.c b/drivers/net/wireless/rtl818x/rtl8180_dev.c index 1d8178563d76..b50c39aaec05 100644 --- a/drivers/net/wireless/rtl818x/rtl8180_dev.c +++ b/drivers/net/wireless/rtl818x/rtl8180_dev.c @@ -695,6 +695,8 @@ static void rtl8180_beacon_work(struct work_struct *work) /* grab a fresh beacon */ skb = ieee80211_beacon_get(dev, vif); + if (!skb) + goto resched; /* * update beacon timestamp w/ TSF value diff --git a/drivers/net/wireless/wl12xx/wl1271_spi.c b/drivers/net/wireless/wl12xx/wl1271_spi.c index 96d25fb50495..4cb99c541e2a 100644 --- a/drivers/net/wireless/wl12xx/wl1271_spi.c +++ b/drivers/net/wireless/wl12xx/wl1271_spi.c @@ -160,9 +160,8 @@ static void wl1271_spi_init(struct wl1271 *wl) spi_message_add_tail(&t, &m); spi_sync(wl_to_spi(wl), &m); - kfree(cmd); - wl1271_dump(DEBUG_SPI, "spi init -> ", cmd, WSPI_INIT_CMD_LEN); + kfree(cmd); } #define WL1271_BUSY_WORD_TIMEOUT 1000 diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c index 6a5af18faf68..b0de57947189 100644 --- a/drivers/pci/intel-iommu.c +++ b/drivers/pci/intel-iommu.c @@ -3030,6 +3030,34 @@ static void __init iommu_exit_mempool(void) } +static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev) +{ + struct dmar_drhd_unit *drhd; + u32 vtbar; + int rc; + + /* We know that this device on this chipset has its own IOMMU. + * If we find it under a different IOMMU, then the BIOS is lying + * to us. Hope that the IOMMU for this device is actually + * disabled, and it needs no translation... 
+ */ + rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar); + if (rc) { + /* "can't" happen */ + dev_info(&pdev->dev, "failed to run vt-d quirk\n"); + return; + } + vtbar &= 0xffff0000; + + /* we know that the this iommu should be at offset 0xa000 from vtbar */ + drhd = dmar_find_matched_drhd_unit(pdev); + if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000, + TAINT_FIRMWARE_WORKAROUND, + "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n")) + pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO; +} +DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu); + static void __init init_no_remapping_devices(void) { struct dmar_drhd_unit *drhd; diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c index dd88803e4899..7158f9528ecc 100644 --- a/drivers/s390/block/dasd_eer.c +++ b/drivers/s390/block/dasd_eer.c @@ -701,7 +701,7 @@ int __init dasd_eer_init(void) void dasd_eer_exit(void) { if (dasd_eer_dev) { - WARN_ON(misc_deregister(dasd_eer_dev) != 0); + misc_deregister(dasd_eer_dev); kfree(dasd_eer_dev); dasd_eer_dev = NULL; } diff --git a/drivers/s390/char/monreader.c b/drivers/s390/char/monreader.c index 2ed3f82e5c30..e021ec663ef9 100644 --- a/drivers/s390/char/monreader.c +++ b/drivers/s390/char/monreader.c @@ -627,7 +627,7 @@ out_iucv: static void __exit mon_exit(void) { segment_unload(mon_dcss_name); - WARN_ON(misc_deregister(&mon_dev) != 0); + misc_deregister(&mon_dev); device_unregister(monreader_device); driver_unregister(&monreader_driver); iucv_unregister(&monreader_iucv_handler, 1); diff --git a/drivers/s390/char/monwriter.c b/drivers/s390/char/monwriter.c index 98a49dfda1de..572a1e7fd099 100644 --- a/drivers/s390/char/monwriter.c +++ b/drivers/s390/char/monwriter.c @@ -380,7 +380,7 @@ out_driver: static void __exit mon_exit(void) { - WARN_ON(misc_deregister(&mon_dev) != 0); + misc_deregister(&mon_dev); platform_device_unregister(monwriter_pdev); platform_driver_unregister(&monwriter_pdrv); } diff --git a/drivers/serial/crisv10.c b/drivers/serial/crisv10.c index 31f172397af3..30626440a062 100644 --- a/drivers/serial/crisv10.c +++ b/drivers/serial/crisv10.c @@ -3724,6 +3724,17 @@ rs_ioctl(struct tty_struct *tty, struct file * file, return e100_enable_rs485(tty, &rs485data); } + case TIOCGRS485: + { + struct serial_rs485 *rs485data = + &(((struct e100_serial *)tty->driver_data)->rs485); + /* This is the ioctl to get RS485 data from user-space */ + if (copy_to_user((struct serial_rs485 *) arg, + rs485data, + sizeof(serial_rs485))) + return -EFAULT; + break; + } case TIOCSERWRRS485: { diff --git a/drivers/staging/pohmelfs/inode.c b/drivers/staging/pohmelfs/inode.c index bc1c6051a6f6..97dae297ca3c 100644 --- a/drivers/staging/pohmelfs/inode.c +++ b/drivers/staging/pohmelfs/inode.c @@ -968,12 +968,18 @@ int pohmelfs_setattr_raw(struct inode *inode, struct iattr *attr) goto err_out_exit; } - err = inode_setattr(inode, attr); - if (err) { - dprintk("%s: ino: %llu, failed to set the attributes.\n", __func__, POHMELFS_I(inode)->ino); - goto err_out_exit; + if ((attr->ia_valid & ATTR_SIZE) && + attr->ia_size != i_size_read(inode)) { + err = vmtruncate(inode, attr->ia_size); + if (err) { + dprintk("%s: ino: %llu, failed to set the attributes.\n", __func__, POHMELFS_I(inode)->ino); + goto err_out_exit; + } } + setattr_copy(inode, attr); + mark_inode_dirty(inode); + dprintk("%s: ino: %llu, mode: %o -> %o, uid: %u -> %u, gid: %u -> %u, size: %llu -> %llu.\n", __func__, 
POHMELFS_I(inode)->ino, inode->i_mode, attr->ia_mode, inode->i_uid, attr->ia_uid, inode->i_gid, attr->ia_gid, inode->i_size, attr->ia_size); @@ -1217,7 +1223,7 @@ void pohmelfs_fill_inode(struct inode *inode, struct netfs_inode_info *info) } } -static void pohmelfs_drop_inode(struct inode *inode) +static int pohmelfs_drop_inode(struct inode *inode) { struct pohmelfs_sb *psb = POHMELFS_SB(inode->i_sb); struct pohmelfs_inode *pi = POHMELFS_I(inode); @@ -1226,7 +1232,7 @@ static void pohmelfs_drop_inode(struct inode *inode) list_del_init(&pi->inode_entry); spin_unlock(&psb->ino_lock); - generic_drop_inode(inode); + return generic_drop_inode(inode); } static struct pohmelfs_inode *pohmelfs_get_inode_from_list(struct pohmelfs_sb *psb, diff --git a/drivers/video/w100fb.c b/drivers/video/w100fb.c index e66b8b19ce5d..d8b12c32e3ef 100644 --- a/drivers/video/w100fb.c +++ b/drivers/video/w100fb.c @@ -858,9 +858,9 @@ unsigned long w100fb_gpio_read(int port) void w100fb_gpio_write(int port, unsigned long value) { if (port==W100_GPIO_PORT_A) - value = writel(value, remapped_regs + mmGPIO_DATA); + writel(value, remapped_regs + mmGPIO_DATA); else - value = writel(value, remapped_regs + mmGPIO_DATA2); + writel(value, remapped_regs + mmGPIO_DATA2); } EXPORT_SYMBOL(w100fb_gpio_read); EXPORT_SYMBOL(w100fb_gpio_write); |
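The pohmelfs_setattr_raw() hunk above follows the pattern that replaces inode_setattr() in this kernel series: truncate explicitly when ATTR_SIZE actually changes the file size, then copy the remaining attributes with setattr_copy() and dirty the inode. A minimal hedged sketch of that pattern for a hypothetical simple filesystem (examplefs_setattr is not from any driver in this diff; error handling is trimmed):

#include <linux/fs.h>
#include <linux/mm.h>

static int examplefs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	int err;

	err = inode_change_ok(inode, attr);
	if (err)
		return err;

	/* Handle size changes via vmtruncate() rather than inode_setattr(). */
	if ((attr->ia_valid & ATTR_SIZE) &&
	    attr->ia_size != i_size_read(inode)) {
		err = vmtruncate(inode, attr->ia_size);
		if (err)
			return err;
	}

	/* Copy the remaining attributes and mark the inode dirty. */
	setattr_copy(inode, attr);
	mark_inode_dirty(inode);
	return 0;
}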