Diffstat (limited to 'drivers')
81 files changed, 15186 insertions, 717 deletions
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile index d50ac3bfefd..2d97b4f1e4d 100644 --- a/drivers/gpio/Makefile +++ b/drivers/gpio/Makefile @@ -26,6 +26,7 @@ include $(TOPDIR)/config.mk LIB := $(obj)libgpio.o COBJS-$(CONFIG_AT91_GPIO) += at91_gpio.o +COBJS-$(CONFIG_INTEL_ICH6_GPIO) += intel_ich6_gpio.o COBJS-$(CONFIG_KIRKWOOD_GPIO) += kw_gpio.o COBJS-$(CONFIG_MARVELL_GPIO) += mvgpio.o COBJS-$(CONFIG_MARVELL_MFP) += mvmfp.o diff --git a/drivers/gpio/intel_ich6_gpio.c b/drivers/gpio/intel_ich6_gpio.c new file mode 100644 index 00000000000..6fed01f71a0 --- /dev/null +++ b/drivers/gpio/intel_ich6_gpio.c @@ -0,0 +1,290 @@ +/* + * Copyright (c) 2012 The Chromium OS Authors. + * See file CREDITS for list of people who contributed to this + * project. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, + * MA 02111-1307 USA + */ + +/* + * This is a GPIO driver for Intel ICH6 and later. The x86 GPIOs are accessed + * through the PCI bus. Each PCI device has 256 bytes of configuration space, + * consisting of a standard header and a device-specific set of registers. PCI + * bus 0, device 31, function 0 gives us access to the chipset GPIOs (among + * other things). Within the PCI configuration space, the GPIOBASE register + * tells us where in the device's I/O region we can find more registers to + * actually access the GPIOs. + * + * PCI bus/device/function 0:1f:0 => PCI config registers + * PCI config register "GPIOBASE" + * PCI I/O space + [GPIOBASE] => start of GPIO registers + * GPIO registers => gpio pin function, direction, value + * + * + * Danger Will Robinson! Bank 0 (GPIOs 0-31) seems to be fairly stable. Most + * ICH versions have more, but the decoding the matrix that describes them is + * absurdly complex and constantly changing. We'll provide Bank 1 and Bank 2, + * but they will ONLY work for certain unspecified chipsets because the offset + * from GPIOBASE changes randomly. Even then, many GPIOs are unimplemented or + * reserved or subject to arcane restrictions. + */ + +#include <common.h> +#include <pci.h> +#include <asm/gpio.h> +#include <asm/io.h> + +/* Where in config space is the register that points to the GPIO registers? */ +#define PCI_CFG_GPIOBASE 0x48 + +#define NUM_BANKS 3 + +/* Within the I/O space, where are the registers to control the GPIOs? */ +static struct { + u8 use_sel; + u8 io_sel; + u8 lvl; +} gpio_bank[NUM_BANKS] = { + { 0x00, 0x04, 0x0c }, /* Bank 0 */ + { 0x30, 0x34, 0x38 }, /* Bank 1 */ + { 0x40, 0x44, 0x48 } /* Bank 2 */ +}; + +static pci_dev_t dev; /* handle for 0:1f:0 */ +static u32 gpiobase; /* offset into I/O space */ +static int found_it_once; /* valid GPIO device? 
*/ +static u32 lock[NUM_BANKS]; /* "lock" for access to pins */ + +static int bad_arg(int num, int *bank, int *bitnum) +{ + int i = num / 32; + int j = num % 32; + + if (num < 0 || i > NUM_BANKS) { + debug("%s: bogus gpio num: %d\n", __func__, num); + return -1; + } + *bank = i; + *bitnum = j; + return 0; +} + +static int mark_gpio(int bank, int bitnum) +{ + if (lock[bank] & (1UL << bitnum)) { + debug("%s: %d.%d already marked\n", __func__, bank, bitnum); + return -1; + } + lock[bank] |= (1 << bitnum); + return 0; +} + +static void clear_gpio(int bank, int bitnum) +{ + lock[bank] &= ~(1 << bitnum); +} + +static int notmine(int num, int *bank, int *bitnum) +{ + if (bad_arg(num, bank, bitnum)) + return -1; + return !(lock[*bank] & (1UL << *bitnum)); +} + +static int gpio_init(void) +{ + u8 tmpbyte; + u16 tmpword; + u32 tmplong; + + /* Have we already done this? */ + if (found_it_once) + return 0; + + /* Where should it be? */ + dev = PCI_BDF(0, 0x1f, 0); + + /* Is the device present? */ + pci_read_config_word(dev, PCI_VENDOR_ID, &tmpword); + if (tmpword != PCI_VENDOR_ID_INTEL) { + debug("%s: wrong VendorID\n", __func__); + return -1; + } + + pci_read_config_word(dev, PCI_DEVICE_ID, &tmpword); + debug("Found %04x:%04x\n", PCI_VENDOR_ID_INTEL, tmpword); + /* + * We'd like to validate the Device ID too, but pretty much any + * value is either a) correct with slight differences, or b) + * correct but undocumented. We'll have to check a bunch of other + * things instead... + */ + + /* I/O should already be enabled (it's a RO bit). */ + pci_read_config_word(dev, PCI_COMMAND, &tmpword); + if (!(tmpword & PCI_COMMAND_IO)) { + debug("%s: device IO not enabled\n", __func__); + return -1; + } + + /* Header Type must be normal (bits 6-0 only; see spec.) */ + pci_read_config_byte(dev, PCI_HEADER_TYPE, &tmpbyte); + if ((tmpbyte & 0x7f) != PCI_HEADER_TYPE_NORMAL) { + debug("%s: invalid Header type\n", __func__); + return -1; + } + + /* Base Class must be a bridge device */ + pci_read_config_byte(dev, PCI_CLASS_CODE, &tmpbyte); + if (tmpbyte != PCI_CLASS_CODE_BRIDGE) { + debug("%s: invalid class\n", __func__); + return -1; + } + /* Sub Class must be ISA */ + pci_read_config_byte(dev, PCI_CLASS_SUB_CODE, &tmpbyte); + if (tmpbyte != PCI_CLASS_SUB_CODE_BRIDGE_ISA) { + debug("%s: invalid subclass\n", __func__); + return -1; + } + + /* Programming Interface must be 0x00 (no others exist) */ + pci_read_config_byte(dev, PCI_CLASS_PROG, &tmpbyte); + if (tmpbyte != 0x00) { + debug("%s: invalid interface type\n", __func__); + return -1; + } + + /* + * GPIOBASE moved to its current offset with ICH6, but prior to + * that it was unused (or undocumented). Check that it looks + * okay: not all ones or zeros, and mapped to I/O space (bit 0). + */ + pci_read_config_dword(dev, PCI_CFG_GPIOBASE, &tmplong); + if (tmplong == 0x00000000 || tmplong == 0xffffffff || + !(tmplong & 0x00000001)) { + debug("%s: unexpected GPIOBASE value\n", __func__); + return -1; + } + + /* + * Okay, I guess we're looking at the right device. The actual + * GPIO registers are in the PCI device's I/O space, starting + * at the offset that we just read. Bit 0 indicates that it's + * an I/O address, not a memory address, so mask that off. + */ + gpiobase = tmplong & 0xfffffffe; + + /* Finally. These are the droids we're looking for. */ + found_it_once = 1; + return 0; +} + +int gpio_request(unsigned num, const char *label /* UNUSED */) +{ + u32 tmplong; + int i = 0, j = 0; + + /* Is the hardware ready? 
*/ + if (gpio_init()) + return -1; + + if (bad_arg(num, &i, &j)) + return -1; + + /* + * Make sure that the GPIO pin we want isn't already in use for some + * built-in hardware function. We have to check this for every + * requested pin. + */ + tmplong = inl(gpiobase + gpio_bank[i].use_sel); + if (!(tmplong & (1UL << j))) { + debug("%s: gpio %d is reserved for internal use\n", __func__, + num); + return -1; + } + + return mark_gpio(i, j); +} + +int gpio_free(unsigned num) +{ + int i = 0, j = 0; + + if (notmine(num, &i, &j)) + return -1; + + clear_gpio(i, j); + return 0; +} + +int gpio_direction_input(unsigned num) +{ + u32 tmplong; + int i = 0, j = 0; + + if (notmine(num, &i, &j)) + return -1; + + tmplong = inl(gpiobase + gpio_bank[i].io_sel); + tmplong |= (1UL << j); + outl(gpiobase + gpio_bank[i].io_sel, tmplong); + return 0; +} + +int gpio_direction_output(unsigned num, int value) +{ + u32 tmplong; + int i = 0, j = 0; + + if (notmine(num, &i, &j)) + return -1; + + tmplong = inl(gpiobase + gpio_bank[i].io_sel); + tmplong &= ~(1UL << j); + outl(gpiobase + gpio_bank[i].io_sel, tmplong); + return 0; +} + +int gpio_get_value(unsigned num) +{ + u32 tmplong; + int i = 0, j = 0; + int r; + + if (notmine(num, &i, &j)) + return -1; + + tmplong = inl(gpiobase + gpio_bank[i].lvl); + r = (tmplong & (1UL << j)) ? 1 : 0; + return r; +} + +int gpio_set_value(unsigned num, int value) +{ + u32 tmplong; + int i = 0, j = 0; + + if (notmine(num, &i, &j)) + return -1; + + tmplong = inl(gpiobase + gpio_bank[i].lvl); + if (value) + tmplong |= (1UL << j); + else + tmplong &= ~(1UL << j); + outl(gpiobase + gpio_bank[i].lvl, tmplong); + return 0; +} diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile index 9fac190a6a8..8cdc3b649ca 100644 --- a/drivers/misc/Makefile +++ b/drivers/misc/Makefile @@ -27,21 +27,13 @@ LIB := $(obj)libmisc.o COBJS-$(CONFIG_ALI152X) += ali512x.o COBJS-$(CONFIG_DS4510) += ds4510.o -COBJS-$(CONFIG_FSL_LAW) += fsl_law.o +COBJS-$(CONFIG_CBMEM_CONSOLE) += cbmem_console.o COBJS-$(CONFIG_GPIO_LED) += gpio_led.o COBJS-$(CONFIG_FSL_MC9SDZ60) += mc9sdz60.o COBJS-$(CONFIG_NS87308) += ns87308.o COBJS-$(CONFIG_PDSP188x) += pdsp188x.o COBJS-$(CONFIG_STATUS_LED) += status_led.o COBJS-$(CONFIG_TWL4030_LED) += twl4030_led.o -COBJS-$(CONFIG_PMIC) += pmic_core.o -COBJS-$(CONFIG_DIALOG_PMIC) += pmic_dialog.o -COBJS-$(CONFIG_PMIC_FSL) += pmic_fsl.o -COBJS-$(CONFIG_PMIC_I2C) += pmic_i2c.o -COBJS-$(CONFIG_PMIC_SPI) += pmic_spi.o -COBJS-$(CONFIG_PMIC_MAX77686) += pmic_max77686.o -COBJS-$(CONFIG_PMIC_MAX8998) += pmic_max8998.o -COBJS-$(CONFIG_PMIC_MAX8997) += pmic_max8997.o COBJS := $(COBJS-y) SRCS := $(COBJS:.o=.c) diff --git a/drivers/misc/cbmem_console.c b/drivers/misc/cbmem_console.c new file mode 100644 index 00000000000..80a84fdf8f9 --- /dev/null +++ b/drivers/misc/cbmem_console.c @@ -0,0 +1,67 @@ +/* + * Copyright (C) 2011 The ChromiumOS Authors. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA, 02110-1301 USA + */ + +#include <common.h> + +#ifndef CONFIG_SYS_COREBOOT +#error This driver requires coreboot +#endif + +#include <asm/arch/sysinfo.h> + +struct cbmem_console { + u32 buffer_size; + u32 buffer_cursor; + u8 buffer_body[0]; +} __attribute__ ((__packed__)); + +static struct cbmem_console *cbmem_console_p; + +void cbmemc_putc(char data) +{ + int cursor; + + cursor = cbmem_console_p->buffer_cursor++; + if (cursor < cbmem_console_p->buffer_size) + cbmem_console_p->buffer_body[cursor] = data; +} + +void cbmemc_puts(const char *str) +{ + char c; + + while ((c = *str++) != 0) + cbmemc_putc(c); +} + +int cbmemc_init(void) +{ + int rc; + struct stdio_dev cons_dev; + cbmem_console_p = lib_sysinfo.cbmem_cons; + + memset(&cons_dev, 0, sizeof(cons_dev)); + + strcpy(cons_dev.name, "cbmem"); + cons_dev.flags = DEV_FLAGS_OUTPUT; /* Output only */ + cons_dev.putc = cbmemc_putc; + cons_dev.puts = cbmemc_puts; + + rc = stdio_register(&cons_dev); + + return (rc == 0) ? 1 : rc; +} diff --git a/drivers/misc/fsl_law.c b/drivers/misc/fsl_law.c deleted file mode 100644 index 223cd5d65c8..00000000000 --- a/drivers/misc/fsl_law.c +++ /dev/null @@ -1,333 +0,0 @@ -/* - * Copyright 2008-2011 Freescale Semiconductor, Inc. - * - * (C) Copyright 2000 - * Wolfgang Denk, DENX Software Engineering, wd@denx.de. - * - * See file CREDITS for list of people who contributed to this - * project. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation; either version 2 of - * the License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, - * MA 02111-1307 USA - */ - -#include <common.h> -#include <linux/compiler.h> -#include <asm/fsl_law.h> -#include <asm/io.h> - -DECLARE_GLOBAL_DATA_PTR; - -#define FSL_HW_NUM_LAWS CONFIG_SYS_FSL_NUM_LAWS - -#ifdef CONFIG_FSL_CORENET -#define LAW_BASE (CONFIG_SYS_FSL_CORENET_CCM_ADDR) -#define LAWAR_ADDR(x) (&((ccsr_local_t *)LAW_BASE)->law[x].lawar) -#define LAWBARH_ADDR(x) (&((ccsr_local_t *)LAW_BASE)->law[x].lawbarh) -#define LAWBARL_ADDR(x) (&((ccsr_local_t *)LAW_BASE)->law[x].lawbarl) -#define LAWBAR_SHIFT 0 -#else -#define LAW_BASE (CONFIG_SYS_IMMR + 0xc08) -#define LAWAR_ADDR(x) ((u32 *)LAW_BASE + 8 * x + 2) -#define LAWBAR_ADDR(x) ((u32 *)LAW_BASE + 8 * x) -#define LAWBAR_SHIFT 12 -#endif - - -static inline phys_addr_t get_law_base_addr(int idx) -{ -#ifdef CONFIG_FSL_CORENET - return (phys_addr_t) - ((u64)in_be32(LAWBARH_ADDR(idx)) << 32) | - in_be32(LAWBARL_ADDR(idx)); -#else - return (phys_addr_t)in_be32(LAWBAR_ADDR(idx)) << LAWBAR_SHIFT; -#endif -} - -static inline void set_law_base_addr(int idx, phys_addr_t addr) -{ -#ifdef CONFIG_FSL_CORENET - out_be32(LAWBARL_ADDR(idx), addr & 0xffffffff); - out_be32(LAWBARH_ADDR(idx), (u64)addr >> 32); -#else - out_be32(LAWBAR_ADDR(idx), addr >> LAWBAR_SHIFT); -#endif -} - -void set_law(u8 idx, phys_addr_t addr, enum law_size sz, enum law_trgt_if id) -{ - gd->used_laws |= (1 << idx); - - out_be32(LAWAR_ADDR(idx), 0); - set_law_base_addr(idx, addr); - out_be32(LAWAR_ADDR(idx), LAW_EN | ((u32)id << 20) | (u32)sz); - - /* Read back so that we sync the writes */ - in_be32(LAWAR_ADDR(idx)); -} - -void disable_law(u8 idx) -{ - gd->used_laws &= ~(1 << idx); - - out_be32(LAWAR_ADDR(idx), 0); - set_law_base_addr(idx, 0); - - /* Read back so that we sync the writes */ - in_be32(LAWAR_ADDR(idx)); - - return; -} - -#ifndef CONFIG_NAND_SPL -static int get_law_entry(u8 i, struct law_entry *e) -{ - u32 lawar; - - lawar = in_be32(LAWAR_ADDR(i)); - - if (!(lawar & LAW_EN)) - return 0; - - e->addr = get_law_base_addr(i); - e->size = lawar & 0x3f; - e->trgt_id = (lawar >> 20) & 0xff; - - return 1; -} -#endif - -int set_next_law(phys_addr_t addr, enum law_size sz, enum law_trgt_if id) -{ - u32 idx = ffz(gd->used_laws); - - if (idx >= FSL_HW_NUM_LAWS) - return -1; - - set_law(idx, addr, sz, id); - - return idx; -} - -#ifndef CONFIG_NAND_SPL -int set_last_law(phys_addr_t addr, enum law_size sz, enum law_trgt_if id) -{ - u32 idx; - - /* we have no LAWs free */ - if (gd->used_laws == -1) - return -1; - - /* grab the last free law */ - idx = __ilog2(~(gd->used_laws)); - - if (idx >= FSL_HW_NUM_LAWS) - return -1; - - set_law(idx, addr, sz, id); - - return idx; -} - -struct law_entry find_law(phys_addr_t addr) -{ - struct law_entry entry; - int i; - - entry.index = -1; - entry.addr = 0; - entry.size = 0; - entry.trgt_id = 0; - - for (i = 0; i < FSL_HW_NUM_LAWS; i++) { - u64 upper; - - if (!get_law_entry(i, &entry)) - continue; - - upper = entry.addr + (2ull << entry.size); - if ((addr >= entry.addr) && (addr < upper)) { - entry.index = i; - break; - } - } - - return entry; -} - -void print_laws(void) -{ - int i; - u32 lawar; - - printf("\nLocal Access Window Configuration\n"); - for (i = 0; i < FSL_HW_NUM_LAWS; i++) { - lawar = in_be32(LAWAR_ADDR(i)); -#ifdef CONFIG_FSL_CORENET - printf("LAWBARH%02d: 0x%08x LAWBARL%02d: 0x%08x", - i, in_be32(LAWBARH_ADDR(i)), - i, 
in_be32(LAWBARL_ADDR(i))); -#else - printf("LAWBAR%02d: 0x%08x", i, in_be32(LAWBAR_ADDR(i))); -#endif - printf(" LAWAR%02d: 0x%08x\n", i, lawar); - printf("\t(EN: %d TGT: 0x%02x SIZE: ", - (lawar & LAW_EN) ? 1 : 0, (lawar >> 20) & 0xff); - print_size(lawar_size(lawar), ")\n"); - } - - return; -} - -/* use up to 2 LAWs for DDR, used the last available LAWs */ -int set_ddr_laws(u64 start, u64 sz, enum law_trgt_if id) -{ - u64 start_align, law_sz; - int law_sz_enc; - - if (start == 0) - start_align = 1ull << (LAW_SIZE_32G + 1); - else - start_align = 1ull << (ffs64(start) - 1); - law_sz = min(start_align, sz); - law_sz_enc = __ilog2_u64(law_sz) - 1; - - if (set_last_law(start, law_sz_enc, id) < 0) - return -1; - - /* recalculate size based on what was actually covered by the law */ - law_sz = 1ull << __ilog2_u64(law_sz); - - /* do we still have anything to map */ - sz = sz - law_sz; - if (sz) { - start += law_sz; - - start_align = 1ull << (ffs64(start) - 1); - law_sz = min(start_align, sz); - law_sz_enc = __ilog2_u64(law_sz) - 1; - - if (set_last_law(start, law_sz_enc, id) < 0) - return -1; - } else { - return 0; - } - - /* do we still have anything to map */ - sz = sz - law_sz; - if (sz) - return 1; - - return 0; -} -#endif - -void init_laws(void) -{ - int i; - -#if FSL_HW_NUM_LAWS < 32 - gd->used_laws = ~((1 << FSL_HW_NUM_LAWS) - 1); -#elif FSL_HW_NUM_LAWS == 32 - gd->used_laws = 0; -#else -#error FSL_HW_NUM_LAWS can not be greater than 32 w/o code changes -#endif - - /* - * Any LAWs that were set up before we booted assume they are meant to - * be around and mark them used. - */ - for (i = 0; i < FSL_HW_NUM_LAWS; i++) { - u32 lawar = in_be32(LAWAR_ADDR(i)); - - if (lawar & LAW_EN) - gd->used_laws |= (1 << i); - } - -#if defined(CONFIG_NAND_U_BOOT) && !defined(CONFIG_NAND_SPL) - /* - * in NAND boot we've already parsed the law_table and setup those LAWs - * so don't do it again. - */ - return; -#endif - - for (i = 0; i < num_law_entries; i++) { - if (law_table[i].index == -1) - set_next_law(law_table[i].addr, law_table[i].size, - law_table[i].trgt_id); - else - set_law(law_table[i].index, law_table[i].addr, - law_table[i].size, law_table[i].trgt_id); - } - -#ifdef CONFIG_SRIO_PCIE_BOOT_SLAVE - /* check RCW to get which port is used for boot */ - ccsr_gur_t *gur = (void *)CONFIG_SYS_MPC85xx_GUTS_ADDR; - u32 bootloc = in_be32(&gur->rcwsr[6]); - /* - * in SRIO or PCIE boot we need to set specail LAWs for - * SRIO or PCIE interfaces. 
- */ - switch ((bootloc & FSL_CORENET_RCWSR6_BOOT_LOC) >> 23) { - case 0x0: /* boot from PCIE1 */ - set_next_law(CONFIG_SYS_SRIO_PCIE_BOOT_SLAVE_ADDR_PHYS, - LAW_SIZE_1M, - LAW_TRGT_IF_PCIE_1); - set_next_law(CONFIG_SYS_SRIO_PCIE_BOOT_UCODE_ENV_ADDR_PHYS, - LAW_SIZE_1M, - LAW_TRGT_IF_PCIE_1); - break; - case 0x1: /* boot from PCIE2 */ - set_next_law(CONFIG_SYS_SRIO_PCIE_BOOT_SLAVE_ADDR_PHYS, - LAW_SIZE_1M, - LAW_TRGT_IF_PCIE_2); - set_next_law(CONFIG_SYS_SRIO_PCIE_BOOT_UCODE_ENV_ADDR_PHYS, - LAW_SIZE_1M, - LAW_TRGT_IF_PCIE_2); - break; - case 0x2: /* boot from PCIE3 */ - set_next_law(CONFIG_SYS_SRIO_PCIE_BOOT_SLAVE_ADDR_PHYS, - LAW_SIZE_1M, - LAW_TRGT_IF_PCIE_3); - set_next_law(CONFIG_SYS_SRIO_PCIE_BOOT_UCODE_ENV_ADDR_PHYS, - LAW_SIZE_1M, - LAW_TRGT_IF_PCIE_3); - break; - case 0x8: /* boot from SRIO1 */ - set_next_law(CONFIG_SYS_SRIO_PCIE_BOOT_SLAVE_ADDR_PHYS, - LAW_SIZE_1M, - LAW_TRGT_IF_RIO_1); - set_next_law(CONFIG_SYS_SRIO_PCIE_BOOT_UCODE_ENV_ADDR_PHYS, - LAW_SIZE_1M, - LAW_TRGT_IF_RIO_1); - break; - case 0x9: /* boot from SRIO2 */ - set_next_law(CONFIG_SYS_SRIO_PCIE_BOOT_SLAVE_ADDR_PHYS, - LAW_SIZE_1M, - LAW_TRGT_IF_RIO_2); - set_next_law(CONFIG_SYS_SRIO_PCIE_BOOT_UCODE_ENV_ADDR_PHYS, - LAW_SIZE_1M, - LAW_TRGT_IF_RIO_2); - break; - default: - break; - } -#endif - - return ; -} diff --git a/drivers/misc/pmic_core.c b/drivers/misc/pmic_core.c deleted file mode 100644 index 5d62a56d346..00000000000 --- a/drivers/misc/pmic_core.c +++ /dev/null @@ -1,147 +0,0 @@ -/* - * Copyright (C) 2011 Samsung Electronics - * Lukasz Majewski <l.majewski@samsung.com> - * - * (C) Copyright 2010 - * Stefano Babic, DENX Software Engineering, sbabic@denx.de - * - * (C) Copyright 2008-2009 Freescale Semiconductor, Inc. - * - * See file CREDITS for list of people who contributed to this - * project. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation; either version 2 of - * the License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, - * MA 02111-1307 USA - */ - -#include <common.h> -#include <linux/types.h> -#include <pmic.h> - -static struct pmic pmic; - -int check_reg(u32 reg) -{ - if (reg >= pmic.number_of_regs) { - printf("<reg num> = %d is invalid. 
Should be less than %d\n", - reg, pmic.number_of_regs); - return -1; - } - return 0; -} - -int pmic_set_output(struct pmic *p, u32 reg, int out, int on) -{ - u32 val; - - if (pmic_reg_read(p, reg, &val)) - return -1; - - if (on) - val |= out; - else - val &= ~out; - - if (pmic_reg_write(p, reg, val)) - return -1; - - return 0; -} - -static void pmic_show_info(struct pmic *p) -{ - printf("PMIC: %s\n", p->name); -} - -static void pmic_dump(struct pmic *p) -{ - int i, ret; - u32 val; - - pmic_show_info(p); - for (i = 0; i < p->number_of_regs; i++) { - ret = pmic_reg_read(p, i, &val); - if (ret) - puts("PMIC: Registers dump failed\n"); - - if (!(i % 8)) - printf("\n0x%02x: ", i); - - printf("%08x ", val); - } - puts("\n"); -} - -struct pmic *get_pmic(void) -{ - return &pmic; -} - -int do_pmic(cmd_tbl_t *cmdtp, int flag, int argc, char * const argv[]) -{ - u32 ret, reg, val; - char *cmd; - - struct pmic *p = &pmic; - - /* at least two arguments please */ - if (argc < 2) - return cmd_usage(cmdtp); - - cmd = argv[1]; - if (strcmp(cmd, "dump") == 0) { - pmic_dump(p); - return 0; - } - - if (strcmp(cmd, "read") == 0) { - if (argc < 3) - return cmd_usage(cmdtp); - - reg = simple_strtoul(argv[2], NULL, 16); - - ret = pmic_reg_read(p, reg, &val); - - if (ret) - puts("PMIC: Register read failed\n"); - - printf("\n0x%02x: 0x%08x\n", reg, val); - - return 0; - } - - if (strcmp(cmd, "write") == 0) { - if (argc < 4) - return cmd_usage(cmdtp); - - reg = simple_strtoul(argv[2], NULL, 16); - val = simple_strtoul(argv[3], NULL, 16); - - pmic_reg_write(p, reg, val); - - return 0; - } - - /* No subcommand found */ - return 1; -} - -U_BOOT_CMD( - pmic, CONFIG_SYS_MAXARGS, 1, do_pmic, - "PMIC", - "dump - dump PMIC registers\n" - "pmic read <reg> - read register\n" - "pmic write <reg> <value> - write register" -); diff --git a/drivers/misc/pmic_max8997.c b/drivers/misc/pmic_max8997.c deleted file mode 100644 index 62dbc05311c..00000000000 --- a/drivers/misc/pmic_max8997.c +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Copyright (C) 2012 Samsung Electronics - * Lukasz Majewski <l.majewski@samsung.com> - * - * See file CREDITS for list of people who contributed to this - * project. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation; either version 2 of - * the License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, - * MA 02111-1307 USA - */ - -#include <common.h> -#include <pmic.h> -#include <max8997_pmic.h> - -int pmic_init(void) -{ - struct pmic *p = get_pmic(); - static const char name[] = "MAX8997_PMIC"; - - puts("Board PMIC init\n"); - - p->name = name; - p->interface = PMIC_I2C; - p->number_of_regs = PMIC_NUM_OF_REGS; - p->hw.i2c.addr = MAX8997_I2C_ADDR; - p->hw.i2c.tx_num = 1; - p->bus = I2C_PMIC; - - return 0; -} diff --git a/drivers/mmc/Makefile b/drivers/mmc/Makefile index a1dd7302bfc..65791aa2184 100644 --- a/drivers/mmc/Makefile +++ b/drivers/mmc/Makefile @@ -47,6 +47,7 @@ COBJS-$(CONFIG_S5P_SDHCI) += s5p_sdhci.o COBJS-$(CONFIG_SH_MMCIF) += sh_mmcif.o COBJS-$(CONFIG_TEGRA_MMC) += tegra_mmc.o COBJS-$(CONFIG_DWMMC) += dw_mmc.o +COBJS-$(CONFIG_EXYNOS_DWMMC) += exynos_dw_mmc.o COBJS := $(COBJS-y) SRCS := $(COBJS:.o=.c) diff --git a/drivers/mmc/exynos_dw_mmc.c b/drivers/mmc/exynos_dw_mmc.c new file mode 100644 index 00000000000..72a31b73fd5 --- /dev/null +++ b/drivers/mmc/exynos_dw_mmc.c @@ -0,0 +1,57 @@ +/* + * (C) Copyright 2012 SAMSUNG Electronics + * Jaehoon Chung <jh80.chung@samsung.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ + +#include <common.h> +#include <malloc.h> +#include <dwmmc.h> +#include <asm/arch/dwmmc.h> +#include <asm/arch/clk.h> + +static char *EXYNOS_NAME = "EXYNOS DWMMC"; + +static void exynos_dwmci_clksel(struct dwmci_host *host) +{ + u32 val; + val = DWMCI_SET_SAMPLE_CLK(DWMCI_SHIFT_0) | + DWMCI_SET_DRV_CLK(DWMCI_SHIFT_0) | DWMCI_SET_DIV_RATIO(0); + + dwmci_writel(host, DWMCI_CLKSEL, val); +} + +int exynos_dwmci_init(u32 regbase, int bus_width, int index) +{ + struct dwmci_host *host = NULL; + host = malloc(sizeof(struct dwmci_host)); + if (!host) { + printf("dwmci_host malloc fail!\n"); + return 1; + } + + host->name = EXYNOS_NAME; + host->ioaddr = (void *)regbase; + host->buswidth = bus_width; + host->clksel = exynos_dwmci_clksel; + host->dev_index = index; + + add_dwmci(host, 52000000, 400000); + + return 0; +} + diff --git a/drivers/mmc/mmc.c b/drivers/mmc/mmc.c index 5ffd8c59e6e..72e8ce6da42 100644 --- a/drivers/mmc/mmc.c +++ b/drivers/mmc/mmc.c @@ -868,7 +868,7 @@ static void mmc_set_bus_width(struct mmc *mmc, uint width) static int mmc_startup(struct mmc *mmc) { - int err, width; + int err; uint mult, freq; u64 cmult, csize, capacity; struct mmc_cmd cmd; @@ -1087,21 +1087,44 @@ static int mmc_startup(struct mmc *mmc) else mmc->tran_speed = 25000000; } else { - width = ((mmc->host_caps & MMC_MODE_MASK_WIDTH_BITS) >> - MMC_MODE_WIDTH_BITS_SHIFT); - for (; width >= 0; width--) { - /* Set the card to use 4 bit*/ + int idx; + + /* An array of possible bus widths in order of preference */ + static unsigned ext_csd_bits[] = { + EXT_CSD_BUS_WIDTH_8, + EXT_CSD_BUS_WIDTH_4, + EXT_CSD_BUS_WIDTH_1, + }; + + /* An array to map CSD bus widths to host cap bits */ + static unsigned ext_to_hostcaps[] = { + [EXT_CSD_BUS_WIDTH_4] = MMC_MODE_4BIT, + [EXT_CSD_BUS_WIDTH_8] = MMC_MODE_8BIT, + }; + + /* An array to map chosen bus width to an integer */ + static unsigned widths[] = { + 8, 4, 1, + }; + + for (idx=0; idx < ARRAY_SIZE(ext_csd_bits); idx++) { + unsigned int extw = ext_csd_bits[idx]; + + /* + * Check to make sure the controller supports + * this bus width, if it's more than 1 + */ + if (extw != EXT_CSD_BUS_WIDTH_1 && + !(mmc->host_caps & ext_to_hostcaps[extw])) + continue; + err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, - EXT_CSD_BUS_WIDTH, width); + EXT_CSD_BUS_WIDTH, extw); if (err) continue; - if (!width) { - mmc_set_bus_width(mmc, 1); - break; - } else - mmc_set_bus_width(mmc, 4 * width); + mmc_set_bus_width(mmc, widths[idx]); err = mmc_send_ext_csd(mmc, test_csd); if (!err && ext_csd[EXT_CSD_PARTITIONING_SUPPORT] \ @@ -1115,7 +1138,7 @@ static int mmc_startup(struct mmc *mmc) && memcmp(&ext_csd[EXT_CSD_SEC_CNT], \ &test_csd[EXT_CSD_SEC_CNT], 4) == 0) { - mmc->card_caps |= width; + mmc->card_caps |= ext_to_hostcaps[extw]; break; } } @@ -1135,13 +1158,15 @@ static int mmc_startup(struct mmc *mmc) mmc->block_dev.type = 0; mmc->block_dev.blksz = mmc->read_bl_len; mmc->block_dev.lba = lldiv(mmc->capacity, mmc->read_bl_len); - sprintf(mmc->block_dev.vendor, "Man %06x Snr %08x", mmc->cid[0] >> 8, - (mmc->cid[2] << 8) | (mmc->cid[3] >> 24)); - sprintf(mmc->block_dev.product, "%c%c%c%c%c", mmc->cid[0] & 0xff, - (mmc->cid[1] >> 24), (mmc->cid[1] >> 16) & 0xff, - (mmc->cid[1] >> 8) & 0xff, mmc->cid[1] & 0xff); - sprintf(mmc->block_dev.revision, "%d.%d", mmc->cid[2] >> 28, - 
(mmc->cid[2] >> 24) & 0xf); + sprintf(mmc->block_dev.vendor, "Man %06x Snr %04x%04x", + mmc->cid[0] >> 24, (mmc->cid[2] & 0xffff), + (mmc->cid[3] >> 16) & 0xffff); + sprintf(mmc->block_dev.product, "%c%c%c%c%c%c", mmc->cid[0] & 0xff, + (mmc->cid[1] >> 24), (mmc->cid[1] >> 16) & 0xff, + (mmc->cid[1] >> 8) & 0xff, mmc->cid[1] & 0xff, + (mmc->cid[2] >> 24) & 0xff); + sprintf(mmc->block_dev.revision, "%d.%d", (mmc->cid[2] >> 20) & 0xf, + (mmc->cid[2] >> 16) & 0xf); #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBDISK_SUPPORT) init_part(&mmc->block_dev); #endif diff --git a/drivers/mmc/mxsmmc.c b/drivers/mmc/mxsmmc.c index 109acbf6234..024df592f22 100644 --- a/drivers/mmc/mxsmmc.c +++ b/drivers/mmc/mxsmmc.c @@ -96,11 +96,11 @@ static int mxsmmc_send_cmd_pio(struct mxsmmc_priv *priv, struct mmc_data *data) static int mxsmmc_send_cmd_dma(struct mxsmmc_priv *priv, struct mmc_data *data) { uint32_t data_count = data->blocksize * data->blocks; - uint32_t cache_data_count = roundup(data_count, ARCH_DMA_MINALIGN); int dmach; struct mxs_dma_desc *desc = priv->desc; - void *addr, *backup; - uint8_t flags; + void *addr; + unsigned int flags; + struct bounce_buffer bbstate; memset(desc, 0, sizeof(struct mxs_dma_desc)); desc->address = (dma_addr_t)desc; @@ -115,19 +115,9 @@ static int mxsmmc_send_cmd_dma(struct mxsmmc_priv *priv, struct mmc_data *data) flags = GEN_BB_READ; } - bounce_buffer_start(&addr, data_count, &backup, flags); + bounce_buffer_start(&bbstate, addr, data_count, flags); - priv->desc->cmd.address = (dma_addr_t)addr; - - if (data->flags & MMC_DATA_WRITE) { - /* Flush data to DRAM so DMA can pick them up */ - flush_dcache_range((uint32_t)addr, - (uint32_t)(addr) + cache_data_count); - } - - /* Invalidate the area, so no writeback into the RAM races with DMA */ - invalidate_dcache_range((uint32_t)priv->desc->cmd.address, - (uint32_t)(priv->desc->cmd.address + cache_data_count)); + priv->desc->cmd.address = (dma_addr_t)bbstate.bounce_buffer; priv->desc->cmd.data |= MXS_DMA_DESC_IRQ | MXS_DMA_DESC_DEC_SEM | (data_count << MXS_DMA_DESC_BYTES_OFFSET); @@ -135,17 +125,11 @@ static int mxsmmc_send_cmd_dma(struct mxsmmc_priv *priv, struct mmc_data *data) dmach = MXS_DMA_CHANNEL_AHB_APBH_SSP0 + priv->id; mxs_dma_desc_append(dmach, priv->desc); if (mxs_dma_go(dmach)) { - bounce_buffer_stop(&addr, data_count, &backup, flags); + bounce_buffer_stop(&bbstate); return COMM_ERR; } - /* The data arrived into DRAM, invalidate cache over them */ - if (data->flags & MMC_DATA_READ) { - invalidate_dcache_range((uint32_t)addr, - (uint32_t)(addr) + cache_data_count); - } - - bounce_buffer_stop(&addr, data_count, &backup, flags); + bounce_buffer_stop(&bbstate); return 0; } diff --git a/drivers/mmc/sdhci.c b/drivers/mmc/sdhci.c index 7845f873ac9..b9cbe34f1f1 100644 --- a/drivers/mmc/sdhci.c +++ b/drivers/mmc/sdhci.c @@ -340,6 +340,9 @@ static void sdhci_set_power(struct sdhci_host *host, unsigned short power) return; } + if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER) + sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL); + pwr |= SDHCI_POWER_ON; sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL); diff --git a/drivers/mmc/tegra_mmc.c b/drivers/mmc/tegra_mmc.c index 1fd5592f2de..d749ab095e3 100644 --- a/drivers/mmc/tegra_mmc.c +++ b/drivers/mmc/tegra_mmc.c @@ -19,6 +19,7 @@ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ +#include <bouncebuf.h> #include <common.h> #include <asm/gpio.h> #include <asm/io.h> @@ -66,14 +67,17 @@ static void tegra_get_setup(struct mmc_host *host, int 
dev_index) host->reg = (struct tegra_mmc *)host->base; } -static void mmc_prepare_data(struct mmc_host *host, struct mmc_data *data) +static void mmc_prepare_data(struct mmc_host *host, struct mmc_data *data, + struct bounce_buffer *bbstate) { unsigned char ctrl; - debug("data->dest: %08X, data->blocks: %u, data->blocksize: %u\n", - (u32)data->dest, data->blocks, data->blocksize); - writel((u32)data->dest, &host->reg->sysad); + debug("buf: %p (%p), data->blocks: %u, data->blocksize: %u\n", + bbstate->bounce_buffer, bbstate->user_buffer, data->blocks, + data->blocksize); + + writel((u32)bbstate->bounce_buffer, &host->reg->sysad); /* * DMASEL[4:3] * 00 = Selects SDMA @@ -114,14 +118,6 @@ static void mmc_set_transfer_mode(struct mmc_host *host, struct mmc_data *data) if (data->flags & MMC_DATA_READ) mode |= TEGRA_MMC_TRNMOD_DATA_XFER_DIR_SEL_READ; - if (data->flags & MMC_DATA_WRITE) { - if ((uintptr_t)data->src & (ARCH_DMA_MINALIGN - 1)) - printf("Warning: unaligned write to %p may fail\n", - data->src); - flush_dcache_range((ulong)data->src, (ulong)data->src + - data->blocks * data->blocksize); - } - writew(mode, &host->reg->trnmod); } @@ -156,8 +152,8 @@ static int mmc_wait_inhibit(struct mmc_host *host, return 0; } -static int mmc_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd, - struct mmc_data *data) +static int mmc_send_cmd_bounced(struct mmc *mmc, struct mmc_cmd *cmd, + struct mmc_data *data, struct bounce_buffer *bbstate) { struct mmc_host *host = (struct mmc_host *)mmc->priv; int flags, i; @@ -172,7 +168,7 @@ static int mmc_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd, return result; if (data) - mmc_prepare_data(host, data); + mmc_prepare_data(host, data, bbstate); debug("cmd->arg: %08x\n", cmd->cmdarg); writel(cmd->cmdarg, &host->reg->argument); @@ -322,20 +318,42 @@ static int mmc_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd, } } writel(mask, &host->reg->norintsts); - if (data->flags & MMC_DATA_READ) { - if ((uintptr_t)data->dest & (ARCH_DMA_MINALIGN - 1)) - printf("Warning: unaligned read from %p " - "may fail\n", data->dest); - invalidate_dcache_range((ulong)data->dest, - (ulong)data->dest + - data->blocks * data->blocksize); - } } udelay(1000); return 0; } +static int mmc_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd, + struct mmc_data *data) +{ + void *buf; + unsigned int bbflags; + size_t len; + struct bounce_buffer bbstate; + int ret; + + if (data) { + if (data->flags & MMC_DATA_READ) { + buf = data->dest; + bbflags = GEN_BB_WRITE; + } else { + buf = (void *)data->src; + bbflags = GEN_BB_READ; + } + len = data->blocks * data->blocksize; + + bounce_buffer_start(&bbstate, buf, len, bbflags); + } + + ret = mmc_send_cmd_bounced(mmc, cmd, data, &bbstate); + + if (data) + bounce_buffer_stop(&bbstate); + + return ret; +} + static void mmc_change_clock(struct mmc_host *host, uint clock) { int div; diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile index beb99cacb66..28e52bd08ea 100644 --- a/drivers/mtd/nand/Makefile +++ b/drivers/mtd/nand/Makefile @@ -26,21 +26,33 @@ include $(TOPDIR)/config.mk LIB := $(obj)libnand.o ifdef CONFIG_CMD_NAND + ifdef CONFIG_SPL_BUILD -ifdef CONFIG_SPL_NAND_SIMPLE -COBJS-y += nand_spl_simple.o -endif -ifdef CONFIG_SPL_NAND_LOAD -COBJS-y += nand_spl_load.o + +ifdef CONFIG_SPL_NAND_DRIVERS +NORMAL_DRIVERS=y endif -else + +COBJS-$(CONFIG_SPL_NAND_SIMPLE) += nand_spl_simple.o +COBJS-$(CONFIG_SPL_NAND_LOAD) += nand_spl_load.o +COBJS-$(CONFIG_SPL_NAND_ECC) += nand_ecc.o +COBJS-$(CONFIG_SPL_NAND_BASE) += nand_base.o + +else # not spl + 
+NORMAL_DRIVERS=y + COBJS-y += nand.o COBJS-y += nand_bbt.o COBJS-y += nand_ids.o COBJS-y += nand_util.o -endif COBJS-y += nand_ecc.o COBJS-y += nand_base.o + +endif # not spl + +ifdef NORMAL_DRIVERS + COBJS-$(CONFIG_NAND_ECC_BCH) += nand_bch.o COBJS-$(CONFIG_NAND_ATMEL) += atmel_nand.o @@ -65,7 +77,13 @@ COBJS-$(CONFIG_NAND_SPEAR) += spr_nand.o COBJS-$(CONFIG_TEGRA_NAND) += tegra_nand.o COBJS-$(CONFIG_NAND_OMAP_GPMC) += omap_gpmc.o COBJS-$(CONFIG_NAND_PLAT) += nand_plat.o -endif + +else # minimal SPL drivers + +COBJS-$(CONFIG_NAND_FSL_ELBC) += fsl_elbc_spl.o + +endif # drivers +endif # nand COBJS := $(COBJS-y) SRCS := $(COBJS:.o=.c) diff --git a/drivers/mtd/nand/fsl_elbc_nand.c b/drivers/mtd/nand/fsl_elbc_nand.c index 9076ad4cdc3..834a8a64983 100644 --- a/drivers/mtd/nand/fsl_elbc_nand.c +++ b/drivers/mtd/nand/fsl_elbc_nand.c @@ -748,7 +748,7 @@ static int fsl_elbc_chip_init(int devnum, u8 *addr) /* set up nand options */ nand->options = NAND_NO_READRDY | NAND_NO_AUTOINCR | - NAND_USE_FLASH_BBT; + NAND_USE_FLASH_BBT | NAND_NO_SUBPAGE_WRITE; nand->controller = &elbc_ctrl->controller; nand->priv = priv; diff --git a/drivers/mtd/nand/fsl_elbc_spl.c b/drivers/mtd/nand/fsl_elbc_spl.c new file mode 100644 index 00000000000..50ff4fe93e4 --- /dev/null +++ b/drivers/mtd/nand/fsl_elbc_spl.c @@ -0,0 +1,168 @@ +/* + * NAND boot for Freescale Enhanced Local Bus Controller, Flash Control Machine + * + * (C) Copyright 2006-2008 + * Stefan Roese, DENX Software Engineering, sr@denx.de. + * + * Copyright (c) 2008 Freescale Semiconductor, Inc. + * Author: Scott Wood <scottwood@freescale.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, + * MA 02111-1307 USA + */ + +#include <common.h> +#include <asm/io.h> +#include <asm/fsl_lbc.h> +#include <nand.h> + +#define WINDOW_SIZE 8192 + +static void nand_wait(void) +{ + fsl_lbc_t *regs = LBC_BASE_ADDR; + + for (;;) { + uint32_t status = in_be32(®s->ltesr); + + if (status == 1) + return; + + if (status & 1) { + puts("read failed (ltesr)\n"); + for (;;); + } + } +} + +static int nand_load_image(uint32_t offs, unsigned int uboot_size, void *vdst) +{ + fsl_lbc_t *regs = LBC_BASE_ADDR; + uchar *buf = (uchar *)CONFIG_SYS_NAND_BASE; + const int large = CONFIG_SYS_NAND_OR_PRELIM & OR_FCM_PGS; + const int block_shift = large ? 17 : 14; + const int block_size = 1 << block_shift; + const int page_size = large ? 2048 : 512; + const int bad_marker = large ? 
page_size + 0 : page_size + 5; + int fmr = (15 << FMR_CWTO_SHIFT) | (2 << FMR_AL_SHIFT) | 2; + int pos = 0; + char *dst = vdst; + + if (offs & (block_size - 1)) { + puts("bad offset\n"); + for (;;); + } + + if (large) { + fmr |= FMR_ECCM; + out_be32(®s->fcr, (NAND_CMD_READ0 << FCR_CMD0_SHIFT) | + (NAND_CMD_READSTART << FCR_CMD1_SHIFT)); + out_be32(®s->fir, + (FIR_OP_CW0 << FIR_OP0_SHIFT) | + (FIR_OP_CA << FIR_OP1_SHIFT) | + (FIR_OP_PA << FIR_OP2_SHIFT) | + (FIR_OP_CW1 << FIR_OP3_SHIFT) | + (FIR_OP_RBW << FIR_OP4_SHIFT)); + } else { + out_be32(®s->fcr, NAND_CMD_READ0 << FCR_CMD0_SHIFT); + out_be32(®s->fir, + (FIR_OP_CW0 << FIR_OP0_SHIFT) | + (FIR_OP_CA << FIR_OP1_SHIFT) | + (FIR_OP_PA << FIR_OP2_SHIFT) | + (FIR_OP_RBW << FIR_OP3_SHIFT)); + } + + out_be32(®s->fbcr, 0); + clrsetbits_be32(®s->bank[0].br, BR_DECC, BR_DECC_CHK_GEN); + + while (pos < uboot_size) { + int i = 0; + out_be32(®s->fbar, offs >> block_shift); + + do { + int j; + unsigned int page_offs = (offs & (block_size - 1)) << 1; + + out_be32(®s->ltesr, ~0); + out_be32(®s->lteatr, 0); + out_be32(®s->fpar, page_offs); + out_be32(®s->fmr, fmr); + out_be32(®s->lsor, 0); + nand_wait(); + + page_offs %= WINDOW_SIZE; + + /* + * If either of the first two pages are marked bad, + * continue to the next block. + */ + if (i++ < 2 && buf[page_offs + bad_marker] != 0xff) { + puts("skipping\n"); + offs = (offs + block_size) & ~(block_size - 1); + pos &= ~(block_size - 1); + break; + } + + for (j = 0; j < page_size; j++) + dst[pos + j] = buf[page_offs + j]; + + pos += page_size; + offs += page_size; + } while ((offs & (block_size - 1)) && (pos < uboot_size)); + } + + return 0; +} + +/* + * The main entry for NAND booting. It's necessary that SDRAM is already + * configured and available since this code loads the main U-Boot image + * from NAND into SDRAM and starts it from there. + */ +void nand_boot(void) +{ + __attribute__((noreturn)) void (*uboot)(void); + /* + * Load U-Boot image from NAND into RAM + */ + nand_load_image(CONFIG_SYS_NAND_U_BOOT_OFFS, + CONFIG_SYS_NAND_U_BOOT_SIZE, + (void *)CONFIG_SYS_NAND_U_BOOT_DST); + +#ifdef CONFIG_NAND_ENV_DST + nand_load_image(CONFIG_ENV_OFFSET, CONFIG_ENV_SIZE, + (void *)CONFIG_NAND_ENV_DST); + +#ifdef CONFIG_ENV_OFFSET_REDUND + nand_load_image(CONFIG_ENV_OFFSET_REDUND, CONFIG_ENV_SIZE, + (void *)CONFIG_NAND_ENV_DST + CONFIG_ENV_SIZE); +#endif +#endif + +#ifdef CONFIG_SPL_FLUSH_IMAGE + /* + * Clean d-cache and invalidate i-cache, to + * make sure that no stale data is executed. 
+ */ + flush_cache(CONFIG_SYS_NAND_U_BOOT_DST, CONFIG_SYS_NAND_U_BOOT_SIZE); +#endif + + puts("transfering control\n"); + /* + * Jump to U-Boot image + */ + uboot = (void *)CONFIG_SYS_NAND_U_BOOT_START; + (*uboot)(); +} diff --git a/drivers/mtd/nand/fsl_ifc_nand.c b/drivers/mtd/nand/fsl_ifc_nand.c index b3b7c705e18..0878bece675 100644 --- a/drivers/mtd/nand/fsl_ifc_nand.c +++ b/drivers/mtd/nand/fsl_ifc_nand.c @@ -30,6 +30,7 @@ #include <asm/errno.h> #include <asm/fsl_ifc.h> +#define FSL_IFC_V1_1_0 0x01010000 #define MAX_BANKS 4 #define ERR_BYTE 0xFF /* Value returned for read bytes when read failed */ @@ -738,11 +739,66 @@ static void fsl_ifc_select_chip(struct mtd_info *mtd, int chip) { } +static void fsl_ifc_sram_init(void) +{ + struct fsl_ifc *ifc = ifc_ctrl->regs; + uint32_t cs = 0, csor = 0, csor_8k = 0, csor_ext = 0; + long long end_tick; + + cs = ifc_ctrl->cs_nand >> IFC_NAND_CSEL_SHIFT; + + /* Save CSOR and CSOR_ext */ + csor = in_be32(&ifc_ctrl->regs->csor_cs[cs].csor); + csor_ext = in_be32(&ifc_ctrl->regs->csor_cs[cs].csor_ext); + + /* chage PageSize 8K and SpareSize 1K*/ + csor_8k = (csor & ~(CSOR_NAND_PGS_MASK)) | 0x0018C000; + out_be32(&ifc_ctrl->regs->csor_cs[cs].csor, csor_8k); + out_be32(&ifc_ctrl->regs->csor_cs[cs].csor_ext, 0x0000400); + + /* READID */ + out_be32(&ifc->ifc_nand.nand_fir0, + (IFC_FIR_OP_CMD0 << IFC_NAND_FIR0_OP0_SHIFT) | + (IFC_FIR_OP_UA << IFC_NAND_FIR0_OP1_SHIFT) | + (IFC_FIR_OP_RB << IFC_NAND_FIR0_OP2_SHIFT)); + out_be32(&ifc->ifc_nand.nand_fcr0, + NAND_CMD_READID << IFC_NAND_FCR0_CMD0_SHIFT); + out_be32(&ifc->ifc_nand.row3, 0x0); + + out_be32(&ifc->ifc_nand.nand_fbcr, 0x0); + + /* Program ROW0/COL0 */ + out_be32(&ifc->ifc_nand.row0, 0x0); + out_be32(&ifc->ifc_nand.col0, 0x0); + + /* set the chip select for NAND Transaction */ + out_be32(&ifc->ifc_nand.nand_csel, ifc_ctrl->cs_nand); + + /* start read seq */ + out_be32(&ifc->ifc_nand.nandseq_strt, IFC_NAND_SEQ_STRT_FIR_STRT); + + /* wait for NAND Machine complete flag or timeout */ + end_tick = usec2ticks(IFC_TIMEOUT_MSECS * 1000) + get_ticks(); + + while (end_tick > get_ticks()) { + ifc_ctrl->status = in_be32(&ifc->ifc_nand.nand_evter_stat); + + if (ifc_ctrl->status & IFC_NAND_EVTER_STAT_OPC) + break; + } + + out_be32(&ifc->ifc_nand.nand_evter_stat, ifc_ctrl->status); + + /* Restore CSOR and CSOR_ext */ + out_be32(&ifc_ctrl->regs->csor_cs[cs].csor, csor); + out_be32(&ifc_ctrl->regs->csor_cs[cs].csor_ext, csor_ext); +} + int board_nand_init(struct nand_chip *nand) { struct fsl_ifc_mtd *priv; struct nand_ecclayout *layout; - uint32_t cspr = 0, csor = 0; + uint32_t cspr = 0, csor = 0, ver = 0; if (!ifc_ctrl) { fsl_ifc_ctrl_init(); @@ -797,7 +853,7 @@ int board_nand_init(struct nand_chip *nand) /* set up nand options */ nand->options = NAND_NO_READRDY | NAND_NO_AUTOINCR | - NAND_USE_FLASH_BBT; + NAND_USE_FLASH_BBT | NAND_NO_SUBPAGE_WRITE; if (cspr & CSPR_PORT_SIZE_16) { nand->read_byte = fsl_ifc_read_byte16; @@ -861,5 +917,9 @@ int board_nand_init(struct nand_chip *nand) nand->ecc.mode = NAND_ECC_SOFT; } + ver = in_be32(&ifc_ctrl->regs->ifc_rev); + if (ver == FSL_IFC_V1_1_0) + fsl_ifc_sram_init(); + return 0; } diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c index d3b71a50adc..a2d06be99fc 100644 --- a/drivers/mtd/nand/nand_base.c +++ b/drivers/mtd/nand/nand_base.c @@ -1245,7 +1245,8 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from, if (unlikely(ops->mode == MTD_OOB_RAW)) ret = chip->ecc.read_page_raw(mtd, chip, bufpoi, page); - else if (!aligned && 
NAND_SUBPAGE_READ(chip) && !oob) + else if (!aligned && NAND_HAS_SUBPAGE_READ(chip) && + !oob) ret = chip->ecc.read_subpage(mtd, chip, col, bytes, bufpoi); else @@ -1256,7 +1257,7 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from, /* Transfer not aligned data */ if (!aligned) { - if (!NAND_SUBPAGE_READ(chip) && !oob && + if (!NAND_HAS_SUBPAGE_READ(chip) && !oob && !(mtd->ecc_stats.failed - stats.failed)) chip->pagebuf = realpage; memcpy(buf, chip->buffers->databuf + col, bytes); @@ -3150,6 +3151,10 @@ int nand_scan_tail(struct mtd_info *mtd) /* Invalidate the pagebuffer reference */ chip->pagebuf = -1; + /* Large page NAND with SOFT_ECC should support subpage reads */ + if ((chip->ecc.mode == NAND_ECC_SOFT) && (chip->page_shift > 9)) + chip->options |= NAND_SUBPAGE_READ; + /* Fill in remaining MTD driver data */ mtd->type = MTD_NANDFLASH; mtd->flags = (chip->options & NAND_ROM) ? MTD_CAP_ROM : diff --git a/drivers/mtd/nand/nand_util.c b/drivers/mtd/nand/nand_util.c index c4752a7cbf8..2ba0c5ef956 100644 --- a/drivers/mtd/nand/nand_util.c +++ b/drivers/mtd/nand/nand_util.c @@ -50,8 +50,8 @@ #include <nand.h> #include <jffs2/jffs2.h> -typedef struct erase_info erase_info_t; -typedef struct mtd_info mtd_info_t; +typedef struct erase_info erase_info_t; +typedef struct mtd_info mtd_info_t; /* support only for native endian JFFS2 */ #define cpu_to_je16(x) (x) @@ -59,7 +59,7 @@ typedef struct mtd_info mtd_info_t; /** * nand_erase_opts: - erase NAND flash with support for various options - * (jffs2 formating) + * (jffs2 formatting) * * @param meminfo NAND device to erase * @param opts options, @see struct nand_erase_options @@ -80,8 +80,8 @@ int nand_erase_opts(nand_info_t *meminfo, const nand_erase_options_t *opts) struct mtd_oob_ops oob_opts; struct nand_chip *chip = meminfo->priv; - if ((opts->offset & (meminfo->writesize - 1)) != 0) { - printf("Attempt to erase non page aligned data\n"); + if ((opts->offset & (meminfo->erasesize - 1)) != 0) { + printf("Attempt to erase non block-aligned data\n"); return -1; } @@ -94,8 +94,8 @@ int nand_erase_opts(nand_info_t *meminfo, const nand_erase_options_t *opts) erase_length = lldiv(opts->length + meminfo->erasesize - 1, meminfo->erasesize); - cleanmarker.magic = cpu_to_je16 (JFFS2_MAGIC_BITMASK); - cleanmarker.nodetype = cpu_to_je16 (JFFS2_NODETYPE_CLEANMARKER); + cleanmarker.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK); + cleanmarker.nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER); cleanmarker.totlen = cpu_to_je32(8); /* scrub option allows to erase badblock. To prevent internal @@ -118,7 +118,7 @@ int nand_erase_opts(nand_info_t *meminfo, const nand_erase_options_t *opts) erased_length < erase_length; erase.addr += meminfo->erasesize) { - WATCHDOG_RESET (); + WATCHDOG_RESET(); if (!opts->scrub && bbtest) { int ret = meminfo->block_isbad(meminfo, erase.addr); @@ -259,7 +259,7 @@ int nand_lock(struct mtd_info *mtd, int tight) * flash * * @param mtd nand mtd instance - * @param offset page address to query (muss be page aligned!) + * @param offset page address to query (must be page-aligned!) 
* * @return -1 in case of error * >0 lock status: @@ -281,7 +281,7 @@ int nand_get_lock_status(struct mtd_info *mtd, loff_t offset) if ((offset & (mtd->writesize - 1)) != 0) { - printf ("nand_get_lock_status: " + printf("nand_get_lock_status: " "Start address must be beginning of " "nand page!\n"); ret = -1; @@ -332,20 +332,20 @@ int nand_unlock(struct mtd_info *mtd, loff_t start, size_t length, /* check the WP bit */ chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1); if (!(chip->read_byte(mtd) & NAND_STATUS_WP)) { - printf ("nand_unlock: Device is write protected!\n"); + printf("nand_unlock: Device is write protected!\n"); ret = -1; goto out; } if ((start & (mtd->erasesize - 1)) != 0) { - printf ("nand_unlock: Start address must be beginning of " + printf("nand_unlock: Start address must be beginning of " "nand block!\n"); ret = -1; goto out; } if (length == 0 || (length & (mtd->erasesize - 1)) != 0) { - printf ("nand_unlock: Length must be a multiple of nand block " + printf("nand_unlock: Length must be a multiple of nand block " "size %08x!\n", mtd->erasesize); ret = -1; goto out; @@ -485,7 +485,7 @@ int nand_write_skip_bad(nand_info_t *nand, loff_t offset, size_t *length, pages = nand->erasesize / nand->writesize; blocksize = (pages * nand->oobsize) + nand->erasesize; if (*length % (nand->writesize + nand->oobsize)) { - printf ("Attempt to write incomplete page" + printf("Attempt to write incomplete page" " in yaffs mode\n"); return -EINVAL; } @@ -507,25 +507,25 @@ int nand_write_skip_bad(nand_info_t *nand, loff_t offset, size_t *length, * partition boundary). So don't try to handle that. */ if ((offset & (nand->writesize - 1)) != 0) { - printf ("Attempt to write non page aligned data\n"); + printf("Attempt to write non page-aligned data\n"); *length = 0; return -EINVAL; } need_skip = check_skip_len(nand, offset, *length); if (need_skip < 0) { - printf ("Attempt to write outside the flash area\n"); + printf("Attempt to write outside the flash area\n"); *length = 0; return -EINVAL; } if (!need_skip && !(flags & WITH_DROP_FFS)) { - rval = nand_write (nand, offset, length, buffer); + rval = nand_write(nand, offset, length, buffer); if (rval == 0) return 0; *length = 0; - printf ("NAND write to offset %llx failed %d\n", + printf("NAND write to offset %llx failed %d\n", offset, rval); return rval; } @@ -534,10 +534,10 @@ int nand_write_skip_bad(nand_info_t *nand, loff_t offset, size_t *length, size_t block_offset = offset & (nand->erasesize - 1); size_t write_size, truncated_write_size; - WATCHDOG_RESET (); + WATCHDOG_RESET(); - if (nand_block_isbad (nand, offset & ~(nand->erasesize - 1))) { - printf ("Skip bad block 0x%08llx\n", + if (nand_block_isbad(nand, offset & ~(nand->erasesize - 1))) { + printf("Skip bad block 0x%08llx\n", offset & ~(nand->erasesize - 1)); offset += nand->erasesize - block_offset; continue; @@ -592,7 +592,7 @@ int nand_write_skip_bad(nand_info_t *nand, loff_t offset, size_t *length, } if (rval != 0) { - printf ("NAND write to offset %llx failed %d\n", + printf("NAND write to offset %llx failed %d\n", offset, rval); *length -= left_to_write; return rval; @@ -608,13 +608,13 @@ int nand_write_skip_bad(nand_info_t *nand, loff_t offset, size_t *length, * nand_read_skip_bad: * * Read image from NAND flash. - * Blocks that are marked bad are skipped and the next block is readen + * Blocks that are marked bad are skipped and the next block is read * instead as long as the image is short enough to fit even after skipping the * bad blocks. 
* * @param nand NAND device * @param offset offset in flash - * @param length buffer length, on return holds remaining bytes to read + * @param length buffer length, on return holds number of read bytes * @param buffer buffer to write to * @return 0 in case of success */ @@ -627,25 +627,25 @@ int nand_read_skip_bad(nand_info_t *nand, loff_t offset, size_t *length, int need_skip; if ((offset & (nand->writesize - 1)) != 0) { - printf ("Attempt to read non page aligned data\n"); + printf("Attempt to read non page-aligned data\n"); *length = 0; return -EINVAL; } need_skip = check_skip_len(nand, offset, *length); if (need_skip < 0) { - printf ("Attempt to read outside the flash area\n"); + printf("Attempt to read outside the flash area\n"); *length = 0; return -EINVAL; } if (!need_skip) { - rval = nand_read (nand, offset, length, buffer); + rval = nand_read(nand, offset, length, buffer); if (!rval || rval == -EUCLEAN) return 0; *length = 0; - printf ("NAND read from offset %llx failed %d\n", + printf("NAND read from offset %llx failed %d\n", offset, rval); return rval; } @@ -654,10 +654,10 @@ int nand_read_skip_bad(nand_info_t *nand, loff_t offset, size_t *length, size_t block_offset = offset & (nand->erasesize - 1); size_t read_length; - WATCHDOG_RESET (); + WATCHDOG_RESET(); - if (nand_block_isbad (nand, offset & ~(nand->erasesize - 1))) { - printf ("Skipping bad block 0x%08llx\n", + if (nand_block_isbad(nand, offset & ~(nand->erasesize - 1))) { + printf("Skipping bad block 0x%08llx\n", offset & ~(nand->erasesize - 1)); offset += nand->erasesize - block_offset; continue; @@ -668,9 +668,9 @@ int nand_read_skip_bad(nand_info_t *nand, loff_t offset, size_t *length, else read_length = nand->erasesize - block_offset; - rval = nand_read (nand, offset, &read_length, p_buffer); + rval = nand_read(nand, offset, &read_length, p_buffer); if (rval && rval != -EUCLEAN) { - printf ("NAND read from offset %llx failed %d\n", + printf("NAND read from offset %llx failed %d\n", offset, rval); *length -= left_to_read; return rval; @@ -683,3 +683,125 @@ int nand_read_skip_bad(nand_info_t *nand, loff_t offset, size_t *length, return 0; } + +#ifdef CONFIG_CMD_NAND_TORTURE + +/** + * check_pattern: + * + * Check if buffer contains only a certain byte pattern. + * + * @param buf buffer to check + * @param patt the pattern to check + * @param size buffer size in bytes + * @return 1 if there are only patt bytes in buf + * 0 if something else was found + */ +static int check_pattern(const u_char *buf, u_char patt, int size) +{ + int i; + + for (i = 0; i < size; i++) + if (buf[i] != patt) + return 0; + return 1; +} + +/** + * nand_torture: + * + * Torture a block of NAND flash. + * This is useful to determine if a block that caused a write error is still + * good or should be marked as bad. 
+ * + * @param nand NAND device + * @param offset offset in flash + * @return 0 if the block is still good + */ +int nand_torture(nand_info_t *nand, loff_t offset) +{ + u_char patterns[] = {0xa5, 0x5a, 0x00}; + struct erase_info instr = { + .mtd = nand, + .addr = offset, + .len = nand->erasesize, + }; + size_t retlen; + int err, ret = -1, i, patt_count; + u_char *buf; + + if ((offset & (nand->erasesize - 1)) != 0) { + puts("Attempt to torture a block at a non block-aligned offset\n"); + return -EINVAL; + } + + if (offset + nand->erasesize > nand->size) { + puts("Attempt to torture a block outside the flash area\n"); + return -EINVAL; + } + + patt_count = ARRAY_SIZE(patterns); + + buf = malloc(nand->erasesize); + if (buf == NULL) { + puts("Out of memory for erase block buffer\n"); + return -ENOMEM; + } + + for (i = 0; i < patt_count; i++) { + err = nand->erase(nand, &instr); + if (err) { + printf("%s: erase() failed for block at 0x%llx: %d\n", + nand->name, instr.addr, err); + goto out; + } + + /* Make sure the block contains only 0xff bytes */ + err = nand->read(nand, offset, nand->erasesize, &retlen, buf); + if ((err && err != -EUCLEAN) || retlen != nand->erasesize) { + printf("%s: read() failed for block at 0x%llx: %d\n", + nand->name, instr.addr, err); + goto out; + } + + err = check_pattern(buf, 0xff, nand->erasesize); + if (!err) { + printf("Erased block at 0x%llx, but a non-0xff byte was found\n", + offset); + ret = -EIO; + goto out; + } + + /* Write a pattern and check it */ + memset(buf, patterns[i], nand->erasesize); + err = nand->write(nand, offset, nand->erasesize, &retlen, buf); + if (err || retlen != nand->erasesize) { + printf("%s: write() failed for block at 0x%llx: %d\n", + nand->name, instr.addr, err); + goto out; + } + + err = nand->read(nand, offset, nand->erasesize, &retlen, buf); + if ((err && err != -EUCLEAN) || retlen != nand->erasesize) { + printf("%s: read() failed for block at 0x%llx: %d\n", + nand->name, instr.addr, err); + goto out; + } + + err = check_pattern(buf, patterns[i], nand->erasesize); + if (!err) { + printf("Pattern 0x%.2x checking failed for block at " + "0x%llx\n", patterns[i], offset); + ret = -EIO; + goto out; + } + } + + ret = 0; + +out: + free(buf); + return ret; +} + +#endif diff --git a/drivers/net/fm/Makefile b/drivers/net/fm/Makefile index 7a1fcdd81fb..7fbb50a5623 100644 --- a/drivers/net/fm/Makefile +++ b/drivers/net/fm/Makefile @@ -44,6 +44,7 @@ COBJS-$(CONFIG_PPC_P2041) += p5020.o COBJS-$(CONFIG_PPC_P3041) += p5020.o COBJS-$(CONFIG_PPC_P4080) += p4080.o COBJS-$(CONFIG_PPC_P5020) += p5020.o +COBJS-$(CONFIG_PPC_P5040) += p5040.o COBJS-$(CONFIG_PPC_T4240) += t4240.o COBJS-$(CONFIG_PPC_B4860) += b4860.o endif diff --git a/drivers/net/fm/p5040.c b/drivers/net/fm/p5040.c new file mode 100644 index 00000000000..bc6b4ba0bb8 --- /dev/null +++ b/drivers/net/fm/p5040.c @@ -0,0 +1,113 @@ +/* + * Copyright 2011 Freescale Semiconductor, Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, + * MA 02111-1307 USA + */ +#include <common.h> +#include <phy.h> +#include <fm_eth.h> +#include <asm/io.h> +#include <asm/immap_85xx.h> +#include <asm/fsl_serdes.h> + +u32 port_to_devdisr[] = { + [FM1_DTSEC1] = FSL_CORENET_DEVDISR2_DTSEC1_1, + [FM1_DTSEC2] = FSL_CORENET_DEVDISR2_DTSEC1_2, + [FM1_DTSEC3] = FSL_CORENET_DEVDISR2_DTSEC1_3, + [FM1_DTSEC4] = FSL_CORENET_DEVDISR2_DTSEC1_4, + [FM1_DTSEC5] = FSL_CORENET_DEVDISR2_DTSEC1_5, + [FM1_10GEC1] = FSL_CORENET_DEVDISR2_10GEC1, + [FM2_DTSEC1] = FSL_CORENET_DEVDISR2_DTSEC2_1, + [FM2_DTSEC2] = FSL_CORENET_DEVDISR2_DTSEC2_2, + [FM2_DTSEC3] = FSL_CORENET_DEVDISR2_DTSEC2_3, + [FM2_DTSEC4] = FSL_CORENET_DEVDISR2_DTSEC2_4, + [FM2_DTSEC5] = FSL_CORENET_DEVDISR2_DTSEC2_5, + [FM2_10GEC1] = FSL_CORENET_DEVDISR2_10GEC2, +}; + +static int is_device_disabled(enum fm_port port) +{ + ccsr_gur_t *gur = (void *)(CONFIG_SYS_MPC85xx_GUTS_ADDR); + u32 devdisr2 = in_be32(&gur->devdisr2); + + return port_to_devdisr[port] & devdisr2; +} + +void fman_disable_port(enum fm_port port) +{ + ccsr_gur_t *gur = (void *)(CONFIG_SYS_MPC85xx_GUTS_ADDR); + + /* don't allow disabling of DTSEC1 as its needed for MDIO */ + if (port == FM1_DTSEC1) + return; + + setbits_be32(&gur->devdisr2, port_to_devdisr[port]); +} + +phy_interface_t fman_port_enet_if(enum fm_port port) +{ + ccsr_gur_t *gur = (void *)(CONFIG_SYS_MPC85xx_GUTS_ADDR); + u32 rcwsr11 = in_be32(&gur->rcwsr[11]); + + if (is_device_disabled(port)) + return PHY_INTERFACE_MODE_NONE; + + if ((port == FM1_10GEC1) && (is_serdes_configured(XAUI_FM1))) + return PHY_INTERFACE_MODE_XGMII; + + if ((port == FM2_10GEC1) && (is_serdes_configured(XAUI_FM2))) + return PHY_INTERFACE_MODE_XGMII; + + /* handle RGMII first */ + if ((port == FM1_DTSEC5) && ((rcwsr11 & FSL_CORENET_RCWSR11_EC1) == + FSL_CORENET_RCWSR11_EC1_FM1_DTSEC5_RGMII)) + return PHY_INTERFACE_MODE_RGMII; + + if ((port == FM1_DTSEC5) && ((rcwsr11 & FSL_CORENET_RCWSR11_EC1) == + FSL_CORENET_RCWSR11_EC1_FM1_DTSEC5_MII)) + return PHY_INTERFACE_MODE_MII; + + if ((port == FM2_DTSEC5) && ((rcwsr11 & FSL_CORENET_RCWSR11_EC2) == + FSL_CORENET_RCWSR11_EC2_FM2_DTSEC5_RGMII)) + return PHY_INTERFACE_MODE_RGMII; + + if ((port == FM2_DTSEC5) && ((rcwsr11 & FSL_CORENET_RCWSR11_EC2) == + FSL_CORENET_RCWSR11_EC2_FM2_DTSEC5_MII)) + return PHY_INTERFACE_MODE_MII; + + switch (port) { + case FM1_DTSEC1: + case FM1_DTSEC2: + case FM1_DTSEC3: + case FM1_DTSEC4: + case FM1_DTSEC5: + if (is_serdes_configured(SGMII_FM1_DTSEC1 + port - FM1_DTSEC1)) + return PHY_INTERFACE_MODE_SGMII; + break; + case FM2_DTSEC1: + case FM2_DTSEC2: + case FM2_DTSEC3: + case FM2_DTSEC4: + case FM2_DTSEC5: + if (is_serdes_configured(SGMII_FM2_DTSEC1 + port - FM2_DTSEC1)) + return PHY_INTERFACE_MODE_SGMII; + break; + default: + return PHY_INTERFACE_MODE_NONE; + } + + return PHY_INTERFACE_MODE_NONE; +} diff --git a/drivers/pci/fsl_pci_init.c b/drivers/pci/fsl_pci_init.c index 48ae16374dc..77ac1f7c7b9 100644 --- a/drivers/pci/fsl_pci_init.c +++ b/drivers/pci/fsl_pci_init.c @@ -470,6 +470,28 @@ void fsl_pci_init(struct pci_controller *hose, struct fsl_pci_info *pci_info) } #endif +#ifdef CONFIG_SYS_P4080_ERRATUM_PCIE_A003 + if (enabled == 0) { + serdes_corenet_t *srds_regs = (void *)CONFIG_SYS_FSL_CORENET_SERDES_ADDR; + temp32 = in_be32(&srds_regs->srdspccr0); + + if ((temp32 >> 28) == 3) { + int i; + + 
out_be32(&srds_regs->srdspccr0, 2 << 28); + setbits_be32(&pci->pdb_stat, 0x08000000); + in_be32(&pci->pdb_stat); + udelay(100); + clrbits_be32(&pci->pdb_stat, 0x08000000); + asm("sync;isync"); + for (i=0; i < 100 && ltssm < PCI_LTSSM_L0; i++) { + pci_hose_read_config_word(hose, dev, PCI_LTSSM, &ltssm); + udelay(1000); + } + enabled = ltssm >= PCI_LTSSM_L0; + } + } +#endif if (!enabled) { /* Let the user know there's no PCIe link */ printf("no link, regs @ 0x%lx\n", pci_info->regs); diff --git a/drivers/power/Makefile b/drivers/power/Makefile index 6bf388cb7d5..8c7190156c3 100644 --- a/drivers/power/Makefile +++ b/drivers/power/Makefile @@ -23,7 +23,7 @@ include $(TOPDIR)/config.mk -LIB := $(obj)libpower.o +LIB := $(obj)libpower.o COBJS-$(CONFIG_FTPMU010_POWER) += ftpmu010.o COBJS-$(CONFIG_TPS6586X_POWER) += tps6586x.o @@ -31,9 +31,15 @@ COBJS-$(CONFIG_TWL4030_POWER) += twl4030.o COBJS-$(CONFIG_TWL6030_POWER) += twl6030.o COBJS-$(CONFIG_TWL6035_POWER) += twl6035.o +COBJS-$(CONFIG_POWER) += power_core.o +COBJS-$(CONFIG_DIALOG_POWER) += power_dialog.o +COBJS-$(CONFIG_POWER_FSL) += power_fsl.o +COBJS-$(CONFIG_POWER_I2C) += power_i2c.o +COBJS-$(CONFIG_POWER_SPI) += power_spi.o + COBJS := $(COBJS-y) -SRCS := $(COBJS:.o=.c) -OBJS := $(addprefix $(obj),$(COBJS)) +SRCS := $(COBJS:.o=.c) +OBJS := $(addprefix $(obj),$(COBJS)) all: $(LIB) diff --git a/drivers/power/battery/Makefile b/drivers/power/battery/Makefile new file mode 100644 index 00000000000..b176701234b --- /dev/null +++ b/drivers/power/battery/Makefile @@ -0,0 +1,47 @@ +# +# Copyright (C) 2012 Samsung Electronics +# Lukasz Majewski <l.majewski@samsung.com> +# +# See file CREDITS for list of people who contributed to this +# project. +# +# This program is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License as +# published by the Free Software Foundation; either version 2 of +# the License, or (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, +# MA 02111-1307 USA +# + +include $(TOPDIR)/config.mk + +LIB := $(obj)libbattery.o + +COBJS-$(CONFIG_POWER_BATTERY_TRATS) += bat_trats.o + +COBJS := $(COBJS-y) +SRCS := $(COBJS:.o=.c) +OBJS := $(addprefix $(obj),$(COBJS)) + +all: $(LIB) + +$(LIB): $(obj).depend $(OBJS) + $(call cmd_link_o_target, $(OBJS)) + + +######################################################################### + +# defines $(obj).depend target +include $(SRCTREE)/rules.mk + +sinclude $(obj).depend + +######################################################################## diff --git a/drivers/power/battery/bat_trats.c b/drivers/power/battery/bat_trats.c new file mode 100644 index 00000000000..ca0d2146599 --- /dev/null +++ b/drivers/power/battery/bat_trats.c @@ -0,0 +1,100 @@ +/* + * Copyright (C) 2012 Samsung Electronics + * Lukasz Majewski <l.majewski@samsung.com> + * + * See file CREDITS for list of people who contributed to this + * project.
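The power framework objects in the Makefiles above are selected purely through config defines. A board wanting this stack would add something like the following to its config header; an illustrative sketch only, since the exact set of defines and the usual I2C addresses and buses depend on the board:

	/* hypothetical include/configs/<board>.h fragment */
	#define CONFIG_POWER			/* power_core.o: registration + "pmic" command */
	#define CONFIG_POWER_I2C		/* register access over I2C (power_i2c.o) */
	#define CONFIG_POWER_MAX8997		/* MAX8997 PMIC/charger */
	#define CONFIG_POWER_MUIC_MAX8997	/* MAX8997 MUIC, charger-type detection */
	#define CONFIG_POWER_FG_MAX17042	/* MAX17042 fuel gauge */
	#define CONFIG_POWER_BATTERY_TRATS	/* Trats battery glue */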
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, + * MA 02111-1307 USA + */ + +#include <common.h> +#include <power/pmic.h> +#include <power/battery.h> +#include <power/max8997_pmic.h> +#include <errno.h> + +static struct battery battery_trats; + +static int power_battery_charge(struct pmic *bat) +{ + struct power_battery *p_bat = bat->pbat; + struct battery *battery = p_bat->bat; + int k; + + if (bat->chrg->chrg_state(p_bat->chrg, CHARGER_ENABLE, 450)) + return -1; + + for (k = 0; bat->chrg->chrg_bat_present(p_bat->chrg) && + bat->chrg->chrg_type(p_bat->muic) && + battery->state_of_chrg < 100; k++) { + udelay(10000000); + puts("."); + bat->fg->fg_battery_update(p_bat->fg, bat); + + if (k == 100) { + debug(" %d [V]", battery->voltage_uV); + puts("\n"); + k = 0; + } + + } + + bat->chrg->chrg_state(p_bat->chrg, CHARGER_DISABLE, 0); + + return 0; +} + +static int power_battery_init_trats(struct pmic *bat_, + struct pmic *fg_, + struct pmic *chrg_, + struct pmic *muic_) +{ + bat_->pbat->fg = fg_; + bat_->pbat->chrg = chrg_; + bat_->pbat->muic = muic_; + + bat_->fg = fg_->fg; + bat_->chrg = chrg_->chrg; + bat_->chrg->chrg_type = muic_->chrg->chrg_type; + return 0; +} + +static struct power_battery power_bat_trats = { + .bat = &battery_trats, + .battery_init = power_battery_init_trats, + .battery_charge = power_battery_charge, +}; + +int power_bat_init(unsigned char bus) +{ + static const char name[] = "BAT_TRATS"; + struct pmic *p = pmic_alloc(); + + if (!p) { + printf("%s: POWER allocation error!\n", __func__); + return -ENOMEM; + } + + debug("Board BAT init\n"); + + p->interface = PMIC_NONE; + p->name = name; + p->bus = bus; + + p->pbat = &power_bat_trats; + return 0; +} diff --git a/drivers/power/fuel_gauge/Makefile b/drivers/power/fuel_gauge/Makefile new file mode 100644 index 00000000000..da1541412b8 --- /dev/null +++ b/drivers/power/fuel_gauge/Makefile @@ -0,0 +1,47 @@ +# +# Copyright (C) 2012 Samsung Electronics +# Lukasz Majewski <l.majewski@samsung.com> +# +# See file CREDITS for list of people who contributed to this +# project. +# +# This program is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License as +# published by the Free Software Foundation; either version 2 of +# the License, or (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
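The Trats battery driver above only registers a logical "BAT_TRATS" device and leaves the wiring to board code: each chip driver is initialized, the instances are looked up by name and handed to battery_init(). A rough sketch; the power_init_board() hook name and the bus numbers are assumptions, not part of this patch:

	#include <power/pmic.h>
	#include <power/battery.h>

	int power_init_board(void)
	{
		struct pmic *p_chrg, *p_fg, *p_muic, *p_bat;

		pmic_init(0);		/* MAX8997 PMIC/charger, bus assumed */
		power_fg_init(1);	/* MAX17042 fuel gauge, bus assumed */
		power_muic_init(0);	/* MAX8997 MUIC, bus assumed */
		power_bat_init(0);	/* logical battery device */

		p_chrg = pmic_get("MAX8997_PMIC");
		p_fg = pmic_get("MAX17042_FG");
		p_muic = pmic_get("MAX8997_MUIC");
		p_bat = pmic_get("BAT_TRATS");
		if (!p_chrg || !p_fg || !p_muic || !p_bat)
			return -1;

		return p_bat->pbat->battery_init(p_bat, p_fg, p_chrg, p_muic);
	}

With that in place the new "pmic" command added in power_core.c further down can exercise the stack from the console, e.g. "pmic list", "pmic MAX17042_FG dump" or "pmic BAT_TRATS bat state".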
+# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, +# MA 02111-1307 USA +# + +include $(TOPDIR)/config.mk + +LIB := $(obj)libfuel_gauge.o + +COBJS-$(CONFIG_POWER_FG_MAX17042) += fg_max17042.o + +COBJS := $(COBJS-y) +SRCS := $(COBJS:.o=.c) +OBJS := $(addprefix $(obj),$(COBJS)) + +all: $(LIB) + +$(LIB): $(obj).depend $(OBJS) + $(call cmd_link_o_target, $(OBJS)) + + +######################################################################### + +# defines $(obj).depend target +include $(SRCTREE)/rules.mk + +sinclude $(obj).depend + +######################################################################## diff --git a/drivers/power/fuel_gauge/fg_max17042.c b/drivers/power/fuel_gauge/fg_max17042.c new file mode 100644 index 00000000000..9b7c184cb0f --- /dev/null +++ b/drivers/power/fuel_gauge/fg_max17042.c @@ -0,0 +1,250 @@ +/* + * Copyright (C) 2012 Samsung Electronics + * Lukasz Majewski <l.majewski@samsung.com> + * + * See file CREDITS for list of people who contributed to this + * project. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, + * MA 02111-1307 USA + */ + +#include <common.h> +#include <power/pmic.h> +#include <power/max17042_fg.h> +#include <i2c.h> +#include <power/max8997_pmic.h> +#include <power/power_chrg.h> +#include <power/battery.h> +#include <power/fg_battery_cell_params.h> +#include <errno.h> + +static int fg_write_regs(struct pmic *p, u8 addr, u16 *data, int num) +{ + int ret = 0; + int i; + + for (i = 0; i < num; i++, addr++) + ret |= pmic_reg_write(p, addr, *(data + i)); + + return ret; +} + +static int fg_read_regs(struct pmic *p, u8 addr, u16 *data, int num) +{ + int ret = 0; + int i; + + for (i = 0; i < num; i++, addr++) + ret |= pmic_reg_read(p, addr, (u32 *) (data + i)); + + return ret; +} + +static int fg_write_and_verify(struct pmic *p, u8 addr, u16 data) +{ + unsigned int val = data; + int ret = 0; + + ret |= pmic_reg_write(p, addr, val); + ret |= pmic_reg_read(p, addr, &val); + + if (ret) + return ret; + + if (((u16) val) == data) + return 0; + + return -1; +} + +static void por_fuelgauge_init(struct pmic *p) +{ + u16 r_data0[16], r_data1[16], r_data2[16]; + u32 rewrite_count = 5, i = 0; + unsigned int val; + int ret = 0; + + /* Delay 500 ms */ + mdelay(500); + /* Initilize Configuration */ + pmic_reg_write(p, MAX17042_CONFIG, 0x2310); + +rewrite_model: + /* Unlock Model Access */ + pmic_reg_write(p, MAX17042_MLOCKReg1, MODEL_UNLOCK1); + pmic_reg_write(p, MAX17042_MLOCKReg2, MODEL_UNLOCK2); + + /* Write/Read/Verify the Custom Model */ + ret |= fg_write_regs(p, MAX17042_MODEL1, cell_character0, + ARRAY_SIZE(cell_character0)); + ret |= fg_write_regs(p, MAX17042_MODEL2, cell_character1, + ARRAY_SIZE(cell_character1)); + ret |= fg_write_regs(p, MAX17042_MODEL3, cell_character2, + 
ARRAY_SIZE(cell_character2)); + + if (ret) { + printf("%s: Cell parameters write failed!\n", __func__); + return; + } + + ret |= fg_read_regs(p, MAX17042_MODEL1, r_data0, ARRAY_SIZE(r_data0)); + ret |= fg_read_regs(p, MAX17042_MODEL2, r_data1, ARRAY_SIZE(r_data1)); + ret |= fg_read_regs(p, MAX17042_MODEL3, r_data2, ARRAY_SIZE(r_data2)); + + if (ret) + printf("%s: Cell parameters read failed!\n", __func__); + + for (i = 0; i < 16; i++) { + if ((cell_character0[i] != r_data0[i]) + || (cell_character1[i] != r_data1[i]) + || (cell_character2[i] != r_data2[i])) + goto rewrite_model; + } + + /* Lock model access */ + pmic_reg_write(p, MAX17042_MLOCKReg1, MODEL_LOCK1); + pmic_reg_write(p, MAX17042_MLOCKReg2, MODEL_LOCK2); + + /* Verify the model access is locked */ + ret |= fg_read_regs(p, MAX17042_MODEL1, r_data0, ARRAY_SIZE(r_data0)); + ret |= fg_read_regs(p, MAX17042_MODEL2, r_data1, ARRAY_SIZE(r_data1)); + ret |= fg_read_regs(p, MAX17042_MODEL3, r_data2, ARRAY_SIZE(r_data2)); + + if (ret) { + printf("%s: Cell parameters read failed!\n", __func__); + return; + } + + for (i = 0; i < ARRAY_SIZE(r_data0); i++) { + /* Check if model locked */ + if (r_data0[i] || r_data1[i] || r_data2[i]) { + /* Rewrite model data - prevent from endless loop */ + if (rewrite_count--) { + puts("FG - Lock model access failed!\n"); + goto rewrite_model; + } + } + } + + /* Write Custom Parameters */ + fg_write_and_verify(p, MAX17042_RCOMP0, RCOMP0); + fg_write_and_verify(p, MAX17042_TEMPCO, TempCo); + + /* Delay at least 350mS */ + mdelay(350); + + /* Initialization Complete */ + pmic_reg_read(p, MAX17042_STATUS, &val); + /* Write and Verify Status with POR bit Cleared */ + fg_write_and_verify(p, MAX17042_STATUS, val & ~MAX17042_POR); + + /* Delay at least 350 ms */ + mdelay(350); +} + +static int power_update_battery(struct pmic *p, struct pmic *bat) +{ + struct power_battery *pb = bat->pbat; + unsigned int val; + int ret = 0; + + if (pmic_probe(p)) { + puts("Can't find max17042 fuel gauge\n"); + return -1; + } + + ret |= pmic_reg_read(p, MAX17042_VFSOC, &val); + pb->bat->state_of_chrg = (val >> 8); + + pmic_reg_read(p, MAX17042_VCELL, &val); + debug("vfsoc: 0x%x\n", val); + pb->bat->voltage_uV = ((val & 0xFFUL) >> 3) + ((val & 0xFF00) >> 3); + pb->bat->voltage_uV = (pb->bat->voltage_uV * 625); + + pmic_reg_read(p, 0x05, &val); + pb->bat->capacity = val >> 2; + + return ret; +} + +static int power_check_battery(struct pmic *p, struct pmic *bat) +{ + struct power_battery *pb = bat->pbat; + unsigned int val; + int ret = 0; + + if (pmic_probe(p)) { + puts("Can't find max17042 fuel gauge\n"); + return -1; + } + + ret |= pmic_reg_read(p, MAX17042_STATUS, &val); + debug("fg status: 0x%x\n", val); + + if (val == MAX17042_POR) + por_fuelgauge_init(p); + + ret |= pmic_reg_read(p, MAX17042_VERSION, &val); + pb->bat->version = val; + + power_update_battery(p, bat); + debug("fg ver: 0x%x\n", pb->bat->version); + printf("BAT: state_of_charge(SOC):%d%%\n", + pb->bat->state_of_chrg); + + printf(" voltage: %d.%6.6d [V] (expected to be %d [mAh])\n", + pb->bat->voltage_uV / 1000000, + pb->bat->voltage_uV % 1000000, + pb->bat->capacity); + + if (pb->bat->voltage_uV > 3850000) + pb->bat->state = EXT_SOURCE; + else if (pb->bat->voltage_uV < 3600000 || pb->bat->state_of_chrg < 5) + pb->bat->state = CHARGE; + else + pb->bat->state = NORMAL; + + return ret; +} + +static struct power_fg power_fg_ops = { + .fg_battery_check = power_check_battery, + .fg_battery_update = power_update_battery, +}; + +int power_fg_init(unsigned char bus) +{ + 
static const char name[] = "MAX17042_FG"; + struct pmic *p = pmic_alloc(); + + if (!p) { + printf("%s: POWER allocation error!\n", __func__); + return -ENOMEM; + } + + debug("Board Fuel Gauge init\n"); + + p->name = name; + p->interface = PMIC_I2C; + p->number_of_regs = FG_NUM_OF_REGS; + p->hw.i2c.addr = MAX17042_I2C_ADDR; + p->hw.i2c.tx_num = 2; + p->sensor_byte_order = PMIC_SENSOR_BYTE_ORDER_BIG; + p->bus = bus; + + p->fg = &power_fg_ops; + return 0; +} diff --git a/drivers/power/pmic/Makefile b/drivers/power/pmic/Makefile new file mode 100644 index 00000000000..e19a9a81950 --- /dev/null +++ b/drivers/power/pmic/Makefile @@ -0,0 +1,49 @@ +# +# Copyright (C) 2012 Samsung Electronics +# Lukasz Majewski <l.majewski@samsung.com> +# +# See file CREDITS for list of people who contributed to this +# project. +# +# This program is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License as +# published by the Free Software Foundation; either version 2 of +# the License, or (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, +# MA 02111-1307 USA +# + +include $(TOPDIR)/config.mk + +LIB := $(obj)libpmic.o + +COBJS-$(CONFIG_POWER_MAX8998) += pmic_max8998.o +COBJS-$(CONFIG_POWER_MAX8997) += pmic_max8997.o +COBJS-$(CONFIG_POWER_MUIC_MAX8997) += muic_max8997.o + +COBJS := $(COBJS-y) +SRCS := $(COBJS:.o=.c) +OBJS := $(addprefix $(obj),$(COBJS)) + +all: $(LIB) + +$(LIB): $(obj).depend $(OBJS) + $(call cmd_link_o_target, $(OBJS)) + + +######################################################################### + +# defines $(obj).depend target +include $(SRCTREE)/rules.mk + +sinclude $(obj).depend + +######################################################################## diff --git a/drivers/power/pmic/muic_max8997.c b/drivers/power/pmic/muic_max8997.c new file mode 100644 index 00000000000..d5095c899ef --- /dev/null +++ b/drivers/power/pmic/muic_max8997.c @@ -0,0 +1,90 @@ +/* + * Copyright (C) 2012 Samsung Electronics + * Lukasz Majewski <l.majewski@samsung.com> + * + * See file CREDITS for list of people who contributed to this + * project. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, + * MA 02111-1307 USA + */ + +#include <common.h> +#include <power/pmic.h> +#include <power/power_chrg.h> +#include <power/max8997_muic.h> +#include <i2c.h> +#include <errno.h> + +static int power_chrg_get_type(struct pmic *p) +{ + unsigned int val; + unsigned char charge_type, charger; + + if (pmic_probe(p)) + return CHARGER_NO; + + pmic_reg_read(p, MAX8997_MUIC_STATUS2, &val); + charge_type = val & MAX8997_MUIC_CHG_MASK; + + switch (charge_type) { + case MAX8997_MUIC_CHG_NO: + charger = CHARGER_NO; + break; + case MAX8997_MUIC_CHG_USB: + case MAX8997_MUIC_CHG_USB_D: + charger = CHARGER_USB; + break; + case MAX8997_MUIC_CHG_TA: + case MAX8997_MUIC_CHG_TA_1A: + charger = CHARGER_TA; + break; + case MAX8997_MUIC_CHG_TA_500: + charger = CHARGER_TA_500; + break; + default: + charger = CHARGER_UNKNOWN; + break; + } + + return charger; +} + +static struct power_chrg power_chrg_muic_ops = { + .chrg_type = power_chrg_get_type, +}; + +int power_muic_init(unsigned int bus) +{ + static const char name[] = "MAX8997_MUIC"; + struct pmic *p = pmic_alloc(); + + if (!p) { + printf("%s: POWER allocation error!\n", __func__); + return -ENOMEM; + } + + debug("Board Micro USB Interface Controller init\n"); + + p->name = name; + p->interface = PMIC_I2C; + p->number_of_regs = MUIC_NUM_OF_REGS; + p->hw.i2c.addr = MAX8997_MUIC_I2C_ADDR; + p->hw.i2c.tx_num = 1; + p->bus = bus; + + p->chrg = &power_chrg_muic_ops; + return 0; +} diff --git a/drivers/power/pmic/pmic_max8997.c b/drivers/power/pmic/pmic_max8997.c new file mode 100644 index 00000000000..4e5c6d647fb --- /dev/null +++ b/drivers/power/pmic/pmic_max8997.c @@ -0,0 +1,123 @@ +/* + * Copyright (C) 2012 Samsung Electronics + * Lukasz Majewski <l.majewski@samsung.com> + * + * See file CREDITS for list of people who contributed to this + * project. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, + * MA 02111-1307 USA + */ + +#include <common.h> +#include <power/pmic.h> +#include <power/max8997_pmic.h> +#include <i2c.h> +#include <errno.h> + +unsigned char max8997_reg_ldo(int uV) +{ + unsigned char ret; + if (uV <= 800000) + return 0; + if (uV >= 3950000) + return MAX8997_LDO_MAX_VAL; + ret = (uV - 800000) / 50000; + if (ret > MAX8997_LDO_MAX_VAL) { + printf("MAX8997 LDO SETTING ERROR (%duV) -> %u\n", uV, ret); + ret = MAX8997_LDO_MAX_VAL; + } + + return ret; +} + +static int pmic_charger_state(struct pmic *p, int state, int current) +{ + unsigned char fc; + u32 val = 0; + + if (pmic_probe(p)) + return -1; + + if (state == CHARGER_DISABLE) { + puts("Disable the charger.\n"); + pmic_reg_read(p, MAX8997_REG_MBCCTRL2, &val); + val &= ~(MBCHOSTEN | VCHGR_FC); + pmic_reg_write(p, MAX8997_REG_MBCCTRL2, val); + + return -1; + } + + if (current < CHARGER_MIN_CURRENT || current > CHARGER_MAX_CURRENT) { + printf("%s: Wrong charge current: %d [mA]\n", + __func__, current); + return -1; + } + + fc = (current - CHARGER_MIN_CURRENT) / CHARGER_CURRENT_RESOLUTION; + fc = fc & 0xf; /* up to 950 mA */ + + printf("Enable the charger @ %d [mA]\n", fc * CHARGER_CURRENT_RESOLUTION + + CHARGER_MIN_CURRENT); + + val = fc | MBCICHFCSET; + pmic_reg_write(p, MAX8997_REG_MBCCTRL4, val); + + pmic_reg_read(p, MAX8997_REG_MBCCTRL2, &val); + val = MBCHOSTEN | VCHGR_FC; /* enable charger & fast charge */ + pmic_reg_write(p, MAX8997_REG_MBCCTRL2, val); + + return 0; +} + +static int pmic_charger_bat_present(struct pmic *p) +{ + u32 val; + + if (pmic_probe(p)) + return -1; + + pmic_reg_read(p, MAX8997_REG_STATUS4, &val); + + return !(val & DETBAT); +} + +static struct power_chrg power_chrg_pmic_ops = { + .chrg_bat_present = pmic_charger_bat_present, + .chrg_state = pmic_charger_state, +}; + +int pmic_init(unsigned char bus) +{ + static const char name[] = "MAX8997_PMIC"; + struct pmic *p = pmic_alloc(); + + if (!p) { + printf("%s: POWER allocation error!\n", __func__); + return -ENOMEM; + } + + debug("Board PMIC init\n"); + + p->name = name; + p->interface = PMIC_I2C; + p->number_of_regs = PMIC_NUM_OF_REGS; + p->hw.i2c.addr = MAX8997_I2C_ADDR; + p->hw.i2c.tx_num = 1; + p->bus = bus; + + p->chrg = &power_chrg_pmic_ops; + return 0; +} diff --git a/drivers/misc/pmic_max8998.c b/drivers/power/pmic/pmic_max8998.c index cc69fd70803..452e1c8d862 100644 --- a/drivers/misc/pmic_max8998.c +++ b/drivers/power/pmic/pmic_max8998.c @@ -22,13 +22,19 @@ */ #include <common.h> -#include <pmic.h> -#include <max8998_pmic.h> +#include <power/pmic.h> +#include <power/max8998_pmic.h> +#include <errno.h> -int pmic_init(void) +int pmic_init(unsigned char bus) { - struct pmic *p = get_pmic(); static const char name[] = "MAX8998_PMIC"; + struct pmic *p = pmic_alloc(); + + if (!p) { + printf("%s: POWER allocation error!\n", __func__); + return -ENOMEM; + } puts("Board PMIC init\n"); @@ -37,7 +43,7 @@ int pmic_init(void) p->number_of_regs = PMIC_NUM_OF_REGS; p->hw.i2c.addr = MAX8998_I2C_ADDR; p->hw.i2c.tx_num = 1; - p->bus = I2C_PMIC; + p->bus = bus; return 0; } diff --git a/drivers/power/power_core.c b/drivers/power/power_core.c new file mode 100644 index 00000000000..90df2c58c4c --- /dev/null +++ b/drivers/power/power_core.c @@ -0,0 +1,232 @@ +/* + * Copyright (C) 2011 Samsung Electronics + * Lukasz Majewski <l.majewski@samsung.com> + * + * (C) 
Copyright 2010 + * Stefano Babic, DENX Software Engineering, sbabic@denx.de + * + * (C) Copyright 2008-2009 Freescale Semiconductor, Inc. + * + * See file CREDITS for list of people who contributed to this + * project. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, + * MA 02111-1307 USA + */ + +#include <common.h> +#include <malloc.h> +#include <linux/types.h> +#include <linux/list.h> +#include <power/pmic.h> + +static LIST_HEAD(pmic_list); + +int check_reg(struct pmic *p, u32 reg) +{ + if (reg >= p->number_of_regs) { + printf("<reg num> = %d is invalid. Should be less than %d\n", + reg, p->number_of_regs); + return -1; + } + + return 0; +} + +int pmic_set_output(struct pmic *p, u32 reg, int out, int on) +{ + u32 val; + + if (pmic_reg_read(p, reg, &val)) + return -1; + + if (on) + val |= out; + else + val &= ~out; + + if (pmic_reg_write(p, reg, val)) + return -1; + + return 0; +} + +static void pmic_show_info(struct pmic *p) +{ + printf("PMIC: %s\n", p->name); +} + +static int pmic_dump(struct pmic *p) +{ + int i, ret; + u32 val; + + if (!p) { + puts("Wrong PMIC name!\n"); + return -1; + } + + pmic_show_info(p); + for (i = 0; i < p->number_of_regs; i++) { + ret = pmic_reg_read(p, i, &val); + if (ret) + puts("PMIC: Registers dump failed\n"); + + if (!(i % 8)) + printf("\n0x%02x: ", i); + + printf("%08x ", val); + } + puts("\n"); + return 0; +} + +struct pmic *pmic_alloc(void) +{ + struct pmic *p; + + p = calloc(sizeof(*p), 1); + if (!p) { + printf("%s: No available memory for allocation!\n", __func__); + return NULL; + } + + list_add_tail(&p->list, &pmic_list); + + debug("%s: new pmic struct: 0x%p\n", __func__, p); + + return p; +} + +struct pmic *pmic_get(const char *s) +{ + struct pmic *p; + + list_for_each_entry(p, &pmic_list, list) { + if (strcmp(p->name, s) == 0) { + debug("%s: pmic %s -> 0x%p\n", __func__, p->name, p); + return p; + } + } + + return NULL; +} + +const char *power_get_interface(int interface) +{ + const char *power_interface[] = {"I2C", "SPI", "|+|-|"}; + return power_interface[interface]; +} + +static void pmic_list_names(void) +{ + struct pmic *p; + + puts("PMIC devices:\n"); + list_for_each_entry(p, &pmic_list, list) { + printf("name: %s bus: %s_%d\n", p->name, + power_get_interface(p->interface), p->bus); + } +} + +int do_pmic(cmd_tbl_t *cmdtp, int flag, int argc, char * const argv[]) +{ + u32 ret, reg, val; + char *cmd, *name; + struct pmic *p; + + /* at least two arguments please */ + if (argc < 2) + return CMD_RET_USAGE; + + if (strcmp(argv[1], "list") == 0) { + pmic_list_names(); + return CMD_RET_SUCCESS; + } + + name = argv[1]; + cmd = argv[2]; + + debug("%s: name: %s cmd: %s\n", __func__, name, cmd); + p = pmic_get(name); + if (!p) + return CMD_RET_FAILURE; + + if (strcmp(cmd, "dump") == 0) { + if (pmic_dump(p)) + return CMD_RET_FAILURE; + return CMD_RET_SUCCESS; + } + + if (strcmp(cmd, "read") == 0) { + if 
(argc < 4) + return CMD_RET_USAGE; + + reg = simple_strtoul(argv[3], NULL, 16); + ret = pmic_reg_read(p, reg, &val); + + if (ret) + puts("PMIC: Register read failed\n"); + + printf("\n0x%02x: 0x%08x\n", reg, val); + + return CMD_RET_SUCCESS; + } + + if (strcmp(cmd, "write") == 0) { + if (argc < 5) + return CMD_RET_USAGE; + + reg = simple_strtoul(argv[3], NULL, 16); + val = simple_strtoul(argv[4], NULL, 16); + pmic_reg_write(p, reg, val); + + return CMD_RET_SUCCESS; + } + + if (strcmp(cmd, "bat") == 0) { + if (argc < 4) + return CMD_RET_USAGE; + + if (strcmp(argv[3], "state") == 0) + p->fg->fg_battery_check(p->pbat->fg, p); + + if (strcmp(argv[3], "charge") == 0) { + if (p->pbat) { + printf("PRINT BAT charge %s\n", p->name); + if (p->low_power_mode) + p->low_power_mode(); + if (p->pbat->battery_charge) + p->pbat->battery_charge(p); + } + } + + return CMD_RET_SUCCESS; + } + + /* No subcommand found */ + return CMD_RET_SUCCESS; +} + +U_BOOT_CMD( + pmic, CONFIG_SYS_MAXARGS, 1, do_pmic, + "PMIC", + "list - list available PMICs\n" + "pmic name dump - dump named PMIC registers\n" + "pmic name read <reg> - read register\n" + "pmic name write <reg> <value> - write register\n" + "pmic name bat state - write register\n" + "pmic name bat charge - write register\n" +); diff --git a/drivers/misc/pmic_dialog.c b/drivers/power/power_dialog.c index e97af1d1d01..d7ebd1583e8 100644 --- a/drivers/misc/pmic_dialog.c +++ b/drivers/power/power_dialog.c @@ -17,13 +17,19 @@ */ #include <common.h> -#include <pmic.h> +#include <power/pmic.h> #include <dialog_pmic.h> +#include <errno.h> -int pmic_dialog_init(void) +int pmic_dialog_init(unsigned char bus) { - struct pmic *p = get_pmic(); static const char name[] = "DIALOG_PMIC"; + struct pmic *p = pmic_alloc(); + + if (!p) { + printf("%s: POWER allocation error!\n", __func__); + return -ENOMEM; + } p->name = name; p->number_of_regs = DIALOG_NUM_OF_REGS; @@ -31,7 +37,7 @@ int pmic_dialog_init(void) p->interface = PMIC_I2C; p->hw.i2c.addr = CONFIG_SYS_DIALOG_PMIC_I2C_ADDR; p->hw.i2c.tx_num = 1; - p->bus = I2C_PMIC; + p->bus = bus; return 0; } diff --git a/drivers/misc/pmic_fsl.c b/drivers/power/power_fsl.c index c8d4c8d9ecf..a663831589f 100644 --- a/drivers/misc/pmic_fsl.c +++ b/drivers/power/power_fsl.c @@ -23,8 +23,9 @@ #include <common.h> #include <spi.h> -#include <pmic.h> +#include <power/pmic.h> #include <fsl_pmic.h> +#include <errno.h> #if defined(CONFIG_PMIC_FSL_MC13892) #define FSL_PMIC_I2C_LENGTH 3 @@ -32,22 +33,27 @@ #define FSL_PMIC_I2C_LENGTH 1 #endif -#if defined(CONFIG_PMIC_SPI) +#if defined(CONFIG_POWER_SPI) static u32 pmic_spi_prepare_tx(u32 reg, u32 *val, u32 write) { return (write << 31) | (reg << 25) | (*val & 0x00FFFFFF); } #endif -int pmic_init(void) +int pmic_init(unsigned char bus) { - struct pmic *p = get_pmic(); static const char name[] = "FSL_PMIC"; + struct pmic *p = pmic_alloc(); + + if (!p) { + printf("%s: POWER allocation error!\n", __func__); + return -ENOMEM; + } p->name = name; p->number_of_regs = PMIC_NUM_OF_REGS; -#if defined(CONFIG_PMIC_SPI) +#if defined(CONFIG_POWER_SPI) p->interface = PMIC_SPI; p->bus = CONFIG_FSL_PMIC_BUS; p->hw.spi.cs = CONFIG_FSL_PMIC_CS; @@ -56,13 +62,13 @@ int pmic_init(void) p->hw.spi.bitlen = CONFIG_FSL_PMIC_BITLEN; p->hw.spi.flags = SPI_XFER_BEGIN | SPI_XFER_END; p->hw.spi.prepare_tx = pmic_spi_prepare_tx; -#elif defined(CONFIG_PMIC_I2C) +#elif defined(CONFIG_POWER_I2C) p->interface = PMIC_I2C; p->hw.i2c.addr = CONFIG_SYS_FSL_PMIC_I2C_ADDR; p->hw.i2c.tx_num = FSL_PMIC_I2C_LENGTH; - p->bus = I2C_PMIC; + 
p->bus = bus; #else -#error "You must select CONFIG_PMIC_SPI or CONFIG_PMIC_I2C" +#error "You must select CONFIG_POWER_SPI or CONFIG_PMIC_I2C" #endif return 0; diff --git a/drivers/misc/pmic_i2c.c b/drivers/power/power_i2c.c index 95a3365b9fc..3e5a784cf53 100644 --- a/drivers/misc/pmic_i2c.c +++ b/drivers/power/power_i2c.c @@ -28,24 +28,40 @@ #include <common.h> #include <linux/types.h> -#include <pmic.h> +#include <power/pmic.h> #include <i2c.h> +#include <compiler.h> int pmic_reg_write(struct pmic *p, u32 reg, u32 val) { unsigned char buf[4] = { 0 }; - if (check_reg(reg)) + if (check_reg(p, reg)) return -1; switch (pmic_i2c_tx_num) { case 3: - buf[0] = (val >> 16) & 0xff; - buf[1] = (val >> 8) & 0xff; - buf[2] = val & 0xff; + if (p->sensor_byte_order == PMIC_SENSOR_BYTE_ORDER_BIG) { + buf[2] = (cpu_to_le32(val) >> 16) & 0xff; + buf[1] = (cpu_to_le32(val) >> 8) & 0xff; + buf[0] = cpu_to_le32(val) & 0xff; + } else { + buf[0] = (cpu_to_le32(val) >> 16) & 0xff; + buf[1] = (cpu_to_le32(val) >> 8) & 0xff; + buf[2] = cpu_to_le32(val) & 0xff; + } + break; + case 2: + if (p->sensor_byte_order == PMIC_SENSOR_BYTE_ORDER_BIG) { + buf[1] = (cpu_to_le32(val) >> 8) & 0xff; + buf[0] = cpu_to_le32(val) & 0xff; + } else { + buf[0] = (cpu_to_le32(val) >> 8) & 0xff; + buf[1] = cpu_to_le32(val) & 0xff; + } break; case 1: - buf[0] = val & 0xff; + buf[0] = cpu_to_le32(val) & 0xff; break; default: printf("%s: invalid tx_num: %d", __func__, pmic_i2c_tx_num); @@ -63,7 +79,7 @@ int pmic_reg_read(struct pmic *p, u32 reg, u32 *val) unsigned char buf[4] = { 0 }; u32 ret_val = 0; - if (check_reg(reg)) + if (check_reg(p, reg)) return -1; if (i2c_read(pmic_i2c_addr, reg, 1, buf, pmic_i2c_tx_num)) @@ -71,10 +87,21 @@ int pmic_reg_read(struct pmic *p, u32 reg, u32 *val) switch (pmic_i2c_tx_num) { case 3: - ret_val = buf[0] << 16 | buf[1] << 8 | buf[2]; + if (p->sensor_byte_order == PMIC_SENSOR_BYTE_ORDER_BIG) + ret_val = le32_to_cpu(buf[2] << 16 + | buf[1] << 8 | buf[0]); + else + ret_val = le32_to_cpu(buf[0] << 16 | + buf[1] << 8 | buf[2]); + break; + case 2: + if (p->sensor_byte_order == PMIC_SENSOR_BYTE_ORDER_BIG) + ret_val = le32_to_cpu(buf[1] << 8 | buf[0]); + else + ret_val = le32_to_cpu(buf[0] << 8 | buf[1]); break; case 1: - ret_val = buf[0]; + ret_val = le32_to_cpu(buf[0]); break; default: printf("%s: invalid tx_num: %d", __func__, pmic_i2c_tx_num); @@ -88,7 +115,7 @@ int pmic_reg_read(struct pmic *p, u32 reg, u32 *val) int pmic_probe(struct pmic *p) { I2C_SET_BUS(p->bus); - debug("PMIC:%s probed!\n", p->name); + debug("Bus: %d PMIC:%s probed!\n", p->bus, p->name); if (i2c_probe(pmic_i2c_addr)) { printf("Can't find PMIC:%s\n", p->name); return -1; diff --git a/drivers/misc/pmic_spi.c b/drivers/power/power_spi.c index 5a0dd22e290..27488ea5d92 100644 --- a/drivers/misc/pmic_spi.c +++ b/drivers/power/power_spi.c @@ -28,7 +28,7 @@ #include <common.h> #include <linux/types.h> -#include <pmic.h> +#include <power/pmic.h> #include <spi.h> static struct spi_slave *slave; @@ -59,7 +59,7 @@ static u32 pmic_reg(struct pmic *p, u32 reg, u32 *val, u32 write) return -1; } - if (check_reg(reg)) + if (check_reg(p, reg)) return -1; if (spi_claim_bus(slave)) diff --git a/drivers/rtc/mc13xxx-rtc.c b/drivers/rtc/mc13xxx-rtc.c index 70ea8a15898..e79f4621d3f 100644 --- a/drivers/rtc/mc13xxx-rtc.c +++ b/drivers/rtc/mc13xxx-rtc.c @@ -23,16 +23,18 @@ #include <common.h> #include <rtc.h> #include <spi.h> -#include <pmic.h> +#include <power/pmic.h> #include <fsl_pmic.h> int rtc_get(struct rtc_time *rtc) { u32 day1, day2, time; int tim, i = 
0; - struct pmic *p = get_pmic(); + struct pmic *p = pmic_get("FSL_PMIC"); int ret; + if (!p) + return -1; do { ret = pmic_reg_read(p, REG_RTC_DAY, &day1); if (ret < 0) @@ -61,7 +63,9 @@ int rtc_get(struct rtc_time *rtc) int rtc_set(struct rtc_time *rtc) { u32 time, day; - struct pmic *p = get_pmic(); + struct pmic *p = pmic_get("FSL_PMIC"); + if (!p) + return -1; time = mktime(rtc->tm_year, rtc->tm_mon, rtc->tm_mday, rtc->tm_hour, rtc->tm_min, rtc->tm_sec); diff --git a/drivers/serial/ns16550.c b/drivers/serial/ns16550.c index 9027781445f..bbd91ca247d 100644 --- a/drivers/serial/ns16550.c +++ b/drivers/serial/ns16550.c @@ -36,6 +36,9 @@ void NS16550_init(NS16550_t com_port, int baud_divisor) { + while (!(serial_in(&com_port->lsr) & UART_LSR_TEMT)) + ; + serial_out(CONFIG_SYS_NS16550_IER, &com_port->ier); #if (defined(CONFIG_OMAP) && !defined(CONFIG_OMAP3_ZOOM2)) || \ defined(CONFIG_AM33XX) diff --git a/drivers/serial/serial_ns16550.c b/drivers/serial/serial_ns16550.c index c1c0134bcb3..fc01a3c5164 100644 --- a/drivers/serial/serial_ns16550.c +++ b/drivers/serial/serial_ns16550.c @@ -31,6 +31,8 @@ #include <serial.h> +#ifndef CONFIG_NS16550_MIN_FUNCTIONS + DECLARE_GLOBAL_DATA_PTR; #if !defined(CONFIG_CONS_INDEX) @@ -304,3 +306,5 @@ void ns16550_serial_initialize(void) serial_register(&eserial6_device); #endif } + +#endif /* !CONFIG_NS16550_MIN_FUNCTIONS */ diff --git a/drivers/serial/serial_pl01x.c b/drivers/serial/serial_pl01x.c index b331be794ba..dfdba9f64eb 100644 --- a/drivers/serial/serial_pl01x.c +++ b/drivers/serial/serial_pl01x.c @@ -163,8 +163,8 @@ static int pl01x_serial_init(void) } #endif /* Finally, enable the UART */ - writel(UART_PL011_CR_UARTEN | UART_PL011_CR_TXE | UART_PL011_CR_RXE, - &regs->pl011_cr); + writel(UART_PL011_CR_UARTEN | UART_PL011_CR_TXE | UART_PL011_CR_RXE | + UART_PL011_CR_RTS, &regs->pl011_cr); return 0; } diff --git a/drivers/serial/serial_sh.c b/drivers/serial/serial_sh.c index 3c931d02127..ee1f2d768a1 100644 --- a/drivers/serial/serial_sh.c +++ b/drivers/serial/serial_sh.c @@ -117,6 +117,14 @@ static int serial_rx_fifo_level(void) return scif_rxfill(&sh_sci); } +static void handle_error(void) +{ + sci_in(&sh_sci, SCxSR); + sci_out(&sh_sci, SCxSR, SCxSR_ERROR_CLEAR(&sh_sci)); + sci_in(&sh_sci, SCLSR); + sci_out(&sh_sci, SCLSR, 0x00); +} + void serial_raw_putc(const char c) { while (1) { @@ -138,16 +146,14 @@ static void sh_serial_putc(const char c) static int sh_serial_tstc(void) { + if (sci_in(&sh_sci, SCxSR) & SCIF_ERRORS) { + handle_error(); + return 0; + } + return serial_rx_fifo_level() ?
1 : 0; } -void handle_error(void) -{ - sci_in(&sh_sci, SCxSR); - sci_out(&sh_sci, SCxSR, SCxSR_ERROR_CLEAR(&sh_sci)); - sci_in(&sh_sci, SCLSR); - sci_out(&sh_sci, SCLSR, 0x00); -} int serial_getc_check(void) { diff --git a/drivers/usb/gadget/config.c b/drivers/usb/gadget/config.c index f88d0c190c4..f9163a80ed1 100644 --- a/drivers/usb/gadget/config.c +++ b/drivers/usb/gadget/config.c @@ -27,7 +27,6 @@ #include <linux/string.h> #include <linux/usb/ch9.h> -#include <usbdescriptors.h> #include <linux/usb/gadget.h> diff --git a/drivers/usb/gadget/epautoconf.c b/drivers/usb/gadget/epautoconf.c index b656c8b9f48..5b8776e0b70 100644 --- a/drivers/usb/gadget/epautoconf.c +++ b/drivers/usb/gadget/epautoconf.c @@ -23,7 +23,6 @@ #include <common.h> #include <linux/usb/ch9.h> -#include <usbdescriptors.h> #include <asm/errno.h> #include <linux/usb/gadget.h> #include <asm/unaligned.h> diff --git a/drivers/usb/gadget/ether.c b/drivers/usb/gadget/ether.c index 8b24e00e271..de880ffeb8a 100644 --- a/drivers/usb/gadget/ether.c +++ b/drivers/usb/gadget/ether.c @@ -24,7 +24,6 @@ #include <asm/errno.h> #include <linux/netdevice.h> #include <linux/usb/ch9.h> -#include <usbdescriptors.h> #include <linux/usb/cdc.h> #include <linux/usb/gadget.h> #include <net.h> diff --git a/drivers/usb/gadget/f_dfu.c b/drivers/usb/gadget/f_dfu.c index 3ec4c65e748..10547e3279b 100644 --- a/drivers/usb/gadget/f_dfu.c +++ b/drivers/usb/gadget/f_dfu.c @@ -25,7 +25,6 @@ #include <malloc.h> #include <linux/usb/ch9.h> -#include <usbdescriptors.h> #include <linux/usb/gadget.h> #include <linux/usb/composite.h> diff --git a/drivers/usb/gadget/gadget_chips.h b/drivers/usb/gadget/gadget_chips.h index 02cae0fc758..e5701422fa3 100644 --- a/drivers/usb/gadget/gadget_chips.h +++ b/drivers/usb/gadget/gadget_chips.h @@ -125,8 +125,8 @@ #endif /* Mentor high speed "dual role" controller, in peripheral role */ -#ifdef CONFIG_USB_GADGET_MUSB_HDRC -#define gadget_is_musbhdrc(g) (!strcmp("musb_hdrc", (g)->name)) +#ifdef CONFIG_MUSB_GADGET +#define gadget_is_musbhdrc(g) (!strcmp("musb-hdrc", (g)->name)) #else #define gadget_is_musbhdrc(g) 0 #endif diff --git a/drivers/usb/gadget/s3c_udc_otg.c b/drivers/usb/gadget/s3c_udc_otg.c index 3fdfdf7af08..f9d24e3dfdc 100644 --- a/drivers/usb/gadget/s3c_udc_otg.c +++ b/drivers/usb/gadget/s3c_udc_otg.c @@ -37,7 +37,6 @@ #include <malloc.h> #include <linux/usb/ch9.h> -#include <usbdescriptors.h> #include <linux/usb/gadget.h> #include <asm/byteorder.h> diff --git a/drivers/usb/gadget/usbstring.c b/drivers/usb/gadget/usbstring.c index 4dbe060d6f2..95555cf96b9 100644 --- a/drivers/usb/gadget/usbstring.c +++ b/drivers/usb/gadget/usbstring.c @@ -13,7 +13,6 @@ #include <common.h> #include <asm/errno.h> #include <linux/usb/ch9.h> -#include <usbdescriptors.h> #include <linux/usb/gadget.h> #include <asm/unaligned.h> diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c index d90e94d8109..7f98a6354ac 100644 --- a/drivers/usb/host/ehci-hcd.c +++ b/drivers/usb/host/ehci-hcd.c @@ -210,6 +210,18 @@ static int ehci_td_buffer(struct qTD *td, void *buf, size_t sz) return 0; } +static inline u8 ehci_encode_speed(enum usb_device_speed speed) +{ + #define QH_HIGH_SPEED 2 + #define QH_FULL_SPEED 0 + #define QH_LOW_SPEED 1 + if (speed == USB_SPEED_HIGH) + return QH_HIGH_SPEED; + if (speed == USB_SPEED_LOW) + return QH_LOW_SPEED; + return QH_FULL_SPEED; +} + static int ehci_submit_async(struct usb_device *dev, unsigned long pipe, void *buffer, int length, struct devrequest *req) @@ -318,12 +330,12 @@ 
ehci_submit_async(struct usb_device *dev, unsigned long pipe, void *buffer, * - qh_overlay.qt_altnext */ qh->qh_link = cpu_to_hc32((uint32_t)&ctrl->qh_list | QH_LINK_TYPE_QH); - c = usb_pipespeed(pipe) != USB_SPEED_HIGH && !usb_pipeendpoint(pipe); + c = (dev->speed != USB_SPEED_HIGH) && !usb_pipeendpoint(pipe); maxpacket = usb_maxpacket(dev, pipe); endpt = QH_ENDPT1_RL(8) | QH_ENDPT1_C(c) | QH_ENDPT1_MAXPKTLEN(maxpacket) | QH_ENDPT1_H(0) | QH_ENDPT1_DTC(QH_ENDPT1_DTC_DT_FROM_QTD) | - QH_ENDPT1_EPS(usb_pipespeed(pipe)) | + QH_ENDPT1_EPS(ehci_encode_speed(dev->speed)) | QH_ENDPT1_ENDPT(usb_pipeendpoint(pipe)) | QH_ENDPT1_I(0) | QH_ENDPT1_DEVADDR(usb_pipedevice(pipe)); qh->qh_endpt1 = cpu_to_hc32(endpt); diff --git a/drivers/usb/host/isp116x-hcd.c b/drivers/usb/host/isp116x-hcd.c index 19e16a4a804..289018c4f6a 100644 --- a/drivers/usb/host/isp116x-hcd.c +++ b/drivers/usb/host/isp116x-hcd.c @@ -617,7 +617,7 @@ static int isp116x_submit_job(struct usb_device *dev, unsigned long pipe, int epnum = usb_pipeendpoint(pipe); int max = usb_maxpacket(dev, pipe); int dir_out = usb_pipeout(pipe); - int speed_low = usb_pipeslow(pipe); + int speed_low = (dev->speed == USB_SPEED_LOW); int i, done = 0, stat, timeout, cc; /* 500 frames or 0.5s timeout when function is busy and NAKs transactions for a while */ diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c index c2106adbd3b..bdbe250b01e 100644 --- a/drivers/usb/host/ohci-hcd.c +++ b/drivers/usb/host/ohci-hcd.c @@ -803,7 +803,7 @@ static ed_t *ep_add_ed(struct usb_device *usb_dev, unsigned long pipe, | (usb_pipeisoc(pipe)? 0x8000: 0) | (usb_pipecontrol(pipe)? 0: \ (usb_pipeout(pipe)? 0x800: 0x1000)) - | usb_pipeslow(pipe) << 13 + | (usb_dev->speed == USB_SPEED_LOW) << 13 | usb_maxpacket(usb_dev, pipe) << 16); if (ed->type == PIPE_INTERRUPT && ed->state == ED_UNLINK) { diff --git a/drivers/usb/host/ohci-s3c24xx.c b/drivers/usb/host/ohci-s3c24xx.c index 03cd4c3d2a2..dde07640349 100644 --- a/drivers/usb/host/ohci-s3c24xx.c +++ b/drivers/usb/host/ohci-s3c24xx.c @@ -620,7 +620,7 @@ static struct ed *ep_add_ed(struct usb_device *usb_dev, unsigned long pipe) | (usb_pipeisoc(pipe) ? 0x8000 : 0) | (usb_pipecontrol(pipe) ? 0 : (usb_pipeout(pipe) ? 
0x800 : 0x1000)) - | usb_pipeslow(pipe) << 13 | + | (usb_dev->speed == USB_SPEED_LOW) << 13 | usb_maxpacket(usb_dev, pipe) << 16); return ed_ret; diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c index 2a4e7ff4b37..b503b356ce5 100644 --- a/drivers/usb/host/r8a66597-hcd.c +++ b/drivers/usb/host/r8a66597-hcd.c @@ -31,9 +31,6 @@ #endif static const char hcd_name[] = "r8a66597_hcd"; -static unsigned short clock = CONFIG_R8A66597_XTAL; -static unsigned short vif = CONFIG_R8A66597_LDRV; -static unsigned short endian = CONFIG_R8A66597_ENDIAN; static struct r8a66597 gr8a66597; static void get_hub_data(struct usb_device *dev, u16 *hub_devnum, u16 *hubport) @@ -96,7 +93,7 @@ static int r8a66597_clock_enable(struct r8a66597 *r8a66597) } } while ((tmp & USBE) != USBE); r8a66597_bclr(r8a66597, USBE, SYSCFG0); - r8a66597_mdfy(r8a66597, clock, XTAL, SYSCFG0); + r8a66597_mdfy(r8a66597, CONFIG_R8A66597_XTAL, XTAL, SYSCFG0); i = 0; r8a66597_bset(r8a66597, XCKE, SYSCFG0); @@ -162,7 +159,7 @@ static int enable_controller(struct r8a66597 *r8a66597) if (ret < 0) return ret; - r8a66597_bset(r8a66597, vif & LDRV, PINCFG); + r8a66597_bset(r8a66597, CONFIG_R8A66597_LDRV & LDRV, PINCFG); r8a66597_bset(r8a66597, USBE, SYSCFG0); r8a66597_bset(r8a66597, INTL, SOFCFG); @@ -170,9 +167,9 @@ static int enable_controller(struct r8a66597 *r8a66597) r8a66597_write(r8a66597, 0, INTENB1); r8a66597_write(r8a66597, 0, INTENB2); - r8a66597_bset(r8a66597, endian & BIGEND, CFIFOSEL); - r8a66597_bset(r8a66597, endian & BIGEND, D0FIFOSEL); - r8a66597_bset(r8a66597, endian & BIGEND, D1FIFOSEL); + r8a66597_bset(r8a66597, CONFIG_R8A66597_ENDIAN & BIGEND, CFIFOSEL); + r8a66597_bset(r8a66597, CONFIG_R8A66597_ENDIAN & BIGEND, D0FIFOSEL); + r8a66597_bset(r8a66597, CONFIG_R8A66597_ENDIAN & BIGEND, D1FIFOSEL); r8a66597_bset(r8a66597, TRNENSEL, SOFCFG); for (port = 0; port < R8A66597_MAX_ROOT_HUB; port++) @@ -673,7 +670,6 @@ static int r8a66597_submit_rh_msg(struct usb_device *dev, unsigned long pipe, int stat = 0; __u16 bmRType_bReq; __u16 wValue; - __u16 wIndex; __u16 wLength; unsigned char data[32]; @@ -686,7 +682,6 @@ static int r8a66597_submit_rh_msg(struct usb_device *dev, unsigned long pipe, bmRType_bReq = cmd->requesttype | (cmd->request << 8); wValue = cpu_to_le16 (cmd->value); - wIndex = cpu_to_le16 (cmd->index); wLength = cpu_to_le16 (cmd->length); switch (bmRType_bReq) { @@ -908,7 +903,7 @@ int submit_int_msg(struct usb_device *dev, unsigned long pipe, void *buffer, return 0; } -int usb_lowlevel_init(int index, void **controller)) +int usb_lowlevel_init(int index, void **controller) { struct r8a66597 *r8a66597 = &gr8a66597; diff --git a/drivers/usb/host/sl811-hcd.c b/drivers/usb/host/sl811-hcd.c index 28306160469..417f1a8aba1 100644 --- a/drivers/usb/host/sl811-hcd.c +++ b/drivers/usb/host/sl811-hcd.c @@ -234,7 +234,7 @@ static int sl811_send_packet(struct usb_device *dev, unsigned long pipe, __u8 *b __u16 status = 0; int err = 0, time_start = get_timer(0); int need_preamble = !(rh_status.wPortStatus & USB_PORT_STAT_LOW_SPEED) && - usb_pipeslow(pipe); + (dev->speed == USB_SPEED_LOW); if (len > 239) return -1; diff --git a/drivers/usb/musb-new/Makefile b/drivers/usb/musb-new/Makefile new file mode 100644 index 00000000000..c23bef1cc27 --- /dev/null +++ b/drivers/usb/musb-new/Makefile @@ -0,0 +1,39 @@ +# +# for USB OTG silicon based on Mentor Graphics INVENTRA designs +# + +include $(TOPDIR)/config.mk + +LIB := $(obj)libusb_musb-new.o + +COBJS-$(CONFIG_MUSB_GADGET) += musb_gadget.o musb_gadget_ep0.o 
musb_core.o +COBJS-$(CONFIG_MUSB_GADGET) += musb_uboot.o +COBJS-$(CONFIG_MUSB_HOST) += musb_host.o musb_core.o musb_uboot.o +COBJS-$(CONFIG_USB_MUSB_DSPS) += musb_dsps.o +COBJS-$(CONFIG_USB_MUSB_AM35X) += am35x.o +COBJS-$(CONFIG_USB_MUSB_OMAP2PLUS) += omap2430.o + +CFLAGS_NO_WARN := $(call cc-option,-Wno-unused-variable) \ + $(call cc-option,-Wno-unused-but-set-variable) \ + $(call cc-option,-Wno-unused-label) +CFLAGS += $(CFLAGS_NO_WARN) + +COBJS := $(sort $(COBJS-y)) +SRCS := $(COBJS:.o=.c) +OBJS := $(addprefix $(obj),$(COBJS)) + +all: $(LIB) + +#$(LIB): $(OBJS) +$(LIB): $(obj).depend $(OBJS) + $(call cmd_link_o_target, $(OBJS)) + +######################################################################### + +# defines $(obj).depend target +include $(SRCTREE)/rules.mk + +sinclude $(obj).depend + +######################################################################### + diff --git a/drivers/usb/musb-new/am35x.c b/drivers/usb/musb-new/am35x.c new file mode 100644 index 00000000000..57c9bd393b8 --- /dev/null +++ b/drivers/usb/musb-new/am35x.c @@ -0,0 +1,709 @@ +/* + * Texas Instruments AM35x "glue layer" + * + * Copyright (c) 2010, by Texas Instruments + * + * Based on the DA8xx "glue layer" code. + * Copyright (c) 2008-2009, MontaVista Software, Inc. <source@mvista.com> + * + * This file is part of the Inventra Controller Driver for Linux. + * + * The Inventra Controller Driver for Linux is free software; you + * can redistribute it and/or modify it under the terms of the GNU + * General Public License version 2 as published by the Free Software + * Foundation. + * + * The Inventra Controller Driver for Linux is distributed in + * the hope that it will be useful, but WITHOUT ANY WARRANTY; + * without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public + * License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with The Inventra Controller Driver for Linux ; if not, + * write to the Free Software Foundation, Inc., 59 Temple Place, + * Suite 330, Boston, MA 02111-1307 USA + * + */ + +#define __UBOOT__ +#ifndef __UBOOT__ +#include <linux/init.h> +#include <linux/module.h> +#include <linux/clk.h> +#include <linux/err.h> +#include <linux/io.h> +#include <linux/platform_device.h> +#include <linux/dma-mapping.h> + +#include <plat/usb.h> +#else +#include <common.h> +#include <asm/omap_musb.h> +#include "linux-compat.h" +#endif + +#include "musb_core.h" + +/* + * AM35x specific definitions + */ +/* USB 2.0 OTG module registers */ +#define USB_REVISION_REG 0x00 +#define USB_CTRL_REG 0x04 +#define USB_STAT_REG 0x08 +#define USB_EMULATION_REG 0x0c +/* 0x10 Reserved */ +#define USB_AUTOREQ_REG 0x14 +#define USB_SRP_FIX_TIME_REG 0x18 +#define USB_TEARDOWN_REG 0x1c +#define EP_INTR_SRC_REG 0x20 +#define EP_INTR_SRC_SET_REG 0x24 +#define EP_INTR_SRC_CLEAR_REG 0x28 +#define EP_INTR_MASK_REG 0x2c +#define EP_INTR_MASK_SET_REG 0x30 +#define EP_INTR_MASK_CLEAR_REG 0x34 +#define EP_INTR_SRC_MASKED_REG 0x38 +#define CORE_INTR_SRC_REG 0x40 +#define CORE_INTR_SRC_SET_REG 0x44 +#define CORE_INTR_SRC_CLEAR_REG 0x48 +#define CORE_INTR_MASK_REG 0x4c +#define CORE_INTR_MASK_SET_REG 0x50 +#define CORE_INTR_MASK_CLEAR_REG 0x54 +#define CORE_INTR_SRC_MASKED_REG 0x58 +/* 0x5c Reserved */ +#define USB_END_OF_INTR_REG 0x60 + +/* Control register bits */ +#define AM35X_SOFT_RESET_MASK 1 + +/* USB interrupt register bits */ +#define AM35X_INTR_USB_SHIFT 16 +#define AM35X_INTR_USB_MASK (0x1ff << AM35X_INTR_USB_SHIFT) +#define AM35X_INTR_DRVVBUS 0x100 +#define AM35X_INTR_RX_SHIFT 16 +#define AM35X_INTR_TX_SHIFT 0 +#define AM35X_TX_EP_MASK 0xffff /* EP0 + 15 Tx EPs */ +#define AM35X_RX_EP_MASK 0xfffe /* 15 Rx EPs */ +#define AM35X_TX_INTR_MASK (AM35X_TX_EP_MASK << AM35X_INTR_TX_SHIFT) +#define AM35X_RX_INTR_MASK (AM35X_RX_EP_MASK << AM35X_INTR_RX_SHIFT) + +#define USB_MENTOR_CORE_OFFSET 0x400 + +struct am35x_glue { + struct device *dev; + struct platform_device *musb; + struct clk *phy_clk; + struct clk *clk; +}; +#define glue_to_musb(g) platform_get_drvdata(g->musb) + +/* + * am35x_musb_enable - enable interrupts + */ +static void am35x_musb_enable(struct musb *musb) +{ + void __iomem *reg_base = musb->ctrl_base; + u32 epmask; + + /* Workaround: setup IRQs through both register sets. */ + epmask = ((musb->epmask & AM35X_TX_EP_MASK) << AM35X_INTR_TX_SHIFT) | + ((musb->epmask & AM35X_RX_EP_MASK) << AM35X_INTR_RX_SHIFT); + + musb_writel(reg_base, EP_INTR_MASK_SET_REG, epmask); + musb_writel(reg_base, CORE_INTR_MASK_SET_REG, AM35X_INTR_USB_MASK); + + /* Force the DRVVBUS IRQ so we can start polling for ID change. 
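 * (AM35x has no dedicated ID-change interrupt; as the interrupt handler
 * further down notes, DRVVBUS events are the only available proxy, hence
 * the forced interrupt here.)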
*/ + if (is_otg_enabled(musb)) + musb_writel(reg_base, CORE_INTR_SRC_SET_REG, + AM35X_INTR_DRVVBUS << AM35X_INTR_USB_SHIFT); +} + +/* + * am35x_musb_disable - disable HDRC and flush interrupts + */ +static void am35x_musb_disable(struct musb *musb) +{ + void __iomem *reg_base = musb->ctrl_base; + + musb_writel(reg_base, CORE_INTR_MASK_CLEAR_REG, AM35X_INTR_USB_MASK); + musb_writel(reg_base, EP_INTR_MASK_CLEAR_REG, + AM35X_TX_INTR_MASK | AM35X_RX_INTR_MASK); + musb_writeb(musb->mregs, MUSB_DEVCTL, 0); + musb_writel(reg_base, USB_END_OF_INTR_REG, 0); +} + +#ifndef __UBOOT__ +#define portstate(stmt) stmt + +static void am35x_musb_set_vbus(struct musb *musb, int is_on) +{ + WARN_ON(is_on && is_peripheral_active(musb)); +} + +#define POLL_SECONDS 2 + +static struct timer_list otg_workaround; + +static void otg_timer(unsigned long _musb) +{ + struct musb *musb = (void *)_musb; + void __iomem *mregs = musb->mregs; + u8 devctl; + unsigned long flags; + + /* + * We poll because AM35x's won't expose several OTG-critical + * status change events (from the transceiver) otherwise. + */ + devctl = musb_readb(mregs, MUSB_DEVCTL); + dev_dbg(musb->controller, "Poll devctl %02x (%s)\n", devctl, + otg_state_string(musb->xceiv->state)); + + spin_lock_irqsave(&musb->lock, flags); + switch (musb->xceiv->state) { + case OTG_STATE_A_WAIT_BCON: + devctl &= ~MUSB_DEVCTL_SESSION; + musb_writeb(musb->mregs, MUSB_DEVCTL, devctl); + + devctl = musb_readb(musb->mregs, MUSB_DEVCTL); + if (devctl & MUSB_DEVCTL_BDEVICE) { + musb->xceiv->state = OTG_STATE_B_IDLE; + MUSB_DEV_MODE(musb); + } else { + musb->xceiv->state = OTG_STATE_A_IDLE; + MUSB_HST_MODE(musb); + } + break; + case OTG_STATE_A_WAIT_VFALL: + musb->xceiv->state = OTG_STATE_A_WAIT_VRISE; + musb_writel(musb->ctrl_base, CORE_INTR_SRC_SET_REG, + MUSB_INTR_VBUSERROR << AM35X_INTR_USB_SHIFT); + break; + case OTG_STATE_B_IDLE: + if (!is_peripheral_enabled(musb)) + break; + + devctl = musb_readb(mregs, MUSB_DEVCTL); + if (devctl & MUSB_DEVCTL_BDEVICE) + mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ); + else + musb->xceiv->state = OTG_STATE_A_IDLE; + break; + default: + break; + } + spin_unlock_irqrestore(&musb->lock, flags); +} + +static void am35x_musb_try_idle(struct musb *musb, unsigned long timeout) +{ + static unsigned long last_timer; + + if (!is_otg_enabled(musb)) + return; + + if (timeout == 0) + timeout = jiffies + msecs_to_jiffies(3); + + /* Never idle if active, or when VBUS timeout is not set as host */ + if (musb->is_active || (musb->a_wait_bcon == 0 && + musb->xceiv->state == OTG_STATE_A_WAIT_BCON)) { + dev_dbg(musb->controller, "%s active, deleting timer\n", + otg_state_string(musb->xceiv->state)); + del_timer(&otg_workaround); + last_timer = jiffies; + return; + } + + if (time_after(last_timer, timeout) && timer_pending(&otg_workaround)) { + dev_dbg(musb->controller, "Longer idle timer already pending, ignoring...\n"); + return; + } + last_timer = timeout; + + dev_dbg(musb->controller, "%s inactive, starting idle timer for %u ms\n", + otg_state_string(musb->xceiv->state), + jiffies_to_msecs(timeout - jiffies)); + mod_timer(&otg_workaround, timeout); +} +#endif + +static irqreturn_t am35x_musb_interrupt(int irq, void *hci) +{ + struct musb *musb = hci; + void __iomem *reg_base = musb->ctrl_base; +#ifndef __UBOOT__ + struct device *dev = musb->controller; + struct musb_hdrc_platform_data *plat = dev->platform_data; + struct omap_musb_board_data *data = plat->board_data; + struct usb_otg *otg = musb->xceiv->otg; +#else + struct omap_musb_board_data 
*data = + (struct omap_musb_board_data *)musb->controller; +#endif + unsigned long flags; + irqreturn_t ret = IRQ_NONE; + u32 epintr, usbintr; + +#ifdef __UBOOT__ + /* + * It seems that on AM35X interrupt registers can be updated + * before core registers. This confuses the code. + * As a workaround add a small delay here. + */ + udelay(10); +#endif + spin_lock_irqsave(&musb->lock, flags); + + /* Get endpoint interrupts */ + epintr = musb_readl(reg_base, EP_INTR_SRC_MASKED_REG); + + if (epintr) { + musb_writel(reg_base, EP_INTR_SRC_CLEAR_REG, epintr); + + musb->int_rx = + (epintr & AM35X_RX_INTR_MASK) >> AM35X_INTR_RX_SHIFT; + musb->int_tx = + (epintr & AM35X_TX_INTR_MASK) >> AM35X_INTR_TX_SHIFT; + } + + /* Get usb core interrupts */ + usbintr = musb_readl(reg_base, CORE_INTR_SRC_MASKED_REG); + if (!usbintr && !epintr) + goto eoi; + + if (usbintr) { + musb_writel(reg_base, CORE_INTR_SRC_CLEAR_REG, usbintr); + + musb->int_usb = + (usbintr & AM35X_INTR_USB_MASK) >> AM35X_INTR_USB_SHIFT; + } +#ifndef __UBOOT__ + /* + * DRVVBUS IRQs are the only proxy we have (a very poor one!) for + * AM35x's missing ID change IRQ. We need an ID change IRQ to + * switch appropriately between halves of the OTG state machine. + * Managing DEVCTL.SESSION per Mentor docs requires that we know its + * value but DEVCTL.BDEVICE is invalid without DEVCTL.SESSION set. + * Also, DRVVBUS pulses for SRP (but not at 5V) ... + */ + if (usbintr & (AM35X_INTR_DRVVBUS << AM35X_INTR_USB_SHIFT)) { + int drvvbus = musb_readl(reg_base, USB_STAT_REG); + void __iomem *mregs = musb->mregs; + u8 devctl = musb_readb(mregs, MUSB_DEVCTL); + int err; + + err = is_host_enabled(musb) && (musb->int_usb & + MUSB_INTR_VBUSERROR); + if (err) { + /* + * The Mentor core doesn't debounce VBUS as needed + * to cope with device connect current spikes. This + * means it's not uncommon for bus-powered devices + * to get VBUS errors during enumeration. + * + * This is a workaround, but newer RTL from Mentor + * seems to allow a better one: "re"-starting sessions + * without waiting for VBUS to stop registering in + * devctl. + */ + musb->int_usb &= ~MUSB_INTR_VBUSERROR; + musb->xceiv->state = OTG_STATE_A_WAIT_VFALL; + mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ); + WARNING("VBUS error workaround (delay coming)\n"); + } else if (is_host_enabled(musb) && drvvbus) { + MUSB_HST_MODE(musb); + otg->default_a = 1; + musb->xceiv->state = OTG_STATE_A_WAIT_VRISE; + portstate(musb->port1_status |= USB_PORT_STAT_POWER); + del_timer(&otg_workaround); + } else { + musb->is_active = 0; + MUSB_DEV_MODE(musb); + otg->default_a = 0; + musb->xceiv->state = OTG_STATE_B_IDLE; + portstate(musb->port1_status &= ~USB_PORT_STAT_POWER); + } + + /* NOTE: this must complete power-on within 100 ms. */ + dev_dbg(musb->controller, "VBUS %s (%s)%s, devctl %02x\n", + drvvbus ? "on" : "off", + otg_state_string(musb->xceiv->state), + err ? " ERROR" : "", + devctl); + ret = IRQ_HANDLED; + } +#endif + + if (musb->int_tx || musb->int_rx || musb->int_usb) + ret |= musb_interrupt(musb); + +eoi: + /* EOI needs to be written for the IRQ to be re-asserted. 
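As an aside, a stand-alone sketch (not part of the patch) of the endpoint-interrupt decode a few lines above: EP_INTR_SRC_MASKED_REG packs TX endpoint bits in the low half and RX endpoint bits in the high half, and the handler splits them into musb->int_tx / musb->int_rx. The mask and shift values below are assumptions mirroring the AM35x definitions used elsewhere in this driver.

/* Illustration only: decode a packed AM35x endpoint-interrupt word.
 * Mask/shift values are assumptions, not taken from this patch hunk. */
#include <stdint.h>
#include <stdio.h>

#define TX_INTR_MASK	0x0000ffffu	/* EP0 + 15 TX endpoints */
#define RX_INTR_MASK	0xfffe0000u	/* 15 RX endpoints */
#define INTR_TX_SHIFT	0
#define INTR_RX_SHIFT	16

int main(void)
{
	uint32_t epintr = (1u << 1) | (1u << (16 + 2));	/* TX ep1 + RX ep2 pending */
	uint16_t int_tx = (epintr & TX_INTR_MASK) >> INTR_TX_SHIFT;
	uint16_t int_rx = (epintr & RX_INTR_MASK) >> INTR_RX_SHIFT;

	printf("int_tx 0x%04x, int_rx 0x%04x\n", int_tx, int_rx);	/* 0x0002, 0x0004 */
	return 0;
}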
*/ + if (ret == IRQ_HANDLED || epintr || usbintr) { + /* clear level interrupt */ + if (data->clear_irq) + data->clear_irq(); + /* write EOI */ + musb_writel(reg_base, USB_END_OF_INTR_REG, 0); + } + +#ifndef __UBOOT__ + /* Poll for ID change */ + if (is_otg_enabled(musb) && musb->xceiv->state == OTG_STATE_B_IDLE) + mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ); +#endif + + spin_unlock_irqrestore(&musb->lock, flags); + + return ret; +} + +#ifndef __UBOOT__ +static int am35x_musb_set_mode(struct musb *musb, u8 musb_mode) +{ + struct device *dev = musb->controller; + struct musb_hdrc_platform_data *plat = dev->platform_data; + struct omap_musb_board_data *data = plat->board_data; + int retval = 0; + + if (data->set_mode) + data->set_mode(musb_mode); + else + retval = -EIO; + + return retval; +} +#endif + +static int am35x_musb_init(struct musb *musb) +{ +#ifndef __UBOOT__ + struct device *dev = musb->controller; + struct musb_hdrc_platform_data *plat = dev->platform_data; + struct omap_musb_board_data *data = plat->board_data; +#else + struct omap_musb_board_data *data = + (struct omap_musb_board_data *)musb->controller; +#endif + void __iomem *reg_base = musb->ctrl_base; + u32 rev; + + musb->mregs += USB_MENTOR_CORE_OFFSET; + + /* Returns zero if e.g. not clocked */ + rev = musb_readl(reg_base, USB_REVISION_REG); + if (!rev) + return -ENODEV; + +#ifndef __UBOOT__ + usb_nop_xceiv_register(); + musb->xceiv = usb_get_phy(USB_PHY_TYPE_USB2); + if (IS_ERR_OR_NULL(musb->xceiv)) + return -ENODEV; + + if (is_host_enabled(musb)) + setup_timer(&otg_workaround, otg_timer, (unsigned long) musb); +#endif + + /* Reset the musb */ + if (data->reset) + data->reset(); + + /* Reset the controller */ + musb_writel(reg_base, USB_CTRL_REG, AM35X_SOFT_RESET_MASK); + + /* Start the on-chip PHY and its PLL. */ + if (data->set_phy_power) + data->set_phy_power(1); + + msleep(5); + + musb->isr = am35x_musb_interrupt; + + /* clear level interrupt */ + if (data->clear_irq) + data->clear_irq(); + + return 0; +} + +static int am35x_musb_exit(struct musb *musb) +{ +#ifndef __UBOOT__ + struct device *dev = musb->controller; + struct musb_hdrc_platform_data *plat = dev->platform_data; + struct omap_musb_board_data *data = plat->board_data; +#else + struct omap_musb_board_data *data = + (struct omap_musb_board_data *)musb->controller; +#endif + +#ifndef __UBOOT__ + if (is_host_enabled(musb)) + del_timer_sync(&otg_workaround); +#endif + + /* Shutdown the on-chip PHY and its PLL. */ + if (data->set_phy_power) + data->set_phy_power(0); + +#ifndef __UBOOT__ + usb_put_phy(musb->xceiv); + usb_nop_xceiv_unregister(); +#endif + + return 0; +} + +/* AM35x supports only 32bit read operation */ +void musb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *dst) +{ + void __iomem *fifo = hw_ep->fifo; + u32 val; + int i; + + /* Read for 32bit-aligned destination address */ + if (likely((0x03 & (unsigned long) dst) == 0) && len >= 4) { + readsl(fifo, dst, len >> 2); + dst += len & ~0x03; + len &= 0x03; + } + /* + * Now read the remaining 1 to 3 byte or complete length if + * unaligned address. 
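To make the "32-bit reads only" rule concrete, here is a minimal user-space model (not the driver code): whole words are drained first, and the trailing 1..3 bytes still cost one full 32-bit FIFO read, with only the needed bytes kept. The real function above distinguishes aligned and unaligned destinations and uses readsl/direct stores; this sketch uses memcpy everywhere for portability, and fifo_pop32() is a made-up stand-in for musb_readl(fifo, 0).

/* Illustrative model of AM35x FIFO unloading; fifo_pop32() is fake. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t fifo_word = 0x11223344;

static uint32_t fifo_pop32(void)
{
	return fifo_word++;		/* each read drains one 32-bit word */
}

static void model_read_fifo(uint8_t *dst, size_t len)
{
	uint32_t w;

	/* Whole 32-bit words first */
	while (len >= 4) {
		w = fifo_pop32();
		memcpy(dst, &w, 4);
		dst += 4;
		len -= 4;
	}
	/* Trailing 1..3 bytes: one more full 32-bit read, keep only 'len' */
	if (len) {
		w = fifo_pop32();
		memcpy(dst, &w, len);
	}
}

int main(void)
{
	uint8_t buf[11];

	model_read_fifo(buf, sizeof(buf));	/* 2 full words + 3 tail bytes */
	printf("last byte 0x%02x\n", buf[10]);
	return 0;
}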
+ */ + if (len > 4) { + for (i = 0; i < (len >> 2); i++) { + *(u32 *) dst = musb_readl(fifo, 0); + dst += 4; + } + len &= 0x03; + } + if (len > 0) { + val = musb_readl(fifo, 0); + memcpy(dst, &val, len); + } +} + +#ifndef __UBOOT__ +static const struct musb_platform_ops am35x_ops = { +#else +const struct musb_platform_ops am35x_ops = { +#endif + .init = am35x_musb_init, + .exit = am35x_musb_exit, + + .enable = am35x_musb_enable, + .disable = am35x_musb_disable, + +#ifndef __UBOOT__ + .set_mode = am35x_musb_set_mode, + .try_idle = am35x_musb_try_idle, + + .set_vbus = am35x_musb_set_vbus, +#endif +}; + +#ifndef __UBOOT__ +static u64 am35x_dmamask = DMA_BIT_MASK(32); + +static int __devinit am35x_probe(struct platform_device *pdev) +{ + struct musb_hdrc_platform_data *pdata = pdev->dev.platform_data; + struct platform_device *musb; + struct am35x_glue *glue; + + struct clk *phy_clk; + struct clk *clk; + + int ret = -ENOMEM; + + glue = kzalloc(sizeof(*glue), GFP_KERNEL); + if (!glue) { + dev_err(&pdev->dev, "failed to allocate glue context\n"); + goto err0; + } + + musb = platform_device_alloc("musb-hdrc", -1); + if (!musb) { + dev_err(&pdev->dev, "failed to allocate musb device\n"); + goto err1; + } + + phy_clk = clk_get(&pdev->dev, "fck"); + if (IS_ERR(phy_clk)) { + dev_err(&pdev->dev, "failed to get PHY clock\n"); + ret = PTR_ERR(phy_clk); + goto err2; + } + + clk = clk_get(&pdev->dev, "ick"); + if (IS_ERR(clk)) { + dev_err(&pdev->dev, "failed to get clock\n"); + ret = PTR_ERR(clk); + goto err3; + } + + ret = clk_enable(phy_clk); + if (ret) { + dev_err(&pdev->dev, "failed to enable PHY clock\n"); + goto err4; + } + + ret = clk_enable(clk); + if (ret) { + dev_err(&pdev->dev, "failed to enable clock\n"); + goto err5; + } + + musb->dev.parent = &pdev->dev; + musb->dev.dma_mask = &am35x_dmamask; + musb->dev.coherent_dma_mask = am35x_dmamask; + + glue->dev = &pdev->dev; + glue->musb = musb; + glue->phy_clk = phy_clk; + glue->clk = clk; + + pdata->platform_ops = &am35x_ops; + + platform_set_drvdata(pdev, glue); + + ret = platform_device_add_resources(musb, pdev->resource, + pdev->num_resources); + if (ret) { + dev_err(&pdev->dev, "failed to add resources\n"); + goto err6; + } + + ret = platform_device_add_data(musb, pdata, sizeof(*pdata)); + if (ret) { + dev_err(&pdev->dev, "failed to add platform_data\n"); + goto err6; + } + + ret = platform_device_add(musb); + if (ret) { + dev_err(&pdev->dev, "failed to register musb device\n"); + goto err6; + } + + return 0; + +err6: + clk_disable(clk); + +err5: + clk_disable(phy_clk); + +err4: + clk_put(clk); + +err3: + clk_put(phy_clk); + +err2: + platform_device_put(musb); + +err1: + kfree(glue); + +err0: + return ret; +} + +static int __devexit am35x_remove(struct platform_device *pdev) +{ + struct am35x_glue *glue = platform_get_drvdata(pdev); + + platform_device_del(glue->musb); + platform_device_put(glue->musb); + clk_disable(glue->clk); + clk_disable(glue->phy_clk); + clk_put(glue->clk); + clk_put(glue->phy_clk); + kfree(glue); + + return 0; +} + +#ifdef CONFIG_PM +static int am35x_suspend(struct device *dev) +{ + struct am35x_glue *glue = dev_get_drvdata(dev); + struct musb_hdrc_platform_data *plat = dev->platform_data; + struct omap_musb_board_data *data = plat->board_data; + + /* Shutdown the on-chip PHY and its PLL. 
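The probe path above uses the usual goto-ladder error unwinding: every acquired resource gets a label, and a failure jumps to the label that releases everything obtained so far, in reverse order. A minimal stand-alone sketch of that idiom (generic illustration, not code from this patch; get_res()/put_res() are made up):

/* Illustration of goto-based cleanup as in am35x_probe(); resources are
 * kept on success (they belong to the bound device) and released in
 * reverse order on failure. */
#include <stdio.h>
#include <stdlib.h>

static void *get_res(const char *name)
{
	printf("get %s\n", name);
	return malloc(1);
}

static void put_res(void *r, const char *name)
{
	printf("put %s\n", name);
	free(r);
}

static int probe(void)
{
	void *glue, *clk;
	int ret = -1;

	glue = get_res("glue");
	if (!glue)
		goto err0;

	clk = get_res("clk");
	if (!clk)
		goto err1;

	/* ... enable clocks, register the child device ... */
	return 0;

err1:
	put_res(glue, "glue");
err0:
	return ret;
}

int main(void)
{
	return probe();
}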
*/ + if (data->set_phy_power) + data->set_phy_power(0); + + clk_disable(glue->phy_clk); + clk_disable(glue->clk); + + return 0; +} + +static int am35x_resume(struct device *dev) +{ + struct am35x_glue *glue = dev_get_drvdata(dev); + struct musb_hdrc_platform_data *plat = dev->platform_data; + struct omap_musb_board_data *data = plat->board_data; + int ret; + + /* Start the on-chip PHY and its PLL. */ + if (data->set_phy_power) + data->set_phy_power(1); + + ret = clk_enable(glue->phy_clk); + if (ret) { + dev_err(dev, "failed to enable PHY clock\n"); + return ret; + } + + ret = clk_enable(glue->clk); + if (ret) { + dev_err(dev, "failed to enable clock\n"); + return ret; + } + + return 0; +} + +static struct dev_pm_ops am35x_pm_ops = { + .suspend = am35x_suspend, + .resume = am35x_resume, +}; + +#define DEV_PM_OPS &am35x_pm_ops +#else +#define DEV_PM_OPS NULL +#endif + +static struct platform_driver am35x_driver = { + .probe = am35x_probe, + .remove = __devexit_p(am35x_remove), + .driver = { + .name = "musb-am35x", + .pm = DEV_PM_OPS, + }, +}; + +MODULE_DESCRIPTION("AM35x MUSB Glue Layer"); +MODULE_AUTHOR("Ajay Kumar Gupta <ajay.gupta@ti.com>"); +MODULE_LICENSE("GPL v2"); + +static int __init am35x_init(void) +{ + return platform_driver_register(&am35x_driver); +} +module_init(am35x_init); + +static void __exit am35x_exit(void) +{ + platform_driver_unregister(&am35x_driver); +} +module_exit(am35x_exit); +#endif diff --git a/drivers/usb/musb-new/linux-compat.h b/drivers/usb/musb-new/linux-compat.h new file mode 100644 index 00000000000..5c126ef9cfd --- /dev/null +++ b/drivers/usb/musb-new/linux-compat.h @@ -0,0 +1,116 @@ +#ifndef __LINUX_COMPAT_H__ +#define __LINUX_COMPAT_H__ + +#include <malloc.h> +#include <linux/list.h> +#include <linux/compat.h> + +#define __init +#define __devinit +#define __devinitdata +#define __devinitconst +#define __iomem +#define __deprecated + +typedef enum { false = 0, true = 1 } bool; + +struct unused {}; +typedef struct unused unused_t; + +typedef int irqreturn_t; +typedef unused_t spinlock_t; + +struct work_struct {}; + +struct timer_list {}; +struct notifier_block {}; + +typedef unsigned long dmaaddr_t; + +#define spin_lock_init(lock) do {} while (0) +#define spin_lock(lock) do {} while (0) +#define spin_unlock(lock) do {} while (0) +#define spin_lock_irqsave(lock, flags) do {} while (0) +#define spin_unlock_irqrestore(lock, flags) do {} while (0) + +#define setup_timer(timer, func, data) do {} while (0) +#define del_timer_sync(timer) do {} while (0) +#define schedule_work(work) do {} while (0) +#define INIT_WORK(work, fun) do {} while (0) + +#define cpu_relax() do {} while (0) + +#define pr_debug(fmt, args...) debug(fmt, ##args) +#define dev_dbg(dev, fmt, args...) \ + debug(fmt, ##args) +#define dev_vdbg(dev, fmt, args...) \ + debug(fmt, ##args) +#define dev_info(dev, fmt, args...) \ + printf(fmt, ##args) +#define dev_err(dev, fmt, args...) \ + printf(fmt, ##args) +#define printk printf + +#define WARN(condition, fmt, args...) 
({ \ + int ret_warn = !!condition; \ + if (ret_warn) \ + printf(fmt, ##args); \ + ret_warn; }) + +#define KERN_DEBUG +#define KERN_NOTICE +#define KERN_WARNING +#define KERN_ERR + +#define kfree(ptr) free(ptr) + +#define pm_runtime_get_sync(dev) do {} while (0) +#define pm_runtime_put(dev) do {} while (0) +#define pm_runtime_put_sync(dev) do {} while (0) +#define pm_runtime_use_autosuspend(dev) do {} while (0) +#define pm_runtime_set_autosuspend_delay(dev, delay) do {} while (0) +#define pm_runtime_enable(dev) do {} while (0) + +#define MODULE_DESCRIPTION(desc) +#define MODULE_AUTHOR(author) +#define MODULE_LICENSE(license) +#define MODULE_ALIAS(alias) +#define module_param(name, type, perm) +#define MODULE_PARM_DESC(name, desc) +#define EXPORT_SYMBOL_GPL(name) + +#define writesl(a, d, s) __raw_writesl((unsigned long)a, d, s) +#define readsl(a, d, s) __raw_readsl((unsigned long)a, d, s) +#define writesw(a, d, s) __raw_writesw((unsigned long)a, d, s) +#define readsw(a, d, s) __raw_readsw((unsigned long)a, d, s) +#define writesb(a, d, s) __raw_writesb((unsigned long)a, d, s) +#define readsb(a, d, s) __raw_readsb((unsigned long)a, d, s) + +#define IRQ_NONE 0 +#define IRQ_HANDLED 0 + +#define dev_set_drvdata(dev, data) do {} while (0) + +#define disable_irq_wake(irq) do {} while (0) +#define enable_irq_wake(irq) -EINVAL +#define free_irq(irq, data) do {} while (0) +#define request_irq(nr, f, flags, nm, data) 0 + +#define device_init_wakeup(dev, a) do {} while (0) + +#define platform_data device_data + +#ifndef wmb +#define wmb() asm volatile ("" : : : "memory") +#endif + +#define msleep(a) udelay(a * 1000) + +/* + * Map U-Boot config options to Linux ones + */ +#ifdef CONFIG_OMAP34XX +#define CONFIG_SOC_OMAP3430 +#endif + +#endif /* __LINUX_COMPAT_H__ */ diff --git a/drivers/usb/musb-new/musb_core.c b/drivers/usb/musb-new/musb_core.c new file mode 100644 index 00000000000..040a5c0f0f7 --- /dev/null +++ b/drivers/usb/musb-new/musb_core.c @@ -0,0 +1,2497 @@ +/* + * MUSB OTG driver core code + * + * Copyright 2005 Mentor Graphics Corporation + * Copyright (C) 2005-2006 by Texas Instruments + * Copyright (C) 2006-2007 Nokia Corporation + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN + * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF + * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +/* + * Inventra (Multipoint) Dual-Role Controller Driver for Linux. + * + * This consists of a Host Controller Driver (HCD) and a peripheral + * controller driver implementing the "Gadget" API; OTG support is + * in the works. These are normal Linux-USB controller drivers which + * use IRQs and have no dedicated thread. + * + * This version of the driver has only been used with products from + * Texas Instruments. Those products integrate the Inventra logic + * with other DMA, IRQ, and bus modules, as well as other logic that + * needs to be reflected in this driver. + * + * + * NOTE: the original Mentor code here was pretty much a collection + * of mechanisms that don't seem to have been fully integrated/working + * for *any* Linux kernel version. This version aims at Linux 2.6.now, + * Key open issues include: + * + * - Lack of host-side transaction scheduling, for all transfer types. + * The hardware doesn't do it; instead, software must. + * + * This is not an issue for OTG devices that don't support external + * hubs, but for more "normal" USB hosts it's a user issue that the + * "multipoint" support doesn't scale in the expected ways. That + * includes DaVinci EVM in a common non-OTG mode. + * + * * Control and bulk use dedicated endpoints, and there's as + * yet no mechanism to either (a) reclaim the hardware when + * peripherals are NAKing, which gets complicated with bulk + * endpoints, or (b) use more than a single bulk endpoint in + * each direction. + * + * RESULT: one device may be perceived as blocking another one. + * + * * Interrupt and isochronous will dynamically allocate endpoint + * hardware, but (a) there's no record keeping for bandwidth; + * (b) in the common case that few endpoints are available, there + * is no mechanism to reuse endpoints to talk to multiple devices. + * + * RESULT: At one extreme, bandwidth can be overcommitted in + * some hardware configurations, no faults will be reported. + * At the other extreme, the bandwidth capabilities which do + * exist tend to be severely undercommitted. You can't yet hook + * up both a keyboard and a mouse to an external USB hub. + */ + +/* + * This gets many kinds of configuration information: + * - Kconfig for everything user-configurable + * - platform_device for addressing, irq, and platform_data + * - platform_data is mostly for board-specific informarion + * (plus recentrly, SOC or family details) + * + * Most of the conditional compilation will (someday) vanish. 
+ */ + +#define __UBOOT__ +#ifndef __UBOOT__ +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/sched.h> +#include <linux/slab.h> +#include <linux/init.h> +#include <linux/list.h> +#include <linux/kobject.h> +#include <linux/prefetch.h> +#include <linux/platform_device.h> +#include <linux/io.h> +#else +#include <common.h> +#include <usb.h> +#include <asm/errno.h> +#include <linux/usb/ch9.h> +#include <linux/usb/gadget.h> +#include <linux/usb/musb.h> +#include <asm/io.h> +#include "linux-compat.h" +#include "usb-compat.h" +#endif + +#include "musb_core.h" + +#define TA_WAIT_BCON(m) max_t(int, (m)->a_wait_bcon, OTG_TIME_A_WAIT_BCON) + + +#define DRIVER_AUTHOR "Mentor Graphics, Texas Instruments, Nokia" +#define DRIVER_DESC "Inventra Dual-Role USB Controller Driver" + +#define MUSB_VERSION "6.0" + +#define DRIVER_INFO DRIVER_DESC ", v" MUSB_VERSION + +#define MUSB_DRIVER_NAME "musb-hdrc" +const char musb_driver_name[] = MUSB_DRIVER_NAME; + +MODULE_DESCRIPTION(DRIVER_INFO); +MODULE_AUTHOR(DRIVER_AUTHOR); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:" MUSB_DRIVER_NAME); + + +#ifndef __UBOOT__ +/*-------------------------------------------------------------------------*/ + +static inline struct musb *dev_to_musb(struct device *dev) +{ + return dev_get_drvdata(dev); +} +#endif + +/*-------------------------------------------------------------------------*/ + +#ifndef __UBOOT__ +#ifndef CONFIG_BLACKFIN +static int musb_ulpi_read(struct usb_phy *phy, u32 offset) +{ + void __iomem *addr = phy->io_priv; + int i = 0; + u8 r; + u8 power; + int ret; + + pm_runtime_get_sync(phy->io_dev); + + /* Make sure the transceiver is not in low power mode */ + power = musb_readb(addr, MUSB_POWER); + power &= ~MUSB_POWER_SUSPENDM; + musb_writeb(addr, MUSB_POWER, power); + + /* REVISIT: musbhdrc_ulpi_an.pdf recommends setting the + * ULPICarKitControlDisableUTMI after clearing POWER_SUSPENDM. 
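The ULPI accessors below share one pattern: kick off a register transaction, then spin on a completion bit with a bounded retry count, returning -ETIMEDOUT if the bit never sets. A stand-alone sketch of that bounded poll (illustration only; reg_read() and REG_CMPLT are invented here, not the MUSB register names):

/* Illustration of the bounded completion poll used by the ULPI helpers. */
#include <stdint.h>
#include <stdio.h>

#define REG_CMPLT	(1 << 1)	/* made-up "transaction complete" bit */
#define ETIMEDOUT	110

static int fake_cycles;

static uint8_t reg_read(void)
{
	/* pretend the hardware completes after a few reads */
	return (++fake_cycles > 3) ? REG_CMPLT : 0;
}

static int wait_for_completion_bit(void)
{
	int i;

	for (i = 0; i < 10000; i++) {
		if (reg_read() & REG_CMPLT)
			return 0;
	}
	return -ETIMEDOUT;
}

int main(void)
{
	printf("poll result: %d\n", wait_for_completion_bit());
	return 0;
}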
+ */ + + musb_writeb(addr, MUSB_ULPI_REG_ADDR, (u8)offset); + musb_writeb(addr, MUSB_ULPI_REG_CONTROL, + MUSB_ULPI_REG_REQ | MUSB_ULPI_RDN_WR); + + while (!(musb_readb(addr, MUSB_ULPI_REG_CONTROL) + & MUSB_ULPI_REG_CMPLT)) { + i++; + if (i == 10000) { + ret = -ETIMEDOUT; + goto out; + } + + } + r = musb_readb(addr, MUSB_ULPI_REG_CONTROL); + r &= ~MUSB_ULPI_REG_CMPLT; + musb_writeb(addr, MUSB_ULPI_REG_CONTROL, r); + + ret = musb_readb(addr, MUSB_ULPI_REG_DATA); + +out: + pm_runtime_put(phy->io_dev); + + return ret; +} + +static int musb_ulpi_write(struct usb_phy *phy, u32 offset, u32 data) +{ + void __iomem *addr = phy->io_priv; + int i = 0; + u8 r = 0; + u8 power; + int ret = 0; + + pm_runtime_get_sync(phy->io_dev); + + /* Make sure the transceiver is not in low power mode */ + power = musb_readb(addr, MUSB_POWER); + power &= ~MUSB_POWER_SUSPENDM; + musb_writeb(addr, MUSB_POWER, power); + + musb_writeb(addr, MUSB_ULPI_REG_ADDR, (u8)offset); + musb_writeb(addr, MUSB_ULPI_REG_DATA, (u8)data); + musb_writeb(addr, MUSB_ULPI_REG_CONTROL, MUSB_ULPI_REG_REQ); + + while (!(musb_readb(addr, MUSB_ULPI_REG_CONTROL) + & MUSB_ULPI_REG_CMPLT)) { + i++; + if (i == 10000) { + ret = -ETIMEDOUT; + goto out; + } + } + + r = musb_readb(addr, MUSB_ULPI_REG_CONTROL); + r &= ~MUSB_ULPI_REG_CMPLT; + musb_writeb(addr, MUSB_ULPI_REG_CONTROL, r); + +out: + pm_runtime_put(phy->io_dev); + + return ret; +} +#else +#define musb_ulpi_read NULL +#define musb_ulpi_write NULL +#endif + +static struct usb_phy_io_ops musb_ulpi_access = { + .read = musb_ulpi_read, + .write = musb_ulpi_write, +}; +#endif + +/*-------------------------------------------------------------------------*/ + +#if !defined(CONFIG_USB_MUSB_TUSB6010) && !defined(CONFIG_USB_MUSB_BLACKFIN) + +/* + * Load an endpoint's FIFO + */ +void musb_write_fifo(struct musb_hw_ep *hw_ep, u16 len, const u8 *src) +{ + struct musb *musb = hw_ep->musb; + void __iomem *fifo = hw_ep->fifo; + + prefetch((u8 *)src); + + dev_dbg(musb->controller, "%cX ep%d fifo %p count %d buf %p\n", + 'T', hw_ep->epnum, fifo, len, src); + + /* we can't assume unaligned reads work */ + if (likely((0x01 & (unsigned long) src) == 0)) { + u16 index = 0; + + /* best case is 32bit-aligned source address */ + if ((0x02 & (unsigned long) src) == 0) { + if (len >= 4) { + writesl(fifo, src + index, len >> 2); + index += len & ~0x03; + } + if (len & 0x02) { + musb_writew(fifo, 0, *(u16 *)&src[index]); + index += 2; + } + } else { + if (len >= 2) { + writesw(fifo, src + index, len >> 1); + index += len & ~0x01; + } + } + if (len & 0x01) + musb_writeb(fifo, 0, src[index]); + } else { + /* byte aligned */ + writesb(fifo, src, len); + } +} + +#if !defined(CONFIG_USB_MUSB_AM35X) +/* + * Unload an endpoint's FIFO + */ +void musb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *dst) +{ + struct musb *musb = hw_ep->musb; + void __iomem *fifo = hw_ep->fifo; + + dev_dbg(musb->controller, "%cX ep%d fifo %p count %d buf %p\n", + 'R', hw_ep->epnum, fifo, len, dst); + + /* we can't assume unaligned writes work */ + if (likely((0x01 & (unsigned long) dst) == 0)) { + u16 index = 0; + + /* best case is 32bit-aligned destination address */ + if ((0x02 & (unsigned long) dst) == 0) { + if (len >= 4) { + readsl(fifo, dst, len >> 2); + index = len & ~0x03; + } + if (len & 0x02) { + *(u16 *)&dst[index] = musb_readw(fifo, 0); + index += 2; + } + } else { + if (len >= 2) { + readsw(fifo, dst, len >> 1); + index = len & ~0x01; + } + } + if (len & 0x01) + dst[index] = musb_readb(fifo, 0); + } else { + /* byte aligned */ + 
readsb(fifo, dst, len); + } +} +#endif + +#endif /* normal PIO */ + + +/*-------------------------------------------------------------------------*/ + +/* for high speed test mode; see USB 2.0 spec 7.1.20 */ +static const u8 musb_test_packet[53] = { + /* implicit SYNC then DATA0 to start */ + + /* JKJKJKJK x9 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + /* JJKKJJKK x8 */ + 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, + /* JJJJKKKK x8 */ + 0xee, 0xee, 0xee, 0xee, 0xee, 0xee, 0xee, 0xee, + /* JJJJJJJKKKKKKK x8 */ + 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + /* JJJJJJJK x8 */ + 0x7f, 0xbf, 0xdf, 0xef, 0xf7, 0xfb, 0xfd, + /* JKKKKKKK x10, JK */ + 0xfc, 0x7e, 0xbf, 0xdf, 0xef, 0xf7, 0xfb, 0xfd, 0x7e + + /* implicit CRC16 then EOP to end */ +}; + +void musb_load_testpacket(struct musb *musb) +{ + void __iomem *regs = musb->endpoints[0].regs; + + musb_ep_select(musb->mregs, 0); + musb_write_fifo(musb->control_ep, + sizeof(musb_test_packet), musb_test_packet); + musb_writew(regs, MUSB_CSR0, MUSB_CSR0_TXPKTRDY); +} + +#ifndef __UBOOT__ +/*-------------------------------------------------------------------------*/ + +/* + * Handles OTG hnp timeouts, such as b_ase0_brst + */ +void musb_otg_timer_func(unsigned long data) +{ + struct musb *musb = (struct musb *)data; + unsigned long flags; + + spin_lock_irqsave(&musb->lock, flags); + switch (musb->xceiv->state) { + case OTG_STATE_B_WAIT_ACON: + dev_dbg(musb->controller, "HNP: b_wait_acon timeout; back to b_peripheral\n"); + musb_g_disconnect(musb); + musb->xceiv->state = OTG_STATE_B_PERIPHERAL; + musb->is_active = 0; + break; + case OTG_STATE_A_SUSPEND: + case OTG_STATE_A_WAIT_BCON: + dev_dbg(musb->controller, "HNP: %s timeout\n", + otg_state_string(musb->xceiv->state)); + musb_platform_set_vbus(musb, 0); + musb->xceiv->state = OTG_STATE_A_WAIT_VFALL; + break; + default: + dev_dbg(musb->controller, "HNP: Unhandled mode %s\n", + otg_state_string(musb->xceiv->state)); + } + musb->ignore_disconnect = 0; + spin_unlock_irqrestore(&musb->lock, flags); +} + +/* + * Stops the HNP transition. Caller must take care of locking. + */ +void musb_hnp_stop(struct musb *musb) +{ + struct usb_hcd *hcd = musb_to_hcd(musb); + void __iomem *mbase = musb->mregs; + u8 reg; + + dev_dbg(musb->controller, "HNP: stop from %s\n", otg_state_string(musb->xceiv->state)); + + switch (musb->xceiv->state) { + case OTG_STATE_A_PERIPHERAL: + musb_g_disconnect(musb); + dev_dbg(musb->controller, "HNP: back to %s\n", + otg_state_string(musb->xceiv->state)); + break; + case OTG_STATE_B_HOST: + dev_dbg(musb->controller, "HNP: Disabling HR\n"); + hcd->self.is_b_host = 0; + musb->xceiv->state = OTG_STATE_B_PERIPHERAL; + MUSB_DEV_MODE(musb); + reg = musb_readb(mbase, MUSB_POWER); + reg |= MUSB_POWER_SUSPENDM; + musb_writeb(mbase, MUSB_POWER, reg); + /* REVISIT: Start SESSION_REQUEST here? */ + break; + default: + dev_dbg(musb->controller, "HNP: Stopping in unknown state %s\n", + otg_state_string(musb->xceiv->state)); + } + + /* + * When returning to A state after HNP, avoid hub_port_rebounce(), + * which cause occasional OPT A "Did not receive reset after connect" + * errors. + */ + musb->port1_status &= ~(USB_PORT_STAT_C_CONNECTION << 16); +} +#endif + +/* + * Interrupt Service Routine to record USB "global" interrupts. 
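A quick stand-alone arithmetic check (illustration only, not part of the patch) that the high-speed test packet defined above really is 53 bytes: its byte groups are 9 + 8 + 8 + 12 + 7 + 9, matching the musb_test_packet[53] declaration; the SYNC, DATA0 PID, CRC16 and EOP noted in its comments are added implicitly around this payload.

/* Sum the test-packet byte groups and confirm the 53-byte payload. */
#include <stdio.h>

int main(void)
{
	const int groups[] = { 9, 8, 8, 12, 7, 9 };
	int i, total = 0;

	for (i = 0; i < (int)(sizeof(groups) / sizeof(groups[0])); i++)
		total += groups[i];

	printf("test packet payload: %d bytes\n", total);	/* prints 53 */
	return 0;
}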
+ * Since these do not happen often and signify things of + * paramount importance, it seems OK to check them individually; + * the order of the tests is specified in the manual + * + * @param musb instance pointer + * @param int_usb register contents + * @param devctl + * @param power + */ + +static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb, + u8 devctl, u8 power) +{ +#ifndef __UBOOT__ + struct usb_otg *otg = musb->xceiv->otg; +#endif + irqreturn_t handled = IRQ_NONE; + + dev_dbg(musb->controller, "<== Power=%02x, DevCtl=%02x, int_usb=0x%x\n", power, devctl, + int_usb); + +#ifndef __UBOOT__ + /* in host mode, the peripheral may issue remote wakeup. + * in peripheral mode, the host may resume the link. + * spurious RESUME irqs happen too, paired with SUSPEND. + */ + if (int_usb & MUSB_INTR_RESUME) { + handled = IRQ_HANDLED; + dev_dbg(musb->controller, "RESUME (%s)\n", otg_state_string(musb->xceiv->state)); + + if (devctl & MUSB_DEVCTL_HM) { + void __iomem *mbase = musb->mregs; + + switch (musb->xceiv->state) { + case OTG_STATE_A_SUSPEND: + /* remote wakeup? later, GetPortStatus + * will stop RESUME signaling + */ + + if (power & MUSB_POWER_SUSPENDM) { + /* spurious */ + musb->int_usb &= ~MUSB_INTR_SUSPEND; + dev_dbg(musb->controller, "Spurious SUSPENDM\n"); + break; + } + + power &= ~MUSB_POWER_SUSPENDM; + musb_writeb(mbase, MUSB_POWER, + power | MUSB_POWER_RESUME); + + musb->port1_status |= + (USB_PORT_STAT_C_SUSPEND << 16) + | MUSB_PORT_STAT_RESUME; + musb->rh_timer = jiffies + + msecs_to_jiffies(20); + + musb->xceiv->state = OTG_STATE_A_HOST; + musb->is_active = 1; + usb_hcd_resume_root_hub(musb_to_hcd(musb)); + break; + case OTG_STATE_B_WAIT_ACON: + musb->xceiv->state = OTG_STATE_B_PERIPHERAL; + musb->is_active = 1; + MUSB_DEV_MODE(musb); + break; + default: + WARNING("bogus %s RESUME (%s)\n", + "host", + otg_state_string(musb->xceiv->state)); + } + } else { + switch (musb->xceiv->state) { + case OTG_STATE_A_SUSPEND: + /* possibly DISCONNECT is upcoming */ + musb->xceiv->state = OTG_STATE_A_HOST; + usb_hcd_resume_root_hub(musb_to_hcd(musb)); + break; + case OTG_STATE_B_WAIT_ACON: + case OTG_STATE_B_PERIPHERAL: + /* disconnect while suspended? we may + * not get a disconnect irq... + */ + if ((devctl & MUSB_DEVCTL_VBUS) + != (3 << MUSB_DEVCTL_VBUS_SHIFT) + ) { + musb->int_usb |= MUSB_INTR_DISCONNECT; + musb->int_usb &= ~MUSB_INTR_SUSPEND; + break; + } + musb_g_resume(musb); + break; + case OTG_STATE_B_IDLE: + musb->int_usb &= ~MUSB_INTR_SUSPEND; + break; + default: + WARNING("bogus %s RESUME (%s)\n", + "peripheral", + otg_state_string(musb->xceiv->state)); + } + } + } + + /* see manual for the order of the tests */ + if (int_usb & MUSB_INTR_SESSREQ) { + void __iomem *mbase = musb->mregs; + + if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS + && (devctl & MUSB_DEVCTL_BDEVICE)) { + dev_dbg(musb->controller, "SessReq while on B state\n"); + return IRQ_HANDLED; + } + + dev_dbg(musb->controller, "SESSION_REQUEST (%s)\n", + otg_state_string(musb->xceiv->state)); + + /* IRQ arrives from ID pin sense or (later, if VBUS power + * is removed) SRP. responses are time critical: + * - turn on VBUS (with silicon-specific mechanism) + * - go through A_WAIT_VRISE + * - ... to A_WAIT_BCON. 
+ * a_wait_vrise_tmout triggers VBUS_ERROR transitions + */ + musb_writeb(mbase, MUSB_DEVCTL, MUSB_DEVCTL_SESSION); + musb->ep0_stage = MUSB_EP0_START; + musb->xceiv->state = OTG_STATE_A_IDLE; + MUSB_HST_MODE(musb); + musb_platform_set_vbus(musb, 1); + + handled = IRQ_HANDLED; + } + + if (int_usb & MUSB_INTR_VBUSERROR) { + int ignore = 0; + + /* During connection as an A-Device, we may see a short + * current spikes causing voltage drop, because of cable + * and peripheral capacitance combined with vbus draw. + * (So: less common with truly self-powered devices, where + * vbus doesn't act like a power supply.) + * + * Such spikes are short; usually less than ~500 usec, max + * of ~2 msec. That is, they're not sustained overcurrent + * errors, though they're reported using VBUSERROR irqs. + * + * Workarounds: (a) hardware: use self powered devices. + * (b) software: ignore non-repeated VBUS errors. + * + * REVISIT: do delays from lots of DEBUG_KERNEL checks + * make trouble here, keeping VBUS < 4.4V ? + */ + switch (musb->xceiv->state) { + case OTG_STATE_A_HOST: + /* recovery is dicey once we've gotten past the + * initial stages of enumeration, but if VBUS + * stayed ok at the other end of the link, and + * another reset is due (at least for high speed, + * to redo the chirp etc), it might work OK... + */ + case OTG_STATE_A_WAIT_BCON: + case OTG_STATE_A_WAIT_VRISE: + if (musb->vbuserr_retry) { + void __iomem *mbase = musb->mregs; + + musb->vbuserr_retry--; + ignore = 1; + devctl |= MUSB_DEVCTL_SESSION; + musb_writeb(mbase, MUSB_DEVCTL, devctl); + } else { + musb->port1_status |= + USB_PORT_STAT_OVERCURRENT + | (USB_PORT_STAT_C_OVERCURRENT << 16); + } + break; + default: + break; + } + + dev_dbg(musb->controller, "VBUS_ERROR in %s (%02x, %s), retry #%d, port1 %08x\n", + otg_state_string(musb->xceiv->state), + devctl, + ({ char *s; + switch (devctl & MUSB_DEVCTL_VBUS) { + case 0 << MUSB_DEVCTL_VBUS_SHIFT: + s = "<SessEnd"; break; + case 1 << MUSB_DEVCTL_VBUS_SHIFT: + s = "<AValid"; break; + case 2 << MUSB_DEVCTL_VBUS_SHIFT: + s = "<VBusValid"; break; + /* case 3 << MUSB_DEVCTL_VBUS_SHIFT: */ + default: + s = "VALID"; break; + }; s; }), + VBUSERR_RETRY_COUNT - musb->vbuserr_retry, + musb->port1_status); + + /* go through A_WAIT_VFALL then start a new session */ + if (!ignore) + musb_platform_set_vbus(musb, 0); + handled = IRQ_HANDLED; + } + + if (int_usb & MUSB_INTR_SUSPEND) { + dev_dbg(musb->controller, "SUSPEND (%s) devctl %02x power %02x\n", + otg_state_string(musb->xceiv->state), devctl, power); + handled = IRQ_HANDLED; + + switch (musb->xceiv->state) { + case OTG_STATE_A_PERIPHERAL: + /* We also come here if the cable is removed, since + * this silicon doesn't report ID-no-longer-grounded. + * + * We depend on T(a_wait_bcon) to shut us down, and + * hope users don't do anything dicey during this + * undesired detour through A_WAIT_BCON. + */ + musb_hnp_stop(musb); + usb_hcd_resume_root_hub(musb_to_hcd(musb)); + musb_root_disconnect(musb); + musb_platform_try_idle(musb, jiffies + + msecs_to_jiffies(musb->a_wait_bcon + ? 
: OTG_TIME_A_WAIT_BCON)); + + break; + case OTG_STATE_B_IDLE: + if (!musb->is_active) + break; + case OTG_STATE_B_PERIPHERAL: + musb_g_suspend(musb); + musb->is_active = is_otg_enabled(musb) + && otg->gadget->b_hnp_enable; + if (musb->is_active) { + musb->xceiv->state = OTG_STATE_B_WAIT_ACON; + dev_dbg(musb->controller, "HNP: Setting timer for b_ase0_brst\n"); + mod_timer(&musb->otg_timer, jiffies + + msecs_to_jiffies( + OTG_TIME_B_ASE0_BRST)); + } + break; + case OTG_STATE_A_WAIT_BCON: + if (musb->a_wait_bcon != 0) + musb_platform_try_idle(musb, jiffies + + msecs_to_jiffies(musb->a_wait_bcon)); + break; + case OTG_STATE_A_HOST: + musb->xceiv->state = OTG_STATE_A_SUSPEND; + musb->is_active = is_otg_enabled(musb) + && otg->host->b_hnp_enable; + break; + case OTG_STATE_B_HOST: + /* Transition to B_PERIPHERAL, see 6.8.2.6 p 44 */ + dev_dbg(musb->controller, "REVISIT: SUSPEND as B_HOST\n"); + break; + default: + /* "should not happen" */ + musb->is_active = 0; + break; + } + } +#endif + + if (int_usb & MUSB_INTR_CONNECT) { + struct usb_hcd *hcd = musb_to_hcd(musb); + + handled = IRQ_HANDLED; + musb->is_active = 1; + + musb->ep0_stage = MUSB_EP0_START; + + /* flush endpoints when transitioning from Device Mode */ + if (is_peripheral_active(musb)) { + /* REVISIT HNP; just force disconnect */ + } + musb_writew(musb->mregs, MUSB_INTRTXE, musb->epmask); + musb_writew(musb->mregs, MUSB_INTRRXE, musb->epmask & 0xfffe); + musb_writeb(musb->mregs, MUSB_INTRUSBE, 0xf7); +#ifndef __UBOOT__ + musb->port1_status &= ~(USB_PORT_STAT_LOW_SPEED + |USB_PORT_STAT_HIGH_SPEED + |USB_PORT_STAT_ENABLE + ); + musb->port1_status |= USB_PORT_STAT_CONNECTION + |(USB_PORT_STAT_C_CONNECTION << 16); + + /* high vs full speed is just a guess until after reset */ + if (devctl & MUSB_DEVCTL_LSDEV) + musb->port1_status |= USB_PORT_STAT_LOW_SPEED; + + /* indicate new connection to OTG machine */ + switch (musb->xceiv->state) { + case OTG_STATE_B_PERIPHERAL: + if (int_usb & MUSB_INTR_SUSPEND) { + dev_dbg(musb->controller, "HNP: SUSPEND+CONNECT, now b_host\n"); + int_usb &= ~MUSB_INTR_SUSPEND; + goto b_host; + } else + dev_dbg(musb->controller, "CONNECT as b_peripheral???\n"); + break; + case OTG_STATE_B_WAIT_ACON: + dev_dbg(musb->controller, "HNP: CONNECT, now b_host\n"); +b_host: + musb->xceiv->state = OTG_STATE_B_HOST; + hcd->self.is_b_host = 1; + musb->ignore_disconnect = 0; + del_timer(&musb->otg_timer); + break; + default: + if ((devctl & MUSB_DEVCTL_VBUS) + == (3 << MUSB_DEVCTL_VBUS_SHIFT)) { + musb->xceiv->state = OTG_STATE_A_HOST; + hcd->self.is_b_host = 0; + } + break; + } + + /* poke the root hub */ + MUSB_HST_MODE(musb); + if (hcd->status_urb) + usb_hcd_poll_rh_status(hcd); + else + usb_hcd_resume_root_hub(hcd); + + dev_dbg(musb->controller, "CONNECT (%s) devctl %02x\n", + otg_state_string(musb->xceiv->state), devctl); +#endif + } + +#ifndef __UBOOT__ + if ((int_usb & MUSB_INTR_DISCONNECT) && !musb->ignore_disconnect) { + dev_dbg(musb->controller, "DISCONNECT (%s) as %s, devctl %02x\n", + otg_state_string(musb->xceiv->state), + MUSB_MODE(musb), devctl); + handled = IRQ_HANDLED; + + switch (musb->xceiv->state) { + case OTG_STATE_A_HOST: + case OTG_STATE_A_SUSPEND: + usb_hcd_resume_root_hub(musb_to_hcd(musb)); + musb_root_disconnect(musb); + if (musb->a_wait_bcon != 0 && is_otg_enabled(musb)) + musb_platform_try_idle(musb, jiffies + + msecs_to_jiffies(musb->a_wait_bcon)); + break; + case OTG_STATE_B_HOST: + /* REVISIT this behaves for "real disconnect" + * cases; make sure the other transitions from + * from B_HOST act 
right too. The B_HOST code + * in hnp_stop() is currently not used... + */ + musb_root_disconnect(musb); + musb_to_hcd(musb)->self.is_b_host = 0; + musb->xceiv->state = OTG_STATE_B_PERIPHERAL; + MUSB_DEV_MODE(musb); + musb_g_disconnect(musb); + break; + case OTG_STATE_A_PERIPHERAL: + musb_hnp_stop(musb); + musb_root_disconnect(musb); + /* FALLTHROUGH */ + case OTG_STATE_B_WAIT_ACON: + /* FALLTHROUGH */ + case OTG_STATE_B_PERIPHERAL: + case OTG_STATE_B_IDLE: + musb_g_disconnect(musb); + break; + default: + WARNING("unhandled DISCONNECT transition (%s)\n", + otg_state_string(musb->xceiv->state)); + break; + } + } + + /* mentor saves a bit: bus reset and babble share the same irq. + * only host sees babble; only peripheral sees bus reset. + */ + if (int_usb & MUSB_INTR_RESET) { + handled = IRQ_HANDLED; + if (is_host_capable() && (devctl & MUSB_DEVCTL_HM) != 0) { + /* + * Looks like non-HS BABBLE can be ignored, but + * HS BABBLE is an error condition. For HS the solution + * is to avoid babble in the first place and fix what + * caused BABBLE. When HS BABBLE happens we can only + * stop the session. + */ + if (devctl & (MUSB_DEVCTL_FSDEV | MUSB_DEVCTL_LSDEV)) + dev_dbg(musb->controller, "BABBLE devctl: %02x\n", devctl); + else { + ERR("Stopping host session -- babble\n"); + musb_writeb(musb->mregs, MUSB_DEVCTL, 0); + } + } else if (is_peripheral_capable()) { + dev_dbg(musb->controller, "BUS RESET as %s\n", + otg_state_string(musb->xceiv->state)); + switch (musb->xceiv->state) { + case OTG_STATE_A_SUSPEND: + /* We need to ignore disconnect on suspend + * otherwise tusb 2.0 won't reconnect after a + * power cycle, which breaks otg compliance. + */ + musb->ignore_disconnect = 1; + musb_g_reset(musb); + /* FALLTHROUGH */ + case OTG_STATE_A_WAIT_BCON: /* OPT TD.4.7-900ms */ + /* never use invalid T(a_wait_bcon) */ + dev_dbg(musb->controller, "HNP: in %s, %d msec timeout\n", + otg_state_string(musb->xceiv->state), + TA_WAIT_BCON(musb)); + mod_timer(&musb->otg_timer, jiffies + + msecs_to_jiffies(TA_WAIT_BCON(musb))); + break; + case OTG_STATE_A_PERIPHERAL: + musb->ignore_disconnect = 0; + del_timer(&musb->otg_timer); + musb_g_reset(musb); + break; + case OTG_STATE_B_WAIT_ACON: + dev_dbg(musb->controller, "HNP: RESET (%s), to b_peripheral\n", + otg_state_string(musb->xceiv->state)); + musb->xceiv->state = OTG_STATE_B_PERIPHERAL; + musb_g_reset(musb); + break; + case OTG_STATE_B_IDLE: + musb->xceiv->state = OTG_STATE_B_PERIPHERAL; + /* FALLTHROUGH */ + case OTG_STATE_B_PERIPHERAL: + musb_g_reset(musb); + break; + default: + dev_dbg(musb->controller, "Unhandled BUS RESET as %s\n", + otg_state_string(musb->xceiv->state)); + } + } + } +#endif + +#if 0 +/* REVISIT ... this would be for multiplexing periodic endpoints, or + * supporting transfer phasing to prevent exceeding ISO bandwidth + * limits of a given frame or microframe. + * + * It's not needed for peripheral side, which dedicates endpoints; + * though it _might_ use SOF irqs for other purposes. + * + * And it's not currently needed for host side, which also dedicates + * endpoints, relies on TX/RX interval registers, and isn't claimed + * to support ISO transfers yet. 
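The MUSB_INTR_RESET handling above folds two events into one IRQ: in host mode it means babble, in peripheral mode it means bus reset, and only high-speed babble forces the session to be stopped. A stand-alone decision helper mirroring that logic (illustration only; the DEVCTL bit values are assumptions based on the usual MUSB register layout, not taken from this hunk):

/* Classify the shared reset/babble interrupt from a DEVCTL snapshot. */
#include <stdio.h>

#define DEVCTL_HM	0x04	/* assumed: host mode */
#define DEVCTL_FSDEV	0x40	/* assumed: full-speed device attached */
#define DEVCTL_LSDEV	0x20	/* assumed: low-speed device attached */

static const char *classify_reset_irq(unsigned char devctl)
{
	if (devctl & DEVCTL_HM) {
		if (devctl & (DEVCTL_FSDEV | DEVCTL_LSDEV))
			return "babble (FS/LS device, can be ignored)";
		return "babble (HS, stop the session)";
	}
	return "bus reset (peripheral mode)";
}

int main(void)
{
	printf("%s\n", classify_reset_irq(DEVCTL_HM | DEVCTL_FSDEV));
	printf("%s\n", classify_reset_irq(DEVCTL_HM));
	printf("%s\n", classify_reset_irq(0x00));
	return 0;
}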
+ */ + if (int_usb & MUSB_INTR_SOF) { + void __iomem *mbase = musb->mregs; + struct musb_hw_ep *ep; + u8 epnum; + u16 frame; + + dev_dbg(musb->controller, "START_OF_FRAME\n"); + handled = IRQ_HANDLED; + + /* start any periodic Tx transfers waiting for current frame */ + frame = musb_readw(mbase, MUSB_FRAME); + ep = musb->endpoints; + for (epnum = 1; (epnum < musb->nr_endpoints) + && (musb->epmask >= (1 << epnum)); + epnum++, ep++) { + /* + * FIXME handle framecounter wraps (12 bits) + * eliminate duplicated StartUrb logic + */ + if (ep->dwWaitFrame >= frame) { + ep->dwWaitFrame = 0; + pr_debug("SOF --> periodic TX%s on %d\n", + ep->tx_channel ? " DMA" : "", + epnum); + if (!ep->tx_channel) + musb_h_tx_start(musb, epnum); + else + cppi_hostdma_start(musb, epnum); + } + } /* end of for loop */ + } +#endif + + schedule_work(&musb->irq_work); + + return handled; +} + +/*-------------------------------------------------------------------------*/ + +/* +* Program the HDRC to start (enable interrupts, dma, etc.). +*/ +void musb_start(struct musb *musb) +{ + void __iomem *regs = musb->mregs; + u8 devctl = musb_readb(regs, MUSB_DEVCTL); + + dev_dbg(musb->controller, "<== devctl %02x\n", devctl); + + /* Set INT enable registers, enable interrupts */ + musb_writew(regs, MUSB_INTRTXE, musb->epmask); + musb_writew(regs, MUSB_INTRRXE, musb->epmask & 0xfffe); + musb_writeb(regs, MUSB_INTRUSBE, 0xf7); + + musb_writeb(regs, MUSB_TESTMODE, 0); + + /* put into basic highspeed mode and start session */ + musb_writeb(regs, MUSB_POWER, MUSB_POWER_ISOUPDATE + | MUSB_POWER_HSENAB + /* ENSUSPEND wedges tusb */ + /* | MUSB_POWER_ENSUSPEND */ + ); + + musb->is_active = 0; + devctl = musb_readb(regs, MUSB_DEVCTL); + devctl &= ~MUSB_DEVCTL_SESSION; + + if (is_otg_enabled(musb)) { +#ifndef __UBOOT__ + /* session started after: + * (a) ID-grounded irq, host mode; + * (b) vbus present/connect IRQ, peripheral mode; + * (c) peripheral initiates, using SRP + */ + if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS) + musb->is_active = 1; + else + devctl |= MUSB_DEVCTL_SESSION; +#endif + + } else if (is_host_enabled(musb)) { + /* assume ID pin is hard-wired to ground */ + devctl |= MUSB_DEVCTL_SESSION; + + } else /* peripheral is enabled */ { + if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS) + musb->is_active = 1; + } + musb_platform_enable(musb); + musb_writeb(regs, MUSB_DEVCTL, devctl); +} + + +static void musb_generic_disable(struct musb *musb) +{ + void __iomem *mbase = musb->mregs; + u16 temp; + + /* disable interrupts */ + musb_writeb(mbase, MUSB_INTRUSBE, 0); + musb_writew(mbase, MUSB_INTRTXE, 0); + musb_writew(mbase, MUSB_INTRRXE, 0); + + /* off */ + musb_writeb(mbase, MUSB_DEVCTL, 0); + + /* flush pending interrupts */ + temp = musb_readb(mbase, MUSB_INTRUSB); + temp = musb_readw(mbase, MUSB_INTRTX); + temp = musb_readw(mbase, MUSB_INTRRX); + +} + +/* + * Make the HDRC stop (disable interrupts, etc.); + * reversible by musb_start + * called on gadget driver unregister + * with controller locked, irqs blocked + * acts as a NOP unless some role activated the hardware + */ +void musb_stop(struct musb *musb) +{ + /* stop IRQs, timers, ... */ + musb_platform_disable(musb); + musb_generic_disable(musb); + dev_dbg(musb->controller, "HDRC disabled\n"); + + /* FIXME + * - mark host and/or peripheral drivers unusable/inactive + * - disable DMA (and enable it in HdrcStart) + * - make sure we can musb_start() after musb_stop(); with + * OTG mode, gadget driver module rmmod/modprobe cycles that + * - ... 
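A small stand-alone illustration (not part of the patch) of the interrupt-enable values written by musb_start() and the CONNECT path: epmask carries one bit per configured endpoint, INTRTXE takes it as-is because EP0 lives on the TX side, and INTRRXE drops bit 0 since EP0 has no separate RX interrupt.

/* Derive the INTRTXE/INTRRXE values from an endpoint mask. */
#include <stdio.h>

int main(void)
{
	unsigned int nr_endpoints = 5;		/* ep0 + 4 others, as an example */
	unsigned int epmask = 0, i;

	for (i = 0; i < nr_endpoints; i++)
		epmask |= 1u << i;

	printf("epmask  = 0x%04x\n", epmask);		/* 0x001f */
	printf("INTRTXE = 0x%04x\n", epmask);		/* 0x001f */
	printf("INTRRXE = 0x%04x\n", epmask & 0xfffe);	/* 0x001e, no EP0 RX */
	return 0;
}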
+ */ + musb_platform_try_idle(musb, 0); +} + +#ifndef __UBOOT__ +static void musb_shutdown(struct platform_device *pdev) +{ + struct musb *musb = dev_to_musb(&pdev->dev); + unsigned long flags; + + pm_runtime_get_sync(musb->controller); + + musb_gadget_cleanup(musb); + + spin_lock_irqsave(&musb->lock, flags); + musb_platform_disable(musb); + musb_generic_disable(musb); + spin_unlock_irqrestore(&musb->lock, flags); + + if (!is_otg_enabled(musb) && is_host_enabled(musb)) + usb_remove_hcd(musb_to_hcd(musb)); + musb_writeb(musb->mregs, MUSB_DEVCTL, 0); + musb_platform_exit(musb); + + pm_runtime_put(musb->controller); + /* FIXME power down */ +} +#endif + + +/*-------------------------------------------------------------------------*/ + +/* + * The silicon either has hard-wired endpoint configurations, or else + * "dynamic fifo" sizing. The driver has support for both, though at this + * writing only the dynamic sizing is very well tested. Since we switched + * away from compile-time hardware parameters, we can no longer rely on + * dead code elimination to leave only the relevant one in the object file. + * + * We don't currently use dynamic fifo setup capability to do anything + * more than selecting one of a bunch of predefined configurations. + */ +#if defined(CONFIG_USB_MUSB_TUSB6010) \ + || defined(CONFIG_USB_MUSB_TUSB6010_MODULE) \ + || defined(CONFIG_USB_MUSB_OMAP2PLUS) \ + || defined(CONFIG_USB_MUSB_OMAP2PLUS_MODULE) \ + || defined(CONFIG_USB_MUSB_AM35X) \ + || defined(CONFIG_USB_MUSB_AM35X_MODULE) \ + || defined(CONFIG_USB_MUSB_DSPS) \ + || defined(CONFIG_USB_MUSB_DSPS_MODULE) +static ushort __devinitdata fifo_mode = 4; +#elif defined(CONFIG_USB_MUSB_UX500) \ + || defined(CONFIG_USB_MUSB_UX500_MODULE) +static ushort __devinitdata fifo_mode = 5; +#else +static ushort __devinitdata fifo_mode = 2; +#endif + +/* "modprobe ... fifo_mode=1" etc */ +module_param(fifo_mode, ushort, 0); +MODULE_PARM_DESC(fifo_mode, "initial endpoint configuration"); + +/* + * tables defining fifo_mode values. define more if you like. + * for host side, make sure both halves of ep1 are set up. 
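The "fits in N KB" comments on the tables below come from simple arithmetic: each entry claims its maxpacket bytes of FIFO RAM (doubled for BUF_DOUBLE entries), plus 64 bytes reserved for EP0 by ep0_cfg. A stand-alone check of the fifo_mode 4 table (illustration only; mode 4 has no double-buffered entries):

/* Verify that the mode 4 table consumes exactly 16KB of FIFO RAM. */
#include <stdio.h>

int main(void)
{
	/* eps 1..9: 512-byte TX + 512-byte RX each, plus 64 bytes for EP0 */
	int total = 64 + 9 * (512 + 512);
	/* eps 10..12: 256-byte TX + 64-byte RX each */
	total += 3 * (256 + 64);
	/* eps 13..15: shared RXTX FIFOs of 4096, 1024 and 1024 bytes */
	total += 4096 + 1024 + 1024;

	printf("mode 4 FIFO RAM: %d bytes (16KB = %d)\n", total, 16 * 1024);
	return 0;
}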
+ */ + +/* mode 0 - fits in 2KB */ +static struct musb_fifo_cfg __devinitdata mode_0_cfg[] = { +{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, }, +{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, }, +{ .hw_ep_num = 2, .style = FIFO_RXTX, .maxpacket = 512, }, +{ .hw_ep_num = 3, .style = FIFO_RXTX, .maxpacket = 256, }, +{ .hw_ep_num = 4, .style = FIFO_RXTX, .maxpacket = 256, }, +}; + +/* mode 1 - fits in 4KB */ +static struct musb_fifo_cfg __devinitdata mode_1_cfg[] = { +{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, .mode = BUF_DOUBLE, }, +{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, .mode = BUF_DOUBLE, }, +{ .hw_ep_num = 2, .style = FIFO_RXTX, .maxpacket = 512, .mode = BUF_DOUBLE, }, +{ .hw_ep_num = 3, .style = FIFO_RXTX, .maxpacket = 256, }, +{ .hw_ep_num = 4, .style = FIFO_RXTX, .maxpacket = 256, }, +}; + +/* mode 2 - fits in 4KB */ +static struct musb_fifo_cfg __devinitdata mode_2_cfg[] = { +{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, }, +{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, }, +{ .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 512, }, +{ .hw_ep_num = 2, .style = FIFO_RX, .maxpacket = 512, }, +{ .hw_ep_num = 3, .style = FIFO_RXTX, .maxpacket = 256, }, +{ .hw_ep_num = 4, .style = FIFO_RXTX, .maxpacket = 256, }, +}; + +/* mode 3 - fits in 4KB */ +static struct musb_fifo_cfg __devinitdata mode_3_cfg[] = { +{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, .mode = BUF_DOUBLE, }, +{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, .mode = BUF_DOUBLE, }, +{ .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 512, }, +{ .hw_ep_num = 2, .style = FIFO_RX, .maxpacket = 512, }, +{ .hw_ep_num = 3, .style = FIFO_RXTX, .maxpacket = 256, }, +{ .hw_ep_num = 4, .style = FIFO_RXTX, .maxpacket = 256, }, +}; + +/* mode 4 - fits in 16KB */ +static struct musb_fifo_cfg __devinitdata mode_4_cfg[] = { +{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, }, +{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, }, +{ .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 512, }, +{ .hw_ep_num = 2, .style = FIFO_RX, .maxpacket = 512, }, +{ .hw_ep_num = 3, .style = FIFO_TX, .maxpacket = 512, }, +{ .hw_ep_num = 3, .style = FIFO_RX, .maxpacket = 512, }, +{ .hw_ep_num = 4, .style = FIFO_TX, .maxpacket = 512, }, +{ .hw_ep_num = 4, .style = FIFO_RX, .maxpacket = 512, }, +{ .hw_ep_num = 5, .style = FIFO_TX, .maxpacket = 512, }, +{ .hw_ep_num = 5, .style = FIFO_RX, .maxpacket = 512, }, +{ .hw_ep_num = 6, .style = FIFO_TX, .maxpacket = 512, }, +{ .hw_ep_num = 6, .style = FIFO_RX, .maxpacket = 512, }, +{ .hw_ep_num = 7, .style = FIFO_TX, .maxpacket = 512, }, +{ .hw_ep_num = 7, .style = FIFO_RX, .maxpacket = 512, }, +{ .hw_ep_num = 8, .style = FIFO_TX, .maxpacket = 512, }, +{ .hw_ep_num = 8, .style = FIFO_RX, .maxpacket = 512, }, +{ .hw_ep_num = 9, .style = FIFO_TX, .maxpacket = 512, }, +{ .hw_ep_num = 9, .style = FIFO_RX, .maxpacket = 512, }, +{ .hw_ep_num = 10, .style = FIFO_TX, .maxpacket = 256, }, +{ .hw_ep_num = 10, .style = FIFO_RX, .maxpacket = 64, }, +{ .hw_ep_num = 11, .style = FIFO_TX, .maxpacket = 256, }, +{ .hw_ep_num = 11, .style = FIFO_RX, .maxpacket = 64, }, +{ .hw_ep_num = 12, .style = FIFO_TX, .maxpacket = 256, }, +{ .hw_ep_num = 12, .style = FIFO_RX, .maxpacket = 64, }, +{ .hw_ep_num = 13, .style = FIFO_RXTX, .maxpacket = 4096, }, +{ .hw_ep_num = 14, .style = FIFO_RXTX, .maxpacket = 1024, }, +{ .hw_ep_num = 15, .style = FIFO_RXTX, .maxpacket = 1024, }, +}; + +/* mode 5 - fits in 8KB */ +static struct musb_fifo_cfg __devinitdata mode_5_cfg[] = { +{ 
.hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, }, +{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, }, +{ .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 512, }, +{ .hw_ep_num = 2, .style = FIFO_RX, .maxpacket = 512, }, +{ .hw_ep_num = 3, .style = FIFO_TX, .maxpacket = 512, }, +{ .hw_ep_num = 3, .style = FIFO_RX, .maxpacket = 512, }, +{ .hw_ep_num = 4, .style = FIFO_TX, .maxpacket = 512, }, +{ .hw_ep_num = 4, .style = FIFO_RX, .maxpacket = 512, }, +{ .hw_ep_num = 5, .style = FIFO_TX, .maxpacket = 512, }, +{ .hw_ep_num = 5, .style = FIFO_RX, .maxpacket = 512, }, +{ .hw_ep_num = 6, .style = FIFO_TX, .maxpacket = 32, }, +{ .hw_ep_num = 6, .style = FIFO_RX, .maxpacket = 32, }, +{ .hw_ep_num = 7, .style = FIFO_TX, .maxpacket = 32, }, +{ .hw_ep_num = 7, .style = FIFO_RX, .maxpacket = 32, }, +{ .hw_ep_num = 8, .style = FIFO_TX, .maxpacket = 32, }, +{ .hw_ep_num = 8, .style = FIFO_RX, .maxpacket = 32, }, +{ .hw_ep_num = 9, .style = FIFO_TX, .maxpacket = 32, }, +{ .hw_ep_num = 9, .style = FIFO_RX, .maxpacket = 32, }, +{ .hw_ep_num = 10, .style = FIFO_TX, .maxpacket = 32, }, +{ .hw_ep_num = 10, .style = FIFO_RX, .maxpacket = 32, }, +{ .hw_ep_num = 11, .style = FIFO_TX, .maxpacket = 32, }, +{ .hw_ep_num = 11, .style = FIFO_RX, .maxpacket = 32, }, +{ .hw_ep_num = 12, .style = FIFO_TX, .maxpacket = 32, }, +{ .hw_ep_num = 12, .style = FIFO_RX, .maxpacket = 32, }, +{ .hw_ep_num = 13, .style = FIFO_RXTX, .maxpacket = 512, }, +{ .hw_ep_num = 14, .style = FIFO_RXTX, .maxpacket = 1024, }, +{ .hw_ep_num = 15, .style = FIFO_RXTX, .maxpacket = 1024, }, +}; + +/* + * configure a fifo; for non-shared endpoints, this may be called + * once for a tx fifo and once for an rx fifo. + * + * returns negative errno or offset for next fifo. + */ +static int __devinit +fifo_setup(struct musb *musb, struct musb_hw_ep *hw_ep, + const struct musb_fifo_cfg *cfg, u16 offset) +{ + void __iomem *mbase = musb->mregs; + int size = 0; + u16 maxpacket = cfg->maxpacket; + u16 c_off = offset >> 3; + u8 c_size; + + /* expect hw_ep has already been zero-initialized */ + + size = ffs(max(maxpacket, (u16) 8)) - 1; + maxpacket = 1 << size; + + c_size = size - 3; + if (cfg->mode == BUF_DOUBLE) { + if ((offset + (maxpacket << 1)) > + (1 << (musb->config->ram_bits + 2))) + return -EMSGSIZE; + c_size |= MUSB_FIFOSZ_DPB; + } else { + if ((offset + maxpacket) > (1 << (musb->config->ram_bits + 2))) + return -EMSGSIZE; + } + + /* configure the FIFO */ + musb_writeb(mbase, MUSB_INDEX, hw_ep->epnum); + + /* EP0 reserved endpoint for control, bidirectional; + * EP1 reserved for bulk, two unidirection halves. + */ + if (hw_ep->epnum == 1) + musb->bulk_ep = hw_ep; + /* REVISIT error check: be sure ep0 can both rx and tx ... 
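A stand-alone model (illustration only, not the driver code) of the size math in fifo_setup() above: the hardware wants a size code of log2(maxpacket) - 3, an offset in 8-byte units, and a double-buffer flag that doubles the RAM consumed; the total is checked against 1 << (ram_bits + 2). The FIFOSZ_DPB value and the ram_bits example below are assumptions for the sketch.

/* Model the FIFOSZ/FIFOADD encoding and the FIFO RAM overflow check. */
#include <stdio.h>

#define FIFOSZ_DPB	0x10	/* assumed double-packet-buffer flag */

static int model_fifo_setup(int maxpacket, int dbl, int offset, int ram_bits)
{
	int size = 0, c_size, footprint;

	while ((1 << (size + 1)) <= maxpacket)	/* log2; maxpacket is a power of 2 */
		size++;

	c_size = size - 3;
	if (dbl)
		c_size |= FIFOSZ_DPB;

	footprint = maxpacket << (dbl ? 1 : 0);
	if (offset + footprint > (1 << (ram_bits + 2)))
		return -1;			/* would overflow FIFO RAM */

	printf("maxpacket %4d: c_size 0x%02x, c_off 0x%03x, next offset %d\n",
	       maxpacket, c_size, offset >> 3, offset + footprint);
	return offset + footprint;
}

int main(void)
{
	int off = 64;			/* after the fixed 64-byte EP0 FIFO */
	int ram_bits = 12;		/* 1 << 14 = 16KB of FIFO RAM, say */

	off = model_fifo_setup(512, 0, off, ram_bits);	/* ep1 TX */
	off = model_fifo_setup(512, 1, off, ram_bits);	/* ep1 RX, double buffered */
	return off < 0;
}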
*/ + switch (cfg->style) { + case FIFO_TX: + musb_write_txfifosz(mbase, c_size); + musb_write_txfifoadd(mbase, c_off); + hw_ep->tx_double_buffered = !!(c_size & MUSB_FIFOSZ_DPB); + hw_ep->max_packet_sz_tx = maxpacket; + break; + case FIFO_RX: + musb_write_rxfifosz(mbase, c_size); + musb_write_rxfifoadd(mbase, c_off); + hw_ep->rx_double_buffered = !!(c_size & MUSB_FIFOSZ_DPB); + hw_ep->max_packet_sz_rx = maxpacket; + break; + case FIFO_RXTX: + musb_write_txfifosz(mbase, c_size); + musb_write_txfifoadd(mbase, c_off); + hw_ep->rx_double_buffered = !!(c_size & MUSB_FIFOSZ_DPB); + hw_ep->max_packet_sz_rx = maxpacket; + + musb_write_rxfifosz(mbase, c_size); + musb_write_rxfifoadd(mbase, c_off); + hw_ep->tx_double_buffered = hw_ep->rx_double_buffered; + hw_ep->max_packet_sz_tx = maxpacket; + + hw_ep->is_shared_fifo = true; + break; + } + + /* NOTE rx and tx endpoint irqs aren't managed separately, + * which happens to be ok + */ + musb->epmask |= (1 << hw_ep->epnum); + + return offset + (maxpacket << ((c_size & MUSB_FIFOSZ_DPB) ? 1 : 0)); +} + +static struct musb_fifo_cfg __devinitdata ep0_cfg = { + .style = FIFO_RXTX, .maxpacket = 64, +}; + +static int __devinit ep_config_from_table(struct musb *musb) +{ + const struct musb_fifo_cfg *cfg; + unsigned i, n; + int offset; + struct musb_hw_ep *hw_ep = musb->endpoints; + + if (musb->config->fifo_cfg) { + cfg = musb->config->fifo_cfg; + n = musb->config->fifo_cfg_size; + goto done; + } + + switch (fifo_mode) { + default: + fifo_mode = 0; + /* FALLTHROUGH */ + case 0: + cfg = mode_0_cfg; + n = ARRAY_SIZE(mode_0_cfg); + break; + case 1: + cfg = mode_1_cfg; + n = ARRAY_SIZE(mode_1_cfg); + break; + case 2: + cfg = mode_2_cfg; + n = ARRAY_SIZE(mode_2_cfg); + break; + case 3: + cfg = mode_3_cfg; + n = ARRAY_SIZE(mode_3_cfg); + break; + case 4: + cfg = mode_4_cfg; + n = ARRAY_SIZE(mode_4_cfg); + break; + case 5: + cfg = mode_5_cfg; + n = ARRAY_SIZE(mode_5_cfg); + break; + } + + printk(KERN_DEBUG "%s: setup fifo_mode %d\n", + musb_driver_name, fifo_mode); + + +done: + offset = fifo_setup(musb, hw_ep, &ep0_cfg, 0); + /* assert(offset > 0) */ + + /* NOTE: for RTL versions >= 1.400 EPINFO and RAMINFO would + * be better than static musb->config->num_eps and DYN_FIFO_SIZE... 
+ */ + + for (i = 0; i < n; i++) { + u8 epn = cfg->hw_ep_num; + + if (epn >= musb->config->num_eps) { + pr_debug("%s: invalid ep %d\n", + musb_driver_name, epn); + return -EINVAL; + } + offset = fifo_setup(musb, hw_ep + epn, cfg++, offset); + if (offset < 0) { + pr_debug("%s: mem overrun, ep %d\n", + musb_driver_name, epn); + return -EINVAL; + } + epn++; + musb->nr_endpoints = max(epn, musb->nr_endpoints); + } + + printk(KERN_DEBUG "%s: %d/%d max ep, %d/%d memory\n", + musb_driver_name, + n + 1, musb->config->num_eps * 2 - 1, + offset, (1 << (musb->config->ram_bits + 2))); + + if (!musb->bulk_ep) { + pr_debug("%s: missing bulk\n", musb_driver_name); + return -EINVAL; + } + + return 0; +} + + +/* + * ep_config_from_hw - when MUSB_C_DYNFIFO_DEF is false + * @param musb the controller + */ +static int __devinit ep_config_from_hw(struct musb *musb) +{ + u8 epnum = 0; + struct musb_hw_ep *hw_ep; + void *mbase = musb->mregs; + int ret = 0; + + dev_dbg(musb->controller, "<== static silicon ep config\n"); + + /* FIXME pick up ep0 maxpacket size */ + + for (epnum = 1; epnum < musb->config->num_eps; epnum++) { + musb_ep_select(mbase, epnum); + hw_ep = musb->endpoints + epnum; + + ret = musb_read_fifosize(musb, hw_ep, epnum); + if (ret < 0) + break; + + /* FIXME set up hw_ep->{rx,tx}_double_buffered */ + + /* pick an RX/TX endpoint for bulk */ + if (hw_ep->max_packet_sz_tx < 512 + || hw_ep->max_packet_sz_rx < 512) + continue; + + /* REVISIT: this algorithm is lazy, we should at least + * try to pick a double buffered endpoint. + */ + if (musb->bulk_ep) + continue; + musb->bulk_ep = hw_ep; + } + + if (!musb->bulk_ep) { + pr_debug("%s: missing bulk\n", musb_driver_name); + return -EINVAL; + } + + return 0; +} + +enum { MUSB_CONTROLLER_MHDRC, MUSB_CONTROLLER_HDRC, }; + +/* Initialize MUSB (M)HDRC part of the USB hardware subsystem; + * configure endpoints, or take their config from silicon + */ +static int __devinit musb_core_init(u16 musb_type, struct musb *musb) +{ + u8 reg; + char *type; + char aInfo[90], aRevision[32], aDate[12]; + void __iomem *mbase = musb->mregs; + int status = 0; + int i; + + /* log core options (read using indexed model) */ + reg = musb_read_configdata(mbase); + + strcpy(aInfo, (reg & MUSB_CONFIGDATA_UTMIDW) ? "UTMI-16" : "UTMI-8"); + if (reg & MUSB_CONFIGDATA_DYNFIFO) { + strcat(aInfo, ", dyn FIFOs"); + musb->dyn_fifo = true; + } + if (reg & MUSB_CONFIGDATA_MPRXE) { + strcat(aInfo, ", bulk combine"); + musb->bulk_combine = true; + } + if (reg & MUSB_CONFIGDATA_MPTXE) { + strcat(aInfo, ", bulk split"); + musb->bulk_split = true; + } + if (reg & MUSB_CONFIGDATA_HBRXE) { + strcat(aInfo, ", HB-ISO Rx"); + musb->hb_iso_rx = true; + } + if (reg & MUSB_CONFIGDATA_HBTXE) { + strcat(aInfo, ", HB-ISO Tx"); + musb->hb_iso_tx = true; + } + if (reg & MUSB_CONFIGDATA_SOFTCONE) + strcat(aInfo, ", SoftConn"); + + printk(KERN_DEBUG "%s: ConfigData=0x%02x (%s)\n", + musb_driver_name, reg, aInfo); + + aDate[0] = 0; + if (MUSB_CONTROLLER_MHDRC == musb_type) { + musb->is_multipoint = 1; + type = "M"; + } else { + musb->is_multipoint = 0; + type = ""; +#ifndef CONFIG_USB_OTG_BLACKLIST_HUB + printk(KERN_ERR + "%s: kernel must blacklist external hubs\n", + musb_driver_name); +#endif + } + + /* log release info */ + musb->hwvers = musb_read_hwvers(mbase); + snprintf(aRevision, 32, "%d.%d%s", MUSB_HWVERS_MAJOR(musb->hwvers), + MUSB_HWVERS_MINOR(musb->hwvers), + (musb->hwvers & MUSB_HWVERS_RC) ? 
"RC" : ""); + printk(KERN_DEBUG "%s: %sHDRC RTL version %s %s\n", + musb_driver_name, type, aRevision, aDate); + + /* configure ep0 */ + musb_configure_ep0(musb); + + /* discover endpoint configuration */ + musb->nr_endpoints = 1; + musb->epmask = 1; + + if (musb->dyn_fifo) + status = ep_config_from_table(musb); + else + status = ep_config_from_hw(musb); + + if (status < 0) + return status; + + /* finish init, and print endpoint config */ + for (i = 0; i < musb->nr_endpoints; i++) { + struct musb_hw_ep *hw_ep = musb->endpoints + i; + + hw_ep->fifo = MUSB_FIFO_OFFSET(i) + mbase; +#if defined(CONFIG_USB_MUSB_TUSB6010) || defined (CONFIG_USB_MUSB_TUSB6010_MODULE) + hw_ep->fifo_async = musb->async + 0x400 + MUSB_FIFO_OFFSET(i); + hw_ep->fifo_sync = musb->sync + 0x400 + MUSB_FIFO_OFFSET(i); + hw_ep->fifo_sync_va = + musb->sync_va + 0x400 + MUSB_FIFO_OFFSET(i); + + if (i == 0) + hw_ep->conf = mbase - 0x400 + TUSB_EP0_CONF; + else + hw_ep->conf = mbase + 0x400 + (((i - 1) & 0xf) << 2); +#endif + + hw_ep->regs = MUSB_EP_OFFSET(i, 0) + mbase; + hw_ep->target_regs = musb_read_target_reg_base(i, mbase); + hw_ep->rx_reinit = 1; + hw_ep->tx_reinit = 1; + + if (hw_ep->max_packet_sz_tx) { + dev_dbg(musb->controller, + "%s: hw_ep %d%s, %smax %d\n", + musb_driver_name, i, + hw_ep->is_shared_fifo ? "shared" : "tx", + hw_ep->tx_double_buffered + ? "doublebuffer, " : "", + hw_ep->max_packet_sz_tx); + } + if (hw_ep->max_packet_sz_rx && !hw_ep->is_shared_fifo) { + dev_dbg(musb->controller, + "%s: hw_ep %d%s, %smax %d\n", + musb_driver_name, i, + "rx", + hw_ep->rx_double_buffered + ? "doublebuffer, " : "", + hw_ep->max_packet_sz_rx); + } + if (!(hw_ep->max_packet_sz_tx || hw_ep->max_packet_sz_rx)) + dev_dbg(musb->controller, "hw_ep %d not configured\n", i); + } + + return 0; +} + +/*-------------------------------------------------------------------------*/ + +#if defined(CONFIG_SOC_OMAP2430) || defined(CONFIG_SOC_OMAP3430) || \ + defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_ARCH_U8500) + +static irqreturn_t generic_interrupt(int irq, void *__hci) +{ + unsigned long flags; + irqreturn_t retval = IRQ_NONE; + struct musb *musb = __hci; + + spin_lock_irqsave(&musb->lock, flags); + + musb->int_usb = musb_readb(musb->mregs, MUSB_INTRUSB); + musb->int_tx = musb_readw(musb->mregs, MUSB_INTRTX); + musb->int_rx = musb_readw(musb->mregs, MUSB_INTRRX); + + if (musb->int_usb || musb->int_tx || musb->int_rx) + retval = musb_interrupt(musb); + + spin_unlock_irqrestore(&musb->lock, flags); + + return retval; +} + +#else +#define generic_interrupt NULL +#endif + +/* + * handle all the irqs defined by the HDRC core. for now we expect: other + * irq sources (phy, dma, etc) will be handled first, musb->int_* values + * will be assigned, and the irq will already have been acked. + * + * called in irq context with spinlock held, irqs blocked + */ +irqreturn_t musb_interrupt(struct musb *musb) +{ + irqreturn_t retval = IRQ_NONE; + u8 devctl, power; + int ep_num; + u32 reg; + + devctl = musb_readb(musb->mregs, MUSB_DEVCTL); + power = musb_readb(musb->mregs, MUSB_POWER); + + dev_dbg(musb->controller, "** IRQ %s usb%04x tx%04x rx%04x\n", + (devctl & MUSB_DEVCTL_HM) ? 
"host" : "peripheral", + musb->int_usb, musb->int_tx, musb->int_rx); + + /* the core can interrupt us for multiple reasons; docs have + * a generic interrupt flowchart to follow + */ + if (musb->int_usb) + retval |= musb_stage0_irq(musb, musb->int_usb, + devctl, power); + + /* "stage 1" is handling endpoint irqs */ + + /* handle endpoint 0 first */ + if (musb->int_tx & 1) { + if (devctl & MUSB_DEVCTL_HM) { + if (is_host_capable()) + retval |= musb_h_ep0_irq(musb); + } else { + if (is_peripheral_capable()) + retval |= musb_g_ep0_irq(musb); + } + } + + /* RX on endpoints 1-15 */ + reg = musb->int_rx >> 1; + ep_num = 1; + while (reg) { + if (reg & 1) { + /* musb_ep_select(musb->mregs, ep_num); */ + /* REVISIT just retval = ep->rx_irq(...) */ + retval = IRQ_HANDLED; + if (devctl & MUSB_DEVCTL_HM) { + if (is_host_capable()) + musb_host_rx(musb, ep_num); + } else { + if (is_peripheral_capable()) + musb_g_rx(musb, ep_num); + } + } + + reg >>= 1; + ep_num++; + } + + /* TX on endpoints 1-15 */ + reg = musb->int_tx >> 1; + ep_num = 1; + while (reg) { + if (reg & 1) { + /* musb_ep_select(musb->mregs, ep_num); */ + /* REVISIT just retval |= ep->tx_irq(...) */ + retval = IRQ_HANDLED; + if (devctl & MUSB_DEVCTL_HM) { + if (is_host_capable()) + musb_host_tx(musb, ep_num); + } else { + if (is_peripheral_capable()) + musb_g_tx(musb, ep_num); + } + } + reg >>= 1; + ep_num++; + } + + return retval; +} +EXPORT_SYMBOL_GPL(musb_interrupt); + +#ifndef CONFIG_MUSB_PIO_ONLY +static bool __devinitdata use_dma = 1; + +/* "modprobe ... use_dma=0" etc */ +module_param(use_dma, bool, 0); +MODULE_PARM_DESC(use_dma, "enable/disable use of DMA"); + +void musb_dma_completion(struct musb *musb, u8 epnum, u8 transmit) +{ + u8 devctl = musb_readb(musb->mregs, MUSB_DEVCTL); + + /* called with controller lock already held */ + + if (!epnum) { +#ifndef CONFIG_USB_TUSB_OMAP_DMA + if (!is_cppi_enabled()) { + /* endpoint 0 */ + if (devctl & MUSB_DEVCTL_HM) + musb_h_ep0_irq(musb); + else + musb_g_ep0_irq(musb); + } +#endif + } else { + /* endpoints 1..15 */ + if (transmit) { + if (devctl & MUSB_DEVCTL_HM) { + if (is_host_capable()) + musb_host_tx(musb, epnum); + } else { + if (is_peripheral_capable()) + musb_g_tx(musb, epnum); + } + } else { + /* receive */ + if (devctl & MUSB_DEVCTL_HM) { + if (is_host_capable()) + musb_host_rx(musb, epnum); + } else { + if (is_peripheral_capable()) + musb_g_rx(musb, epnum); + } + } + } +} +EXPORT_SYMBOL_GPL(musb_dma_completion); + +#else +#define use_dma 0 +#endif + +/*-------------------------------------------------------------------------*/ + +#ifdef CONFIG_SYSFS + +static ssize_t +musb_mode_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct musb *musb = dev_to_musb(dev); + unsigned long flags; + int ret = -EINVAL; + + spin_lock_irqsave(&musb->lock, flags); + ret = sprintf(buf, "%s\n", otg_state_string(musb->xceiv->state)); + spin_unlock_irqrestore(&musb->lock, flags); + + return ret; +} + +static ssize_t +musb_mode_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t n) +{ + struct musb *musb = dev_to_musb(dev); + unsigned long flags; + int status; + + spin_lock_irqsave(&musb->lock, flags); + if (sysfs_streq(buf, "host")) + status = musb_platform_set_mode(musb, MUSB_HOST); + else if (sysfs_streq(buf, "peripheral")) + status = musb_platform_set_mode(musb, MUSB_PERIPHERAL); + else if (sysfs_streq(buf, "otg")) + status = musb_platform_set_mode(musb, MUSB_OTG); + else + status = -EINVAL; + spin_unlock_irqrestore(&musb->lock, flags); + + 
return (status == 0) ? n : status; +} +static DEVICE_ATTR(mode, 0644, musb_mode_show, musb_mode_store); + +static ssize_t +musb_vbus_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t n) +{ + struct musb *musb = dev_to_musb(dev); + unsigned long flags; + unsigned long val; + + if (sscanf(buf, "%lu", &val) < 1) { + dev_err(dev, "Invalid VBUS timeout ms value\n"); + return -EINVAL; + } + + spin_lock_irqsave(&musb->lock, flags); + /* force T(a_wait_bcon) to be zero/unlimited *OR* valid */ + musb->a_wait_bcon = val ? max_t(int, val, OTG_TIME_A_WAIT_BCON) : 0 ; + if (musb->xceiv->state == OTG_STATE_A_WAIT_BCON) + musb->is_active = 0; + musb_platform_try_idle(musb, jiffies + msecs_to_jiffies(val)); + spin_unlock_irqrestore(&musb->lock, flags); + + return n; +} + +static ssize_t +musb_vbus_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct musb *musb = dev_to_musb(dev); + unsigned long flags; + unsigned long val; + int vbus; + + spin_lock_irqsave(&musb->lock, flags); + val = musb->a_wait_bcon; + /* FIXME get_vbus_status() is normally #defined as false... + * and is effectively TUSB-specific. + */ + vbus = musb_platform_get_vbus_status(musb); + spin_unlock_irqrestore(&musb->lock, flags); + + return sprintf(buf, "Vbus %s, timeout %lu msec\n", + vbus ? "on" : "off", val); +} +static DEVICE_ATTR(vbus, 0644, musb_vbus_show, musb_vbus_store); + +/* Gadget drivers can't know that a host is connected so they might want + * to start SRP, but users can. This allows userspace to trigger SRP. + */ +static ssize_t +musb_srp_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t n) +{ + struct musb *musb = dev_to_musb(dev); + unsigned short srp; + + if (sscanf(buf, "%hu", &srp) != 1 + || (srp != 1)) { + dev_err(dev, "SRP: Value must be 1\n"); + return -EINVAL; + } + + if (srp == 1) + musb_g_wakeup(musb); + + return n; +} +static DEVICE_ATTR(srp, 0644, NULL, musb_srp_store); + +static struct attribute *musb_attributes[] = { + &dev_attr_mode.attr, + &dev_attr_vbus.attr, + &dev_attr_srp.attr, + NULL +}; + +static const struct attribute_group musb_attr_group = { + .attrs = musb_attributes, +}; + +#endif /* sysfs */ + +#ifndef __UBOOT__ +/* Only used to provide driver mode change events */ +static void musb_irq_work(struct work_struct *data) +{ + struct musb *musb = container_of(data, struct musb, irq_work); + static int old_state; + + if (musb->xceiv->state != old_state) { + old_state = musb->xceiv->state; + sysfs_notify(&musb->controller->kobj, NULL, "mode"); + } +} +#endif + +/* -------------------------------------------------------------------------- + * Init support + */ + +static struct musb *__devinit +allocate_instance(struct device *dev, + struct musb_hdrc_config *config, void __iomem *mbase) +{ + struct musb *musb; + struct musb_hw_ep *ep; + int epnum; +#ifndef __UBOOT__ + struct usb_hcd *hcd; + + hcd = usb_create_hcd(&musb_hc_driver, dev, dev_name(dev)); + if (!hcd) + return NULL; + /* usbcore sets dev->driver_data to hcd, and sometimes uses that... 
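+ * (in this path the struct musb is carved out of the hcd's private + * data, so hcd_to_musb() below just points into that allocation)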
*/ + + musb = hcd_to_musb(hcd); +#else + musb = calloc(1, sizeof(*musb)); + if (!musb) + return NULL; +#endif + INIT_LIST_HEAD(&musb->control); + INIT_LIST_HEAD(&musb->in_bulk); + INIT_LIST_HEAD(&musb->out_bulk); + +#ifndef __UBOOT__ + hcd->uses_new_polling = 1; + hcd->has_tt = 1; +#endif + + musb->vbuserr_retry = VBUSERR_RETRY_COUNT; + musb->a_wait_bcon = OTG_TIME_A_WAIT_BCON; + dev_set_drvdata(dev, musb); + musb->mregs = mbase; + musb->ctrl_base = mbase; + musb->nIrq = -ENODEV; + musb->config = config; + BUG_ON(musb->config->num_eps > MUSB_C_NUM_EPS); + for (epnum = 0, ep = musb->endpoints; + epnum < musb->config->num_eps; + epnum++, ep++) { + ep->musb = musb; + ep->epnum = epnum; + } + + musb->controller = dev; + + return musb; +} + +static void musb_free(struct musb *musb) +{ + /* this has multiple entry modes. it handles fault cleanup after + * probe(), where things may be partially set up, as well as rmmod + * cleanup after everything's been de-activated. + */ + +#ifdef CONFIG_SYSFS + sysfs_remove_group(&musb->controller->kobj, &musb_attr_group); +#endif + + if (musb->nIrq >= 0) { + if (musb->irq_wake) + disable_irq_wake(musb->nIrq); + free_irq(musb->nIrq, musb); + } + if (is_dma_capable() && musb->dma_controller) { + struct dma_controller *c = musb->dma_controller; + + (void) c->stop(c); + dma_controller_destroy(c); + } + + kfree(musb); +} + +/* + * Perform generic per-controller initialization. + * + * @pDevice: the controller (already clocked, etc) + * @nIrq: irq + * @mregs: virtual address of controller registers, + * not yet corrected for platform-specific offsets + */ +#ifndef __UBOOT__ +static int __devinit +musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl) +#else +struct musb * +musb_init_controller(struct musb_hdrc_platform_data *plat, struct device *dev, + void *ctrl) +#endif +{ + int status; + struct musb *musb; +#ifndef __UBOOT__ + struct musb_hdrc_platform_data *plat = dev->platform_data; +#else + int nIrq = 0; +#endif + + /* The driver might handle more features than the board; OK. + * Fail when the board needs a feature that's not enabled. + */ + if (!plat) { + dev_dbg(dev, "no platform_data?\n"); + status = -ENODEV; + goto fail0; + } + + /* allocate */ + musb = allocate_instance(dev, plat->config, ctrl); + if (!musb) { + status = -ENOMEM; + goto fail0; + } + + pm_runtime_use_autosuspend(musb->controller); + pm_runtime_set_autosuspend_delay(musb->controller, 200); + pm_runtime_enable(musb->controller); + + spin_lock_init(&musb->lock); + musb->board_mode = plat->mode; + musb->board_set_power = plat->set_power; + musb->min_power = plat->min_power; + musb->ops = plat->platform_ops; + + /* The musb_platform_init() call: + * - adjusts musb->mregs and musb->isr if needed, + * - may initialize an integrated tranceiver + * - initializes musb->xceiv, usually by otg_get_phy() + * - stops powering VBUS + * + * There are various transceiver configurations. Blackfin, + * DaVinci, TUSB60x0, and others integrate them. OMAP3 uses + * external/discrete ones in various flavors (twl4030 family, + * isp1504, non-OTG, etc) mostly hooking up through ULPI. 
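+ * A glue layer may install its own ISR from init(), overriding the + * generic_interrupt default assigned just below.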
+ */ + musb->isr = generic_interrupt; + status = musb_platform_init(musb); + if (status < 0) + goto fail1; + + if (!musb->isr) { + status = -ENODEV; + goto fail2; + } + +#ifndef __UBOOT__ + if (!musb->xceiv->io_ops) { + musb->xceiv->io_dev = musb->controller; + musb->xceiv->io_priv = musb->mregs; + musb->xceiv->io_ops = &musb_ulpi_access; + } +#endif + + pm_runtime_get_sync(musb->controller); + +#ifndef CONFIG_MUSB_PIO_ONLY + if (use_dma && dev->dma_mask) { + struct dma_controller *c; + + c = dma_controller_create(musb, musb->mregs); + musb->dma_controller = c; + if (c) + (void) c->start(c); + } +#endif +#ifndef __UBOOT__ + /* ideally this would be abstracted in platform setup */ + if (!is_dma_capable() || !musb->dma_controller) + dev->dma_mask = NULL; +#endif + + /* be sure interrupts are disabled before connecting ISR */ + musb_platform_disable(musb); + musb_generic_disable(musb); + + /* setup musb parts of the core (especially endpoints) */ + status = musb_core_init(plat->config->multipoint + ? MUSB_CONTROLLER_MHDRC + : MUSB_CONTROLLER_HDRC, musb); + if (status < 0) + goto fail3; + + setup_timer(&musb->otg_timer, musb_otg_timer_func, (unsigned long) musb); + + /* Init IRQ workqueue before request_irq */ + INIT_WORK(&musb->irq_work, musb_irq_work); + + /* attach to the IRQ */ + if (request_irq(nIrq, musb->isr, 0, dev_name(dev), musb)) { + dev_err(dev, "request_irq %d failed!\n", nIrq); + status = -ENODEV; + goto fail3; + } + musb->nIrq = nIrq; +/* FIXME this handles wakeup irqs wrong */ + if (enable_irq_wake(nIrq) == 0) { + musb->irq_wake = 1; + device_init_wakeup(dev, 1); + } else { + musb->irq_wake = 0; + } + +#ifndef __UBOOT__ + /* host side needs more setup */ + if (is_host_enabled(musb)) { + struct usb_hcd *hcd = musb_to_hcd(musb); + + otg_set_host(musb->xceiv->otg, &hcd->self); + + if (is_otg_enabled(musb)) + hcd->self.otg_port = 1; + musb->xceiv->otg->host = &hcd->self; + hcd->power_budget = 2 * (plat->power ? : 250); + + /* program PHY to use external vBus if required */ + if (plat->extvbus) { + u8 busctl = musb_read_ulpi_buscontrol(musb->mregs); + busctl |= MUSB_ULPI_USE_EXTVBUS; + musb_write_ulpi_buscontrol(musb->mregs, busctl); + } + } +#endif + + /* For the host-only role, we can activate right away. + * (We expect the ID pin to be forcibly grounded!!) + * Otherwise, wait till the gadget driver hooks up. + */ + if (!is_otg_enabled(musb) && is_host_enabled(musb)) { + struct usb_hcd *hcd = musb_to_hcd(musb); + + MUSB_HST_MODE(musb); +#ifndef __UBOOT__ + musb->xceiv->otg->default_a = 1; + musb->xceiv->state = OTG_STATE_A_IDLE; + + status = usb_add_hcd(musb_to_hcd(musb), 0, 0); + + hcd->self.uses_pio_for_control = 1; + dev_dbg(musb->controller, "%s mode, status %d, devctl %02x %c\n", + "HOST", status, + musb_readb(musb->mregs, MUSB_DEVCTL), + (musb_readb(musb->mregs, MUSB_DEVCTL) + & MUSB_DEVCTL_BDEVICE + ? 'B' : 'A')); +#endif + + } else /* peripheral is enabled */ { + MUSB_DEV_MODE(musb); +#ifndef __UBOOT__ + musb->xceiv->otg->default_a = 0; + musb->xceiv->state = OTG_STATE_B_IDLE; +#endif + + if (is_peripheral_capable()) + status = musb_gadget_setup(musb); + +#ifndef __UBOOT__ + dev_dbg(musb->controller, "%s mode, status %d, dev%02x\n", + is_otg_enabled(musb) ? 
"OTG" : "PERIPHERAL", + status, + musb_readb(musb->mregs, MUSB_DEVCTL)); +#endif + + } + if (status < 0) + goto fail3; + + status = musb_init_debugfs(musb); + if (status < 0) + goto fail4; + +#ifdef CONFIG_SYSFS + status = sysfs_create_group(&musb->controller->kobj, &musb_attr_group); + if (status) + goto fail5; +#endif + + pm_runtime_put(musb->controller); + + dev_info(dev, "USB %s mode controller at %p using %s, IRQ %d\n", + ({char *s; + switch (musb->board_mode) { + case MUSB_HOST: s = "Host"; break; + case MUSB_PERIPHERAL: s = "Peripheral"; break; + default: s = "OTG"; break; + }; s; }), + ctrl, + (is_dma_capable() && musb->dma_controller) + ? "DMA" : "PIO", + musb->nIrq); + +#ifndef __UBOOT__ + return 0; +#else + return status == 0 ? musb : NULL; +#endif + +fail5: + musb_exit_debugfs(musb); + +fail4: +#ifndef __UBOOT__ + if (!is_otg_enabled(musb) && is_host_enabled(musb)) + usb_remove_hcd(musb_to_hcd(musb)); + else +#endif + musb_gadget_cleanup(musb); + +fail3: + pm_runtime_put_sync(musb->controller); + +fail2: + if (musb->irq_wake) + device_init_wakeup(dev, 0); + musb_platform_exit(musb); + +fail1: + dev_err(musb->controller, + "musb_init_controller failed with status %d\n", status); + + musb_free(musb); + +fail0: + +#ifndef __UBOOT__ + return status; +#else + return status == 0 ? musb : NULL; +#endif + +} + +/*-------------------------------------------------------------------------*/ + +/* all implementations (PCI bridge to FPGA, VLYNQ, etc) should just + * bridge to a platform device; this driver then suffices. + */ + +#ifndef CONFIG_MUSB_PIO_ONLY +static u64 *orig_dma_mask; +#endif + +#ifndef __UBOOT__ +static int __devinit musb_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + int irq = platform_get_irq_byname(pdev, "mc"); + int status; + struct resource *iomem; + void __iomem *base; + + iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!iomem || irq <= 0) + return -ENODEV; + + base = ioremap(iomem->start, resource_size(iomem)); + if (!base) { + dev_err(dev, "ioremap failed\n"); + return -ENOMEM; + } + +#ifndef CONFIG_MUSB_PIO_ONLY + /* clobbered by use_dma=n */ + orig_dma_mask = dev->dma_mask; +#endif + status = musb_init_controller(dev, irq, base); + if (status < 0) + iounmap(base); + + return status; +} + +static int __devexit musb_remove(struct platform_device *pdev) +{ + struct musb *musb = dev_to_musb(&pdev->dev); + void __iomem *ctrl_base = musb->ctrl_base; + + /* this gets called on rmmod. 
+ * - Host mode: host may still be active + * - Peripheral mode: peripheral is deactivated (or never-activated) + * - OTG mode: both roles are deactivated (or never-activated) + */ + musb_exit_debugfs(musb); + musb_shutdown(pdev); + + musb_free(musb); + iounmap(ctrl_base); + device_init_wakeup(&pdev->dev, 0); +#ifndef CONFIG_MUSB_PIO_ONLY + pdev->dev.dma_mask = orig_dma_mask; +#endif + return 0; +} + +#ifdef CONFIG_PM + +static void musb_save_context(struct musb *musb) +{ + int i; + void __iomem *musb_base = musb->mregs; + void __iomem *epio; + + if (is_host_enabled(musb)) { + musb->context.frame = musb_readw(musb_base, MUSB_FRAME); + musb->context.testmode = musb_readb(musb_base, MUSB_TESTMODE); + musb->context.busctl = musb_read_ulpi_buscontrol(musb->mregs); + } + musb->context.power = musb_readb(musb_base, MUSB_POWER); + musb->context.intrtxe = musb_readw(musb_base, MUSB_INTRTXE); + musb->context.intrrxe = musb_readw(musb_base, MUSB_INTRRXE); + musb->context.intrusbe = musb_readb(musb_base, MUSB_INTRUSBE); + musb->context.index = musb_readb(musb_base, MUSB_INDEX); + musb->context.devctl = musb_readb(musb_base, MUSB_DEVCTL); + + for (i = 0; i < musb->config->num_eps; ++i) { + struct musb_hw_ep *hw_ep; + + hw_ep = &musb->endpoints[i]; + if (!hw_ep) + continue; + + epio = hw_ep->regs; + if (!epio) + continue; + + musb_writeb(musb_base, MUSB_INDEX, i); + musb->context.index_regs[i].txmaxp = + musb_readw(epio, MUSB_TXMAXP); + musb->context.index_regs[i].txcsr = + musb_readw(epio, MUSB_TXCSR); + musb->context.index_regs[i].rxmaxp = + musb_readw(epio, MUSB_RXMAXP); + musb->context.index_regs[i].rxcsr = + musb_readw(epio, MUSB_RXCSR); + + if (musb->dyn_fifo) { + musb->context.index_regs[i].txfifoadd = + musb_read_txfifoadd(musb_base); + musb->context.index_regs[i].rxfifoadd = + musb_read_rxfifoadd(musb_base); + musb->context.index_regs[i].txfifosz = + musb_read_txfifosz(musb_base); + musb->context.index_regs[i].rxfifosz = + musb_read_rxfifosz(musb_base); + } + if (is_host_enabled(musb)) { + musb->context.index_regs[i].txtype = + musb_readb(epio, MUSB_TXTYPE); + musb->context.index_regs[i].txinterval = + musb_readb(epio, MUSB_TXINTERVAL); + musb->context.index_regs[i].rxtype = + musb_readb(epio, MUSB_RXTYPE); + musb->context.index_regs[i].rxinterval = + musb_readb(epio, MUSB_RXINTERVAL); + + musb->context.index_regs[i].txfunaddr = + musb_read_txfunaddr(musb_base, i); + musb->context.index_regs[i].txhubaddr = + musb_read_txhubaddr(musb_base, i); + musb->context.index_regs[i].txhubport = + musb_read_txhubport(musb_base, i); + + musb->context.index_regs[i].rxfunaddr = + musb_read_rxfunaddr(musb_base, i); + musb->context.index_regs[i].rxhubaddr = + musb_read_rxhubaddr(musb_base, i); + musb->context.index_regs[i].rxhubport = + musb_read_rxhubport(musb_base, i); + } + } +} + +static void musb_restore_context(struct musb *musb) +{ + int i; + void __iomem *musb_base = musb->mregs; + void __iomem *ep_target_regs; + void __iomem *epio; + + if (is_host_enabled(musb)) { + musb_writew(musb_base, MUSB_FRAME, musb->context.frame); + musb_writeb(musb_base, MUSB_TESTMODE, musb->context.testmode); + musb_write_ulpi_buscontrol(musb->mregs, musb->context.busctl); + } + musb_writeb(musb_base, MUSB_POWER, musb->context.power); + musb_writew(musb_base, MUSB_INTRTXE, musb->context.intrtxe); + musb_writew(musb_base, MUSB_INTRRXE, musb->context.intrrxe); + musb_writeb(musb_base, MUSB_INTRUSBE, musb->context.intrusbe); + musb_writeb(musb_base, MUSB_DEVCTL, musb->context.devctl); + + for (i = 0; i < musb->config->num_eps; 
++i) { + struct musb_hw_ep *hw_ep; + + hw_ep = &musb->endpoints[i]; + if (!hw_ep) + continue; + + epio = hw_ep->regs; + if (!epio) + continue; + + musb_writeb(musb_base, MUSB_INDEX, i); + musb_writew(epio, MUSB_TXMAXP, + musb->context.index_regs[i].txmaxp); + musb_writew(epio, MUSB_TXCSR, + musb->context.index_regs[i].txcsr); + musb_writew(epio, MUSB_RXMAXP, + musb->context.index_regs[i].rxmaxp); + musb_writew(epio, MUSB_RXCSR, + musb->context.index_regs[i].rxcsr); + + if (musb->dyn_fifo) { + musb_write_txfifosz(musb_base, + musb->context.index_regs[i].txfifosz); + musb_write_rxfifosz(musb_base, + musb->context.index_regs[i].rxfifosz); + musb_write_txfifoadd(musb_base, + musb->context.index_regs[i].txfifoadd); + musb_write_rxfifoadd(musb_base, + musb->context.index_regs[i].rxfifoadd); + } + + if (is_host_enabled(musb)) { + musb_writeb(epio, MUSB_TXTYPE, + musb->context.index_regs[i].txtype); + musb_writeb(epio, MUSB_TXINTERVAL, + musb->context.index_regs[i].txinterval); + musb_writeb(epio, MUSB_RXTYPE, + musb->context.index_regs[i].rxtype); + musb_writeb(epio, MUSB_RXINTERVAL, + + musb->context.index_regs[i].rxinterval); + musb_write_txfunaddr(musb_base, i, + musb->context.index_regs[i].txfunaddr); + musb_write_txhubaddr(musb_base, i, + musb->context.index_regs[i].txhubaddr); + musb_write_txhubport(musb_base, i, + musb->context.index_regs[i].txhubport); + + ep_target_regs = + musb_read_target_reg_base(i, musb_base); + + musb_write_rxfunaddr(ep_target_regs, + musb->context.index_regs[i].rxfunaddr); + musb_write_rxhubaddr(ep_target_regs, + musb->context.index_regs[i].rxhubaddr); + musb_write_rxhubport(ep_target_regs, + musb->context.index_regs[i].rxhubport); + } + } + musb_writeb(musb_base, MUSB_INDEX, musb->context.index); +} + +static int musb_suspend(struct device *dev) +{ + struct musb *musb = dev_to_musb(dev); + unsigned long flags; + + spin_lock_irqsave(&musb->lock, flags); + + if (is_peripheral_active(musb)) { + /* FIXME force disconnect unless we know USB will wake + * the system up quickly enough to respond ... + */ + } else if (is_host_active(musb)) { + /* we know all the children are suspended; sometimes + * they will even be wakeup-enabled. + */ + } + + spin_unlock_irqrestore(&musb->lock, flags); + return 0; +} + +static int musb_resume_noirq(struct device *dev) +{ + /* for static cmos like DaVinci, register values were preserved + * unless for some reason the whole soc powered down or the USB + * module got reset through the PSC (vs just being disabled). + */ + return 0; +} + +static int musb_runtime_suspend(struct device *dev) +{ + struct musb *musb = dev_to_musb(dev); + + musb_save_context(musb); + + return 0; +} + +static int musb_runtime_resume(struct device *dev) +{ + struct musb *musb = dev_to_musb(dev); + static int first = 1; + + /* + * When pm_runtime_get_sync called for the first time in driver + * init, some of the structure is still not initialized which is + * used in restore function. But clock needs to be + * enabled before any register access, so + * pm_runtime_get_sync has to be called. 
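+ * On that first call the restore is simply skipped (see the "first" + * flag below).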
+ * Also context restore without save does not make + * any sense + */ + if (!first) + musb_restore_context(musb); + first = 0; + + return 0; +} + +static const struct dev_pm_ops musb_dev_pm_ops = { + .suspend = musb_suspend, + .resume_noirq = musb_resume_noirq, + .runtime_suspend = musb_runtime_suspend, + .runtime_resume = musb_runtime_resume, +}; + +#define MUSB_DEV_PM_OPS (&musb_dev_pm_ops) +#else +#define MUSB_DEV_PM_OPS NULL +#endif + +static struct platform_driver musb_driver = { + .driver = { + .name = (char *)musb_driver_name, + .bus = &platform_bus_type, + .owner = THIS_MODULE, + .pm = MUSB_DEV_PM_OPS, + }, + .probe = musb_probe, + .remove = __devexit_p(musb_remove), + .shutdown = musb_shutdown, +}; + +/*-------------------------------------------------------------------------*/ + +static int __init musb_init(void) +{ + if (usb_disabled()) + return 0; + + pr_info("%s: version " MUSB_VERSION ", " + "?dma?" + ", " + "otg (peripheral+host)", + musb_driver_name); + return platform_driver_register(&musb_driver); +} +module_init(musb_init); + +static void __exit musb_cleanup(void) +{ + platform_driver_unregister(&musb_driver); +} +module_exit(musb_cleanup); +#endif diff --git a/drivers/usb/musb-new/musb_core.h b/drivers/usb/musb-new/musb_core.h new file mode 100644 index 00000000000..26957420981 --- /dev/null +++ b/drivers/usb/musb-new/musb_core.h @@ -0,0 +1,623 @@ +/* + * MUSB OTG driver defines + * + * Copyright 2005 Mentor Graphics Corporation + * Copyright (C) 2005-2006 by Texas Instruments + * Copyright (C) 2006-2007 Nokia Corporation + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN + * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF + * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + */ + +#ifndef __MUSB_CORE_H__ +#define __MUSB_CORE_H__ + +#ifndef __UBOOT__ +#include <linux/slab.h> +#include <linux/list.h> +#include <linux/interrupt.h> +#include <linux/errno.h> +#include <linux/timer.h> +#include <linux/device.h> +#include <linux/usb.h> +#include <linux/usb/otg.h> +#else +#include <asm/errno.h> +#endif +#include <linux/usb/ch9.h> +#include <linux/usb/gadget.h> +#include <linux/usb/musb.h> + +struct musb; +struct musb_hw_ep; +struct musb_ep; + +/* Helper defines for struct musb->hwvers */ +#define MUSB_HWVERS_MAJOR(x) ((x >> 10) & 0x1f) +#define MUSB_HWVERS_MINOR(x) (x & 0x3ff) +#define MUSB_HWVERS_RC 0x8000 +#define MUSB_HWVERS_1300 0x52C +#define MUSB_HWVERS_1400 0x590 +#define MUSB_HWVERS_1800 0x720 +#define MUSB_HWVERS_1900 0x784 +#define MUSB_HWVERS_2000 0x800 + +#include "musb_debug.h" +#include "musb_dma.h" + +#include "musb_io.h" +#include "musb_regs.h" + +#include "musb_gadget.h" +#ifndef __UBOOT__ +#include <linux/usb/hcd.h> +#endif +#include "musb_host.h" + +#define is_peripheral_enabled(musb) ((musb)->board_mode != MUSB_HOST) +#define is_host_enabled(musb) ((musb)->board_mode != MUSB_PERIPHERAL) +#define is_otg_enabled(musb) ((musb)->board_mode == MUSB_OTG) + +/* NOTE: otg and peripheral-only state machines start at B_IDLE. + * OTG or host-only go to A_IDLE when ID is sensed. + */ +#define is_peripheral_active(m) (!(m)->is_host) +#define is_host_active(m) ((m)->is_host) + +#ifdef CONFIG_PROC_FS +#include <linux/fs.h> +#define MUSB_CONFIG_PROC_FS +#endif + +/****************************** PERIPHERAL ROLE *****************************/ + +#ifndef __UBOOT__ +#define is_peripheral_capable() (1) +#else +#ifdef CONFIG_MUSB_GADGET +#define is_peripheral_capable() (1) +#else +#define is_peripheral_capable() (0) +#endif +#endif + +extern irqreturn_t musb_g_ep0_irq(struct musb *); +extern void musb_g_tx(struct musb *, u8); +extern void musb_g_rx(struct musb *, u8); +extern void musb_g_reset(struct musb *); +extern void musb_g_suspend(struct musb *); +extern void musb_g_resume(struct musb *); +extern void musb_g_wakeup(struct musb *); +extern void musb_g_disconnect(struct musb *); + +/****************************** HOST ROLE ***********************************/ + +#ifndef __UBOOT__ +#define is_host_capable() (1) +#else +#ifdef CONFIG_MUSB_HOST +#define is_host_capable() (1) +#else +#define is_host_capable() (0) +#endif +#endif + +extern irqreturn_t musb_h_ep0_irq(struct musb *); +extern void musb_host_tx(struct musb *, u8); +extern void musb_host_rx(struct musb *, u8); + +/****************************** CONSTANTS ********************************/ + +#ifndef MUSB_C_NUM_EPS +#define MUSB_C_NUM_EPS ((u8)16) +#endif + +#ifndef MUSB_MAX_END0_PACKET +#define MUSB_MAX_END0_PACKET ((u16)MUSB_EP0_FIFOSIZE) +#endif + +/* host side ep0 states */ +enum musb_h_ep0_state { + MUSB_EP0_IDLE, + MUSB_EP0_START, /* expect ack of setup */ + MUSB_EP0_IN, /* expect IN DATA */ + MUSB_EP0_OUT, /* expect ack of OUT DATA */ + MUSB_EP0_STATUS, /* expect ack of STATUS */ +} __attribute__ ((packed)); + +/* peripheral side ep0 states */ +enum musb_g_ep0_state { + MUSB_EP0_STAGE_IDLE, /* idle, waiting for SETUP */ + MUSB_EP0_STAGE_SETUP, /* received SETUP */ + MUSB_EP0_STAGE_TX, /* IN data */ + MUSB_EP0_STAGE_RX, /* OUT data */ + MUSB_EP0_STAGE_STATUSIN, /* (after OUT data) */ + MUSB_EP0_STAGE_STATUSOUT, /* (after IN data) */ + MUSB_EP0_STAGE_ACKWAIT, /* after zlp, before statusin */ +} __attribute__ ((packed)); + +/* + * OTG protocol constants. 
See USB OTG 1.3 spec, + * sections 5.5 "Device Timings" and 6.6.5 "Timers". + */ +#define OTG_TIME_A_WAIT_VRISE 100 /* msec (max) */ +#define OTG_TIME_A_WAIT_BCON 1100 /* min 1 second */ +#define OTG_TIME_A_AIDL_BDIS 200 /* min 200 msec */ +#define OTG_TIME_B_ASE0_BRST 100 /* min 3.125 ms */ + + +/*************************** REGISTER ACCESS ********************************/ + +/* Endpoint registers (other than dynfifo setup) can be accessed either + * directly with the "flat" model, or after setting up an index register. + */ + +#if defined(CONFIG_ARCH_DAVINCI) || defined(CONFIG_SOC_OMAP2430) \ + || defined(CONFIG_SOC_OMAP3430) || defined(CONFIG_BLACKFIN) \ + || defined(CONFIG_ARCH_OMAP4) +/* REVISIT indexed access seemed to + * misbehave (on DaVinci) for at least peripheral IN ... + */ +#define MUSB_FLAT_REG +#endif + +/* TUSB mapping: "flat" plus ep0 special cases */ +#if defined(CONFIG_USB_MUSB_TUSB6010) || \ + defined(CONFIG_USB_MUSB_TUSB6010_MODULE) +#define musb_ep_select(_mbase, _epnum) \ + musb_writeb((_mbase), MUSB_INDEX, (_epnum)) +#define MUSB_EP_OFFSET MUSB_TUSB_OFFSET + +/* "flat" mapping: each endpoint has its own i/o address */ +#elif defined(MUSB_FLAT_REG) +#define musb_ep_select(_mbase, _epnum) (((void)(_mbase)), ((void)(_epnum))) +#define MUSB_EP_OFFSET MUSB_FLAT_OFFSET + +/* "indexed" mapping: INDEX register controls register bank select */ +#else +#define musb_ep_select(_mbase, _epnum) \ + musb_writeb((_mbase), MUSB_INDEX, (_epnum)) +#define MUSB_EP_OFFSET MUSB_INDEXED_OFFSET +#endif + +/****************************** FUNCTIONS ********************************/ + +#define MUSB_HST_MODE(_musb)\ + { (_musb)->is_host = true; } +#define MUSB_DEV_MODE(_musb) \ + { (_musb)->is_host = false; } + +#define test_devctl_hst_mode(_x) \ + (musb_readb((_x)->mregs, MUSB_DEVCTL)&MUSB_DEVCTL_HM) + +#define MUSB_MODE(musb) ((musb)->is_host ? "Host" : "Peripheral") + +/******************************** TYPES *************************************/ + +/** + * struct musb_platform_ops - Operations passed to musb_core by HW glue layer + * @init: turns on clocks, sets up platform-specific registers, etc + * @exit: undoes @init + * @set_mode: forcefully changes operating mode + * @try_ilde: tries to idle the IP + * @vbus_status: returns vbus status if possible + * @set_vbus: forces vbus status + * @adjust_channel_params: pre check for standard dma channel_program func + */ +struct musb_platform_ops { + int (*init)(struct musb *musb); + int (*exit)(struct musb *musb); + + void (*enable)(struct musb *musb); + void (*disable)(struct musb *musb); + + int (*set_mode)(struct musb *musb, u8 mode); + void (*try_idle)(struct musb *musb, unsigned long timeout); + + int (*vbus_status)(struct musb *musb); + void (*set_vbus)(struct musb *musb, int on); + + int (*adjust_channel_params)(struct dma_channel *channel, + u16 packet_sz, u8 *mode, + dma_addr_t *dma_addr, u32 *len); +}; + +/* + * struct musb_hw_ep - endpoint hardware (bidirectional) + * + * Ordered slightly for better cacheline locality. 
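+ * Each instance bundles the FIFO and register pointers, FIFO sizing, + * DMA channels and per-direction queue state for one bidirectional + * hardware endpoint.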
+ */ +struct musb_hw_ep { + struct musb *musb; + void __iomem *fifo; + void __iomem *regs; + +#if defined(CONFIG_USB_MUSB_TUSB6010) || \ + defined(CONFIG_USB_MUSB_TUSB6010_MODULE) + void __iomem *conf; +#endif + + /* index in musb->endpoints[] */ + u8 epnum; + + /* hardware configuration, possibly dynamic */ + bool is_shared_fifo; + bool tx_double_buffered; + bool rx_double_buffered; + u16 max_packet_sz_tx; + u16 max_packet_sz_rx; + + struct dma_channel *tx_channel; + struct dma_channel *rx_channel; + +#if defined(CONFIG_USB_MUSB_TUSB6010) || \ + defined(CONFIG_USB_MUSB_TUSB6010_MODULE) + /* TUSB has "asynchronous" and "synchronous" dma modes */ + dma_addr_t fifo_async; + dma_addr_t fifo_sync; + void __iomem *fifo_sync_va; +#endif + + void __iomem *target_regs; + + /* currently scheduled peripheral endpoint */ + struct musb_qh *in_qh; + struct musb_qh *out_qh; + + u8 rx_reinit; + u8 tx_reinit; + + /* peripheral side */ + struct musb_ep ep_in; /* TX */ + struct musb_ep ep_out; /* RX */ +}; + +static inline struct musb_request *next_in_request(struct musb_hw_ep *hw_ep) +{ + return next_request(&hw_ep->ep_in); +} + +static inline struct musb_request *next_out_request(struct musb_hw_ep *hw_ep) +{ + return next_request(&hw_ep->ep_out); +} + +struct musb_csr_regs { + /* FIFO registers */ + u16 txmaxp, txcsr, rxmaxp, rxcsr; + u16 rxfifoadd, txfifoadd; + u8 txtype, txinterval, rxtype, rxinterval; + u8 rxfifosz, txfifosz; + u8 txfunaddr, txhubaddr, txhubport; + u8 rxfunaddr, rxhubaddr, rxhubport; +}; + +struct musb_context_registers { + + u8 power; + u16 intrtxe, intrrxe; + u8 intrusbe; + u16 frame; + u8 index, testmode; + + u8 devctl, busctl, misc; + u32 otg_interfsel; + + struct musb_csr_regs index_regs[MUSB_C_NUM_EPS]; +}; + +/* + * struct musb - Driver instance data. + */ +struct musb { + /* device lock */ + spinlock_t lock; + + const struct musb_platform_ops *ops; + struct musb_context_registers context; + + irqreturn_t (*isr)(int, void *); + struct work_struct irq_work; + u16 hwvers; + +/* this hub status bit is reserved by USB 2.0 and not seen by usbcore */ +#define MUSB_PORT_STAT_RESUME (1 << 31) + + u32 port1_status; + + unsigned long rh_timer; + + enum musb_h_ep0_state ep0_stage; + + /* bulk traffic normally dedicates endpoint hardware, and each + * direction has its own ring of host side endpoints. + * we try to progress the transfer at the head of each endpoint's + * queue until it completes or NAKs too much; then we try the next + * endpoint. 
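+ * bulk_ep below is the hardware endpoint reserved for that shared bulk + * scheduling; the in_bulk and out_bulk lists hold the musb_qh entries + * waiting for it.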
+ */ + struct musb_hw_ep *bulk_ep; + + struct list_head control; /* of musb_qh */ + struct list_head in_bulk; /* of musb_qh */ + struct list_head out_bulk; /* of musb_qh */ + + struct timer_list otg_timer; + struct notifier_block nb; + + struct dma_controller *dma_controller; + + struct device *controller; + void __iomem *ctrl_base; + void __iomem *mregs; + +#if defined(CONFIG_USB_MUSB_TUSB6010) || \ + defined(CONFIG_USB_MUSB_TUSB6010_MODULE) + dma_addr_t async; + dma_addr_t sync; + void __iomem *sync_va; +#endif + + /* passed down from chip/board specific irq handlers */ + u8 int_usb; + u16 int_rx; + u16 int_tx; + + struct usb_phy *xceiv; + + int nIrq; + unsigned irq_wake:1; + + struct musb_hw_ep endpoints[MUSB_C_NUM_EPS]; +#define control_ep endpoints + +#define VBUSERR_RETRY_COUNT 3 + u16 vbuserr_retry; + u16 epmask; + u8 nr_endpoints; + + u8 board_mode; /* enum musb_mode */ + int (*board_set_power)(int state); + + u8 min_power; /* vbus for periph, in mA/2 */ + + bool is_host; + + int a_wait_bcon; /* VBUS timeout in msecs */ + unsigned long idle_timeout; /* Next timeout in jiffies */ + + /* active means connected and not suspended */ + unsigned is_active:1; + + unsigned is_multipoint:1; + unsigned ignore_disconnect:1; /* during bus resets */ + + unsigned hb_iso_rx:1; /* high bandwidth iso rx? */ + unsigned hb_iso_tx:1; /* high bandwidth iso tx? */ + unsigned dyn_fifo:1; /* dynamic FIFO supported? */ + + unsigned bulk_split:1; +#define can_bulk_split(musb,type) \ + (((type) == USB_ENDPOINT_XFER_BULK) && (musb)->bulk_split) + + unsigned bulk_combine:1; +#define can_bulk_combine(musb,type) \ + (((type) == USB_ENDPOINT_XFER_BULK) && (musb)->bulk_combine) + + /* is_suspended means USB B_PERIPHERAL suspend */ + unsigned is_suspended:1; + + /* may_wakeup means remote wakeup is enabled */ + unsigned may_wakeup:1; + + /* is_self_powered is reported in device status and the + * config descriptor. is_bus_powered means B_PERIPHERAL + * draws some VBUS current; both can be true. + */ + unsigned is_self_powered:1; + unsigned is_bus_powered:1; + + unsigned set_address:1; + unsigned test_mode:1; + unsigned softconnect:1; + + u8 address; + u8 test_mode_nr; + u16 ackpend; /* ep0 */ + enum musb_g_ep0_state ep0_state; + struct usb_gadget g; /* the gadget */ + struct usb_gadget_driver *gadget_driver; /* its driver */ + + /* + * FIXME: Remove this flag. + * + * This is only added to allow Blackfin to work + * with current driver. For some unknown reason + * Blackfin doesn't work with double buffering + * and that's enabled by default. + * + * We added this flag to forcefully disable double + * buffering until we get it working. 
+ */ + unsigned double_buffer_not_ok:1; + + struct musb_hdrc_config *config; + +#ifdef MUSB_CONFIG_PROC_FS + struct proc_dir_entry *proc_entry; +#endif +}; + +static inline struct musb *gadget_to_musb(struct usb_gadget *g) +{ + return container_of(g, struct musb, g); +} + +#ifdef CONFIG_BLACKFIN +static inline int musb_read_fifosize(struct musb *musb, + struct musb_hw_ep *hw_ep, u8 epnum) +{ + musb->nr_endpoints++; + musb->epmask |= (1 << epnum); + + if (epnum < 5) { + hw_ep->max_packet_sz_tx = 128; + hw_ep->max_packet_sz_rx = 128; + } else { + hw_ep->max_packet_sz_tx = 1024; + hw_ep->max_packet_sz_rx = 1024; + } + hw_ep->is_shared_fifo = false; + + return 0; +} + +static inline void musb_configure_ep0(struct musb *musb) +{ + musb->endpoints[0].max_packet_sz_tx = MUSB_EP0_FIFOSIZE; + musb->endpoints[0].max_packet_sz_rx = MUSB_EP0_FIFOSIZE; + musb->endpoints[0].is_shared_fifo = true; +} + +#else + +static inline int musb_read_fifosize(struct musb *musb, + struct musb_hw_ep *hw_ep, u8 epnum) +{ + void *mbase = musb->mregs; + u8 reg = 0; + + /* read from core using indexed model */ + reg = musb_readb(mbase, MUSB_EP_OFFSET(epnum, MUSB_FIFOSIZE)); + /* 0's returned when no more endpoints */ + if (!reg) + return -ENODEV; + + musb->nr_endpoints++; + musb->epmask |= (1 << epnum); + + hw_ep->max_packet_sz_tx = 1 << (reg & 0x0f); + + /* shared TX/RX FIFO? */ + if ((reg & 0xf0) == 0xf0) { + hw_ep->max_packet_sz_rx = hw_ep->max_packet_sz_tx; + hw_ep->is_shared_fifo = true; + return 0; + } else { + hw_ep->max_packet_sz_rx = 1 << ((reg & 0xf0) >> 4); + hw_ep->is_shared_fifo = false; + } + + return 0; +} + +static inline void musb_configure_ep0(struct musb *musb) +{ + musb->endpoints[0].max_packet_sz_tx = MUSB_EP0_FIFOSIZE; + musb->endpoints[0].max_packet_sz_rx = MUSB_EP0_FIFOSIZE; + musb->endpoints[0].is_shared_fifo = true; +} +#endif /* CONFIG_BLACKFIN */ + + +/***************************** Glue it together *****************************/ + +extern const char musb_driver_name[]; + +extern void musb_start(struct musb *musb); +extern void musb_stop(struct musb *musb); + +extern void musb_write_fifo(struct musb_hw_ep *ep, u16 len, const u8 *src); +extern void musb_read_fifo(struct musb_hw_ep *ep, u16 len, u8 *dst); + +extern void musb_load_testpacket(struct musb *); + +extern irqreturn_t musb_interrupt(struct musb *); + +extern void musb_hnp_stop(struct musb *musb); + +static inline void musb_platform_set_vbus(struct musb *musb, int is_on) +{ + if (musb->ops->set_vbus) + musb->ops->set_vbus(musb, is_on); +} + +static inline void musb_platform_enable(struct musb *musb) +{ + if (musb->ops->enable) + musb->ops->enable(musb); +} + +static inline void musb_platform_disable(struct musb *musb) +{ + if (musb->ops->disable) + musb->ops->disable(musb); +} + +static inline int musb_platform_set_mode(struct musb *musb, u8 mode) +{ + if (!musb->ops->set_mode) + return 0; + + return musb->ops->set_mode(musb, mode); +} + +static inline void musb_platform_try_idle(struct musb *musb, + unsigned long timeout) +{ + if (musb->ops->try_idle) + musb->ops->try_idle(musb, timeout); +} + +static inline int musb_platform_get_vbus_status(struct musb *musb) +{ + if (!musb->ops->vbus_status) + return 0; + + return musb->ops->vbus_status(musb); +} + +static inline int musb_platform_init(struct musb *musb) +{ + if (!musb->ops->init) + return -EINVAL; + + return musb->ops->init(musb); +} + +static inline int musb_platform_exit(struct musb *musb) +{ + if (!musb->ops->exit) + return -EINVAL; + + return musb->ops->exit(musb); +} + +#ifdef 
__UBOOT__ +struct musb * +musb_init_controller(struct musb_hdrc_platform_data *plat, struct device *dev, + void *ctrl); +#endif +#endif /* __MUSB_CORE_H__ */ diff --git a/drivers/usb/musb-new/musb_debug.h b/drivers/usb/musb-new/musb_debug.h new file mode 100644 index 00000000000..27ba8f79946 --- /dev/null +++ b/drivers/usb/musb-new/musb_debug.h @@ -0,0 +1,58 @@ +/* + * MUSB OTG driver debug defines + * + * Copyright 2005 Mentor Graphics Corporation + * Copyright (C) 2005-2006 by Texas Instruments + * Copyright (C) 2006-2007 Nokia Corporation + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN + * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF + * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +#ifndef __MUSB_LINUX_DEBUG_H__ +#define __MUSB_LINUX_DEBUG_H__ + +#define yprintk(facility, format, args...) \ + do { printk(facility "%s %d: " format , \ + __func__, __LINE__ , ## args); } while (0) +#define WARNING(fmt, args...) yprintk(KERN_WARNING, fmt, ## args) +#define INFO(fmt, args...) yprintk(KERN_INFO, fmt, ## args) +#define ERR(fmt, args...) yprintk(KERN_ERR, fmt, ## args) + +#ifdef CONFIG_DEBUG_FS +int musb_init_debugfs(struct musb *musb); +void musb_exit_debugfs(struct musb *musb); +#else +static inline int musb_init_debugfs(struct musb *musb) +{ + return 0; +} +static inline void musb_exit_debugfs(struct musb *musb) +{ +} +#endif + +#endif /* __MUSB_LINUX_DEBUG_H__ */ diff --git a/drivers/usb/musb-new/musb_dma.h b/drivers/usb/musb-new/musb_dma.h new file mode 100644 index 00000000000..3a97c4e2d4f --- /dev/null +++ b/drivers/usb/musb-new/musb_dma.h @@ -0,0 +1,186 @@ +/* + * MUSB OTG driver DMA controller abstraction + * + * Copyright 2005 Mentor Graphics Corporation + * Copyright (C) 2005-2006 by Texas Instruments + * Copyright (C) 2006-2007 Nokia Corporation + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN + * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF + * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +#ifndef __MUSB_DMA_H__ +#define __MUSB_DMA_H__ + +struct musb_hw_ep; + +/* + * DMA Controller Abstraction + * + * DMA Controllers are abstracted to allow use of a variety of different + * implementations of DMA, as allowed by the Inventra USB cores. On the + * host side, usbcore sets up the DMA mappings and flushes caches; on the + * peripheral side, the gadget controller driver does. Responsibilities + * of a DMA controller driver include: + * + * - Handling the details of moving multiple USB packets + * in cooperation with the Inventra USB core, including especially + * the correct RX side treatment of short packets and buffer-full + * states (both of which terminate transfers). + * + * - Knowing the correlation between dma channels and the + * Inventra core's local endpoint resources and data direction. + * + * - Maintaining a list of allocated/available channels. + * + * - Updating channel status on interrupts, + * whether shared with the Inventra core or separate. + */ + +#define DMA_ADDR_INVALID (~(dma_addr_t)0) + +#ifndef CONFIG_MUSB_PIO_ONLY +#define is_dma_capable() (1) +#else +#define is_dma_capable() (0) +#endif + +#ifdef CONFIG_USB_TI_CPPI_DMA +#define is_cppi_enabled() 1 +#else +#define is_cppi_enabled() 0 +#endif + +#ifdef CONFIG_USB_TUSB_OMAP_DMA +#define tusb_dma_omap() 1 +#else +#define tusb_dma_omap() 0 +#endif + +/* Anomaly 05000456 - USB Receive Interrupt Is Not Generated in DMA Mode 1 + * Only allow DMA mode 1 to be used when the USB will actually generate the + * interrupts we expect. + */ +#ifdef CONFIG_BLACKFIN +# undef USE_MODE1 +# if !ANOMALY_05000456 +# define USE_MODE1 +# endif +#endif + +/* + * DMA channel status ... updated by the dma controller driver whenever that + * status changes, and protected by the overall controller spinlock. + */ +enum dma_channel_status { + /* unallocated */ + MUSB_DMA_STATUS_UNKNOWN, + /* allocated ... but not busy, no errors */ + MUSB_DMA_STATUS_FREE, + /* busy ... transactions are active */ + MUSB_DMA_STATUS_BUSY, + /* transaction(s) aborted due to ... dma or memory bus error */ + MUSB_DMA_STATUS_BUS_ABORT, + /* transaction(s) aborted due to ... core error or USB fault */ + MUSB_DMA_STATUS_CORE_ABORT +}; + +struct dma_controller; + +/** + * struct dma_channel - A DMA channel. + * @private_data: channel-private data + * @max_len: the maximum number of bytes the channel can move in one + * transaction (typically representing many USB maximum-sized packets) + * @actual_len: how many bytes have been transferred + * @status: current channel status (updated e.g. 
on interrupt) + * @desired_mode: true if mode 1 is desired; false if mode 0 is desired + * + * channels are associated with an endpoint for the duration of at least + * one usb transfer. + */ +struct dma_channel { + void *private_data; + /* FIXME not void* private_data, but a dma_controller * */ + size_t max_len; + size_t actual_len; + enum dma_channel_status status; + bool desired_mode; +}; + +/* + * dma_channel_status - return status of dma channel + * @c: the channel + * + * Returns the software's view of the channel status. If that status is BUSY + * then it's possible that the hardware has completed (or aborted) a transfer, + * so the driver needs to update that status. + */ +static inline enum dma_channel_status +dma_channel_status(struct dma_channel *c) +{ + return (is_dma_capable() && c) ? c->status : MUSB_DMA_STATUS_UNKNOWN; +} + +/** + * struct dma_controller - A DMA Controller. + * @start: call this to start a DMA controller; + * return 0 on success, else negative errno + * @stop: call this to stop a DMA controller + * return 0 on success, else negative errno + * @channel_alloc: call this to allocate a DMA channel + * @channel_release: call this to release a DMA channel + * @channel_abort: call this to abort a pending DMA transaction, + * returning it to FREE (but allocated) state + * + * Controllers manage dma channels. + */ +struct dma_controller { + int (*start)(struct dma_controller *); + int (*stop)(struct dma_controller *); + struct dma_channel *(*channel_alloc)(struct dma_controller *, + struct musb_hw_ep *, u8 is_tx); + void (*channel_release)(struct dma_channel *); + int (*channel_program)(struct dma_channel *channel, + u16 maxpacket, u8 mode, + dma_addr_t dma_addr, + u32 length); + int (*channel_abort)(struct dma_channel *); + int (*is_compatible)(struct dma_channel *channel, + u16 maxpacket, + void *buf, u32 length); +}; + +/* called after channel_program(), may indicate a fault */ +extern void musb_dma_completion(struct musb *musb, u8 epnum, u8 transmit); + + +extern struct dma_controller *__init +dma_controller_create(struct musb *, void __iomem *); + +extern void dma_controller_destroy(struct dma_controller *); + +#endif /* __MUSB_DMA_H__ */ diff --git a/drivers/usb/musb-new/musb_dsps.c b/drivers/usb/musb-new/musb_dsps.c new file mode 100644 index 00000000000..9a03917e87d --- /dev/null +++ b/drivers/usb/musb-new/musb_dsps.c @@ -0,0 +1,771 @@ +/* + * Texas Instruments DSPS platforms "glue layer" + * + * Copyright (C) 2012, by Texas Instruments + * + * Based on the am35x "glue layer" code. + * + * This file is part of the Inventra Controller Driver for Linux. + * + * The Inventra Controller Driver for Linux is free software; you + * can redistribute it and/or modify it under the terms of the GNU + * General Public License version 2 as published by the Free Software + * Foundation. + * + * The Inventra Controller Driver for Linux is distributed in + * the hope that it will be useful, but WITHOUT ANY WARRANTY; + * without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public + * License for more details. + * + * You should have received a copy of the GNU General Public License + * along with The Inventra Controller Driver for Linux ; if not, + * write to the Free Software Foundation, Inc., 59 Temple Place, + * Suite 330, Boston, MA 02111-1307 USA + * + * musb_dsps.c will be a common file for all the TI DSPS platforms + * such as dm64x, dm36x, dm35x, da8x, am35x and ti81x. 
+ * For now only ti81x is using this and in future davinci.c, am35x.c + * da8xx.c would be merged to this file after testing. + */ + +#define __UBOOT__ +#ifndef __UBOOT__ +#include <linux/init.h> +#include <linux/io.h> +#include <linux/err.h> +#include <linux/platform_device.h> +#include <linux/dma-mapping.h> +#include <linux/pm_runtime.h> +#include <linux/module.h> + +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/of_address.h> + +#include <plat/usb.h> +#else +#include <common.h> +#include <asm/omap_musb.h> +#include "linux-compat.h" +#endif + +#include "musb_core.h" + +/** + * avoid using musb_readx()/musb_writex() as glue layer should not be + * dependent on musb core layer symbols. + */ +static inline u8 dsps_readb(const void __iomem *addr, unsigned offset) + { return __raw_readb(addr + offset); } + +static inline u32 dsps_readl(const void __iomem *addr, unsigned offset) + { return __raw_readl(addr + offset); } + +static inline void dsps_writeb(void __iomem *addr, unsigned offset, u8 data) + { __raw_writeb(data, addr + offset); } + +static inline void dsps_writel(void __iomem *addr, unsigned offset, u32 data) + { __raw_writel(data, addr + offset); } + +/** + * DSPS musb wrapper register offset. + * FIXME: This should be expanded to have all the wrapper registers from TI DSPS + * musb ips. + */ +struct dsps_musb_wrapper { + u16 revision; + u16 control; + u16 status; + u16 eoi; + u16 epintr_set; + u16 epintr_clear; + u16 epintr_status; + u16 coreintr_set; + u16 coreintr_clear; + u16 coreintr_status; + u16 phy_utmi; + u16 mode; + + /* bit positions for control */ + unsigned reset:5; + + /* bit positions for interrupt */ + unsigned usb_shift:5; + u32 usb_mask; + u32 usb_bitmap; + unsigned drvvbus:5; + + unsigned txep_shift:5; + u32 txep_mask; + u32 txep_bitmap; + + unsigned rxep_shift:5; + u32 rxep_mask; + u32 rxep_bitmap; + + /* bit positions for phy_utmi */ + unsigned otg_disable:5; + + /* bit positions for mode */ + unsigned iddig:5; + /* miscellaneous stuff */ + u32 musb_core_offset; + u8 poll_seconds; +}; + +static const struct dsps_musb_wrapper ti81xx_driver_data __devinitconst = { + .revision = 0x00, + .control = 0x14, + .status = 0x18, + .eoi = 0x24, + .epintr_set = 0x38, + .epintr_clear = 0x40, + .epintr_status = 0x30, + .coreintr_set = 0x3c, + .coreintr_clear = 0x44, + .coreintr_status = 0x34, + .phy_utmi = 0xe0, + .mode = 0xe8, + .reset = 0, + .otg_disable = 21, + .iddig = 8, + .usb_shift = 0, + .usb_mask = 0x1ff, + .usb_bitmap = (0x1ff << 0), + .drvvbus = 8, + .txep_shift = 0, + .txep_mask = 0xffff, + .txep_bitmap = (0xffff << 0), + .rxep_shift = 16, + .rxep_mask = 0xfffe, + .rxep_bitmap = (0xfffe << 16), + .musb_core_offset = 0x400, + .poll_seconds = 2, +}; + +/** + * DSPS glue structure. + */ +struct dsps_glue { + struct device *dev; + struct platform_device *musb; /* child musb pdev */ + const struct dsps_musb_wrapper *wrp; /* wrapper register offsets */ + struct timer_list timer; /* otg_workaround timer */ +}; + +/** + * dsps_musb_enable - enable interrupts + */ +static void dsps_musb_enable(struct musb *musb) +{ +#ifndef __UBOOT__ + struct device *dev = musb->controller; + struct platform_device *pdev = to_platform_device(dev->parent); + struct dsps_glue *glue = platform_get_drvdata(pdev); + const struct dsps_musb_wrapper *wrp = glue->wrp; +#else + const struct dsps_musb_wrapper *wrp = &ti81xx_driver_data; +#endif + void __iomem *reg_base = musb->ctrl_base; + u32 epmask, coremask; + + /* Workaround: setup IRQs through both register sets. 
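+ * The wrapper has its own endpoint/core interrupt set registers on top + * of the MUSB core's interrupt enables, and both need to be programmed.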
*/ + epmask = ((musb->epmask & wrp->txep_mask) << wrp->txep_shift) | + ((musb->epmask & wrp->rxep_mask) << wrp->rxep_shift); + coremask = (wrp->usb_bitmap & ~MUSB_INTR_SOF); + + dsps_writel(reg_base, wrp->epintr_set, epmask); + dsps_writel(reg_base, wrp->coreintr_set, coremask); + /* Force the DRVVBUS IRQ so we can start polling for ID change. */ +#ifndef __UBOOT__ + if (is_otg_enabled(musb)) + dsps_writel(reg_base, wrp->coreintr_set, + (1 << wrp->drvvbus) << wrp->usb_shift); +#endif +} + +/** + * dsps_musb_disable - disable HDRC and flush interrupts + */ +static void dsps_musb_disable(struct musb *musb) +{ +#ifndef __UBOOT__ + struct device *dev = musb->controller; + struct platform_device *pdev = to_platform_device(dev->parent); + struct dsps_glue *glue = platform_get_drvdata(pdev); + const struct dsps_musb_wrapper *wrp = glue->wrp; + void __iomem *reg_base = musb->ctrl_base; + + dsps_writel(reg_base, wrp->coreintr_clear, wrp->usb_bitmap); + dsps_writel(reg_base, wrp->epintr_clear, + wrp->txep_bitmap | wrp->rxep_bitmap); + dsps_writeb(musb->mregs, MUSB_DEVCTL, 0); + dsps_writel(reg_base, wrp->eoi, 0); +#endif +} + +#ifndef __UBOOT__ +static void otg_timer(unsigned long _musb) +{ + struct musb *musb = (void *)_musb; + void __iomem *mregs = musb->mregs; + struct device *dev = musb->controller; + struct platform_device *pdev = to_platform_device(dev->parent); + struct dsps_glue *glue = platform_get_drvdata(pdev); + const struct dsps_musb_wrapper *wrp = glue->wrp; + u8 devctl; + unsigned long flags; + + /* + * We poll because DSPS IP's won't expose several OTG-critical + * status change events (from the transceiver) otherwise. + */ + devctl = dsps_readb(mregs, MUSB_DEVCTL); + dev_dbg(musb->controller, "Poll devctl %02x (%s)\n", devctl, + otg_state_string(musb->xceiv->state)); + + spin_lock_irqsave(&musb->lock, flags); + switch (musb->xceiv->state) { + case OTG_STATE_A_WAIT_BCON: + devctl &= ~MUSB_DEVCTL_SESSION; + dsps_writeb(musb->mregs, MUSB_DEVCTL, devctl); + + devctl = dsps_readb(musb->mregs, MUSB_DEVCTL); + if (devctl & MUSB_DEVCTL_BDEVICE) { + musb->xceiv->state = OTG_STATE_B_IDLE; + MUSB_DEV_MODE(musb); + } else { + musb->xceiv->state = OTG_STATE_A_IDLE; + MUSB_HST_MODE(musb); + } + break; + case OTG_STATE_A_WAIT_VFALL: + musb->xceiv->state = OTG_STATE_A_WAIT_VRISE; + dsps_writel(musb->ctrl_base, wrp->coreintr_set, + MUSB_INTR_VBUSERROR << wrp->usb_shift); + break; + case OTG_STATE_B_IDLE: + if (!is_peripheral_enabled(musb)) + break; + + devctl = dsps_readb(mregs, MUSB_DEVCTL); + if (devctl & MUSB_DEVCTL_BDEVICE) + mod_timer(&glue->timer, + jiffies + wrp->poll_seconds * HZ); + else + musb->xceiv->state = OTG_STATE_A_IDLE; + break; + default: + break; + } + spin_unlock_irqrestore(&musb->lock, flags); +} + +static void dsps_musb_try_idle(struct musb *musb, unsigned long timeout) +{ + struct device *dev = musb->controller; + struct platform_device *pdev = to_platform_device(dev->parent); + struct dsps_glue *glue = platform_get_drvdata(pdev); + static unsigned long last_timer; + + if (!is_otg_enabled(musb)) + return; + + if (timeout == 0) + timeout = jiffies + msecs_to_jiffies(3); + + /* Never idle if active, or when VBUS timeout is not set as host */ + if (musb->is_active || (musb->a_wait_bcon == 0 && + musb->xceiv->state == OTG_STATE_A_WAIT_BCON)) { + dev_dbg(musb->controller, "%s active, deleting timer\n", + otg_state_string(musb->xceiv->state)); + del_timer(&glue->timer); + last_timer = jiffies; + return; + } + + if (time_after(last_timer, timeout) && timer_pending(&glue->timer)) { + 
dev_dbg(musb->controller, + "Longer idle timer already pending, ignoring...\n"); + return; + } + last_timer = timeout; + + dev_dbg(musb->controller, "%s inactive, starting idle timer for %u ms\n", + otg_state_string(musb->xceiv->state), + jiffies_to_msecs(timeout - jiffies)); + mod_timer(&glue->timer, timeout); +} +#endif + +static irqreturn_t dsps_interrupt(int irq, void *hci) +{ + struct musb *musb = hci; + void __iomem *reg_base = musb->ctrl_base; +#ifndef __UBOOT__ + struct device *dev = musb->controller; + struct platform_device *pdev = to_platform_device(dev->parent); + struct dsps_glue *glue = platform_get_drvdata(pdev); + const struct dsps_musb_wrapper *wrp = glue->wrp; +#else + const struct dsps_musb_wrapper *wrp = &ti81xx_driver_data; +#endif + unsigned long flags; + irqreturn_t ret = IRQ_NONE; + u32 epintr, usbintr; + + spin_lock_irqsave(&musb->lock, flags); + + /* Get endpoint interrupts */ + epintr = dsps_readl(reg_base, wrp->epintr_status); + musb->int_rx = (epintr & wrp->rxep_bitmap) >> wrp->rxep_shift; + musb->int_tx = (epintr & wrp->txep_bitmap) >> wrp->txep_shift; + + if (epintr) + dsps_writel(reg_base, wrp->epintr_status, epintr); + + /* Get usb core interrupts */ + usbintr = dsps_readl(reg_base, wrp->coreintr_status); + if (!usbintr && !epintr) + goto eoi; + + musb->int_usb = (usbintr & wrp->usb_bitmap) >> wrp->usb_shift; + if (usbintr) + dsps_writel(reg_base, wrp->coreintr_status, usbintr); + + dev_dbg(musb->controller, "usbintr (%x) epintr(%x)\n", + usbintr, epintr); +#ifndef __UBOOT__ + /* + * DRVVBUS IRQs are the only proxy we have (a very poor one!) for + * DSPS IP's missing ID change IRQ. We need an ID change IRQ to + * switch appropriately between halves of the OTG state machine. + * Managing DEVCTL.SESSION per Mentor docs requires that we know its + * value but DEVCTL.BDEVICE is invalid without DEVCTL.SESSION set. + * Also, DRVVBUS pulses for SRP (but not at 5V) ... + */ + if ((usbintr & MUSB_INTR_BABBLE) && is_host_enabled(musb)) + pr_info("CAUTION: musb: Babble Interrupt Occured\n"); + + if (usbintr & ((1 << wrp->drvvbus) << wrp->usb_shift)) { + int drvvbus = dsps_readl(reg_base, wrp->status); + void __iomem *mregs = musb->mregs; + u8 devctl = dsps_readb(mregs, MUSB_DEVCTL); + int err; + + err = is_host_enabled(musb) && (musb->int_usb & + MUSB_INTR_VBUSERROR); + if (err) { + /* + * The Mentor core doesn't debounce VBUS as needed + * to cope with device connect current spikes. This + * means it's not uncommon for bus-powered devices + * to get VBUS errors during enumeration. + * + * This is a workaround, but newer RTL from Mentor + * seems to allow a better one: "re"-starting sessions + * without waiting for VBUS to stop registering in + * devctl. + */ + musb->int_usb &= ~MUSB_INTR_VBUSERROR; + musb->xceiv->state = OTG_STATE_A_WAIT_VFALL; + mod_timer(&glue->timer, + jiffies + wrp->poll_seconds * HZ); + WARNING("VBUS error workaround (delay coming)\n"); + } else if (is_host_enabled(musb) && drvvbus) { + musb->is_active = 1; + MUSB_HST_MODE(musb); + musb->xceiv->otg->default_a = 1; + musb->xceiv->state = OTG_STATE_A_WAIT_VRISE; + del_timer(&glue->timer); + } else { + musb->is_active = 0; + MUSB_DEV_MODE(musb); + musb->xceiv->otg->default_a = 0; + musb->xceiv->state = OTG_STATE_B_IDLE; + } + + /* NOTE: this must complete power-on within 100 ms. */ + dev_dbg(musb->controller, "VBUS %s (%s)%s, devctl %02x\n", + drvvbus ? "on" : "off", + otg_state_string(musb->xceiv->state), + err ? 
" ERROR" : "", + devctl); + ret = IRQ_HANDLED; + } +#endif + + if (musb->int_tx || musb->int_rx || musb->int_usb) + ret |= musb_interrupt(musb); + + eoi: + /* EOI needs to be written for the IRQ to be re-asserted. */ + if (ret == IRQ_HANDLED || epintr || usbintr) + dsps_writel(reg_base, wrp->eoi, 1); + +#ifndef __UBOOT__ + /* Poll for ID change */ + if (is_otg_enabled(musb) && musb->xceiv->state == OTG_STATE_B_IDLE) + mod_timer(&glue->timer, jiffies + wrp->poll_seconds * HZ); +#endif + + spin_unlock_irqrestore(&musb->lock, flags); + + return ret; +} + +static int dsps_musb_init(struct musb *musb) +{ +#ifndef __UBOOT__ + struct device *dev = musb->controller; + struct musb_hdrc_platform_data *plat = dev->platform_data; + struct platform_device *pdev = to_platform_device(dev->parent); + struct dsps_glue *glue = platform_get_drvdata(pdev); + const struct dsps_musb_wrapper *wrp = glue->wrp; + struct omap_musb_board_data *data = plat->board_data; +#else + struct omap_musb_board_data *data = + (struct omap_musb_board_data *)musb->controller; + const struct dsps_musb_wrapper *wrp = &ti81xx_driver_data; +#endif + void __iomem *reg_base = musb->ctrl_base; + u32 rev, val; + int status; + + /* mentor core register starts at offset of 0x400 from musb base */ + musb->mregs += wrp->musb_core_offset; + +#ifndef __UBOOT__ + /* NOP driver needs change if supporting dual instance */ + usb_nop_xceiv_register(); + musb->xceiv = usb_get_phy(USB_PHY_TYPE_USB2); + if (IS_ERR_OR_NULL(musb->xceiv)) + return -ENODEV; +#endif + + /* Returns zero if e.g. not clocked */ + rev = dsps_readl(reg_base, wrp->revision); + if (!rev) { + status = -ENODEV; + goto err0; + } + +#ifndef __UBOOT__ + if (is_host_enabled(musb)) + setup_timer(&glue->timer, otg_timer, (unsigned long) musb); +#endif + + /* Reset the musb */ + dsps_writel(reg_base, wrp->control, (1 << wrp->reset)); + + /* Start the on-chip PHY and its PLL. */ + if (data->set_phy_power) + data->set_phy_power(1); + + musb->isr = dsps_interrupt; + + /* reset the otgdisable bit, needed for host mode to work */ + val = dsps_readl(reg_base, wrp->phy_utmi); + val &= ~(1 << wrp->otg_disable); + dsps_writel(musb->ctrl_base, wrp->phy_utmi, val); + + /* clear level interrupt */ + dsps_writel(reg_base, wrp->eoi, 0); + + return 0; +err0: +#ifndef __UBOOT__ + usb_put_phy(musb->xceiv); + usb_nop_xceiv_unregister(); +#endif + return status; +} + +static int dsps_musb_exit(struct musb *musb) +{ +#ifndef __UBOOT__ + struct device *dev = musb->controller; + struct musb_hdrc_platform_data *plat = dev->platform_data; + struct omap_musb_board_data *data = plat->board_data; + struct platform_device *pdev = to_platform_device(dev->parent); + struct dsps_glue *glue = platform_get_drvdata(pdev); +#else + struct omap_musb_board_data *data = + (struct omap_musb_board_data *)musb->controller; +#endif + +#ifndef __UBOOT__ + if (is_host_enabled(musb)) + del_timer_sync(&glue->timer); +#endif + + /* Shutdown the on-chip PHY and its PLL. 
*/ + if (data->set_phy_power) + data->set_phy_power(0); + +#ifndef __UBOOT__ + /* NOP driver needs change if supporting dual instance */ + usb_put_phy(musb->xceiv); + usb_nop_xceiv_unregister(); +#endif + + return 0; +} + +#ifndef __UBOOT__ +static struct musb_platform_ops dsps_ops = { +#else +struct musb_platform_ops musb_dsps_ops = { +#endif + .init = dsps_musb_init, + .exit = dsps_musb_exit, + + .enable = dsps_musb_enable, + .disable = dsps_musb_disable, + +#ifndef __UBOOT__ + .try_idle = dsps_musb_try_idle, +#endif +}; + +#ifndef __UBOOT__ +static u64 musb_dmamask = DMA_BIT_MASK(32); +#endif + +#ifndef __UBOOT__ +static int __devinit dsps_create_musb_pdev(struct dsps_glue *glue, u8 id) +{ + struct device *dev = glue->dev; + struct platform_device *pdev = to_platform_device(dev); + struct musb_hdrc_platform_data *pdata = dev->platform_data; + struct platform_device *musb; + struct resource *res; + struct resource resources[2]; + char res_name[10]; + int ret; + + /* get memory resource */ + sprintf(res_name, "musb%d", id); + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, res_name); + if (!res) { + dev_err(dev, "%s get mem resource failed\n", res_name); + ret = -ENODEV; + goto err0; + } + res->parent = NULL; + resources[0] = *res; + + /* get irq resource */ + sprintf(res_name, "musb%d-irq", id); + res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, res_name); + if (!res) { + dev_err(dev, "%s get irq resource failed\n", res_name); + ret = -ENODEV; + goto err0; + } + res->parent = NULL; + resources[1] = *res; + resources[1].name = "mc"; + + /* allocate the child platform device */ + musb = platform_device_alloc("musb-hdrc", -1); + if (!musb) { + dev_err(dev, "failed to allocate musb device\n"); + ret = -ENOMEM; + goto err0; + } + + musb->dev.parent = dev; + musb->dev.dma_mask = &musb_dmamask; + musb->dev.coherent_dma_mask = musb_dmamask; + + glue->musb = musb; + + pdata->platform_ops = &dsps_ops; + + ret = platform_device_add_resources(musb, resources, 2); + if (ret) { + dev_err(dev, "failed to add resources\n"); + goto err1; + } + + ret = platform_device_add_data(musb, pdata, sizeof(*pdata)); + if (ret) { + dev_err(dev, "failed to add platform_data\n"); + goto err1; + } + + ret = platform_device_add(musb); + if (ret) { + dev_err(dev, "failed to register musb device\n"); + goto err1; + } + + return 0; + +err1: + platform_device_put(musb); +err0: + return ret; +} + +static void __devexit dsps_delete_musb_pdev(struct dsps_glue *glue) +{ + platform_device_del(glue->musb); + platform_device_put(glue->musb); +} + +static int __devinit dsps_probe(struct platform_device *pdev) +{ + const struct platform_device_id *id = platform_get_device_id(pdev); + const struct dsps_musb_wrapper *wrp = + (struct dsps_musb_wrapper *)id->driver_data; + struct dsps_glue *glue; + struct resource *iomem; + int ret; + + /* allocate glue */ + glue = kzalloc(sizeof(*glue), GFP_KERNEL); + if (!glue) { + dev_err(&pdev->dev, "unable to allocate glue memory\n"); + ret = -ENOMEM; + goto err0; + } + + /* get memory resource */ + iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!iomem) { + dev_err(&pdev->dev, "failed to get usbss mem resourse\n"); + ret = -ENODEV; + goto err1; + } + + glue->dev = &pdev->dev; + + glue->wrp = kmemdup(wrp, sizeof(*wrp), GFP_KERNEL); + if (!glue->wrp) { + dev_err(&pdev->dev, "failed to duplicate wrapper struct memory\n"); + ret = -ENOMEM; + goto err1; + } + platform_set_drvdata(pdev, glue); + + /* enable the usbss clocks */ + pm_runtime_enable(&pdev->dev); + + ret = 
pm_runtime_get_sync(&pdev->dev); + if (ret < 0) { + dev_err(&pdev->dev, "pm_runtime_get_sync FAILED"); + goto err2; + } + + /* create the child platform device for first instances of musb */ + ret = dsps_create_musb_pdev(glue, 0); + if (ret != 0) { + dev_err(&pdev->dev, "failed to create child pdev\n"); + goto err3; + } + + return 0; + +err3: + pm_runtime_put(&pdev->dev); +err2: + pm_runtime_disable(&pdev->dev); + kfree(glue->wrp); +err1: + kfree(glue); +err0: + return ret; +} +static int __devexit dsps_remove(struct platform_device *pdev) +{ + struct dsps_glue *glue = platform_get_drvdata(pdev); + + /* delete the child platform device */ + dsps_delete_musb_pdev(glue); + + /* disable usbss clocks */ + pm_runtime_put(&pdev->dev); + pm_runtime_disable(&pdev->dev); + kfree(glue->wrp); + kfree(glue); + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int dsps_suspend(struct device *dev) +{ + struct musb_hdrc_platform_data *plat = dev->platform_data; + struct omap_musb_board_data *data = plat->board_data; + + /* Shutdown the on-chip PHY and its PLL. */ + if (data->set_phy_power) + data->set_phy_power(0); + + return 0; +} + +static int dsps_resume(struct device *dev) +{ + struct musb_hdrc_platform_data *plat = dev->platform_data; + struct omap_musb_board_data *data = plat->board_data; + + /* Start the on-chip PHY and its PLL. */ + if (data->set_phy_power) + data->set_phy_power(1); + + return 0; +} +#endif + +static SIMPLE_DEV_PM_OPS(dsps_pm_ops, dsps_suspend, dsps_resume); +#endif + +#ifndef __UBOOT__ +static const struct platform_device_id musb_dsps_id_table[] __devinitconst = { + { + .name = "musb-ti81xx", + .driver_data = (kernel_ulong_t) &ti81xx_driver_data, + }, + { }, /* Terminating Entry */ +}; +MODULE_DEVICE_TABLE(platform, musb_dsps_id_table); + +static const struct of_device_id musb_dsps_of_match[] __devinitconst = { + { .compatible = "musb-ti81xx", }, + { .compatible = "ti,ti81xx-musb", }, + { .compatible = "ti,am335x-musb", }, + { }, +}; +MODULE_DEVICE_TABLE(of, musb_dsps_of_match); + +static struct platform_driver dsps_usbss_driver = { + .probe = dsps_probe, + .remove = __devexit_p(dsps_remove), + .driver = { + .name = "musb-dsps", + .pm = &dsps_pm_ops, + .of_match_table = musb_dsps_of_match, + }, + .id_table = musb_dsps_id_table, +}; + +MODULE_DESCRIPTION("TI DSPS MUSB Glue Layer"); +MODULE_AUTHOR("Ravi B <ravibabu@ti.com>"); +MODULE_AUTHOR("Ajay Kumar Gupta <ajay.gupta@ti.com>"); +MODULE_LICENSE("GPL v2"); + +static int __init dsps_init(void) +{ + return platform_driver_register(&dsps_usbss_driver); +} +subsys_initcall(dsps_init); + +static void __exit dsps_exit(void) +{ + platform_driver_unregister(&dsps_usbss_driver); +} +module_exit(dsps_exit); +#endif diff --git a/drivers/usb/musb-new/musb_gadget.c b/drivers/usb/musb-new/musb_gadget.c new file mode 100644 index 00000000000..d2cb91a898f --- /dev/null +++ b/drivers/usb/musb-new/musb_gadget.c @@ -0,0 +1,2333 @@ +/* + * MUSB OTG driver peripheral support + * + * Copyright 2005 Mentor Graphics Corporation + * Copyright (C) 2005-2006 by Texas Instruments + * Copyright (C) 2006-2007 Nokia Corporation + * Copyright (C) 2009 MontaVista Software, Inc. <source@mvista.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN + * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF + * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +#define __UBOOT__ +#ifndef __UBOOT__ +#include <linux/kernel.h> +#include <linux/list.h> +#include <linux/timer.h> +#include <linux/module.h> +#include <linux/smp.h> +#include <linux/spinlock.h> +#include <linux/delay.h> +#include <linux/dma-mapping.h> +#include <linux/slab.h> +#else +#include <common.h> +#include <linux/usb/ch9.h> +#include "linux-compat.h" +#endif + +#include "musb_core.h" + + +/* MUSB PERIPHERAL status 3-mar-2006: + * + * - EP0 seems solid. It passes both USBCV and usbtest control cases. + * Minor glitches: + * + * + remote wakeup to Linux hosts work, but saw USBCV failures; + * in one test run (operator error?) + * + endpoint halt tests -- in both usbtest and usbcv -- seem + * to break when dma is enabled ... is something wrongly + * clearing SENDSTALL? + * + * - Mass storage behaved ok when last tested. Network traffic patterns + * (with lots of short transfers etc) need retesting; they turn up the + * worst cases of the DMA, since short packets are typical but are not + * required. + * + * - TX/IN + * + both pio and dma behave in with network and g_zero tests + * + no cppi throughput issues other than no-hw-queueing + * + failed with FLAT_REG (DaVinci) + * + seems to behave with double buffering, PIO -and- CPPI + * + with gadgetfs + AIO, requests got lost? + * + * - RX/OUT + * + both pio and dma behave in with network and g_zero tests + * + dma is slow in typical case (short_not_ok is clear) + * + double buffering ok with PIO + * + double buffering *FAILS* with CPPI, wrong data bytes sometimes + * + request lossage observed with gadgetfs + * + * - ISO not tested ... might work, but only weakly isochronous + * + * - Gadget driver disabling of softconnect during bind() is ignored; so + * drivers can't hold off host requests until userspace is ready. + * (Workaround: they can turn it off later.) 
+ * + * - PORTABILITY (assumes PIO works): + * + DaVinci, basically works with cppi dma + * + OMAP 2430, ditto with mentor dma + * + TUSB 6010, platform-specific dma in the works + */ + +/* ----------------------------------------------------------------------- */ + +#define is_buffer_mapped(req) (is_dma_capable() && \ + (req->map_state != UN_MAPPED)) + +#ifndef CONFIG_MUSB_PIO_ONLY +/* Maps the buffer to dma */ + +static inline void map_dma_buffer(struct musb_request *request, + struct musb *musb, struct musb_ep *musb_ep) +{ + int compatible = true; + struct dma_controller *dma = musb->dma_controller; + + request->map_state = UN_MAPPED; + + if (!is_dma_capable() || !musb_ep->dma) + return; + + /* Check if DMA engine can handle this request. + * DMA code must reject the USB request explicitly. + * Default behaviour is to map the request. + */ + if (dma->is_compatible) + compatible = dma->is_compatible(musb_ep->dma, + musb_ep->packet_sz, request->request.buf, + request->request.length); + if (!compatible) + return; + + if (request->request.dma == DMA_ADDR_INVALID) { + request->request.dma = dma_map_single( + musb->controller, + request->request.buf, + request->request.length, + request->tx + ? DMA_TO_DEVICE + : DMA_FROM_DEVICE); + request->map_state = MUSB_MAPPED; + } else { + dma_sync_single_for_device(musb->controller, + request->request.dma, + request->request.length, + request->tx + ? DMA_TO_DEVICE + : DMA_FROM_DEVICE); + request->map_state = PRE_MAPPED; + } +} + +/* Unmap the buffer from dma and maps it back to cpu */ +static inline void unmap_dma_buffer(struct musb_request *request, + struct musb *musb) +{ + if (!is_buffer_mapped(request)) + return; + + if (request->request.dma == DMA_ADDR_INVALID) { + dev_vdbg(musb->controller, + "not unmapping a never mapped buffer\n"); + return; + } + if (request->map_state == MUSB_MAPPED) { + dma_unmap_single(musb->controller, + request->request.dma, + request->request.length, + request->tx + ? DMA_TO_DEVICE + : DMA_FROM_DEVICE); + request->request.dma = DMA_ADDR_INVALID; + } else { /* PRE_MAPPED */ + dma_sync_single_for_cpu(musb->controller, + request->request.dma, + request->request.length, + request->tx + ? DMA_TO_DEVICE + : DMA_FROM_DEVICE); + } + request->map_state = UN_MAPPED; +} +#else +static inline void map_dma_buffer(struct musb_request *request, + struct musb *musb, struct musb_ep *musb_ep) +{ +} + +static inline void unmap_dma_buffer(struct musb_request *request, + struct musb *musb) +{ +} +#endif + +/* + * Immediately complete a request. + * + * @param request the request to complete + * @param status the status to complete the request with + * Context: controller locked, IRQs blocked. 
+ */ +void musb_g_giveback( + struct musb_ep *ep, + struct usb_request *request, + int status) +__releases(ep->musb->lock) +__acquires(ep->musb->lock) +{ + struct musb_request *req; + struct musb *musb; + int busy = ep->busy; + + req = to_musb_request(request); + + list_del(&req->list); + if (req->request.status == -EINPROGRESS) + req->request.status = status; + musb = req->musb; + + ep->busy = 1; + spin_unlock(&musb->lock); + unmap_dma_buffer(req, musb); + if (request->status == 0) + dev_dbg(musb->controller, "%s done request %p, %d/%d\n", + ep->end_point.name, request, + req->request.actual, req->request.length); + else + dev_dbg(musb->controller, "%s request %p, %d/%d fault %d\n", + ep->end_point.name, request, + req->request.actual, req->request.length, + request->status); + req->request.complete(&req->ep->end_point, &req->request); + spin_lock(&musb->lock); + ep->busy = busy; +} + +/* ----------------------------------------------------------------------- */ + +/* + * Abort requests queued to an endpoint using the status. Synchronous. + * caller locked controller and blocked irqs, and selected this ep. + */ +static void nuke(struct musb_ep *ep, const int status) +{ + struct musb *musb = ep->musb; + struct musb_request *req = NULL; + void __iomem *epio = ep->musb->endpoints[ep->current_epnum].regs; + + ep->busy = 1; + + if (is_dma_capable() && ep->dma) { + struct dma_controller *c = ep->musb->dma_controller; + int value; + + if (ep->is_in) { + /* + * The programming guide says that we must not clear + * the DMAMODE bit before DMAENAB, so we only + * clear it in the second write... + */ + musb_writew(epio, MUSB_TXCSR, + MUSB_TXCSR_DMAMODE | MUSB_TXCSR_FLUSHFIFO); + musb_writew(epio, MUSB_TXCSR, + 0 | MUSB_TXCSR_FLUSHFIFO); + } else { + musb_writew(epio, MUSB_RXCSR, + 0 | MUSB_RXCSR_FLUSHFIFO); + musb_writew(epio, MUSB_RXCSR, + 0 | MUSB_RXCSR_FLUSHFIFO); + } + + value = c->channel_abort(ep->dma); + dev_dbg(musb->controller, "%s: abort DMA --> %d\n", + ep->name, value); + c->channel_release(ep->dma); + ep->dma = NULL; + } + + while (!list_empty(&ep->req_list)) { + req = list_first_entry(&ep->req_list, struct musb_request, list); + musb_g_giveback(ep, &req->request, status); + } +} + +/* ----------------------------------------------------------------------- */ + +/* Data transfers - pure PIO, pure DMA, or mixed mode */ + +/* + * This assumes the separate CPPI engine is responding to DMA requests + * from the usb core ... sequenced a bit differently from mentor dma. + */ + +static inline int max_ep_writesize(struct musb *musb, struct musb_ep *ep) +{ + if (can_bulk_split(musb, ep->type)) + return ep->hw_ep->max_packet_sz_tx; + else + return ep->packet_sz; +} + + +#ifdef CONFIG_USB_INVENTRA_DMA + +/* Peripheral tx (IN) using Mentor DMA works as follows: + Only mode 0 is used for transfers <= wPktSize, + mode 1 is used for larger transfers, + + One of the following happens: + - Host sends IN token which causes an endpoint interrupt + -> TxAvail + -> if DMA is currently busy, exit. + -> if queue is non-empty, txstate(). + + - Request is queued by the gadget driver. + -> if queue was previously empty, txstate() + + txstate() + -> start + /\ -> setup DMA + | (data is transferred to the FIFO, then sent out when + | IN token(s) are recd from Host. + | -> DMA interrupt on completion + | calls TxAvail. 
+ | -> stop DMA, ~DMAENAB, + | -> set TxPktRdy for last short pkt or zlp + | -> Complete Request + | -> Continue next request (call txstate) + |___________________________________| + + * Non-Mentor DMA engines can of course work differently, such as by + * upleveling from irq-per-packet to irq-per-buffer. + */ + +#endif + +/* + * An endpoint is transmitting data. This can be called either from + * the IRQ routine or from ep.queue() to kickstart a request on an + * endpoint. + * + * Context: controller locked, IRQs blocked, endpoint selected + */ +static void txstate(struct musb *musb, struct musb_request *req) +{ + u8 epnum = req->epnum; + struct musb_ep *musb_ep; + void __iomem *epio = musb->endpoints[epnum].regs; + struct usb_request *request; + u16 fifo_count = 0, csr; + int use_dma = 0; + + musb_ep = req->ep; + + /* Check if EP is disabled */ + if (!musb_ep->desc) { + dev_dbg(musb->controller, "ep:%s disabled - ignore request\n", + musb_ep->end_point.name); + return; + } + + /* we shouldn't get here while DMA is active ... but we do ... */ + if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) { + dev_dbg(musb->controller, "dma pending...\n"); + return; + } + + /* read TXCSR before */ + csr = musb_readw(epio, MUSB_TXCSR); + + request = &req->request; + fifo_count = min(max_ep_writesize(musb, musb_ep), + (int)(request->length - request->actual)); + + if (csr & MUSB_TXCSR_TXPKTRDY) { + dev_dbg(musb->controller, "%s old packet still ready , txcsr %03x\n", + musb_ep->end_point.name, csr); + return; + } + + if (csr & MUSB_TXCSR_P_SENDSTALL) { + dev_dbg(musb->controller, "%s stalling, txcsr %03x\n", + musb_ep->end_point.name, csr); + return; + } + + dev_dbg(musb->controller, "hw_ep%d, maxpacket %d, fifo count %d, txcsr %03x\n", + epnum, musb_ep->packet_sz, fifo_count, + csr); + +#ifndef CONFIG_MUSB_PIO_ONLY + if (is_buffer_mapped(req)) { + struct dma_controller *c = musb->dma_controller; + size_t request_size; + + /* setup DMA, then program endpoint CSR */ + request_size = min_t(size_t, request->length - request->actual, + musb_ep->dma->max_len); + + use_dma = (request->dma != DMA_ADDR_INVALID); + + /* MUSB_TXCSR_P_ISO is still set correctly */ + +#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_UX500_DMA) + { + if (request_size < musb_ep->packet_sz) + musb_ep->dma->desired_mode = 0; + else + musb_ep->dma->desired_mode = 1; + + use_dma = use_dma && c->channel_program( + musb_ep->dma, musb_ep->packet_sz, + musb_ep->dma->desired_mode, + request->dma + request->actual, request_size); + if (use_dma) { + if (musb_ep->dma->desired_mode == 0) { + /* + * We must not clear the DMAMODE bit + * before the DMAENAB bit -- and the + * latter doesn't always get cleared + * before we get here... 
+ */ + csr &= ~(MUSB_TXCSR_AUTOSET + | MUSB_TXCSR_DMAENAB); + musb_writew(epio, MUSB_TXCSR, csr + | MUSB_TXCSR_P_WZC_BITS); + csr &= ~MUSB_TXCSR_DMAMODE; + csr |= (MUSB_TXCSR_DMAENAB | + MUSB_TXCSR_MODE); + /* against programming guide */ + } else { + csr |= (MUSB_TXCSR_DMAENAB + | MUSB_TXCSR_DMAMODE + | MUSB_TXCSR_MODE); + if (!musb_ep->hb_mult) + csr |= MUSB_TXCSR_AUTOSET; + } + csr &= ~MUSB_TXCSR_P_UNDERRUN; + + musb_writew(epio, MUSB_TXCSR, csr); + } + } + +#elif defined(CONFIG_USB_TI_CPPI_DMA) + /* program endpoint CSR first, then setup DMA */ + csr &= ~(MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_TXPKTRDY); + csr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_DMAMODE | + MUSB_TXCSR_MODE; + musb_writew(epio, MUSB_TXCSR, + (MUSB_TXCSR_P_WZC_BITS & ~MUSB_TXCSR_P_UNDERRUN) + | csr); + + /* ensure writebuffer is empty */ + csr = musb_readw(epio, MUSB_TXCSR); + + /* NOTE host side sets DMAENAB later than this; both are + * OK since the transfer dma glue (between CPPI and Mentor + * fifos) just tells CPPI it could start. Data only moves + * to the USB TX fifo when both fifos are ready. + */ + + /* "mode" is irrelevant here; handle terminating ZLPs like + * PIO does, since the hardware RNDIS mode seems unreliable + * except for the last-packet-is-already-short case. + */ + use_dma = use_dma && c->channel_program( + musb_ep->dma, musb_ep->packet_sz, + 0, + request->dma + request->actual, + request_size); + if (!use_dma) { + c->channel_release(musb_ep->dma); + musb_ep->dma = NULL; + csr &= ~MUSB_TXCSR_DMAENAB; + musb_writew(epio, MUSB_TXCSR, csr); + /* invariant: prequest->buf is non-null */ + } +#elif defined(CONFIG_USB_TUSB_OMAP_DMA) + use_dma = use_dma && c->channel_program( + musb_ep->dma, musb_ep->packet_sz, + request->zero, + request->dma + request->actual, + request_size); +#endif + } +#endif + + if (!use_dma) { + /* + * Unmap the dma buffer back to cpu if dma channel + * programming fails + */ + unmap_dma_buffer(req, musb); + + musb_write_fifo(musb_ep->hw_ep, fifo_count, + (u8 *) (request->buf + request->actual)); + request->actual += fifo_count; + csr |= MUSB_TXCSR_TXPKTRDY; + csr &= ~MUSB_TXCSR_P_UNDERRUN; + musb_writew(epio, MUSB_TXCSR, csr); + } + + /* host may already have the data when this message shows... */ + dev_dbg(musb->controller, "%s TX/IN %s len %d/%d, txcsr %04x, fifo %d/%d\n", + musb_ep->end_point.name, use_dma ? "dma" : "pio", + request->actual, request->length, + musb_readw(epio, MUSB_TXCSR), + fifo_count, + musb_readw(epio, MUSB_TXMAXP)); +} + +/* + * FIFO state update (e.g. data ready). + * Called from IRQ, with controller locked. + */ +void musb_g_tx(struct musb *musb, u8 epnum) +{ + u16 csr; + struct musb_request *req; + struct usb_request *request; + u8 __iomem *mbase = musb->mregs; + struct musb_ep *musb_ep = &musb->endpoints[epnum].ep_in; + void __iomem *epio = musb->endpoints[epnum].regs; + struct dma_channel *dma; + + musb_ep_select(mbase, epnum); + req = next_request(musb_ep); + request = &req->request; + + csr = musb_readw(epio, MUSB_TXCSR); + dev_dbg(musb->controller, "<== %s, txcsr %04x\n", musb_ep->end_point.name, csr); + + dma = is_dma_capable() ? musb_ep->dma : NULL; + + /* + * REVISIT: for high bandwidth, MUSB_TXCSR_P_INCOMPTX + * probably rates reporting as a host error. + */ + if (csr & MUSB_TXCSR_P_SENTSTALL) { + csr |= MUSB_TXCSR_P_WZC_BITS; + csr &= ~MUSB_TXCSR_P_SENTSTALL; + musb_writew(epio, MUSB_TXCSR, csr); + return; + } + + if (csr & MUSB_TXCSR_P_UNDERRUN) { + /* We NAKed, no big deal... little reason to care. 
*/ + csr |= MUSB_TXCSR_P_WZC_BITS; + csr &= ~(MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_TXPKTRDY); + musb_writew(epio, MUSB_TXCSR, csr); + dev_vdbg(musb->controller, "underrun on ep%d, req %p\n", + epnum, request); + } + + if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) { + /* + * SHOULD NOT HAPPEN... has with CPPI though, after + * changing SENDSTALL (and other cases); harmless? + */ + dev_dbg(musb->controller, "%s dma still busy?\n", musb_ep->end_point.name); + return; + } + + if (request) { + u8 is_dma = 0; + + if (dma && (csr & MUSB_TXCSR_DMAENAB)) { + is_dma = 1; + csr |= MUSB_TXCSR_P_WZC_BITS; + csr &= ~(MUSB_TXCSR_DMAENAB | MUSB_TXCSR_P_UNDERRUN | + MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_AUTOSET); + musb_writew(epio, MUSB_TXCSR, csr); + /* Ensure writebuffer is empty. */ + csr = musb_readw(epio, MUSB_TXCSR); + request->actual += musb_ep->dma->actual_len; + dev_dbg(musb->controller, "TXCSR%d %04x, DMA off, len %zu, req %p\n", + epnum, csr, musb_ep->dma->actual_len, request); + } + + /* + * First, maybe a terminating short packet. Some DMA + * engines might handle this by themselves. + */ + if ((request->zero && request->length + && (request->length % musb_ep->packet_sz == 0) + && (request->actual == request->length)) +#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_UX500_DMA) + || (is_dma && (!dma->desired_mode || + (request->actual & + (musb_ep->packet_sz - 1)))) +#endif + ) { + /* + * On DMA completion, FIFO may not be + * available yet... + */ + if (csr & MUSB_TXCSR_TXPKTRDY) + return; + + dev_dbg(musb->controller, "sending zero pkt\n"); + musb_writew(epio, MUSB_TXCSR, MUSB_TXCSR_MODE + | MUSB_TXCSR_TXPKTRDY); + request->zero = 0; + } + + if (request->actual == request->length) { + musb_g_giveback(musb_ep, request, 0); + /* + * In the giveback function the MUSB lock is + * released and acquired after sometime. During + * this time period the INDEX register could get + * changed by the gadget_queue function especially + * on SMP systems. Reselect the INDEX to be sure + * we are reading/modifying the right registers + */ + musb_ep_select(mbase, epnum); + req = musb_ep->desc ? next_request(musb_ep) : NULL; + if (!req) { + dev_dbg(musb->controller, "%s idle now\n", + musb_ep->end_point.name); + return; + } + } + + txstate(musb, req); + } +} + +/* ------------------------------------------------------------ */ + +#ifdef CONFIG_USB_INVENTRA_DMA + +/* Peripheral rx (OUT) using Mentor DMA works as follows: + - Only mode 0 is used. + + - Request is queued by the gadget class driver. + -> if queue was previously empty, rxstate() + + - Host sends OUT token which causes an endpoint interrupt + /\ -> RxReady + | -> if request queued, call rxstate + | /\ -> setup DMA + | | -> DMA interrupt on completion + | | -> RxReady + | | -> stop DMA + | | -> ack the read + | | -> if data recd = max expected + | | by the request, or host + | | sent a short packet, + | | complete the request, + | | and start the next one. + | |_____________________________________| + | else just wait for the host + | to send the next OUT token. + |__________________________________________________| + + * Non-Mentor DMA engines can of course work differently. 
+ */ + +#endif + +/* + * Context: controller locked, IRQs blocked, endpoint selected + */ +static void rxstate(struct musb *musb, struct musb_request *req) +{ + const u8 epnum = req->epnum; + struct usb_request *request = &req->request; + struct musb_ep *musb_ep; + void __iomem *epio = musb->endpoints[epnum].regs; + unsigned fifo_count = 0; + u16 len; + u16 csr = musb_readw(epio, MUSB_RXCSR); + struct musb_hw_ep *hw_ep = &musb->endpoints[epnum]; + u8 use_mode_1; + + if (hw_ep->is_shared_fifo) + musb_ep = &hw_ep->ep_in; + else + musb_ep = &hw_ep->ep_out; + + len = musb_ep->packet_sz; + + /* Check if EP is disabled */ + if (!musb_ep->desc) { + dev_dbg(musb->controller, "ep:%s disabled - ignore request\n", + musb_ep->end_point.name); + return; + } + + /* We shouldn't get here while DMA is active, but we do... */ + if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) { + dev_dbg(musb->controller, "DMA pending...\n"); + return; + } + + if (csr & MUSB_RXCSR_P_SENDSTALL) { + dev_dbg(musb->controller, "%s stalling, RXCSR %04x\n", + musb_ep->end_point.name, csr); + return; + } + + if (is_cppi_enabled() && is_buffer_mapped(req)) { + struct dma_controller *c = musb->dma_controller; + struct dma_channel *channel = musb_ep->dma; + + /* NOTE: CPPI won't actually stop advancing the DMA + * queue after short packet transfers, so this is almost + * always going to run as IRQ-per-packet DMA so that + * faults will be handled correctly. + */ + if (c->channel_program(channel, + musb_ep->packet_sz, + !request->short_not_ok, + request->dma + request->actual, + request->length - request->actual)) { + + /* make sure that if an rxpkt arrived after the irq, + * the cppi engine will be ready to take it as soon + * as DMA is enabled + */ + csr &= ~(MUSB_RXCSR_AUTOCLEAR + | MUSB_RXCSR_DMAMODE); + csr |= MUSB_RXCSR_DMAENAB | MUSB_RXCSR_P_WZC_BITS; + musb_writew(epio, MUSB_RXCSR, csr); + return; + } + } + + if (csr & MUSB_RXCSR_RXPKTRDY) { + len = musb_readw(epio, MUSB_RXCOUNT); + + /* + * Enable Mode 1 on RX transfers only when short_not_ok flag + * is set. Currently short_not_ok flag is set only from + * file_storage and f_mass_storage drivers + */ + + if (request->short_not_ok && len == musb_ep->packet_sz) + use_mode_1 = 1; + else + use_mode_1 = 0; + + if (request->actual < request->length) { +#ifdef CONFIG_USB_INVENTRA_DMA + if (is_buffer_mapped(req)) { + struct dma_controller *c; + struct dma_channel *channel; + int use_dma = 0; + + c = musb->dma_controller; + channel = musb_ep->dma; + + /* We use DMA Req mode 0 in rx_csr, and DMA controller operates in + * mode 0 only. So we do not get endpoint interrupts due to DMA + * completion. We only get interrupts from DMA controller. + * + * We could operate in DMA mode 1 if we knew the size of the tranfer + * in advance. For mass storage class, request->length = what the host + * sends, so that'd work. But for pretty much everything else, + * request->length is routinely more than what the host sends. For + * most these gadgets, end of is signified either by a short packet, + * or filling the last byte of the buffer. (Sending extra data in + * that last pckate should trigger an overflow fault.) But in mode 1, + * we don't get DMA completion interrupt for short packets. + * + * Theoretically, we could enable DMAReq irq (MUSB_RXCSR_DMAMODE = 1), + * to get endpoint interrupt on every DMA req, but that didn't seem + * to work reliably. + * + * REVISIT an updated g_file_storage can set req->short_not_ok, which + * then becomes usable as a runtime "use mode 1" hint... 
+ */ + + /* Experimental: Mode1 works with mass storage use cases */ + if (use_mode_1) { + csr |= MUSB_RXCSR_AUTOCLEAR; + musb_writew(epio, MUSB_RXCSR, csr); + csr |= MUSB_RXCSR_DMAENAB; + musb_writew(epio, MUSB_RXCSR, csr); + + /* + * this special sequence (enabling and then + * disabling MUSB_RXCSR_DMAMODE) is required + * to get DMAReq to activate + */ + musb_writew(epio, MUSB_RXCSR, + csr | MUSB_RXCSR_DMAMODE); + musb_writew(epio, MUSB_RXCSR, csr); + + } else { + if (!musb_ep->hb_mult && + musb_ep->hw_ep->rx_double_buffered) + csr |= MUSB_RXCSR_AUTOCLEAR; + csr |= MUSB_RXCSR_DMAENAB; + musb_writew(epio, MUSB_RXCSR, csr); + } + + if (request->actual < request->length) { + int transfer_size = 0; + if (use_mode_1) { + transfer_size = min(request->length - request->actual, + channel->max_len); + musb_ep->dma->desired_mode = 1; + } else { + transfer_size = min(request->length - request->actual, + (unsigned)len); + musb_ep->dma->desired_mode = 0; + } + + use_dma = c->channel_program( + channel, + musb_ep->packet_sz, + channel->desired_mode, + request->dma + + request->actual, + transfer_size); + } + + if (use_dma) + return; + } +#elif defined(CONFIG_USB_UX500_DMA) + if ((is_buffer_mapped(req)) && + (request->actual < request->length)) { + + struct dma_controller *c; + struct dma_channel *channel; + int transfer_size = 0; + + c = musb->dma_controller; + channel = musb_ep->dma; + + /* In case first packet is short */ + if (len < musb_ep->packet_sz) + transfer_size = len; + else if (request->short_not_ok) + transfer_size = min(request->length - + request->actual, + channel->max_len); + else + transfer_size = min(request->length - + request->actual, + (unsigned)len); + + csr &= ~MUSB_RXCSR_DMAMODE; + csr |= (MUSB_RXCSR_DMAENAB | + MUSB_RXCSR_AUTOCLEAR); + + musb_writew(epio, MUSB_RXCSR, csr); + + if (transfer_size <= musb_ep->packet_sz) { + musb_ep->dma->desired_mode = 0; + } else { + musb_ep->dma->desired_mode = 1; + /* Mode must be set after DMAENAB */ + csr |= MUSB_RXCSR_DMAMODE; + musb_writew(epio, MUSB_RXCSR, csr); + } + + if (c->channel_program(channel, + musb_ep->packet_sz, + channel->desired_mode, + request->dma + + request->actual, + transfer_size)) + + return; + } +#endif /* Mentor's DMA */ + + fifo_count = request->length - request->actual; + dev_dbg(musb->controller, "%s OUT/RX pio fifo %d/%d, maxpacket %d\n", + musb_ep->end_point.name, + len, fifo_count, + musb_ep->packet_sz); + + fifo_count = min_t(unsigned, len, fifo_count); + +#ifdef CONFIG_USB_TUSB_OMAP_DMA + if (tusb_dma_omap() && is_buffer_mapped(req)) { + struct dma_controller *c = musb->dma_controller; + struct dma_channel *channel = musb_ep->dma; + u32 dma_addr = request->dma + request->actual; + int ret; + + ret = c->channel_program(channel, + musb_ep->packet_sz, + channel->desired_mode, + dma_addr, + fifo_count); + if (ret) + return; + } +#endif + /* + * Unmap the dma buffer back to cpu if dma channel + * programming fails. This buffer is mapped if the + * channel allocation is successful + */ + if (is_buffer_mapped(req)) { + unmap_dma_buffer(req, musb); + + /* + * Clear DMAENAB and AUTOCLEAR for the + * PIO mode transfer + */ + csr &= ~(MUSB_RXCSR_DMAENAB | MUSB_RXCSR_AUTOCLEAR); + musb_writew(epio, MUSB_RXCSR, csr); + } + + musb_read_fifo(musb_ep->hw_ep, fifo_count, (u8 *) + (request->buf + request->actual)); + request->actual += fifo_count; + + /* REVISIT if we left anything in the fifo, flush + * it and report -EOVERFLOW + */ + + /* ack the read! 
*/ + csr |= MUSB_RXCSR_P_WZC_BITS; + csr &= ~MUSB_RXCSR_RXPKTRDY; + musb_writew(epio, MUSB_RXCSR, csr); + } + } + + /* reach the end or short packet detected */ + if (request->actual == request->length || len < musb_ep->packet_sz) + musb_g_giveback(musb_ep, request, 0); +} + +/* + * Data ready for a request; called from IRQ + */ +void musb_g_rx(struct musb *musb, u8 epnum) +{ + u16 csr; + struct musb_request *req; + struct usb_request *request; + void __iomem *mbase = musb->mregs; + struct musb_ep *musb_ep; + void __iomem *epio = musb->endpoints[epnum].regs; + struct dma_channel *dma; + struct musb_hw_ep *hw_ep = &musb->endpoints[epnum]; + + if (hw_ep->is_shared_fifo) + musb_ep = &hw_ep->ep_in; + else + musb_ep = &hw_ep->ep_out; + + musb_ep_select(mbase, epnum); + + req = next_request(musb_ep); + if (!req) + return; + + request = &req->request; + + csr = musb_readw(epio, MUSB_RXCSR); + dma = is_dma_capable() ? musb_ep->dma : NULL; + + dev_dbg(musb->controller, "<== %s, rxcsr %04x%s %p\n", musb_ep->end_point.name, + csr, dma ? " (dma)" : "", request); + + if (csr & MUSB_RXCSR_P_SENTSTALL) { + csr |= MUSB_RXCSR_P_WZC_BITS; + csr &= ~MUSB_RXCSR_P_SENTSTALL; + musb_writew(epio, MUSB_RXCSR, csr); + return; + } + + if (csr & MUSB_RXCSR_P_OVERRUN) { + /* csr |= MUSB_RXCSR_P_WZC_BITS; */ + csr &= ~MUSB_RXCSR_P_OVERRUN; + musb_writew(epio, MUSB_RXCSR, csr); + + dev_dbg(musb->controller, "%s iso overrun on %p\n", musb_ep->name, request); + if (request->status == -EINPROGRESS) + request->status = -EOVERFLOW; + } + if (csr & MUSB_RXCSR_INCOMPRX) { + /* REVISIT not necessarily an error */ + dev_dbg(musb->controller, "%s, incomprx\n", musb_ep->end_point.name); + } + + if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) { + /* "should not happen"; likely RXPKTRDY pending for DMA */ + dev_dbg(musb->controller, "%s busy, csr %04x\n", + musb_ep->end_point.name, csr); + return; + } + + if (dma && (csr & MUSB_RXCSR_DMAENAB)) { + csr &= ~(MUSB_RXCSR_AUTOCLEAR + | MUSB_RXCSR_DMAENAB + | MUSB_RXCSR_DMAMODE); + musb_writew(epio, MUSB_RXCSR, + MUSB_RXCSR_P_WZC_BITS | csr); + + request->actual += musb_ep->dma->actual_len; + + dev_dbg(musb->controller, "RXCSR%d %04x, dma off, %04x, len %zu, req %p\n", + epnum, csr, + musb_readw(epio, MUSB_RXCSR), + musb_ep->dma->actual_len, request); + +#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA) || \ + defined(CONFIG_USB_UX500_DMA) + /* Autoclear doesn't clear RxPktRdy for short packets */ + if ((dma->desired_mode == 0 && !hw_ep->rx_double_buffered) + || (dma->actual_len + & (musb_ep->packet_sz - 1))) { + /* ack the read! */ + csr &= ~MUSB_RXCSR_RXPKTRDY; + musb_writew(epio, MUSB_RXCSR, csr); + } + + /* incomplete, and not short? wait for next IN packet */ + if ((request->actual < request->length) + && (musb_ep->dma->actual_len + == musb_ep->packet_sz)) { + /* In double buffer case, continue to unload fifo if + * there is Rx packet in FIFO. + **/ + csr = musb_readw(epio, MUSB_RXCSR); + if ((csr & MUSB_RXCSR_RXPKTRDY) && + hw_ep->rx_double_buffered) + goto exit; + return; + } +#endif + musb_g_giveback(musb_ep, request, 0); + /* + * In the giveback function the MUSB lock is + * released and acquired after sometime. During + * this time period the INDEX register could get + * changed by the gadget_queue function especially + * on SMP systems. 
Reselect the INDEX to be sure + * we are reading/modifying the right registers + */ + musb_ep_select(mbase, epnum); + + req = next_request(musb_ep); + if (!req) + return; + } +#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA) || \ + defined(CONFIG_USB_UX500_DMA) +exit: +#endif + /* Analyze request */ + rxstate(musb, req); +} + +/* ------------------------------------------------------------ */ + +static int musb_gadget_enable(struct usb_ep *ep, + const struct usb_endpoint_descriptor *desc) +{ + unsigned long flags; + struct musb_ep *musb_ep; + struct musb_hw_ep *hw_ep; + void __iomem *regs; + struct musb *musb; + void __iomem *mbase; + u8 epnum; + u16 csr; + unsigned tmp; + int status = -EINVAL; + + if (!ep || !desc) + return -EINVAL; + + musb_ep = to_musb_ep(ep); + hw_ep = musb_ep->hw_ep; + regs = hw_ep->regs; + musb = musb_ep->musb; + mbase = musb->mregs; + epnum = musb_ep->current_epnum; + + spin_lock_irqsave(&musb->lock, flags); + + if (musb_ep->desc) { + status = -EBUSY; + goto fail; + } + musb_ep->type = usb_endpoint_type(desc); + + /* check direction and (later) maxpacket size against endpoint */ + if (usb_endpoint_num(desc) != epnum) + goto fail; + + /* REVISIT this rules out high bandwidth periodic transfers */ + tmp = usb_endpoint_maxp(desc); + if (tmp & ~0x07ff) { + int ok; + + if (usb_endpoint_dir_in(desc)) + ok = musb->hb_iso_tx; + else + ok = musb->hb_iso_rx; + + if (!ok) { + dev_dbg(musb->controller, "no support for high bandwidth ISO\n"); + goto fail; + } + musb_ep->hb_mult = (tmp >> 11) & 3; + } else { + musb_ep->hb_mult = 0; + } + + musb_ep->packet_sz = tmp & 0x7ff; + tmp = musb_ep->packet_sz * (musb_ep->hb_mult + 1); + + /* enable the interrupts for the endpoint, set the endpoint + * packet size (or fail), set the mode, clear the fifo + */ + musb_ep_select(mbase, epnum); + if (usb_endpoint_dir_in(desc)) { + u16 int_txe = musb_readw(mbase, MUSB_INTRTXE); + + if (hw_ep->is_shared_fifo) + musb_ep->is_in = 1; + if (!musb_ep->is_in) + goto fail; + + if (tmp > hw_ep->max_packet_sz_tx) { + dev_dbg(musb->controller, "packet size beyond hardware FIFO size\n"); + goto fail; + } + + int_txe |= (1 << epnum); + musb_writew(mbase, MUSB_INTRTXE, int_txe); + + /* REVISIT if can_bulk_split(), use by updating "tmp"; + * likewise high bandwidth periodic tx + */ + /* Set TXMAXP with the FIFO size of the endpoint + * to disable double buffering mode. + */ + if (musb->double_buffer_not_ok) + musb_writew(regs, MUSB_TXMAXP, hw_ep->max_packet_sz_tx); + else + musb_writew(regs, MUSB_TXMAXP, musb_ep->packet_sz + | (musb_ep->hb_mult << 11)); + + csr = MUSB_TXCSR_MODE | MUSB_TXCSR_CLRDATATOG; + if (musb_readw(regs, MUSB_TXCSR) + & MUSB_TXCSR_FIFONOTEMPTY) + csr |= MUSB_TXCSR_FLUSHFIFO; + if (musb_ep->type == USB_ENDPOINT_XFER_ISOC) + csr |= MUSB_TXCSR_P_ISO; + + /* set twice in case of double buffering */ + musb_writew(regs, MUSB_TXCSR, csr); + /* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */ + musb_writew(regs, MUSB_TXCSR, csr); + + } else { + u16 int_rxe = musb_readw(mbase, MUSB_INTRRXE); + + if (hw_ep->is_shared_fifo) + musb_ep->is_in = 0; + if (musb_ep->is_in) + goto fail; + + if (tmp > hw_ep->max_packet_sz_rx) { + dev_dbg(musb->controller, "packet size beyond hardware FIFO size\n"); + goto fail; + } + + int_rxe |= (1 << epnum); + musb_writew(mbase, MUSB_INTRRXE, int_rxe); + + /* REVISIT if can_bulk_combine() use by updating "tmp" + * likewise high bandwidth periodic rx + */ + /* Set RXMAXP with the FIFO size of the endpoint + * to disable double buffering mode. 
+ */ + if (musb->double_buffer_not_ok) + musb_writew(regs, MUSB_RXMAXP, hw_ep->max_packet_sz_tx); + else + musb_writew(regs, MUSB_RXMAXP, musb_ep->packet_sz + | (musb_ep->hb_mult << 11)); + + /* force shared fifo to OUT-only mode */ + if (hw_ep->is_shared_fifo) { + csr = musb_readw(regs, MUSB_TXCSR); + csr &= ~(MUSB_TXCSR_MODE | MUSB_TXCSR_TXPKTRDY); + musb_writew(regs, MUSB_TXCSR, csr); + } + + csr = MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_CLRDATATOG; + if (musb_ep->type == USB_ENDPOINT_XFER_ISOC) + csr |= MUSB_RXCSR_P_ISO; + else if (musb_ep->type == USB_ENDPOINT_XFER_INT) + csr |= MUSB_RXCSR_DISNYET; + + /* set twice in case of double buffering */ + musb_writew(regs, MUSB_RXCSR, csr); + musb_writew(regs, MUSB_RXCSR, csr); + } + + /* NOTE: all the I/O code _should_ work fine without DMA, in case + * for some reason you run out of channels here. + */ + if (is_dma_capable() && musb->dma_controller) { + struct dma_controller *c = musb->dma_controller; + + musb_ep->dma = c->channel_alloc(c, hw_ep, + (desc->bEndpointAddress & USB_DIR_IN)); + } else + musb_ep->dma = NULL; + + musb_ep->desc = desc; + musb_ep->busy = 0; + musb_ep->wedged = 0; + status = 0; + + pr_debug("%s periph: enabled %s for %s %s, %smaxpacket %d\n", + musb_driver_name, musb_ep->end_point.name, + ({ char *s; switch (musb_ep->type) { + case USB_ENDPOINT_XFER_BULK: s = "bulk"; break; + case USB_ENDPOINT_XFER_INT: s = "int"; break; + default: s = "iso"; break; + }; s; }), + musb_ep->is_in ? "IN" : "OUT", + musb_ep->dma ? "dma, " : "", + musb_ep->packet_sz); + + schedule_work(&musb->irq_work); + +fail: + spin_unlock_irqrestore(&musb->lock, flags); + return status; +} + +/* + * Disable an endpoint flushing all requests queued. + */ +static int musb_gadget_disable(struct usb_ep *ep) +{ + unsigned long flags; + struct musb *musb; + u8 epnum; + struct musb_ep *musb_ep; + void __iomem *epio; + int status = 0; + + musb_ep = to_musb_ep(ep); + musb = musb_ep->musb; + epnum = musb_ep->current_epnum; + epio = musb->endpoints[epnum].regs; + + spin_lock_irqsave(&musb->lock, flags); + musb_ep_select(musb->mregs, epnum); + + /* zero the endpoint sizes */ + if (musb_ep->is_in) { + u16 int_txe = musb_readw(musb->mregs, MUSB_INTRTXE); + int_txe &= ~(1 << epnum); + musb_writew(musb->mregs, MUSB_INTRTXE, int_txe); + musb_writew(epio, MUSB_TXMAXP, 0); + } else { + u16 int_rxe = musb_readw(musb->mregs, MUSB_INTRRXE); + int_rxe &= ~(1 << epnum); + musb_writew(musb->mregs, MUSB_INTRRXE, int_rxe); + musb_writew(epio, MUSB_RXMAXP, 0); + } + + musb_ep->desc = NULL; +#ifndef __UBOOT__ + musb_ep->end_point.desc = NULL; +#endif + + /* abort all pending DMA and requests */ + nuke(musb_ep, -ESHUTDOWN); + + schedule_work(&musb->irq_work); + + spin_unlock_irqrestore(&(musb->lock), flags); + + dev_dbg(musb->controller, "%s\n", musb_ep->end_point.name); + + return status; +} + +/* + * Allocate a request for an endpoint. + * Reused by ep0 code. + */ +struct usb_request *musb_alloc_request(struct usb_ep *ep, gfp_t gfp_flags) +{ + struct musb_ep *musb_ep = to_musb_ep(ep); + struct musb *musb = musb_ep->musb; + struct musb_request *request = NULL; + + request = kzalloc(sizeof *request, gfp_flags); + if (!request) { + dev_dbg(musb->controller, "not enough memory\n"); + return NULL; + } + + request->request.dma = DMA_ADDR_INVALID; + request->epnum = musb_ep->current_epnum; + request->ep = musb_ep; + + return &request->request; +} + +/* + * Free a request + * Reused by ep0 code. 
+ */ +void musb_free_request(struct usb_ep *ep, struct usb_request *req) +{ + kfree(to_musb_request(req)); +} + +static LIST_HEAD(buffers); + +struct free_record { + struct list_head list; + struct device *dev; + unsigned bytes; + dma_addr_t dma; +}; + +/* + * Context: controller locked, IRQs blocked. + */ +void musb_ep_restart(struct musb *musb, struct musb_request *req) +{ + dev_dbg(musb->controller, "<== %s request %p len %u on hw_ep%d\n", + req->tx ? "TX/IN" : "RX/OUT", + &req->request, req->request.length, req->epnum); + + musb_ep_select(musb->mregs, req->epnum); + if (req->tx) + txstate(musb, req); + else + rxstate(musb, req); +} + +static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req, + gfp_t gfp_flags) +{ + struct musb_ep *musb_ep; + struct musb_request *request; + struct musb *musb; + int status = 0; + unsigned long lockflags; + + if (!ep || !req) + return -EINVAL; + if (!req->buf) + return -ENODATA; + + musb_ep = to_musb_ep(ep); + musb = musb_ep->musb; + + request = to_musb_request(req); + request->musb = musb; + + if (request->ep != musb_ep) + return -EINVAL; + + dev_dbg(musb->controller, "<== to %s request=%p\n", ep->name, req); + + /* request is mine now... */ + request->request.actual = 0; + request->request.status = -EINPROGRESS; + request->epnum = musb_ep->current_epnum; + request->tx = musb_ep->is_in; + + map_dma_buffer(request, musb, musb_ep); + + spin_lock_irqsave(&musb->lock, lockflags); + + /* don't queue if the ep is down */ + if (!musb_ep->desc) { + dev_dbg(musb->controller, "req %p queued to %s while ep %s\n", + req, ep->name, "disabled"); + status = -ESHUTDOWN; + goto cleanup; + } + + /* add request to the list */ + list_add_tail(&request->list, &musb_ep->req_list); + + /* it this is the head of the queue, start i/o ... */ + if (!musb_ep->busy && &request->list == musb_ep->req_list.next) + musb_ep_restart(musb, request); + +cleanup: + spin_unlock_irqrestore(&musb->lock, lockflags); + return status; +} + +static int musb_gadget_dequeue(struct usb_ep *ep, struct usb_request *request) +{ + struct musb_ep *musb_ep = to_musb_ep(ep); + struct musb_request *req = to_musb_request(request); + struct musb_request *r; + unsigned long flags; + int status = 0; + struct musb *musb = musb_ep->musb; + + if (!ep || !request || to_musb_request(request)->ep != musb_ep) + return -EINVAL; + + spin_lock_irqsave(&musb->lock, flags); + + list_for_each_entry(r, &musb_ep->req_list, list) { + if (r == req) + break; + } + if (r != req) { + dev_dbg(musb->controller, "request %p not queued to %s\n", request, ep->name); + status = -EINVAL; + goto done; + } + + /* if the hardware doesn't have the request, easy ... */ + if (musb_ep->req_list.next != &req->list || musb_ep->busy) + musb_g_giveback(musb_ep, request, -ECONNRESET); + + /* ... else abort the dma transfer ... */ + else if (is_dma_capable() && musb_ep->dma) { + struct dma_controller *c = musb->dma_controller; + + musb_ep_select(musb->mregs, musb_ep->current_epnum); + if (c->channel_abort) + status = c->channel_abort(musb_ep->dma); + else + status = -EBUSY; + if (status == 0) + musb_g_giveback(musb_ep, request, -ECONNRESET); + } else { + /* NOTE: by sticking to easily tested hardware/driver states, + * we leave counting of in-flight packets imprecise. + */ + musb_g_giveback(musb_ep, request, -ECONNRESET); + } + +done: + spin_unlock_irqrestore(&musb->lock, flags); + return status; +} + +/* + * Set or clear the halt bit of an endpoint. A halted enpoint won't tx/rx any + * data but will queue requests. 
+ * + * exported to ep0 code + */ +static int musb_gadget_set_halt(struct usb_ep *ep, int value) +{ + struct musb_ep *musb_ep = to_musb_ep(ep); + u8 epnum = musb_ep->current_epnum; + struct musb *musb = musb_ep->musb; + void __iomem *epio = musb->endpoints[epnum].regs; + void __iomem *mbase; + unsigned long flags; + u16 csr; + struct musb_request *request; + int status = 0; + + if (!ep) + return -EINVAL; + mbase = musb->mregs; + + spin_lock_irqsave(&musb->lock, flags); + + if ((USB_ENDPOINT_XFER_ISOC == musb_ep->type)) { + status = -EINVAL; + goto done; + } + + musb_ep_select(mbase, epnum); + + request = next_request(musb_ep); + if (value) { + if (request) { + dev_dbg(musb->controller, "request in progress, cannot halt %s\n", + ep->name); + status = -EAGAIN; + goto done; + } + /* Cannot portably stall with non-empty FIFO */ + if (musb_ep->is_in) { + csr = musb_readw(epio, MUSB_TXCSR); + if (csr & MUSB_TXCSR_FIFONOTEMPTY) { + dev_dbg(musb->controller, "FIFO busy, cannot halt %s\n", ep->name); + status = -EAGAIN; + goto done; + } + } + } else + musb_ep->wedged = 0; + + /* set/clear the stall and toggle bits */ + dev_dbg(musb->controller, "%s: %s stall\n", ep->name, value ? "set" : "clear"); + if (musb_ep->is_in) { + csr = musb_readw(epio, MUSB_TXCSR); + csr |= MUSB_TXCSR_P_WZC_BITS + | MUSB_TXCSR_CLRDATATOG; + if (value) + csr |= MUSB_TXCSR_P_SENDSTALL; + else + csr &= ~(MUSB_TXCSR_P_SENDSTALL + | MUSB_TXCSR_P_SENTSTALL); + csr &= ~MUSB_TXCSR_TXPKTRDY; + musb_writew(epio, MUSB_TXCSR, csr); + } else { + csr = musb_readw(epio, MUSB_RXCSR); + csr |= MUSB_RXCSR_P_WZC_BITS + | MUSB_RXCSR_FLUSHFIFO + | MUSB_RXCSR_CLRDATATOG; + if (value) + csr |= MUSB_RXCSR_P_SENDSTALL; + else + csr &= ~(MUSB_RXCSR_P_SENDSTALL + | MUSB_RXCSR_P_SENTSTALL); + musb_writew(epio, MUSB_RXCSR, csr); + } + + /* maybe start the first request in the queue */ + if (!musb_ep->busy && !value && request) { + dev_dbg(musb->controller, "restarting the request\n"); + musb_ep_restart(musb, request); + } + +done: + spin_unlock_irqrestore(&musb->lock, flags); + return status; +} + +#ifndef __UBOOT__ +/* + * Sets the halt feature with the clear requests ignored + */ +static int musb_gadget_set_wedge(struct usb_ep *ep) +{ + struct musb_ep *musb_ep = to_musb_ep(ep); + + if (!ep) + return -EINVAL; + + musb_ep->wedged = 1; + + return usb_ep_set_halt(ep); +} +#endif + +static int musb_gadget_fifo_status(struct usb_ep *ep) +{ + struct musb_ep *musb_ep = to_musb_ep(ep); + void __iomem *epio = musb_ep->hw_ep->regs; + int retval = -EINVAL; + + if (musb_ep->desc && !musb_ep->is_in) { + struct musb *musb = musb_ep->musb; + int epnum = musb_ep->current_epnum; + void __iomem *mbase = musb->mregs; + unsigned long flags; + + spin_lock_irqsave(&musb->lock, flags); + + musb_ep_select(mbase, epnum); + /* FIXME return zero unless RXPKTRDY is set */ + retval = musb_readw(epio, MUSB_RXCOUNT); + + spin_unlock_irqrestore(&musb->lock, flags); + } + return retval; +} + +static void musb_gadget_fifo_flush(struct usb_ep *ep) +{ + struct musb_ep *musb_ep = to_musb_ep(ep); + struct musb *musb = musb_ep->musb; + u8 epnum = musb_ep->current_epnum; + void __iomem *epio = musb->endpoints[epnum].regs; + void __iomem *mbase; + unsigned long flags; + u16 csr, int_txe; + + mbase = musb->mregs; + + spin_lock_irqsave(&musb->lock, flags); + musb_ep_select(mbase, (u8) epnum); + + /* disable interrupts */ + int_txe = musb_readw(mbase, MUSB_INTRTXE); + musb_writew(mbase, MUSB_INTRTXE, int_txe & ~(1 << epnum)); + + if (musb_ep->is_in) { + csr = musb_readw(epio, MUSB_TXCSR); + 
if (csr & MUSB_TXCSR_FIFONOTEMPTY) { + csr |= MUSB_TXCSR_FLUSHFIFO | MUSB_TXCSR_P_WZC_BITS; + /* + * Setting both TXPKTRDY and FLUSHFIFO makes controller + * to interrupt current FIFO loading, but not flushing + * the already loaded ones. + */ + csr &= ~MUSB_TXCSR_TXPKTRDY; + musb_writew(epio, MUSB_TXCSR, csr); + /* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */ + musb_writew(epio, MUSB_TXCSR, csr); + } + } else { + csr = musb_readw(epio, MUSB_RXCSR); + csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_P_WZC_BITS; + musb_writew(epio, MUSB_RXCSR, csr); + musb_writew(epio, MUSB_RXCSR, csr); + } + + /* re-enable interrupt */ + musb_writew(mbase, MUSB_INTRTXE, int_txe); + spin_unlock_irqrestore(&musb->lock, flags); +} + +static const struct usb_ep_ops musb_ep_ops = { + .enable = musb_gadget_enable, + .disable = musb_gadget_disable, + .alloc_request = musb_alloc_request, + .free_request = musb_free_request, + .queue = musb_gadget_queue, + .dequeue = musb_gadget_dequeue, + .set_halt = musb_gadget_set_halt, +#ifndef __UBOOT__ + .set_wedge = musb_gadget_set_wedge, +#endif + .fifo_status = musb_gadget_fifo_status, + .fifo_flush = musb_gadget_fifo_flush +}; + +/* ----------------------------------------------------------------------- */ + +static int musb_gadget_get_frame(struct usb_gadget *gadget) +{ + struct musb *musb = gadget_to_musb(gadget); + + return (int)musb_readw(musb->mregs, MUSB_FRAME); +} + +static int musb_gadget_wakeup(struct usb_gadget *gadget) +{ +#ifndef __UBOOT__ + struct musb *musb = gadget_to_musb(gadget); + void __iomem *mregs = musb->mregs; + unsigned long flags; + int status = -EINVAL; + u8 power, devctl; + int retries; + + spin_lock_irqsave(&musb->lock, flags); + + switch (musb->xceiv->state) { + case OTG_STATE_B_PERIPHERAL: + /* NOTE: OTG state machine doesn't include B_SUSPENDED; + * that's part of the standard usb 1.1 state machine, and + * doesn't affect OTG transitions. + */ + if (musb->may_wakeup && musb->is_suspended) + break; + goto done; + case OTG_STATE_B_IDLE: + /* Start SRP ... OTG not required. 
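musb_gadget_fifo_flush() above saves the TX interrupt-enable mask, clears this endpoint's bit so the flush cannot race the ISR, writes the flush into the CSR (twice, per the REVISIT note), and then restores the mask exactly as it was. A standalone sketch of that save/mask/restore pattern follows; the fake registers and the CSR bit position are placeholders, not the real MUSB layout.

#include <stdio.h>
#include <stdint.h>

/* Fake 16-bit "registers"; the driver uses MUSB_INTRTXE and MUSB_TXCSR. */
static uint16_t intrtxe = 0x00ff;        /* per-endpoint TX interrupt enables */
static uint16_t txcsr;

#define CSR_FLUSHFIFO (1u << 3)          /* placeholder bit, not the real layout */

static void write_txcsr(uint16_t v)
{
    txcsr = v;                           /* stands in for musb_writew(epio, ...) */
}

static void fifo_flush(int epnum)
{
    uint16_t saved = intrtxe;

    /* mask this endpoint's TX interrupt while we poke the FIFO */
    intrtxe = (uint16_t)(saved & ~(1u << epnum));

    /* the driver writes the CSR twice here (see the REVISIT note above) */
    write_txcsr(txcsr | CSR_FLUSHFIFO);
    write_txcsr(txcsr | CSR_FLUSHFIFO);

    /* restore the interrupt enables exactly as they were */
    intrtxe = saved;
}

int main(void)
{
    fifo_flush(2);
    printf("intrtxe=%04x txcsr=%04x\n", (unsigned)intrtxe, (unsigned)txcsr);
    return 0;
}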
*/ + devctl = musb_readb(mregs, MUSB_DEVCTL); + dev_dbg(musb->controller, "Sending SRP: devctl: %02x\n", devctl); + devctl |= MUSB_DEVCTL_SESSION; + musb_writeb(mregs, MUSB_DEVCTL, devctl); + devctl = musb_readb(mregs, MUSB_DEVCTL); + retries = 100; + while (!(devctl & MUSB_DEVCTL_SESSION)) { + devctl = musb_readb(mregs, MUSB_DEVCTL); + if (retries-- < 1) + break; + } + retries = 10000; + while (devctl & MUSB_DEVCTL_SESSION) { + devctl = musb_readb(mregs, MUSB_DEVCTL); + if (retries-- < 1) + break; + } + + spin_unlock_irqrestore(&musb->lock, flags); + otg_start_srp(musb->xceiv->otg); + spin_lock_irqsave(&musb->lock, flags); + + /* Block idling for at least 1s */ + musb_platform_try_idle(musb, + jiffies + msecs_to_jiffies(1 * HZ)); + + status = 0; + goto done; + default: + dev_dbg(musb->controller, "Unhandled wake: %s\n", + otg_state_string(musb->xceiv->state)); + goto done; + } + + status = 0; + + power = musb_readb(mregs, MUSB_POWER); + power |= MUSB_POWER_RESUME; + musb_writeb(mregs, MUSB_POWER, power); + dev_dbg(musb->controller, "issue wakeup\n"); + + /* FIXME do this next chunk in a timer callback, no udelay */ + mdelay(2); + + power = musb_readb(mregs, MUSB_POWER); + power &= ~MUSB_POWER_RESUME; + musb_writeb(mregs, MUSB_POWER, power); +done: + spin_unlock_irqrestore(&musb->lock, flags); + return status; +#else + return 0; +#endif +} + +static int +musb_gadget_set_self_powered(struct usb_gadget *gadget, int is_selfpowered) +{ + struct musb *musb = gadget_to_musb(gadget); + + musb->is_self_powered = !!is_selfpowered; + return 0; +} + +static void musb_pullup(struct musb *musb, int is_on) +{ + u8 power; + + power = musb_readb(musb->mregs, MUSB_POWER); + if (is_on) + power |= MUSB_POWER_SOFTCONN; + else + power &= ~MUSB_POWER_SOFTCONN; + + /* FIXME if on, HdrcStart; if off, HdrcStop */ + + dev_dbg(musb->controller, "gadget D+ pullup %s\n", + is_on ? "on" : "off"); + musb_writeb(musb->mregs, MUSB_POWER, power); +} + +#if 0 +static int musb_gadget_vbus_session(struct usb_gadget *gadget, int is_active) +{ + dev_dbg(musb->controller, "<= %s =>\n", __func__); + + /* + * FIXME iff driver's softconnect flag is set (as it is during probe, + * though that can clear it), just musb_pullup(). + */ + + return -EINVAL; +} +#endif + +static int musb_gadget_vbus_draw(struct usb_gadget *gadget, unsigned mA) +{ +#ifndef __UBOOT__ + struct musb *musb = gadget_to_musb(gadget); + + if (!musb->xceiv->set_power) + return -EOPNOTSUPP; + return usb_phy_set_power(musb->xceiv, mA); +#else + return 0; +#endif +} + +static int musb_gadget_pullup(struct usb_gadget *gadget, int is_on) +{ + struct musb *musb = gadget_to_musb(gadget); + unsigned long flags; + + is_on = !!is_on; + + pm_runtime_get_sync(musb->controller); + + /* NOTE: this assumes we are sensing vbus; we'd rather + * not pullup unless the B-session is active. 
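The SRP path in musb_gadget_wakeup() above polls DEVCTL with fixed retry budgets (first waiting for SESSION to come up, then to drop) and simply falls through when a budget is exhausted rather than failing. Below is a standalone sketch of that bounded-poll idiom; read_devctl() and the mask value are stand-ins, not the driver's accessors.

#include <stdio.h>
#include <stdbool.h>

/* Stand-in for musb_readb(mregs, MUSB_DEVCTL); the bit comes up after a
 * few reads so this example terminates. */
static unsigned read_devctl(void)
{
    static int calls;
    return (++calls >= 3) ? 0x01 : 0x00;
}

/* Poll until (value & mask) matches 'want_set' or the retry budget is spent.
 * Like the driver, a caller may just carry on if it times out. */
static bool poll_devctl_bit(unsigned mask, bool want_set, int retries)
{
    while (retries-- > 0) {
        unsigned v = read_devctl();
        if (((v & mask) != 0) == want_set)
            return true;
    }
    return false;
}

int main(void)
{
    /* the wakeup path does this twice: SESSION up, then SESSION gone */
    printf("session up: %d\n", poll_devctl_bit(0x01, true, 100));
    return 0;
}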
+ */ + spin_lock_irqsave(&musb->lock, flags); + if (is_on != musb->softconnect) { + musb->softconnect = is_on; + musb_pullup(musb, is_on); + } + spin_unlock_irqrestore(&musb->lock, flags); + + pm_runtime_put(musb->controller); + + return 0; +} + +#ifndef __UBOOT__ +static int musb_gadget_start(struct usb_gadget *g, + struct usb_gadget_driver *driver); +static int musb_gadget_stop(struct usb_gadget *g, + struct usb_gadget_driver *driver); +#endif + +static const struct usb_gadget_ops musb_gadget_operations = { + .get_frame = musb_gadget_get_frame, + .wakeup = musb_gadget_wakeup, + .set_selfpowered = musb_gadget_set_self_powered, + /* .vbus_session = musb_gadget_vbus_session, */ + .vbus_draw = musb_gadget_vbus_draw, + .pullup = musb_gadget_pullup, +#ifndef __UBOOT__ + .udc_start = musb_gadget_start, + .udc_stop = musb_gadget_stop, +#endif +}; + +/* ----------------------------------------------------------------------- */ + +/* Registration */ + +/* Only this registration code "knows" the rule (from USB standards) + * about there being only one external upstream port. It assumes + * all peripheral ports are external... + */ + +#ifndef __UBOOT__ +static void musb_gadget_release(struct device *dev) +{ + /* kref_put(WHAT) */ + dev_dbg(dev, "%s\n", __func__); +} +#endif + + +static void __devinit +init_peripheral_ep(struct musb *musb, struct musb_ep *ep, u8 epnum, int is_in) +{ + struct musb_hw_ep *hw_ep = musb->endpoints + epnum; + + memset(ep, 0, sizeof *ep); + + ep->current_epnum = epnum; + ep->musb = musb; + ep->hw_ep = hw_ep; + ep->is_in = is_in; + + INIT_LIST_HEAD(&ep->req_list); + + sprintf(ep->name, "ep%d%s", epnum, + (!epnum || hw_ep->is_shared_fifo) ? "" : ( + is_in ? "in" : "out")); + ep->end_point.name = ep->name; + INIT_LIST_HEAD(&ep->end_point.ep_list); + if (!epnum) { + ep->end_point.maxpacket = 64; + ep->end_point.ops = &musb_g_ep0_ops; + musb->g.ep0 = &ep->end_point; + } else { + if (is_in) + ep->end_point.maxpacket = hw_ep->max_packet_sz_tx; + else + ep->end_point.maxpacket = hw_ep->max_packet_sz_rx; + ep->end_point.ops = &musb_ep_ops; + list_add_tail(&ep->end_point.ep_list, &musb->g.ep_list); + } +} + +/* + * Initialize the endpoints exposed to peripheral drivers, with backlinks + * to the rest of the driver state. + */ +static inline void __devinit musb_g_init_endpoints(struct musb *musb) +{ + u8 epnum; + struct musb_hw_ep *hw_ep; + unsigned count = 0; + + /* initialize endpoint list just once */ + INIT_LIST_HEAD(&(musb->g.ep_list)); + + for (epnum = 0, hw_ep = musb->endpoints; + epnum < musb->nr_endpoints; + epnum++, hw_ep++) { + if (hw_ep->is_shared_fifo /* || !epnum */) { + init_peripheral_ep(musb, &hw_ep->ep_in, epnum, 0); + count++; + } else { + if (hw_ep->max_packet_sz_tx) { + init_peripheral_ep(musb, &hw_ep->ep_in, + epnum, 1); + count++; + } + if (hw_ep->max_packet_sz_rx) { + init_peripheral_ep(musb, &hw_ep->ep_out, + epnum, 0); + count++; + } + } + } +} + +/* called once during driver setup to initialize and link into + * the driver model; memory is zeroed. + */ +int __devinit musb_gadget_setup(struct musb *musb) +{ + int status; + + /* REVISIT minor race: if (erroneously) setting up two + * musb peripherals at the same time, only the bus lock + * is probably held. 
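init_peripheral_ep() above derives each endpoint's name from its number, its direction, and whether the hardware FIFO is shared between directions. The standalone sketch below reproduces just that naming rule; ep_name() is an illustrative helper, not a driver function.

#include <stdio.h>

/* Build the name the way init_peripheral_ep() does: ep0 and shared-FIFO
 * endpoints carry no direction suffix. */
static void ep_name(char *buf, size_t len, int epnum, int is_in, int shared_fifo)
{
    snprintf(buf, len, "ep%d%s", epnum,
             (!epnum || shared_fifo) ? "" : (is_in ? "in" : "out"));
}

int main(void)
{
    char name[12];

    ep_name(name, sizeof(name), 0, 1, 0);  printf("%s\n", name);  /* "ep0"    */
    ep_name(name, sizeof(name), 1, 1, 0);  printf("%s\n", name);  /* "ep1in"  */
    ep_name(name, sizeof(name), 2, 0, 0);  printf("%s\n", name);  /* "ep2out" */
    return 0;
}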
+ */ + + musb->g.ops = &musb_gadget_operations; +#ifndef __UBOOT__ + musb->g.max_speed = USB_SPEED_HIGH; +#endif + musb->g.speed = USB_SPEED_UNKNOWN; + +#ifndef __UBOOT__ + /* this "gadget" abstracts/virtualizes the controller */ + dev_set_name(&musb->g.dev, "gadget"); + musb->g.dev.parent = musb->controller; + musb->g.dev.dma_mask = musb->controller->dma_mask; + musb->g.dev.release = musb_gadget_release; +#endif + musb->g.name = musb_driver_name; + +#ifndef __UBOOT__ + if (is_otg_enabled(musb)) + musb->g.is_otg = 1; +#endif + + musb_g_init_endpoints(musb); + + musb->is_active = 0; + musb_platform_try_idle(musb, 0); + +#ifndef __UBOOT__ + status = device_register(&musb->g.dev); + if (status != 0) { + put_device(&musb->g.dev); + return status; + } + status = usb_add_gadget_udc(musb->controller, &musb->g); + if (status) + goto err; +#endif + + return 0; +#ifndef __UBOOT__ +err: + musb->g.dev.parent = NULL; + device_unregister(&musb->g.dev); + return status; +#endif +} + +void musb_gadget_cleanup(struct musb *musb) +{ +#ifndef __UBOOT__ + usb_del_gadget_udc(&musb->g); + if (musb->g.dev.parent) + device_unregister(&musb->g.dev); +#endif +} + +/* + * Register the gadget driver. Used by gadget drivers when + * registering themselves with the controller. + * + * -EINVAL something went wrong (not driver) + * -EBUSY another gadget is already using the controller + * -ENOMEM no memory to perform the operation + * + * @param driver the gadget driver + * @return <0 if error, 0 if everything is fine + */ +#ifndef __UBOOT__ +static int musb_gadget_start(struct usb_gadget *g, + struct usb_gadget_driver *driver) +#else +int musb_gadget_start(struct usb_gadget *g, + struct usb_gadget_driver *driver) +#endif +{ + struct musb *musb = gadget_to_musb(g); +#ifndef __UBOOT__ + struct usb_otg *otg = musb->xceiv->otg; +#endif + unsigned long flags; + int retval = -EINVAL; + +#ifndef __UBOOT__ + if (driver->max_speed < USB_SPEED_HIGH) + goto err0; +#endif + + pm_runtime_get_sync(musb->controller); + +#ifndef __UBOOT__ + dev_dbg(musb->controller, "registering driver %s\n", driver->function); +#endif + + musb->softconnect = 0; + musb->gadget_driver = driver; + + spin_lock_irqsave(&musb->lock, flags); + musb->is_active = 1; + +#ifndef __UBOOT__ + otg_set_peripheral(otg, &musb->g); + musb->xceiv->state = OTG_STATE_B_IDLE; + + /* + * FIXME this ignores the softconnect flag. Drivers are + * allowed hold the peripheral inactive until for example + * userspace hooks up printer hardware or DSP codecs, so + * hosts only see fully functional devices. + */ + + if (!is_otg_enabled(musb)) +#endif + musb_start(musb); + + spin_unlock_irqrestore(&musb->lock, flags); + +#ifndef __UBOOT__ + if (is_otg_enabled(musb)) { + struct usb_hcd *hcd = musb_to_hcd(musb); + + dev_dbg(musb->controller, "OTG startup...\n"); + + /* REVISIT: funcall to other code, which also + * handles power budgeting ... this way also + * ensures HdrcStart is indirectly called. 
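musb_gadget_setup() above registers the gadget device first and adds the UDC second, and on failure unwinds only the steps that already succeeded. A simplified standalone sketch of that register/unwind shape follows; the helpers are placeholders for device_register() and usb_add_gadget_udc(), and the real error path also clears the device's parent pointer.

#include <stdio.h>

static int register_dev(void)    { return 0; }   /* stand-in for device_register() */
static void unregister_dev(void) { }
static int add_udc(void)         { return -1; }  /* pretend usb_add_gadget_udc() fails */

static int gadget_setup(void)
{
    int status;

    status = register_dev();
    if (status != 0)
        return status;

    status = add_udc();
    if (status)
        goto err;               /* unwind only what already succeeded */

    return 0;

err:
    unregister_dev();
    return status;
}

int main(void)
{
    printf("setup -> %d\n", gadget_setup());
    return 0;
}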
+ */ + retval = usb_add_hcd(musb_to_hcd(musb), 0, 0); + if (retval < 0) { + dev_dbg(musb->controller, "add_hcd failed, %d\n", retval); + goto err2; + } + + if ((musb->xceiv->last_event == USB_EVENT_ID) + && otg->set_vbus) + otg_set_vbus(otg, 1); + + hcd->self.uses_pio_for_control = 1; + } + if (musb->xceiv->last_event == USB_EVENT_NONE) + pm_runtime_put(musb->controller); +#endif + + return 0; + +#ifndef __UBOOT__ +err2: + if (!is_otg_enabled(musb)) + musb_stop(musb); +err0: + return retval; +#endif +} + +#ifndef __UBOOT__ +static void stop_activity(struct musb *musb, struct usb_gadget_driver *driver) +{ + int i; + struct musb_hw_ep *hw_ep; + + /* don't disconnect if it's not connected */ + if (musb->g.speed == USB_SPEED_UNKNOWN) + driver = NULL; + else + musb->g.speed = USB_SPEED_UNKNOWN; + + /* deactivate the hardware */ + if (musb->softconnect) { + musb->softconnect = 0; + musb_pullup(musb, 0); + } + musb_stop(musb); + + /* killing any outstanding requests will quiesce the driver; + * then report disconnect + */ + if (driver) { + for (i = 0, hw_ep = musb->endpoints; + i < musb->nr_endpoints; + i++, hw_ep++) { + musb_ep_select(musb->mregs, i); + if (hw_ep->is_shared_fifo /* || !epnum */) { + nuke(&hw_ep->ep_in, -ESHUTDOWN); + } else { + if (hw_ep->max_packet_sz_tx) + nuke(&hw_ep->ep_in, -ESHUTDOWN); + if (hw_ep->max_packet_sz_rx) + nuke(&hw_ep->ep_out, -ESHUTDOWN); + } + } + } +} + +/* + * Unregister the gadget driver. Used by gadget drivers when + * unregistering themselves from the controller. + * + * @param driver the gadget driver to unregister + */ +static int musb_gadget_stop(struct usb_gadget *g, + struct usb_gadget_driver *driver) +{ + struct musb *musb = gadget_to_musb(g); + unsigned long flags; + + if (musb->xceiv->last_event == USB_EVENT_NONE) + pm_runtime_get_sync(musb->controller); + + /* + * REVISIT always use otg_set_peripheral() here too; + * this needs to shut down the OTG engine. + */ + + spin_lock_irqsave(&musb->lock, flags); + + musb_hnp_stop(musb); + + (void) musb_gadget_vbus_draw(&musb->g, 0); + + musb->xceiv->state = OTG_STATE_UNDEFINED; + stop_activity(musb, driver); + otg_set_peripheral(musb->xceiv->otg, NULL); + + dev_dbg(musb->controller, "unregistering driver %s\n", driver->function); + + musb->is_active = 0; + musb_platform_try_idle(musb, 0); + spin_unlock_irqrestore(&musb->lock, flags); + + if (is_otg_enabled(musb)) { + usb_remove_hcd(musb_to_hcd(musb)); + /* FIXME we need to be able to register another + * gadget driver here and have everything work; + * that currently misbehaves. 
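stop_activity() above walks every hardware endpoint and flushes its queue(s): a single queue for a shared-FIFO endpoint, otherwise the IN and OUT sides separately, and only where the corresponding direction actually exists. Below is a standalone sketch of that walk; nuke() here just prints, where the driver completes each queued request with -ESHUTDOWN.

#include <stdio.h>
#include <stdbool.h>

#define NR_ENDPOINTS 4

struct hw_ep {
    bool is_shared_fifo;
    int max_packet_sz_tx;
    int max_packet_sz_rx;
};

/* stands in for nuke(): complete every queued request with -ESHUTDOWN */
static void nuke(int epnum, const char *dir)
{
    printf("flush ep%d %s\n", epnum, dir);
}

static void stop_all(struct hw_ep *eps, int count)
{
    int i;

    for (i = 0; i < count; i++) {
        if (eps[i].is_shared_fifo) {
            nuke(i, "shared");          /* one queue serves both directions */
        } else {
            if (eps[i].max_packet_sz_tx)
                nuke(i, "in");
            if (eps[i].max_packet_sz_rx)
                nuke(i, "out");
        }
    }
}

int main(void)
{
    struct hw_ep eps[NR_ENDPOINTS] = {
        { .is_shared_fifo = true },
        { .max_packet_sz_tx = 512, .max_packet_sz_rx = 512 },
        { .max_packet_sz_tx = 512 },
        { .max_packet_sz_rx = 64 },
    };

    stop_all(eps, NR_ENDPOINTS);
    return 0;
}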
+ */ + } + + if (!is_otg_enabled(musb)) + musb_stop(musb); + + pm_runtime_put(musb->controller); + + return 0; +} +#endif + +/* ----------------------------------------------------------------------- */ + +/* lifecycle operations called through plat_uds.c */ + +void musb_g_resume(struct musb *musb) +{ +#ifndef __UBOOT__ + musb->is_suspended = 0; + switch (musb->xceiv->state) { + case OTG_STATE_B_IDLE: + break; + case OTG_STATE_B_WAIT_ACON: + case OTG_STATE_B_PERIPHERAL: + musb->is_active = 1; + if (musb->gadget_driver && musb->gadget_driver->resume) { + spin_unlock(&musb->lock); + musb->gadget_driver->resume(&musb->g); + spin_lock(&musb->lock); + } + break; + default: + WARNING("unhandled RESUME transition (%s)\n", + otg_state_string(musb->xceiv->state)); + } +#endif +} + +/* called when SOF packets stop for 3+ msec */ +void musb_g_suspend(struct musb *musb) +{ +#ifndef __UBOOT__ + u8 devctl; + + devctl = musb_readb(musb->mregs, MUSB_DEVCTL); + dev_dbg(musb->controller, "devctl %02x\n", devctl); + + switch (musb->xceiv->state) { + case OTG_STATE_B_IDLE: + if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS) + musb->xceiv->state = OTG_STATE_B_PERIPHERAL; + break; + case OTG_STATE_B_PERIPHERAL: + musb->is_suspended = 1; + if (musb->gadget_driver && musb->gadget_driver->suspend) { + spin_unlock(&musb->lock); + musb->gadget_driver->suspend(&musb->g); + spin_lock(&musb->lock); + } + break; + default: + /* REVISIT if B_HOST, clear DEVCTL.HOSTREQ; + * A_PERIPHERAL may need care too + */ + WARNING("unhandled SUSPEND transition (%s)\n", + otg_state_string(musb->xceiv->state)); + } +#endif +} + +/* Called during SRP */ +void musb_g_wakeup(struct musb *musb) +{ + musb_gadget_wakeup(&musb->g); +} + +/* called when VBUS drops below session threshold, and in other cases */ +void musb_g_disconnect(struct musb *musb) +{ + void __iomem *mregs = musb->mregs; + u8 devctl = musb_readb(mregs, MUSB_DEVCTL); + + dev_dbg(musb->controller, "devctl %02x\n", devctl); + + /* clear HR */ + musb_writeb(mregs, MUSB_DEVCTL, devctl & MUSB_DEVCTL_SESSION); + + /* don't draw vbus until new b-default session */ + (void) musb_gadget_vbus_draw(&musb->g, 0); + + musb->g.speed = USB_SPEED_UNKNOWN; + if (musb->gadget_driver && musb->gadget_driver->disconnect) { + spin_unlock(&musb->lock); + musb->gadget_driver->disconnect(&musb->g); + spin_lock(&musb->lock); + } + +#ifndef __UBOOT__ + switch (musb->xceiv->state) { + default: + dev_dbg(musb->controller, "Unhandled disconnect %s, setting a_idle\n", + otg_state_string(musb->xceiv->state)); + musb->xceiv->state = OTG_STATE_A_IDLE; + MUSB_HST_MODE(musb); + break; + case OTG_STATE_A_PERIPHERAL: + musb->xceiv->state = OTG_STATE_A_WAIT_BCON; + MUSB_HST_MODE(musb); + break; + case OTG_STATE_B_WAIT_ACON: + case OTG_STATE_B_HOST: + case OTG_STATE_B_PERIPHERAL: + case OTG_STATE_B_IDLE: + musb->xceiv->state = OTG_STATE_B_IDLE; + break; + case OTG_STATE_B_SRP_INIT: + break; + } +#endif + + musb->is_active = 0; +} + +void musb_g_reset(struct musb *musb) +__releases(musb->lock) +__acquires(musb->lock) +{ + void __iomem *mbase = musb->mregs; + u8 devctl = musb_readb(mbase, MUSB_DEVCTL); + u8 power; + +#ifndef __UBOOT__ + dev_dbg(musb->controller, "<== %s addr=%x driver '%s'\n", + (devctl & MUSB_DEVCTL_BDEVICE) + ? "B-Device" : "A-Device", + musb_readb(mbase, MUSB_FADDR), + musb->gadget_driver + ? 
musb->gadget_driver->driver.name + : NULL + ); +#endif + + /* report disconnect, if we didn't already (flushing EP state) */ + if (musb->g.speed != USB_SPEED_UNKNOWN) + musb_g_disconnect(musb); + + /* clear HR */ + else if (devctl & MUSB_DEVCTL_HR) + musb_writeb(mbase, MUSB_DEVCTL, MUSB_DEVCTL_SESSION); + + + /* what speed did we negotiate? */ + power = musb_readb(mbase, MUSB_POWER); + musb->g.speed = (power & MUSB_POWER_HSMODE) + ? USB_SPEED_HIGH : USB_SPEED_FULL; + + /* start in USB_STATE_DEFAULT */ + musb->is_active = 1; + musb->is_suspended = 0; + MUSB_DEV_MODE(musb); + musb->address = 0; + musb->ep0_state = MUSB_EP0_STAGE_SETUP; + + musb->may_wakeup = 0; + musb->g.b_hnp_enable = 0; + musb->g.a_alt_hnp_support = 0; + musb->g.a_hnp_support = 0; + +#ifndef __UBOOT__ + /* Normal reset, as B-Device; + * or else after HNP, as A-Device + */ + if (devctl & MUSB_DEVCTL_BDEVICE) { + musb->xceiv->state = OTG_STATE_B_PERIPHERAL; + musb->g.is_a_peripheral = 0; + } else if (is_otg_enabled(musb)) { + musb->xceiv->state = OTG_STATE_A_PERIPHERAL; + musb->g.is_a_peripheral = 1; + } else + WARN_ON(1); + + /* start with default limits on VBUS power draw */ + (void) musb_gadget_vbus_draw(&musb->g, + is_otg_enabled(musb) ? 8 : 100); +#endif +} diff --git a/drivers/usb/musb-new/musb_gadget.h b/drivers/usb/musb-new/musb_gadget.h new file mode 100644 index 00000000000..392f701a878 --- /dev/null +++ b/drivers/usb/musb-new/musb_gadget.h @@ -0,0 +1,130 @@ +/* + * MUSB OTG driver peripheral defines + * + * Copyright 2005 Mentor Graphics Corporation + * Copyright (C) 2005-2006 by Texas Instruments + * Copyright (C) 2006-2007 Nokia Corporation + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN + * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF + * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + */ + +#ifndef __MUSB_GADGET_H +#define __MUSB_GADGET_H + +#include <linux/list.h> +#ifdef __UBOOT__ +#include <asm/byteorder.h> +#include <asm/errno.h> +#include <linux/usb/ch9.h> +#include <linux/usb/gadget.h> +#endif + +enum buffer_map_state { + UN_MAPPED = 0, + PRE_MAPPED, + MUSB_MAPPED +}; + +struct musb_request { + struct usb_request request; + struct list_head list; + struct musb_ep *ep; + struct musb *musb; + u8 tx; /* endpoint direction */ + u8 epnum; + enum buffer_map_state map_state; +}; + +static inline struct musb_request *to_musb_request(struct usb_request *req) +{ + return req ? container_of(req, struct musb_request, request) : NULL; +} + +extern struct usb_request * +musb_alloc_request(struct usb_ep *ep, gfp_t gfp_flags); +extern void musb_free_request(struct usb_ep *ep, struct usb_request *req); + + +/* + * struct musb_ep - peripheral side view of endpoint rx or tx side + */ +struct musb_ep { + /* stuff towards the head is basically write-once. */ + struct usb_ep end_point; + char name[12]; + struct musb_hw_ep *hw_ep; + struct musb *musb; + u8 current_epnum; + + /* ... when enabled/disabled ... */ + u8 type; + u8 is_in; + u16 packet_sz; + const struct usb_endpoint_descriptor *desc; + struct dma_channel *dma; + + /* later things are modified based on usage */ + struct list_head req_list; + + u8 wedged; + + /* true if lock must be dropped but req_list may not be advanced */ + u8 busy; + + u8 hb_mult; +}; + +static inline struct musb_ep *to_musb_ep(struct usb_ep *ep) +{ + return ep ? container_of(ep, struct musb_ep, end_point) : NULL; +} + +static inline struct musb_request *next_request(struct musb_ep *ep) +{ + struct list_head *queue = &ep->req_list; + + if (list_empty(queue)) + return NULL; + return container_of(queue->next, struct musb_request, list); +} + +extern void musb_g_tx(struct musb *musb, u8 epnum); +extern void musb_g_rx(struct musb *musb, u8 epnum); + +extern const struct usb_ep_ops musb_g_ep0_ops; + +extern int musb_gadget_setup(struct musb *); +extern void musb_gadget_cleanup(struct musb *); + +extern void musb_g_giveback(struct musb_ep *, struct usb_request *, int); + +extern void musb_ep_restart(struct musb *, struct musb_request *); + +#ifdef __UBOOT__ +int musb_gadget_start(struct usb_gadget *g, struct usb_gadget_driver *driver); +#endif +#endif /* __MUSB_GADGET_H */ diff --git a/drivers/usb/musb-new/musb_gadget_ep0.c b/drivers/usb/musb-new/musb_gadget_ep0.c new file mode 100644 index 00000000000..6599d386dc5 --- /dev/null +++ b/drivers/usb/musb-new/musb_gadget_ep0.c @@ -0,0 +1,1089 @@ +/* + * MUSB OTG peripheral driver ep0 handling + * + * Copyright 2005 Mentor Graphics Corporation + * Copyright (C) 2005-2006 by Texas Instruments + * Copyright (C) 2006-2007 Nokia Corporation + * Copyright (C) 2008-2009 MontaVista Software, Inc. <source@mvista.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN + * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF + * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +#define __UBOOT__ +#ifndef __UBOOT__ +#include <linux/kernel.h> +#include <linux/list.h> +#include <linux/timer.h> +#include <linux/spinlock.h> +#include <linux/device.h> +#include <linux/interrupt.h> +#else +#include <common.h> +#include "linux-compat.h" +#endif + +#include "musb_core.h" + +/* ep0 is always musb->endpoints[0].ep_in */ +#define next_ep0_request(musb) next_in_request(&(musb)->endpoints[0]) + +/* + * locking note: we use only the controller lock, for simpler correctness. + * It's always held with IRQs blocked. + * + * It protects the ep0 request queue as well as ep0_state, not just the + * controller and indexed registers. And that lock stays held unless it + * needs to be dropped to allow reentering this driver ... like upcalls to + * the gadget driver, or adjusting endpoint halt status. 
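The locking note above says the single controller lock is held with IRQs blocked and is dropped only for upcalls into the gadget driver (see forward_to_driver() later in this file), then retaken. The standalone sketch below shows that drop/retake shape with a pthread mutex in place of the spinlock; driver_callback() is a stand-in for the gadget driver's setup() hook, and the names are illustrative.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t ctrl_lock = PTHREAD_MUTEX_INITIALIZER;

/* stands in for the gadget driver's setup()/disconnect() callback */
static void driver_callback(void)
{
    printf("driver callback runs without the controller lock held\n");
}

static void handle_event(void)
{
    pthread_mutex_lock(&ctrl_lock);

    /* ... touch ep0 state and the indexed registers under the lock ... */

    pthread_mutex_unlock(&ctrl_lock);   /* drop it for the upcall ... */
    driver_callback();
    pthread_mutex_lock(&ctrl_lock);     /* ... and retake it afterwards */

    /* ... finish updating ep0 state ... */

    pthread_mutex_unlock(&ctrl_lock);
}

int main(void)
{
    handle_event();
    return 0;
}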
+ */ + +static char *decode_ep0stage(u8 stage) +{ + switch (stage) { + case MUSB_EP0_STAGE_IDLE: return "idle"; + case MUSB_EP0_STAGE_SETUP: return "setup"; + case MUSB_EP0_STAGE_TX: return "in"; + case MUSB_EP0_STAGE_RX: return "out"; + case MUSB_EP0_STAGE_ACKWAIT: return "wait"; + case MUSB_EP0_STAGE_STATUSIN: return "in/status"; + case MUSB_EP0_STAGE_STATUSOUT: return "out/status"; + default: return "?"; + } +} + +/* handle a standard GET_STATUS request + * Context: caller holds controller lock + */ +static int service_tx_status_request( + struct musb *musb, + const struct usb_ctrlrequest *ctrlrequest) +{ + void __iomem *mbase = musb->mregs; + int handled = 1; + u8 result[2], epnum = 0; + const u8 recip = ctrlrequest->bRequestType & USB_RECIP_MASK; + + result[1] = 0; + + switch (recip) { + case USB_RECIP_DEVICE: + result[0] = musb->is_self_powered << USB_DEVICE_SELF_POWERED; + result[0] |= musb->may_wakeup << USB_DEVICE_REMOTE_WAKEUP; + if (musb->g.is_otg) { + result[0] |= musb->g.b_hnp_enable + << USB_DEVICE_B_HNP_ENABLE; + result[0] |= musb->g.a_alt_hnp_support + << USB_DEVICE_A_ALT_HNP_SUPPORT; + result[0] |= musb->g.a_hnp_support + << USB_DEVICE_A_HNP_SUPPORT; + } + break; + + case USB_RECIP_INTERFACE: + result[0] = 0; + break; + + case USB_RECIP_ENDPOINT: { + int is_in; + struct musb_ep *ep; + u16 tmp; + void __iomem *regs; + + epnum = (u8) ctrlrequest->wIndex; + if (!epnum) { + result[0] = 0; + break; + } + + is_in = epnum & USB_DIR_IN; + if (is_in) { + epnum &= 0x0f; + ep = &musb->endpoints[epnum].ep_in; + } else { + ep = &musb->endpoints[epnum].ep_out; + } + regs = musb->endpoints[epnum].regs; + + if (epnum >= MUSB_C_NUM_EPS || !ep->desc) { + handled = -EINVAL; + break; + } + + musb_ep_select(mbase, epnum); + if (is_in) + tmp = musb_readw(regs, MUSB_TXCSR) + & MUSB_TXCSR_P_SENDSTALL; + else + tmp = musb_readw(regs, MUSB_RXCSR) + & MUSB_RXCSR_P_SENDSTALL; + musb_ep_select(mbase, 0); + + result[0] = tmp ? 1 : 0; + } break; + + default: + /* class, vendor, etc ... delegate */ + handled = 0; + break; + } + + /* fill up the fifo; caller updates csr0 */ + if (handled > 0) { + u16 len = le16_to_cpu(ctrlrequest->wLength); + + if (len > 2) + len = 2; + musb_write_fifo(&musb->endpoints[0], len, result); + } + + return handled; +} + +/* + * handle a control-IN request, the end0 buffer contains the current request + * that is supposed to be a standard control request. Assumes the fifo to + * be at least 2 bytes long. 
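service_tx_status_request() above answers GET_STATUS with a two-byte reply whose low bits report the self-powered and remote-wakeup state, with the upper byte left zero. A standalone sketch of that packing follows; the bit positions match the USB feature bits the driver shifts by (USB_DEVICE_SELF_POWERED = 0, USB_DEVICE_REMOTE_WAKEUP = 1), and build_status() is illustrative.

#include <stdio.h>
#include <stdint.h>

#define SELF_POWERED_BIT   0   /* USB_DEVICE_SELF_POWERED */
#define REMOTE_WAKEUP_BIT  1   /* USB_DEVICE_REMOTE_WAKEUP */

static void build_status(uint8_t result[2], int self_powered, int may_wakeup)
{
    result[0] = (uint8_t)((self_powered << SELF_POWERED_BIT) |
                          (may_wakeup   << REMOTE_WAKEUP_BIT));
    result[1] = 0;              /* upper byte is reserved/zero */
}

int main(void)
{
    uint8_t st[2];

    build_status(st, 1, 0);
    printf("GET_STATUS reply: %02x %02x\n", st[0], st[1]);
    return 0;
}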
+ * + * @return 0 if the request was NOT HANDLED, + * < 0 when error + * > 0 when the request is processed + * + * Context: caller holds controller lock + */ +static int +service_in_request(struct musb *musb, const struct usb_ctrlrequest *ctrlrequest) +{ + int handled = 0; /* not handled */ + + if ((ctrlrequest->bRequestType & USB_TYPE_MASK) + == USB_TYPE_STANDARD) { + switch (ctrlrequest->bRequest) { + case USB_REQ_GET_STATUS: + handled = service_tx_status_request(musb, + ctrlrequest); + break; + + /* case USB_REQ_SYNC_FRAME: */ + + default: + break; + } + } + return handled; +} + +/* + * Context: caller holds controller lock + */ +static void musb_g_ep0_giveback(struct musb *musb, struct usb_request *req) +{ + musb_g_giveback(&musb->endpoints[0].ep_in, req, 0); +} + +/* + * Tries to start B-device HNP negotiation if enabled via sysfs + */ +static inline void musb_try_b_hnp_enable(struct musb *musb) +{ + void __iomem *mbase = musb->mregs; + u8 devctl; + + dev_dbg(musb->controller, "HNP: Setting HR\n"); + devctl = musb_readb(mbase, MUSB_DEVCTL); + musb_writeb(mbase, MUSB_DEVCTL, devctl | MUSB_DEVCTL_HR); +} + +/* + * Handle all control requests with no DATA stage, including standard + * requests such as: + * USB_REQ_SET_CONFIGURATION, USB_REQ_SET_INTERFACE, unrecognized + * always delegated to the gadget driver + * USB_REQ_SET_ADDRESS, USB_REQ_CLEAR_FEATURE, USB_REQ_SET_FEATURE + * always handled here, except for class/vendor/... features + * + * Context: caller holds controller lock + */ +static int +service_zero_data_request(struct musb *musb, + struct usb_ctrlrequest *ctrlrequest) +__releases(musb->lock) +__acquires(musb->lock) +{ + int handled = -EINVAL; + void __iomem *mbase = musb->mregs; + const u8 recip = ctrlrequest->bRequestType & USB_RECIP_MASK; + + /* the gadget driver handles everything except what we MUST handle */ + if ((ctrlrequest->bRequestType & USB_TYPE_MASK) + == USB_TYPE_STANDARD) { + switch (ctrlrequest->bRequest) { + case USB_REQ_SET_ADDRESS: + /* change it after the status stage */ + musb->set_address = true; + musb->address = (u8) (ctrlrequest->wValue & 0x7f); + handled = 1; + break; + + case USB_REQ_CLEAR_FEATURE: + switch (recip) { + case USB_RECIP_DEVICE: + if (ctrlrequest->wValue + != USB_DEVICE_REMOTE_WAKEUP) + break; + musb->may_wakeup = 0; + handled = 1; + break; + case USB_RECIP_INTERFACE: + break; + case USB_RECIP_ENDPOINT:{ + const u8 epnum = + ctrlrequest->wIndex & 0x0f; + struct musb_ep *musb_ep; + struct musb_hw_ep *ep; + struct musb_request *request; + void __iomem *regs; + int is_in; + u16 csr; + + if (epnum == 0 || epnum >= MUSB_C_NUM_EPS || + ctrlrequest->wValue != USB_ENDPOINT_HALT) + break; + + ep = musb->endpoints + epnum; + regs = ep->regs; + is_in = ctrlrequest->wIndex & USB_DIR_IN; + if (is_in) + musb_ep = &ep->ep_in; + else + musb_ep = &ep->ep_out; + if (!musb_ep->desc) + break; + + handled = 1; + /* Ignore request if endpoint is wedged */ + if (musb_ep->wedged) + break; + + musb_ep_select(mbase, epnum); + if (is_in) { + csr = musb_readw(regs, MUSB_TXCSR); + csr |= MUSB_TXCSR_CLRDATATOG | + MUSB_TXCSR_P_WZC_BITS; + csr &= ~(MUSB_TXCSR_P_SENDSTALL | + MUSB_TXCSR_P_SENTSTALL | + MUSB_TXCSR_TXPKTRDY); + musb_writew(regs, MUSB_TXCSR, csr); + } else { + csr = musb_readw(regs, MUSB_RXCSR); + csr |= MUSB_RXCSR_CLRDATATOG | + MUSB_RXCSR_P_WZC_BITS; + csr &= ~(MUSB_RXCSR_P_SENDSTALL | + MUSB_RXCSR_P_SENTSTALL); + musb_writew(regs, MUSB_RXCSR, csr); + } + + /* Maybe start the first request in the queue */ + request = next_request(musb_ep); + if 
(!musb_ep->busy && request) { + dev_dbg(musb->controller, "restarting the request\n"); + musb_ep_restart(musb, request); + } + + /* select ep0 again */ + musb_ep_select(mbase, 0); + } break; + default: + /* class, vendor, etc ... delegate */ + handled = 0; + break; + } + break; + + case USB_REQ_SET_FEATURE: + switch (recip) { + case USB_RECIP_DEVICE: + handled = 1; + switch (ctrlrequest->wValue) { + case USB_DEVICE_REMOTE_WAKEUP: + musb->may_wakeup = 1; + break; + case USB_DEVICE_TEST_MODE: + if (musb->g.speed != USB_SPEED_HIGH) + goto stall; + if (ctrlrequest->wIndex & 0xff) + goto stall; + + switch (ctrlrequest->wIndex >> 8) { + case 1: + pr_debug("TEST_J\n"); + /* TEST_J */ + musb->test_mode_nr = + MUSB_TEST_J; + break; + case 2: + /* TEST_K */ + pr_debug("TEST_K\n"); + musb->test_mode_nr = + MUSB_TEST_K; + break; + case 3: + /* TEST_SE0_NAK */ + pr_debug("TEST_SE0_NAK\n"); + musb->test_mode_nr = + MUSB_TEST_SE0_NAK; + break; + case 4: + /* TEST_PACKET */ + pr_debug("TEST_PACKET\n"); + musb->test_mode_nr = + MUSB_TEST_PACKET; + break; + + case 0xc0: + /* TEST_FORCE_HS */ + pr_debug("TEST_FORCE_HS\n"); + musb->test_mode_nr = + MUSB_TEST_FORCE_HS; + break; + case 0xc1: + /* TEST_FORCE_FS */ + pr_debug("TEST_FORCE_FS\n"); + musb->test_mode_nr = + MUSB_TEST_FORCE_FS; + break; + case 0xc2: + /* TEST_FIFO_ACCESS */ + pr_debug("TEST_FIFO_ACCESS\n"); + musb->test_mode_nr = + MUSB_TEST_FIFO_ACCESS; + break; + case 0xc3: + /* TEST_FORCE_HOST */ + pr_debug("TEST_FORCE_HOST\n"); + musb->test_mode_nr = + MUSB_TEST_FORCE_HOST; + break; + default: + goto stall; + } + + /* enter test mode after irq */ + if (handled > 0) + musb->test_mode = true; + break; + case USB_DEVICE_B_HNP_ENABLE: + if (!musb->g.is_otg) + goto stall; + musb->g.b_hnp_enable = 1; + musb_try_b_hnp_enable(musb); + break; + case USB_DEVICE_A_HNP_SUPPORT: + if (!musb->g.is_otg) + goto stall; + musb->g.a_hnp_support = 1; + break; + case USB_DEVICE_A_ALT_HNP_SUPPORT: + if (!musb->g.is_otg) + goto stall; + musb->g.a_alt_hnp_support = 1; + break; + case USB_DEVICE_DEBUG_MODE: + handled = 0; + break; +stall: + default: + handled = -EINVAL; + break; + } + break; + + case USB_RECIP_INTERFACE: + break; + + case USB_RECIP_ENDPOINT:{ + const u8 epnum = + ctrlrequest->wIndex & 0x0f; + struct musb_ep *musb_ep; + struct musb_hw_ep *ep; + void __iomem *regs; + int is_in; + u16 csr; + + if (epnum == 0 || epnum >= MUSB_C_NUM_EPS || + ctrlrequest->wValue != USB_ENDPOINT_HALT) + break; + + ep = musb->endpoints + epnum; + regs = ep->regs; + is_in = ctrlrequest->wIndex & USB_DIR_IN; + if (is_in) + musb_ep = &ep->ep_in; + else + musb_ep = &ep->ep_out; + if (!musb_ep->desc) + break; + + musb_ep_select(mbase, epnum); + if (is_in) { + csr = musb_readw(regs, MUSB_TXCSR); + if (csr & MUSB_TXCSR_FIFONOTEMPTY) + csr |= MUSB_TXCSR_FLUSHFIFO; + csr |= MUSB_TXCSR_P_SENDSTALL + | MUSB_TXCSR_CLRDATATOG + | MUSB_TXCSR_P_WZC_BITS; + musb_writew(regs, MUSB_TXCSR, csr); + } else { + csr = musb_readw(regs, MUSB_RXCSR); + csr |= MUSB_RXCSR_P_SENDSTALL + | MUSB_RXCSR_FLUSHFIFO + | MUSB_RXCSR_CLRDATATOG + | MUSB_RXCSR_P_WZC_BITS; + musb_writew(regs, MUSB_RXCSR, csr); + } + + /* select ep0 again */ + musb_ep_select(mbase, 0); + handled = 1; + } break; + + default: + /* class, vendor, etc ... 
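The TEST_MODE branch above takes the selector from the high byte of wIndex, requires the low byte to be zero, and defers the actual mode switch until after the status stage. Below is a standalone sketch of that decode for the standard selectors (the vendor-specific 0xc0+ values handled above are omitted); test_mode_name() is illustrative.

#include <stdio.h>
#include <stdint.h>

/* Standard selectors per USB 2.0, carried in the high byte of wIndex. */
static const char *test_mode_name(uint16_t wIndex)
{
    if (wIndex & 0xff)              /* low byte must be zero */
        return NULL;

    switch (wIndex >> 8) {          /* high byte selects the test */
    case 1:  return "TEST_J";
    case 2:  return "TEST_K";
    case 3:  return "TEST_SE0_NAK";
    case 4:  return "TEST_PACKET";
    default: return NULL;           /* unknown: the driver stalls */
    }
}

int main(void)
{
    printf("%s\n", test_mode_name(0x0400));   /* -> TEST_PACKET */
    return 0;
}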
delegate */ + handled = 0; + break; + } + break; + default: + /* delegate SET_CONFIGURATION, etc */ + handled = 0; + } + } else + handled = 0; + return handled; +} + +/* we have an ep0out data packet + * Context: caller holds controller lock + */ +static void ep0_rxstate(struct musb *musb) +{ + void __iomem *regs = musb->control_ep->regs; + struct musb_request *request; + struct usb_request *req; + u16 count, csr; + + request = next_ep0_request(musb); + req = &request->request; + + /* read packet and ack; or stall because of gadget driver bug: + * should have provided the rx buffer before setup() returned. + */ + if (req) { + void *buf = req->buf + req->actual; + unsigned len = req->length - req->actual; + + /* read the buffer */ + count = musb_readb(regs, MUSB_COUNT0); + if (count > len) { + req->status = -EOVERFLOW; + count = len; + } + musb_read_fifo(&musb->endpoints[0], count, buf); + req->actual += count; + csr = MUSB_CSR0_P_SVDRXPKTRDY; + if (count < 64 || req->actual == req->length) { + musb->ep0_state = MUSB_EP0_STAGE_STATUSIN; + csr |= MUSB_CSR0_P_DATAEND; + } else + req = NULL; + } else + csr = MUSB_CSR0_P_SVDRXPKTRDY | MUSB_CSR0_P_SENDSTALL; + + + /* Completion handler may choose to stall, e.g. because the + * message just received holds invalid data. + */ + if (req) { + musb->ackpend = csr; + musb_g_ep0_giveback(musb, req); + if (!musb->ackpend) + return; + musb->ackpend = 0; + } + musb_ep_select(musb->mregs, 0); + musb_writew(regs, MUSB_CSR0, csr); +} + +/* + * transmitting to the host (IN), this code might be called from IRQ + * and from kernel thread. + * + * Context: caller holds controller lock + */ +static void ep0_txstate(struct musb *musb) +{ + void __iomem *regs = musb->control_ep->regs; + struct musb_request *req = next_ep0_request(musb); + struct usb_request *request; + u16 csr = MUSB_CSR0_TXPKTRDY; + u8 *fifo_src; + u8 fifo_count; + + if (!req) { + /* WARN_ON(1); */ + dev_dbg(musb->controller, "odd; csr0 %04x\n", musb_readw(regs, MUSB_CSR0)); + return; + } + + request = &req->request; + + /* load the data */ + fifo_src = (u8 *) request->buf + request->actual; + fifo_count = min((unsigned) MUSB_EP0_FIFOSIZE, + request->length - request->actual); + musb_write_fifo(&musb->endpoints[0], fifo_count, fifo_src); + request->actual += fifo_count; + + /* update the flags */ + if (fifo_count < MUSB_MAX_END0_PACKET + || (request->actual == request->length + && !request->zero)) { + musb->ep0_state = MUSB_EP0_STAGE_STATUSOUT; + csr |= MUSB_CSR0_P_DATAEND; + } else + request = NULL; + + /* report completions as soon as the fifo's loaded; there's no + * win in waiting till this last packet gets acked. (other than + * very precise fault reporting, needed by USB TMC; possible with + * this hardware, but not usable from portable gadget drivers.) + */ + if (request) { + musb->ackpend = csr; + musb_g_ep0_giveback(musb, request); + if (!musb->ackpend) + return; + musb->ackpend = 0; + } + + /* send it out, triggering a "txpktrdy cleared" irq */ + musb_ep_select(musb->mregs, 0); + musb_writew(regs, MUSB_CSR0, csr); +} + +/* + * Read a SETUP packet (struct usb_ctrlrequest) from the hardware. + * Fields are left in USB byte-order. + * + * Context: caller holds controller lock. 
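ep0_txstate() above loads at most one 64-byte FIFO's worth per pass and marks DATAEND when it writes a short packet, or when the full length has gone out and no trailing zero-length packet was requested. The standalone sketch below models only that "is this the last packet?" test; the structure and loop driver are illustrative.

#include <stdio.h>
#include <stdbool.h>

#define EP0_MAXPACKET 64

struct xfer {
    unsigned length;    /* total bytes to send */
    unsigned actual;    /* bytes sent so far */
    bool zero;          /* send a trailing ZLP if length is a multiple of 64 */
};

/* "send" one chunk; returns true when this packet ends the data stage */
static bool send_chunk(struct xfer *x)
{
    unsigned count = x->length - x->actual;

    if (count > EP0_MAXPACKET)
        count = EP0_MAXPACKET;
    x->actual += count;

    return count < EP0_MAXPACKET || (x->actual == x->length && !x->zero);
}

int main(void)
{
    struct xfer x = { .length = 130 };

    while (!send_chunk(&x))
        ;
    printf("sent %u bytes in the data stage\n", x.actual);
    return 0;
}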
+ */ +static void +musb_read_setup(struct musb *musb, struct usb_ctrlrequest *req) +{ + struct musb_request *r; + void __iomem *regs = musb->control_ep->regs; + + musb_read_fifo(&musb->endpoints[0], sizeof *req, (u8 *)req); + + /* NOTE: earlier 2.6 versions changed setup packets to host + * order, but now USB packets always stay in USB byte order. + */ + dev_dbg(musb->controller, "SETUP req%02x.%02x v%04x i%04x l%d\n", + req->bRequestType, + req->bRequest, + le16_to_cpu(req->wValue), + le16_to_cpu(req->wIndex), + le16_to_cpu(req->wLength)); + + /* clean up any leftover transfers */ + r = next_ep0_request(musb); + if (r) + musb_g_ep0_giveback(musb, &r->request); + + /* For zero-data requests we want to delay the STATUS stage to + * avoid SETUPEND errors. If we read data (OUT), delay accepting + * packets until there's a buffer to store them in. + * + * If we write data, the controller acts happier if we enable + * the TX FIFO right away, and give the controller a moment + * to switch modes... + */ + musb->set_address = false; + musb->ackpend = MUSB_CSR0_P_SVDRXPKTRDY; + if (req->wLength == 0) { + if (req->bRequestType & USB_DIR_IN) + musb->ackpend |= MUSB_CSR0_TXPKTRDY; + musb->ep0_state = MUSB_EP0_STAGE_ACKWAIT; + } else if (req->bRequestType & USB_DIR_IN) { + musb->ep0_state = MUSB_EP0_STAGE_TX; + musb_writew(regs, MUSB_CSR0, MUSB_CSR0_P_SVDRXPKTRDY); + while ((musb_readw(regs, MUSB_CSR0) + & MUSB_CSR0_RXPKTRDY) != 0) + cpu_relax(); + musb->ackpend = 0; + } else + musb->ep0_state = MUSB_EP0_STAGE_RX; +} + +static int +forward_to_driver(struct musb *musb, const struct usb_ctrlrequest *ctrlrequest) +__releases(musb->lock) +__acquires(musb->lock) +{ + int retval; + if (!musb->gadget_driver) + return -EOPNOTSUPP; + spin_unlock(&musb->lock); + retval = musb->gadget_driver->setup(&musb->g, ctrlrequest); + spin_lock(&musb->lock); + return retval; +} + +/* + * Handle peripheral ep0 interrupt + * + * Context: irq handler; we won't re-enter the driver that way. + */ +irqreturn_t musb_g_ep0_irq(struct musb *musb) +{ + u16 csr; + u16 len; + void __iomem *mbase = musb->mregs; + void __iomem *regs = musb->endpoints[0].regs; + irqreturn_t retval = IRQ_NONE; + + musb_ep_select(mbase, 0); /* select ep0 */ + csr = musb_readw(regs, MUSB_CSR0); + len = musb_readb(regs, MUSB_COUNT0); + + dev_dbg(musb->controller, "csr %04x, count %d, myaddr %d, ep0stage %s\n", + csr, len, + musb_readb(mbase, MUSB_FADDR), + decode_ep0stage(musb->ep0_state)); + + if (csr & MUSB_CSR0_P_DATAEND) { + /* + * If DATAEND is set we should not call the callback, + * hence the status stage is not complete. + */ + return IRQ_HANDLED; + } + + /* I sent a stall.. need to acknowledge it now.. */ + if (csr & MUSB_CSR0_P_SENTSTALL) { + musb_writew(regs, MUSB_CSR0, + csr & ~MUSB_CSR0_P_SENTSTALL); + retval = IRQ_HANDLED; + musb->ep0_state = MUSB_EP0_STAGE_IDLE; + csr = musb_readw(regs, MUSB_CSR0); + } + + /* request ended "early" */ + if (csr & MUSB_CSR0_P_SETUPEND) { + musb_writew(regs, MUSB_CSR0, MUSB_CSR0_P_SVDSETUPEND); + retval = IRQ_HANDLED; + /* Transition into the early status phase */ + switch (musb->ep0_state) { + case MUSB_EP0_STAGE_TX: + musb->ep0_state = MUSB_EP0_STAGE_STATUSOUT; + break; + case MUSB_EP0_STAGE_RX: + musb->ep0_state = MUSB_EP0_STAGE_STATUSIN; + break; + default: + ERR("SetupEnd came in a wrong ep0stage %s\n", + decode_ep0stage(musb->ep0_state)); + } + csr = musb_readw(regs, MUSB_CSR0); + /* NOTE: request may need completion */ + } + + /* docs from Mentor only describe tx, rx, and idle/setup states. 
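musb_read_setup() above pulls the 8-byte SETUP packet out of the FIFO and leaves its 16-bit fields in USB (little-endian) byte order, converting only when a field is inspected. A standalone sketch of that unpacking follows; struct setup_pkt and get_le16() are illustrative, not the driver's types.

#include <stdio.h>
#include <stdint.h>

struct setup_pkt {
    uint8_t  bRequestType;
    uint8_t  bRequest;
    uint16_t wValue;
    uint16_t wIndex;
    uint16_t wLength;
};

static uint16_t get_le16(const uint8_t *p)
{
    return (uint16_t)(p[0] | (p[1] << 8));
}

static void parse_setup(const uint8_t raw[8], struct setup_pkt *s)
{
    s->bRequestType = raw[0];
    s->bRequest     = raw[1];
    s->wValue       = get_le16(raw + 2);   /* le16_to_cpu() in the driver */
    s->wIndex       = get_le16(raw + 4);
    s->wLength      = get_le16(raw + 6);
}

int main(void)
{
    /* GET_DESCRIPTOR(DEVICE), wLength = 18 */
    const uint8_t raw[8] = { 0x80, 0x06, 0x00, 0x01, 0x00, 0x00, 0x12, 0x00 };
    struct setup_pkt s;

    parse_setup(raw, &s);
    printf("req %02x.%02x wValue=%04x wLength=%u\n",
           s.bRequestType, s.bRequest, s.wValue, s.wLength);
    return 0;
}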
+ * we need to handle nuances around status stages, and also the + * case where status and setup stages come back-to-back ... + */ + switch (musb->ep0_state) { + + case MUSB_EP0_STAGE_TX: + /* irq on clearing txpktrdy */ + if ((csr & MUSB_CSR0_TXPKTRDY) == 0) { + ep0_txstate(musb); + retval = IRQ_HANDLED; + } + break; + + case MUSB_EP0_STAGE_RX: + /* irq on set rxpktrdy */ + if (csr & MUSB_CSR0_RXPKTRDY) { + ep0_rxstate(musb); + retval = IRQ_HANDLED; + } + break; + + case MUSB_EP0_STAGE_STATUSIN: + /* end of sequence #2 (OUT/RX state) or #3 (no data) */ + + /* update address (if needed) only @ the end of the + * status phase per usb spec, which also guarantees + * we get 10 msec to receive this irq... until this + * is done we won't see the next packet. + */ + if (musb->set_address) { + musb->set_address = false; + musb_writeb(mbase, MUSB_FADDR, musb->address); + } + + /* enter test mode if needed (exit by reset) */ + else if (musb->test_mode) { + dev_dbg(musb->controller, "entering TESTMODE\n"); + + if (MUSB_TEST_PACKET == musb->test_mode_nr) + musb_load_testpacket(musb); + + musb_writeb(mbase, MUSB_TESTMODE, + musb->test_mode_nr); + } + /* FALLTHROUGH */ + + case MUSB_EP0_STAGE_STATUSOUT: + /* end of sequence #1: write to host (TX state) */ + { + struct musb_request *req; + + req = next_ep0_request(musb); + if (req) + musb_g_ep0_giveback(musb, &req->request); + } + + /* + * In case when several interrupts can get coalesced, + * check to see if we've already received a SETUP packet... + */ + if (csr & MUSB_CSR0_RXPKTRDY) + goto setup; + + retval = IRQ_HANDLED; + musb->ep0_state = MUSB_EP0_STAGE_IDLE; + break; + + case MUSB_EP0_STAGE_IDLE: + /* + * This state is typically (but not always) indiscernible + * from the status states since the corresponding interrupts + * tend to happen within too little period of time (with only + * a zero-length packet in between) and so get coalesced... + */ + retval = IRQ_HANDLED; + musb->ep0_state = MUSB_EP0_STAGE_SETUP; + /* FALLTHROUGH */ + + case MUSB_EP0_STAGE_SETUP: +setup: + if (csr & MUSB_CSR0_RXPKTRDY) { + struct usb_ctrlrequest setup; + int handled = 0; + + if (len != 8) { + ERR("SETUP packet len %d != 8 ?\n", len); + break; + } + musb_read_setup(musb, &setup); + retval = IRQ_HANDLED; + + /* sometimes the RESET won't be reported */ + if (unlikely(musb->g.speed == USB_SPEED_UNKNOWN)) { + u8 power; + + printk(KERN_NOTICE "%s: peripheral reset " + "irq lost!\n", + musb_driver_name); + power = musb_readb(mbase, MUSB_POWER); + musb->g.speed = (power & MUSB_POWER_HSMODE) + ? USB_SPEED_HIGH : USB_SPEED_FULL; + + } + + switch (musb->ep0_state) { + + /* sequence #3 (no data stage), includes requests + * we can't forward (notably SET_ADDRESS and the + * device/endpoint feature set/clear operations) + * plus SET_CONFIGURATION and others we must + */ + case MUSB_EP0_STAGE_ACKWAIT: + handled = service_zero_data_request( + musb, &setup); + + /* + * We're expecting no data in any case, so + * always set the DATAEND bit -- doing this + * here helps avoid SetupEnd interrupt coming + * in the idle stage when we're stalling... 
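The STATUSIN branch above applies a pending SET_ADDRESS only once the status stage completes, matching service_zero_data_request() earlier, which merely records it. Below is a standalone sketch of that two-step handshake; the enum and helpers are illustrative, with faddr standing in for the MUSB_FADDR register.

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

enum ep0_stage { STAGE_IDLE, STAGE_SETUP, STAGE_STATUSIN };

struct ep0 {
    enum ep0_stage stage;
    bool set_address;       /* an address change is pending */
    uint8_t address;        /* value to program later */
    uint8_t faddr;          /* models the MUSB_FADDR register */
};

/* SETUP(SET_ADDRESS): only record the new address; being a zero-data
 * request, the ack-wait step is folded into going straight to status. */
static void on_setup_set_address(struct ep0 *ep, uint8_t addr)
{
    ep->set_address = true;
    ep->address = addr & 0x7f;
    ep->stage = STAGE_STATUSIN;
}

/* status stage done: only now is the address applied to the hardware */
static void on_status_done(struct ep0 *ep)
{
    if (ep->set_address) {
        ep->set_address = false;
        ep->faddr = ep->address;
    }
    ep->stage = STAGE_IDLE;
}

int main(void)
{
    struct ep0 ep = { .stage = STAGE_SETUP };

    on_setup_set_address(&ep, 5);
    on_status_done(&ep);
    printf("device address is now %u\n", ep.faddr);
    return 0;
}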
+ */ + musb->ackpend |= MUSB_CSR0_P_DATAEND; + + /* status stage might be immediate */ + if (handled > 0) + musb->ep0_state = + MUSB_EP0_STAGE_STATUSIN; + break; + + /* sequence #1 (IN to host), includes GET_STATUS + * requests that we can't forward, GET_DESCRIPTOR + * and others that we must + */ + case MUSB_EP0_STAGE_TX: + handled = service_in_request(musb, &setup); + if (handled > 0) { + musb->ackpend = MUSB_CSR0_TXPKTRDY + | MUSB_CSR0_P_DATAEND; + musb->ep0_state = + MUSB_EP0_STAGE_STATUSOUT; + } + break; + + /* sequence #2 (OUT from host), always forward */ + default: /* MUSB_EP0_STAGE_RX */ + break; + } + + dev_dbg(musb->controller, "handled %d, csr %04x, ep0stage %s\n", + handled, csr, + decode_ep0stage(musb->ep0_state)); + + /* unless we need to delegate this to the gadget + * driver, we know how to wrap this up: csr0 has + * not yet been written. + */ + if (handled < 0) + goto stall; + else if (handled > 0) + goto finish; + + handled = forward_to_driver(musb, &setup); + if (handled < 0) { + musb_ep_select(mbase, 0); +stall: + dev_dbg(musb->controller, "stall (%d)\n", handled); + musb->ackpend |= MUSB_CSR0_P_SENDSTALL; + musb->ep0_state = MUSB_EP0_STAGE_IDLE; +finish: + musb_writew(regs, MUSB_CSR0, + musb->ackpend); + musb->ackpend = 0; + } + } + break; + + case MUSB_EP0_STAGE_ACKWAIT: + /* This should not happen. But happens with tusb6010 with + * g_file_storage and high speed. Do nothing. + */ + retval = IRQ_HANDLED; + break; + + default: + /* "can't happen" */ + WARN_ON(1); + musb_writew(regs, MUSB_CSR0, MUSB_CSR0_P_SENDSTALL); + musb->ep0_state = MUSB_EP0_STAGE_IDLE; + break; + } + + return retval; +} + + +static int +musb_g_ep0_enable(struct usb_ep *ep, const struct usb_endpoint_descriptor *desc) +{ + /* always enabled */ + return -EINVAL; +} + +static int musb_g_ep0_disable(struct usb_ep *e) +{ + /* always enabled */ + return -EINVAL; +} + +static int +musb_g_ep0_queue(struct usb_ep *e, struct usb_request *r, gfp_t gfp_flags) +{ + struct musb_ep *ep; + struct musb_request *req; + struct musb *musb; + int status; + unsigned long lockflags; + void __iomem *regs; + + if (!e || !r) + return -EINVAL; + + ep = to_musb_ep(e); + musb = ep->musb; + regs = musb->control_ep->regs; + + req = to_musb_request(r); + req->musb = musb; + req->request.actual = 0; + req->request.status = -EINPROGRESS; + req->tx = ep->is_in; + + spin_lock_irqsave(&musb->lock, lockflags); + + if (!list_empty(&ep->req_list)) { + status = -EBUSY; + goto cleanup; + } + + switch (musb->ep0_state) { + case MUSB_EP0_STAGE_RX: /* control-OUT data */ + case MUSB_EP0_STAGE_TX: /* control-IN data */ + case MUSB_EP0_STAGE_ACKWAIT: /* zero-length data */ + status = 0; + break; + default: + dev_dbg(musb->controller, "ep0 request queued in state %d\n", + musb->ep0_state); + status = -EINVAL; + goto cleanup; + } + + /* add request to the list */ + list_add_tail(&req->list, &ep->req_list); + + dev_dbg(musb->controller, "queue to %s (%s), length=%d\n", + ep->name, ep->is_in ? "IN/TX" : "OUT/RX", + req->request.length); + + musb_ep_select(musb->mregs, 0); + + /* sequence #1, IN ... start writing the data */ + if (musb->ep0_state == MUSB_EP0_STAGE_TX) + ep0_txstate(musb); + + /* sequence #3, no-data ... 
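The dispatch above, like service_zero_data_request() and service_in_request(), uses one return convention throughout ep0: negative means stall, zero means delegate to the gadget driver's setup(), positive means the request was fully serviced here. A standalone sketch of that convention follows; the request codes and messages are made up for illustration.

#include <stdio.h>

static int service_request(int req)
{
    switch (req) {
    case 0:  return 1;      /* handled locally (e.g. SET_ADDRESS)        */
    case 1:  return 0;      /* not ours: hand it to the gadget driver    */
    default: return -22;    /* malformed: reply with a protocol STALL    */
    }
}

static void dispatch(int req)
{
    int handled = service_request(req);

    if (handled > 0)
        printf("req %d: ack locally\n", req);
    else if (handled == 0)
        printf("req %d: forward to gadget driver setup()\n", req);
    else
        printf("req %d: send STALL (%d)\n", req, handled);
}

int main(void)
{
    dispatch(0);
    dispatch(1);
    dispatch(9);
    return 0;
}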
issue IN status */ + else if (musb->ep0_state == MUSB_EP0_STAGE_ACKWAIT) { + if (req->request.length) + status = -EINVAL; + else { + musb->ep0_state = MUSB_EP0_STAGE_STATUSIN; + musb_writew(regs, MUSB_CSR0, + musb->ackpend | MUSB_CSR0_P_DATAEND); + musb->ackpend = 0; + musb_g_ep0_giveback(ep->musb, r); + } + + /* else for sequence #2 (OUT), caller provides a buffer + * before the next packet arrives. deferred responses + * (after SETUP is acked) are racey. + */ + } else if (musb->ackpend) { + musb_writew(regs, MUSB_CSR0, musb->ackpend); + musb->ackpend = 0; + } + +cleanup: + spin_unlock_irqrestore(&musb->lock, lockflags); + return status; +} + +static int musb_g_ep0_dequeue(struct usb_ep *ep, struct usb_request *req) +{ + /* we just won't support this */ + return -EINVAL; +} + +static int musb_g_ep0_halt(struct usb_ep *e, int value) +{ + struct musb_ep *ep; + struct musb *musb; + void __iomem *base, *regs; + unsigned long flags; + int status; + u16 csr; + + if (!e || !value) + return -EINVAL; + + ep = to_musb_ep(e); + musb = ep->musb; + base = musb->mregs; + regs = musb->control_ep->regs; + status = 0; + + spin_lock_irqsave(&musb->lock, flags); + + if (!list_empty(&ep->req_list)) { + status = -EBUSY; + goto cleanup; + } + + musb_ep_select(base, 0); + csr = musb->ackpend; + + switch (musb->ep0_state) { + + /* Stalls are usually issued after parsing SETUP packet, either + * directly in irq context from setup() or else later. + */ + case MUSB_EP0_STAGE_TX: /* control-IN data */ + case MUSB_EP0_STAGE_ACKWAIT: /* STALL for zero-length data */ + case MUSB_EP0_STAGE_RX: /* control-OUT data */ + csr = musb_readw(regs, MUSB_CSR0); + /* FALLTHROUGH */ + + /* It's also OK to issue stalls during callbacks when a non-empty + * DATA stage buffer has been read (or even written). + */ + case MUSB_EP0_STAGE_STATUSIN: /* control-OUT status */ + case MUSB_EP0_STAGE_STATUSOUT: /* control-IN status */ + + csr |= MUSB_CSR0_P_SENDSTALL; + musb_writew(regs, MUSB_CSR0, csr); + musb->ep0_state = MUSB_EP0_STAGE_IDLE; + musb->ackpend = 0; + break; + default: + dev_dbg(musb->controller, "ep0 can't halt in state %d\n", musb->ep0_state); + status = -EINVAL; + } + +cleanup: + spin_unlock_irqrestore(&musb->lock, flags); + return status; +} + +const struct usb_ep_ops musb_g_ep0_ops = { + .enable = musb_g_ep0_enable, + .disable = musb_g_ep0_disable, + .alloc_request = musb_alloc_request, + .free_request = musb_free_request, + .queue = musb_g_ep0_queue, + .dequeue = musb_g_ep0_dequeue, + .set_halt = musb_g_ep0_halt, +}; diff --git a/drivers/usb/musb-new/musb_host.c b/drivers/usb/musb-new/musb_host.c new file mode 100644 index 00000000000..9a2cf59d924 --- /dev/null +++ b/drivers/usb/musb-new/musb_host.c @@ -0,0 +1,2400 @@ +/* + * MUSB OTG driver host support + * + * Copyright 2005 Mentor Graphics Corporation + * Copyright (C) 2005-2006 by Texas Instruments + * Copyright (C) 2006-2007 Nokia Corporation + * Copyright (C) 2008-2009 MontaVista Software, Inc. <source@mvista.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN + * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF + * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +#define __UBOOT__ +#ifndef __UBOOT__ +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/delay.h> +#include <linux/sched.h> +#include <linux/slab.h> +#include <linux/errno.h> +#include <linux/init.h> +#include <linux/list.h> +#include <linux/dma-mapping.h> +#else +#include <common.h> +#include <usb.h> +#include "linux-compat.h" +#include "usb-compat.h" +#endif + +#include "musb_core.h" +#include "musb_host.h" + + +/* MUSB HOST status 22-mar-2006 + * + * - There's still lots of partial code duplication for fault paths, so + * they aren't handled as consistently as they need to be. + * + * - PIO mostly behaved when last tested. + * + including ep0, with all usbtest cases 9, 10 + * + usbtest 14 (ep0out) doesn't seem to run at all + * + double buffered OUT/TX endpoints saw stalls(!) with certain usbtest + * configurations, but otherwise double buffering passes basic tests. + * + for 2.6.N, for N > ~10, needs API changes for hcd framework. + * + * - DMA (CPPI) ... partially behaves, not currently recommended + * + about 1/15 the speed of typical EHCI implementations (PCI) + * + RX, all too often reqpkt seems to misbehave after tx + * + TX, no known issues (other than evident silicon issue) + * + * - DMA (Mentor/OMAP) ...has at least toggle update problems + * + * - [23-feb-2009] minimal traffic scheduling to avoid bulk RX packet + * starvation ... nothing yet for TX, interrupt, or bulk. + * + * - Not tested with HNP, but some SRP paths seem to behave. + * + * NOTE 24-August-2006: + * + * - Bulk traffic finally uses both sides of hardware ep1, freeing up an + * extra endpoint for periodic use enabling hub + keybd + mouse. That + * mostly works, except that with "usbnet" it's easy to trigger cases + * with "ping" where RX loses. (a) ping to davinci, even "ping -f", + * fine; but (b) ping _from_ davinci, even "ping -c 1", ICMP RX loses + * although ARP RX wins. (That test was done with a full speed link.) + */ + + +/* + * NOTE on endpoint usage: + * + * CONTROL transfers all go through ep0. BULK ones go through dedicated IN + * and OUT endpoints ... hardware is dedicated for those "async" queue(s). + * (Yes, bulk _could_ use more of the endpoints than that, and would even + * benefit from it.) + * + * INTERUPPT and ISOCHRONOUS transfers are scheduled to the other endpoints. + * So far that scheduling is both dumb and optimistic: the endpoint will be + * "claimed" until its software queue is no longer refilled. No multiplexing + * of transfers between endpoints, or anything clever. 
+ */ + + +static void musb_ep_program(struct musb *musb, u8 epnum, + struct urb *urb, int is_out, + u8 *buf, u32 offset, u32 len); + +/* + * Clear TX fifo. Needed to avoid BABBLE errors. + */ +static void musb_h_tx_flush_fifo(struct musb_hw_ep *ep) +{ + struct musb *musb = ep->musb; + void __iomem *epio = ep->regs; + u16 csr; + u16 lastcsr = 0; + int retries = 1000; + + csr = musb_readw(epio, MUSB_TXCSR); + while (csr & MUSB_TXCSR_FIFONOTEMPTY) { + if (csr != lastcsr) + dev_dbg(musb->controller, "Host TX FIFONOTEMPTY csr: %02x\n", csr); + lastcsr = csr; + csr |= MUSB_TXCSR_FLUSHFIFO; + musb_writew(epio, MUSB_TXCSR, csr); + csr = musb_readw(epio, MUSB_TXCSR); + if (WARN(retries-- < 1, + "Could not flush host TX%d fifo: csr: %04x\n", + ep->epnum, csr)) + return; + mdelay(1); + } +} + +static void musb_h_ep0_flush_fifo(struct musb_hw_ep *ep) +{ + void __iomem *epio = ep->regs; + u16 csr; + int retries = 5; + + /* scrub any data left in the fifo */ + do { + csr = musb_readw(epio, MUSB_TXCSR); + if (!(csr & (MUSB_CSR0_TXPKTRDY | MUSB_CSR0_RXPKTRDY))) + break; + musb_writew(epio, MUSB_TXCSR, MUSB_CSR0_FLUSHFIFO); + csr = musb_readw(epio, MUSB_TXCSR); + udelay(10); + } while (--retries); + + WARN(!retries, "Could not flush host TX%d fifo: csr: %04x\n", + ep->epnum, csr); + + /* and reset for the next transfer */ + musb_writew(epio, MUSB_TXCSR, 0); +} + +/* + * Start transmit. Caller is responsible for locking shared resources. + * musb must be locked. + */ +static inline void musb_h_tx_start(struct musb_hw_ep *ep) +{ + u16 txcsr; + + /* NOTE: no locks here; caller should lock and select EP */ + if (ep->epnum) { + txcsr = musb_readw(ep->regs, MUSB_TXCSR); + txcsr |= MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_H_WZC_BITS; + musb_writew(ep->regs, MUSB_TXCSR, txcsr); + } else { + txcsr = MUSB_CSR0_H_SETUPPKT | MUSB_CSR0_TXPKTRDY; + musb_writew(ep->regs, MUSB_CSR0, txcsr); + } + +} + +static inline void musb_h_tx_dma_start(struct musb_hw_ep *ep) +{ + u16 txcsr; + + /* NOTE: no locks here; caller should lock and select EP */ + txcsr = musb_readw(ep->regs, MUSB_TXCSR); + txcsr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_H_WZC_BITS; + if (is_cppi_enabled()) + txcsr |= MUSB_TXCSR_DMAMODE; + musb_writew(ep->regs, MUSB_TXCSR, txcsr); +} + +static void musb_ep_set_qh(struct musb_hw_ep *ep, int is_in, struct musb_qh *qh) +{ + if (is_in != 0 || ep->is_shared_fifo) + ep->in_qh = qh; + if (is_in == 0 || ep->is_shared_fifo) + ep->out_qh = qh; +} + +static struct musb_qh *musb_ep_get_qh(struct musb_hw_ep *ep, int is_in) +{ + return is_in ? ep->in_qh : ep->out_qh; +} + +/* + * Start the URB at the front of an endpoint's queue + * end must be claimed from the caller. 
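musb_ep_set_qh()/musb_ep_get_qh() above let a shared-FIFO host endpoint carry a single queue head for both directions, while split endpoints keep separate IN and OUT heads. Below is a standalone sketch of exactly that bookkeeping; struct qh here is just a named placeholder.

#include <stdio.h>
#include <stdbool.h>

struct qh { const char *name; };

struct hw_ep {
    bool is_shared_fifo;
    struct qh *in_qh;
    struct qh *out_qh;
};

/* same rule as the driver: a shared FIFO updates both directions */
static void ep_set_qh(struct hw_ep *ep, int is_in, struct qh *qh)
{
    if (is_in || ep->is_shared_fifo)
        ep->in_qh = qh;
    if (!is_in || ep->is_shared_fifo)
        ep->out_qh = qh;
}

static struct qh *ep_get_qh(struct hw_ep *ep, int is_in)
{
    return is_in ? ep->in_qh : ep->out_qh;
}

int main(void)
{
    struct hw_ep shared = { .is_shared_fifo = true };
    struct qh bulk = { .name = "bulk qh" };

    ep_set_qh(&shared, 0, &bulk);                 /* OUT assignment ...      */
    printf("%s\n", ep_get_qh(&shared, 1)->name);  /* ... visible on IN too   */
    return 0;
}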
+ * + * Context: controller locked, irqs blocked + */ +static void +musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh) +{ + u16 frame; + u32 len; + void __iomem *mbase = musb->mregs; + struct urb *urb = next_urb(qh); + void *buf = urb->transfer_buffer; + u32 offset = 0; + struct musb_hw_ep *hw_ep = qh->hw_ep; + unsigned pipe = urb->pipe; + u8 address = usb_pipedevice(pipe); + int epnum = hw_ep->epnum; + + /* initialize software qh state */ + qh->offset = 0; + qh->segsize = 0; + + /* gather right source of data */ + switch (qh->type) { + case USB_ENDPOINT_XFER_CONTROL: + /* control transfers always start with SETUP */ + is_in = 0; + musb->ep0_stage = MUSB_EP0_START; + buf = urb->setup_packet; + len = 8; + break; +#ifndef __UBOOT__ + case USB_ENDPOINT_XFER_ISOC: + qh->iso_idx = 0; + qh->frame = 0; + offset = urb->iso_frame_desc[0].offset; + len = urb->iso_frame_desc[0].length; + break; +#endif + default: /* bulk, interrupt */ + /* actual_length may be nonzero on retry paths */ + buf = urb->transfer_buffer + urb->actual_length; + len = urb->transfer_buffer_length - urb->actual_length; + } + + dev_dbg(musb->controller, "qh %p urb %p dev%d ep%d%s%s, hw_ep %d, %p/%d\n", + qh, urb, address, qh->epnum, + is_in ? "in" : "out", + ({char *s; switch (qh->type) { + case USB_ENDPOINT_XFER_CONTROL: s = ""; break; + case USB_ENDPOINT_XFER_BULK: s = "-bulk"; break; +#ifndef __UBOOT__ + case USB_ENDPOINT_XFER_ISOC: s = "-iso"; break; +#endif + default: s = "-intr"; break; + }; s; }), + epnum, buf + offset, len); + + /* Configure endpoint */ + musb_ep_set_qh(hw_ep, is_in, qh); + musb_ep_program(musb, epnum, urb, !is_in, buf, offset, len); + + /* transmit may have more work: start it when it is time */ + if (is_in) + return; + + /* determine if the time is right for a periodic transfer */ + switch (qh->type) { +#ifndef __UBOOT__ + case USB_ENDPOINT_XFER_ISOC: +#endif + case USB_ENDPOINT_XFER_INT: + dev_dbg(musb->controller, "check whether there's still time for periodic Tx\n"); + frame = musb_readw(mbase, MUSB_FRAME); + /* FIXME this doesn't implement that scheduling policy ... + * or handle framecounter wrapping + */ +#ifndef __UBOOT__ + if ((urb->transfer_flags & URB_ISO_ASAP) + || (frame >= urb->start_frame)) { + /* REVISIT the SOF irq handler shouldn't duplicate + * this code; and we don't init urb->start_frame... + */ + qh->frame = 0; + goto start; + } else { +#endif + qh->frame = urb->start_frame; + /* enable SOF interrupt so we can count down */ + dev_dbg(musb->controller, "SOF for %d\n", epnum); +#if 1 /* ifndef CONFIG_ARCH_DAVINCI */ + musb_writeb(mbase, MUSB_INTRUSBE, 0xff); +#endif +#ifndef __UBOOT__ + } +#endif + break; + default: +start: + dev_dbg(musb->controller, "Start TX%d %s\n", epnum, + hw_ep->tx_channel ? "dma" : "pio"); + + if (!hw_ep->tx_channel) + musb_h_tx_start(hw_ep); + else if (is_cppi_enabled() || tusb_dma_omap()) + musb_h_tx_dma_start(hw_ep); + } +} + +/* Context: caller owns controller lock, IRQs are blocked */ +static void musb_giveback(struct musb *musb, struct urb *urb, int status) +__releases(musb->lock) +__acquires(musb->lock) +{ + dev_dbg(musb->controller, + "complete %p %pF (%d), dev%d ep%d%s, %d/%d\n", + urb, urb->complete, status, + usb_pipedevice(urb->pipe), + usb_pipeendpoint(urb->pipe), + usb_pipein(urb->pipe) ? 
"in" : "out", + urb->actual_length, urb->transfer_buffer_length + ); + + usb_hcd_unlink_urb_from_ep(musb_to_hcd(musb), urb); + spin_unlock(&musb->lock); + usb_hcd_giveback_urb(musb_to_hcd(musb), urb, status); + spin_lock(&musb->lock); +} + +/* For bulk/interrupt endpoints only */ +static inline void musb_save_toggle(struct musb_qh *qh, int is_in, + struct urb *urb) +{ + void __iomem *epio = qh->hw_ep->regs; + u16 csr; + + /* + * FIXME: the current Mentor DMA code seems to have + * problems getting toggle correct. + */ + + if (is_in) + csr = musb_readw(epio, MUSB_RXCSR) & MUSB_RXCSR_H_DATATOGGLE; + else + csr = musb_readw(epio, MUSB_TXCSR) & MUSB_TXCSR_H_DATATOGGLE; + + usb_settoggle(urb->dev, qh->epnum, !is_in, csr ? 1 : 0); +} + +/* + * Advance this hardware endpoint's queue, completing the specified URB and + * advancing to either the next URB queued to that qh, or else invalidating + * that qh and advancing to the next qh scheduled after the current one. + * + * Context: caller owns controller lock, IRQs are blocked + */ +static void musb_advance_schedule(struct musb *musb, struct urb *urb, + struct musb_hw_ep *hw_ep, int is_in) +{ + struct musb_qh *qh = musb_ep_get_qh(hw_ep, is_in); + struct musb_hw_ep *ep = qh->hw_ep; + int ready = qh->is_ready; + int status; + + status = (urb->status == -EINPROGRESS) ? 0 : urb->status; + + /* save toggle eagerly, for paranoia */ + switch (qh->type) { + case USB_ENDPOINT_XFER_BULK: + case USB_ENDPOINT_XFER_INT: + musb_save_toggle(qh, is_in, urb); + break; +#ifndef __UBOOT__ + case USB_ENDPOINT_XFER_ISOC: + if (status == 0 && urb->error_count) + status = -EXDEV; + break; +#endif + } + + qh->is_ready = 0; + musb_giveback(musb, urb, status); + qh->is_ready = ready; + + /* reclaim resources (and bandwidth) ASAP; deschedule it, and + * invalidate qh as soon as list_empty(&hep->urb_list) + */ + if (list_empty(&qh->hep->urb_list)) { + struct list_head *head; + struct dma_controller *dma = musb->dma_controller; + + if (is_in) { + ep->rx_reinit = 1; + if (ep->rx_channel) { + dma->channel_release(ep->rx_channel); + ep->rx_channel = NULL; + } + } else { + ep->tx_reinit = 1; + if (ep->tx_channel) { + dma->channel_release(ep->tx_channel); + ep->tx_channel = NULL; + } + } + + /* Clobber old pointers to this qh */ + musb_ep_set_qh(ep, is_in, NULL); + qh->hep->hcpriv = NULL; + + switch (qh->type) { + + case USB_ENDPOINT_XFER_CONTROL: + case USB_ENDPOINT_XFER_BULK: + /* fifo policy for these lists, except that NAKing + * should rotate a qh to the end (for fairness). + */ + if (qh->mux == 1) { + head = qh->ring.prev; + list_del(&qh->ring); + kfree(qh); + qh = first_qh(head); + break; + } + + case USB_ENDPOINT_XFER_ISOC: + case USB_ENDPOINT_XFER_INT: + /* this is where periodic bandwidth should be + * de-allocated if it's tracked and allocated; + * and where we'd update the schedule tree... + */ + kfree(qh); + qh = NULL; + break; + } + } + + if (qh != NULL && qh->is_ready) { + dev_dbg(musb->controller, "... next ep%d %cX urb %p\n", + hw_ep->epnum, is_in ? 
'R' : 'T', next_urb(qh)); + musb_start_urb(musb, is_in, qh); + } +} + +static u16 musb_h_flush_rxfifo(struct musb_hw_ep *hw_ep, u16 csr) +{ + /* we don't want fifo to fill itself again; + * ignore dma (various models), + * leave toggle alone (may not have been saved yet) + */ + csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_RXPKTRDY; + csr &= ~(MUSB_RXCSR_H_REQPKT + | MUSB_RXCSR_H_AUTOREQ + | MUSB_RXCSR_AUTOCLEAR); + + /* write 2x to allow double buffering */ + musb_writew(hw_ep->regs, MUSB_RXCSR, csr); + musb_writew(hw_ep->regs, MUSB_RXCSR, csr); + + /* flush writebuffer */ + return musb_readw(hw_ep->regs, MUSB_RXCSR); +} + +/* + * PIO RX for a packet (or part of it). + */ +static bool +musb_host_packet_rx(struct musb *musb, struct urb *urb, u8 epnum, u8 iso_err) +{ + u16 rx_count; + u8 *buf; + u16 csr; + bool done = false; + u32 length; + int do_flush = 0; + struct musb_hw_ep *hw_ep = musb->endpoints + epnum; + void __iomem *epio = hw_ep->regs; + struct musb_qh *qh = hw_ep->in_qh; + int pipe = urb->pipe; + void *buffer = urb->transfer_buffer; + + /* musb_ep_select(mbase, epnum); */ + rx_count = musb_readw(epio, MUSB_RXCOUNT); + dev_dbg(musb->controller, "RX%d count %d, buffer %p len %d/%d\n", epnum, rx_count, + urb->transfer_buffer, qh->offset, + urb->transfer_buffer_length); + + /* unload FIFO */ +#ifndef __UBOOT__ + if (usb_pipeisoc(pipe)) { + int status = 0; + struct usb_iso_packet_descriptor *d; + + if (iso_err) { + status = -EILSEQ; + urb->error_count++; + } + + d = urb->iso_frame_desc + qh->iso_idx; + buf = buffer + d->offset; + length = d->length; + if (rx_count > length) { + if (status == 0) { + status = -EOVERFLOW; + urb->error_count++; + } + dev_dbg(musb->controller, "** OVERFLOW %d into %d\n", rx_count, length); + do_flush = 1; + } else + length = rx_count; + urb->actual_length += length; + d->actual_length = length; + + d->status = status; + + /* see if we are done */ + done = (++qh->iso_idx >= urb->number_of_packets); + } else { +#endif + /* non-isoch */ + buf = buffer + qh->offset; + length = urb->transfer_buffer_length - qh->offset; + if (rx_count > length) { + if (urb->status == -EINPROGRESS) + urb->status = -EOVERFLOW; + dev_dbg(musb->controller, "** OVERFLOW %d into %d\n", rx_count, length); + do_flush = 1; + } else + length = rx_count; + urb->actual_length += length; + qh->offset += length; + + /* see if we are done */ + done = (urb->actual_length == urb->transfer_buffer_length) + || (rx_count < qh->maxpacket) + || (urb->status != -EINPROGRESS); + if (done + && (urb->status == -EINPROGRESS) + && (urb->transfer_flags & URB_SHORT_NOT_OK) + && (urb->actual_length + < urb->transfer_buffer_length)) + urb->status = -EREMOTEIO; +#ifndef __UBOOT__ + } +#endif + + musb_read_fifo(hw_ep, length, buf); + + csr = musb_readw(epio, MUSB_RXCSR); + csr |= MUSB_RXCSR_H_WZC_BITS; + if (unlikely(do_flush)) + musb_h_flush_rxfifo(hw_ep, csr); + else { + /* REVISIT this assumes AUTOCLEAR is never set */ + csr &= ~(MUSB_RXCSR_RXPKTRDY | MUSB_RXCSR_H_REQPKT); + if (!done) + csr |= MUSB_RXCSR_H_REQPKT; + musb_writew(epio, MUSB_RXCSR, csr); + } + + return done; +} + +/* we don't always need to reinit a given side of an endpoint... + * when we do, use tx/rx reinit routine and then construct a new CSR + * to address data toggle, NYET, and DMA or PIO. + * + * it's possible that driver bugs (especially for DMA) or aborting a + * transfer might have left the endpoint busier than it should be. + * the busy/not-empty tests are basically paranoia. 
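+ *
+ * (In this file ep->rx_reinit is set by musb_advance_schedule() when an
+ * endpoint's URB list empties and by musb_bulk_rx_nak_timeout() when a
+ * bulk-IN qh is rotated; musb_ep_program() then calls musb_rx_reinit()
+ * before programming the next transfer.)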
+ */ +static void +musb_rx_reinit(struct musb *musb, struct musb_qh *qh, struct musb_hw_ep *ep) +{ + u16 csr; + + /* NOTE: we know the "rx" fifo reinit never triggers for ep0. + * That always uses tx_reinit since ep0 repurposes TX register + * offsets; the initial SETUP packet is also a kind of OUT. + */ + + /* if programmed for Tx, put it in RX mode */ + if (ep->is_shared_fifo) { + csr = musb_readw(ep->regs, MUSB_TXCSR); + if (csr & MUSB_TXCSR_MODE) { + musb_h_tx_flush_fifo(ep); + csr = musb_readw(ep->regs, MUSB_TXCSR); + musb_writew(ep->regs, MUSB_TXCSR, + csr | MUSB_TXCSR_FRCDATATOG); + } + + /* + * Clear the MODE bit (and everything else) to enable Rx. + * NOTE: we mustn't clear the DMAMODE bit before DMAENAB. + */ + if (csr & MUSB_TXCSR_DMAMODE) + musb_writew(ep->regs, MUSB_TXCSR, MUSB_TXCSR_DMAMODE); + musb_writew(ep->regs, MUSB_TXCSR, 0); + + /* scrub all previous state, clearing toggle */ + } else { + csr = musb_readw(ep->regs, MUSB_RXCSR); + if (csr & MUSB_RXCSR_RXPKTRDY) + WARNING("rx%d, packet/%d ready?\n", ep->epnum, + musb_readw(ep->regs, MUSB_RXCOUNT)); + + musb_h_flush_rxfifo(ep, MUSB_RXCSR_CLRDATATOG); + } + + /* target addr and (for multipoint) hub addr/port */ + if (musb->is_multipoint) { + musb_write_rxfunaddr(ep->target_regs, qh->addr_reg); + musb_write_rxhubaddr(ep->target_regs, qh->h_addr_reg); + musb_write_rxhubport(ep->target_regs, qh->h_port_reg); + + } else + musb_writeb(musb->mregs, MUSB_FADDR, qh->addr_reg); + + /* protocol/endpoint, interval/NAKlimit, i/o size */ + musb_writeb(ep->regs, MUSB_RXTYPE, qh->type_reg); + musb_writeb(ep->regs, MUSB_RXINTERVAL, qh->intv_reg); + /* NOTE: bulk combining rewrites high bits of maxpacket */ + /* Set RXMAXP with the FIFO size of the endpoint + * to disable double buffer mode. + */ + if (musb->double_buffer_not_ok) + musb_writew(ep->regs, MUSB_RXMAXP, ep->max_packet_sz_rx); + else + musb_writew(ep->regs, MUSB_RXMAXP, + qh->maxpacket | ((qh->hb_mult - 1) << 11)); + + ep->rx_reinit = 0; +} + +static bool musb_tx_dma_program(struct dma_controller *dma, + struct musb_hw_ep *hw_ep, struct musb_qh *qh, + struct urb *urb, u32 offset, u32 length) +{ + struct dma_channel *channel = hw_ep->tx_channel; + void __iomem *epio = hw_ep->regs; + u16 pkt_size = qh->maxpacket; + u16 csr; + u8 mode; + +#ifdef CONFIG_USB_INVENTRA_DMA + if (length > channel->max_len) + length = channel->max_len; + + csr = musb_readw(epio, MUSB_TXCSR); + if (length > pkt_size) { + mode = 1; + csr |= MUSB_TXCSR_DMAMODE | MUSB_TXCSR_DMAENAB; + /* autoset shouldn't be set in high bandwidth */ + if (qh->hb_mult == 1) + csr |= MUSB_TXCSR_AUTOSET; + } else { + mode = 0; + csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAMODE); + csr |= MUSB_TXCSR_DMAENAB; /* against programmer's guide */ + } + channel->desired_mode = mode; + musb_writew(epio, MUSB_TXCSR, csr); +#else + if (!is_cppi_enabled() && !tusb_dma_omap()) + return false; + + channel->actual_len = 0; + + /* + * TX uses "RNDIS" mode automatically but needs help + * to identify the zero-length-final-packet case. + */ + mode = (urb->transfer_flags & URB_ZERO_PACKET) ? 
1 : 0; +#endif + + qh->segsize = length; + + /* + * Ensure the data reaches to main memory before starting + * DMA transfer + */ + wmb(); + + if (!dma->channel_program(channel, pkt_size, mode, + urb->transfer_dma + offset, length)) { + dma->channel_release(channel); + hw_ep->tx_channel = NULL; + + csr = musb_readw(epio, MUSB_TXCSR); + csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAENAB); + musb_writew(epio, MUSB_TXCSR, csr | MUSB_TXCSR_H_WZC_BITS); + return false; + } + return true; +} + +/* + * Program an HDRC endpoint as per the given URB + * Context: irqs blocked, controller lock held + */ +static void musb_ep_program(struct musb *musb, u8 epnum, + struct urb *urb, int is_out, + u8 *buf, u32 offset, u32 len) +{ + struct dma_controller *dma_controller; + struct dma_channel *dma_channel; + u8 dma_ok; + void __iomem *mbase = musb->mregs; + struct musb_hw_ep *hw_ep = musb->endpoints + epnum; + void __iomem *epio = hw_ep->regs; + struct musb_qh *qh = musb_ep_get_qh(hw_ep, !is_out); + u16 packet_sz = qh->maxpacket; + + dev_dbg(musb->controller, "%s hw%d urb %p spd%d dev%d ep%d%s " + "h_addr%02x h_port%02x bytes %d\n", + is_out ? "-->" : "<--", + epnum, urb, urb->dev->speed, + qh->addr_reg, qh->epnum, is_out ? "out" : "in", + qh->h_addr_reg, qh->h_port_reg, + len); + + musb_ep_select(mbase, epnum); + + /* candidate for DMA? */ + dma_controller = musb->dma_controller; + if (is_dma_capable() && epnum && dma_controller) { + dma_channel = is_out ? hw_ep->tx_channel : hw_ep->rx_channel; + if (!dma_channel) { + dma_channel = dma_controller->channel_alloc( + dma_controller, hw_ep, is_out); + if (is_out) + hw_ep->tx_channel = dma_channel; + else + hw_ep->rx_channel = dma_channel; + } + } else + dma_channel = NULL; + + /* make sure we clear DMAEnab, autoSet bits from previous run */ + + /* OUT/transmit/EP0 or IN/receive? */ + if (is_out) { + u16 csr; + u16 int_txe; + u16 load_count; + + csr = musb_readw(epio, MUSB_TXCSR); + + /* disable interrupt in case we flush */ + int_txe = musb_readw(mbase, MUSB_INTRTXE); + musb_writew(mbase, MUSB_INTRTXE, int_txe & ~(1 << epnum)); + + /* general endpoint setup */ + if (epnum) { + /* flush all old state, set default */ + musb_h_tx_flush_fifo(hw_ep); + + /* + * We must not clear the DMAMODE bit before or in + * the same cycle with the DMAENAB bit, so we clear + * the latter first... + */ + csr &= ~(MUSB_TXCSR_H_NAKTIMEOUT + | MUSB_TXCSR_AUTOSET + | MUSB_TXCSR_DMAENAB + | MUSB_TXCSR_FRCDATATOG + | MUSB_TXCSR_H_RXSTALL + | MUSB_TXCSR_H_ERROR + | MUSB_TXCSR_TXPKTRDY + ); + csr |= MUSB_TXCSR_MODE; + + if (usb_gettoggle(urb->dev, qh->epnum, 1)) + csr |= MUSB_TXCSR_H_WR_DATATOGGLE + | MUSB_TXCSR_H_DATATOGGLE; + else + csr |= MUSB_TXCSR_CLRDATATOG; + + musb_writew(epio, MUSB_TXCSR, csr); + /* REVISIT may need to clear FLUSHFIFO ... */ + csr &= ~MUSB_TXCSR_DMAMODE; + musb_writew(epio, MUSB_TXCSR, csr); + csr = musb_readw(epio, MUSB_TXCSR); + } else { + /* endpoint 0: just flush */ + musb_h_ep0_flush_fifo(hw_ep); + } + + /* target addr and (for multipoint) hub addr/port */ + if (musb->is_multipoint) { + musb_write_txfunaddr(mbase, epnum, qh->addr_reg); + musb_write_txhubaddr(mbase, epnum, qh->h_addr_reg); + musb_write_txhubport(mbase, epnum, qh->h_port_reg); +/* FIXME if !epnum, do the same for RX ... 
*/ + } else + musb_writeb(mbase, MUSB_FADDR, qh->addr_reg); + + /* protocol/endpoint/interval/NAKlimit */ + if (epnum) { + musb_writeb(epio, MUSB_TXTYPE, qh->type_reg); + if (musb->double_buffer_not_ok) + musb_writew(epio, MUSB_TXMAXP, + hw_ep->max_packet_sz_tx); + else if (can_bulk_split(musb, qh->type)) + musb_writew(epio, MUSB_TXMAXP, packet_sz + | ((hw_ep->max_packet_sz_tx / + packet_sz) - 1) << 11); + else + musb_writew(epio, MUSB_TXMAXP, + qh->maxpacket | + ((qh->hb_mult - 1) << 11)); + musb_writeb(epio, MUSB_TXINTERVAL, qh->intv_reg); + } else { + musb_writeb(epio, MUSB_NAKLIMIT0, qh->intv_reg); + if (musb->is_multipoint) + musb_writeb(epio, MUSB_TYPE0, + qh->type_reg); + } + + if (can_bulk_split(musb, qh->type)) + load_count = min((u32) hw_ep->max_packet_sz_tx, + len); + else + load_count = min((u32) packet_sz, len); + + if (dma_channel && musb_tx_dma_program(dma_controller, + hw_ep, qh, urb, offset, len)) + load_count = 0; + + if (load_count) { + /* PIO to load FIFO */ + qh->segsize = load_count; + musb_write_fifo(hw_ep, load_count, buf); + } + + /* re-enable interrupt */ + musb_writew(mbase, MUSB_INTRTXE, int_txe); + + /* IN/receive */ + } else { + u16 csr; + + if (hw_ep->rx_reinit) { + musb_rx_reinit(musb, qh, hw_ep); + + /* init new state: toggle and NYET, maybe DMA later */ + if (usb_gettoggle(urb->dev, qh->epnum, 0)) + csr = MUSB_RXCSR_H_WR_DATATOGGLE + | MUSB_RXCSR_H_DATATOGGLE; + else + csr = 0; + if (qh->type == USB_ENDPOINT_XFER_INT) + csr |= MUSB_RXCSR_DISNYET; + + } else { + csr = musb_readw(hw_ep->regs, MUSB_RXCSR); + + if (csr & (MUSB_RXCSR_RXPKTRDY + | MUSB_RXCSR_DMAENAB + | MUSB_RXCSR_H_REQPKT)) + ERR("broken !rx_reinit, ep%d csr %04x\n", + hw_ep->epnum, csr); + + /* scrub any stale state, leaving toggle alone */ + csr &= MUSB_RXCSR_DISNYET; + } + + /* kick things off */ + + if ((is_cppi_enabled() || tusb_dma_omap()) && dma_channel) { + /* Candidate for DMA */ + dma_channel->actual_len = 0L; + qh->segsize = len; + + /* AUTOREQ is in a DMA register */ + musb_writew(hw_ep->regs, MUSB_RXCSR, csr); + csr = musb_readw(hw_ep->regs, MUSB_RXCSR); + + /* + * Unless caller treats short RX transfers as + * errors, we dare not queue multiple transfers. + */ + dma_ok = dma_controller->channel_program(dma_channel, + packet_sz, !(urb->transfer_flags & + URB_SHORT_NOT_OK), + urb->transfer_dma + offset, + qh->segsize); + if (!dma_ok) { + dma_controller->channel_release(dma_channel); + hw_ep->rx_channel = dma_channel = NULL; + } else + csr |= MUSB_RXCSR_DMAENAB; + } + + csr |= MUSB_RXCSR_H_REQPKT; + dev_dbg(musb->controller, "RXCSR%d := %04x\n", epnum, csr); + musb_writew(hw_ep->regs, MUSB_RXCSR, csr); + csr = musb_readw(hw_ep->regs, MUSB_RXCSR); + } +} + + +/* + * Service the default endpoint (ep0) as host. + * Return true until it's time to start the status stage. + */ +static bool musb_h_ep0_continue(struct musb *musb, u16 len, struct urb *urb) +{ + bool more = false; + u8 *fifo_dest = NULL; + u16 fifo_count = 0; + struct musb_hw_ep *hw_ep = musb->control_ep; + struct musb_qh *qh = hw_ep->in_qh; + struct usb_ctrlrequest *request; + + switch (musb->ep0_stage) { + case MUSB_EP0_IN: + fifo_dest = urb->transfer_buffer + urb->actual_length; + fifo_count = min_t(size_t, len, urb->transfer_buffer_length - + urb->actual_length); + if (fifo_count < len) + urb->status = -EOVERFLOW; + + musb_read_fifo(hw_ep, fifo_count, fifo_dest); + + urb->actual_length += fifo_count; + if (len < qh->maxpacket) { + /* always terminate on short read; it's + * rarely reported as an error. 
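+ * (Unlike musb_host_packet_rx() above, this ep0 path does not check
+ * URB_SHORT_NOT_OK; a short IN packet simply ends the DATA stage.)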
+ */ + } else if (urb->actual_length < + urb->transfer_buffer_length) + more = true; + break; + case MUSB_EP0_START: + request = (struct usb_ctrlrequest *) urb->setup_packet; + + if (!request->wLength) { + dev_dbg(musb->controller, "start no-DATA\n"); + break; + } else if (request->bRequestType & USB_DIR_IN) { + dev_dbg(musb->controller, "start IN-DATA\n"); + musb->ep0_stage = MUSB_EP0_IN; + more = true; + break; + } else { + dev_dbg(musb->controller, "start OUT-DATA\n"); + musb->ep0_stage = MUSB_EP0_OUT; + more = true; + } + /* FALLTHROUGH */ + case MUSB_EP0_OUT: + fifo_count = min_t(size_t, qh->maxpacket, + urb->transfer_buffer_length - + urb->actual_length); + if (fifo_count) { + fifo_dest = (u8 *) (urb->transfer_buffer + + urb->actual_length); + dev_dbg(musb->controller, "Sending %d byte%s to ep0 fifo %p\n", + fifo_count, + (fifo_count == 1) ? "" : "s", + fifo_dest); + musb_write_fifo(hw_ep, fifo_count, fifo_dest); + + urb->actual_length += fifo_count; + more = true; + } + break; + default: + ERR("bogus ep0 stage %d\n", musb->ep0_stage); + break; + } + + return more; +} + +/* + * Handle default endpoint interrupt as host. Only called in IRQ time + * from musb_interrupt(). + * + * called with controller irqlocked + */ +irqreturn_t musb_h_ep0_irq(struct musb *musb) +{ + struct urb *urb; + u16 csr, len; + int status = 0; + void __iomem *mbase = musb->mregs; + struct musb_hw_ep *hw_ep = musb->control_ep; + void __iomem *epio = hw_ep->regs; + struct musb_qh *qh = hw_ep->in_qh; + bool complete = false; + irqreturn_t retval = IRQ_NONE; + + /* ep0 only has one queue, "in" */ + urb = next_urb(qh); + + musb_ep_select(mbase, 0); + csr = musb_readw(epio, MUSB_CSR0); + len = (csr & MUSB_CSR0_RXPKTRDY) + ? musb_readb(epio, MUSB_COUNT0) + : 0; + + dev_dbg(musb->controller, "<== csr0 %04x, qh %p, count %d, urb %p, stage %d\n", + csr, qh, len, urb, musb->ep0_stage); + + /* if we just did status stage, we are done */ + if (MUSB_EP0_STATUS == musb->ep0_stage) { + retval = IRQ_HANDLED; + complete = true; + } + + /* prepare status */ + if (csr & MUSB_CSR0_H_RXSTALL) { + dev_dbg(musb->controller, "STALLING ENDPOINT\n"); + status = -EPIPE; + + } else if (csr & MUSB_CSR0_H_ERROR) { + dev_dbg(musb->controller, "no response, csr0 %04x\n", csr); + status = -EPROTO; + + } else if (csr & MUSB_CSR0_H_NAKTIMEOUT) { + dev_dbg(musb->controller, "control NAK timeout\n"); + + /* NOTE: this code path would be a good place to PAUSE a + * control transfer, if another one is queued, so that + * ep0 is more likely to stay busy. That's already done + * for bulk RX transfers. + * + * if (qh->ring.next != &musb->control), then + * we have a candidate... NAKing is *NOT* an error + */ + musb_writew(epio, MUSB_CSR0, 0); + retval = IRQ_HANDLED; + } + + if (status) { + dev_dbg(musb->controller, "aborting\n"); + retval = IRQ_HANDLED; + if (urb) + urb->status = status; + complete = true; + + /* use the proper sequence to abort the transfer */ + if (csr & MUSB_CSR0_H_REQPKT) { + csr &= ~MUSB_CSR0_H_REQPKT; + musb_writew(epio, MUSB_CSR0, csr); + csr &= ~MUSB_CSR0_H_NAKTIMEOUT; + musb_writew(epio, MUSB_CSR0, csr); + } else { + musb_h_ep0_flush_fifo(hw_ep); + } + + musb_writeb(epio, MUSB_NAKLIMIT0, 0); + + /* clear it */ + musb_writew(epio, MUSB_CSR0, 0); + } + + if (unlikely(!urb)) { + /* stop endpoint since we have no place for its data, this + * SHOULD NEVER HAPPEN! 
*/ + ERR("no URB for end 0\n"); + + musb_h_ep0_flush_fifo(hw_ep); + goto done; + } + + if (!complete) { + /* call common logic and prepare response */ + if (musb_h_ep0_continue(musb, len, urb)) { + /* more packets required */ + csr = (MUSB_EP0_IN == musb->ep0_stage) + ? MUSB_CSR0_H_REQPKT : MUSB_CSR0_TXPKTRDY; + } else { + /* data transfer complete; perform status phase */ + if (usb_pipeout(urb->pipe) + || !urb->transfer_buffer_length) + csr = MUSB_CSR0_H_STATUSPKT + | MUSB_CSR0_H_REQPKT; + else + csr = MUSB_CSR0_H_STATUSPKT + | MUSB_CSR0_TXPKTRDY; + + /* flag status stage */ + musb->ep0_stage = MUSB_EP0_STATUS; + + dev_dbg(musb->controller, "ep0 STATUS, csr %04x\n", csr); + + } + musb_writew(epio, MUSB_CSR0, csr); + retval = IRQ_HANDLED; + } else + musb->ep0_stage = MUSB_EP0_IDLE; + + /* call completion handler if done */ + if (complete) + musb_advance_schedule(musb, urb, hw_ep, 1); +done: + return retval; +} + + +#ifdef CONFIG_USB_INVENTRA_DMA + +/* Host side TX (OUT) using Mentor DMA works as follows: + submit_urb -> + - if queue was empty, Program Endpoint + - ... which starts DMA to fifo in mode 1 or 0 + + DMA Isr (transfer complete) -> TxAvail() + - Stop DMA (~DmaEnab) (<--- Alert ... currently happens + only in musb_cleanup_urb) + - TxPktRdy has to be set in mode 0 or for + short packets in mode 1. +*/ + +#endif + +/* Service a Tx-Available or dma completion irq for the endpoint */ +void musb_host_tx(struct musb *musb, u8 epnum) +{ + int pipe; + bool done = false; + u16 tx_csr; + size_t length = 0; + size_t offset = 0; + struct musb_hw_ep *hw_ep = musb->endpoints + epnum; + void __iomem *epio = hw_ep->regs; + struct musb_qh *qh = hw_ep->out_qh; + struct urb *urb = next_urb(qh); + u32 status = 0; + void __iomem *mbase = musb->mregs; + struct dma_channel *dma; + bool transfer_pending = false; + + musb_ep_select(mbase, epnum); + tx_csr = musb_readw(epio, MUSB_TXCSR); + + /* with CPPI, DMA sometimes triggers "extra" irqs */ + if (!urb) { + dev_dbg(musb->controller, "extra TX%d ready, csr %04x\n", epnum, tx_csr); + return; + } + + pipe = urb->pipe; + dma = is_dma_capable() ? hw_ep->tx_channel : NULL; + dev_dbg(musb->controller, "OUT/TX%d end, csr %04x%s\n", epnum, tx_csr, + dma ? ", dma" : ""); + + /* check for errors */ + if (tx_csr & MUSB_TXCSR_H_RXSTALL) { + /* dma was disabled, fifo flushed */ + dev_dbg(musb->controller, "TX end %d stall\n", epnum); + + /* stall; record URB status */ + status = -EPIPE; + + } else if (tx_csr & MUSB_TXCSR_H_ERROR) { + /* (NON-ISO) dma was disabled, fifo flushed */ + dev_dbg(musb->controller, "TX 3strikes on ep=%d\n", epnum); + + status = -ETIMEDOUT; + + } else if (tx_csr & MUSB_TXCSR_H_NAKTIMEOUT) { + dev_dbg(musb->controller, "TX end=%d device not responding\n", epnum); + + /* NOTE: this code path would be a good place to PAUSE a + * transfer, if there's some other (nonperiodic) tx urb + * that could use this fifo. (dma complicates it...) + * That's already done for bulk RX transfers. + * + * if (bulk && qh->ring.next != &musb->out_bulk), then + * we have a candidate... NAKing is *NOT* an error + */ + musb_ep_select(mbase, epnum); + musb_writew(epio, MUSB_TXCSR, + MUSB_TXCSR_H_WZC_BITS + | MUSB_TXCSR_TXPKTRDY); + return; + } + + if (status) { + if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) { + dma->status = MUSB_DMA_STATUS_CORE_ABORT; + (void) musb->dma_controller->channel_abort(dma); + } + + /* do the proper sequence to abort the transfer in the + * usb core; the dma engine should already be stopped. 
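+ * (Concretely: flush the TX FIFO, clear the DMA, stall, error and
+ * NAK-timeout bits with two TXCSR writes, and zero TXINTERVAL before
+ * the URB is given back.)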
+ */ + musb_h_tx_flush_fifo(hw_ep); + tx_csr &= ~(MUSB_TXCSR_AUTOSET + | MUSB_TXCSR_DMAENAB + | MUSB_TXCSR_H_ERROR + | MUSB_TXCSR_H_RXSTALL + | MUSB_TXCSR_H_NAKTIMEOUT + ); + + musb_ep_select(mbase, epnum); + musb_writew(epio, MUSB_TXCSR, tx_csr); + /* REVISIT may need to clear FLUSHFIFO ... */ + musb_writew(epio, MUSB_TXCSR, tx_csr); + musb_writeb(epio, MUSB_TXINTERVAL, 0); + + done = true; + } + + /* second cppi case */ + if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) { + dev_dbg(musb->controller, "extra TX%d ready, csr %04x\n", epnum, tx_csr); + return; + } + + if (is_dma_capable() && dma && !status) { + /* + * DMA has completed. But if we're using DMA mode 1 (multi + * packet DMA), we need a terminal TXPKTRDY interrupt before + * we can consider this transfer completed, lest we trash + * its last packet when writing the next URB's data. So we + * switch back to mode 0 to get that interrupt; we'll come + * back here once it happens. + */ + if (tx_csr & MUSB_TXCSR_DMAMODE) { + /* + * We shouldn't clear DMAMODE with DMAENAB set; so + * clear them in a safe order. That should be OK + * once TXPKTRDY has been set (and I've never seen + * it being 0 at this moment -- DMA interrupt latency + * is significant) but if it hasn't been then we have + * no choice but to stop being polite and ignore the + * programmer's guide... :-) + * + * Note that we must write TXCSR with TXPKTRDY cleared + * in order not to re-trigger the packet send (this bit + * can't be cleared by CPU), and there's another caveat: + * TXPKTRDY may be set shortly and then cleared in the + * double-buffered FIFO mode, so we do an extra TXCSR + * read for debouncing... + */ + tx_csr &= musb_readw(epio, MUSB_TXCSR); + if (tx_csr & MUSB_TXCSR_TXPKTRDY) { + tx_csr &= ~(MUSB_TXCSR_DMAENAB | + MUSB_TXCSR_TXPKTRDY); + musb_writew(epio, MUSB_TXCSR, + tx_csr | MUSB_TXCSR_H_WZC_BITS); + } + tx_csr &= ~(MUSB_TXCSR_DMAMODE | + MUSB_TXCSR_TXPKTRDY); + musb_writew(epio, MUSB_TXCSR, + tx_csr | MUSB_TXCSR_H_WZC_BITS); + + /* + * There is no guarantee that we'll get an interrupt + * after clearing DMAMODE as we might have done this + * too late (after TXPKTRDY was cleared by controller). + * Re-read TXCSR as we have spoiled its previous value. + */ + tx_csr = musb_readw(epio, MUSB_TXCSR); + } + + /* + * We may get here from a DMA completion or TXPKTRDY interrupt. + * In any case, we must check the FIFO status here and bail out + * only if the FIFO still has data -- that should prevent the + * "missed" TXPKTRDY interrupts and deal with double-buffered + * FIFO mode too... 
+ */ + if (tx_csr & (MUSB_TXCSR_FIFONOTEMPTY | MUSB_TXCSR_TXPKTRDY)) { + dev_dbg(musb->controller, "DMA complete but packet still in FIFO, " + "CSR %04x\n", tx_csr); + return; + } + } + + if (!status || dma || usb_pipeisoc(pipe)) { + if (dma) + length = dma->actual_len; + else + length = qh->segsize; + qh->offset += length; + + if (usb_pipeisoc(pipe)) { +#ifndef __UBOOT__ + struct usb_iso_packet_descriptor *d; + + d = urb->iso_frame_desc + qh->iso_idx; + d->actual_length = length; + d->status = status; + if (++qh->iso_idx >= urb->number_of_packets) { + done = true; + } else { + d++; + offset = d->offset; + length = d->length; + } +#endif + } else if (dma && urb->transfer_buffer_length == qh->offset) { + done = true; + } else { + /* see if we need to send more data, or ZLP */ + if (qh->segsize < qh->maxpacket) + done = true; + else if (qh->offset == urb->transfer_buffer_length + && !(urb->transfer_flags + & URB_ZERO_PACKET)) + done = true; + if (!done) { + offset = qh->offset; + length = urb->transfer_buffer_length - offset; + transfer_pending = true; + } + } + } + + /* urb->status != -EINPROGRESS means request has been faulted, + * so we must abort this transfer after cleanup + */ + if (urb->status != -EINPROGRESS) { + done = true; + if (status == 0) + status = urb->status; + } + + if (done) { + /* set status */ + urb->status = status; + urb->actual_length = qh->offset; + musb_advance_schedule(musb, urb, hw_ep, USB_DIR_OUT); + return; + } else if ((usb_pipeisoc(pipe) || transfer_pending) && dma) { + if (musb_tx_dma_program(musb->dma_controller, hw_ep, qh, urb, + offset, length)) { + if (is_cppi_enabled() || tusb_dma_omap()) + musb_h_tx_dma_start(hw_ep); + return; + } + } else if (tx_csr & MUSB_TXCSR_DMAENAB) { + dev_dbg(musb->controller, "not complete, but DMA enabled?\n"); + return; + } + + /* + * PIO: start next packet in this URB. + * + * REVISIT: some docs say that when hw_ep->tx_double_buffered, + * (and presumably, FIFO is not half-full) we should write *two* + * packets before updating TXCSR; other docs disagree... + */ + if (length > qh->maxpacket) + length = qh->maxpacket; + /* Unmap the buffer so that CPU can use it */ + usb_hcd_unmap_urb_for_dma(musb_to_hcd(musb), urb); + musb_write_fifo(hw_ep, length, urb->transfer_buffer + offset); + qh->segsize = length; + + musb_ep_select(mbase, epnum); + musb_writew(epio, MUSB_TXCSR, + MUSB_TXCSR_H_WZC_BITS | MUSB_TXCSR_TXPKTRDY); +} + + +#ifdef CONFIG_USB_INVENTRA_DMA + +/* Host side RX (IN) using Mentor DMA works as follows: + submit_urb -> + - if queue was empty, ProgramEndpoint + - first IN token is sent out (by setting ReqPkt) + LinuxIsr -> RxReady() + /\ => first packet is received + | - Set in mode 0 (DmaEnab, ~ReqPkt) + | -> DMA Isr (transfer complete) -> RxReady() + | - Ack receive (~RxPktRdy), turn off DMA (~DmaEnab) + | - if urb not complete, send next IN token (ReqPkt) + | | else complete urb. + | | + --------------------------- + * + * Nuances of mode 1: + * For short packets, no ack (+RxPktRdy) is sent automatically + * (even if AutoClear is ON) + * For full packets, ack (~RxPktRdy) and next IN token (+ReqPkt) is sent + * automatically => major problem, as collecting the next packet becomes + * difficult. Hence mode 1 is not used. + * + * REVISIT + * All we care about at this driver level is that + * (a) all URBs terminate with REQPKT cleared and fifo(s) empty; + * (b) termination conditions are: short RX, or buffer full; + * (c) fault modes include + * - iff URB_SHORT_NOT_OK, short RX status is -EREMOTEIO. 
+ * (and that endpoint's dma queue stops immediately) + * - overflow (full, PLUS more bytes in the terminal packet) + * + * So for example, usb-storage sets URB_SHORT_NOT_OK, and would + * thus be a great candidate for using mode 1 ... for all but the + * last packet of one URB's transfer. + */ + +#endif + +/* Schedule next QH from musb->in_bulk and move the current qh to + * the end; avoids starvation for other endpoints. + */ +static void musb_bulk_rx_nak_timeout(struct musb *musb, struct musb_hw_ep *ep) +{ + struct dma_channel *dma; + struct urb *urb; + void __iomem *mbase = musb->mregs; + void __iomem *epio = ep->regs; + struct musb_qh *cur_qh, *next_qh; + u16 rx_csr; + + musb_ep_select(mbase, ep->epnum); + dma = is_dma_capable() ? ep->rx_channel : NULL; + + /* clear nak timeout bit */ + rx_csr = musb_readw(epio, MUSB_RXCSR); + rx_csr |= MUSB_RXCSR_H_WZC_BITS; + rx_csr &= ~MUSB_RXCSR_DATAERROR; + musb_writew(epio, MUSB_RXCSR, rx_csr); + + cur_qh = first_qh(&musb->in_bulk); + if (cur_qh) { + urb = next_urb(cur_qh); + if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) { + dma->status = MUSB_DMA_STATUS_CORE_ABORT; + musb->dma_controller->channel_abort(dma); + urb->actual_length += dma->actual_len; + dma->actual_len = 0L; + } + musb_save_toggle(cur_qh, 1, urb); + + /* move cur_qh to end of queue */ + list_move_tail(&cur_qh->ring, &musb->in_bulk); + + /* get the next qh from musb->in_bulk */ + next_qh = first_qh(&musb->in_bulk); + + /* set rx_reinit and schedule the next qh */ + ep->rx_reinit = 1; + musb_start_urb(musb, 1, next_qh); + } +} + +/* + * Service an RX interrupt for the given IN endpoint; docs cover bulk, iso, + * and high-bandwidth IN transfer cases. + */ +void musb_host_rx(struct musb *musb, u8 epnum) +{ + struct urb *urb; + struct musb_hw_ep *hw_ep = musb->endpoints + epnum; + void __iomem *epio = hw_ep->regs; + struct musb_qh *qh = hw_ep->in_qh; + size_t xfer_len; + void __iomem *mbase = musb->mregs; + int pipe; + u16 rx_csr, val; + bool iso_err = false; + bool done = false; + u32 status; + struct dma_channel *dma; + + musb_ep_select(mbase, epnum); + + urb = next_urb(qh); + dma = is_dma_capable() ? hw_ep->rx_channel : NULL; + status = 0; + xfer_len = 0; + + rx_csr = musb_readw(epio, MUSB_RXCSR); + val = rx_csr; + + if (unlikely(!urb)) { + /* REVISIT -- THIS SHOULD NEVER HAPPEN ... but, at least + * usbtest #11 (unlinks) triggers it regularly, sometimes + * with fifo full. (Only with DMA??) + */ + dev_dbg(musb->controller, "BOGUS RX%d ready, csr %04x, count %d\n", epnum, val, + musb_readw(epio, MUSB_RXCOUNT)); + musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG); + return; + } + + pipe = urb->pipe; + + dev_dbg(musb->controller, "<== hw %d rxcsr %04x, urb actual %d (+dma %zu)\n", + epnum, rx_csr, urb->actual_length, + dma ? dma->actual_len : 0); + + /* check for errors, concurrent stall & unlink is not really + * handled yet! */ + if (rx_csr & MUSB_RXCSR_H_RXSTALL) { + dev_dbg(musb->controller, "RX end %d STALL\n", epnum); + + /* stall; record URB status */ + status = -EPIPE; + + } else if (rx_csr & MUSB_RXCSR_H_ERROR) { + dev_dbg(musb->controller, "end %d RX proto error\n", epnum); + + status = -EPROTO; + musb_writeb(epio, MUSB_RXINTERVAL, 0); + + } else if (rx_csr & MUSB_RXCSR_DATAERROR) { + + if (USB_ENDPOINT_XFER_ISOC != qh->type) { + dev_dbg(musb->controller, "RX end %d NAK timeout\n", epnum); + + /* NOTE: NAKing is *NOT* an error, so we want to + * continue. Except ... if there's a request for + * another QH, use that instead of starving it. 
+ * + * Devices like Ethernet and serial adapters keep + * reads posted at all times, which will starve + * other devices without this logic. + */ + if (usb_pipebulk(urb->pipe) + && qh->mux == 1 + && !list_is_singular(&musb->in_bulk)) { + musb_bulk_rx_nak_timeout(musb, hw_ep); + return; + } + musb_ep_select(mbase, epnum); + rx_csr |= MUSB_RXCSR_H_WZC_BITS; + rx_csr &= ~MUSB_RXCSR_DATAERROR; + musb_writew(epio, MUSB_RXCSR, rx_csr); + + goto finish; + } else { + dev_dbg(musb->controller, "RX end %d ISO data error\n", epnum); + /* packet error reported later */ + iso_err = true; + } + } else if (rx_csr & MUSB_RXCSR_INCOMPRX) { + dev_dbg(musb->controller, "end %d high bandwidth incomplete ISO packet RX\n", + epnum); + status = -EPROTO; + } + + /* faults abort the transfer */ + if (status) { + /* clean up dma and collect transfer count */ + if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) { + dma->status = MUSB_DMA_STATUS_CORE_ABORT; + (void) musb->dma_controller->channel_abort(dma); + xfer_len = dma->actual_len; + } + musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG); + musb_writeb(epio, MUSB_RXINTERVAL, 0); + done = true; + goto finish; + } + + if (unlikely(dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY)) { + /* SHOULD NEVER HAPPEN ... but at least DaVinci has done it */ + ERR("RX%d dma busy, csr %04x\n", epnum, rx_csr); + goto finish; + } + + /* thorough shutdown for now ... given more precise fault handling + * and better queueing support, we might keep a DMA pipeline going + * while processing this irq for earlier completions. + */ + + /* FIXME this is _way_ too much in-line logic for Mentor DMA */ + +#ifndef CONFIG_USB_INVENTRA_DMA + if (rx_csr & MUSB_RXCSR_H_REQPKT) { + /* REVISIT this happened for a while on some short reads... + * the cleanup still needs investigation... looks bad... + * and also duplicates dma cleanup code above ... plus, + * shouldn't this be the "half full" double buffer case? + */ + if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) { + dma->status = MUSB_DMA_STATUS_CORE_ABORT; + (void) musb->dma_controller->channel_abort(dma); + xfer_len = dma->actual_len; + done = true; + } + + dev_dbg(musb->controller, "RXCSR%d %04x, reqpkt, len %zu%s\n", epnum, rx_csr, + xfer_len, dma ? ", dma" : ""); + rx_csr &= ~MUSB_RXCSR_H_REQPKT; + + musb_ep_select(mbase, epnum); + musb_writew(epio, MUSB_RXCSR, + MUSB_RXCSR_H_WZC_BITS | rx_csr); + } +#endif + if (dma && (rx_csr & MUSB_RXCSR_DMAENAB)) { + xfer_len = dma->actual_len; + + val &= ~(MUSB_RXCSR_DMAENAB + | MUSB_RXCSR_H_AUTOREQ + | MUSB_RXCSR_AUTOCLEAR + | MUSB_RXCSR_RXPKTRDY); + musb_writew(hw_ep->regs, MUSB_RXCSR, val); + +#ifdef CONFIG_USB_INVENTRA_DMA + if (usb_pipeisoc(pipe)) { + struct usb_iso_packet_descriptor *d; + + d = urb->iso_frame_desc + qh->iso_idx; + d->actual_length = xfer_len; + + /* even if there was an error, we did the dma + * for iso_frame_desc->length + */ + if (d->status != -EILSEQ && d->status != -EOVERFLOW) + d->status = 0; + + if (++qh->iso_idx >= urb->number_of_packets) + done = true; + else + done = false; + + } else { + /* done if urb buffer is full or short packet is recd */ + done = (urb->actual_length + xfer_len >= + urb->transfer_buffer_length + || dma->actual_len < qh->maxpacket); + } + + /* send IN token for next packet, without AUTOREQ */ + if (!done) { + val |= MUSB_RXCSR_H_REQPKT; + musb_writew(epio, MUSB_RXCSR, + MUSB_RXCSR_H_WZC_BITS | val); + } + + dev_dbg(musb->controller, "ep %d dma %s, rxcsr %04x, rxcount %d\n", epnum, + done ? 
"off" : "reset", + musb_readw(epio, MUSB_RXCSR), + musb_readw(epio, MUSB_RXCOUNT)); +#else + done = true; +#endif + } else if (urb->status == -EINPROGRESS) { + /* if no errors, be sure a packet is ready for unloading */ + if (unlikely(!(rx_csr & MUSB_RXCSR_RXPKTRDY))) { + status = -EPROTO; + ERR("Rx interrupt with no errors or packet!\n"); + + /* FIXME this is another "SHOULD NEVER HAPPEN" */ + +/* SCRUB (RX) */ + /* do the proper sequence to abort the transfer */ + musb_ep_select(mbase, epnum); + val &= ~MUSB_RXCSR_H_REQPKT; + musb_writew(epio, MUSB_RXCSR, val); + goto finish; + } + + /* we are expecting IN packets */ +#ifdef CONFIG_USB_INVENTRA_DMA + if (dma) { + struct dma_controller *c; + u16 rx_count; + int ret, length; + dma_addr_t buf; + + rx_count = musb_readw(epio, MUSB_RXCOUNT); + + dev_dbg(musb->controller, "RX%d count %d, buffer 0x%x len %d/%d\n", + epnum, rx_count, + urb->transfer_dma + + urb->actual_length, + qh->offset, + urb->transfer_buffer_length); + + c = musb->dma_controller; + + if (usb_pipeisoc(pipe)) { + int d_status = 0; + struct usb_iso_packet_descriptor *d; + + d = urb->iso_frame_desc + qh->iso_idx; + + if (iso_err) { + d_status = -EILSEQ; + urb->error_count++; + } + if (rx_count > d->length) { + if (d_status == 0) { + d_status = -EOVERFLOW; + urb->error_count++; + } + dev_dbg(musb->controller, "** OVERFLOW %d into %d\n",\ + rx_count, d->length); + + length = d->length; + } else + length = rx_count; + d->status = d_status; + buf = urb->transfer_dma + d->offset; + } else { + length = rx_count; + buf = urb->transfer_dma + + urb->actual_length; + } + + dma->desired_mode = 0; +#ifdef USE_MODE1 + /* because of the issue below, mode 1 will + * only rarely behave with correct semantics. + */ + if ((urb->transfer_flags & + URB_SHORT_NOT_OK) + && (urb->transfer_buffer_length - + urb->actual_length) + > qh->maxpacket) + dma->desired_mode = 1; + if (rx_count < hw_ep->max_packet_sz_rx) { + length = rx_count; + dma->desired_mode = 0; + } else { + length = urb->transfer_buffer_length; + } +#endif + +/* Disadvantage of using mode 1: + * It's basically usable only for mass storage class; essentially all + * other protocols also terminate transfers on short packets. + * + * Details: + * An extra IN token is sent at the end of the transfer (due to AUTOREQ) + * If you try to use mode 1 for (transfer_buffer_length - 512), and try + * to use the extra IN token to grab the last packet using mode 0, then + * the problem is that you cannot be sure when the device will send the + * last packet and RxPktRdy set. Sometimes the packet is recd too soon + * such that it gets lost when RxCSR is re-set at the end of the mode 1 + * transfer, while sometimes it is recd just a little late so that if you + * try to configure for mode 0 soon after the mode 1 transfer is + * completed, you will find rxcount 0. Okay, so you might think why not + * wait for an interrupt when the pkt is recd. Well, you won't get any! + */ + + val = musb_readw(epio, MUSB_RXCSR); + val &= ~MUSB_RXCSR_H_REQPKT; + + if (dma->desired_mode == 0) + val &= ~MUSB_RXCSR_H_AUTOREQ; + else + val |= MUSB_RXCSR_H_AUTOREQ; + val |= MUSB_RXCSR_DMAENAB; + + /* autoclear shouldn't be set in high bandwidth */ + if (qh->hb_mult == 1) + val |= MUSB_RXCSR_AUTOCLEAR; + + musb_writew(epio, MUSB_RXCSR, + MUSB_RXCSR_H_WZC_BITS | val); + + /* REVISIT if when actual_length != 0, + * transfer_buffer_length needs to be + * adjusted first... 
+ */ + ret = c->channel_program( + dma, qh->maxpacket, + dma->desired_mode, buf, length); + + if (!ret) { + c->channel_release(dma); + hw_ep->rx_channel = NULL; + dma = NULL; + val = musb_readw(epio, MUSB_RXCSR); + val &= ~(MUSB_RXCSR_DMAENAB + | MUSB_RXCSR_H_AUTOREQ + | MUSB_RXCSR_AUTOCLEAR); + musb_writew(epio, MUSB_RXCSR, val); + } + } +#endif /* Mentor DMA */ + + if (!dma) { + /* Unmap the buffer so that CPU can use it */ + usb_hcd_unmap_urb_for_dma(musb_to_hcd(musb), urb); + done = musb_host_packet_rx(musb, urb, + epnum, iso_err); + dev_dbg(musb->controller, "read %spacket\n", done ? "last " : ""); + } + } + +finish: + urb->actual_length += xfer_len; + qh->offset += xfer_len; + if (done) { + if (urb->status == -EINPROGRESS) + urb->status = status; + musb_advance_schedule(musb, urb, hw_ep, USB_DIR_IN); + } +} + +/* schedule nodes correspond to peripheral endpoints, like an OHCI QH. + * the software schedule associates multiple such nodes with a given + * host side hardware endpoint + direction; scheduling may activate + * that hardware endpoint. + */ +static int musb_schedule( + struct musb *musb, + struct musb_qh *qh, + int is_in) +{ + int idle; + int best_diff; + int best_end, epnum; + struct musb_hw_ep *hw_ep = NULL; + struct list_head *head = NULL; + u8 toggle; + u8 txtype; + struct urb *urb = next_urb(qh); + + /* use fixed hardware for control and bulk */ + if (qh->type == USB_ENDPOINT_XFER_CONTROL) { + head = &musb->control; + hw_ep = musb->control_ep; + goto success; + } + + /* else, periodic transfers get muxed to other endpoints */ + + /* + * We know this qh hasn't been scheduled, so all we need to do + * is choose which hardware endpoint to put it on ... + * + * REVISIT what we really want here is a regular schedule tree + * like e.g. OHCI uses. + */ + best_diff = 4096; + best_end = -1; + + for (epnum = 1, hw_ep = musb->endpoints + 1; + epnum < musb->nr_endpoints; + epnum++, hw_ep++) { + int diff; + + if (musb_ep_get_qh(hw_ep, is_in) != NULL) + continue; + + if (hw_ep == musb->bulk_ep) + continue; + + if (is_in) + diff = hw_ep->max_packet_sz_rx; + else + diff = hw_ep->max_packet_sz_tx; + diff -= (qh->maxpacket * qh->hb_mult); + + if (diff >= 0 && best_diff > diff) { + + /* + * Mentor controller has a bug in that if we schedule + * a BULK Tx transfer on an endpoint that had earlier + * handled ISOC then the BULK transfer has to start on + * a zero toggle. If the BULK transfer starts on a 1 + * toggle then this transfer will fail as the mentor + * controller starts the Bulk transfer on a 0 toggle + * irrespective of the programming of the toggle bits + * in the TXCSR register. Check for this condition + * while allocating the EP for a Tx Bulk transfer. If + * so skip this EP. + */ + hw_ep = musb->endpoints + epnum; + toggle = usb_gettoggle(urb->dev, qh->epnum, !is_in); + txtype = (musb_readb(hw_ep->regs, MUSB_TXTYPE) + >> 4) & 0x3; + if (!is_in && (qh->type == USB_ENDPOINT_XFER_BULK) && + toggle && (txtype == USB_ENDPOINT_XFER_ISOC)) + continue; + + best_diff = diff; + best_end = epnum; + } + } + /* use bulk reserved ep1 if no other ep is free */ + if (best_end < 0 && qh->type == USB_ENDPOINT_XFER_BULK) { + hw_ep = musb->bulk_ep; + if (is_in) + head = &musb->in_bulk; + else + head = &musb->out_bulk; + + /* Enable bulk RX NAK timeout scheme when bulk requests are + * multiplexed. This scheme doen't work in high speed to full + * speed scenario as NAK interrupts are not coming from a + * full speed device connected to a high speed device. 
+ * NAK timeout interval is 8 (128 uframe or 16ms) for HS and + * 4 (8 frame or 8ms) for FS device. + */ + if (is_in && qh->dev) + qh->intv_reg = + (USB_SPEED_HIGH == qh->dev->speed) ? 8 : 4; + goto success; + } else if (best_end < 0) { + return -ENOSPC; + } + + idle = 1; + qh->mux = 0; + hw_ep = musb->endpoints + best_end; + dev_dbg(musb->controller, "qh %p periodic slot %d\n", qh, best_end); +success: + if (head) { + idle = list_empty(head); + list_add_tail(&qh->ring, head); + qh->mux = 1; + } + qh->hw_ep = hw_ep; + qh->hep->hcpriv = qh; + if (idle) + musb_start_urb(musb, is_in, qh); + return 0; +} + +#ifdef __UBOOT__ +/* check if transaction translator is needed for device */ +static int tt_needed(struct musb *musb, struct usb_device *dev) +{ + if ((musb_readb(musb->mregs, MUSB_POWER) & MUSB_POWER_HSMODE) && + (dev->speed < USB_SPEED_HIGH)) + return 1; + return 0; +} +#endif + +#ifndef __UBOOT__ +static int musb_urb_enqueue( +#else +int musb_urb_enqueue( +#endif + struct usb_hcd *hcd, + struct urb *urb, + gfp_t mem_flags) +{ + unsigned long flags; + struct musb *musb = hcd_to_musb(hcd); + struct usb_host_endpoint *hep = urb->ep; + struct musb_qh *qh; + struct usb_endpoint_descriptor *epd = &hep->desc; + int ret; + unsigned type_reg; + unsigned interval; + + /* host role must be active */ + if (!is_host_active(musb) || !musb->is_active) + return -ENODEV; + + spin_lock_irqsave(&musb->lock, flags); + ret = usb_hcd_link_urb_to_ep(hcd, urb); + qh = ret ? NULL : hep->hcpriv; + if (qh) + urb->hcpriv = qh; + spin_unlock_irqrestore(&musb->lock, flags); + + /* DMA mapping was already done, if needed, and this urb is on + * hep->urb_list now ... so we're done, unless hep wasn't yet + * scheduled onto a live qh. + * + * REVISIT best to keep hep->hcpriv valid until the endpoint gets + * disabled, testing for empty qh->ring and avoiding qh setup costs + * except for the first urb queued after a config change. + */ + if (qh || ret) + return ret; + + /* Allocate and initialize qh, minimizing the work done each time + * hw_ep gets reprogrammed, or with irqs blocked. Then schedule it. + * + * REVISIT consider a dedicated qh kmem_cache, so it's harder + * for bugs in other kernel code to break this driver... + */ + qh = kzalloc(sizeof *qh, mem_flags); + if (!qh) { + spin_lock_irqsave(&musb->lock, flags); + usb_hcd_unlink_urb_from_ep(hcd, urb); + spin_unlock_irqrestore(&musb->lock, flags); + return -ENOMEM; + } + + qh->hep = hep; + qh->dev = urb->dev; + INIT_LIST_HEAD(&qh->ring); + qh->is_ready = 1; + + qh->maxpacket = usb_endpoint_maxp(epd); + qh->type = usb_endpoint_type(epd); + + /* Bits 11 & 12 of wMaxPacketSize encode high bandwidth multiplier. + * Some musb cores don't support high bandwidth ISO transfers; and + * we don't (yet!) support high bandwidth interrupt transfers. 
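+ *
+ * Worked example: a high-bandwidth ISO endpoint advertising wMaxPacketSize
+ * 0x1400 decodes below to hb_mult = 3 and maxpacket = 0x400, i.e. up to
+ * three 1024-byte transactions per microframe.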
+ */ + qh->hb_mult = 1 + ((qh->maxpacket >> 11) & 0x03); + if (qh->hb_mult > 1) { + int ok = (qh->type == USB_ENDPOINT_XFER_ISOC); + + if (ok) + ok = (usb_pipein(urb->pipe) && musb->hb_iso_rx) + || (usb_pipeout(urb->pipe) && musb->hb_iso_tx); + if (!ok) { + ret = -EMSGSIZE; + goto done; + } + qh->maxpacket &= 0x7ff; + } + + qh->epnum = usb_endpoint_num(epd); + + /* NOTE: urb->dev->devnum is wrong during SET_ADDRESS */ + qh->addr_reg = (u8) usb_pipedevice(urb->pipe); + + /* precompute rxtype/txtype/type0 register */ + type_reg = (qh->type << 4) | qh->epnum; + switch (urb->dev->speed) { + case USB_SPEED_LOW: + type_reg |= 0xc0; + break; + case USB_SPEED_FULL: + type_reg |= 0x80; + break; + default: + type_reg |= 0x40; + } + qh->type_reg = type_reg; + + /* Precompute RXINTERVAL/TXINTERVAL register */ + switch (qh->type) { + case USB_ENDPOINT_XFER_INT: + /* + * Full/low speeds use the linear encoding, + * high speed uses the logarithmic encoding. + */ + if (urb->dev->speed <= USB_SPEED_FULL) { + interval = max_t(u8, epd->bInterval, 1); + break; + } + /* FALLTHROUGH */ + case USB_ENDPOINT_XFER_ISOC: + /* ISO always uses logarithmic encoding */ + interval = min_t(u8, epd->bInterval, 16); + break; + default: + /* REVISIT we actually want to use NAK limits, hinting to the + * transfer scheduling logic to try some other qh, e.g. try + * for 2 msec first: + * + * interval = (USB_SPEED_HIGH == urb->dev->speed) ? 16 : 2; + * + * The downside of disabling this is that transfer scheduling + * gets VERY unfair for nonperiodic transfers; a misbehaving + * peripheral could make that hurt. That's perfectly normal + * for reads from network or serial adapters ... so we have + * partial NAKlimit support for bulk RX. + * + * The upside of disabling it is simpler transfer scheduling. + */ + interval = 0; + } + qh->intv_reg = interval; + + /* precompute addressing for external hub/tt ports */ + if (musb->is_multipoint) { + struct usb_device *parent = urb->dev->parent; + +#ifndef __UBOOT__ + if (parent != hcd->self.root_hub) { +#else + if (parent) { +#endif + qh->h_addr_reg = (u8) parent->devnum; + +#ifndef __UBOOT__ + /* set up tt info if needed */ + if (urb->dev->tt) { + qh->h_port_reg = (u8) urb->dev->ttport; + if (urb->dev->tt->hub) + qh->h_addr_reg = + (u8) urb->dev->tt->hub->devnum; + if (urb->dev->tt->multi) + qh->h_addr_reg |= 0x80; + } +#else + if (tt_needed(musb, urb->dev)) { + u16 hub_port = find_tt(urb->dev); + qh->h_addr_reg = (u8) (hub_port >> 8); + qh->h_port_reg = (u8) (hub_port & 0xff); + } +#endif + } + } + + /* invariant: hep->hcpriv is null OR the qh that's already scheduled. + * until we get real dma queues (with an entry for each urb/buffer), + * we only have work to do in the former case. + */ + spin_lock_irqsave(&musb->lock, flags); + if (hep->hcpriv) { + /* some concurrent activity submitted another urb to hep... + * odd, rare, error prone, but legal. + */ + kfree(qh); + qh = NULL; + ret = 0; + } else + ret = musb_schedule(musb, qh, + epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK); + + if (ret == 0) { + urb->hcpriv = qh; + /* FIXME set urb->start_frame for iso/intr, it's tested in + * musb_start_urb(), but otherwise only konicawc cares ... + */ + } + spin_unlock_irqrestore(&musb->lock, flags); + +done: + if (ret != 0) { + spin_lock_irqsave(&musb->lock, flags); + usb_hcd_unlink_urb_from_ep(hcd, urb); + spin_unlock_irqrestore(&musb->lock, flags); + kfree(qh); + } + return ret; +} + + +#ifndef __UBOOT__ +/* + * abort a transfer that's at the head of a hardware queue. 
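+ * (in this file it is used by musb_urb_dequeue() and musb_h_disable())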
+ * called with controller locked, irqs blocked + * that hardware queue advances to the next transfer, unless prevented + */ +static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh) +{ + struct musb_hw_ep *ep = qh->hw_ep; + struct musb *musb = ep->musb; + void __iomem *epio = ep->regs; + unsigned hw_end = ep->epnum; + void __iomem *regs = ep->musb->mregs; + int is_in = usb_pipein(urb->pipe); + int status = 0; + u16 csr; + + musb_ep_select(regs, hw_end); + + if (is_dma_capable()) { + struct dma_channel *dma; + + dma = is_in ? ep->rx_channel : ep->tx_channel; + if (dma) { + status = ep->musb->dma_controller->channel_abort(dma); + dev_dbg(musb->controller, + "abort %cX%d DMA for urb %p --> %d\n", + is_in ? 'R' : 'T', ep->epnum, + urb, status); + urb->actual_length += dma->actual_len; + } + } + + /* turn off DMA requests, discard state, stop polling ... */ + if (ep->epnum && is_in) { + /* giveback saves bulk toggle */ + csr = musb_h_flush_rxfifo(ep, 0); + + /* REVISIT we still get an irq; should likely clear the + * endpoint's irq status here to avoid bogus irqs. + * clearing that status is platform-specific... + */ + } else if (ep->epnum) { + musb_h_tx_flush_fifo(ep); + csr = musb_readw(epio, MUSB_TXCSR); + csr &= ~(MUSB_TXCSR_AUTOSET + | MUSB_TXCSR_DMAENAB + | MUSB_TXCSR_H_RXSTALL + | MUSB_TXCSR_H_NAKTIMEOUT + | MUSB_TXCSR_H_ERROR + | MUSB_TXCSR_TXPKTRDY); + musb_writew(epio, MUSB_TXCSR, csr); + /* REVISIT may need to clear FLUSHFIFO ... */ + musb_writew(epio, MUSB_TXCSR, csr); + /* flush cpu writebuffer */ + csr = musb_readw(epio, MUSB_TXCSR); + } else { + musb_h_ep0_flush_fifo(ep); + } + if (status == 0) + musb_advance_schedule(ep->musb, urb, ep, is_in); + return status; +} + +static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) +{ + struct musb *musb = hcd_to_musb(hcd); + struct musb_qh *qh; + unsigned long flags; + int is_in = usb_pipein(urb->pipe); + int ret; + + dev_dbg(musb->controller, "urb=%p, dev%d ep%d%s\n", urb, + usb_pipedevice(urb->pipe), + usb_pipeendpoint(urb->pipe), + is_in ? "in" : "out"); + + spin_lock_irqsave(&musb->lock, flags); + ret = usb_hcd_check_unlink_urb(hcd, urb, status); + if (ret) + goto done; + + qh = urb->hcpriv; + if (!qh) + goto done; + + /* + * Any URB not actively programmed into endpoint hardware can be + * immediately given back; that's any URB not at the head of an + * endpoint queue, unless someday we get real DMA queues. And even + * if it's at the head, it might not be known to the hardware... + * + * Otherwise abort current transfer, pending DMA, etc.; urb->status + * has already been updated. This is a synchronous abort; it'd be + * OK to hold off until after some IRQ, though. + * + * NOTE: qh is invalid unless !list_empty(&hep->urb_list) + */ + if (!qh->is_ready + || urb->urb_list.prev != &qh->hep->urb_list + || musb_ep_get_qh(qh->hw_ep, is_in) != qh) { + int ready = qh->is_ready; + + qh->is_ready = 0; + musb_giveback(musb, urb, 0); + qh->is_ready = ready; + + /* If nothing else (usually musb_giveback) is using it + * and its URB list has emptied, recycle this qh. 
+ */ + if (ready && list_empty(&qh->hep->urb_list)) { + qh->hep->hcpriv = NULL; + list_del(&qh->ring); + kfree(qh); + } + } else + ret = musb_cleanup_urb(urb, qh); +done: + spin_unlock_irqrestore(&musb->lock, flags); + return ret; +} + +/* disable an endpoint */ +static void +musb_h_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep) +{ + u8 is_in = hep->desc.bEndpointAddress & USB_DIR_IN; + unsigned long flags; + struct musb *musb = hcd_to_musb(hcd); + struct musb_qh *qh; + struct urb *urb; + + spin_lock_irqsave(&musb->lock, flags); + + qh = hep->hcpriv; + if (qh == NULL) + goto exit; + + /* NOTE: qh is invalid unless !list_empty(&hep->urb_list) */ + + /* Kick the first URB off the hardware, if needed */ + qh->is_ready = 0; + if (musb_ep_get_qh(qh->hw_ep, is_in) == qh) { + urb = next_urb(qh); + + /* make software (then hardware) stop ASAP */ + if (!urb->unlinked) + urb->status = -ESHUTDOWN; + + /* cleanup */ + musb_cleanup_urb(urb, qh); + + /* Then nuke all the others ... and advance the + * queue on hw_ep (e.g. bulk ring) when we're done. + */ + while (!list_empty(&hep->urb_list)) { + urb = next_urb(qh); + urb->status = -ESHUTDOWN; + musb_advance_schedule(musb, urb, qh->hw_ep, is_in); + } + } else { + /* Just empty the queue; the hardware is busy with + * other transfers, and since !qh->is_ready nothing + * will activate any of these as it advances. + */ + while (!list_empty(&hep->urb_list)) + musb_giveback(musb, next_urb(qh), -ESHUTDOWN); + + hep->hcpriv = NULL; + list_del(&qh->ring); + kfree(qh); + } +exit: + spin_unlock_irqrestore(&musb->lock, flags); +} + +static int musb_h_get_frame_number(struct usb_hcd *hcd) +{ + struct musb *musb = hcd_to_musb(hcd); + + return musb_readw(musb->mregs, MUSB_FRAME); +} + +static int musb_h_start(struct usb_hcd *hcd) +{ + struct musb *musb = hcd_to_musb(hcd); + + /* NOTE: musb_start() is called when the hub driver turns + * on port power, or when (OTG) peripheral starts. + */ + hcd->state = HC_STATE_RUNNING; + musb->port1_status = 0; + return 0; +} + +static void musb_h_stop(struct usb_hcd *hcd) +{ + musb_stop(hcd_to_musb(hcd)); + hcd->state = HC_STATE_HALT; +} + +static int musb_bus_suspend(struct usb_hcd *hcd) +{ + struct musb *musb = hcd_to_musb(hcd); + u8 devctl; + + if (!is_host_active(musb)) + return 0; + + switch (musb->xceiv->state) { + case OTG_STATE_A_SUSPEND: + return 0; + case OTG_STATE_A_WAIT_VRISE: + /* ID could be grounded even if there's no device + * on the other end of the cable. NOTE that the + * A_WAIT_VRISE timers are messy with MUSB... 
+ */ + devctl = musb_readb(musb->mregs, MUSB_DEVCTL); + if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS) + musb->xceiv->state = OTG_STATE_A_WAIT_BCON; + break; + default: + break; + } + + if (musb->is_active) { + WARNING("trying to suspend as %s while active\n", + otg_state_string(musb->xceiv->state)); + return -EBUSY; + } else + return 0; +} + +static int musb_bus_resume(struct usb_hcd *hcd) +{ + /* resuming child port does the work */ + return 0; +} + +const struct hc_driver musb_hc_driver = { + .description = "musb-hcd", + .product_desc = "MUSB HDRC host driver", + .hcd_priv_size = sizeof(struct musb), + .flags = HCD_USB2 | HCD_MEMORY, + + /* not using irq handler or reset hooks from usbcore, since + * those must be shared with peripheral code for OTG configs + */ + + .start = musb_h_start, + .stop = musb_h_stop, + + .get_frame_number = musb_h_get_frame_number, + + .urb_enqueue = musb_urb_enqueue, + .urb_dequeue = musb_urb_dequeue, + .endpoint_disable = musb_h_disable, + + .hub_status_data = musb_hub_status_data, + .hub_control = musb_hub_control, + .bus_suspend = musb_bus_suspend, + .bus_resume = musb_bus_resume, + /* .start_port_reset = NULL, */ + /* .hub_irq_enable = NULL, */ +}; +#endif diff --git a/drivers/usb/musb-new/musb_host.h b/drivers/usb/musb-new/musb_host.h new file mode 100644 index 00000000000..ebebe0c02a5 --- /dev/null +++ b/drivers/usb/musb-new/musb_host.h @@ -0,0 +1,114 @@ +/* + * MUSB OTG driver host defines + * + * Copyright 2005 Mentor Graphics Corporation + * Copyright (C) 2005-2006 by Texas Instruments + * Copyright (C) 2006-2007 Nokia Corporation + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN + * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF + * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + */ + +#ifndef _MUSB_HOST_H +#define _MUSB_HOST_H +#ifdef __UBOOT__ +#include "usb-compat.h" +#endif + +static inline struct usb_hcd *musb_to_hcd(struct musb *musb) +{ + return container_of((void *) musb, struct usb_hcd, hcd_priv); +} + +static inline struct musb *hcd_to_musb(struct usb_hcd *hcd) +{ + return (struct musb *) (hcd->hcd_priv); +} + +/* stored in "usb_host_endpoint.hcpriv" for scheduled endpoints */ +struct musb_qh { + struct usb_host_endpoint *hep; /* usbcore info */ + struct usb_device *dev; + struct musb_hw_ep *hw_ep; /* current binding */ + + struct list_head ring; /* of musb_qh */ + /* struct musb_qh *next; */ /* for periodic tree */ + u8 mux; /* qh multiplexed to hw_ep */ + + unsigned offset; /* in urb->transfer_buffer */ + unsigned segsize; /* current xfer fragment */ + + u8 type_reg; /* {rx,tx} type register */ + u8 intv_reg; /* {rx,tx} interval register */ + u8 addr_reg; /* device address register */ + u8 h_addr_reg; /* hub address register */ + u8 h_port_reg; /* hub port register */ + + u8 is_ready; /* safe to modify hw_ep */ + u8 type; /* XFERTYPE_* */ + u8 epnum; + u8 hb_mult; /* high bandwidth pkts per uf */ + u16 maxpacket; + u16 frame; /* for periodic schedule */ + unsigned iso_idx; /* in urb->iso_frame_desc[] */ +}; + +/* map from control or bulk queue head to the first qh on that ring */ +static inline struct musb_qh *first_qh(struct list_head *q) +{ + if (list_empty(q)) + return NULL; + return list_entry(q->next, struct musb_qh, ring); +} + + +extern void musb_root_disconnect(struct musb *musb); + +struct usb_hcd; + +extern int musb_hub_status_data(struct usb_hcd *hcd, char *buf); +extern int musb_hub_control(struct usb_hcd *hcd, + u16 typeReq, u16 wValue, u16 wIndex, + char *buf, u16 wLength); + +extern const struct hc_driver musb_hc_driver; + +static inline struct urb *next_urb(struct musb_qh *qh) +{ + struct list_head *queue; + + if (!qh) + return NULL; + queue = &qh->hep->urb_list; + if (list_empty(queue)) + return NULL; + return list_entry(queue->next, struct urb, urb_list); +} + +#ifdef __UBOOT__ +int musb_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags); +#endif +#endif /* _MUSB_HOST_H */ diff --git a/drivers/usb/musb-new/musb_io.h b/drivers/usb/musb-new/musb_io.h new file mode 100644 index 00000000000..51730aee521 --- /dev/null +++ b/drivers/usb/musb-new/musb_io.h @@ -0,0 +1,146 @@ +/* + * MUSB OTG driver register I/O + * + * Copyright 2005 Mentor Graphics Corporation + * Copyright (C) 2005-2006 by Texas Instruments + * Copyright (C) 2006-2007 Nokia Corporation + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN + * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF + * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +#ifndef __MUSB_LINUX_PLATFORM_ARCH_H__ +#define __MUSB_LINUX_PLATFORM_ARCH_H__ + +#ifndef __UBOOT__ +#include <linux/io.h> +#else +#include <asm/io.h> +#endif + +#if !defined(CONFIG_ARM) && !defined(CONFIG_SUPERH) \ + && !defined(CONFIG_AVR32) && !defined(CONFIG_PPC32) \ + && !defined(CONFIG_PPC64) && !defined(CONFIG_BLACKFIN) \ + && !defined(CONFIG_MIPS) && !defined(CONFIG_M68K) +static inline void readsl(const void __iomem *addr, void *buf, int len) + { insl((unsigned long)addr, buf, len); } +static inline void readsw(const void __iomem *addr, void *buf, int len) + { insw((unsigned long)addr, buf, len); } +static inline void readsb(const void __iomem *addr, void *buf, int len) + { insb((unsigned long)addr, buf, len); } + +static inline void writesl(const void __iomem *addr, const void *buf, int len) + { outsl((unsigned long)addr, buf, len); } +static inline void writesw(const void __iomem *addr, const void *buf, int len) + { outsw((unsigned long)addr, buf, len); } +static inline void writesb(const void __iomem *addr, const void *buf, int len) + { outsb((unsigned long)addr, buf, len); } + +#endif + +#ifndef CONFIG_BLACKFIN + +/* NOTE: these offsets are all in bytes */ + +static inline u16 musb_readw(const void __iomem *addr, unsigned offset) + { return __raw_readw(addr + offset); } + +static inline u32 musb_readl(const void __iomem *addr, unsigned offset) + { return __raw_readl(addr + offset); } + + +static inline void musb_writew(void __iomem *addr, unsigned offset, u16 data) + { __raw_writew(data, addr + offset); } + +static inline void musb_writel(void __iomem *addr, unsigned offset, u32 data) + { __raw_writel(data, addr + offset); } + + +#if defined(CONFIG_USB_MUSB_TUSB6010) || defined (CONFIG_USB_MUSB_TUSB6010_MODULE) + +/* + * TUSB6010 doesn't allow 8-bit access; 16-bit access is the minimum. 
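+ * Byte accesses therefore go through the containing 16-bit word: the
+ * helpers below read the aligned word and pick or merge the high/low
+ * byte according to (offset & 1).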
+ */ +static inline u8 musb_readb(const void __iomem *addr, unsigned offset) +{ + u16 tmp; + u8 val; + + tmp = __raw_readw(addr + (offset & ~1)); + if (offset & 1) + val = (tmp >> 8); + else + val = tmp & 0xff; + + return val; +} + +static inline void musb_writeb(void __iomem *addr, unsigned offset, u8 data) +{ + u16 tmp; + + tmp = __raw_readw(addr + (offset & ~1)); + if (offset & 1) + tmp = (data << 8) | (tmp & 0xff); + else + tmp = (tmp & 0xff00) | data; + + __raw_writew(tmp, addr + (offset & ~1)); +} + +#else + +static inline u8 musb_readb(const void __iomem *addr, unsigned offset) + { return __raw_readb(addr + offset); } + +static inline void musb_writeb(void __iomem *addr, unsigned offset, u8 data) + { __raw_writeb(data, addr + offset); } + +#endif /* CONFIG_USB_MUSB_TUSB6010 */ + +#else + +static inline u8 musb_readb(const void __iomem *addr, unsigned offset) + { return (u8) (bfin_read16(addr + offset)); } + +static inline u16 musb_readw(const void __iomem *addr, unsigned offset) + { return bfin_read16(addr + offset); } + +static inline u32 musb_readl(const void __iomem *addr, unsigned offset) + { return (u32) (bfin_read16(addr + offset)); } + +static inline void musb_writeb(void __iomem *addr, unsigned offset, u8 data) + { bfin_write16(addr + offset, (u16) data); } + +static inline void musb_writew(void __iomem *addr, unsigned offset, u16 data) + { bfin_write16(addr + offset, data); } + +static inline void musb_writel(void __iomem *addr, unsigned offset, u32 data) + { bfin_write16(addr + offset, (u16) data); } + +#endif /* CONFIG_BLACKFIN */ + +#endif diff --git a/drivers/usb/musb-new/musb_regs.h b/drivers/usb/musb-new/musb_regs.h new file mode 100644 index 00000000000..03f2655af29 --- /dev/null +++ b/drivers/usb/musb-new/musb_regs.h @@ -0,0 +1,645 @@ +/* + * MUSB OTG driver register defines + * + * Copyright 2005 Mentor Graphics Corporation + * Copyright (C) 2005-2006 by Texas Instruments + * Copyright (C) 2006-2007 Nokia Corporation + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN + * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF + * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + */ + +#ifndef __MUSB_REGS_H__ +#define __MUSB_REGS_H__ + +#define MUSB_EP0_FIFOSIZE 64 /* This is non-configurable */ + +/* + * MUSB Register bits + */ + +/* POWER */ +#define MUSB_POWER_ISOUPDATE 0x80 +#define MUSB_POWER_SOFTCONN 0x40 +#define MUSB_POWER_HSENAB 0x20 +#define MUSB_POWER_HSMODE 0x10 +#define MUSB_POWER_RESET 0x08 +#define MUSB_POWER_RESUME 0x04 +#define MUSB_POWER_SUSPENDM 0x02 +#define MUSB_POWER_ENSUSPEND 0x01 + +/* INTRUSB */ +#define MUSB_INTR_SUSPEND 0x01 +#define MUSB_INTR_RESUME 0x02 +#define MUSB_INTR_RESET 0x04 +#define MUSB_INTR_BABBLE 0x04 +#define MUSB_INTR_SOF 0x08 +#define MUSB_INTR_CONNECT 0x10 +#define MUSB_INTR_DISCONNECT 0x20 +#define MUSB_INTR_SESSREQ 0x40 +#define MUSB_INTR_VBUSERROR 0x80 /* For SESSION end */ + +/* DEVCTL */ +#define MUSB_DEVCTL_BDEVICE 0x80 +#define MUSB_DEVCTL_FSDEV 0x40 +#define MUSB_DEVCTL_LSDEV 0x20 +#define MUSB_DEVCTL_VBUS 0x18 +#define MUSB_DEVCTL_VBUS_SHIFT 3 +#define MUSB_DEVCTL_HM 0x04 +#define MUSB_DEVCTL_HR 0x02 +#define MUSB_DEVCTL_SESSION 0x01 + +/* MUSB ULPI VBUSCONTROL */ +#define MUSB_ULPI_USE_EXTVBUS 0x01 +#define MUSB_ULPI_USE_EXTVBUSIND 0x02 +/* ULPI_REG_CONTROL */ +#define MUSB_ULPI_REG_REQ (1 << 0) +#define MUSB_ULPI_REG_CMPLT (1 << 1) +#define MUSB_ULPI_RDN_WR (1 << 2) + +/* TESTMODE */ +#define MUSB_TEST_FORCE_HOST 0x80 +#define MUSB_TEST_FIFO_ACCESS 0x40 +#define MUSB_TEST_FORCE_FS 0x20 +#define MUSB_TEST_FORCE_HS 0x10 +#define MUSB_TEST_PACKET 0x08 +#define MUSB_TEST_K 0x04 +#define MUSB_TEST_J 0x02 +#define MUSB_TEST_SE0_NAK 0x01 + +/* Allocate for double-packet buffering (effectively doubles assigned _SIZE) */ +#define MUSB_FIFOSZ_DPB 0x10 +/* Allocation size (8, 16, 32, ... 4096) */ +#define MUSB_FIFOSZ_SIZE 0x0f + +/* CSR0 */ +#define MUSB_CSR0_FLUSHFIFO 0x0100 +#define MUSB_CSR0_TXPKTRDY 0x0002 +#define MUSB_CSR0_RXPKTRDY 0x0001 + +/* CSR0 in Peripheral mode */ +#define MUSB_CSR0_P_SVDSETUPEND 0x0080 +#define MUSB_CSR0_P_SVDRXPKTRDY 0x0040 +#define MUSB_CSR0_P_SENDSTALL 0x0020 +#define MUSB_CSR0_P_SETUPEND 0x0010 +#define MUSB_CSR0_P_DATAEND 0x0008 +#define MUSB_CSR0_P_SENTSTALL 0x0004 + +/* CSR0 in Host mode */ +#define MUSB_CSR0_H_DIS_PING 0x0800 +#define MUSB_CSR0_H_WR_DATATOGGLE 0x0400 /* Set to allow setting: */ +#define MUSB_CSR0_H_DATATOGGLE 0x0200 /* Data toggle control */ +#define MUSB_CSR0_H_NAKTIMEOUT 0x0080 +#define MUSB_CSR0_H_STATUSPKT 0x0040 +#define MUSB_CSR0_H_REQPKT 0x0020 +#define MUSB_CSR0_H_ERROR 0x0010 +#define MUSB_CSR0_H_SETUPPKT 0x0008 +#define MUSB_CSR0_H_RXSTALL 0x0004 + +/* CSR0 bits to avoid zeroing (write zero clears, write 1 ignored) */ +#define MUSB_CSR0_P_WZC_BITS \ + (MUSB_CSR0_P_SENTSTALL) +#define MUSB_CSR0_H_WZC_BITS \ + (MUSB_CSR0_H_NAKTIMEOUT | MUSB_CSR0_H_RXSTALL \ + | MUSB_CSR0_RXPKTRDY) + +/* TxType/RxType */ +#define MUSB_TYPE_SPEED 0xc0 +#define MUSB_TYPE_SPEED_SHIFT 6 +#define MUSB_TYPE_PROTO 0x30 /* Implicitly zero for ep0 */ +#define MUSB_TYPE_PROTO_SHIFT 4 +#define MUSB_TYPE_REMOTE_END 0xf /* Implicitly zero for ep0 */ + +/* CONFIGDATA */ +#define MUSB_CONFIGDATA_MPRXE 0x80 /* Auto bulk pkt combining */ +#define MUSB_CONFIGDATA_MPTXE 0x40 /* Auto bulk pkt splitting */ +#define MUSB_CONFIGDATA_BIGENDIAN 0x20 +#define MUSB_CONFIGDATA_HBRXE 0x10 /* HB-ISO for RX */ +#define MUSB_CONFIGDATA_HBTXE 0x08 /* HB-ISO for TX */ +#define MUSB_CONFIGDATA_DYNFIFO 0x04 /* Dynamic FIFO sizing */ +#define MUSB_CONFIGDATA_SOFTCONE 0x02 /* SoftConnect */ +#define MUSB_CONFIGDATA_UTMIDW 0x01 /* Data width 0/1 => 8/16bits */ + +/* TXCSR in Peripheral and Host mode */ 
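+/* Note that several TXCSR bit positions are reused with different
+ * meanings in host vs. peripheral mode (e.g. 0x0080 and 0x0020), hence
+ * the shared group here plus the mode-specific _P_/_H_ groups below.
+ */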
+#define MUSB_TXCSR_AUTOSET 0x8000 +#define MUSB_TXCSR_DMAENAB 0x1000 +#define MUSB_TXCSR_FRCDATATOG 0x0800 +#define MUSB_TXCSR_DMAMODE 0x0400 +#define MUSB_TXCSR_CLRDATATOG 0x0040 +#define MUSB_TXCSR_FLUSHFIFO 0x0008 +#define MUSB_TXCSR_FIFONOTEMPTY 0x0002 +#define MUSB_TXCSR_TXPKTRDY 0x0001 + +/* TXCSR in Peripheral mode */ +#define MUSB_TXCSR_P_ISO 0x4000 +#define MUSB_TXCSR_P_INCOMPTX 0x0080 +#define MUSB_TXCSR_P_SENTSTALL 0x0020 +#define MUSB_TXCSR_P_SENDSTALL 0x0010 +#define MUSB_TXCSR_P_UNDERRUN 0x0004 + +/* TXCSR in Host mode */ +#define MUSB_TXCSR_H_WR_DATATOGGLE 0x0200 +#define MUSB_TXCSR_H_DATATOGGLE 0x0100 +#define MUSB_TXCSR_H_NAKTIMEOUT 0x0080 +#define MUSB_TXCSR_H_RXSTALL 0x0020 +#define MUSB_TXCSR_H_ERROR 0x0004 + +/* TXCSR bits to avoid zeroing (write zero clears, write 1 ignored) */ +#define MUSB_TXCSR_P_WZC_BITS \ + (MUSB_TXCSR_P_INCOMPTX | MUSB_TXCSR_P_SENTSTALL \ + | MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_FIFONOTEMPTY) +#define MUSB_TXCSR_H_WZC_BITS \ + (MUSB_TXCSR_H_NAKTIMEOUT | MUSB_TXCSR_H_RXSTALL \ + | MUSB_TXCSR_H_ERROR | MUSB_TXCSR_FIFONOTEMPTY) + +/* RXCSR in Peripheral and Host mode */ +#define MUSB_RXCSR_AUTOCLEAR 0x8000 +#define MUSB_RXCSR_DMAENAB 0x2000 +#define MUSB_RXCSR_DISNYET 0x1000 +#define MUSB_RXCSR_PID_ERR 0x1000 +#define MUSB_RXCSR_DMAMODE 0x0800 +#define MUSB_RXCSR_INCOMPRX 0x0100 +#define MUSB_RXCSR_CLRDATATOG 0x0080 +#define MUSB_RXCSR_FLUSHFIFO 0x0010 +#define MUSB_RXCSR_DATAERROR 0x0008 +#define MUSB_RXCSR_FIFOFULL 0x0002 +#define MUSB_RXCSR_RXPKTRDY 0x0001 + +/* RXCSR in Peripheral mode */ +#define MUSB_RXCSR_P_ISO 0x4000 +#define MUSB_RXCSR_P_SENTSTALL 0x0040 +#define MUSB_RXCSR_P_SENDSTALL 0x0020 +#define MUSB_RXCSR_P_OVERRUN 0x0004 + +/* RXCSR in Host mode */ +#define MUSB_RXCSR_H_AUTOREQ 0x4000 +#define MUSB_RXCSR_H_WR_DATATOGGLE 0x0400 +#define MUSB_RXCSR_H_DATATOGGLE 0x0200 +#define MUSB_RXCSR_H_RXSTALL 0x0040 +#define MUSB_RXCSR_H_REQPKT 0x0020 +#define MUSB_RXCSR_H_ERROR 0x0004 + +/* RXCSR bits to avoid zeroing (write zero clears, write 1 ignored) */ +#define MUSB_RXCSR_P_WZC_BITS \ + (MUSB_RXCSR_P_SENTSTALL | MUSB_RXCSR_P_OVERRUN \ + | MUSB_RXCSR_RXPKTRDY) +#define MUSB_RXCSR_H_WZC_BITS \ + (MUSB_RXCSR_H_RXSTALL | MUSB_RXCSR_H_ERROR \ + | MUSB_RXCSR_DATAERROR | MUSB_RXCSR_RXPKTRDY) + +/* HUBADDR */ +#define MUSB_HUBADDR_MULTI_TT 0x80 + + +#ifndef CONFIG_BLACKFIN + +/* + * Common USB registers + */ + +#define MUSB_FADDR 0x00 /* 8-bit */ +#define MUSB_POWER 0x01 /* 8-bit */ + +#define MUSB_INTRTX 0x02 /* 16-bit */ +#define MUSB_INTRRX 0x04 +#define MUSB_INTRTXE 0x06 +#define MUSB_INTRRXE 0x08 +#define MUSB_INTRUSB 0x0A /* 8 bit */ +#define MUSB_INTRUSBE 0x0B /* 8 bit */ +#define MUSB_FRAME 0x0C +#define MUSB_INDEX 0x0E /* 8 bit */ +#define MUSB_TESTMODE 0x0F /* 8 bit */ + +/* Get offset for a given FIFO from musb->mregs */ +#if defined(CONFIG_USB_MUSB_TUSB6010) || \ + defined(CONFIG_USB_MUSB_TUSB6010_MODULE) +#define MUSB_FIFO_OFFSET(epnum) (0x200 + ((epnum) * 0x20)) +#else +#define MUSB_FIFO_OFFSET(epnum) (0x20 + ((epnum) * 4)) +#endif + +/* + * Additional Control Registers + */ + +#define MUSB_DEVCTL 0x60 /* 8 bit */ + +/* These are always controlled through the INDEX register */ +#define MUSB_TXFIFOSZ 0x62 /* 8-bit (see masks) */ +#define MUSB_RXFIFOSZ 0x63 /* 8-bit (see masks) */ +#define MUSB_TXFIFOADD 0x64 /* 16-bit offset shifted right 3 */ +#define MUSB_RXFIFOADD 0x66 /* 16-bit offset shifted right 3 */ + +/* REVISIT: vctrl/vstatus: optional vendor utmi+phy register at 0x68 */ +#define MUSB_HWVERS 0x6C /* 8 bit */ +#define 
MUSB_ULPI_BUSCONTROL 0x70 /* 8 bit */ +#define MUSB_ULPI_INT_MASK 0x72 /* 8 bit */ +#define MUSB_ULPI_INT_SRC 0x73 /* 8 bit */ +#define MUSB_ULPI_REG_DATA 0x74 /* 8 bit */ +#define MUSB_ULPI_REG_ADDR 0x75 /* 8 bit */ +#define MUSB_ULPI_REG_CONTROL 0x76 /* 8 bit */ +#define MUSB_ULPI_RAW_DATA 0x77 /* 8 bit */ + +#define MUSB_EPINFO 0x78 /* 8 bit */ +#define MUSB_RAMINFO 0x79 /* 8 bit */ +#define MUSB_LINKINFO 0x7a /* 8 bit */ +#define MUSB_VPLEN 0x7b /* 8 bit */ +#define MUSB_HS_EOF1 0x7c /* 8 bit */ +#define MUSB_FS_EOF1 0x7d /* 8 bit */ +#define MUSB_LS_EOF1 0x7e /* 8 bit */ + +/* Offsets to endpoint registers */ +#define MUSB_TXMAXP 0x00 +#define MUSB_TXCSR 0x02 +#define MUSB_CSR0 MUSB_TXCSR /* Re-used for EP0 */ +#define MUSB_RXMAXP 0x04 +#define MUSB_RXCSR 0x06 +#define MUSB_RXCOUNT 0x08 +#define MUSB_COUNT0 MUSB_RXCOUNT /* Re-used for EP0 */ +#define MUSB_TXTYPE 0x0A +#define MUSB_TYPE0 MUSB_TXTYPE /* Re-used for EP0 */ +#define MUSB_TXINTERVAL 0x0B +#define MUSB_NAKLIMIT0 MUSB_TXINTERVAL /* Re-used for EP0 */ +#define MUSB_RXTYPE 0x0C +#define MUSB_RXINTERVAL 0x0D +#define MUSB_FIFOSIZE 0x0F +#define MUSB_CONFIGDATA MUSB_FIFOSIZE /* Re-used for EP0 */ + +/* Offsets to endpoint registers in indexed model (using INDEX register) */ +#define MUSB_INDEXED_OFFSET(_epnum, _offset) \ + (0x10 + (_offset)) + +/* Offsets to endpoint registers in flat models */ +#define MUSB_FLAT_OFFSET(_epnum, _offset) \ + (0x100 + (0x10*(_epnum)) + (_offset)) + +#if defined(CONFIG_USB_MUSB_TUSB6010) || \ + defined(CONFIG_USB_MUSB_TUSB6010_MODULE) +/* TUSB6010 EP0 configuration register is special */ +#define MUSB_TUSB_OFFSET(_epnum, _offset) \ + (0x10 + _offset) +#include "tusb6010.h" /* Needed "only" for TUSB_EP0_CONF */ +#endif + +#define MUSB_TXCSR_MODE 0x2000 + +/* "bus control"/target registers, for host side multipoint (external hubs) */ +#define MUSB_TXFUNCADDR 0x00 +#define MUSB_TXHUBADDR 0x02 +#define MUSB_TXHUBPORT 0x03 + +#define MUSB_RXFUNCADDR 0x04 +#define MUSB_RXHUBADDR 0x06 +#define MUSB_RXHUBPORT 0x07 + +#define MUSB_BUSCTL_OFFSET(_epnum, _offset) \ + (0x80 + (8*(_epnum)) + (_offset)) + +static inline void musb_write_txfifosz(void __iomem *mbase, u8 c_size) +{ + musb_writeb(mbase, MUSB_TXFIFOSZ, c_size); +} + +static inline void musb_write_txfifoadd(void __iomem *mbase, u16 c_off) +{ + musb_writew(mbase, MUSB_TXFIFOADD, c_off); +} + +static inline void musb_write_rxfifosz(void __iomem *mbase, u8 c_size) +{ + musb_writeb(mbase, MUSB_RXFIFOSZ, c_size); +} + +static inline void musb_write_rxfifoadd(void __iomem *mbase, u16 c_off) +{ + musb_writew(mbase, MUSB_RXFIFOADD, c_off); +} + +static inline void musb_write_ulpi_buscontrol(void __iomem *mbase, u8 val) +{ + musb_writeb(mbase, MUSB_ULPI_BUSCONTROL, val); +} + +static inline u8 musb_read_txfifosz(void __iomem *mbase) +{ + return musb_readb(mbase, MUSB_TXFIFOSZ); +} + +static inline u16 musb_read_txfifoadd(void __iomem *mbase) +{ + return musb_readw(mbase, MUSB_TXFIFOADD); +} + +static inline u8 musb_read_rxfifosz(void __iomem *mbase) +{ + return musb_readb(mbase, MUSB_RXFIFOSZ); +} + +static inline u16 musb_read_rxfifoadd(void __iomem *mbase) +{ + return musb_readw(mbase, MUSB_RXFIFOADD); +} + +static inline u8 musb_read_ulpi_buscontrol(void __iomem *mbase) +{ + return musb_readb(mbase, MUSB_ULPI_BUSCONTROL); +} + +static inline u8 musb_read_configdata(void __iomem *mbase) +{ + musb_writeb(mbase, MUSB_INDEX, 0); + return musb_readb(mbase, 0x10 + MUSB_CONFIGDATA); +} + +static inline u16 musb_read_hwvers(void __iomem *mbase) +{ + return 
musb_readw(mbase, MUSB_HWVERS); +} + +static inline void __iomem *musb_read_target_reg_base(u8 i, void __iomem *mbase) +{ + return (MUSB_BUSCTL_OFFSET(i, 0) + mbase); +} + +static inline void musb_write_rxfunaddr(void __iomem *ep_target_regs, + u8 qh_addr_reg) +{ + musb_writeb(ep_target_regs, MUSB_RXFUNCADDR, qh_addr_reg); +} + +static inline void musb_write_rxhubaddr(void __iomem *ep_target_regs, + u8 qh_h_addr_reg) +{ + musb_writeb(ep_target_regs, MUSB_RXHUBADDR, qh_h_addr_reg); +} + +static inline void musb_write_rxhubport(void __iomem *ep_target_regs, + u8 qh_h_port_reg) +{ + musb_writeb(ep_target_regs, MUSB_RXHUBPORT, qh_h_port_reg); +} + +static inline void musb_write_txfunaddr(void __iomem *mbase, u8 epnum, + u8 qh_addr_reg) +{ + musb_writeb(mbase, MUSB_BUSCTL_OFFSET(epnum, MUSB_TXFUNCADDR), + qh_addr_reg); +} + +static inline void musb_write_txhubaddr(void __iomem *mbase, u8 epnum, + u8 qh_addr_reg) +{ + musb_writeb(mbase, MUSB_BUSCTL_OFFSET(epnum, MUSB_TXHUBADDR), + qh_addr_reg); +} + +static inline void musb_write_txhubport(void __iomem *mbase, u8 epnum, + u8 qh_h_port_reg) +{ + musb_writeb(mbase, MUSB_BUSCTL_OFFSET(epnum, MUSB_TXHUBPORT), + qh_h_port_reg); +} + +static inline u8 musb_read_rxfunaddr(void __iomem *mbase, u8 epnum) +{ + return musb_readb(mbase, MUSB_BUSCTL_OFFSET(epnum, MUSB_RXFUNCADDR)); +} + +static inline u8 musb_read_rxhubaddr(void __iomem *mbase, u8 epnum) +{ + return musb_readb(mbase, MUSB_BUSCTL_OFFSET(epnum, MUSB_RXHUBADDR)); +} + +static inline u8 musb_read_rxhubport(void __iomem *mbase, u8 epnum) +{ + return musb_readb(mbase, MUSB_BUSCTL_OFFSET(epnum, MUSB_RXHUBPORT)); +} + +static inline u8 musb_read_txfunaddr(void __iomem *mbase, u8 epnum) +{ + return musb_readb(mbase, MUSB_BUSCTL_OFFSET(epnum, MUSB_TXFUNCADDR)); +} + +static inline u8 musb_read_txhubaddr(void __iomem *mbase, u8 epnum) +{ + return musb_readb(mbase, MUSB_BUSCTL_OFFSET(epnum, MUSB_TXHUBADDR)); +} + +static inline u8 musb_read_txhubport(void __iomem *mbase, u8 epnum) +{ + return musb_readb(mbase, MUSB_BUSCTL_OFFSET(epnum, MUSB_TXHUBPORT)); +} + +#else /* CONFIG_BLACKFIN */ + +#define USB_BASE USB_FADDR +#define USB_OFFSET(reg) (reg - USB_BASE) + +/* + * Common USB registers + */ +#define MUSB_FADDR USB_OFFSET(USB_FADDR) /* 8-bit */ +#define MUSB_POWER USB_OFFSET(USB_POWER) /* 8-bit */ +#define MUSB_INTRTX USB_OFFSET(USB_INTRTX) /* 16-bit */ +#define MUSB_INTRRX USB_OFFSET(USB_INTRRX) +#define MUSB_INTRTXE USB_OFFSET(USB_INTRTXE) +#define MUSB_INTRRXE USB_OFFSET(USB_INTRRXE) +#define MUSB_INTRUSB USB_OFFSET(USB_INTRUSB) /* 8 bit */ +#define MUSB_INTRUSBE USB_OFFSET(USB_INTRUSBE)/* 8 bit */ +#define MUSB_FRAME USB_OFFSET(USB_FRAME) +#define MUSB_INDEX USB_OFFSET(USB_INDEX) /* 8 bit */ +#define MUSB_TESTMODE USB_OFFSET(USB_TESTMODE)/* 8 bit */ + +/* Get offset for a given FIFO from musb->mregs */ +#define MUSB_FIFO_OFFSET(epnum) \ + (USB_OFFSET(USB_EP0_FIFO) + ((epnum) * 8)) + +/* + * Additional Control Registers + */ + +#define MUSB_DEVCTL USB_OFFSET(USB_OTG_DEV_CTL) /* 8 bit */ + +#define MUSB_LINKINFO USB_OFFSET(USB_LINKINFO)/* 8 bit */ +#define MUSB_VPLEN USB_OFFSET(USB_VPLEN) /* 8 bit */ +#define MUSB_HS_EOF1 USB_OFFSET(USB_HS_EOF1) /* 8 bit */ +#define MUSB_FS_EOF1 USB_OFFSET(USB_FS_EOF1) /* 8 bit */ +#define MUSB_LS_EOF1 USB_OFFSET(USB_LS_EOF1) /* 8 bit */ + +/* Offsets to endpoint registers */ +#define MUSB_TXMAXP 0x00 +#define MUSB_TXCSR 0x04 +#define MUSB_CSR0 MUSB_TXCSR /* Re-used for EP0 */ +#define MUSB_RXMAXP 0x08 +#define MUSB_RXCSR 0x0C +#define MUSB_RXCOUNT 0x10 +#define 
MUSB_COUNT0 MUSB_RXCOUNT /* Re-used for EP0 */ +#define MUSB_TXTYPE 0x14 +#define MUSB_TYPE0 MUSB_TXTYPE /* Re-used for EP0 */ +#define MUSB_TXINTERVAL 0x18 +#define MUSB_NAKLIMIT0 MUSB_TXINTERVAL /* Re-used for EP0 */ +#define MUSB_RXTYPE 0x1C +#define MUSB_RXINTERVAL 0x20 +#define MUSB_TXCOUNT 0x28 + +/* Offsets to endpoint registers in indexed model (using INDEX register) */ +#define MUSB_INDEXED_OFFSET(_epnum, _offset) \ + (0x40 + (_offset)) + +/* Offsets to endpoint registers in flat models */ +#define MUSB_FLAT_OFFSET(_epnum, _offset) \ + (USB_OFFSET(USB_EP_NI0_TXMAXP) + (0x40 * (_epnum)) + (_offset)) + +/* Not implemented - HW has separate Tx/Rx FIFO */ +#define MUSB_TXCSR_MODE 0x0000 + +static inline void musb_write_txfifosz(void __iomem *mbase, u8 c_size) +{ +} + +static inline void musb_write_txfifoadd(void __iomem *mbase, u16 c_off) +{ +} + +static inline void musb_write_rxfifosz(void __iomem *mbase, u8 c_size) +{ +} + +static inline void musb_write_rxfifoadd(void __iomem *mbase, u16 c_off) +{ +} + +static inline void musb_write_ulpi_buscontrol(void __iomem *mbase, u8 val) +{ +} + +static inline u8 musb_read_txfifosz(void __iomem *mbase) +{ + return 0; +} + +static inline u16 musb_read_txfifoadd(void __iomem *mbase) +{ + return 0; +} + +static inline u8 musb_read_rxfifosz(void __iomem *mbase) +{ + return 0; +} + +static inline u16 musb_read_rxfifoadd(void __iomem *mbase) +{ + return 0; +} + +static inline u8 musb_read_ulpi_buscontrol(void __iomem *mbase) +{ + return 0; +} + +static inline u8 musb_read_configdata(void __iomem *mbase) +{ + return 0; +} + +static inline u16 musb_read_hwvers(void __iomem *mbase) +{ + /* + * This register is invisible on Blackfin, actually the MUSB + * RTL version of Blackfin is 1.9, so just harcode its value. 
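+	 * The stub below therefore just returns the 1.9 constant instead
+	 * of reading a register that is not visible on this part.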
+ */ + return MUSB_HWVERS_1900; +} + +static inline void __iomem *musb_read_target_reg_base(u8 i, void __iomem *mbase) +{ + return NULL; +} + +static inline void musb_write_rxfunaddr(void __iomem *ep_target_regs, + u8 qh_addr_req) +{ +} + +static inline void musb_write_rxhubaddr(void __iomem *ep_target_regs, + u8 qh_h_addr_reg) +{ +} + +static inline void musb_write_rxhubport(void __iomem *ep_target_regs, + u8 qh_h_port_reg) +{ +} + +static inline void musb_write_txfunaddr(void __iomem *mbase, u8 epnum, + u8 qh_addr_reg) +{ +} + +static inline void musb_write_txhubaddr(void __iomem *mbase, u8 epnum, + u8 qh_addr_reg) +{ +} + +static inline void musb_write_txhubport(void __iomem *mbase, u8 epnum, + u8 qh_h_port_reg) +{ +} + +static inline u8 musb_read_rxfunaddr(void __iomem *mbase, u8 epnum) +{ + return 0; +} + +static inline u8 musb_read_rxhubaddr(void __iomem *mbase, u8 epnum) +{ + return 0; +} + +static inline u8 musb_read_rxhubport(void __iomem *mbase, u8 epnum) +{ + return 0; +} + +static inline u8 musb_read_txfunaddr(void __iomem *mbase, u8 epnum) +{ + return 0; +} + +static inline u8 musb_read_txhubaddr(void __iomem *mbase, u8 epnum) +{ + return 0; +} + +static inline u8 musb_read_txhubport(void __iomem *mbase, u8 epnum) +{ + return 0; +} + +#endif /* CONFIG_BLACKFIN */ + +#endif /* __MUSB_REGS_H__ */ diff --git a/drivers/usb/musb-new/musb_uboot.c b/drivers/usb/musb-new/musb_uboot.c new file mode 100644 index 00000000000..762cbc11df1 --- /dev/null +++ b/drivers/usb/musb-new/musb_uboot.c @@ -0,0 +1,237 @@ +#include <common.h> +#include <asm/errno.h> +#include <linux/usb/ch9.h> +#include <linux/usb/gadget.h> + +#define __UBOOT__ +#include <usb.h> +#include "linux-compat.h" +#include "usb-compat.h" +#include "musb_core.h" +#include "musb_host.h" +#include "musb_gadget.h" + +#ifdef CONFIG_MUSB_HOST +static struct musb *host; +static struct usb_hcd hcd; +static enum usb_device_speed host_speed; + +static void musb_host_complete_urb(struct urb *urb) +{ + urb->dev->status &= ~USB_ST_NOT_PROC; + urb->dev->act_len = urb->actual_length; +} + +static struct usb_host_endpoint hep; +static struct urb urb; + +static struct urb *construct_urb(struct usb_device *dev, int endpoint_type, + unsigned long pipe, void *buffer, int len, + struct devrequest *setup, int interval) +{ + int epnum = usb_pipeendpoint(pipe); + int is_in = usb_pipein(pipe); + + memset(&urb, 0, sizeof(struct urb)); + memset(&hep, 0, sizeof(struct usb_host_endpoint)); + INIT_LIST_HEAD(&hep.urb_list); + INIT_LIST_HEAD(&urb.urb_list); + urb.ep = &hep; + urb.complete = musb_host_complete_urb; + urb.status = -EINPROGRESS; + urb.dev = dev; + urb.pipe = pipe; + urb.transfer_buffer = buffer; + urb.transfer_dma = (unsigned long)buffer; + urb.transfer_buffer_length = len; + urb.setup_packet = (unsigned char *)setup; + + urb.ep->desc.wMaxPacketSize = + __cpu_to_le16(is_in ? dev->epmaxpacketin[epnum] : + dev->epmaxpacketout[epnum]); + urb.ep->desc.bmAttributes = endpoint_type; + urb.ep->desc.bEndpointAddress = + (is_in ? 
USB_DIR_IN : USB_DIR_OUT) | epnum; + urb.ep->desc.bInterval = interval; + + return &urb; +} + +#define MUSB_HOST_TIMEOUT 0x3ffffff + +static int submit_urb(struct usb_hcd *hcd, struct urb *urb) +{ + struct musb *host = hcd->hcd_priv; + int ret; + int timeout; + + ret = musb_urb_enqueue(hcd, urb, 0); + if (ret < 0) { + printf("Failed to enqueue URB to controller\n"); + return ret; + } + + timeout = MUSB_HOST_TIMEOUT; + do { + if (ctrlc()) + return -EIO; + host->isr(0, host); + } while ((urb->dev->status & USB_ST_NOT_PROC) && --timeout); + + return urb->status; +} + +int submit_control_msg(struct usb_device *dev, unsigned long pipe, + void *buffer, int len, struct devrequest *setup) +{ + struct urb *urb = construct_urb(dev, USB_ENDPOINT_XFER_CONTROL, pipe, + buffer, len, setup, 0); + + /* Fix speed for non hub-attached devices */ + if (!dev->parent) + dev->speed = host_speed; + + return submit_urb(&hcd, urb); +} + + +int submit_bulk_msg(struct usb_device *dev, unsigned long pipe, + void *buffer, int len) +{ + struct urb *urb = construct_urb(dev, USB_ENDPOINT_XFER_BULK, pipe, + buffer, len, NULL, 0); + return submit_urb(&hcd, urb); +} + +int submit_int_msg(struct usb_device *dev, unsigned long pipe, + void *buffer, int len, int interval) +{ + struct urb *urb = construct_urb(dev, USB_ENDPOINT_XFER_INT, pipe, + buffer, len, NULL, interval); + return submit_urb(&hcd, urb); +} + +int usb_lowlevel_init(int index, void **controller) +{ + u8 power; + void *mbase; + int timeout = MUSB_HOST_TIMEOUT; + + if (!host) { + printf("MUSB host is not registered\n"); + return -ENODEV; + } + + musb_start(host); + mbase = host->mregs; + do { + if (musb_readb(mbase, MUSB_DEVCTL) & MUSB_DEVCTL_HM) + break; + } while (--timeout); + if (!timeout) + return -ENODEV; + + power = musb_readb(mbase, MUSB_POWER); + musb_writeb(mbase, MUSB_POWER, MUSB_POWER_RESET | power); + udelay(30000); + power = musb_readb(mbase, MUSB_POWER); + musb_writeb(mbase, MUSB_POWER, ~MUSB_POWER_RESET & power); + host->isr(0, host); + host_speed = (musb_readb(mbase, MUSB_POWER) & MUSB_POWER_HSMODE) ? + USB_SPEED_HIGH : + (musb_readb(mbase, MUSB_DEVCTL) & MUSB_DEVCTL_FSDEV) ? 
+ USB_SPEED_FULL : USB_SPEED_LOW; + host->is_active = 1; + hcd.hcd_priv = host; + + return 0; +} + +int usb_lowlevel_stop(int index) +{ + if (!host) { + printf("MUSB host is not registered\n"); + return -ENODEV; + } + + musb_stop(host); + return 0; +} +#endif /* CONFIG_MUSB_HOST */ + +#ifdef CONFIG_MUSB_GADGET +static struct musb *gadget; + +int usb_gadget_handle_interrupts(void) +{ + if (!gadget || !gadget->isr) + return -EINVAL; + + return gadget->isr(0, gadget); +} + +int usb_gadget_register_driver(struct usb_gadget_driver *driver) +{ + int ret; + + if (!driver || driver->speed < USB_SPEED_HIGH || !driver->bind || + !driver->setup) { + printf("bad parameter.\n"); + return -EINVAL; + } + + if (!gadget) { + printf("Controller uninitialized\n"); + return -ENXIO; + } + + ret = musb_gadget_start(&gadget->g, driver); + if (ret < 0) { + printf("gadget_start failed with %d\n", ret); + return ret; + } + + ret = driver->bind(&gadget->g); + if (ret < 0) { + printf("bind failed with %d\n", ret); + return ret; + } + + return 0; +} + +int usb_gadget_unregister_driver(struct usb_gadget_driver *driver) +{ + /* TODO: implement me */ + return 0; +} +#endif /* CONFIG_MUSB_GADGET */ + +int musb_register(struct musb_hdrc_platform_data *plat, void *bdata, + void *ctl_regs) +{ + struct musb **musbp; + + switch (plat->mode) { +#ifdef CONFIG_MUSB_HOST + case MUSB_HOST: + musbp = &host; + break; +#endif +#ifdef CONFIG_MUSB_GADGET + case MUSB_PERIPHERAL: + musbp = &gadget; + break; +#endif + default: + return -EINVAL; + } + + *musbp = musb_init_controller(plat, (struct device *)bdata, ctl_regs); + if (!musbp) { + printf("Failed to init the controller\n"); + return -EIO; + } + + return 0; +} diff --git a/drivers/usb/musb-new/omap2430.c b/drivers/usb/musb-new/omap2430.c new file mode 100644 index 00000000000..b1c4dc78285 --- /dev/null +++ b/drivers/usb/musb-new/omap2430.c @@ -0,0 +1,626 @@ +/* + * Copyright (C) 2005-2007 by Texas Instruments + * Some code has been taken from tusb6010.c + * Copyrights for that are attributable to: + * Copyright (C) 2006 Nokia Corporation + * Tony Lindgren <tony@atomide.com> + * + * This file is part of the Inventra Controller Driver for Linux. + * + * The Inventra Controller Driver for Linux is free software; you + * can redistribute it and/or modify it under the terms of the GNU + * General Public License version 2 as published by the Free Software + * Foundation. + * + * The Inventra Controller Driver for Linux is distributed in + * the hope that it will be useful, but WITHOUT ANY WARRANTY; + * without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public + * License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with The Inventra Controller Driver for Linux ; if not, + * write to the Free Software Foundation, Inc., 59 Temple Place, + * Suite 330, Boston, MA 02111-1307 USA + * + */ +#define __UBOOT__ +#ifndef __UBOOT__ +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/sched.h> +#include <linux/init.h> +#include <linux/list.h> +#include <linux/io.h> +#include <linux/platform_device.h> +#include <linux/dma-mapping.h> +#include <linux/pm_runtime.h> +#include <linux/err.h> +#include <linux/usb/musb-omap.h> +#else +#include <common.h> +#include <asm/omap_musb.h> +#include <twl4030.h> +#include "linux-compat.h" +#endif + +#include "musb_core.h" +#include "omap2430.h" + +#ifndef __UBOOT__ +struct omap2430_glue { + struct device *dev; + struct platform_device *musb; + enum omap_musb_vbus_id_status status; + struct work_struct omap_musb_mailbox_work; +}; +#define glue_to_musb(g) platform_get_drvdata(g->musb) + +struct omap2430_glue *_glue; + +static struct timer_list musb_idle_timer; + +static void musb_do_idle(unsigned long _musb) +{ + struct musb *musb = (void *)_musb; + unsigned long flags; + u8 power; + u8 devctl; + + spin_lock_irqsave(&musb->lock, flags); + + switch (musb->xceiv->state) { + case OTG_STATE_A_WAIT_BCON: + + devctl = musb_readb(musb->mregs, MUSB_DEVCTL); + if (devctl & MUSB_DEVCTL_BDEVICE) { + musb->xceiv->state = OTG_STATE_B_IDLE; + MUSB_DEV_MODE(musb); + } else { + musb->xceiv->state = OTG_STATE_A_IDLE; + MUSB_HST_MODE(musb); + } + break; + case OTG_STATE_A_SUSPEND: + /* finish RESUME signaling? */ + if (musb->port1_status & MUSB_PORT_STAT_RESUME) { + power = musb_readb(musb->mregs, MUSB_POWER); + power &= ~MUSB_POWER_RESUME; + dev_dbg(musb->controller, "root port resume stopped, power %02x\n", power); + musb_writeb(musb->mregs, MUSB_POWER, power); + musb->is_active = 1; + musb->port1_status &= ~(USB_PORT_STAT_SUSPEND + | MUSB_PORT_STAT_RESUME); + musb->port1_status |= USB_PORT_STAT_C_SUSPEND << 16; + usb_hcd_poll_rh_status(musb_to_hcd(musb)); + /* NOTE: it might really be A_WAIT_BCON ... 
*/ + musb->xceiv->state = OTG_STATE_A_HOST; + } + break; + case OTG_STATE_A_HOST: + devctl = musb_readb(musb->mregs, MUSB_DEVCTL); + if (devctl & MUSB_DEVCTL_BDEVICE) + musb->xceiv->state = OTG_STATE_B_IDLE; + else + musb->xceiv->state = OTG_STATE_A_WAIT_BCON; + default: + break; + } + spin_unlock_irqrestore(&musb->lock, flags); +} + + +static void omap2430_musb_try_idle(struct musb *musb, unsigned long timeout) +{ + unsigned long default_timeout = jiffies + msecs_to_jiffies(3); + static unsigned long last_timer; + + if (timeout == 0) + timeout = default_timeout; + + /* Never idle if active, or when VBUS timeout is not set as host */ + if (musb->is_active || ((musb->a_wait_bcon == 0) + && (musb->xceiv->state == OTG_STATE_A_WAIT_BCON))) { + dev_dbg(musb->controller, "%s active, deleting timer\n", + otg_state_string(musb->xceiv->state)); + del_timer(&musb_idle_timer); + last_timer = jiffies; + return; + } + + if (time_after(last_timer, timeout)) { + if (!timer_pending(&musb_idle_timer)) + last_timer = timeout; + else { + dev_dbg(musb->controller, "Longer idle timer already pending, ignoring\n"); + return; + } + } + last_timer = timeout; + + dev_dbg(musb->controller, "%s inactive, for idle timer for %lu ms\n", + otg_state_string(musb->xceiv->state), + (unsigned long)jiffies_to_msecs(timeout - jiffies)); + mod_timer(&musb_idle_timer, timeout); +} + +static void omap2430_musb_set_vbus(struct musb *musb, int is_on) +{ + struct usb_otg *otg = musb->xceiv->otg; + u8 devctl; + unsigned long timeout = jiffies + msecs_to_jiffies(1000); + int ret = 1; + /* HDRC controls CPEN, but beware current surges during device + * connect. They can trigger transient overcurrent conditions + * that must be ignored. + */ + + devctl = musb_readb(musb->mregs, MUSB_DEVCTL); + + if (is_on) { + if (musb->xceiv->state == OTG_STATE_A_IDLE) { + /* start the session */ + devctl |= MUSB_DEVCTL_SESSION; + musb_writeb(musb->mregs, MUSB_DEVCTL, devctl); + /* + * Wait for the musb to set as A device to enable the + * VBUS + */ + while (musb_readb(musb->mregs, MUSB_DEVCTL) & 0x80) { + + cpu_relax(); + + if (time_after(jiffies, timeout)) { + dev_err(musb->controller, + "configured as A device timeout"); + ret = -EINVAL; + break; + } + } + + if (ret && otg->set_vbus) + otg_set_vbus(otg, 1); + } else { + musb->is_active = 1; + otg->default_a = 1; + musb->xceiv->state = OTG_STATE_A_WAIT_VRISE; + devctl |= MUSB_DEVCTL_SESSION; + MUSB_HST_MODE(musb); + } + } else { + musb->is_active = 0; + + /* NOTE: we're skipping A_WAIT_VFALL -> A_IDLE and + * jumping right to B_IDLE... 
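+		 * i.e. the session bit is simply cleared and the core is
+		 * put straight back into B-device idle, without modelling
+		 * the OTG VBUS-fall wait state.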
+ */ + + otg->default_a = 0; + musb->xceiv->state = OTG_STATE_B_IDLE; + devctl &= ~MUSB_DEVCTL_SESSION; + + MUSB_DEV_MODE(musb); + } + musb_writeb(musb->mregs, MUSB_DEVCTL, devctl); + + dev_dbg(musb->controller, "VBUS %s, devctl %02x " + /* otg %3x conf %08x prcm %08x */ "\n", + otg_state_string(musb->xceiv->state), + musb_readb(musb->mregs, MUSB_DEVCTL)); +} + +static int omap2430_musb_set_mode(struct musb *musb, u8 musb_mode) +{ + u8 devctl = musb_readb(musb->mregs, MUSB_DEVCTL); + + devctl |= MUSB_DEVCTL_SESSION; + musb_writeb(musb->mregs, MUSB_DEVCTL, devctl); + + return 0; +} +#endif + +static inline void omap2430_low_level_exit(struct musb *musb) +{ + u32 l; + + /* in any role */ + l = musb_readl(musb->mregs, OTG_FORCESTDBY); + l |= ENABLEFORCE; /* enable MSTANDBY */ + musb_writel(musb->mregs, OTG_FORCESTDBY, l); +} + +static inline void omap2430_low_level_init(struct musb *musb) +{ + u32 l; + + l = musb_readl(musb->mregs, OTG_FORCESTDBY); + l &= ~ENABLEFORCE; /* disable MSTANDBY */ + musb_writel(musb->mregs, OTG_FORCESTDBY, l); +} + +#ifndef __UBOOT__ +void omap_musb_mailbox(enum omap_musb_vbus_id_status status) +{ + struct omap2430_glue *glue = _glue; + struct musb *musb = glue_to_musb(glue); + + glue->status = status; + if (!musb) { + dev_err(glue->dev, "musb core is not yet ready\n"); + return; + } + + schedule_work(&glue->omap_musb_mailbox_work); +} +EXPORT_SYMBOL_GPL(omap_musb_mailbox); + +static void omap_musb_set_mailbox(struct omap2430_glue *glue) +{ + struct musb *musb = glue_to_musb(glue); + struct device *dev = musb->controller; + struct musb_hdrc_platform_data *pdata = dev->platform_data; + struct omap_musb_board_data *data = pdata->board_data; + struct usb_otg *otg = musb->xceiv->otg; + + switch (glue->status) { + case OMAP_MUSB_ID_GROUND: + dev_dbg(dev, "ID GND\n"); + + otg->default_a = true; + musb->xceiv->state = OTG_STATE_A_IDLE; + musb->xceiv->last_event = USB_EVENT_ID; + if (!is_otg_enabled(musb) || musb->gadget_driver) { + pm_runtime_get_sync(dev); + usb_phy_init(musb->xceiv); + omap2430_musb_set_vbus(musb, 1); + } + break; + + case OMAP_MUSB_VBUS_VALID: + dev_dbg(dev, "VBUS Connect\n"); + + otg->default_a = false; + musb->xceiv->state = OTG_STATE_B_IDLE; + musb->xceiv->last_event = USB_EVENT_VBUS; + if (musb->gadget_driver) + pm_runtime_get_sync(dev); + usb_phy_init(musb->xceiv); + break; + + case OMAP_MUSB_ID_FLOAT: + case OMAP_MUSB_VBUS_OFF: + dev_dbg(dev, "VBUS Disconnect\n"); + + musb->xceiv->last_event = USB_EVENT_NONE; + if (is_otg_enabled(musb) || is_peripheral_enabled(musb)) + if (musb->gadget_driver) { + pm_runtime_mark_last_busy(dev); + pm_runtime_put_autosuspend(dev); + } + + if (data->interface_type == MUSB_INTERFACE_UTMI) { + if (musb->xceiv->otg->set_vbus) + otg_set_vbus(musb->xceiv->otg, 0); + } + usb_phy_shutdown(musb->xceiv); + break; + default: + dev_dbg(dev, "ID float\n"); + } +} + + +static void omap_musb_mailbox_work(struct work_struct *mailbox_work) +{ + struct omap2430_glue *glue = container_of(mailbox_work, + struct omap2430_glue, omap_musb_mailbox_work); + omap_musb_set_mailbox(glue); +} +#endif + +static int omap2430_musb_init(struct musb *musb) +{ + u32 l; + int status = 0; +#ifndef __UBOOT__ + struct device *dev = musb->controller; + struct omap2430_glue *glue = dev_get_drvdata(dev->parent); + struct musb_hdrc_platform_data *plat = dev->platform_data; + struct omap_musb_board_data *data = plat->board_data; +#else + struct omap_musb_board_data *data = + (struct omap_musb_board_data *)musb->controller; +#endif + + +#ifndef __UBOOT__ + 
/* We require some kind of external transceiver, hooked + * up through ULPI. TWL4030-family PMICs include one, + * which needs a driver, drivers aren't always needed. + */ + musb->xceiv = devm_usb_get_phy(dev, USB_PHY_TYPE_USB2); + if (IS_ERR_OR_NULL(musb->xceiv)) { + pr_err("HS USB OTG: no transceiver configured\n"); + return -ENODEV; + } + + status = pm_runtime_get_sync(dev); + if (status < 0) { + dev_err(dev, "pm_runtime_get_sync FAILED %d\n", status); + goto err1; + } +#endif + + l = musb_readl(musb->mregs, OTG_INTERFSEL); + + if (data->interface_type == MUSB_INTERFACE_UTMI) { + /* OMAP4 uses Internal PHY GS70 which uses UTMI interface */ + l &= ~ULPI_12PIN; /* Disable ULPI */ + l |= UTMI_8BIT; /* Enable UTMI */ + } else { + l |= ULPI_12PIN; + } + + musb_writel(musb->mregs, OTG_INTERFSEL, l); + + pr_debug("HS USB OTG: revision 0x%x, sysconfig 0x%02x, " + "sysstatus 0x%x, intrfsel 0x%x, simenable 0x%x\n", + musb_readl(musb->mregs, OTG_REVISION), + musb_readl(musb->mregs, OTG_SYSCONFIG), + musb_readl(musb->mregs, OTG_SYSSTATUS), + musb_readl(musb->mregs, OTG_INTERFSEL), + musb_readl(musb->mregs, OTG_SIMENABLE)); + +#ifndef __UBOOT__ + setup_timer(&musb_idle_timer, musb_do_idle, (unsigned long) musb); + + if (glue->status != OMAP_MUSB_UNKNOWN) + omap_musb_set_mailbox(glue); + + pm_runtime_put_noidle(musb->controller); +#endif + return 0; + +err1: + return status; +} + +static void omap2430_musb_enable(struct musb *musb) +{ +#ifndef __UBOOT__ + u8 devctl; + unsigned long timeout = jiffies + msecs_to_jiffies(1000); + struct device *dev = musb->controller; + struct omap2430_glue *glue = dev_get_drvdata(dev->parent); + struct musb_hdrc_platform_data *pdata = dev->platform_data; + struct omap_musb_board_data *data = pdata->board_data; + + switch (glue->status) { + + case OMAP_MUSB_ID_GROUND: + usb_phy_init(musb->xceiv); + if (data->interface_type != MUSB_INTERFACE_UTMI) + break; + devctl = musb_readb(musb->mregs, MUSB_DEVCTL); + /* start the session */ + devctl |= MUSB_DEVCTL_SESSION; + musb_writeb(musb->mregs, MUSB_DEVCTL, devctl); + while (musb_readb(musb->mregs, MUSB_DEVCTL) & + MUSB_DEVCTL_BDEVICE) { + cpu_relax(); + + if (time_after(jiffies, timeout)) { + dev_err(dev, "configured as A device timeout"); + break; + } + } + break; + + case OMAP_MUSB_VBUS_VALID: + usb_phy_init(musb->xceiv); + break; + + default: + break; + } +#else +#ifdef CONFIG_TWL4030_USB + if (twl4030_usb_ulpi_init()) { + serial_printf("ERROR: %s Could not initialize PHY\n", + __PRETTY_FUNCTION__); + } +#endif +#endif +} + +static void omap2430_musb_disable(struct musb *musb) +{ +#ifndef __UBOOT__ + struct device *dev = musb->controller; + struct omap2430_glue *glue = dev_get_drvdata(dev->parent); + + if (glue->status != OMAP_MUSB_UNKNOWN) + usb_phy_shutdown(musb->xceiv); +#endif +} + +static int omap2430_musb_exit(struct musb *musb) +{ + del_timer_sync(&musb_idle_timer); + + omap2430_low_level_exit(musb); + + return 0; +} + +#ifndef __UBOOT__ +static const struct musb_platform_ops omap2430_ops = { +#else +const struct musb_platform_ops omap2430_ops = { +#endif + .init = omap2430_musb_init, + .exit = omap2430_musb_exit, + +#ifndef __UBOOT__ + .set_mode = omap2430_musb_set_mode, + .try_idle = omap2430_musb_try_idle, + + .set_vbus = omap2430_musb_set_vbus, +#endif + + .enable = omap2430_musb_enable, + .disable = omap2430_musb_disable, +}; + +#ifndef __UBOOT__ +static u64 omap2430_dmamask = DMA_BIT_MASK(32); + +static int __devinit omap2430_probe(struct platform_device *pdev) +{ + struct musb_hdrc_platform_data *pdata = 
pdev->dev.platform_data; + struct platform_device *musb; + struct omap2430_glue *glue; + int ret = -ENOMEM; + + glue = devm_kzalloc(&pdev->dev, sizeof(*glue), GFP_KERNEL); + if (!glue) { + dev_err(&pdev->dev, "failed to allocate glue context\n"); + goto err0; + } + + musb = platform_device_alloc("musb-hdrc", -1); + if (!musb) { + dev_err(&pdev->dev, "failed to allocate musb device\n"); + goto err0; + } + + musb->dev.parent = &pdev->dev; + musb->dev.dma_mask = &omap2430_dmamask; + musb->dev.coherent_dma_mask = omap2430_dmamask; + + glue->dev = &pdev->dev; + glue->musb = musb; + glue->status = OMAP_MUSB_UNKNOWN; + + pdata->platform_ops = &omap2430_ops; + + platform_set_drvdata(pdev, glue); + + /* + * REVISIT if we ever have two instances of the wrapper, we will be + * in big trouble + */ + _glue = glue; + + INIT_WORK(&glue->omap_musb_mailbox_work, omap_musb_mailbox_work); + + ret = platform_device_add_resources(musb, pdev->resource, + pdev->num_resources); + if (ret) { + dev_err(&pdev->dev, "failed to add resources\n"); + goto err1; + } + + ret = platform_device_add_data(musb, pdata, sizeof(*pdata)); + if (ret) { + dev_err(&pdev->dev, "failed to add platform_data\n"); + goto err1; + } + + pm_runtime_enable(&pdev->dev); + + ret = platform_device_add(musb); + if (ret) { + dev_err(&pdev->dev, "failed to register musb device\n"); + goto err1; + } + + return 0; + +err1: + platform_device_put(musb); + +err0: + return ret; +} + +static int __devexit omap2430_remove(struct platform_device *pdev) +{ + struct omap2430_glue *glue = platform_get_drvdata(pdev); + + cancel_work_sync(&glue->omap_musb_mailbox_work); + platform_device_del(glue->musb); + platform_device_put(glue->musb); + + return 0; +} + +#ifdef CONFIG_PM + +static int omap2430_runtime_suspend(struct device *dev) +{ + struct omap2430_glue *glue = dev_get_drvdata(dev); + struct musb *musb = glue_to_musb(glue); + + if (musb) { + musb->context.otg_interfsel = musb_readl(musb->mregs, + OTG_INTERFSEL); + + omap2430_low_level_exit(musb); + usb_phy_set_suspend(musb->xceiv, 1); + } + + return 0; +} + +static int omap2430_runtime_resume(struct device *dev) +{ + struct omap2430_glue *glue = dev_get_drvdata(dev); + struct musb *musb = glue_to_musb(glue); + + if (musb) { + omap2430_low_level_init(musb); + musb_writel(musb->mregs, OTG_INTERFSEL, + musb->context.otg_interfsel); + + usb_phy_set_suspend(musb->xceiv, 0); + } + + return 0; +} + +static struct dev_pm_ops omap2430_pm_ops = { + .runtime_suspend = omap2430_runtime_suspend, + .runtime_resume = omap2430_runtime_resume, +}; + +#define DEV_PM_OPS (&omap2430_pm_ops) +#else +#define DEV_PM_OPS NULL +#endif + +static struct platform_driver omap2430_driver = { + .probe = omap2430_probe, + .remove = __devexit_p(omap2430_remove), + .driver = { + .name = "musb-omap2430", + .pm = DEV_PM_OPS, + }, +}; + +MODULE_DESCRIPTION("OMAP2PLUS MUSB Glue Layer"); +MODULE_AUTHOR("Felipe Balbi <balbi@ti.com>"); +MODULE_LICENSE("GPL v2"); + +static int __init omap2430_init(void) +{ + return platform_driver_register(&omap2430_driver); +} +subsys_initcall(omap2430_init); + +static void __exit omap2430_exit(void) +{ + platform_driver_unregister(&omap2430_driver); +} +module_exit(omap2430_exit); +#endif diff --git a/drivers/usb/musb-new/omap2430.h b/drivers/usb/musb-new/omap2430.h new file mode 100644 index 00000000000..3b795c248d1 --- /dev/null +++ b/drivers/usb/musb-new/omap2430.h @@ -0,0 +1,56 @@ +/* + * Copyright (C) 2005-2006 by Texas Instruments + * + * The Inventra Controller Driver for Linux is free software; you + * 
can redistribute it and/or modify it under the terms of the GNU + * General Public License version 2 as published by the Free Software + * Foundation. + */ + +#ifndef __MUSB_OMAP243X_H__ +#define __MUSB_OMAP243X_H__ + +#ifndef __UBOOT__ +#include <plat/usb.h> +#else +#undef RESETDONE +#endif + +/* + * OMAP2430-specific definitions + */ + +#define OTG_REVISION 0x400 + +#define OTG_SYSCONFIG 0x404 +# define MIDLEMODE 12 /* bit position */ +# define FORCESTDBY (0 << MIDLEMODE) +# define NOSTDBY (1 << MIDLEMODE) +# define SMARTSTDBY (2 << MIDLEMODE) + +# define SIDLEMODE 3 /* bit position */ +# define FORCEIDLE (0 << SIDLEMODE) +# define NOIDLE (1 << SIDLEMODE) +# define SMARTIDLE (2 << SIDLEMODE) + +# define ENABLEWAKEUP (1 << 2) +# define SOFTRST (1 << 1) +# define AUTOIDLE (1 << 0) + +#define OTG_SYSSTATUS 0x408 +# define RESETDONE (1 << 0) + +#define OTG_INTERFSEL 0x40c +# define EXTCP (1 << 2) +# define PHYSEL 0 /* bit position */ +# define UTMI_8BIT (0 << PHYSEL) +# define ULPI_12PIN (1 << PHYSEL) +# define ULPI_8PIN (2 << PHYSEL) + +#define OTG_SIMENABLE 0x410 +# define TM1 (1 << 0) + +#define OTG_FORCESTDBY 0x414 +# define ENABLEFORCE (1 << 0) + +#endif /* __MUSB_OMAP243X_H__ */ diff --git a/drivers/usb/musb-new/usb-compat.h b/drivers/usb/musb-new/usb-compat.h new file mode 100644 index 00000000000..27f656f0ce5 --- /dev/null +++ b/drivers/usb/musb-new/usb-compat.h @@ -0,0 +1,88 @@ +#ifndef __USB_COMPAT_H__ +#define __USB_COMPAT_H__ + +#include "usb.h" + +struct usb_hcd { + void *hcd_priv; +}; + +struct usb_host_endpoint { + struct usb_endpoint_descriptor desc; + struct list_head urb_list; + void *hcpriv; +}; + +/* + * urb->transfer_flags: + * + * Note: URB_DIR_IN/OUT is automatically set in usb_submit_urb(). + */ +#define URB_SHORT_NOT_OK 0x0001 /* report short reads as errors */ +#define URB_ZERO_PACKET 0x0040 /* Finish bulk OUT with short packet */ + +struct urb; + +typedef void (*usb_complete_t)(struct urb *); + +struct urb { + void *hcpriv; /* private data for host controller */ + struct list_head urb_list; /* list head for use by the urb's + * current owner */ + struct usb_device *dev; /* (in) pointer to associated device */ + struct usb_host_endpoint *ep; /* (internal) pointer to endpoint */ + unsigned int pipe; /* (in) pipe information */ + int status; /* (return) non-ISO status */ + unsigned int transfer_flags; /* (in) URB_SHORT_NOT_OK | ...*/ + void *transfer_buffer; /* (in) associated data buffer */ + dma_addr_t transfer_dma; /* (in) dma addr for transfer_buffer */ + u32 transfer_buffer_length; /* (in) data buffer length */ + u32 actual_length; /* (return) actual transfer length */ + unsigned char *setup_packet; /* (in) setup packet (control only) */ + int start_frame; /* (modify) start frame (ISO) */ + usb_complete_t complete; /* (in) completion routine */ +}; + +#define usb_hcd_link_urb_to_ep(hcd, urb) ({ \ + int ret = 0; \ + list_add_tail(&urb->urb_list, &urb->ep->urb_list); \ + ret; }) +#define usb_hcd_unlink_urb_from_ep(hcd, urb) list_del_init(&urb->urb_list) + +static inline void usb_hcd_giveback_urb(struct usb_hcd *hcd, + struct urb *urb, + int status) +{ + urb->status = status; + if (urb->complete) + urb->complete(urb); +} + +static inline int usb_hcd_unmap_urb_for_dma(struct usb_hcd *hcd, + struct urb *urb) +{ + /* TODO: add cache invalidation here */ + return 0; +} + +static inline u16 find_tt(struct usb_device *dev) +{ + u8 chid; + u8 hub; + + /* Find out the nearest parent which is high speed */ + while (dev->parent->parent != NULL) + if (dev->parent->speed != 
USB_SPEED_HIGH) + dev = dev->parent; + else + break; + + /* determine the port address at that hub */ + hub = dev->parent->devnum; + for (chid = 0; chid < USB_MAXCHILDREN; chid++) + if (dev->parent->children[chid] == dev) + break; + + return (hub << 8) | chid; +} +#endif /* __USB_COMPAT_H__ */ diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h index e9143692970..ec8a038c74c 100644 --- a/drivers/usb/musb/musb_core.h +++ b/drivers/usb/musb/musb_core.h @@ -34,7 +34,6 @@ #ifndef __MUSB_HDRC_DEFS_H__ #define __MUSB_HDRC_DEFS_H__ -#include <usb.h> #include <usb_defs.h> #include <asm/io.h> @@ -145,7 +144,7 @@ struct musb_regs { struct musb_epN_regs epN; } ep[16]; -} __attribute__((packed, aligned(USB_DMA_MINALIGN))); +} __attribute__((packed)); #endif /* diff --git a/drivers/usb/musb/musb_hcd.c b/drivers/usb/musb/musb_hcd.c index 06be38d1cf9..60e03a4bf73 100644 --- a/drivers/usb/musb/musb_hcd.c +++ b/drivers/usb/musb/musb_hcd.c @@ -22,6 +22,7 @@ */ #include <common.h> +#include <usb.h> #include "musb_hcd.h" /* MSC control transfers */ @@ -485,8 +486,8 @@ static int ctrlreq_in_status_phase(struct usb_device *dev) */ static u8 get_dev_speed(struct usb_device *dev) { - return (dev->speed & USB_SPEED_HIGH) ? MUSB_TYPE_SPEED_HIGH : - ((dev->speed & USB_SPEED_LOW) ? MUSB_TYPE_SPEED_LOW : + return (dev->speed == USB_SPEED_HIGH) ? MUSB_TYPE_SPEED_HIGH : + ((dev->speed == USB_SPEED_LOW) ? MUSB_TYPE_SPEED_LOW : MUSB_TYPE_SPEED_FULL); } diff --git a/drivers/video/Makefile b/drivers/video/Makefile index b3207c83c3c..170a358b528 100644 --- a/drivers/video/Makefile +++ b/drivers/video/Makefile @@ -40,6 +40,7 @@ COBJS-$(CONFIG_S6E63D6) += s6e63d6.o COBJS-$(CONFIG_LD9040) += ld9040.o COBJS-$(CONFIG_SED156X) += sed156x.o COBJS-$(CONFIG_VIDEO_AMBA) += amba.o +COBJS-$(CONFIG_VIDEO_COREBOOT) += coreboot_fb.o COBJS-$(CONFIG_VIDEO_CT69000) += ct69000.o videomodes.o COBJS-$(CONFIG_VIDEO_DA8XX) += da8xx-fb.o videomodes.o COBJS-$(CONFIG_VIDEO_MB862xx) += mb862xx.o videomodes.o diff --git a/drivers/video/atmel_hlcdfb.c b/drivers/video/atmel_hlcdfb.c index beb7fa396ea..b10ca4b6771 100644 --- a/drivers/video/atmel_hlcdfb.c +++ b/drivers/video/atmel_hlcdfb.c @@ -51,6 +51,18 @@ short console_row; #define lcdc_readl(reg) __raw_readl((reg)) #define lcdc_writel(reg, val) __raw_writel((val), (reg)) +/* + * the CLUT register map as following + * RCLUT(24 ~ 16), GCLUT(15 ~ 8), BCLUT(7 ~ 0) + */ +void lcd_setcolreg(ushort regno, ushort red, ushort green, ushort blue) +{ + lcdc_writel(((red << LCDC_BASECLUT_RCLUT_Pos) & LCDC_BASECLUT_RCLUT_Msk) + | ((green << LCDC_BASECLUT_GCLUT_Pos) & LCDC_BASECLUT_GCLUT_Msk) + | ((blue << LCDC_BASECLUT_BCLUT_Pos) & LCDC_BASECLUT_BCLUT_Msk), + panel_info.mmio + ATMEL_LCDC_LUT(regno)); +} + void lcd_ctrl_init(void *lcdbase) { unsigned long value; diff --git a/drivers/video/bus_vcxk.c b/drivers/video/bus_vcxk.c index 9c4714d50a5..a0607cf4336 100644 --- a/drivers/video/bus_vcxk.c +++ b/drivers/video/bus_vcxk.c @@ -153,7 +153,7 @@ int vcxk_init(unsigned long width, unsigned long height) #ifdef CONFIG_SYS_VCXK_DOUBLEBUFFERED double_bws_word = (u_short *)double_bws; double_bws_long = (u_long *)double_bws; - debug("%lx %lx %lx \n", double_bws, double_bws_word, double_bws_long); + debug("%px %px %px\n", double_bws, double_bws_word, double_bws_long); #endif display_width = width; display_height = height; diff --git a/drivers/video/cfb_console.c b/drivers/video/cfb_console.c index 9c67b63bf45..26f673a96a9 100644 --- a/drivers/video/cfb_console.c +++ b/drivers/video/cfb_console.c @@ 
-1515,6 +1515,13 @@ int video_display_bitmap(ulong bmp_image, int x, int y) padded_line = (((width * bpp + 7) / 8) + 3) & ~0x3; + /* + * Just ignore elements which are completely beyond screen + * dimensions. + */ + if ((x >= VIDEO_VISIBLE_COLS) || (y >= VIDEO_VISIBLE_ROWS)) + return 0; + #ifdef CONFIG_SPLASH_SCREEN_ALIGN if (x == BMP_ALIGN_CENTER) x = max(0, (VIDEO_VISIBLE_COLS - width) / 2); @@ -2257,3 +2264,47 @@ int drv_video_init(void) /* Return success */ return 1; } + +void video_position_cursor(unsigned col, unsigned row) +{ + console_col = min(col, CONSOLE_COLS - 1); + console_row = min(row, CONSOLE_ROWS - 1); +} + +int video_get_pixel_width(void) +{ + return VIDEO_VISIBLE_COLS; +} + +int video_get_pixel_height(void) +{ + return VIDEO_VISIBLE_ROWS; +} + +int video_get_screen_rows(void) +{ + return CONSOLE_ROWS; +} + +int video_get_screen_columns(void) +{ + return CONSOLE_COLS; +} + +void video_clear(void) +{ + if (!video_fb_address) + return; +#ifdef VIDEO_HW_RECTFILL + video_hw_rectfill(VIDEO_PIXEL_SIZE, /* bytes per pixel */ + 0, /* dest pos x */ + 0, /* dest pos y */ + VIDEO_VISIBLE_COLS, /* frame width */ + VIDEO_VISIBLE_ROWS, /* frame height */ + bgx /* fill color */ + ); +#else + memsetl(video_fb_address, + (VIDEO_VISIBLE_ROWS * VIDEO_LINE_LEN) / sizeof(int), bgx); +#endif +} diff --git a/drivers/video/coreboot_fb.c b/drivers/video/coreboot_fb.c new file mode 100644 index 00000000000..d93bd894a22 --- /dev/null +++ b/drivers/video/coreboot_fb.c @@ -0,0 +1,101 @@ +/* + * coreboot Framebuffer driver. + * + * Copyright (C) 2011 The Chromium OS authors + * + * See file CREDITS for list of people who contributed to this + * project. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, + * MA 02111-1307 USA + */ + +#include <common.h> +#include <asm/arch/tables.h> +#include <asm/arch/sysinfo.h> +#include <video_fb.h> +#include "videomodes.h" + +/* + * The Graphic Device + */ +GraphicDevice ctfb; + +static int parse_coreboot_table_fb(GraphicDevice *gdev) +{ + struct cb_framebuffer *fb = lib_sysinfo.framebuffer; + + /* If there is no framebuffer structure, bail out and keep + * running on the serial console. 
+ */ + if (!fb) + return 0; + + gdev->winSizeX = fb->x_resolution; + gdev->winSizeY = fb->y_resolution; + + gdev->plnSizeX = fb->x_resolution; + gdev->plnSizeY = fb->y_resolution; + + gdev->gdfBytesPP = fb->bits_per_pixel / 8; + + switch (fb->bits_per_pixel) { + case 24: + gdev->gdfIndex = GDF_32BIT_X888RGB; + break; + case 16: + gdev->gdfIndex = GDF_16BIT_565RGB; + break; + default: + gdev->gdfIndex = GDF__8BIT_INDEX; + break; + } + + gdev->isaBase = CONFIG_SYS_ISA_IO_BASE_ADDRESS; + gdev->pciBase = (unsigned int)fb->physical_address; + + gdev->frameAdrs = (unsigned int)fb->physical_address; + gdev->memSize = fb->bytes_per_line * fb->y_resolution; + + gdev->vprBase = (unsigned int)fb->physical_address; + gdev->cprBase = (unsigned int)fb->physical_address; + + return 1; +} + +void *video_hw_init(void) +{ + GraphicDevice *gdev = &ctfb; + int bits_per_pixel; + + printf("Video: "); + + if (!parse_coreboot_table_fb(gdev)) { + printf("No video mode configured in coreboot!\n"); + return NULL; + } + + bits_per_pixel = gdev->gdfBytesPP * 8; + + /* fill in Graphic device struct */ + sprintf(gdev->modeIdent, "%dx%dx%d", gdev->winSizeX, gdev->winSizeY, + bits_per_pixel); + printf("%s\n", gdev->modeIdent); + + memset((void *)gdev->pciBase, 0, + gdev->winSizeX * gdev->winSizeY * gdev->gdfBytesPP); + + return (void *)gdev; +} diff --git a/drivers/video/ipu_common.c b/drivers/video/ipu_common.c index 0f2d113a6f1..ad4af5283a9 100644 --- a/drivers/video/ipu_common.c +++ b/drivers/video/ipu_common.c @@ -94,6 +94,7 @@ struct ipu_ch_param { temp1; \ }) +#define IPU_SW_RST_TOUT_USEC (10000) void clk_enable(struct clk *clk) { @@ -398,11 +399,20 @@ void ipu_reset(void) { u32 *reg; u32 value; + int timeout = IPU_SW_RST_TOUT_USEC; reg = (u32 *)SRC_BASE_ADDR; value = __raw_readl(reg); value = value | SW_IPU_RST; __raw_writel(value, reg); + + while (__raw_readl(reg) & SW_IPU_RST) { + udelay(1); + if (!(timeout--)) { + printf("ipu software reset timeout\n"); + break; + } + }; } /* |
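The ipu_reset() hunk above replaces an unbounded wait with a poll that gives up after IPU_SW_RST_TOUT_USEC microseconds. Purely as an illustration of that poll-with-timeout pattern (the register pointer, bit mask and helper name below are made up for this sketch and are not part of the patch), a standalone version could look like:

#include <common.h>
#include <asm/io.h>

#define RST_TOUT_USEC	10000	/* upper bound: roughly 10 ms of 1 us polls */

/* Set rst_bit in *reg, then wait for the hardware to clear it again. */
static int poll_reset_done(u32 *reg, u32 rst_bit)
{
	int timeout = RST_TOUT_USEC;

	__raw_writel(__raw_readl(reg) | rst_bit, reg);

	while (__raw_readl(reg) & rst_bit) {
		if (timeout-- <= 0) {
			printf("reset timeout\n");
			return -1;	/* let the caller decide how to recover */
		}
		udelay(1);
	}

	return 0;
}

Counting fixed udelay(1) iterations, as the patch does, only gives an approximate time bound, but it avoids any dependency on the timer subsystem and is accurate enough for a ~10 ms safety net around a reset that normally completes almost immediately.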