From 38fb38688f9a80dfb62b1c6b382edb47a590a93f Mon Sep 17 00:00:00 2001 From: Oleg Drokin Date: Wed, 1 Feb 2012 19:40:17 -0500 Subject: [PATCH] Initial TS attempt Added hsuart, user pins and the dumb ts driver --- arch/arm/configs/tenderloin_defconfig | 6 +- arch/arm/mach-msm/Kconfig | 7 + arch/arm/mach-msm/Makefile | 2 + arch/arm/mach-msm/board-msm8x60.c | 420 ++- arch/arm/mach-msm/clock-8x60.c | 14 +- arch/arm/mach-msm/devices-msm8x60.c | 81 + arch/arm/mach-msm/devices-msm8x60.h | 3 +- arch/arm/mach-msm/devices.h | 1 + arch/arm/mach-msm/gpiomux-8x60.c | 67 + arch/arm/mach-msm/include/mach/msm_hsuart.h | 219 ++ arch/arm/mach-msm/include/mach/msm_uart_dm.h | 604 +++++ arch/arm/mach-msm/msm_hsuart.c | 2390 +++++++++++++++++ arch/arm/mach-msm/msm_uart_dm.c | 2357 +++++++++++++++++ drivers/input/touchscreen/Kconfig | 12 + drivers/input/touchscreen/Makefile | 1 + drivers/input/touchscreen/cy8ctma395.c | 1189 +++++++++ drivers/misc/Kconfig | 11 + drivers/misc/Makefile | 2 + drivers/misc/hsuart.c | 2464 ++++++++++++++++++ drivers/misc/user-pins.c | 1014 +++++++ include/linux/cy8ctma395.h | 16 + include/linux/hsuart.h | 153 ++ include/linux/user-pins.h | 44 + 23 files changed, 11072 insertions(+), 5 deletions(-) create mode 100644 arch/arm/mach-msm/include/mach/msm_hsuart.h create mode 100644 arch/arm/mach-msm/include/mach/msm_uart_dm.h create mode 100644 arch/arm/mach-msm/msm_hsuart.c create mode 100644 arch/arm/mach-msm/msm_uart_dm.c create mode 100644 drivers/input/touchscreen/cy8ctma395.c create mode 100644 drivers/misc/hsuart.c create mode 100644 drivers/misc/user-pins.c create mode 100644 include/linux/cy8ctma395.h create mode 100644 include/linux/hsuart.h create mode 100644 include/linux/user-pins.h diff --git a/arch/arm/configs/tenderloin_defconfig b/arch/arm/configs/tenderloin_defconfig index 6b06963aea5..fdf4f524da5 100644 --- a/arch/arm/configs/tenderloin_defconfig +++ b/arch/arm/configs/tenderloin_defconfig @@ -418,6 +418,7 @@ CONFIG_RTAC=y # 
CONFIG_MSM_VREG_SWITCH_INVERTED is not set CONFIG_MSM_DMA_TEST=m # CONFIG_WIFI_CONTROL_FUNC is not set +CONFIG_MSM_UARTDM=y CONFIG_MSM_SLEEP_TIME_OVERRIDE=y # CONFIG_MSM_MEMORY_LOW_POWER_MODE is not set CONFIG_MSM_PM_TIMEOUT_HALT=y @@ -959,6 +960,8 @@ CONFIG_KERNEL_LOG=y CONFIG_ANDROID_PMEM=y # CONFIG_INTEL_MID_PTI is not set # CONFIG_ICS932S401 is not set +CONFIG_HSUART=y +CONFIG_USER_PINS=y # CONFIG_ENCLOSURE_SERVICES is not set # CONFIG_APDS9802ALS is not set # CONFIG_ISL29003 is not set @@ -1275,6 +1278,7 @@ CONFIG_TOUCHSCREEN_ATMEL_MAXTOUCH=y # CONFIG_TOUCHSCREEN_TPS6507X is not set CONFIG_TOUCHSCREEN_CY8C_TS=y CONFIG_TOUCHSCREEN_CYTTSP_I2C=y +CONFIG_TOUCHSCREEN_CY8CTMA395=y CONFIG_INPUT_MISC=y # CONFIG_INPUT_AD714X is not set # CONFIG_INPUT_ATI_REMOTE is not set @@ -1685,7 +1689,7 @@ CONFIG_MFD_PM8XXX_BATT_ALARM=y CONFIG_REGULATOR=y # CONFIG_REGULATOR_DEBUG is not set # CONFIG_REGULATOR_DUMMY is not set -# CONFIG_REGULATOR_FIXED_VOLTAGE is not set +CONFIG_REGULATOR_FIXED_VOLTAGE=y # CONFIG_REGULATOR_VIRTUAL_CONSUMER is not set # CONFIG_REGULATOR_USERSPACE_CONSUMER is not set # CONFIG_REGULATOR_BQ24022 is not set diff --git a/arch/arm/mach-msm/Kconfig b/arch/arm/mach-msm/Kconfig index d91064f76db..08c2cd40e1f 100644 --- a/arch/arm/mach-msm/Kconfig +++ b/arch/arm/mach-msm/Kconfig @@ -1587,6 +1587,13 @@ config SURF_FFA_GPIO_KEYPAD help Select if the GPIO keypad is attached. +config MSM_UARTDM + tristate "MSM UARTDM (High Speed) Driver" + depends on ARCH_MSM + default y + help + Provides access to the high speed uart ports. 
+ config MSM_SLEEP_TIME_OVERRIDE bool "Allow overriding suspend/sleep time with PM module parameter" default y diff --git a/arch/arm/mach-msm/Makefile b/arch/arm/mach-msm/Makefile index 8d7370f588f..cfc30402e9d 100644 --- a/arch/arm/mach-msm/Makefile +++ b/arch/arm/mach-msm/Makefile @@ -319,3 +319,5 @@ endif obj-$(CONFIG_ARCH_MSM8960) += mdm2.o mdm_common.o obj-$(CONFIG_MSM_RTB) += msm_rtb.o +obj-$(CONFIG_MSM_UARTDM) += msm_hsuart.o +obj-$(CONFIG_MSM_UARTDM) += msm_uart_dm.o diff --git a/arch/arm/mach-msm/board-msm8x60.c b/arch/arm/mach-msm/board-msm8x60.c index b34b8cfeb25..2bcf3ebe2f2 100644 --- a/arch/arm/mach-msm/board-msm8x60.c +++ b/arch/arm/mach-msm/board-msm8x60.c @@ -33,6 +33,7 @@ #include #include #include +#include #include #include #include @@ -87,6 +88,14 @@ #include #include +#ifdef CONFIG_HSUART +#include +#endif + +#ifdef CONFIG_USER_PINS +#include +#endif + #include "devices.h" #include "devices-msm8x60.h" #include @@ -258,6 +267,30 @@ enum { GPIO_EPM_EXPANDER_IO15, }; +/* helper function to manipulate group of gpios (msm_gpiomux)*/ +static int configure_gpiomux_gpios(int on, int gpios[], int cnt) +{ + int ret = 0; + int i; + + for (i = 0; i < cnt; i++) { + //printk(KERN_ERR "%s:pin(%d):%s\n", __func__, gpios[i], on?"on":"off"); + if (on) { + ret = msm_gpiomux_get(gpios[i]); + if (unlikely(ret)) + break; + } else { + ret = msm_gpiomux_put(gpios[i]); + if (unlikely(ret)) + return ret; + } + } + if (ret) + for (; i >= 0; i--) + msm_gpiomux_put(gpios[i]); + return ret; +} + struct pm8xxx_mpp_init_info { unsigned mpp; struct pm8xxx_mpp_config_data config; @@ -2551,6 +2584,14 @@ static struct msm_i2c_platform_data msm_gsbi9_qup_i2c_pdata = { .msm_i2c_config_gpio = gsbi_qup_i2c_gpio_config, }; +static void board_gsbi10_init2(int unused1, int unused2); +static struct msm_i2c_platform_data msm_gsbi10_qup_i2c_pdata = { + .clk_freq = 300000, + .src_clk_rate = 24000000, + .use_gsbi_shared_mode = 1, + .msm_i2c_config_gpio = board_gsbi10_init2, +}; + static 
struct msm_i2c_platform_data msm_gsbi12_qup_i2c_pdata = { .clk_freq = 100000, .src_clk_rate = 24000000, @@ -2569,6 +2610,356 @@ static struct msm_spi_platform_data msm_gsbi10_qup_spi_pdata = { }; #endif +#define GSBI1_PHYS 0x16000000 +#define GSBI8_PHYS 0x19800000 +#define GSBI_CTRL 0x0 +#define PROTOCOL_CODE(code) (((code) & 0x7) << 4) +#define UART_WITH_FLOW_CONTROL 0x4 +#define I2C_ON_2_PORTS_UART 0x6 + +static DEFINE_MUTEX(gsbi_init_lock); + +static int board_gsbi_init(int gsbi, int *inited, u32 prot) +{ + int rc; + u32 gsbi_phys; + void *gsbi_virt; + + pr_debug("%s: gsbi=%d inited=%d\n", __func__, gsbi, *inited); + + mutex_lock(&gsbi_init_lock); + + if (*inited) { + rc = 0; + goto exit; + } + + pr_debug("%s: gsbi=%d prot=%x", __func__, gsbi, prot); + + if ((gsbi >= 1) && (gsbi <= 7)) + gsbi_phys = GSBI1_PHYS + ((gsbi - 1) * 0x100000); + + else if ((gsbi >= 8) && (gsbi <= 12)) + gsbi_phys = GSBI8_PHYS + ((gsbi - 8) * 0x100000); + + else { + rc = -EINVAL; + goto exit; + } + + gsbi_virt = ioremap(gsbi_phys, 4); + if (!gsbi_virt) { + pr_err("error remapping address 0x%08x\n", gsbi_phys); + rc = -ENXIO; + goto exit; + } + + pr_debug("%s: %08x=%08x\n", __func__, gsbi_phys + GSBI_CTRL, + PROTOCOL_CODE(prot)); + writel(PROTOCOL_CODE(prot), gsbi_virt + GSBI_CTRL); + iounmap(gsbi_virt); + rc = 0; +exit: + mutex_unlock(&gsbi_init_lock); + + return (rc); +} + +static int board_gsbi10_init(void) +{ + static int inited = 0; + + return (board_gsbi_init(10, &inited, I2C_ON_2_PORTS_UART)); +} + +static void board_gsbi10_init2(int unused1, int unused2) +{ + board_gsbi10_init(); +} + +#if defined (CONFIG_TOUCHSCREEN_CY8CTMA395) \ + || defined (CONFIG_TOUCHSCREEN_CY8CTMA395_MODULE) +static struct user_pin ctp_pins[] = { + { + .name = "wake", + .gpio = GPIO_CTP_WAKE, + .act_level = 0, + .direction = 0, + .def_level = 1, + .sysfs_mask = 0660, + }, +}; + +#define CTP_UART_SPEED_SLOW 3000000 +#define CTP_UART_SPEED_FAST 4000000 + +static int ctp_uart_pin_mux(int on) +{ + int rc; + 
static int is_on = 0; + + pr_debug("%s: on=%d\n", __func__, on); + + if (on && !is_on) + rc = msm_gpiomux_get(GPIO_CTP_RX); + + else if (!on && is_on) + rc = msm_gpiomux_put(GPIO_CTP_RX); + + else + rc = 0; + + is_on = on; + + return (rc); +} + +static struct hsuart_platform_data ctp_uart_data = { + .dev_name = "ctp_uart", + .uart_mode = HSUART_MODE_FLOW_CTRL_NONE | HSUART_MODE_PARITY_NONE, + .uart_speed = CTP_UART_SPEED_SLOW, + .options = HSUART_OPTION_RX_DM | HSUART_OPTION_SCHED_RT, + + .tx_buf_size = 4096, + .tx_buf_num = 1, + .rx_buf_size = 16384, + .rx_buf_num = 2, + .max_packet_size = 10240, + .min_packet_size = 1, + .rx_latency = CTP_UART_SPEED_FAST/20000, /* bytes per 500 us */ + .p_board_pin_mux_cb = ctp_uart_pin_mux, + .p_board_config_gsbi_cb = board_gsbi10_init, +}; + +static struct platform_device ctp_uart_device = { + .name = "hsuart", + .id = 1, + .dev = { + .platform_data = &ctp_uart_data, + } +}; + +static int board_cy8ctma395_gpio_request(unsigned gpio, const char *name, + int request, int *requested, + struct gpiomux_setting *new, + struct gpiomux_setting *old, + int *replaced) +{ + int rc; + + if (request && !*requested) { + rc = gpio_request(gpio, name); + if (rc < 0) { + pr_err("error %d requesting gpio %u (%s)\n", rc, gpio, name); + goto gpio_request_failed; + } + + rc = msm_gpiomux_write(gpio, GPIOMUX_ACTIVE, new, old); + if (rc < 0) { + pr_err("error %d muxing gpio %u (%s)\n", rc, gpio, name); + goto msm_gpiomux_write_failed; + } + + *replaced = !rc; + + rc = msm_gpiomux_get(gpio); + if (rc < 0) { + pr_err("error %d 'getting' gpio %u (%s)\n", rc, gpio, name); + goto msm_gpiomux_get_failed; + } + + *requested = 1; + } + + else if (!request && *requested) { + msm_gpiomux_put(gpio); + msm_gpiomux_write(gpio, GPIOMUX_ACTIVE, *replaced ? old : NULL, NULL); + gpio_free(gpio); + *requested = 0; + } + + return (0); + +msm_gpiomux_get_failed: + msm_gpiomux_write(gpio, GPIOMUX_ACTIVE, *replaced ? 
old : NULL, NULL); +msm_gpiomux_write_failed: + gpio_free(gpio); +gpio_request_failed: + + return (rc); +} + +static int board_cy8ctma395_swdck_request(int request) +{ + static int replaced = 0; + static int requested = 0; + static struct gpiomux_setting scl; + + struct gpiomux_setting swdck = { + .func = GPIOMUX_FUNC_GPIO, + .drv = GPIOMUX_DRV_8MA, + .pull = GPIOMUX_PULL_NONE, + .dir = GPIOMUX_OUT_HIGH, + }; + + return (board_cy8ctma395_gpio_request(GPIO_CTP_SCL, "CY8CTMA395_SWDCK", + request, &requested, &swdck, + &scl, &replaced)); +} + +static int board_cy8ctma395_swdio_request(int request) +{ + static int replaced = 0; + static int requested = 0; + static struct gpiomux_setting sda; + + struct gpiomux_setting swdio = { + .func = GPIOMUX_FUNC_GPIO, + .drv = GPIOMUX_DRV_8MA, + .pull = GPIOMUX_PULL_NONE, + .dir = GPIOMUX_OUT_HIGH, + }; + + return (board_cy8ctma395_gpio_request(GPIO_CTP_SDA, "CY8CTMA395_SWDIO", + request, &requested, &swdio, + &sda, &replaced)); +} + +static void board_cy8ctma395_vdd_enable(int enable) +{ + int rc; + static struct regulator *tp_5v0 = NULL; + static struct regulator *tp_l10 = NULL; + static int isPowerOn = 0; + + if (!tp_l10) { + tp_l10 = regulator_get(NULL, "8058_l10"); + if (IS_ERR(tp_l10)) { + pr_err("%s: failed to get regulator \"8058_l10\"\n", __func__); + return; + } + + rc = regulator_set_voltage(tp_l10, 3050000, 3050000); + if (rc) { + pr_err("%s: Unable to set regulator voltage:" + " tp_l10\n", __func__); + regulator_put(tp_l10); + tp_l10 = NULL; + return; + } + } + + if (!tp_5v0) { + tp_5v0 = regulator_get(NULL, "vdd50_boost"); + if (IS_ERR(tp_5v0)) { + pr_err("failed to get regulator 'vdd50_boost' with %ld\n", + PTR_ERR(tp_5v0)); + regulator_put(tp_l10); + tp_l10 = NULL; + tp_5v0 = NULL; + return; + } + } + + if (enable == isPowerOn) { + return; + } + + if (enable) { + rc = regulator_enable(tp_l10); + if (rc < 0) { + pr_err("failed to enable regulator '8058_l10' with %d\n", rc); + return; + } + + rc = 
regulator_enable(tp_5v0); + if (rc < 0) { + pr_err("failed to enable regulator 'vdd50_boost' with %d\n", rc); + return; + } + + // Make sure the voltage is stabilized + msleep(2); + } + + else { + rc = regulator_disable(tp_5v0); + if (rc < 0) { + pr_err("failed to disable regulator 'vdd50_boost' with %d\n", rc); + return; + } + + rc = regulator_disable(tp_l10); + if (rc < 0) { + pr_err("failed to enable regulator '8058_l10' with %d\n", rc); + return; + } + } + + isPowerOn = enable; +} + +static struct cy8ctma395_platform_data board_cy8ctma395_data = { + .swdck_request = board_cy8ctma395_swdck_request, + .swdio_request = board_cy8ctma395_swdio_request, + .vdd_enable = board_cy8ctma395_vdd_enable, + .xres = GPIO_CY8CTMA395_XRES, + .xres_us = 1000, + .swdck = GPIO_CTP_SCL, + .swdio = GPIO_CTP_SDA, + .swd_wait_retries = 0, + .port_acquire_retries = 4, + .status_reg_timeout_ms = 1000, + .nr_blocks = 256, +}; + +static struct platform_device board_cy8ctma395_device = { + .name = CY8CTMA395_DEVICE, + .id = -1, + .dev = { + .platform_data = &board_cy8ctma395_data, + }, +}; + +static int tenderloin_tp_init(int on) +{ + int ret; + int tp_gpios[] = {GPIO_CTP_SCL, GPIO_CTP_SDA}; + ret = configure_gpiomux_gpios(on, tp_gpios, ARRAY_SIZE(tp_gpios)); + if (ret < 0) { + printk(KERN_ERR "%s: Error %d while configuring touch panel gpios.\n", __func__, ret); + } + return ret; +} + +#endif /* CONFIG_TOUCHSCREEN_CY8CTMA395[_MODULE] */ + +#ifdef CONFIG_USER_PINS +static struct user_pin_set board_user_pins_sets[] = { +#if defined (CONFIG_TOUCHSCREEN_CY8CTMA395) \ + || defined (CONFIG_TOUCHSCREEN_CY8CTMA395_MODULE) + { + .set_name = "ctp", + .num_pins = ARRAY_SIZE(ctp_pins), + .pins = ctp_pins, + }, +#endif /* CONFIG_TOUCHSCREEN_CY8CTMA395[_MODULE] */ +}; + +static struct user_pins_platform_data board_user_pins_pdata = { + .num_sets = ARRAY_SIZE(board_user_pins_sets), + .sets = board_user_pins_sets, +}; + +static struct platform_device board_user_pins_device = { + .name = "user-pins", + 
.id = -1, + .dev = { + .platform_data = &board_user_pins_pdata, + } +}; +#endif + #ifdef CONFIG_I2C_SSBI /* CODEC/TSSC SSBI */ static struct msm_i2c_ssbi_platform_data msm_ssbi3_pdata = { @@ -5384,6 +5775,9 @@ static struct platform_device *surf_devices[] __initdata = { #if defined(CONFIG_MSM_RPM_LOG) || defined(CONFIG_MSM_RPM_LOG_MODULE) &msm8660_rpm_log_device, #endif +#ifdef CONFIG_USER_PINS + &board_user_pins_device, +#endif #if defined(CONFIG_MSM_RPM_STATS_LOG) &msm8660_rpm_stat_device, #endif @@ -5426,6 +5820,13 @@ static struct platform_device *surf_devices[] __initdata = { &ion_dev, #endif &msm8660_device_watchdog, +#if defined (CONFIG_TOUCHSCREEN_CY8CTMA395) \ + || defined (CONFIG_TOUCHSCREEN_CY8CTMA395_MODULE) + &board_cy8ctma395_device, + &msm_device_uart_dm2, + &ctp_uart_device, +#endif /* CONFIG_TOUCHSCREEN_CY8CTMA395[_MODULE] */ + }; #ifdef CONFIG_ION_MSM @@ -5537,7 +5938,6 @@ static struct platform_device ion_dev = { }; #endif - static struct memtype_reserve msm8x60_reserve_table[] __initdata = { /* Kernel SMI memory pool for video core, used for firmware */ /* and encoder, decoder scratch buffers */ @@ -7532,6 +7932,18 @@ static void fixup_i2c_configs(void) else if (machine_is_msm8x60_fluid()) sx150x_data[SX150X_CORE_FLUID].irq_summary = PM8058_GPIO_IRQ(PM8058_IRQ_BASE, UI_INT1_N); +#endif + /* + * Set PMIC 8901 MPP0 active_high to 0 for surf and charm_surf. This + * implies that the regulator connected to MPP0 is enabled when + * MPP0 is low. + */ +#if 0 // TODO -- add/move elsewhere? 
+ if (machine_is_msm8x60_surf() || machine_is_msm8x60_fusion() || + machine_is_tenderloin()) + pm8901_vreg_init_pdata[PM8901_VREG_ID_MPP0].active_high = 0; + else + pm8901_vreg_init_pdata[PM8901_VREG_ID_MPP0].active_high = 1; #endif #endif } @@ -7629,6 +8041,7 @@ static void __init msm8x60_init_buses(void) } #endif msm_gsbi9_qup_i2c_device.dev.platform_data = &msm_gsbi9_qup_i2c_pdata; + msm_gsbi10_qup_i2c_device.dev.platform_data = &msm_gsbi10_qup_i2c_pdata; msm_gsbi12_qup_i2c_device.dev.platform_data = &msm_gsbi12_qup_i2c_pdata; #endif #if defined(CONFIG_SPI_QUP) || defined(CONFIG_SPI_QUP_MODULE) @@ -10989,6 +11402,11 @@ static void __init msm8x60_init(struct msm_board_data *board_data) platform_add_devices(surf_devices, ARRAY_SIZE(surf_devices)); +#if defined (CONFIG_TOUCHSCREEN_CY8CTMA395) \ + || defined (CONFIG_TOUCHSCREEN_CY8CTMA395_MODULE) + tenderloin_tp_init (true); +#endif + #ifdef CONFIG_MSM_DSPS if (machine_is_msm8x60_fluid()) { platform_device_unregister(&msm_gsbi12_qup_i2c_device); diff --git a/arch/arm/mach-msm/clock-8x60.c b/arch/arm/mach-msm/clock-8x60.c index 79f3d2ee690..5c08cf1ed75 100644 --- a/arch/arm/mach-msm/clock-8x60.c +++ b/arch/arm/mach-msm/clock-8x60.c @@ -3586,10 +3586,11 @@ static struct clk_lookup msm_clocks_8x60[] = { CLK_LOOKUP("core_clk", gsbi4_uart_clk.c, ""), CLK_LOOKUP("core_clk", gsbi5_uart_clk.c, ""), CLK_LOOKUP("core_clk", gsbi6_uart_clk.c, "msm_serial_hs.0"), + CLK_LOOKUP("core_clk", gsbi6_uart_clk.c, "msm_uartdm.0"), CLK_LOOKUP("core_clk", gsbi7_uart_clk.c, ""), CLK_LOOKUP("core_clk", gsbi8_uart_clk.c, ""), CLK_LOOKUP("core_clk", gsbi9_uart_clk.c, "msm_serial_hsl.1"), - CLK_LOOKUP("core_clk", gsbi10_uart_clk.c, ""), + CLK_LOOKUP("core_clk", gsbi10_uart_clk.c, "msm_uartdm.1"), CLK_LOOKUP("core_clk", gsbi11_uart_clk.c, ""), CLK_LOOKUP("core_clk", gsbi12_uart_clk.c, "msm_serial_hsl.0"), CLK_LOOKUP("core_clk", gsbi1_qup_clk.c, "spi_qsd.0"), @@ -3602,9 +3603,10 @@ static struct clk_lookup msm_clocks_8x60[] = { 
CLK_LOOKUP("core_clk", gsbi8_qup_clk.c, "qup_i2c.3"), CLK_LOOKUP("core_clk", gsbi9_qup_clk.c, "qup_i2c.2"), CLK_LOOKUP("core_clk", gsbi10_qup_clk.c, "spi_qsd.1"), + CLK_LOOKUP("core_clk", gsbi10_qup_clk.c, "qup_i2c.5"), CLK_LOOKUP("core_clk", gsbi11_qup_clk.c, ""), CLK_LOOKUP("gsbi_qup_clk", gsbi12_qup_clk.c, "msm_dsps"), - CLK_LOOKUP("core_clk", gsbi12_qup_clk.c, "qup_i2c.5"), + CLK_LOOKUP("core_clk", gsbi12_qup_clk.c, "qup_i2c.10"), CLK_LOOKUP("core_clk", pdm_clk.c, ""), CLK_LOOKUP("mem_clk", pmem_clk.c, "msm_dsps"), CLK_LOOKUP("core_clk", prng_clk.c, "msm_rng.0"), @@ -3633,6 +3635,7 @@ static struct clk_lookup msm_clocks_8x60[] = { CLK_LOOKUP("iface_clk", gsbi4_p_clk.c, "qup_i2c.1"), CLK_LOOKUP("iface_clk", gsbi5_p_clk.c, ""), CLK_LOOKUP("iface_clk", gsbi6_p_clk.c, "msm_serial_hs.0"), + CLK_LOOKUP("iface_clk", gsbi6_p_clk.c, "msm_uartdm.0"), CLK_LOOKUP("iface_clk", gsbi7_p_clk.c, "qup_i2c.4"), CLK_LOOKUP("iface_clk", gsbi8_p_clk.c, "qup_i2c.3"), CLK_LOOKUP("iface_clk", gsbi9_p_clk.c, "msm_serial_hsl.1"), @@ -3643,6 +3646,13 @@ static struct clk_lookup msm_clocks_8x60[] = { CLK_LOOKUP("iface_clk", gsbi12_p_clk.c, "msm_serial_hsl.0"), CLK_LOOKUP("iface_clk", gsbi12_p_clk.c, "qup_i2c.5"), CLK_LOOKUP("ppss_pclk", ppss_p_clk.c, ""), + CLK_LOOKUP("iface_clk", gsbi10_p_clk.c, "qup_i2c.5"), + CLK_LOOKUP("iface_clk", gsbi10_p_clk.c, "msm_uartdm.1"), + CLK_LOOKUP("iface_clk", gsbi11_p_clk.c, ""), + CLK_LOOKUP("iface_clk", gsbi12_p_clk.c, ""), + CLK_LOOKUP("iface_clk", gsbi12_p_clk.c, "msm_serial_hsl.0"), + CLK_LOOKUP("iface_clk", gsbi12_p_clk.c, "qup_i2c.10"), + CLK_LOOKUP("ppss_pclk", ppss_p_clk.c, ""), CLK_LOOKUP("iface_clk", tsif_p_clk.c, "msm_tsif.0"), CLK_LOOKUP("iface_clk", tsif_p_clk.c, "msm_tsif.1"), CLK_LOOKUP("iface_clk", usb_fs1_p_clk.c, ""), diff --git a/arch/arm/mach-msm/devices-msm8x60.c b/arch/arm/mach-msm/devices-msm8x60.c index 4670ce8557f..63aa249cd6f 100644 --- a/arch/arm/mach-msm/devices-msm8x60.c +++ b/arch/arm/mach-msm/devices-msm8x60.c @@ -93,6 
+93,7 @@ #define MSM_UART3DM_PHYS (MSM_GSBI3_PHYS + 0x40000) #define INT_UART3DM_IRQ GSBI3_UARTDM_IRQ #define TCSR_BASE_PHYS 0x16b00000 +#define MSM_GSBI10_UART_DM_PHYS (MSM_GSBI10_PHYS + 0x40000) /* PRNG device */ #define MSM_PRNG_PHYS 0x16C00000 @@ -265,6 +266,56 @@ struct platform_device msm_device_uart_dm1 = { }, }; +static struct resource msm_uart_dm2_resources[] = { + { + .start = MSM_GSBI10_UART_DM_PHYS, + .end = MSM_GSBI10_UART_DM_PHYS + PAGE_SIZE - 1, + .flags = IORESOURCE_MEM, + }, + { + .start = GSBI10_UARTDM_IRQ, + .end = GSBI10_UARTDM_IRQ, + .flags = IORESOURCE_IRQ, + }, + { + .start = MSM_GSBI10_PHYS, + .end = MSM_GSBI10_PHYS + 4 - 1, + .name = "gsbi_resource", + .flags = IORESOURCE_MEM, + }, + { + .start = TCSR_BASE_PHYS, + .end = TCSR_BASE_PHYS + 0x80 - 1, + .name = "tcsr_resource", + .flags = IORESOURCE_MEM, + }, + { + .start = DMOV_HSUART2_TX_CHAN, + .end = DMOV_HSUART2_RX_CHAN, + .name = "uartdm_channels", + .flags = IORESOURCE_DMA, + }, + { + .start = DMOV_HSUART2_TX_CRCI, + .end = DMOV_HSUART2_RX_CRCI, + .name = "uartdm_crci", + .flags = IORESOURCE_DMA, + }, +}; + +static u64 msm_uart_dm2_dma_mask = DMA_BIT_MASK(32); + +struct platform_device msm_device_uart_dm2 = { + .name = "msm_uartdm", + .id = 1, + .num_resources = ARRAY_SIZE(msm_uart_dm2_resources), + .resource = msm_uart_dm2_resources, + .dev = { + .dma_mask = &msm_uart_dm2_dma_mask, + .coherent_dma_mask = DMA_BIT_MASK(32), + }, +}; + static struct resource msm_uart3_dm_resources[] = { { .start = MSM_UART3DM_PHYS, @@ -487,6 +538,28 @@ static struct resource gsbi9_qup_i2c_resources[] = { }, }; +// config gsbi10 as i2c function +static struct resource gsbi10_qup_i2c_resources[] = { + { + .name = "qup_phys_addr", + .start = MSM_GSBI10_QUP_PHYS, + .end = MSM_GSBI10_QUP_PHYS + SZ_4K - 1, + .flags = IORESOURCE_MEM, + }, + { + .name = "gsbi_qup_i2c_addr", + .start = MSM_GSBI10_PHYS, + .end = MSM_GSBI10_PHYS + 4 - 1, + .flags = IORESOURCE_MEM, + }, + { + .name = "qup_err_intr", + .start = 
GSBI10_QUP_IRQ, + .end = GSBI10_QUP_IRQ, + .flags = IORESOURCE_IRQ, + }, +}; + static struct resource gsbi12_qup_i2c_resources[] = { { .name = "qup_phys_addr", @@ -881,6 +954,14 @@ struct platform_device msm_gsbi7_qup_i2c_device = { .resource = gsbi7_qup_i2c_resources, }; +/* Use GSBI10 QUP for /dev/i2c-5 */ +struct platform_device msm_gsbi10_qup_i2c_device = { + .name = "qup_i2c", + .id = MSM_GSBI10_QUP_I2C_BUS_ID, + .num_resources = ARRAY_SIZE(gsbi10_qup_i2c_resources), + .resource = gsbi10_qup_i2c_resources, +}; + /* Use GSBI12 QUP for /dev/i2c-5 (Sensors) */ struct platform_device msm_gsbi12_qup_i2c_device = { .name = "qup_i2c", diff --git a/arch/arm/mach-msm/devices-msm8x60.h b/arch/arm/mach-msm/devices-msm8x60.h index 9bfaeeed401..e0b0511127e 100644 --- a/arch/arm/mach-msm/devices-msm8x60.h +++ b/arch/arm/mach-msm/devices-msm8x60.h @@ -18,7 +18,8 @@ #define MSM_GSBI9_QUP_I2C_BUS_ID 2 #define MSM_GSBI8_QUP_I2C_BUS_ID 3 #define MSM_GSBI7_QUP_I2C_BUS_ID 4 -#define MSM_GSBI12_QUP_I2C_BUS_ID 5 +#define MSM_GSBI10_QUP_I2C_BUS_ID 5 +#define MSM_GSBI12_QUP_I2C_BUS_ID 10 #define MSM_SSBI1_I2C_BUS_ID 6 #define MSM_SSBI2_I2C_BUS_ID 7 #define MSM_SSBI3_I2C_BUS_ID 8 diff --git a/arch/arm/mach-msm/devices.h b/arch/arm/mach-msm/devices.h index c4f57ab6275..830e998a89a 100644 --- a/arch/arm/mach-msm/devices.h +++ b/arch/arm/mach-msm/devices.h @@ -120,6 +120,7 @@ extern struct platform_device msm_gsbi4_qup_i2c_device; extern struct platform_device msm_gsbi7_qup_i2c_device; extern struct platform_device msm_gsbi8_qup_i2c_device; extern struct platform_device msm_gsbi9_qup_i2c_device; +extern struct platform_device msm_gsbi10_qup_i2c_device; extern struct platform_device msm_gsbi12_qup_i2c_device; extern struct platform_device msm8625_device_qup_i2c_gsbi0; diff --git a/arch/arm/mach-msm/gpiomux-8x60.c b/arch/arm/mach-msm/gpiomux-8x60.c index 9318986af1b..8373176e9c3 100644 --- a/arch/arm/mach-msm/gpiomux-8x60.c +++ b/arch/arm/mach-msm/gpiomux-8x60.c @@ -343,11 +343,36 @@ static 
struct gpiomux_setting tma_active = { .pull = GPIOMUX_PULL_UP, }; +static struct gpiomux_setting ts_active = { + .func = GPIOMUX_FUNC_GPIO, + .drv = GPIOMUX_DRV_8MA, + .pull = GPIOMUX_PULL_NONE, +}; + +static struct gpiomux_setting ts_active1 = { + .func = GPIOMUX_FUNC_1, + .drv = GPIOMUX_DRV_8MA, + .pull = GPIOMUX_PULL_NONE, +}; + + static struct gpiomux_setting ts_suspended = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_DOWN, }; +static struct gpiomux_setting ts_suspended1 = { + .func = GPIOMUX_FUNC_GPIO, + .drv = GPIOMUX_DRV_2MA, + .pull = GPIOMUX_PULL_NONE, +}; +static struct gpiomux_setting ts_suspended2 = { + .func = GPIOMUX_FUNC_GPIO, + .drv = GPIOMUX_DRV_2MA, + .pull = GPIOMUX_PULL_UP, +}; + + static struct gpiomux_setting mdp_vsync_active_cfg = { .func = GPIOMUX_FUNC_1, @@ -1104,6 +1129,47 @@ static struct msm_gpiomux_config tenderloin_uart_configs[] __initdata = { }, }; +#if defined (CONFIG_TOUCHSCREEN_CY8CTMA395) \ + || defined (CONFIG_TOUCHSCREEN_CY8CTMA395_MODULE) +static struct msm_gpiomux_config tenderloin_ctp_configs[] __initdata = { + { /* GPIO_CTP_RX */ + .gpio = 71, + .settings = { + [GPIOMUX_ACTIVE] = &aux_pcm_active_config, + [GPIOMUX_SUSPENDED] = &ts_suspended, + }, + }, + { /* TS reset pin */ + .gpio = 70, + .settings = { + [GPIOMUX_ACTIVE] = &ts_active, + [GPIOMUX_SUSPENDED] = &ts_suspended1, + }, + }, + { /* GPIO_CTP_WAKE */ + .gpio = 123, + .settings = { + [GPIOMUX_ACTIVE] = &ts_active, + [GPIOMUX_SUSPENDED] = &ts_suspended2, + }, + }, + { /* GPIO_CTP_SCL */ + .gpio = 73, + .settings = { + [GPIOMUX_ACTIVE] = &ts_active1, + [GPIOMUX_SUSPENDED] = &ts_suspended1, + }, + }, + { /* GPIO_CTP_SDA */ + .gpio = 72, + .settings = { + [GPIOMUX_ACTIVE] = &ts_active1, + [GPIOMUX_SUSPENDED] = &ts_suspended1, + }, + }, +}; +#endif + #ifdef CONFIG_MSM_GSBI9_UART static struct msm_gpiomux_config msm8x60_charm_uart_configs[] __initdata = { { /* UART9DM RX */ @@ -2231,6 +2297,7 @@ struct msm_gpiomux_configs /* -JCS update for 
tenderloin TODO */ tenderloin_gpiomux_cfgs[] __initdata = { {tenderloin_gsbi_configs, ARRAY_SIZE(tenderloin_gsbi_configs)}, {tenderloin_uart_configs, ARRAY_SIZE(tenderloin_uart_configs)}, + {tenderloin_ctp_configs, ARRAY_SIZE(tenderloin_ctp_configs)}, {msm8x60_pmic_configs, ARRAY_SIZE(msm8x60_pmic_configs)}, {tenderloin_lcdc_configs, ARRAY_SIZE(tenderloin_lcdc_configs)}, #if 0 diff --git a/arch/arm/mach-msm/include/mach/msm_hsuart.h b/arch/arm/mach-msm/include/mach/msm_hsuart.h new file mode 100644 index 00000000000..cdb1a2d883f --- /dev/null +++ b/arch/arm/mach-msm/include/mach/msm_hsuart.h @@ -0,0 +1,219 @@ +/* + * include/asm/arch-msm/msm_hsuart.h + * + * Copyright (C) 2008 Palm, Inc. + * Author: Amir Frenkel + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __MSM_HSUART_H__ +#define __MSM_HSUART_H__ + +struct buffer_item { + /* + * linked list member so we can chain the buffer into a linked list. + */ + struct list_head list_item; + + /* In bytes */ + size_t size; + + /* + * The physical start addr of the DMA buffer. + */ + dma_addr_t phys_addr; + + /* + * Pointer to the virtual address that correspond to the phys mem + * allocated for the DMA + */ + char* p_vaddr; + + int read_index; + int write_index; + int fullness; + +}; + +struct rxtx_lists { + /* + * Currently, all the lists use the same lock + * as a future enhancement, we may consider having lock per + * list + */ + spinlock_t lock; + + /* + * List of empty buffers, can be filled with data. 
+ */ + struct list_head empty; + + /* + * List of full buffers, contains data which was not + * consumed yet. + */ + struct list_head full; + + /* + * List of buffer that are currently used by the underlying + * platform - uart driver to rx or tx data. + */ + struct list_head used; + + /* + * pool of buffers to be used in empty and full list, + * these buffers point to the underlying memory in which Rx + * will be read into or Tx will be writing from. + */ + struct buffer_item* p_buffer_pool; + + /* + * The number of vacant buffers is the list. + */ + int vacant_buffers; + + /* + * The total number of buffers. + */ + int buffer_cnt; +}; + + + +/* + * The following flags indicates features/configuration + * option for the HSUART context + */ +#define HSUART_CFG_RX_PIO (1 << 0) +#define HSUART_CFG_TX_PIO (1 << 1) +#define HSUART_CFG_RX_DM (1 << 2) +#define HSUART_CFG_TX_DM (1 << 3) +#define HSUART_CFG_SCHED_RT (1 << 4) + +/* + * Generic configuration parameters. + */ +struct hsuart_config { + /* + * The uart port flags. See definition above + */ + u32 flags; + + /* + * The uart port number to read/write from/to. + */ + int port_id; + + /* + * The uart port max packet size. + */ + int max_packet_size; + + /* + * The uart port min packet size + */ + int min_packet_size; + + /* + * The uart port tx latency in bytes at current speed + */ + int rx_latency; + + /* + * The uart callback for pin muxing + */ + int (*p_board_pin_mux_cb) ( int on ); + int (*p_board_gsbi_config_cb) ( void ); + + int (*p_board_rts_pin_deassert_cb) ( int deassert ); + /* + * Callback Zone... 
+ */ + struct { + struct buffer_item* (*p_cbk)(void* p_data, int free_bytes); + void* p_data; + } rx_get_buffer; + + struct { + void (*p_cbk) (void* p_data, struct buffer_item* p_buffer); + void* p_data; + } rx_put_buffer; + + struct { + struct buffer_item* (*p_cbk)(void* p_data); + void* p_data; + } tx_get_buffer; + + struct { + void (*p_cbk) (void* p_data, struct buffer_item* p_buffer, int transaction_size); + void* p_data; + } tx_put_buffer; +}; + +int +msm_hsuart_write(int context_id_handle); + +int +msm_hsuart_read(int context_id_handle, struct buffer_item* io_p_buffer); + +int +msm_hsuart_open_context(struct hsuart_config* io_p_cfg, + int* o_p_context_id_handle); + +int +msm_hsuart_close_context(int context_id_handle); + +int +msm_hsuart_suspend(int context_id_handle); + +int +msm_hsuart_resume(int context_id_handle); + +int +msm_hsuart_register_rx_put_buffer( + int context_id_handle, + void (*p_cbk)(void* p_data, struct buffer_item* p_buffer), + void* p_data); +int +msm_hsuart_register_rx_get_buffer( + int context_id_handle, + struct buffer_item* (*p_cbk)(void* p_data, int free_bytes), + void* p_data); + +int +msm_hsuart_register_tx_put_buffer( + int context_id_handle, + void (*p_cbk)(void* p_data, struct buffer_item* p_buffer, int transaction_size), + void* p_data); +int +msm_hsuart_register_tx_get_buffer( + int context_id_handle, + struct buffer_item* (*p_cbk)(void* p_data), + void* p_data); +/* + * The possible values for 'flow' parameter are defined in hsuart.h + */ +int +msm_hsuart_set_flow( + int context_id_handle, + int flow); +int +msm_hsuart_set_parity( + int context_id_handle, + int parity); +int +msm_hsuart_set_baud_rate(int context_id_handle, + uint32_t baud_rate); + +int +msm_hsuart_rx_fifo_has_bytes( int context_id_handle); + +#endif /* __MSM_HSUART_H__ */ diff --git a/arch/arm/mach-msm/include/mach/msm_uart_dm.h b/arch/arm/mach-msm/include/mach/msm_uart_dm.h new file mode 100644 index 00000000000..ff6fd0d2682 --- /dev/null +++ 
b/arch/arm/mach-msm/include/mach/msm_uart_dm.h @@ -0,0 +1,604 @@ +/* + * arch/arm/mach-msm/msm_uart_dm.h + * + * Copyright (C) 2008 Palm, Inc. + * Author: Amir Frenkel + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __MSM_UART_DM_H__ +#define __MSM_UART_DM_H__ + +/* Register */ +#define UART_DM_MR1 (0x0000) + +/* BitMask of the fields */ +#define UART_DM_MR1_AUTO_RFR_LEVEL0 (0x3F) +#define UART_DM_MR1_AUTO_RFR_LEVEL1 (0xFFFFFF00) +#define UART_MR1_RX_RDY_CTL (1 << 7) +#define UART_MR1_CTS_CTL (1 << 6) + +/* Register */ +#define UART_DM_MR2 (0x0004) + +/* Bit fields value definition and masks */ +#define UART_DM_MR2_ERROR_MODE (1 << 6) + +#define UART_DM_MR2_BITS_PER_CHAR_MASK (0x3 << 4) +#define UART_DM_MR2_BITS_PER_CHAR_5 (0x0 << 4) +#define UART_DM_MR2_BITS_PER_CHAR_6 (0x1 << 4) +#define UART_DM_MR2_BITS_PER_CHAR_7 (0x2 << 4) +#define UART_DM_MR2_BITS_PER_CHAR_8 (0x3 << 4) + +#define UART_DM_MR2_STOP_BIT_LEN_MASK (0x3 << 2) +#define UART_DM_MR2_STOP_BIT_LEN_0_563 (0x0 << 2) +#define UART_DM_MR2_STOP_BIT_LEN_ONE (0x1 << 2) +#define UART_DM_MR2_STOP_BIT_LEN_1_563 (0x2 << 2) +#define UART_MR2_STOP_BIT_LEN_TWO (0x3 << 2) + +#define UART_DM_MR2_PARITY_MODE_MASK (0x3 << 0) +#define UART_DM_MR2_PARITY_MODE_NONE (0x0 << 0) +#define UART_DM_MR2_PARITY_MODE_ODD (0x1 << 0) +#define UART_DM_MR2_PARITY_MODE_EVEN (0x2 << 0) +#define UART_DM_MR2_PARITY_MODE_SPACE (0x3 << 0) + + +/* Clk selection Register */ +#define UART_DM_CSR (0x0008) +/* + * The bellow values are used to divide the 'uart fundemntal clk' + * by X - which is the number specificed in 
the enum + * TX - CSR clk sel is in bits 0-3 + * Rx - CSR clk sel is in bits 4-7 + */ +#define UART_DM_CSR_TX_DIV_1 (0xF) +#define UART_DM_CSR_TX_DIV_2 (0xE) +#define UART_DM_CSR_TX_DIV_3 (0xD) +#define UART_DM_CSR_TX_DIV_4 (0xC) +#define UART_DM_CSR_TX_DIV_6 (0xB) +#define UART_DM_CSR_TX_DIV_8 (0xA) +#define UART_DM_CSR_TX_DIV_12 (0x9) +#define UART_DM_CSR_TX_DIV_16 (0x8) +#define UART_DM_CSR_TX_DIV_24 (0x7) +#define UART_DM_CSR_TX_DIV_32 (0x6) +#define UART_DM_CSR_TX_DIV_48 (0x5) +#define UART_DM_CSR_TX_DIV_96 (0x4) +#define UART_DM_CSR_TX_DIV_192 (0x3) +#define UART_DM_CSR_TX_DIV_384 (0x2) +#define UART_DM_CSR_TX_DIV_768 (0x1) +#define UART_DM_CSR_TX_DIV_1536 (0x0) + +/* + * + */ +#define UART_DM_CSR_RX_DIV_1 (0xF << 4) +#define UART_DM_CSR_RX_DIV_2 (0xE << 4) +#define UART_DM_CSR_RX_DIV_3 (0xD << 4) +#define UART_DM_CSR_RX_DIV_4 (0xC << 4) +#define UART_DM_CSR_RX_DIV_6 (0xB << 4) +#define UART_DM_CSR_RX_DIV_8 (0xA << 4) +#define UART_DM_CSR_RX_DIV_12 (0x9 << 4) +#define UART_DM_CSR_RX_DIV_16 (0x8 << 4) +#define UART_DM_CSR_RX_DIV_24 (0x7 << 4) +#define UART_DM_CSR_RX_DIV_32 (0x6 << 4) +#define UART_DM_CSR_RX_DIV_48 (0x5 << 4) +#define UART_DM_CSR_RX_DIV_96 (0x4 << 4) +#define UART_DM_CSR_RX_DIV_192 (0x3 << 4) +#define UART_DM_CSR_RX_DIV_384 (0x2 << 4) +#define UART_DM_CSR_RX_DIV_768 (0x1 << 4) +#define UART_DM_CSR_RX_DIV_1536 (0x0 << 4) + + +/* + * TX - CSR clk sel is in bits 0-3 + * Rx - CSR clk sel is in bits 4-7 + */ +#define UART_DM_CSR_RX_OFF ( + + +/* Command Register */ +#define UART_DM_CR (0x0010) + +/* + * CHANNEL COMMAND bit definition + */ +#define UART_DM_CR_CMD_NULL (0x0 << 4) +#define UART_DM_CR_CMD_RESET_RX (0x1 << 4) +#define UART_DM_CR_CMD_RESET_TX (0x2 << 4) +/* reset error status */ +#define UART_DM_CR_CMD_RESET_ERR (0x3 << 4) +/* reset break change interrupt */ +#define UART_DM_CR_CMD_RESET_BCI (0x4 << 4) +#define UART_DM_CR_CMD_START_BREAK (0x5 << 4) +#define UART_DM_CR_CMD_STOP_BREAK (0x6 << 4) +/* clear CTS interrupt */ +#define 
UART_DM_CR_CMD_CLR_CTS (0x7 << 4) +/* Reset stale interrupt */ +#define UART_DM_CR_CMD_CLR_STALE (0x8 << 4) +/* enable 16x mode */ +#define UART_DM_CR_CMD_PACKET_MODE (0x9 << 4) +/* disable 16x mode */ +#define UART_DM_CR_CMD_RESET_MODE (0xC << 4) +/* assert rx ready (active low) */ +#define UART_DM_CR_CMD_SET_RFR (0xD << 4) +/* deassert receive ready */ +#define UART_DM_CR_CMD_RESET_RFR (0xE << 4) +/* Clear TX err interrupt */ +#define UART_DM_CR_CMD_CLR_TX_ERR (0x10 << 4) +/* Clear TX done interrupt */ +#define UART_DM_CR_CMD_CLR_TX_DONE (0x11 << 4) + +#define UART_DM_CR_RX_ENABLE (0x1 << 0) +#define UART_DM_CR_RX_DISABLE (0x1 << 1) +#define UART_DM_CR_TX_ENABLE (0x1 << 2) +#define UART_DM_CR_TX_DISABLE (0x1 << 3) + +/* + * GENERAL command definitions + */ +#define UART_DM_CR_GCMD_NULL (0 << 8) +#define UART_DM_CR_GCMD_CR_PROTECTION_EN (1 << 8) +#define UART_DM_CR_GCMD_CR_PROTECTION_DIS (2 << 8) +#define UART_DM_CR_GCMD_RESET_TX_RDY_INT (3 << 8) +#define UART_DM_CR_GCMD_SW_FORCE_STALE (4 << 8) +#define UART_DM_CR_GCMD_EN_STALE_EVENT (5 << 8) +#define UART_DM_CR_GCMD_DIS_STALE_EVENT (6 << 8) + +/* + * Interrupt Mask Register + */ +#define UART_DM_IMR (0x0014) + +/* IMR bit definitions */ + +/* Only relevant in UIM/SIM mode */ +#define UART_DM_IMR_TX_DONE (1 << 9) +/* Only used in UIM mode */ +#define UART_DM_IMR_TX_ERROR (1 << 8) + +#define UART_DM_IMR_TX_RDY (1 << 7) +/* Indicates the current state of CTS, never generates interrupt */ +#define UART_DM_IMR_CURRENT_CTS (1 << 6) +#define UART_DM_IMR_DELTA_CTS (1 << 5) +/* Set when Rx FIFO is above watermark value*/ +#define UART_DM_IMR_RX_LEV (1 << 4) + +/* Stale event */ +#define UART_DM_IMR_RX_STALE (1 << 3) + +#define UART_DM_IMR_RX_BREAK (1 << 2) +#define UART_DM_IMR_RX_HUNT (1 << 1) + +/* Set when The Tx FIFO is below or eq the watermark value */ +#define UART_DM_IMR_TX_LEV (1 << 0) + +/* + * Interrupt Program Register + */ +#define UART_DM_IPR (0x0018) + +/* IPR bit definitions */ +#define 
UART_DM_IPR_STALE_TIMEOUT_MSB_OFFSET 7 +#define UART_DM_IPR_STALE_TIMEOUT_LSB_SIZE 5 + +#define UART_DM_IPR_STALE_TIMEOUT_MSB_MSK (0xFFFFFF80) +#define UART_DM_IPR_STALE_TIMEOUT_LSB_MSK (0x1F) +#define UART_DM_IPR_SAMPLE_DATA (0x40) + + +/* + * Tx FIFO watermark register + */ +#define UART_DM_TFWR (0x001C) + +/* + * Rx FIFO watermark register + */ +#define UART_DM_RFWR (0x0020) + +/* + * Hunt character Register + */ +#define UART_DM_HCR (0x0024) + + +/* + * Write: In DM mode used as the handshake size (in chars) + * between the FIFO and DM. + * Read: Number of bytes received since the last xfer. + */ +#define UART_DM_DMRX (0x0034) + + + +/* + * Bits 23:0 - Hold the number of valid characters received + * since the end of the last rx transaction. + * End of rx transaction is defined as: + * - The number of bytes written into DMRX register has been read. + * or + * - Stale event occured. + */ +#define UART_DM_RX_TOTAL_SNAP (0x0038) + +/* + * Bits 31:10 TX_FIFO_STATE_MSB + * Bits 9:7 TX_BUFFER_STATE + * Bits 6:0 TX_FIFO_STATE_LSB + * The number if valid entries (dword) in the RX-FIFO + */ +#define UART_DM_TXFS (0x004C) + +#define UART_DM_TX_FIFO_STATE_LSB (0x7F) +#define UART_DM_TX_FIFO_STATE_MSB (0xFFFFFC00) +#define UART_DM_TX_FIFO_STATE_SHIFT (10) + +/* + * Bits 31:10 RX_FIFO_STATE_MSB + * Bits 9:7 RX_BUFFER_STATE + * Bits 6:0 RX_FIFO_STATE_LSB + * The number if valid entries (dword) in the RX-FIFO + */ +#define UART_DM_RXFS (0x0050) + +#define UART_DM_RX_FIFO_STATE_LSB (0x7F) +#define UART_DM_RX_FIFO_STATE_MSB (0xFFFFFC00) +#define UART_DM_RX_FIFO_STATE_SHIFT (10) +#define UART_DM_RX_BUFFER_STATE_SHIFT (7) +#define UART_DM_RX_BUFFER_STATE_MASK ( 7 << UART_DM_RX_BUFFER_STATE_SHIFT) + +/* + * UART_DM_DMEN + * Data mover enable. 
+ */ +#define UART_DM_DMEN (0x003C) + +/* + * UART_DM_DMEN bit definitions + */ +#define UART_DM_DMEN_RX_DM_EN (1 << 1) +#define UART_DM_DMEN_RX_DM_DIS (0 << 1) +#define UART_DM_DMEN_TX_DM_EN (1 << 0) +#define UART_DM_DMEN_TX_DM_DIS (0 << 0) + +/* + * The total number of chars for transmission_complete_intr + */ +#define UART_DM_NUM_CHARS_FOR_TX (0x0040) + +/* + * The Status Register + */ +#define UART_DM_SR (0x0008) +#define UART_DM_SR_HUNT_CHAR (1 << 7) +#define UART_DM_SR_RX_BREAK (1 << 6) +#define UART_DM_SR_PAR_FRAME_ERR (1 << 5) +#define UART_DM_SR_OVERRUN (1 << 4) +#define UART_DM_SR_TX_EMPTY (1 << 3) +#define UART_DM_SR_TX_READY (1 << 2) +#define UART_DM_SR_RX_FULL (1 << 1) +#define UART_DM_SR_RX_READY (1 << 0) + +/* + * Masked Interrupt status register + */ +#define UART_DM_MISR (0x0010) + +/* + * Interrupt status register + */ +#define UART_DM_ISR (0x0014) + +/* Bit fields definition of ISR */ +#define UART_DM_ISR_TX_DONE (1 << 9) +#define UART_DM_ISR_TX_ERR (1 << 8) +#define UART_DM_ISR_TX_READY (1 << 7) +#define UART_DM_ISR_CURRENT_CTS (1 << 6) +#define UART_DM_ISR_DELTA_CTS (1 << 5) +#define UART_DM_ISR_RX_LEV (1 << 4) +#define UART_DM_ISR_RX_STALE (1 << 3) +#define UART_DM_ISR_RX_BREAK (1 << 2) +#define UART_DM_ISR_RX_HUNT (1 << 1) +#define UART_DM_ISR_TX_LEV (1 << 0) + + +/* + * *** Write *** + * Addr 0x70-0x80 is used for TX FIFO + */ +#define UART_DM_TF (0x0070) +#define UART_DM_TF2 (0x0074) +#define UART_DM_TF3 (0x0078) +#define UART_DM_TF4 (0x007C) + +/* + * *** Read *** + * Addr 0x70-0x80 is used for RX FIFO + */ +#define UART_DM_RF (0x0070) +#define UART_DM_RF2 (0x0074) +#define UART_DM_RF3 (0x0078) +#define UART_DM_RF4 (0x007C) + + +#define UART_DM_MODE_RX_PIO (1 << 0) +#define UART_DM_MODE_RX_DM (1 << 1) + +struct msm_uart_port { + char name[16]; + struct clk* p_clk; + struct clk* p_pclk; + const char* p_clk_name; + const char* p_pclk_name; + + int flags; + /* + * Pointer to the device structure from the board file + */ + struct device* 
p_device; + + /* + * id - used for debug and identification purpose + */ + u32 id; + + /* + * The IRQ number to be used for UART + */ + /* TODO:move irq to generic generic_uart_port structure.*/ + unsigned int irq; + + /* + * The UART clock rate + */ + unsigned int clk_rate; + + /* TODO:move fifo size to generic generic_uart_port structure.*/ + /* + * The size of the rx/tx fifo in bytes, we assume same size for + * rx and tx + */ + int rx_fifo_size; + int tx_fifo_size; + + /* + * The memory base address to be used for uart access in the driver. + * this pointer was generated by mapping a physical memory mapped area. + */ + unsigned char __iomem* p_membase; + + /* + * The base physical address for uart mem-mapped registers, should be + * mapped into p_membase to be able to use it by the driver + */ + unsigned int mapbase; + + /* + * The size in bytes of the UART memory mapped area + */ + unsigned int mem_size; + + /* + * port lock + */ + spinlock_t lock; + + /* + * The desired port baud rate + */ + unsigned int baud_rate; + + /* + * Port parity data + */ + uint32_t parity_data; + /* + * Write-only registers, hence we keep a shadow register to + * tell us the last value written to the HW was + */ + u32 imr; + + /* + * RX DMA related structure + */ + struct { + + dmov_box *command_ptr; + dma_addr_t command_ptr_phys; + u32 *command_ptr_ptr; + dma_addr_t command_ptr_ptr_phys; + + } rx_dm; + + int dma_rx_channel; + int dma_tx_channel; + + int dma_rx_crci; + int dma_tx_crci; + + int rx_latency; + + int rx_flow_ctl; + int rx_flow_state; + + int tx_flow_ctl; + + struct msm_dmov_cmd rx_xfer; + + /* + * callback zone... 
+ */ + int (*p_board_pin_mux_cb) ( int on ); + int (*p_board_rts_pin_deassert_cb) ( int deassert ); + + void (* p_tx_level_callback)(void *pdata); + void* p_tx_level_data; + void (* p_tx_rdy_callback)(void *pdata); + void* p_tx_rdy_data; + + void (* p_rx_stale_callback)(void *pdata); + void* p_rx_stale_data; + void (* p_rx_level_callback)(void *pdata); + void* p_rx_level_data; + + void (* p_rx_dm_callback)(void *pdata); + void* p_rx_dm_data; + +}; + +#define generic_uart_port msm_uart_port + +#define GEN_UART_TO_MSM(generic_uart_port) \ + ((struct msm_uart_port *) generic_uart_port) + +/* + * Generic UART configuration parameters. + */ +struct generic_uart_config { + /* + * The phys UART port ID. + * Correspond to the UART port ID in the board file. + */ + int port_id; + + int flags; + + int rx_latency; + + int (*p_board_pin_mux_cb) ( int on ); + int (*p_board_config_gsbi_cb) ( void ); + int (*p_board_rts_pin_deassert_cb) ( int deassert ); +}; + +typedef enum { + MSM_UARTDM_PARITY_ODD = 1, + MSM_UARTDM_PARITY_EVEN, + MSM_UARTDM_PARITY_NONE +} msm_uartdm_parity_t; +int +msm_uartdm_port_open(struct generic_uart_config* i_p_config, + struct generic_uart_port** o_pp_port); + +int +msm_uartdm_port_close(struct generic_uart_port* io_p_port); + +int +msm_uartdm_port_suspend(struct generic_uart_port* io_p_port); + +int +msm_uartdm_port_resume(struct generic_uart_port* io_p_uart_port); + +int +msm_uartdm_port_init(struct generic_uart_port* io_p_uart_port); + +void +msm_uartdm_enable_tx_level(struct generic_uart_port* port); + +int +msm_uartdm_disable_tx_level(struct generic_uart_port* port); + +int +msm_uartdm_disable_tx_rdy(struct generic_uart_port* port); + +void +msm_uartdm_enable_tx_rdy(struct generic_uart_port* port); + +void +msm_uartdm_enable_rx_irqs(struct generic_uart_port* port, int enable_stale); + +int +msm_uartdm_disable_rx_irqs(struct generic_uart_port* port); + +void +msm_uartdm_enable_tx(struct generic_uart_port* port); + +void +msm_uartdm_disable_tx(struct 
generic_uart_port* port); + +void +msm_uartdm_reset_tx(struct generic_uart_port* port); + +void +msm_uartdm_enable_rx(struct generic_uart_port* port); + +void +msm_uartdm_disable_rx(struct generic_uart_port* port); + +void +msm_uartdm_reset_rx(struct generic_uart_port* port); + +int +msm_uartdm_set_tx_level_cbk(struct generic_uart_port* io_p_port, + void (* pcallback)(void *pdata), + void* pdata); +int +msm_uartdm_set_tx_rdy_cbk(struct generic_uart_port* io_p_port, + void (* pcallback)(void *pdata), + void* pdata); +int +msm_uartdm_set_baud_rate(struct generic_uart_port* i_p_port, + uint32_t baud_rate); + +int +msm_uartdm_set_rx_level_cbk(struct generic_uart_port* io_p_port, + void (* pcallback)(void *pdata), + void* pdata); +int +msm_uartdm_set_rx_dm_cbk(struct generic_uart_port* io_p_port, + void (* pcallback)(void *pdata), + void* pdata); + +int +msm_uartdm_set_rx_stale_cbk(struct generic_uart_port* io_p_port, + void (* pcallback)(void *pdata), + void* pdata); +int +msm_uartdm_tx_ready(struct generic_uart_port* i_p_port); + +void +msm_uartdm_config_write_size(struct generic_uart_port* i_p_port, int num_bytes); + +void +msm_uartdm_config_read_size(struct generic_uart_port* i_p_port, int num_bytes); + +int +msm_uartdm_get_received_byte_cnt(struct generic_uart_port* i_p_port); +int +msm_uartdm_get_rx_fifo_fullness(struct generic_uart_port* i_p_port, int *o_p_packing_bytes); + +void +msm_uartdm_send_dword(struct generic_uart_port* i_p_port, unsigned int data); + +int +msm_uartdm_rx_ready(struct generic_uart_port* i_p_port); +unsigned int +msm_uartdm_get_dword(struct generic_uart_port* i_p_port); + +int +msm_uartdm_rx_dm_config(struct generic_uart_port* i_p_port, uint32_t dst_phys_addr, size_t read_size ); + +void +msm_uartdm_rx_dm_flush(struct generic_uart_port* i_p_port); + +// TODO: I am here for debug, remove me :) +unsigned int msm_uartdm_read_reg(struct generic_uart_port* i_p_port, int addr); + +void +msm_uartdm_set_rx_flow(struct generic_uart_port* i_p_port, 
uint32_t flow_ctl, uint32_t flow_state); + +void +msm_uartdm_set_tx_flow(struct generic_uart_port* i_p_port, uint32_t flow_ctl); + +void +msm_uartdm_set_parity(struct generic_uart_port* i_p_port, + msm_uartdm_parity_t parity); + +#endif /* __MSM_UART_DM_H__ */ diff --git a/arch/arm/mach-msm/msm_hsuart.c b/arch/arm/mach-msm/msm_hsuart.c new file mode 100644 index 00000000000..6208ea46181 --- /dev/null +++ b/arch/arm/mach-msm/msm_hsuart.c @@ -0,0 +1,2390 @@ +/* + * arch/arm/mach-msm/hsuart.c - Driver for data mover + * Data mover can be either DMA client or pio workerthread + * that emulates the DMA client. + * + * Copyright (C) 2008 Palm, Inc. + * Author: Amir Frenkel + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +/* + * Debug related macros and defs + */ +#define DRIVER_NAME "msm_hsuart" +#define DRIVER_VERSION (0x100) + + +/* + * Debug ctl for the module + */ +#define MSM_HSUART_FEATURE_PRINT_RX_DATA 0 +#define MSM_HSUART_FEATURE_PRINT_TX_DATA 0 + +#define MSM_HSUART_DEBUG_ENABLE 0 +#define MSM_HSUART_FUNC_LOG_ENABLE 0 + +#if MSM_HSUART_DEBUG_ENABLE +#define MSM_HSUART_DEBUG(args...) (printk(KERN_DEBUG args)) +#define MSM_HSUART_INFO(args...) (printk(KERN_INFO args)) +#define MSM_HSUART_ERR(args...) (printk(KERN_ERR args)) +#else +#define MSM_HSUART_INFO(args...) +#define MSM_HSUART_DEBUG(args...) +#define MSM_HSUART_ERR(args...) 
+#endif // MSM_HSUART_DEBUG_ENABLE + +#if MSM_HSUART_FUNC_LOG_ENABLE +#define MSM_HSUART_ENTER() (printk(KERN_INFO"%s: %s, %u [msec] enter\n",\ + DRIVER_NAME, __PRETTY_FUNCTION__, jiffies_to_msecs(jiffies))) +#if 0 + i(printk(KERN_INFO"%s: %s, enter\n",\ + DRIVER_NAME, __PRETTY_FUNCTION__)) +#endif +#define MSM_HSUART_EXIT() (printk(KERN_INFO"%s: %s, %u [msec] exit\n",\ + DRIVER_NAME, __PRETTY_FUNCTION__, jiffies_to_msecs(jiffies))) +#if 0 + (printk(KERN_INFO"%s: %s, exit\n",\ + DRIVER_NAME, __PRETTY_FUNCTION__)) +#endif +#else +#define MSM_HSUART_ENTER() +#define MSM_HSUART_EXIT() +#endif + +/* + * definition of data-types and data-structures for hsuart... + */ + +#define RX_MODE_PIO(p_context) (p_context->flags & HSUART_CFG_RX_PIO ) +#define RX_MODE_DM(p_context) (p_context->flags & HSUART_CFG_RX_DM ) +#define RX_MODE_DM_FIXED_PACKET_LEN(p_context) ((p_context->flags & HSUART_CFG_RX_DM ) \ + && (p_context->rx.max_packet_size == p_context->rx.min_packet_size)) + +#define TX_MODE_PIO(p_context) (p_context->flags & HSUART_CFG_TX_PIO ) +#define TX_MODE_DM(p_context) (p_context->flags & HSUART_CFG_TX_DM ) + +#define MSM_UART_DEBUG_TIMING 0 +#define MSM_UART_DEBUG_TIMING_PORT 1 + +#if MSM_UART_DEBUG_TIMING +#include +static char* dbg_strings[] = { "msm uart get buff enter", + "msm uart buff exit", + "msm uart buff enter", + "msm uart buff exit", + + "msm uart write enter", + "msm uart write exit", + "msm uart read enter", + "msm uart read exit", + + "msm uart pio stale enter", + "msm uart pio stale exit", + + "msm uart pio rx level enter", + "msm uart pio rx level exit", + + "msm uart dm stale enter", + "msm uart dm stale exit", + + "msm uart dm cb enter", + "msm uart dm cb exit", + + "msm uart dm wq enter", + "msm uart dm wq exit", + + "msm uart open context enter", + "msm uart open context exit", + + "msm uart close context enter", + "msm uart close context exit", + + "msm uart dm rx config enter", + "msm uart dm rx config exit", + "msm uart set baud rate enter", + 
"msm uart set baud rate exit", + + "msm uart write worker enter", + "msm uart write worker exit", + + "msm uart suspend enter", + "msm uart suspend exit", + + "msm uart resume enter", + "msm uart resume exit", + + "msm uart tx pio event", + }; + +#define MSM_UART_GB_ENT 0 +#define MSM_UART_GB_EXT 1 +#define MSM_UART_PB_ENT 2 +#define MSM_UART_PB_EXT 3 +#define MSM_UART_WRITE_ENT 4 +#define MSM_UART_WRITE_EXT 5 +#define MSM_UART_READ_ENT 6 +#define MSM_UART_READ_EXT 7 +#define MSM_UART_PIO_STALE_ENT 8 +#define MSM_UART_PIO_STALE_EXT 9 +#define MSM_UART_PIO_RX_LEVEL_ENT 10 +#define MSM_UART_PIO_RX_LEVEL_EXT 11 +#define MSM_UART_DM_STALE_ENT 12 +#define MSM_UART_DM_STALE_EXT 13 +#define MSM_UART_DM_CB_ENT 14 +#define MSM_UART_DM_CB_EXT 15 +#define MSM_UART_DM_WQ_ENT 16 +#define MSM_UART_DM_WQ_EXT 17 +#define MSM_UART_OPEN_CONTEXT_ENT 18 +#define MSM_UART_OPEN_CONTEXT_EXT 19 +#define MSM_UART_CLOSE_CONTEXT_ENT 20 +#define MSM_UART_CLOSE_CONTEXT_EXT 21 +#define MSM_UART_DM_CONFIG_RX_ENT 22 +#define MSM_UART_DM_CONFIG_RX_EXT 23 +#define MSM_UART_SET_BAUD_RATE_ENT 24 +#define MSM_UART_SET_BAUD_RATE_EXT 25 +#define MSM_UART_WRITE_WORKER_ENT 26 +#define MSM_UART_WRITE_WORKER_EXT 27 +#define MSM_UART_SUSPEND_ENT 28 +#define MSM_UART_SUSPEND_EXT 29 +#define MSM_UART_RESUME_ENT 30 +#define MSM_UART_RESUME_EXT 31 +#define MSM_UART_TX_PIO_EVENT 32 + + +#define MSM_HSUART_LOG(p_context, eventid, arg1, arg2 ) \ + if ( p_context->p_uart_port->id == MSM_UART_DEBUG_TIMING_PORT ) { \ + hres_event(dbg_strings[eventid], (u32) arg1, (u32) arg2 ); \ + } \ + +#else + #define MSM_HSUART_LOG(args...) +#endif +/* + * TODO:MSM_UARTDM, right now each context can support read and write, consider changing it and have context per action to simplify the code + */ +struct hsuart_worker { + /* + * The work-queue for this context + */ + struct workqueue_struct* p_worker; + struct work_struct worker; + char name[20]; + + /* + * Indicate the completion of single xfer. 
+ */ + struct completion xfer_done; +}; + +/* +* RX transaction state +* Idle - No DM transaction configured +* DM in progress - DM request has been submitted +* DM flushing - flushing DM request +*/ +#define MSM_HSUART_RX_STATE_IDLE 0 +#define MSM_HSUART_RX_STATE_DM_IN_PROGRESS 1 +#define MSM_HSUART_RX_STATE_DM_FLUSHING 2 + +/* +* TX transaction state +* Idle - No transaction configured +* In progress - executing transfer +* Wait completion - all the data was submitted to HW FIFO , now waiting for the FIFO to be emptied +*/ +#define MSM_HSUART_TX_STATE_IDLE 0 +#define MSM_HSUART_TX_STATE_IN_PROGRESS 1 +#define MSM_HSUART_TX_STATE_WAIT_COMPLETION 2 + +/* +* Hsuart context state +* Active - Active state +* Suspending - suspend ongoing +* Suspended - suspend complete +*/ +#define MSM_HSUART_STATE_ACTIVE 0 +#define MSM_HSUART_STATE_SUSPENDING 1 +#define MSM_HSUART_STATE_SUSPENDED 2 + +#define MSM_HSUART_FLOW_DIR_RX 0 +#define MSM_HSUART_FLOW_DIR_TX 1 + +#define MSM_HSUART_FLOW_STATE_ASSERT 0 +#define MSM_HSUART_FLOW_STATE_DEASSERT 1 + +struct hsuart_context { + /* + * The unique ID of the context + */ + int context_id; + + int flags; + + /* + * Lock to protect the context + */ + spinlock_t lock; + /* + * Work related stuff + */ + struct hsuart_worker reader; + struct hsuart_worker writer; + + /* + * Rx/Tx related buffer management lists. + */ + struct rxtx_lists lists; + + /* + * TODO:MSM_HSUART Since I am lazy, added p_buffer to point at the only buffer + * we now hold when reading....should move it to the above member which support list of buffers.... + * When switching to use the rxtx_lists, should also revise the locking to lock the lists when + * accessing list members, whereas right now I am locking the entire context. 
+ */ + struct buffer_item* p_rx_buffer; + struct buffer_item* p_tx_buffer; + /* + * Transaction complete lock + */ + struct mutex read_complete_lock; + struct mutex write_complete_lock; + + /* + * Pointer to uart-port device handle, this port represent the physical + * uart port that is used by the lower hs-uart driver (platform-specific) + */ + struct generic_uart_port* p_uart_port; + + struct { + /* + * Indicates the number of valid bytes that should be recieved + * in the current rx transaction. + */ + int valid_byte_cnt; + /* + * If set a new request to the HW must be issued + * to request the next chunk of data. + */ + int the_end; + + /* + * Indicate the number of bytes that were read + * during the current transaction. + */ + int read_byte_cnt; + + /* + * Max packet size + */ + int max_packet_size; + + /* + * Min packet size + */ + int min_packet_size; + + /* + * Latency in bytes at current speed + */ + int latency; + + /* + * + */ + bool enable_stale; + + /* + * transfer state + */ + int state; + + /* + * rx transaction complete flag + */ + wait_queue_head_t transaction_complete; + /* + * rx flow control + */ + int flow_ctl; + /* + * rx flow state + */ + int flow_state; + } rx; + + struct { + /* + * transaction size in bytes + */ + int transaction_size; + + /* + * transfer state + */ + int state; + + /* + * tx transaction complete flag + */ + wait_queue_head_t transaction_complete; + /* + * tx flow control + */ + int flow_ctl; + } tx; + + /* + * Callback Zone... 
+ */ + int (*p_board_pin_mux_cb) ( int on ); + int (*p_board_rts_pin_deassert_cb) ( int deassert ); + + struct { + struct buffer_item* (*p_cbk)(void* p_data, int free_bytes); + void* p_data; + } rx_get_buffer; + struct { + void (*p_cbk)(void* p_data, struct buffer_item* p_buffer); + void* p_data; + } rx_put_buffer; + struct { + struct buffer_item* (*p_cbk)(void* p_data); + void* p_data; + } tx_get_buffer; + struct { + void (*p_cbk)(void* p_data, struct buffer_item* p_buffer, int transaction_size); + void* p_data; + } tx_put_buffer; + /* + * Misc parameters of the UART context. + */ + msm_uartdm_parity_t parity; + int baud_rate; + + /* + * Context state + */ + int state; +}; + +static int context_cnt = 0; + +/* + * The Rx callback function. The function is in fact doing + * the read from the msm uartdm HW FIFO into the local buffer of the hsuart + * driver + * + * @param[in][out] - p_data - pointer to the hsuart context to work on. + * + * return - None + * + */ + +static int irq_fire_cnt = 0; +void __msm_hsuart_rx_level_cbk(void* p_data) +{ + struct hsuart_context* p_context; + unsigned long flags; + + MSM_HSUART_ENTER(); + if (NULL == p_data) { + MSM_HSUART_ERR("%s: %s, invalid(null) contxt\n", + DRIVER_NAME, + __PRETTY_FUNCTION__); + return; + } + p_context = (struct hsuart_context*)p_data; + + MSM_HSUART_LOG(p_context, MSM_UART_PIO_RX_LEVEL_ENT, 0 , 0 ); + + if (p_context->p_rx_buffer) { + spin_lock_irqsave(&(p_context->lock), flags); + if (p_context->rx.the_end) { + spin_unlock_irqrestore(&(p_context->lock), flags); + printk("RX LEVEL IRQ after STALE\n"); + } + else { + p_context->rx.valid_byte_cnt += (4*(msm_uartdm_get_rx_fifo_fullness(p_context->p_uart_port, NULL) - 1)); + + MSM_HSUART_DEBUG("%s valid_cnt %d\n",__FUNCTION__, p_context->rx.valid_byte_cnt); + + irq_fire_cnt++; + msm_uartdm_disable_rx_irqs(p_context->p_uart_port); + spin_unlock_irqrestore(&(p_context->lock), flags); + queue_work(p_context->reader.p_worker, + &(p_context->reader.worker)); + } + } 
+ else { + MSM_HSUART_ERR("%s: %s, no buffer available for this context!\n", + DRIVER_NAME, + __PRETTY_FUNCTION__); + } + + MSM_HSUART_LOG(p_context, MSM_UART_PIO_RX_LEVEL_EXT, p_context->rx.valid_byte_cnt , 0 ); + + MSM_HSUART_EXIT(); +} + +/* + * The Rx pio stale callback function. We read the number of valid bytes + * read by the HW and update the context to hold this information, + * then call the standard rx callback to read bytes from the FIFO. + * + * @param[in][out] - p_data - pointer to the hsuart context to work on. + * + * return - None + * + */ +void __msm_hsuart_rx_pio_stale_cbk(void* p_data) +{ + struct hsuart_context* p_context; + unsigned long flags; + + MSM_HSUART_ENTER(); + + if (NULL == p_data) { + MSM_HSUART_ERR("%s: %s, invalid(null) contxt\n", + DRIVER_NAME, + __PRETTY_FUNCTION__); + return; + } + p_context = (struct hsuart_context*)p_data; + + MSM_HSUART_LOG(p_context, MSM_UART_PIO_STALE_ENT, 0 , 0 ); + + if (p_context->p_rx_buffer) { + spin_lock_irqsave(&(p_context->lock), flags); + irq_fire_cnt++; + + p_context->rx.valid_byte_cnt = + msm_uartdm_get_received_byte_cnt(p_context->p_uart_port); + + p_context->rx.the_end = 1; + MSM_HSUART_DEBUG("%s valid_cnt %d\n",__FUNCTION__, p_context->rx.valid_byte_cnt); + msm_uartdm_disable_rx_irqs(p_context->p_uart_port); + spin_unlock_irqrestore(&(p_context->lock), flags); + queue_work(p_context->reader.p_worker, + &(p_context->reader.worker)); + } + MSM_HSUART_EXIT(); +} + +/* + * The Rx dm stale callback function. We read the number of valid bytes + * read by the HW and invoke DMA flush function if the number of bytes read + * greater than 0. DMA flush function handles invokes call the standard dma + * rx callback . + * + * @param[in][out] - p_data - pointer to the hsuart context to work on. 
+ * + * return - None + * + */ +void __msm_hsuart_rx_dm_stale_cbk(void* p_data) +{ + struct hsuart_context* p_context; + unsigned long flags; + + MSM_HSUART_ENTER(); + + if (NULL == p_data) { + MSM_HSUART_ERR("%s: %s, invalid(null) contxt\n", + DRIVER_NAME, + __PRETTY_FUNCTION__); + return; + } + p_context = (struct hsuart_context*)p_data; + + MSM_HSUART_LOG(p_context, MSM_UART_DM_STALE_ENT, 0 , 0 ); + + if (p_context->p_rx_buffer) { + int byte_cnt; + + spin_lock_irqsave(&(p_context->lock), flags); + irq_fire_cnt++; + + byte_cnt = msm_uartdm_get_received_byte_cnt(p_context->p_uart_port); + + if ( byte_cnt > 0 ) { + + msm_uartdm_disable_rx_irqs(p_context->p_uart_port); + + msm_uartdm_rx_dm_flush(p_context->p_uart_port); + + } else { + printk("RX DM empty STALE\n"); + + } + + spin_unlock_irqrestore(&(p_context->lock), flags); + MSM_HSUART_LOG(p_context, MSM_UART_DM_STALE_EXT, byte_cnt , 0 ); + + } + MSM_HSUART_EXIT(); +} + +/* + * This routine is called when we are done with a DMA transfer or the + * a flush has been sent to the data mover driver. + * + * This routine is registered with Data mover when we set up a Data Mover + * transfer. It is called from Data mover ISR when the DMA transfer is done. 
+ */ +static void __msm_hsuart_rx_dm_cbk(void* p_data) +{ + struct hsuart_context *p_context; + + MSM_HSUART_ENTER(); + + if (NULL == p_data) { + MSM_HSUART_ERR("%s: %s, invalid(null) contxt\n", + DRIVER_NAME, + __PRETTY_FUNCTION__); + return; + } + + + p_context = (struct hsuart_context*)p_data; + + MSM_HSUART_LOG(p_context, MSM_UART_DM_CB_ENT, 0 , 0 ); + + msm_uartdm_disable_rx_irqs(p_context->p_uart_port); + + if (p_context->p_rx_buffer) { + irq_fire_cnt++; + p_context->rx.valid_byte_cnt = + msm_uartdm_get_received_byte_cnt(p_context->p_uart_port); + + + MSM_HSUART_DEBUG("%s valid_cnt %d\n",__FUNCTION__, p_context->rx.valid_byte_cnt); + MSM_HSUART_LOG(p_context, MSM_UART_DM_CB_EXT, p_context->rx.valid_byte_cnt , 0 ); + queue_work(p_context->reader.p_worker, + &(p_context->reader.worker)); + } + + MSM_HSUART_EXIT(); +} + +/* + * The Tx RDY callback function. This function is called to indicate that the TX FIFO is empty + * + * @param[in][out] - p_data - pointer to the hsuart context to work on. + * + * return - None + * + */ +void __msm_hsuart_tx_rdy_cbk(void* p_data) +{ + struct hsuart_context* p_context; + + MSM_HSUART_ENTER(); + + if (NULL == p_data) { + MSM_HSUART_ERR("%s: %s, invalid(null) contxt\n", + DRIVER_NAME, + __PRETTY_FUNCTION__); + return; + } + p_context = (struct hsuart_context*)p_data; + if (NULL != p_context) { + msm_uartdm_disable_tx_rdy(p_context->p_uart_port); + if ( p_context->state == MSM_HSUART_STATE_SUSPENDING ) { + wake_up(&p_context->rx.transaction_complete); + } + + p_context->tx.state = MSM_HSUART_TX_STATE_IDLE; + queue_work(p_context->writer.p_worker, + &(p_context->writer.worker)); + } + MSM_HSUART_EXIT(); +} + +/* + * The Tx level callback function. This function is called to indicate that the TX FIFO is + * below the configured threshold. + * + * @param[in][out] - p_data - pointer to the hsuart context to work on. 
+ * + * return - None + * + */ +void __msm_hsuart_tx_level_cbk(void* p_data) +{ + struct hsuart_context* p_context; + + MSM_HSUART_ENTER(); + + if (NULL == p_data) { + MSM_HSUART_ERR("%s: %s, invalid(null) contxt\n", + DRIVER_NAME, + __PRETTY_FUNCTION__); + return; + } + p_context = (struct hsuart_context*)p_data; + if (NULL != p_context) { + msm_uartdm_disable_tx_level(p_context->p_uart_port); + queue_work(p_context->writer.p_worker, + &(p_context->writer.worker)); + } + MSM_HSUART_EXIT(); +} +/* + * ============= Worker threads =============== + */ + +/** +* +* Worker thread, responsible to write to the UART port indicated by the context +* +* @param[in][out] io_p_work - Pointer to the workqueue. +* +* @return None. +* +*/ +static void +__msm_hsuart_write_worker(struct work_struct* io_p_work) +{ + struct hsuart_worker* p_wr_worker; + struct hsuart_context* p_context; + struct buffer_item* p_buffer = NULL; + unsigned long flags; + unsigned int data; + int ready = 0; + int start_xfer = 0; + + int tx_state ; + + MSM_HSUART_ENTER(); + + p_wr_worker = container_of(io_p_work, struct hsuart_worker, worker); + + p_context = container_of(p_wr_worker, struct hsuart_context, writer); + + + if ( p_context ) { + + + spin_lock_irqsave(&(p_context->lock), flags); + + MSM_HSUART_LOG(p_context, MSM_UART_TX_PIO_EVENT, 1 , p_context->tx.state ); + tx_state = p_context->tx.state; + if ( !p_context->p_tx_buffer && ( p_context->state == MSM_HSUART_STATE_ACTIVE )) { + p_context->p_tx_buffer = p_context->tx_get_buffer.p_cbk( p_context->tx_get_buffer.p_data ); + } + + p_buffer = p_context->p_tx_buffer; + + if ( !p_buffer || tx_state == MSM_HSUART_TX_STATE_WAIT_COMPLETION ) { + MSM_HSUART_LOG(p_context, MSM_UART_TX_PIO_EVENT, 2 , p_buffer ); + spin_unlock_irqrestore(&(p_context->lock), flags); + goto exit; + } + + if ( tx_state == MSM_HSUART_TX_STATE_IDLE ) { + MSM_HSUART_LOG(p_context, MSM_UART_TX_PIO_EVENT, 3 , p_buffer->fullness ); + start_xfer = 1; + p_context->tx.transaction_size = 
p_context->p_tx_buffer->fullness; + p_context->tx.state = tx_state = MSM_HSUART_TX_STATE_IN_PROGRESS; + } + + spin_unlock_irqrestore(&(p_context->lock), flags); + + MSM_HSUART_LOG(p_context, MSM_UART_WRITE_WORKER_ENT, p_buffer->fullness , start_xfer ); + + if (start_xfer) { + + msm_uartdm_config_write_size(p_context->p_uart_port, + p_buffer->fullness); + } + + { + //printk("%s,%d\n",__FUNCTION__, p_buffer->fullness); + for (;p_buffer->fullness > 3;) { + + ready = msm_uartdm_tx_ready(p_context->p_uart_port); + if (!ready) { + // printk(KERN_ERR"\nzz%d,%d\n",__LINE__,p_buffer->fullness); + goto done_write_chunk; + } + data = *(int *)(&p_buffer->p_vaddr[p_buffer->read_index]); + #if MSM_HSUART_FEATURE_PRINT_TX_DATA + printk(KERN_ERR"<<<>>>\n", data); + #endif // MSM_HSUART_FEATURE_PRINT_TX_DATA + msm_uartdm_send_dword(p_context->p_uart_port, data); + p_buffer->fullness -= 4; + p_buffer->read_index += 4; + if (p_buffer->read_index >= p_buffer->size) { + p_buffer->read_index -= p_buffer->size; + } + } + /* + * Handle the last bytes which are less than a dword (4 byte) + * - Make sure to reset the buffer bookkeeping when done. 
+ */ + if (p_buffer->fullness) { + ready = msm_uartdm_tx_ready(p_context->p_uart_port); + if (!ready) { + //printk(KERN_ERR"\nzz%d\n",__LINE__); + goto done_write_chunk; + } + + data = *(int *)(&p_buffer->p_vaddr[p_buffer->read_index]); + #if MSM_HSUART_FEATURE_PRINT_TX_DATA + { + printk(KERN_ERR"fullness %d\n",p_buffer->fullness); + printk(KERN_ERR"============\n"); + int byte_cnt = p_buffer->fullness; + // for (;byte_cnt;byte_cnt--){ + // int shift = 8 * (byte_cnt-1); + // printk(KERN_ERR"data: is %c\n", (data >> shift) & 0xFF); + // } + printk(KERN_ERR"============\n"); + printk(KERN_ERR"<<<>>>\n", data); + } + #endif //MSM_HSUART_FEATURE_PRINT_TX_DATA + + msm_uartdm_send_dword(p_context->p_uart_port, data); + } + + spin_lock_irqsave(&(p_context->lock), flags); + + p_buffer->fullness = 0; + p_buffer->read_index = 0; + p_buffer->write_index = 0; + p_context->tx_put_buffer.p_cbk( + p_context->tx_put_buffer.p_data, + p_buffer, + p_context->tx.transaction_size); + p_context->tx.transaction_size = 0; + + p_context->p_tx_buffer = NULL; + + p_context->tx.state = MSM_HSUART_TX_STATE_WAIT_COMPLETION; + msm_uartdm_enable_tx_rdy(p_context->p_uart_port); + + spin_unlock_irqrestore(&(p_context->lock), flags); + + + } + + goto exit; + } + else { + MSM_HSUART_ERR("%s:%s, error, no buffer associated with the context\n", + DRIVER_NAME, + __FUNCTION__); + } + +done_write_chunk: + msm_uartdm_enable_tx_level(p_context->p_uart_port); + + +exit: + MSM_HSUART_LOG(p_context, MSM_UART_WRITE_WORKER_EXT, 0 , 0 ); + MSM_HSUART_EXIT(); +} +/** +* +* Private function to configure the flow control of the UART port. +* +* @param[in][out] io_p_context - Pointer to the uart context +* to operate on. +* @param[in] flow_dir - RX or TX +* +* @param[in] flow_ctl - flow control , SW or HW +* +* @param[in] flow_state - assert or de-assert flow ctl line +* Appplicable for rx flow line in SW ctl mode only +* +* @return 0 for success -1 otherwise. 
*
* @Note	The function assume that any locking of the context DB
*	will be done by the calling layer.
*/
static void __msm_hsuart_set_flow(struct hsuart_context* io_p_context, int flow_dir, int flow_ctl, int flow_state)
{
	if ( flow_dir == MSM_HSUART_FLOW_DIR_RX ) {
		if (HSUART_MODE_FLOW_CTRL_HW == flow_ctl) {
			msm_uartdm_set_rx_flow(io_p_context->p_uart_port, 1 , 0);
		}
		else {
			int set_flow_state;

			/* With flow control off, the rx flow line is forced de-asserted. */
			if ( HSUART_MODE_FLOW_CTRL_NONE == flow_ctl ) {
				flow_state = MSM_HSUART_FLOW_STATE_DEASSERT;
			}

			set_flow_state = ( flow_state == MSM_HSUART_FLOW_STATE_ASSERT ) ? 1 : 0;

			msm_uartdm_set_rx_flow(io_p_context->p_uart_port, 0, set_flow_state);
		}
		/* Cache the settings so __msm_hsuart_resume() can re-apply them. */
		io_p_context->rx.flow_ctl = flow_ctl;
		io_p_context->rx.flow_state = flow_state;
	}

	if ( flow_dir == MSM_HSUART_FLOW_DIR_TX ) {
		if (HSUART_MODE_FLOW_CTRL_HW == flow_ctl) {
			msm_uartdm_set_tx_flow(io_p_context->p_uart_port, 1);
		}
		else {
			msm_uartdm_set_tx_flow(io_p_context->p_uart_port, 0);
		}
		io_p_context->tx.flow_ctl = flow_ctl;
	}
}

/*
 * Arm the UART DM engine for an rx transfer into the context's current rx
 * buffer (or io_p_buffer, if given) and enable the rx interrupts.
 * Returns the programmed read size (rx.max_packet_size).
 * NOTE(review): callers invoke this both with and without p_context->lock
 * held - confirm the intended locking contract.
 */
int
msm_hsuart_config_dm_rx(struct hsuart_context* p_context,struct buffer_item* io_p_buffer , bool enable_stale)
{
	size_t read_size = p_context->rx.max_packet_size;

	MSM_HSUART_LOG(p_context, MSM_UART_DM_CONFIG_RX_ENT, 0 , 0 );

	if ( io_p_buffer ) {
		p_context->p_rx_buffer = io_p_buffer;
	}

	p_context->rx.state = MSM_HSUART_RX_STATE_DM_IN_PROGRESS;
	/* DMA destination is the physical address offset by the write index. */
	msm_uartdm_rx_dm_config( p_context->p_uart_port,
				(p_context->p_rx_buffer->phys_addr + p_context->p_rx_buffer->write_index),
				read_size);

	msm_uartdm_enable_rx_irqs(p_context->p_uart_port, enable_stale);
	MSM_HSUART_LOG(p_context, MSM_UART_DM_CONFIG_RX_EXT, 0 , 0 );

	return read_size;
}

/**
*
* Rx DM flush function - determines whether DM flush is required and flushes DM ( if needed )
*
* @param[in][out] p_context - Pointer to context
*
* @return true - DM flush was required and executed
* false - DM flush was not required
*/
bool
msm_hsuart_flush_dm(struct hsuart_context* p_context)
{
	unsigned long flags;
	bool flushing;

	spin_lock_irqsave(&(p_context->lock), flags);

	flushing = ( p_context->rx.state == MSM_HSUART_RX_STATE_DM_IN_PROGRESS );

	if ( flushing ) {
		p_context->rx.state = MSM_HSUART_RX_STATE_DM_FLUSHING;
		msm_uartdm_disable_rx_irqs(p_context->p_uart_port);
		msm_uartdm_rx_dm_flush(p_context->p_uart_port);
	}

	spin_unlock_irqrestore(&(p_context->lock), flags);

	if ( flushing ) {
		/*
		 * The dm worker sets rx.state back to IDLE and wakes us.
		 * NOTE(review): an interruptible wait can return early on a
		 * signal while the flush is still pending - confirm callers
		 * tolerate that.
		 */
		wait_event_interruptible(p_context->rx.transaction_complete, p_context->rx.state == MSM_HSUART_RX_STATE_IDLE );
	}

	return flushing;
}

/**
*
* Rx DM resume function - resumes RX DM transactions ( called after DM Flush was executed )
*
* @param[in][out] p_context - Pointer to context
*
* @return None.
*
*/
void
msm_hsuart_resume_dm(struct hsuart_context* p_context)
{
	msm_hsuart_config_dm_rx(p_context, NULL, p_context->rx.enable_stale);
}

/**
*
* Worker thread, responsible to read from the UART in PIO mode
*
* @param[in][out] io_p_work - Pointer to the workqueue.
*
* @return None.
*
*/
static void
__msm_hsuart_read_pio_worker(struct work_struct* io_p_work)
{
	struct hsuart_worker* p_rd_worker;
	struct hsuart_context* p_context;
	struct buffer_item* p_buffer = NULL;
	unsigned long flags;
	int write_index;
	int fullness;
	int size;
	unsigned int* p_word;
	/*
	 * born ready!
	 */
	int ready = 1;

	MSM_HSUART_ENTER();
	p_rd_worker = container_of(io_p_work, struct hsuart_worker, worker);

	p_context = container_of(p_rd_worker, struct hsuart_context, reader);

	if (p_context && (p_context->p_rx_buffer)) {
		spin_lock_irqsave(&(p_context->lock), flags);
		MSM_HSUART_DEBUG("%s, %d, irq_cnt %d, valid_bytes %d read_byte_cnt %d\n",__FUNCTION__, __LINE__,irq_fire_cnt, p_context->rx.valid_byte_cnt, p_context->rx.read_byte_cnt);
		/* Local copies of the ring-buffer bookkeeping for the drain loop. */
		p_buffer = p_context->p_rx_buffer;
		write_index = p_buffer->write_index;
		fullness = p_buffer->fullness;
		size = p_buffer->size;

		/* Drain dwords from the rx FIFO until we consumed all valid bytes. */
		for (;0 < (p_context->rx.valid_byte_cnt - p_context->rx.read_byte_cnt);) {
			ready = msm_uartdm_rx_ready(p_context->p_uart_port);
			if (!ready) {
				printk("\n\n*********!rdy**************\n\n");
				break;
			}
			/* Less than one dword of room left - swap in a fresh buffer. */
			while (size - fullness < 4) {
				/*
				 * First, deposit the fresh buffer.
				 * Note the lock is dropped around the buffer
				 * callbacks, which may block.
				 */
				p_buffer->write_index = write_index;
				p_buffer->fullness = fullness;
				spin_unlock_irqrestore(&(p_context->lock), flags);

				p_context->rx_put_buffer.p_cbk(
						p_context->rx_put_buffer.p_data,
						p_buffer);
				/*
				 * Ask for the next buffer to read into.
				 */
				p_buffer = p_context->rx_get_buffer.p_cbk(
						p_context->rx_get_buffer.p_data,
						4);
				write_index = p_buffer->write_index;
				fullness = p_buffer->fullness;
				size = p_buffer->size;
				spin_lock_irqsave(&(p_context->lock), flags);
			}

			p_word = (unsigned int*)&(p_buffer->p_vaddr[write_index]);
			(*p_word) =
				msm_uartdm_get_dword(p_context->p_uart_port);
#if MSM_HSUART_FEATURE_PRINT_RX_DATA
			printk("+++++ 0x%x +++++\n",(*p_word));
#endif /* MSM_HSUART_FEATURE_PRINT_RX_DATA */
			write_index += 4;
			fullness += 4;
			p_context->rx.read_byte_cnt += 4;
			if (write_index >= size) {
				/* Circular buffer - wrap the write index. */
				write_index -= size;
			}
		}

		/* Publish the updated bookkeeping back into the buffer. */
		p_buffer->write_index = write_index;
		p_buffer->fullness = fullness;
		p_context->p_rx_buffer = p_buffer;
		MSM_HSUART_DEBUG("%s, write_index = %d; fullness = %d; valid_byte_cnt = %d\n",
			__FUNCTION__,
			write_index,
			fullness,
			p_context->rx.valid_byte_cnt);

		/*
		 * End of rx transaction....
		 */

		if (0 >= (p_context->rx.valid_byte_cnt - p_context->rx.read_byte_cnt)) {
			if (p_context->rx.the_end) {
				/*
				 * In case that we received a number that is not
				 * a multiple of dwords (4bytes), make sure to
				 * mark the relevant bytes only.
				 */
				if (0 != (p_context->rx.valid_byte_cnt & 0x3)) {
					p_buffer->fullness -=
						(4 - (p_context->rx.valid_byte_cnt & 0x3));
					//TODO: need to fix the write_index as well
				}
				MSM_HSUART_DEBUG(" THE END!\n");
				MSM_HSUART_DEBUG(" DMRX at reader 0x%x\n", msm_uartdm_get_received_byte_cnt(p_context->p_uart_port));
				/* Re-arm for the next (max-size) transfer. */
				msm_uartdm_config_read_size(p_context->p_uart_port,
							0xFFFF);
				p_context->rx.the_end = 0;
				p_context->rx.read_byte_cnt = 0;
				p_context->rx.valid_byte_cnt = 0;
				msm_uartdm_enable_rx_irqs(p_context->p_uart_port, 1);

				MSM_HSUART_DEBUG("==> END OF READ XFER (fullness %d, valid_byte_cnt %d)<==\n",p_buffer->fullness, p_context->rx.valid_byte_cnt);
				spin_unlock_irqrestore(&(p_context->lock), flags);

				/*
				 * First, deposit the fresh buffer.
				 */
				p_context->rx_put_buffer.p_cbk(
						p_context->rx_put_buffer.p_data,
						p_context->p_rx_buffer);
				/*
				 * Ask for the next buffer to read into.
				 */
				p_context->p_rx_buffer = p_context->rx_get_buffer.p_cbk(
								p_context->rx_get_buffer.p_data,
								4);
			}
			else {
				msm_uartdm_enable_rx_irqs(p_context->p_uart_port, 0);
				spin_unlock_irqrestore(&(p_context->lock), flags);

			}
		}
		else {
			msm_uartdm_enable_rx_irqs(p_context->p_uart_port, 0);
			spin_unlock_irqrestore(&(p_context->lock), flags);

		}

		MSM_HSUART_DEBUG("%s, %d, irq_cnt %d, valid_bytes %d read_byte_cnt %d\n",__FUNCTION__, __LINE__,irq_fire_cnt, p_context->rx.valid_byte_cnt, p_context->rx.read_byte_cnt);
	}
	else {
		MSM_HSUART_ERR("%s:%s, error, no buffer associated with the context\n",
				DRIVER_NAME,
				__FUNCTION__);
	}
	MSM_HSUART_DEBUG("%s, %d, irq_cnt %d, valid_bytes %d read_byte_cnt %d\n",__FUNCTION__, __LINE__,irq_fire_cnt, p_context->rx.valid_byte_cnt, p_context->rx.read_byte_cnt);

	MSM_HSUART_EXIT();
}


/**
*
* Worker thread, responsible to read from the UART in DMA mode
*
* @param[in][out] io_p_work - Pointer to the workqueue.
*
* @return None.
*
*/
void hsuart_tty_flip(void);

static void
__msm_hsuart_read_dm_worker(struct work_struct* io_p_work)
{
	struct hsuart_worker* p_rd_worker;
	struct hsuart_context* p_context;
	struct buffer_item* p_buffer = NULL;

	MSM_HSUART_ENTER();

	p_rd_worker = container_of(io_p_work, struct hsuart_worker, worker);

	p_context = container_of(p_rd_worker, struct hsuart_context, reader);


	if (p_context && (p_context->p_rx_buffer)) {

		unsigned long flags;
		bool flushing ;

		MSM_HSUART_LOG(p_context, MSM_UART_DM_WQ_ENT, p_context->rx.valid_byte_cnt , 0 );

		spin_lock_irqsave(&(p_context->lock), flags);

		flushing = ( p_context->rx.state == MSM_HSUART_RX_STATE_DM_FLUSHING );

		p_buffer = p_context->p_rx_buffer;

		p_context->rx.state = MSM_HSUART_RX_STATE_IDLE;

		if ( unlikely(flushing) ) {
			/* Flush in progress: drop the data, wake the flusher. */
			p_context->rx.valid_byte_cnt = 0;
			wake_up(&p_context->rx.transaction_complete);
		} else {
			/* Account for the bytes the DM engine deposited. */
			p_buffer->fullness += p_context->rx.valid_byte_cnt;
			p_buffer->write_index += p_context->rx.valid_byte_cnt;


			if ( p_context->rx.valid_byte_cnt > 0 ) {
				p_context->rx_put_buffer.p_cbk(
					p_context->rx_put_buffer.p_data,
					p_context->p_rx_buffer);

				p_context->p_rx_buffer = p_context->rx_get_buffer.p_cbk(
						p_context->rx_get_buffer.p_data,
						p_context->rx.max_packet_size);
				/*
				 * buffers to DM must be word aligned.
				 * NOTE(review): this check/mask operates on the
				 * buffer_item STRUCT pointer, not on the DMA
				 * data address (phys_addr/write_index) that is
				 * actually handed to the DM engine; masking the
				 * struct pointer would corrupt it - confirm
				 * intent.
				 */
				if ( ((unsigned int)p_context->p_rx_buffer & 0xFFFFFFFC) != (unsigned int)p_context->p_rx_buffer){
					MSM_HSUART_ERR("rx buffer is not word aligned 0x%x, fixing to 0x%x\n",
							p_context->p_rx_buffer,
							(unsigned int)p_context->p_rx_buffer & 0xFFFFFFFC);
					p_context->p_rx_buffer = (struct buffer_item*)(((unsigned int)p_context->p_rx_buffer) & 0xFFFFFFFC);
				}
			}
			else {
				MSM_HSUART_DEBUG( " read work empty received \n");
			}

			msm_hsuart_config_dm_rx( p_context, NULL , p_context->rx.enable_stale);
		}

		spin_unlock_irqrestore(&(p_context->lock), flags);

		MSM_HSUART_LOG(p_context, MSM_UART_DM_WQ_EXT, 0 , 0 );

	}

	MSM_HSUART_EXIT();
//	hsuart_tty_flip();
}

/* Low-level suspend: hand the port to the platform layer. */
static int __msm_hsuart_suspend(struct hsuart_context* p_context)
{
	msm_uartdm_port_suspend(p_context->p_uart_port);

	return 0;
}

/* Low-level resume: bring the port back and re-apply cached flow settings. */
static int __msm_hsuart_resume(struct hsuart_context* p_context)
{
	msm_uartdm_port_resume(p_context->p_uart_port);

	__msm_hsuart_set_flow(p_context, MSM_HSUART_FLOW_DIR_RX, p_context->rx.flow_ctl , p_context->rx.flow_state);

	__msm_hsuart_set_flow(p_context, MSM_HSUART_FLOW_DIR_TX, p_context->tx.flow_ctl , 0);

	msm_uartdm_enable_rx(p_context->p_uart_port);
	msm_uartdm_enable_tx(p_context->p_uart_port);

	return 0;
}

/**
*
* opens a msm hsuart context.
*
* @param[in] i_p_cfg - the configuration to use.
* @param[out] o_p_context_id_handle - Pointer to a container to be filled
* with the newly created context in case
* of success otherwise undefined.
* @return 0 for success -1 otherwise.
*
* @Note The returned handle is the address of the context_id field cast to
* int (32-bit pointer assumption); callers recover the context with
* container_of.
*/
int
msm_hsuart_open_context(struct hsuart_config* io_p_cfg, int* o_p_context_id_handle)
{
	int ret = 0;
	struct hsuart_context* p_context = NULL;
	struct generic_uart_config cfg = {0};

	MSM_HSUART_ENTER();

	if ((NULL == io_p_cfg) || (NULL == o_p_context_id_handle)) {
		ret = -EINVAL;
	}
	else {
		/*
		 * TODO:HSUART Lock the global driver DB
		 */

		/*
		 * Allocate control block.
		 */
		p_context = kzalloc(sizeof(struct hsuart_context), GFP_KERNEL);
		if (NULL == p_context) {
			ret = -EINVAL;
			MSM_HSUART_ERR("%s, error, failed allocing context\n", __FUNCTION__);
			goto exit;
		}

		p_context->flags = io_p_cfg->flags;
		p_context->state = MSM_HSUART_STATE_ACTIVE;

		p_context->rx.max_packet_size = io_p_cfg->max_packet_size;
		p_context->rx.min_packet_size = io_p_cfg->min_packet_size;
		p_context->rx.latency = io_p_cfg->rx_latency;
		p_context->rx.state = MSM_HSUART_RX_STATE_IDLE;

		p_context->p_board_pin_mux_cb = io_p_cfg->p_board_pin_mux_cb;
		p_context->p_board_rts_pin_deassert_cb = io_p_cfg->p_board_rts_pin_deassert_cb;

		init_waitqueue_head(&p_context->rx.transaction_complete);
		init_waitqueue_head(&p_context->tx.transaction_complete);

		p_context->tx.state = MSM_HSUART_TX_STATE_IDLE;
		p_context->tx.transaction_size = 0;
		/*
		 * TODO:MSM_HSUART, init rxtx_lists and other structs in the context
		 */
		p_context->context_id = context_cnt++;
		init_completion(&(p_context->reader.xfer_done));
		init_completion(&(p_context->writer.xfer_done));
		mutex_init(&(p_context->read_complete_lock));
		mutex_init(&(p_context->write_complete_lock));

		spin_lock_init(&(p_context->lock));
		/*
		 * Get the desired UART port ID from the context into
		 * the configuration request.
		 */
		cfg.port_id = io_p_cfg->port_id;

		if ( RX_MODE_PIO(p_context) ) {
			cfg.flags |= UART_DM_MODE_RX_PIO;
		}

		if ( RX_MODE_DM(p_context) ) {
			cfg.flags |= UART_DM_MODE_RX_DM;
		}

		cfg.rx_latency = p_context->rx.latency;
		cfg.p_board_pin_mux_cb = p_context->p_board_pin_mux_cb;
		cfg.p_board_config_gsbi_cb = io_p_cfg->p_board_gsbi_config_cb;
		cfg.p_board_rts_pin_deassert_cb = p_context->p_board_rts_pin_deassert_cb;


		p_context->rx_get_buffer.p_cbk = io_p_cfg->rx_get_buffer.p_cbk;
		p_context->rx_get_buffer.p_data = io_p_cfg->rx_get_buffer.p_data;
		p_context->rx_put_buffer.p_cbk = io_p_cfg->rx_put_buffer.p_cbk;
		p_context->rx_put_buffer.p_data = io_p_cfg->rx_put_buffer.p_data;

		p_context->tx_get_buffer.p_cbk = io_p_cfg->tx_get_buffer.p_cbk;
		p_context->tx_get_buffer.p_data = io_p_cfg->tx_get_buffer.p_data;
		p_context->tx_put_buffer.p_cbk = io_p_cfg->tx_put_buffer.p_cbk;
		p_context->tx_put_buffer.p_data = io_p_cfg->tx_put_buffer.p_data;

		ret = msm_uartdm_port_open(&cfg, &(p_context->p_uart_port));

		MSM_HSUART_LOG(p_context, MSM_UART_OPEN_CONTEXT_ENT, 0 , 0 );

		if ( RX_MODE_DM(p_context) ) {
			//Stale is enabled in dm mode when expecting variable size packets
			p_context->rx.enable_stale = !RX_MODE_DM_FIXED_PACKET_LEN(p_context);
		}

		if ( RX_MODE_PIO(p_context) ) {
			p_context->rx.enable_stale = 1;
		}

		MSM_HSUART_DEBUG("%s: %s, allocated platform hsuart, handle_0x%x\n",
				DRIVER_NAME,
				__PRETTY_FUNCTION__,
				(unsigned int)p_context->p_uart_port);

		/* Register the mode-specific interrupt callbacks with the port. */
		if ( RX_MODE_DM(p_context) ) {
			if (0 == ret) {
				ret = msm_uartdm_set_rx_dm_cbk(p_context->p_uart_port,
							__msm_hsuart_rx_dm_cbk,
							(void*)p_context);
				MSM_HSUART_DEBUG("%s: %s, registered rx dma cbk, handle_0x%x, ret %d\n",
						DRIVER_NAME,
						__PRETTY_FUNCTION__,
						(unsigned int)p_context->p_uart_port,
						ret);
			}

			if (0 == ret && p_context->rx.enable_stale) {
				ret = msm_uartdm_set_rx_stale_cbk(p_context->p_uart_port,
							__msm_hsuart_rx_dm_stale_cbk,
							(void*) p_context);

				MSM_HSUART_DEBUG("%s: %s, registered dm rx stale cbk, handle_0x%x, ret %d\n",
						DRIVER_NAME,
						__PRETTY_FUNCTION__,
						(unsigned int)p_context->p_uart_port,
						ret);
			}

		}


		if ( TX_MODE_PIO(p_context) ) {

			if (0 == ret) {
				ret = msm_uartdm_set_tx_level_cbk(p_context->p_uart_port,
							__msm_hsuart_tx_level_cbk,
							(void*)p_context);
				MSM_HSUART_DEBUG("%s: %s, registered tx level cbk, handle_0x%x, ret %d\n",
						DRIVER_NAME,
						__PRETTY_FUNCTION__,
						(unsigned int)p_context->p_uart_port,
						ret);
			}

			if (0 == ret) {
				ret = msm_uartdm_set_tx_rdy_cbk(p_context->p_uart_port,
							__msm_hsuart_tx_rdy_cbk,
							(void*)p_context);
				MSM_HSUART_DEBUG("%s: %s, registered tx rdy cbk, handle_0x%x, ret %d\n",
						DRIVER_NAME,
						__PRETTY_FUNCTION__,
						(unsigned int)p_context->p_uart_port,
						ret);
			}
		}

		if ( RX_MODE_PIO(p_context) ) {

			if (0 == ret) {
				ret = msm_uartdm_set_rx_level_cbk(p_context->p_uart_port,
							__msm_hsuart_rx_level_cbk,
							(void*) p_context);
				MSM_HSUART_DEBUG("%s: %s, registered rx level cbk, handle_0x%x, ret %d\n",
						DRIVER_NAME,
						__PRETTY_FUNCTION__,
						(unsigned int)p_context->p_uart_port,
						ret);
			}

			if (0 == ret) {
				ret = msm_uartdm_set_rx_stale_cbk(p_context->p_uart_port,
							__msm_hsuart_rx_pio_stale_cbk,
							(void*) p_context);
				MSM_HSUART_DEBUG("%s: %s, registered rx pio stale cbk, handle_0x%x, ret %d\n",
						DRIVER_NAME,
						__PRETTY_FUNCTION__,
						(unsigned int)p_context->p_uart_port,
						ret);
			}

		}


		if (0 == ret) {
			ret = msm_uartdm_port_init(p_context->p_uart_port);
			MSM_HSUART_DEBUG("%s: %s, initialized platform hsuart, handle_0x%x, ret %d\n",
					DRIVER_NAME,
					__PRETTY_FUNCTION__,
					(unsigned int)p_context->p_uart_port,
					ret);
		}

		/*
		 * TODO:HSUART Add the control block into list of ctl blocks
		 */

		/*
		 * Check and handle the configuration options for the context
		 */
		if (RX_MODE_DM(p_context)) {
			/*
			 * Config and launch worker thread to handle
			 * read request
			 */
			snprintf(p_context->reader.name,
sizeof(p_context->reader.name), + "hsuart_rd_dm_%d", + context_cnt); + p_context->reader.p_worker = + create_singlethread_workqueue(p_context->reader.name); + INIT_WORK(&(p_context->reader.worker), + __msm_hsuart_read_dm_worker); + } else if (RX_MODE_PIO(p_context)) { + /* + * Config and launch worker thread to handle + * read request + */ + snprintf(p_context->reader.name, + sizeof(p_context->reader.name), + "hsuart_rd_pio_%d", + context_cnt); + p_context->reader.p_worker = + create_singlethread_workqueue(p_context->reader.name); + + INIT_WORK(&(p_context->reader.worker), + __msm_hsuart_read_pio_worker); + } + + if (TX_MODE_PIO(p_context) || TX_MODE_DM(p_context)) { + /* + * Config and launch worker thread to handle + * write request + */ + snprintf(p_context->writer.name, + sizeof(p_context->writer.name), + "hsuart_wr_%d", + context_cnt); + p_context->writer.p_worker = + create_singlethread_workqueue(p_context->reader.name); + INIT_WORK(&(p_context->writer.worker), + __msm_hsuart_write_worker); + + } + + /* + * Unlock he driver DB + */ + } + + MSM_HSUART_EXIT(); +exit: + if (0 != ret) { + /* + * Cleanup time... + */ + if (p_context) { + if (p_context->p_uart_port) { + /* + * TODO:HSUART remove the rx/tx cbks. + */ + msm_uartdm_port_close(p_context->p_uart_port); + } + kfree(p_context); + } + } + else { + (*o_p_context_id_handle) = (int)&(p_context->context_id); + } + + MSM_HSUART_LOG(p_context, MSM_UART_OPEN_CONTEXT_EXT, 0 , 0 ); + + return ret; +} + +EXPORT_SYMBOL(msm_hsuart_open_context); + + +/** +* +* close msm hsuart context. +* +* @param[in] context_id_handle - Handle that serves as the +* context ID to close +* +* @return 0 for success -1 otherwise. 
*
* @Note
*/
int
msm_hsuart_close_context(int context_id_handle)
{
	int ret = 0;
	struct hsuart_context* p_context;

	MSM_HSUART_ENTER();

	/* Recover the context from the handle (address of its context_id). */
	p_context = container_of((void*)context_id_handle,
				struct hsuart_context,
				context_id);
	if (NULL != p_context) {

		MSM_HSUART_LOG(p_context, MSM_UART_CLOSE_CONTEXT_ENT, 0 , 0 );

		/* Harmless in PIO mode: flush only acts when a DM rx is in flight. */
		msm_hsuart_flush_dm(p_context);
		/*
		 * Stop the workqueues.
		 */
		if (RX_MODE_DM(p_context) || RX_MODE_PIO(p_context)) {
			flush_workqueue(p_context->reader.p_worker);
			destroy_workqueue(p_context->reader.p_worker);
		}

		if (TX_MODE_DM(p_context) || TX_MODE_PIO(p_context)) {
			flush_workqueue(p_context->writer.p_worker);
			destroy_workqueue(p_context->writer.p_worker);
		}

		ret = msm_uartdm_port_close(p_context->p_uart_port);
		if (p_context->p_rx_buffer) {
			/*
			 * Deposit the buffer back to where it came from...
			 */
			p_context->rx_put_buffer.p_cbk(
				p_context->rx_put_buffer.p_data,
				p_context->p_rx_buffer);
		}
		MSM_HSUART_DEBUG("%s: %s, released platform hsuart, handle_0x%x, ret %d\n",
				DRIVER_NAME,
				__PRETTY_FUNCTION__,
				(unsigned int)p_context->p_uart_port,
				ret);

		p_context->p_uart_port = NULL;

		kfree(p_context);
	}
	else {
		ret = -EINVAL;
	}
	MSM_HSUART_EXIT();
	return ret;
}

EXPORT_SYMBOL(msm_hsuart_close_context);

/* Kick the writer workqueue; the worker does the actual transmit. */
static int __msm_hsuart_write(struct hsuart_context* p_context)
{
	int ret = 0;

	MSM_HSUART_ENTER();

	if ((NULL != p_context)) {

		queue_work(p_context->writer.p_worker,
			&(p_context->writer.worker));

	}
	else {
		MSM_HSUART_ERR("%s, %s, error - illegal parameters p_ctxt_0x%x\n",
				DRIVER_NAME,
				__FUNCTION__,
				(unsigned int) p_context);
		ret = -EINVAL;
	}
	MSM_HSUART_EXIT();
	return ret;
}

/**
*
* start/continue writing data to the msm hsuart context.
*
* @param[in] context_id_handle - Handle that serves as the
* context ID to write from.
*
*
* @return 0 for success -1 otherwise.
*
* @Note
*/
int
msm_hsuart_write(int context_id_handle)
{
	int ret = 0;
	struct hsuart_context* p_context;

	MSM_HSUART_ENTER();
	p_context = container_of((void*)context_id_handle,
				struct hsuart_context,
				context_id);
	if ((NULL != p_context)) {
		/*
		 * Make sure we don't initiate another tx before the current
		 * one is complete.
		 */
		__msm_hsuart_write(p_context);
	}
	else {
		MSM_HSUART_ERR("%s, %s, error - illegal parameters p_ctxt_0x%x\n",
				DRIVER_NAME,
				__FUNCTION__,
				(unsigned int) p_context);
		ret = -EINVAL;
	}
	MSM_HSUART_EXIT();
	return ret;
}

EXPORT_SYMBOL(msm_hsuart_write);

/* Block until the TX state machine returns to IDLE (interruptible). */
static void __msm_hsuart_tx_wait_for_completion(struct hsuart_context* p_context)
{
	//Wait for TX transaction to complete
	wait_event_interruptible(p_context->tx.transaction_complete, p_context->tx.state == MSM_HSUART_TX_STATE_IDLE );
}

/**
*
* suspend msm hsuart context
*
* @param[in] context_id_handle - Handle that serves as the
* context ID to suspend
*
* @return 0 for success -1 otherwise.
*
* @Note
*/
int msm_hsuart_suspend(int context_id_handle)
{
	int ret = 0;
	struct hsuart_context* p_context;
	unsigned long flags;

	MSM_HSUART_ENTER();

	p_context = container_of((void*)context_id_handle,
				struct hsuart_context,
				context_id);

	if (NULL != p_context) {

		MSM_HSUART_LOG(p_context, MSM_UART_SUSPEND_ENT, 0 , 0 );

		spin_lock_irqsave(&(p_context->lock), flags);

		if ( p_context->state == MSM_HSUART_STATE_ACTIVE ) {

			/* Mark SUSPENDING, then drop the lock to wait for tx/rx drain. */
			p_context->state = MSM_HSUART_STATE_SUSPENDING;

			spin_unlock_irqrestore(&(p_context->lock), flags);

			__msm_hsuart_tx_wait_for_completion(p_context);

			if ( RX_MODE_DM(p_context) ) {
				msm_hsuart_flush_dm(p_context);
			}

			spin_lock_irqsave(&(p_context->lock), flags);

			p_context->state = MSM_HSUART_STATE_SUSPENDED;
			ret = __msm_hsuart_suspend(p_context);
		}

		spin_unlock_irqrestore(&(p_context->lock), flags);
		MSM_HSUART_LOG(p_context, MSM_UART_SUSPEND_EXT, 0 , 0 );

	}

	MSM_HSUART_EXIT();
	return ret;
}

EXPORT_SYMBOL(msm_hsuart_suspend);

/**
*
* resume msm hsuart context
*
* @param[in] context_id_handle - Handle that serves as the
* context ID to resume
*
* @return 0 for success -1 otherwise.
*
* @Note
*/
int msm_hsuart_resume(int context_id_handle)
{
	int ret = 0;
	struct hsuart_context* p_context;
	unsigned long flags;

	MSM_HSUART_ENTER();

	p_context = container_of((void*)context_id_handle,
				struct hsuart_context,
				context_id);

	if (NULL != p_context) {

		MSM_HSUART_LOG(p_context, MSM_UART_RESUME_ENT, 0 , 0 );

		spin_lock_irqsave(&(p_context->lock), flags);

		if ( p_context->state != MSM_HSUART_STATE_ACTIVE ) {

			/*
			 * NOTE(review): __msm_hsuart_resume() runs with the
			 * spinlock held (irqs off); confirm
			 * msm_uartdm_port_resume() never sleeps on this path.
			 */
			ret = __msm_hsuart_resume(p_context);

			if ( !ret ) {
				p_context->state = MSM_HSUART_STATE_ACTIVE;

				if ( RX_MODE_DM(p_context) ) {
					msm_hsuart_config_dm_rx( p_context, NULL , p_context->rx.enable_stale);
				}

				if (TX_MODE_DM(p_context) || TX_MODE_PIO(p_context)) {
					__msm_hsuart_write(p_context);
				}
			}
		}

		spin_unlock_irqrestore(&(p_context->lock), flags);

		MSM_HSUART_LOG(p_context, MSM_UART_RESUME_EXT, 0 , 0 );
	}

	MSM_HSUART_EXIT();
	return ret;
}

EXPORT_SYMBOL(msm_hsuart_resume);

/**
*
* Initiate data read from the msm hsuart context in DMA mode
*
* @param[in] context_id_handle - Handle that serves as the
* context ID to read from
*
* @param[in][out] io_p_buffer - Pointer to the buffer structure
* to use for read from.
*
* @return 0 for success -1 otherwise.
*
* @Note
*/
int
msm_hsuart_read_pio(int context_id_handle, struct buffer_item* io_p_buffer)
{
	int ret = 0;
	struct hsuart_context* p_context;
	unsigned long flags;

	MSM_HSUART_DEBUG("%s, enter context_id 0x%x, p_buffer 0x%x, sz 0x%x\n",
			__FUNCTION__,
			(unsigned int)context_id_handle,
			(unsigned int)io_p_buffer,
			io_p_buffer->size);
	p_context = container_of((void*)context_id_handle,
				struct hsuart_context,
				context_id);
	if ((NULL != p_context) && (NULL != io_p_buffer)) {
		/*
		 * Make sure we don't initiate another read before the current
		 * one is complete.
		 */
#if 0
		ret = mutex_lock_interruptible(&(p_context->read_complete_lock));
		if (ret) {
			MSM_HSUART_ERR("%s, %s:failed locking the mutex ret:%d\n",
					DRIVER_NAME,
					__PRETTY_FUNCTION__,
					ret);
			goto exit;
		}
#endif
		spin_lock_irqsave(&(p_context->lock), flags);
		/*
		 * Cache that buffer to used for the current read.
		 */
		p_context->p_rx_buffer = io_p_buffer;

		/*
		 * Be positive - assume we are going to get all the bytes that
		 * we asked for...
		 */
		//use the fifo threshold level
		p_context->rx.the_end = 0;
		p_context->rx.read_byte_cnt = 0;
		p_context->rx.valid_byte_cnt = 0;//io_p_buffer->size;

		msm_uartdm_config_read_size(p_context->p_uart_port, 0xFFFF);
		msm_uartdm_enable_rx_irqs(p_context->p_uart_port, 1);

		spin_unlock_irqrestore(&(p_context->lock), flags);

#if 0
		wait_for_completion_interruptible(&(p_context->reader.xfer_done));

		mutex_unlock(&(p_context->read_complete_lock));
#endif
	}
	else {
		ret = -EINVAL;
	}
	MSM_HSUART_EXIT();
	return ret;
}

/**
*
* Initiate data read from the msm hsuart context in DMA mode
*
* @param[in] context_id_handle - Handle that serves as the
* context ID to read from
*
* @param[in][out] io_p_buffer - Pointer to the buffer structure
* to use for read from.
*
* @return 0 for success -1 otherwise.
*
* @Note
*/
int
msm_hsuart_read_dm(int context_id_handle, struct buffer_item* io_p_buffer)
{

	int ret = 0;
	struct hsuart_context* p_context;

	MSM_HSUART_DEBUG("%s, enter context_id 0x%x, p_buffer 0x%x, sz 0x%x\n",
			__FUNCTION__,
			(unsigned int)context_id_handle,
			(unsigned int)io_p_buffer,
			io_p_buffer->size);

	p_context = container_of((void*)context_id_handle,
				struct hsuart_context,
				context_id);

	if ((NULL != p_context) && (NULL != io_p_buffer)) {
		unsigned long flags;

		spin_lock_irqsave(&(p_context->lock), flags);

		p_context->rx.valid_byte_cnt = 0;
		msm_hsuart_config_dm_rx( p_context ,io_p_buffer , p_context->rx.enable_stale);

		spin_unlock_irqrestore(&(p_context->lock), flags);

	}

	MSM_HSUART_EXIT();
	return ret;
}

/**
*
* Initiate data read from the msm hsuart
*
* @param[in] context_id_handle - Handle that serves as the
* context ID to read from
*
* @param[in][out] io_p_buffer - Pointer to the buffer structure
* to use for read from.
*
* @return 0 for success -1 otherwise.
*
* @Note Dispatches to the PIO or DM read path based on the context's rx mode.
*/
int
msm_hsuart_read(int context_id_handle, struct buffer_item* io_p_buffer)
{

	int ret = 0;
	struct hsuart_context* p_context;

	MSM_HSUART_DEBUG("%s, enter context_id 0x%x, p_buffer 0x%x, sz 0x%x\n",
			__FUNCTION__,
			(unsigned int)context_id_handle,
			(unsigned int)io_p_buffer,
			io_p_buffer->size);

	p_context = container_of((void*)context_id_handle,
				struct hsuart_context,
				context_id);

	if ( RX_MODE_PIO(p_context)){
		ret = msm_hsuart_read_pio(context_id_handle, io_p_buffer);

	} else {
		ret = msm_hsuart_read_dm(context_id_handle, io_p_buffer);
	}

	MSM_HSUART_EXIT();
	return ret;
}

EXPORT_SYMBOL(msm_hsuart_read);


/**
*
* Register 'set buffer' callback function
*
* @param[in] context_id_handle - Handle to register the callback to.
* @param[in] p_cbk - Pointer to a callback function that will be
* called to get the next free buffer to read data into.
+* @param[in] p_data -Pointer to data to pass to the callback function +* when it is called. +* +* @return 0 for success -1 otherwise. +* +* @Note +*/ +int +msm_hsuart_register_rx_put_buffer( + int context_id_handle, + void (*p_cbk)(void* p_data, struct buffer_item* p_buffer), + void* p_data) +{ + int ret = 0; + unsigned long flags; + struct hsuart_context* p_context = NULL; + + MSM_HSUART_DEBUG("%s, enter context_id 0x%x\n", + __FUNCTION__, + (unsigned int)context_id_handle); + + p_context = container_of((void*)context_id_handle, + struct hsuart_context, + context_id); + if ( (NULL != p_context) && + (NULL == p_context->rx_put_buffer.p_cbk) && + (NULL == p_context->rx_put_buffer.p_data) ) { + + spin_lock_irqsave(&(p_context->lock), flags); + p_context->rx_put_buffer.p_cbk = p_cbk; + p_context->rx_put_buffer.p_data = p_data; + spin_unlock_irqrestore(&(p_context->lock), flags); + } + else { + MSM_HSUART_ERR("%s, %s, error - illegal parameters p_ctxt_0x%x\n", + DRIVER_NAME, + __FUNCTION__, + (unsigned int) p_context); + ret = -EINVAL; + } + MSM_HSUART_EXIT(); + + return ret; +} +EXPORT_SYMBOL(msm_hsuart_register_rx_put_buffer); + +/** +* +* Register 'get buffer' callback function +* +* @param[in] context_id_handle - Handle to register the callback to. +* @param[in] p_cbk - Pointer to a callback function that will be +* called to get the next free buffer to read data into. +* @param[in] p_data -Pointer to data to pass to the callback function +* when it is called. +* +* @return 0 for success -1 otherwise. 
+* +* @Note +*/ +int +msm_hsuart_register_rx_get_buffer( + int context_id_handle, + struct buffer_item* (*p_cbk)(void* p_data, int free_bytes), + void* p_data) +{ + int ret = 0; + unsigned long flags; + struct hsuart_context* p_context = NULL; + + MSM_HSUART_DEBUG("%s, enter context_id 0x%x\n", + __FUNCTION__, + (unsigned int)context_id_handle); + + p_context = container_of((void*)context_id_handle, + struct hsuart_context, + context_id); + if ( (NULL != p_context) && + (NULL == p_context->rx_get_buffer.p_cbk) && + (NULL == p_context->rx_get_buffer.p_data) ) { + + spin_lock_irqsave(&(p_context->lock), flags); + p_context->rx_get_buffer.p_cbk = p_cbk; + p_context->rx_get_buffer.p_data = p_data; + spin_unlock_irqrestore(&(p_context->lock), flags); + } + else { + MSM_HSUART_ERR("%s, %s, error - illegal parameters p_ctxt_0x%x\n", + DRIVER_NAME, + __FUNCTION__, + (unsigned int) p_context); + ret = -EINVAL; + } + MSM_HSUART_EXIT(); + + return ret; +} +EXPORT_SYMBOL(msm_hsuart_register_rx_get_buffer); + +/** +* +* Register 'set buffer' callback function +* +* @param[in] context_id_handle - Handle to register the callback to. +* @param[in] p_cbk - Pointer to a callback function that will be +* called to get the next free buffer to read data into. +* @param[in] p_data -Pointer to data to pass to the callback function +* when it is called. +* +* @return 0 for success -1 otherwise. 
+* +* @Note +*/ +int +msm_hsuart_register_tx_put_buffer( + int context_id_handle, + void (*p_cbk)(void* p_data, struct buffer_item* p_buffer, int transaction_size), + void* p_data) +{ + int ret = 0; + unsigned long flags; + struct hsuart_context* p_context = NULL; + + MSM_HSUART_DEBUG("%s, enter context_id 0x%x\n", + __FUNCTION__, + (unsigned int)context_id_handle); + + p_context = container_of((void*)context_id_handle, + struct hsuart_context, + context_id); + if ( (NULL != p_context) && + (NULL == p_context->tx_put_buffer.p_cbk) && + (NULL == p_context->tx_put_buffer.p_data) ) { + + spin_lock_irqsave(&(p_context->lock), flags); + p_context->tx_put_buffer.p_cbk = p_cbk; + p_context->tx_put_buffer.p_data = p_data; + spin_unlock_irqrestore(&(p_context->lock), flags); + } + else { + MSM_HSUART_ERR("%s, %s, error - illegal parameters p_ctxt_0x%x\n", + DRIVER_NAME, + __FUNCTION__, + (unsigned int) p_context); + ret = -EINVAL; + } + MSM_HSUART_EXIT(); + + return ret; +} +EXPORT_SYMBOL(msm_hsuart_register_tx_put_buffer); + +/** +* +* Register 'get buffer' callback function +* +* @param[in] context_id_handle - Handle to register the callback to. +* @param[in] p_cbk - Pointer to a callback function that will be +* called to get the next free buffer to read data into. +* @param[in] p_data -Pointer to data to pass to the callback function +* when it is called. +* +* @return 0 for success -1 otherwise. 
+*
+* @Note
+*/
+int
+msm_hsuart_register_tx_get_buffer(
+	int context_id_handle,
+	struct buffer_item* (*p_cbk)(void* p_data),
+	void* p_data)
+{
+	int ret = 0;
+	unsigned long flags;
+	struct hsuart_context* p_context = NULL;
+
+	MSM_HSUART_DEBUG("%s, enter context_id 0x%x\n",
+			__FUNCTION__,
+			(unsigned int)context_id_handle);
+
+	p_context = container_of((void*)context_id_handle,
+				struct hsuart_context,
+				context_id);
+	if (NULL != p_context) {
+		/*
+		 * Check-and-set is done under the context lock so two
+		 * concurrent registrations cannot both pass the "slot is
+		 * free" test (the original tested before locking).
+		 */
+		spin_lock_irqsave(&(p_context->lock), flags);
+		if ((NULL == p_context->tx_get_buffer.p_cbk) &&
+		    (NULL == p_context->tx_get_buffer.p_data)) {
+			p_context->tx_get_buffer.p_cbk = p_cbk;
+			p_context->tx_get_buffer.p_data = p_data;
+		}
+		else {
+			ret = -EINVAL;
+		}
+		spin_unlock_irqrestore(&(p_context->lock), flags);
+	}
+	else {
+		ret = -EINVAL;
+	}
+
+	if (ret) {
+		MSM_HSUART_ERR("%s, %s, error - illegal parameters p_ctxt_0x%x\n",
+				DRIVER_NAME,
+				__FUNCTION__,
+				(unsigned int) p_context);
+	}
+	MSM_HSUART_EXIT();
+
+	return ret;
+}
+EXPORT_SYMBOL(msm_hsuart_register_tx_get_buffer);
+
+
+/**
+*
+* Configure the flow control of the UART port.
+*
+* @param[in] context_id_handle - Handle to register the callback to.
+* @param[in] flow - The flow mode to apply. See hsuart.h for definition of
+*	the possible modes.
+*
+* @return 0 for success -1 otherwise.
+*
+* @Note
+*/
+int
+msm_hsuart_set_flow(
+	int context_id_handle,
+	int flow)
+{
+	int ret = 0;
+	unsigned long flags;
+	struct hsuart_context* p_context = NULL;
+
+	MSM_HSUART_DEBUG( "%s, enter context_id 0x%x flow 0x%x\n",
+			__FUNCTION__,
+			(uint32_t)context_id_handle,
+			flow);
+
+	/*
+	 * NOTE(review): container_of() on a non-NULL handle never returns
+	 * NULL, so the NULL check below only catches a zero handle if
+	 * context_id is the first member of struct hsuart_context - confirm.
+	 */
+	p_context = container_of((void*)context_id_handle,
+				struct hsuart_context,
+				context_id);
+
+	if (NULL != p_context){
+		/*
+		 * 'flow' packs three independent fields (see hsuart.h):
+		 * direction (RX/TX/both), control mode (HW vs SW), and, for
+		 * SW mode, the requested line state (assert/de-assert).
+		 * Decode each before touching the hardware.
+		 */
+		int flow_dir = flow & HSUART_MODE_FLOW_DIRECTION_MASK;
+		bool set_rx_flow = ( flow_dir == HSUART_MODE_FLOW_DIRECTION_RX_TX ) || ( flow_dir == HSUART_MODE_FLOW_DIRECTION_RX_ONLY );
+		bool set_tx_flow = ( flow_dir == HSUART_MODE_FLOW_DIRECTION_RX_TX ) || ( flow_dir == HSUART_MODE_FLOW_DIRECTION_TX_ONLY );
+		int flow_ctl = ( flow & HSUART_MODE_FLOW_CTRL_MODE_MASK );
+		int flow_state = ( flow & HSUART_MODE_FLOW_STATE_MASK);
+		int msm_hsuart_flow_state = (flow_state == HSUART_MODE_FLOW_STATE_ASSERT ) ? MSM_HSUART_FLOW_STATE_ASSERT : MSM_HSUART_FLOW_STATE_DEASSERT;
+
+		/* Both directions are updated under one critical section. */
+		spin_lock_irqsave(&(p_context->lock), flags);
+
+		if ( set_rx_flow ) {
+			__msm_hsuart_set_flow(p_context, MSM_HSUART_FLOW_DIR_RX, flow_ctl, msm_hsuart_flow_state);
+		}
+
+		if ( set_tx_flow ) {
+			__msm_hsuart_set_flow(p_context, MSM_HSUART_FLOW_DIR_TX, flow_ctl, msm_hsuart_flow_state);
+		}
+
+		spin_unlock_irqrestore(&(p_context->lock), flags);
+	}
+	else {
+		MSM_HSUART_ERR( "%s, %s, error - illegal parameters p_ctxt_0x%x\n",
+				DRIVER_NAME,
+				__FUNCTION__,
+				(uint32_t) p_context);
+		ret = -EINVAL;
+	}
+	MSM_HSUART_EXIT();
+
+	return ret;
+}
+EXPORT_SYMBOL(msm_hsuart_set_flow);
+/**
+*
+* Configure the parity of the UART port.
+*
+* @param[in] context_id_handle - Handle to register the callback to.
+* @param[in] parity - The parity mode to apply. See hsuart.h for definition of
+*	the possible modes.
+*
+* @return 0 for success -1 otherwise.
+* +* @Note +*/ +int +msm_hsuart_set_parity( + int context_id_handle, + int parity) +{ + int ret = 0; + struct hsuart_context* p_context = NULL; + msm_uartdm_parity_t msm_uartdm_parity; + unsigned long flags; + + MSM_HSUART_ERR( "%s, enter context_id 0x%x parity 0x%x\n", + __FUNCTION__, + (uint32_t)context_id_handle, + parity); + + p_context = container_of((void*)context_id_handle, + struct hsuart_context, + context_id); + + if (NULL != p_context){ + spin_lock_irqsave(&(p_context->lock), flags); + switch (parity) { + case HSUART_MODE_PARITY_NONE: + msm_uartdm_parity = MSM_UARTDM_PARITY_NONE; + break; + case HSUART_MODE_PARITY_ODD: + msm_uartdm_parity = MSM_UARTDM_PARITY_ODD; + break; + case HSUART_MODE_PARITY_EVEN: + msm_uartdm_parity = MSM_UARTDM_PARITY_EVEN; + break; + default: + MSM_HSUART_ERR(KERN_ERR "%s, %s, error, invalid parity %d\n", + DRIVER_NAME, + __FUNCTION__, + parity); + goto Done; + break; + } + p_context->parity = msm_uartdm_parity; + msm_uartdm_set_parity( + p_context->p_uart_port, + msm_uartdm_parity); +Done: + spin_unlock_irqrestore(&(p_context->lock), flags); + + } + else { + MSM_HSUART_ERR("%s, %s, error - illegal parameters p_ctxt_0x%x\n", + DRIVER_NAME, + __FUNCTION__, + (uint32_t) p_context); + ret = -EINVAL; + } + + MSM_HSUART_EXIT(); + return ret; +} +EXPORT_SYMBOL(msm_hsuart_set_parity); + +/** +* +* Indicates whether there are bytes in rx fifo +* +* @param[in] context_id_handle - context id +* +* @return 0 if rx fifo is empty and 1 otherwise. 
+* +*/ +int +msm_hsuart_rx_fifo_has_bytes( int context_id_handle) +{ + int ret = 0; + unsigned long flags; + struct hsuart_context* p_context = NULL; + + p_context = container_of((void*)context_id_handle, + struct hsuart_context, + context_id); + + if (NULL != p_context){ + int packing_bytes = 0; + int rx_fifo_fullness; + + spin_lock_irqsave(&(p_context->lock), flags); + + rx_fifo_fullness = msm_uartdm_get_rx_fifo_fullness(p_context->p_uart_port, &packing_bytes); + + ret = !!rx_fifo_fullness || !!packing_bytes; + + spin_unlock_irqrestore(&(p_context->lock), flags); + } + + return ret; +} + +EXPORT_SYMBOL(msm_hsuart_rx_fifo_has_bytes); + + +/** +* +* Configure the uart port to the requested baud-rate. +* +* @param[in] i_p_port - The UART port to configure. +* @param[in] baud - The requested baud rate. +* +* @return 0 for success -1/-ErrCode otherwise. +* +*/ +int +msm_hsuart_set_baud_rate(int context_id_handle, uint32_t baud_rate) +{ + int ret = 0; + struct hsuart_context* p_context = NULL; + unsigned long flags; + int rx_irqs_enable; + int tx_rdy_enable; + int tx_lvl_enable; + struct generic_uart_port* p_port; + + MSM_HSUART_DEBUG("%s, enter context_id 0x%x speed %d\n", + __FUNCTION__, + (uint32_t)context_id_handle, + baud_rate); + + p_context = container_of((void*)context_id_handle, + struct hsuart_context, + context_id); + if (NULL != p_context){ + bool dm_flush; + + dm_flush = msm_hsuart_flush_dm(p_context); + + spin_lock_irqsave(&(p_context->lock), flags); + + p_port = p_context->p_uart_port; + + /* + * Save IRQ status to restore it later. 
+ */ + rx_irqs_enable = msm_uartdm_disable_rx_irqs(p_port); + tx_rdy_enable = msm_uartdm_disable_tx_rdy(p_port); + tx_lvl_enable = msm_uartdm_disable_tx_level(p_port); + + p_context->baud_rate = baud_rate; + ret = msm_uartdm_set_baud_rate( + p_context->p_uart_port, + baud_rate); + if (ret) { + MSM_HSUART_ERR("%s, %s, error at %d ret %d", + DRIVER_NAME, + __FUNCTION__, + __LINE__, + ret); + goto Done; + } + //flush_workqueue(p_context->reader.p_worker); + //flush_workqueue(p_context->writer.p_worker); + + if (tx_lvl_enable) { + msm_uartdm_enable_tx_level(p_context->p_uart_port); + } + if (tx_rdy_enable) { + msm_uartdm_enable_tx_rdy(p_context->p_uart_port); + } + if (rx_irqs_enable) { + msm_uartdm_enable_rx_irqs(p_context->p_uart_port, p_context->rx.enable_stale); + } +Done: + + if ( dm_flush ) { + msm_hsuart_resume_dm(p_context); + } + + //asm("nop"); + spin_unlock_irqrestore(&(p_context->lock), flags); + } + else { + MSM_HSUART_ERR("%s, %s, error - illegal parameters p_ctxt_0x%x\n", + DRIVER_NAME, + __FUNCTION__, + (uint32_t) p_context); + ret = -EINVAL; + } + + MSM_HSUART_EXIT(); + return ret; +} +EXPORT_SYMBOL(msm_hsuart_set_baud_rate); + + +static int __init +msm_init_hsuart(void) +{ + int ret = 0; + + return ret; +} + +arch_initcall(msm_init_hsuart); + +MODULE_AUTHOR("Amir Frenkel "); +MODULE_DESCRIPTION("Driver for msm7x high speed uart"); +MODULE_LICENSE("GPL"); diff --git a/arch/arm/mach-msm/msm_uart_dm.c b/arch/arm/mach-msm/msm_uart_dm.c new file mode 100644 index 00000000000..b0ae0444a76 --- /dev/null +++ b/arch/arm/mach-msm/msm_uart_dm.c @@ -0,0 +1,2357 @@ +/* + * arch/arm/mach-msm/msm_uart_dm.c - Driver for MSM uart DM ports + * + * + * Copyright (C) 2008 Palm, Inc. + * Author: Amir Frenkel + * + * Based on drivers/serial/msm_serial.c driver implementation + * Copyright (C) 2007 Google, Inc. 
+ * Author: Robert Love + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include + +#include + +/* + * Debug related macros and defs + */ +#define DRIVER_NAME "msm_uartdm" +#define DRIVER_VERSION (0x100) + + +#define MSM_UARTDM_DEBUG_ENABLE 0 +#define MSM_UARTDM_FUNC_LOG_ENABLE 0 + +#if MSM_UARTDM_DEBUG_ENABLE +#define MSM_UARTDM_DEBUG(args...) (printk(KERN_DEBUG args)) +#define MSM_UARTDM_INFO(args...) (printk(KERN_INFO args)) +#define MSM_UARTDM_ERR(args...) (printk(KERN_ERR args)) +#else +#define MSM_UARTDM_INFO(args...) +#define MSM_UARTDM_DEBUG(args...) +#define MSM_UARTDM_ERR(args...) +#endif // MSM_UARTDM_DEBUG_ENABLE + +#if MSM_UARTDM_FUNC_LOG_ENABLE +#define MSM_UARTDM_ENTER() (printk(KERN_INFO"%s: %s, %u[usec] enter\n",\ + DRIVER_NAME, __PRETTY_FUNCTION__, jiffies_to_msecs(jiffies))) + +#define MSM_UARTDM_EXIT() (printk(KERN_INFO"%s: %s, %u[usec] exit\n",\ + DRIVER_NAME, __PRETTY_FUNCTION__, jiffies_to_msecs(jiffies))) + +#else +#define MSM_UARTDM_ENTER() +#define MSM_UARTDM_EXIT() +#endif + +#define RX_MODE_PIO(p_port) (p_port->flags & UART_DM_MODE_RX_PIO) +#define RX_MODE_DM(p_port) (p_port->flags & UART_DM_MODE_RX_DM) + +#define MSM_UARTDM_BURST_SIZE 16 + +/* + * UART clk related definitions. + */ + +/* + * The MSM possible fundamental clk for UART DM + * Don't know how to represent it in C, but having the + * numbers here is a good start. 
Keep in mind that when sending them over to the + * modem via proc-comm, the granularity is hz, hence 3.6864 Mhz should be + * sent as 3686 + * - 3.6864Mhz + * - 7.3728Mhz + * - 14.7456Mhz + * - 46.4Mhz + + * - 51.2Mhz + * - 58.9824Mhz + * - 64Mhz + */ + +#define GSBI_CONTROL_ADDR 0x0 +#define GSBI_PROTOCOL_UART 0x40 +#define GSBI_PROTOCOL_IDLE 0x0 + +#define TCSR_ADM_1_A_CRCI_MUX_SEL 0x78 +#define TCSR_ADM_1_B_CRCI_MUX_SEL 0x7C +#define ADM1_CRCI_GSBI6_RX_SEL 0x800 +#define ADM1_CRCI_GSBI6_TX_SEL 0x400 + +/* + * static port DB definition + */ + +struct uart_port_item { + struct generic_uart_port port; + + /* + * 1 - used + * 0 - not used + */ + int used; +}; + +#define UARTDM_NUM_PORTS (2) +#define FIFO_SZ (64) +static struct uart_port_item ports_db[UARTDM_NUM_PORTS] = +{ + { + .port = + { + /* + * The UARTDM has a total of 128*32bit SRAM for Rx/Tx + * FIFO The split between Rx and Tx is configurable, + * hence the rx and tx fifos may be of different size. + * Currently for simplicity - assuming they are equal + * in size + */ + .rx_fifo_size = FIFO_SZ, + .tx_fifo_size = FIFO_SZ, + .p_clk_name = "core_clk", + .p_pclk_name = "iface_clk", + .id = 0, + .rx_dm = {0}, + }, + + .used = 0, + }, + + { + .port = + { + /* + * The UARTDM has a total of 128*32bit SRAM for Rx/Tx + * FIFO The split between Rx and Tx is configurable, + * hence the rx and tx fifos may be of different size. 
+ * Currently for simplicity - assuming they are equal + * in size + */ + .rx_fifo_size = FIFO_SZ, + .tx_fifo_size = FIFO_SZ, + .p_clk_name = "core_clk", + .p_pclk_name = "iface_clk", + .id = 1, + .rx_dm = {0}, + }, + + .used = 0, + } + +}; +#define UART_NR ARRAY_SIZE(ports_db) + +static void +__msm_uartdm_set_baud_rate(struct generic_uart_port* i_p_port, unsigned int baud); + +static void +__msm_uartdm_set_stale_timeout(struct generic_uart_port* i_p_port); + +static void +__msm_uartdm_reset(struct generic_uart_port *port); + +static inline void msm_write(struct generic_uart_port *port, unsigned int val, + unsigned int off) +{ + __raw_writel(val, port->p_membase + off); +} + +static inline unsigned int msm_read(struct generic_uart_port *port, unsigned int off) +{ + return __raw_readl(port->p_membase + off); +} + + +/** + * Disable 'tx-level' interrupt + * + * @param[in] i_p_port - The UART DM port. + * + * @return 1 - tx-level was enabled BEFORE the call to this function, + * 0 otherwise. 
+ */
+int
+msm_uartdm_disable_tx_level(struct generic_uart_port* i_p_port)
+{
+	struct msm_uart_port* p_msm_uart_port = GEN_UART_TO_MSM(i_p_port);
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&(p_msm_uart_port->lock), flags);
+	/* Remember whether the interrupt was on, then mask it in IMR. */
+	ret = (p_msm_uart_port->imr & UART_DM_IMR_TX_LEV);
+	ret = !!ret;	/* normalize bitmask to 0/1 for the caller */
+	p_msm_uart_port->imr &= ~UART_DM_IMR_TX_LEV;
+	msm_write(i_p_port, p_msm_uart_port->imr, UART_DM_IMR);
+	spin_unlock_irqrestore(&(p_msm_uart_port->lock), flags);
+
+	return ret;
+}
+EXPORT_SYMBOL(msm_uartdm_disable_tx_level);
+
+/**
+ * Enable the 'tx-level' interrupt by setting its bit in the cached
+ * IMR shadow and writing the shadow back to the hardware.
+ *
+ * @param[in] i_p_port - The UART DM port.
+ */
+void
+msm_uartdm_enable_tx_level(struct generic_uart_port* i_p_port)
+{
+	unsigned long flags;
+	struct msm_uart_port* p_msm_uart_port = GEN_UART_TO_MSM(i_p_port);
+
+	spin_lock_irqsave(&(p_msm_uart_port->lock), flags);
+	p_msm_uart_port->imr |= UART_DM_IMR_TX_LEV;
+	msm_write(i_p_port, p_msm_uart_port->imr, UART_DM_IMR);
+	spin_unlock_irqrestore(&(p_msm_uart_port->lock), flags);
+
+}
+EXPORT_SYMBOL(msm_uartdm_enable_tx_level);
+
+/**
+ * Disable 'tx-ready' interrupt
+ *
+ * @param[in] i_p_port - The UART DM port.
+ *
+ * @return 1 - tx-ready was enabled BEFORE the call to this function,
+ *	0 otherwise.
+ */
+int
+msm_uartdm_disable_tx_rdy(struct generic_uart_port* i_p_port)
+{
+	unsigned long flags;
+	struct msm_uart_port* p_msm_uart_port = GEN_UART_TO_MSM(i_p_port);
+	int ret;
+
+	spin_lock_irqsave(&(p_msm_uart_port->lock), flags);
+
+	/* Capture prior enable state, then mask TX_RDY in the IMR shadow. */
+	ret = (p_msm_uart_port->imr & UART_DM_IMR_TX_RDY);
+	ret = !!ret;	/* normalize bitmask to 0/1 for the caller */
+	p_msm_uart_port->imr &= ~UART_DM_IMR_TX_RDY;
+	msm_write(i_p_port, p_msm_uart_port->imr, UART_DM_IMR);
+	spin_unlock_irqrestore(&(p_msm_uart_port->lock), flags);
+
+	return ret;
+}
+EXPORT_SYMBOL(msm_uartdm_disable_tx_rdy);
+
+/**
+ * Enable the 'tx-ready' interrupt.
+ *
+ * @param[in] i_p_port - The UART DM port.
+ */
+void
+msm_uartdm_enable_tx_rdy(struct generic_uart_port* i_p_port)
+{
+	struct msm_uart_port* p_msm_uart_port = GEN_UART_TO_MSM(i_p_port);
+	unsigned long flags;
+
+	MSM_UARTDM_ENTER();
+	spin_lock_irqsave(&(p_msm_uart_port->lock), flags);
+
+	p_msm_uart_port->imr |= UART_DM_IMR_TX_RDY;
+	msm_write(i_p_port, p_msm_uart_port->imr, UART_DM_IMR);
+
+	spin_unlock_irqrestore(&(p_msm_uart_port->lock), flags);
+	MSM_UARTDM_EXIT();
+}
+EXPORT_SYMBOL(msm_uartdm_enable_tx_rdy);
+
+/**
+ * Enable the RX interrupts appropriate for the port's receive mode:
+ * PIO mode uses RX_LEV plus RX_STALE, data-mover (DM) mode only RX_STALE.
+ *
+ * @param[in] port - The UART DM port.
+ * @param[in] enable_stale - non-zero to also issue the 'enable stale
+ *	event' general command before unmasking.
+ */
+void
+msm_uartdm_enable_rx_irqs(struct generic_uart_port *port, int enable_stale)
+{
+	unsigned long flags;
+	struct msm_uart_port* p_msm_uart_port = GEN_UART_TO_MSM(port);
+	u32 imr = 0;
+	MSM_UARTDM_ENTER();
+
+	/*
+	 * NOTE(review): the mode flags are read outside the lock; this is
+	 * safe only if 'flags' never changes after port open - confirm.
+	 */
+	if ( RX_MODE_PIO(p_msm_uart_port) ) {
+		imr = UART_DM_IMR_RX_LEV | UART_DM_IMR_RX_STALE;
+	}
+	else if ( RX_MODE_DM(p_msm_uart_port)) {
+		imr = UART_DM_IMR_RX_STALE;
+	}
+
+
+	spin_lock_irqsave(&(p_msm_uart_port->lock), flags);
+
+	/*
+	 * Enable stale event if needed
+	 */
+	if (enable_stale) {
+		msm_write(port, UART_DM_CR_GCMD_EN_STALE_EVENT, UART_DM_CR);
+	}
+
+	/*
+	 * Enable RX interrupts.
+	 */
+	p_msm_uart_port->imr |= imr;
+	msm_write(port, p_msm_uart_port->imr, UART_DM_IMR);
+
+	spin_unlock_irqrestore(&(p_msm_uart_port->lock), flags);
+
+	MSM_UARTDM_EXIT();
+}
+EXPORT_SYMBOL(msm_uartdm_enable_rx_irqs);
+
+/**
+ * Disable 'rx-level/stale' interrupts
+ *
+ * @param[in] i_p_port - The UART DM port.
+ *
+ * @return 1 - any of the rx intr was enabled BEFORE the call to
+ *	this function, 0 otherwise.
+ */
+int
+msm_uartdm_disable_rx_irqs(struct generic_uart_port* i_p_port)
+{
+	struct msm_uart_port* p_msm_uart_port = GEN_UART_TO_MSM(i_p_port);
+	int ret = 0;
+	unsigned long flags;
+	u32 imr;
+
+	MSM_UARTDM_ENTER();
+
+	/* Pick the interrupt set matching the receive mode (PIO vs DM). */
+	if ( RX_MODE_PIO(p_msm_uart_port) ) {
+		imr = UART_DM_IMR_RX_LEV | UART_DM_IMR_RX_STALE;
+	}
+	else if ( RX_MODE_DM(p_msm_uart_port)) {
+		imr = UART_DM_IMR_RX_STALE;
+		//Disable stale event
+		/*
+		 * NOTE(review): this CR write is issued before the spinlock
+		 * is taken, unlike every other register access in this
+		 * function - confirm it cannot race the IRQ path.
+		 */
+		msm_write(i_p_port, UART_DM_CR_GCMD_DIS_STALE_EVENT, UART_DM_CR);
+	}
+	else {
+		/* Neither mode flag set: misconfigured port, return 0. */
+		printk(KERN_ERR"%s, %d, invalid port settings\n",
+			__func__,
+			__LINE__);
+		goto done;
+	}
+	spin_lock_irqsave(&(p_msm_uart_port->lock), flags);
+
+	/*
+	 * Test and see if the rx irqs were disabled already.
+	 */
+	ret = p_msm_uart_port->imr & imr;
+	ret = !!ret;
+	p_msm_uart_port->imr &= ~imr;
+	msm_write(i_p_port, p_msm_uart_port->imr, UART_DM_IMR);
+
+	spin_unlock_irqrestore(&(p_msm_uart_port->lock), flags);
+
+	MSM_UARTDM_EXIT();
+done:
+	return ret;
+}
+EXPORT_SYMBOL(msm_uartdm_disable_rx_irqs);
+
+/**
+ * Issue the 'receiver enable' command to the port.
+ *
+ * @param[in] i_p_port - The UART DM port.
+ */
+void
+msm_uartdm_enable_rx(struct generic_uart_port* i_p_port)
+{
+	/*
+	 * NOTE(review): p_msm_uart_port (struct msm_uart_port*) is passed
+	 * where msm_write() takes struct generic_uart_port*; this relies on
+	 * the generic port being the first member - confirm.
+	 */
+	struct msm_uart_port* p_msm_uart_port = GEN_UART_TO_MSM(i_p_port);
+	MSM_UARTDM_ENTER();
+
+	msm_write(p_msm_uart_port,
+		UART_DM_CR_RX_ENABLE,
+		UART_DM_CR);
+
+	MSM_UARTDM_EXIT();
+}
+EXPORT_SYMBOL(msm_uartdm_enable_rx);
+
+/**
+ * Reset the receiver as if HW reset was issued.
+ * The receiver is disabled, and the HW FIFO and packing
+ * and shift registers are flushed.
+ *
+ * @param[in] - The UART port to operate on.
+ * @return - None.
+ *
+ */
+void
+msm_uartdm_reset_rx(struct generic_uart_port* i_p_port)
+{
+	struct msm_uart_port* p_msm_uart_port = GEN_UART_TO_MSM(i_p_port);
+	MSM_UARTDM_ENTER();
+
+	/* Issue the RESET_RX command; flushes FIFO/packing/shift regs. */
+	msm_write(p_msm_uart_port,
+		UART_DM_CR_CMD_RESET_RX,
+		UART_DM_CR);
+
+	MSM_UARTDM_EXIT();
+}
+EXPORT_SYMBOL(msm_uartdm_reset_rx);
+
+
+
+/**
+ * Issue the 'receiver disable' command to the port.
+ *
+ * @param[in] i_p_port - The UART DM port.
+ */
+void
+msm_uartdm_disable_rx(struct generic_uart_port* i_p_port)
+{
+	struct msm_uart_port* p_msm_uart_port = GEN_UART_TO_MSM(i_p_port);
+	MSM_UARTDM_ENTER();
+
+	msm_write(p_msm_uart_port,
+		UART_DM_CR_RX_DISABLE,
+		UART_DM_CR);
+
+	MSM_UARTDM_EXIT();
+}
+EXPORT_SYMBOL(msm_uartdm_disable_rx);
+
+/**
+ * Issue the 'transmitter enable' command to the port.
+ *
+ * @param[in] i_p_port - The UART DM port.
+ */
+void
+msm_uartdm_enable_tx(struct generic_uart_port* i_p_port)
+{
+	struct msm_uart_port* p_msm_uart_port = GEN_UART_TO_MSM(i_p_port);
+	MSM_UARTDM_ENTER();
+
+	msm_write(p_msm_uart_port,
+		UART_DM_CR_TX_ENABLE,
+		UART_DM_CR);
+
+	MSM_UARTDM_EXIT();
+}
+EXPORT_SYMBOL(msm_uartdm_enable_tx);
+
+/**
+ * Issue the 'transmitter disable' command to the port.
+ *
+ * @param[in] i_p_port - The UART DM port.
+ */
+void
+msm_uartdm_disable_tx(struct generic_uart_port* i_p_port)
+{
+	struct msm_uart_port* p_msm_uart_port = GEN_UART_TO_MSM(i_p_port);
+	MSM_UARTDM_ENTER();
+
+	msm_write(p_msm_uart_port,
+		UART_DM_CR_TX_DISABLE,
+		UART_DM_CR);
+
+	MSM_UARTDM_EXIT();
+}
+EXPORT_SYMBOL(msm_uartdm_disable_tx);
+
+/**
+ * Reset the transmitter as if HW reset was issued.
+ * The transmitter signal goes high and the HW FIFO and packing
+ * and shift registers are flushed.
+ *
+ * @param[in] - The UART port to operate on.
+ * @return - None.
+ * + */ +void +msm_uartdm_reset_tx(struct generic_uart_port* i_p_port) +{ + struct msm_uart_port* p_msm_uart_port = GEN_UART_TO_MSM(i_p_port); + MSM_UARTDM_ENTER(); + + msm_write(p_msm_uart_port, + UART_DM_CR_CMD_RESET_TX, + UART_DM_CR); + + MSM_UARTDM_EXIT(); +} +EXPORT_SYMBOL(msm_uartdm_reset_tx); + + +/* +static void msm_enable_ms(struct generic_uart_port* i_p_port) +{ + struct msm_uart_port* p_msm_uart_port = GEN_UART_TO_MSM(i_p_port); + + p_msm_uart_port->imr |= UART_DM_IMR_DELTA_CTS; + msm_write(i_p_port, p_msm_uart_port->imr, UART_DM_IMR); +} +*/ +static void +handle_rx_stale(struct generic_uart_port* i_p_port) +{ + MSM_UARTDM_ENTER(); + + /* + * Clear the interrupt bit. + */ + msm_write(i_p_port, UART_DM_CR_CMD_CLR_STALE, UART_DM_CR); + + /* + * Handle overrun. My understanding of the hardware is that overrun + * is not tied to the RX buffer, so we handle the case out of band. + */ + if ((msm_read(i_p_port, UART_DM_SR) & UART_DM_SR_OVERRUN)) { + msm_write(i_p_port, UART_DM_CR_CMD_RESET_ERR, UART_DM_CR); + } + + /* + * Invoke callback if one is configured. + */ + if (NULL != i_p_port->p_rx_stale_callback) { + MSM_UARTDM_DEBUG("%s, FIFO LEVEL %d\n",__FUNCTION__, msm_read(i_p_port, UART_DM_RXFS)); + + i_p_port->p_rx_stale_callback(i_p_port->p_rx_stale_data); + } + MSM_UARTDM_EXIT(); +} + + + +static void +handle_rx_level(struct generic_uart_port* i_p_port) +{ + MSM_UARTDM_ENTER(); + + /* + * Handle overrun. My understanding of the hardware is that overrun + * is not tied to the RX buffer, so we handle the case out of band. + */ + if ((msm_read(i_p_port, UART_DM_SR) & UART_DM_SR_OVERRUN)) { + msm_write(i_p_port, UART_DM_CR_CMD_RESET_ERR, UART_DM_CR); + } + + /* + * Invoke callback if one is configured. 
+ */ + if (NULL != i_p_port->p_rx_level_callback) { + MSM_UARTDM_DEBUG("%s, FIFO LEVEL %d\n",__FUNCTION__, msm_read(i_p_port, UART_DM_RXFS)); + i_p_port->p_rx_level_callback(i_p_port->p_rx_level_data); + } + + MSM_UARTDM_EXIT(); +} + +static void +msm_uartdm_clear_dm_error(struct generic_uart_port* i_p_port) +{ + msm_write( i_p_port, UART_DM_DMEN_RX_DM_DIS, UART_DM_DMEN ); + __msm_uartdm_reset(i_p_port); + +// msm_dmov_clear_error_condition(i_p_port->dma_rx_channel, i_p_port->dma_rx_crci); + + msm_write( i_p_port, + UART_DM_CR_RX_ENABLE | UART_DM_CR_TX_ENABLE, + UART_DM_CR); + + msm_write( i_p_port, UART_DM_DMEN_RX_DM_EN, UART_DM_DMEN ); +} + +static void +handle_rx_dm(struct msm_dmov_cmd *cmd_ptr, + unsigned int result, + struct msm_dmov_errdata *e + __maybe_unused) +{ + struct generic_uart_port* i_p_port; + + MSM_UARTDM_ENTER(); + + if (NULL == cmd_ptr) { + MSM_UARTDM_ERR("%s: %s, invalid(null) contxt\n", + DRIVER_NAME, + __PRETTY_FUNCTION__); + return; + } + + i_p_port = container_of(cmd_ptr, struct generic_uart_port, rx_xfer); + + if ( result & DMOV_RSLT_ERROR ) { + msm_uartdm_clear_dm_error(i_p_port); + } + + /* + * Invoke callback if one is configured. + */ + if (NULL != i_p_port->p_rx_dm_callback) { + MSM_UARTDM_DEBUG("%s, FIFO LEVEL %d\n",__FUNCTION__, msm_read(i_p_port, UART_DM_RXFS)); + i_p_port->p_rx_dm_callback(i_p_port->p_rx_dm_data); + } + + MSM_UARTDM_EXIT(); +} + +unsigned int msm_uartdm_read_reg(struct generic_uart_port* i_p_port, int addr) +{ + unsigned int ret; + ret = msm_read(i_p_port, addr); + return ret; +} + +EXPORT_SYMBOL(msm_uartdm_read_reg); + +static void +handle_tx_level(struct generic_uart_port* i_p_port) +{ + struct msm_uart_port* p_msm_port = GEN_UART_TO_MSM(i_p_port); + + MSM_UARTDM_ENTER(); + + /* + * Invoke callback if one is configured. 
+ */ + if (NULL != i_p_port->p_tx_level_callback) { + i_p_port->p_tx_level_callback(i_p_port->p_tx_level_data); + } + else { + /* disable tx level interrupts */ + p_msm_port->imr &= ~UART_DM_IMR_TX_LEV; + msm_write(i_p_port, p_msm_port->imr, UART_DM_IMR); + } + MSM_UARTDM_EXIT(); +} +static void +handle_tx_rdy(struct generic_uart_port* i_p_port) +{ + struct msm_uart_port* p_msm_port = GEN_UART_TO_MSM(i_p_port); + + MSM_UARTDM_ENTER(); + + /* + * Clear the interrupt bit. + */ + msm_write(i_p_port, UART_DM_CR_GCMD_RESET_TX_RDY_INT, UART_DM_CR); + + /* + * Invoke callback if one is configured. + */ + if (NULL != i_p_port->p_tx_rdy_callback) { + i_p_port->p_tx_rdy_callback(i_p_port->p_tx_rdy_data); + } + else { + /* disable tx rdy interrupts */ + p_msm_port->imr &= ~UART_DM_IMR_TX_RDY; + msm_write(i_p_port, p_msm_port->imr, UART_DM_IMR); + } + MSM_UARTDM_EXIT(); +} + + +/* + * Helper function, predicate which check whether the TX fifo has room and can + * pushed with more data + * + * @param[in] i_p_port - The UART DM port to check. + * + * @return 0 no more space in tx fifo, 1 we have room in the tx fifo. + */ +int +msm_uartdm_tx_ready(struct generic_uart_port* i_p_port) + +{ + + return (msm_read(i_p_port, UART_DM_SR) & UART_DM_SR_TX_READY); +} + +EXPORT_SYMBOL(msm_uartdm_tx_ready); + +/** + * Helper function, predicate which check whether the RX fifo has more dat + * + * @param[in] i_p_port - The UART DM port to check. + * + * @return 0 no more data in the rx fifo, 1 we have more data in + * the rx fifo. 
+ */ +int +msm_uartdm_rx_ready(struct generic_uart_port* i_p_port) + +{ + return (msm_read(i_p_port, UART_DM_SR) & UART_DM_SR_RX_READY); +} + +EXPORT_SYMBOL(msm_uartdm_rx_ready); + + +/* +static void handle_delta_cts(struct generic_uart_port *port) +{ + msm_write(port, UART_CR_CMD_RESET_CTS, UART_CR); + port->icount.cts++; + wake_up_interruptible(&port->info->delta_msr_wait); +} + +*/ +static irqreturn_t +msm_uartdm_irq(int irq, void *dev_id) +{ + struct generic_uart_port* p_port = dev_id; + struct msm_uart_port* p_msm_port = GEN_UART_TO_MSM(p_port); + unsigned int misr; + unsigned int sr; + unsigned int isr; + + //TODO: cleanup the logic a bit (locks, disable interrupts etc). +// spin_lock(&(p_port->lock)); + misr = msm_read(p_port, UART_DM_MISR); + isr = msm_read(p_port, UART_DM_ISR); + sr = msm_read(p_port, UART_DM_SR); + + MSM_UARTDM_DEBUG("%s enter misr0x%x, isr0x%x, sr0x%x\n", __FUNCTION__,misr, isr, sr); + msm_write(p_port, 0, UART_DM_IMR); /* disable interrupt */ + if (misr & UART_DM_IMR_RX_LEV) { + handle_rx_level(p_port); + } + if (misr & UART_DM_IMR_RX_STALE) { + handle_rx_stale(p_port); + } + if (misr & UART_DM_IMR_TX_LEV) { + handle_tx_level(p_port); + } + if (misr & UART_DM_IMR_TX_RDY) { + /* + * Clear TX-RDY interrupt. + */ + handle_tx_rdy(p_port); + } + + if (misr & UART_DM_IMR_DELTA_CTS) { + // handle_delta_cts(port); + } + + msm_write(p_port, p_msm_port->imr, UART_DM_IMR); /* restore interrupt */ + +// spin_unlock(&(p_port->lock)); + misr = msm_read(p_port, UART_DM_MISR); + isr = msm_read(p_port, UART_DM_ISR); + sr = msm_read(p_port, UART_DM_SR); + + MSM_UARTDM_DEBUG("%s exit misr0x%x, isr0x%x, sr0x%x\n", __FUNCTION__,misr, isr, sr); + + return IRQ_HANDLED; +} +/** + * Function to control the Rx flow control. + * The MSM UARTDM support + * - on for HW RX flow + * - off for HW Rx Flow (which can be implemented as SW based flow) + * + * @param[in] i_p_port - The UART DM port to check. 
+ * @param[in] flow_ctl - 1 for HW flow control, 0 for SW flow control.
+ * @param[in] flow_state - 1 flow line asserted , 0 flow line de-asserted
+ *	Applicable in case of SW flow control only
+ *
+ * @return None
+ */
+static void
+__msm_uartdm_set_rx_flow(struct generic_uart_port* i_p_port, uint32_t flow_ctl, uint32_t flow_state)
+{
+	unsigned int tmp;
+	struct msm_uart_port* p_msm_port = GEN_UART_TO_MSM(i_p_port);
+
+	MSM_UARTDM_DEBUG("%s, port0x%x, flow %d\n",
+		__FUNCTION__, (uint32_t)i_p_port, flow_ctl);
+
+	tmp = msm_read(i_p_port, UART_DM_MR1);
+	if (flow_ctl) {
+		/* HW flow: let the UART drive RFR from the FIFO level. */
+		tmp |= UART_MR1_RX_RDY_CTL;
+		msm_write(i_p_port, tmp, UART_DM_MR1);
+
+		if ( i_p_port->p_board_rts_pin_deassert_cb ) {
+			i_p_port->p_board_rts_pin_deassert_cb(0);
+		}
+	}
+	else {
+		if ( flow_state ) {
+			/*
+			 * Set the RFR to low (asserted - peer may send)
+			 */
+			msm_write(i_p_port, UART_DM_CR_CMD_SET_RFR, UART_DM_CR);
+
+			if ( i_p_port->p_board_rts_pin_deassert_cb ) {
+				i_p_port->p_board_rts_pin_deassert_cb(0);
+			}
+		} else {
+			/*
+			 * Set the RFR to high (de-asserted - peer must stop)
+			 */
+			if ( i_p_port->p_board_rts_pin_deassert_cb ) {
+				i_p_port->p_board_rts_pin_deassert_cb(1);
+			}
+
+			msm_write(i_p_port, UART_DM_CR_CMD_RESET_RFR, UART_DM_CR);
+		}
+
+		tmp &= ~UART_MR1_RX_RDY_CTL;
+		msm_write(i_p_port, tmp, UART_DM_MR1);
+	}
+
+	/* Cache the applied settings for later state queries. */
+	p_msm_port->rx_flow_ctl = flow_ctl;
+	p_msm_port->rx_flow_state = flow_state;
+
+
+}
+
+/**
+ * Locked wrapper around __msm_uartdm_set_rx_flow().
+ *
+ * @param[in] i_p_port - The UART DM port.
+ * @param[in] flow_ctl - 1 for HW flow control, 0 for SW flow control.
+ * @param[in] flow_state - line state for SW flow control.
+ */
+void
+msm_uartdm_set_rx_flow(struct generic_uart_port* i_p_port, uint32_t flow_ctl, uint32_t flow_state)
+{
+	unsigned long flags;
+	spin_lock_irqsave(&(i_p_port->lock), flags);
+
+	__msm_uartdm_set_rx_flow(i_p_port, flow_ctl, flow_state);
+
+	spin_unlock_irqrestore(&(i_p_port->lock), flags);
+}
+EXPORT_SYMBOL(msm_uartdm_set_rx_flow);
+
+/**
+ * Function to control the Tx flow control.
+ * The MSM UARTDM support
+ * - on for HW flow
+ * - off for HW Flow
+ *
+ * @param[in] i_p_port - The UART DM port.
+ * @param[in] flow_ctl - 1 for HW flow control, 0 for SW flow control.
+ * @return None
+ */
+static void
+__msm_uartdm_set_tx_flow(struct generic_uart_port* i_p_port, uint32_t flow_ctl)
+{
+	uint32_t tmp;
+	struct msm_uart_port* p_msm_port = GEN_UART_TO_MSM(i_p_port);
+
+	MSM_UARTDM_DEBUG("%s, port0x%x, flow %d\n",
+		__FUNCTION__, (uint32_t)i_p_port, flow_ctl);
+
+	tmp = msm_read(i_p_port, UART_DM_MR1);
+
+	if (flow_ctl) {
+		/* HW flow: transmitter honors the incoming CTS line. */
+		tmp |= UART_MR1_CTS_CTL;
+		msm_write(i_p_port, tmp, UART_DM_MR1);
+	}
+	else {
+		tmp &= ~UART_MR1_CTS_CTL;
+		msm_write(i_p_port, tmp, UART_DM_MR1);
+	}
+
+	/* Cache the applied setting for later state queries. */
+	p_msm_port->tx_flow_ctl = flow_ctl;
+}
+
+/**
+ * Locked wrapper around __msm_uartdm_set_tx_flow().
+ *
+ * @param[in] i_p_port - The UART DM port.
+ * @param[in] flow_ctl - 1 for HW flow control, 0 for SW flow control.
+ */
+void
+msm_uartdm_set_tx_flow(struct generic_uart_port* i_p_port, uint32_t flow_ctl)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&(i_p_port->lock), flags);
+
+	__msm_uartdm_set_tx_flow(i_p_port, flow_ctl);
+
+	spin_unlock_irqrestore(&(i_p_port->lock), flags);
+}
+EXPORT_SYMBOL(msm_uartdm_set_tx_flow);
+
+/**
+ * Function to control the parity (NONE, ODD or EVEN)
+ *
+ * @param[in] i_p_port - The UART DM port.
+ * @param[in] parity - The parity mode to apply (NONE, ODD or EVEN).
+ *
+ * @return None
+ */
+void
+msm_uartdm_set_parity(struct generic_uart_port* i_p_port,
+		msm_uartdm_parity_t parity)
+{
+	uint32_t data;
+	unsigned long flags;
+	unsigned int tmp;
+
+	MSM_UARTDM_DEBUG("%s, port0x%x, parity %d\n",
+		__FUNCTION__, (uint32_t)i_p_port, parity);
+
+	/* Translate the enum to the MR2 parity-mode field value. */
+	switch(parity) {
+	case MSM_UARTDM_PARITY_NONE:
+		data = UART_DM_MR2_PARITY_MODE_NONE;
+		break;
+	case MSM_UARTDM_PARITY_EVEN:
+		data = UART_DM_MR2_PARITY_MODE_EVEN;
+		break;
+	case MSM_UARTDM_PARITY_ODD:
+		data = UART_DM_MR2_PARITY_MODE_ODD;
+		break;
+	default:
+		/* NOTE(review): unknown parity values are silently ignored
+		 * (no log, no error) - the hsuart layer above is expected to
+		 * have validated them already. */
+		goto Done;
+		break;
+	}
+	spin_lock_irqsave(&(i_p_port->lock), flags);
+
+	/* Read-modify-write MR2: replace only the parity-mode bits. */
+	tmp = msm_read(i_p_port, UART_DM_MR2);
+	tmp &= ~UART_DM_MR2_PARITY_MODE_MASK;
+	tmp |= data;
+	msm_write(i_p_port, tmp, UART_DM_MR2);
+	i_p_port->parity_data = data;
+
+	spin_unlock_irqrestore(&(i_p_port->lock), flags);
+Done:
+	return;
+}
+EXPORT_SYMBOL(msm_uartdm_set_parity);
+
+
+/*
+static void msm_break_ctl(struct generic_uart_port *port, int break_ctl)
+{
+	if (break_ctl)
+		msm_write(port, UART_DM_CR_CMD_START_BREAK, UART_DM_CR);
+	else
+		msm_write(port, UART_DM_CR_CMD_STOP_BREAK, UART_DM_CR);
+}
+
+*/
+/**
+ * Helper function to dump the content of a uart port structure
+ *
+ * @param[in] - i_p_port - the port to print.
+ *
+ * @return - None.
+ */ +static void +_uartdm_print_port(struct generic_uart_port* i_p_port, int line_num) +{ +return; + MSM_UARTDM_DEBUG("%s: %s, Dumping port 0x%x at line %d\n", + DRIVER_NAME, + __PRETTY_FUNCTION__, + (int)i_p_port, + line_num); + MSM_UARTDM_DEBUG("name %s\n", &(i_p_port->name[0])); + MSM_UARTDM_DEBUG("clk name %s\n", i_p_port->p_clk_name); + MSM_UARTDM_DEBUG("clk_rate %d\n", i_p_port->clk_rate); + MSM_UARTDM_DEBUG("irq %d\n", i_p_port->irq); + MSM_UARTDM_DEBUG("id %d\n", i_p_port->id); + MSM_UARTDM_DEBUG("mapbase 0x%x\n",i_p_port->mapbase); + MSM_UARTDM_DEBUG("mem_size 0x%x, %d\n", i_p_port->mem_size, i_p_port->mem_size); + MSM_UARTDM_DEBUG("p_membase 0x%x\n",(int)i_p_port->p_membase); +} + +int +msm_uartdm_init_dma(struct generic_uart_port *i_p_port) +{ + int ret = 0; + MSM_UARTDM_ENTER(); + + if ( RX_MODE_DM(i_p_port)) { + + i_p_port->rx_dm.command_ptr = (dmov_box *) + dma_alloc_coherent(NULL, + sizeof(dmov_box), + &(i_p_port->rx_dm.command_ptr_phys), + GFP_KERNEL); + + + i_p_port->rx_dm.command_ptr_ptr = (u32 *)dma_alloc_coherent(NULL, + sizeof(u32), + &(i_p_port->rx_dm.command_ptr_ptr_phys), + GFP_KERNEL); + + i_p_port->rx_dm.command_ptr->cmd = CMD_LC | CMD_SRC_CRCI( i_p_port->dma_rx_crci ) | CMD_MODE_BOX; + + i_p_port->rx_dm.command_ptr->src_dst_len = (MSM_UARTDM_BURST_SIZE << 16) | (MSM_UARTDM_BURST_SIZE); + + i_p_port->rx_dm.command_ptr->row_offset = MSM_UARTDM_BURST_SIZE; + + i_p_port->rx_dm.command_ptr->src_row_addr = (unsigned int)i_p_port->mapbase + UART_DM_RF; + + i_p_port->rx_xfer.complete_func = handle_rx_dm; + } + MSM_UARTDM_EXIT(); + return ret; +} + +int +msm_uartdm_destroy_dma(struct generic_uart_port *i_p_port) +{ + int ret = 0; + + MSM_UARTDM_ENTER(); + + if (RX_MODE_DM(i_p_port)) { + + //TODO Should we flush the dm first ? and wait for completion ? 
+ if ( i_p_port->rx_dm.command_ptr ) { + dma_free_coherent(NULL, sizeof(dmov_box), i_p_port->rx_dm.command_ptr, i_p_port->rx_dm.command_ptr_phys); + } + + if ( i_p_port->rx_dm.command_ptr_ptr ) { + dma_free_coherent(NULL, sizeof(u32), i_p_port->rx_dm.command_ptr_ptr, i_p_port->rx_dm.command_ptr_ptr_phys); + } + + i_p_port->rx_dm.command_ptr = NULL; + i_p_port->rx_dm.command_ptr_ptr = NULL; + } + + MSM_UARTDM_EXIT(); + return ret; +} + +/** +* +* Open the UART port, mark it as "busy" any subsequent open request to the same +* UART port will fail, unless the port has been closed. +* The function allocate a control strucutre and mark it as taken, registers +* callbacks as specified in the input parameters. +* +* @param[in] i_p_config - The UART port configure. +* @param[out] o_pp_port - Pointer to pointer container to be filled with +* pointer to the newly allocate port. +* +* +* @return 0 for success -1 otherwise. +* +* @Note This function DOES NOT reset/configure the UART HW, it only deals with +* managing the data-structures used for handling the UART port. +*/ + +int +msm_uartdm_port_open( struct generic_uart_config* i_p_config, + struct generic_uart_port** o_pp_port) +{ + int ret = 0; + int i; + int line_num; + struct generic_uart_port* p_port = NULL; + + MSM_UARTDM_ENTER(); + + /* + * Basic sanity check + */ + if (NULL == o_pp_port) { + ret = -EINVAL; + line_num = __LINE__; + goto uartdm_open_err; + } + + + /* TODO: amir, Add spinlock to msmuartdm layer to protect global data structures.*/ + + /* + * See if the uart port is vacant and if so can a pointer to its + * control structure + */ + + for (i = 0; i < UARTDM_NUM_PORTS; i++) { + if ((0 == ports_db[i].used)&& (ports_db[i].port.id == i_p_config->port_id)) { + ports_db[i].used = 1; + p_port = &(ports_db[i].port); + break; + } + } + + /* + * Check if we couldn't find free port and bail out in case we didn't. 
+ */ + if (NULL == p_port) { + ret = -EBUSY; + line_num = __LINE__; + goto uartdm_open_err; + } + + /* + * Init spinlock + */ + spin_lock_init(&(p_port->lock)); + + /* + * Clear the callback zone. + */ + p_port->p_rx_level_callback = NULL; + p_port->p_rx_level_data = NULL; + p_port->p_rx_stale_callback = NULL; + p_port->p_rx_stale_data = NULL; + p_port->p_tx_level_callback = NULL; + p_port->p_tx_level_data = NULL; + p_port->p_tx_rdy_callback = NULL; + p_port->p_tx_rdy_data = NULL; + + p_port->rx_flow_ctl = 1; + p_port->tx_flow_ctl = 1; + /* + * Default baud rate + */ + p_port->baud_rate = 115200; + p_port->clk_rate = 7372800; + + p_port->parity_data = UART_DM_MR2_PARITY_MODE_NONE; + + p_port->flags = i_p_config->flags; + + p_port->rx_latency = i_p_config->rx_latency; + + p_port->p_board_pin_mux_cb = i_p_config->p_board_pin_mux_cb; + p_port->p_board_rts_pin_deassert_cb = i_p_config->p_board_rts_pin_deassert_cb; + + /* + * Enable the UARTDM clock. + */ + clk_set_rate(p_port->p_clk, p_port->clk_rate); + clk_enable(p_port->p_clk); + if (p_port->p_pclk) + clk_enable(p_port->p_pclk); + + if (i_p_config->p_board_config_gsbi_cb) + i_p_config->p_board_config_gsbi_cb(); + + msm_uartdm_init_dma(p_port); + + /* + * At last, initialization are done, assign the newly allocated + * control structure to be returned via the pointer + */ + (*o_pp_port) = p_port; + + MSM_UARTDM_EXIT(); + return ret; + +uartdm_open_err: + MSM_UARTDM_ERR("%s: %s, error %d at line %d\n", + DRIVER_NAME, __PRETTY_FUNCTION__, ret, line_num); + + _uartdm_print_port(p_port, __LINE__); + MSM_UARTDM_EXIT(); + return ret; + +} +EXPORT_SYMBOL(msm_uartdm_port_open); + +/** +* +* Disable the UART port +* +* @param[in] io_p_port - The UART port to configure. 
+* +* @Note +*/ +static void __msm_uartdm_port_disable(struct generic_uart_port* io_p_port) +{ + msm_uartdm_disable_rx(io_p_port); + + //Un-mux the relevant pins + if (io_p_port->p_board_pin_mux_cb) { + io_p_port->p_board_pin_mux_cb(0); + } + + msm_uartdm_disable_tx(io_p_port); + /* + * Turn off the UART interrupts + */ + msm_write(io_p_port, 0, UART_DM_IMR); + + +} +/** +* +* Close the UART port, unregisters callbacks. +* +* @param[in] io_p_port - The UART port to configure. +* +* @return 0 for success -1 otherwise. +* +* @Note +*/ +int +msm_uartdm_port_close(struct generic_uart_port* io_p_port) +{ + int ret = 0; + struct uart_port_item* p_item; + unsigned long irq_flags; + + MSM_UARTDM_ENTER(); + /* + * TODO: amir, make sure to abort/cancel any ongoing activity on the uart port BEFORE marking it as vacant. + * export the disable-port logic to its own internal function so we can call it from other locations as well. + */ + if (NULL == io_p_port) { + ret = -EINVAL; + } + else { + /* + * lock the DB + */ + spin_lock_irqsave(&(io_p_port->lock), irq_flags); + + p_item = container_of(io_p_port, struct uart_port_item, port); + +/* TODO:amir, ioremap and release mem was done at open/close, moved to probe */ +#if 0 + release_mem_region(io_p_port->mapbase, io_p_port->mem_size); + iounmap(io_p_port->p_membase); + io_p_port->p_membase = NULL; + io_p_port->mem_size = 0; +#endif + __msm_uartdm_port_disable(io_p_port); + + io_p_port->p_rx_level_callback = NULL; + io_p_port->p_rx_level_data = NULL; + io_p_port->p_rx_stale_callback = NULL; + io_p_port->p_rx_stale_data = NULL; + io_p_port->p_tx_level_callback = NULL; + io_p_port->p_tx_level_data = NULL; + io_p_port->p_tx_rdy_callback = NULL; + io_p_port->p_tx_rdy_data = NULL; + io_p_port->p_rx_dm_callback = NULL; + + io_p_port->p_board_pin_mux_cb = NULL; + io_p_port->p_board_rts_pin_deassert_cb = NULL; + + spin_unlock_irqrestore(&(io_p_port->lock), irq_flags); + + free_irq(io_p_port->irq, io_p_port); + + 
msm_uartdm_destroy_dma(io_p_port); + + p_item->used = 0; + + clk_disable(io_p_port->p_clk); + if (io_p_port->p_pclk) { + clk_disable(io_p_port->p_pclk); + } + } + + MSM_UARTDM_EXIT(); + + return ret; +} +EXPORT_SYMBOL(msm_uartdm_port_close); + + +/** +* +* Suspend the UART port +* +* @param[in] io_p_port - The UART port to configure. +* +* @Note +*/ +int msm_uartdm_port_suspend(struct generic_uart_port* io_p_port) +{ + int ret = 0; + unsigned long irq_flags; + + MSM_UARTDM_ENTER(); + + if (NULL == io_p_port) { + ret = -EINVAL; + } + else { + /* + * lock the DB + */ + spin_lock_irqsave(&(io_p_port->lock), irq_flags); + + __msm_uartdm_port_disable(io_p_port); + + clk_disable(io_p_port->p_clk); + if (io_p_port->p_pclk) { + clk_disable(io_p_port->p_pclk); + } + + spin_unlock_irqrestore(&(io_p_port->lock), irq_flags); + } + + MSM_UARTDM_EXIT(); + + return ret; +} + +EXPORT_SYMBOL(msm_uartdm_port_suspend); + +/** +* +* Initialize the UART port, this function is configuring the UART related HW +* subscribe to IRQs ( if needed ) , configure register and all the good stuff that makes +* the UART tick... +* The settings that will be used (e.g. baud rate etc) are those that were +* configured before calling this function, if parameter is not set, the +* default value will be used. +* It is strongly advised not to rely on default values and configure the +* parameters to the desired value.... +* +* @param[in] io_p_port - The UART port control structure. +* parity - UART parity +* need_irq - flags whether to request irq or not +* enable_rx_tx - flags whether enable rx and tx or not +* +* @return 0 for success -1 otherwise. 
+* +*/ +static int __msm_uartdm_port_init(struct generic_uart_port* io_p_port) +{ + int ret = 0; + u32 rfr_level; + unsigned int data; + struct msm_uart_port* p_msm_port = GEN_UART_TO_MSM(io_p_port); + + MSM_UARTDM_ENTER(); + + + /* + * Turn off the UART interrupts + */ + msm_write(io_p_port, 0, UART_DM_IMR); + + //Mux the relevant pins as functional + if (io_p_port->p_board_pin_mux_cb) { + io_p_port->p_board_pin_mux_cb(1); + } + + /* + * Set the UART speed + */ + __msm_uartdm_set_baud_rate(io_p_port, io_p_port->baud_rate); + + /* Reset UART */ + /* TODO: amir - move to a function to do more generic configuraion */ + msm_write(io_p_port, + UART_DM_MR2_BITS_PER_CHAR_8 | UART_DM_MR2_STOP_BIT_LEN_ONE | io_p_port->parity_data, + UART_DM_MR2); /* 8N1 */ + + /* Configure RFR for proper flow control */ + if (likely(io_p_port->rx_fifo_size > 12)) { + rfr_level = io_p_port->rx_fifo_size - 12; + } + else { + rfr_level = io_p_port->rx_fifo_size; + } + + /* set automatic RFR level */ + data = msm_read(io_p_port, UART_DM_MR1); + data &= ~UART_DM_MR1_AUTO_RFR_LEVEL1; + data &= ~UART_DM_MR1_AUTO_RFR_LEVEL0; + data |= UART_DM_MR1_AUTO_RFR_LEVEL1 & (rfr_level << 2); + data |= UART_DM_MR1_AUTO_RFR_LEVEL0 & rfr_level; + msm_write(io_p_port, data, UART_DM_MR1); + + /* Set flow control */ + __msm_uartdm_set_rx_flow(io_p_port, p_msm_port->rx_flow_ctl, p_msm_port->rx_flow_state); + __msm_uartdm_set_tx_flow(io_p_port, p_msm_port->tx_flow_ctl); + + /* Configure stale settings */ + /* make sure that RXSTALE count is non-zero */ + data = msm_read(io_p_port, UART_DM_IPR); + if (unlikely(!data)) { + data |= UART_DM_IPR_STALE_TIMEOUT_LSB_MSK; + msm_write(io_p_port, data, UART_DM_IPR); + } + + if ( RX_MODE_DM(p_msm_port) ) { + msm_write( io_p_port, UART_DM_DMEN_RX_DM_EN, UART_DM_DMEN ); + } + /* Reset UART */ + __msm_uartdm_reset(io_p_port); + + MSM_UARTDM_EXIT(); + + return ret; +} + +static int __msm_uartdm_port_init_imr(struct generic_uart_port* io_p_port) +{ + int ret = 0; + struct 
msm_uart_port* p_msm_port = GEN_UART_TO_MSM(io_p_port); + + /* + * Configure IMR + */ + p_msm_port->imr = UART_DM_IMR_CURRENT_CTS ; + + msm_write(io_p_port, + p_msm_port->imr, + UART_DM_IMR); + + return ret; +} +/** +* +* Resumes the UART port, this function is configuring the UART related HW +* subscribe configure register and all the good stuff that makes +* the UART tick... +* The settings that will be used (e.g. baud rate etc) are those that were +* configured before calling this function, if parameter is not set, the +* default value will be used. +* It is strongly advised not to rely on default values and configure the +* parameters to the desired value.... +* +* @param[in] io_p_port - The UART port control structure. +* +* +* @return 0 for success -1 otherwise. +* +*/ +int msm_uartdm_port_resume(struct generic_uart_port* io_p_port) +{ + int ret = 0; + unsigned long irq_flags; + + MSM_UARTDM_ENTER(); + + spin_lock_irqsave(&(io_p_port->lock), irq_flags); + + /* + * Enable the UARTDM clock. + */ + + clk_enable(io_p_port->p_clk); + if (io_p_port->p_pclk) + clk_enable(io_p_port->p_pclk); + + ret = __msm_uartdm_port_init(io_p_port); + + if (ret) { + MSM_UARTDM_DEBUG("%s: %s, to initialize udartdm port 0x%xn", + DRIVER_NAME, + __PRETTY_FUNCTION__, + ret); + goto end; + } + + ret = __msm_uartdm_port_init_imr(io_p_port); + +end: + spin_unlock_irqrestore(&(io_p_port->lock), irq_flags); + + MSM_UARTDM_EXIT(); + + return ret; +} + +EXPORT_SYMBOL(msm_uartdm_port_resume); + +/** +* +* Initialize the UART port, this function is configuring the UART related HW +* subscribe to IRQs, configure register and all the good stuff that makes +* the UART tick... +* The settings that will be used (e.g. baud rate etc) are those that were +* configured before calling this function, if parameter is not set, the +* default value will be used. +* It is strongly advised not to rely on default values and configure the +* parameters to the desired value.... 
+* +* @param[in] io_p_port - The UART port control structure. +* +* @return 0 for success -1 otherwise. +* +*/ +int +msm_uartdm_port_init(struct generic_uart_port* io_p_port) +{ + int ret = 0; + unsigned long irq_flags; + + struct msm_uart_port* p_msm_port = GEN_UART_TO_MSM(io_p_port); + + MSM_UARTDM_ENTER(); + + spin_lock_irqsave(&(io_p_port->lock), irq_flags); + + + ret = __msm_uartdm_port_init(io_p_port); + + if (ret) { + MSM_UARTDM_DEBUG("%s: %s, to initialize udartdm port 0x%xn", + DRIVER_NAME, + __PRETTY_FUNCTION__, + ret); + goto end; + } + + spin_unlock_irqrestore(&(io_p_port->lock), irq_flags); + + /* Register for IRQ */ + ret = request_irq(io_p_port->irq, + msm_uartdm_irq, + IRQF_TRIGGER_HIGH, + p_msm_port->name, + io_p_port); + if (ret) { + MSM_UARTDM_DEBUG("%s: %s, failed to register IRQ err 0x%xn", + DRIVER_NAME, + __PRETTY_FUNCTION__, + ret); + goto end; + } + + spin_lock_irqsave(&(io_p_port->lock), irq_flags); + + /* Enable Rx/Tx */ + msm_write( io_p_port, + UART_DM_CR_RX_ENABLE | UART_DM_CR_TX_ENABLE, + UART_DM_CR); + + ret = __msm_uartdm_port_init_imr(io_p_port); + +end: + spin_unlock_irqrestore(&(io_p_port->lock), irq_flags); + + MSM_UARTDM_EXIT(); + return ret; +} + +static void +__msm_uartdm_set_stale_timeout(struct generic_uart_port* i_p_port) +{ + /* RX stale watermark */ + int watermark; + int latency = i_p_port->rx_latency; + + BUG_ON( latency == 0 ); + + watermark = UART_DM_IPR_STALE_TIMEOUT_LSB_MSK & latency; + watermark |= UART_DM_IPR_STALE_TIMEOUT_MSB_MSK & + ( (latency >> UART_DM_IPR_STALE_TIMEOUT_LSB_SIZE) << UART_DM_IPR_STALE_TIMEOUT_MSB_OFFSET ); + + msm_write(i_p_port, watermark, UART_DM_IPR); +} +/** +* +* Configure the UARTDM port to the requested baud-rate. +* +* @param[in] i_p_port - The UART port to configure. +* @param[in] baud - The requested baud rate. +* +* @return 0 for success -1 otherwise. 
+* +* @Note The baud rate is calculated as follows: +* fundamental_clk / CSR = 16 * baud_rate +* hence, once the baud_rate is specified, we should pick CSR value and +* fundamental clk that satisfies the above formula. +*/ +static void +__msm_uartdm_set_baud_rate(struct generic_uart_port* i_p_port, unsigned int baud) +{ + u32 csr; + u32 fund_clk_freq; + u32 watermark; + u32 read_clk; + + MSM_UARTDM_DEBUG("%s: %s, enter port_0x%x baud %d\n", + DRIVER_NAME, + __PRETTY_FUNCTION__ + ,(unsigned int)i_p_port, + baud); + + i_p_port->baud_rate = baud; + + switch (baud) { + case 4000000: + fund_clk_freq = 64000000; + csr = UART_DM_CSR_RX_DIV_1|UART_DM_CSR_TX_DIV_1; + break; + case 3686400: + /* Set the fund_clk in hz */ + fund_clk_freq = 58982400; + csr = UART_DM_CSR_RX_DIV_1|UART_DM_CSR_TX_DIV_1; + break; + case 3000000: + fund_clk_freq = 48000000; + csr = UART_DM_CSR_RX_DIV_1|UART_DM_CSR_TX_DIV_1; + break; + case 1843200: + /* Set the fund_clk in hz */ + fund_clk_freq = 58982400; + csr = UART_DM_CSR_RX_DIV_2 | UART_DM_CSR_TX_DIV_2; + break; + case 1228800: + /* Set the fund_clk in hz */ + fund_clk_freq = 58982400; + csr = UART_DM_CSR_RX_DIV_3 | UART_DM_CSR_TX_DIV_3; + break; + case 921600: + /* Set the fund_clk in hz */ + fund_clk_freq = 58982400; + csr = UART_DM_CSR_RX_DIV_4 | UART_DM_CSR_TX_DIV_4; + break; + case 614400: + /* Set the fund_clk in hz */ + fund_clk_freq = 58982400; + csr = UART_DM_CSR_RX_DIV_6 | UART_DM_CSR_TX_DIV_6; + break; + case 460800: + /* Set the fund_clk in hz */ + fund_clk_freq = 58982400; + csr = UART_DM_CSR_RX_DIV_8 | UART_DM_CSR_TX_DIV_8; + break; + case 115200: + /* Set the fund_clk in hz */ + fund_clk_freq = 7372800; + csr = UART_DM_CSR_RX_DIV_4 | UART_DM_CSR_TX_DIV_4; + break; + default: + MSM_UARTDM_ERR("%s: %s, invalid baud rate specified %d, using default\n", + DRIVER_NAME, + __PRETTY_FUNCTION__, + baud); + + /* Default to 115200 - Set the fund_clk in hz */ + i_p_port->baud_rate = 115200; + fund_clk_freq = 7372800; + csr = 
UART_DM_CSR_RX_DIV_4 | UART_DM_CSR_TX_DIV_4; + break; + } + clk_set_rate(i_p_port->p_clk, fund_clk_freq); + + read_clk = clk_get_rate(i_p_port->p_clk); + if (fund_clk_freq != read_clk) { + MSM_UARTDM_ERR("%s, error, read_clk(%d), fund_clk_freq(%d)\n", + __PRETTY_FUNCTION__, read_clk, fund_clk_freq); + } + /* + * Make sure to update the clk rate in the port structure. + */ + i_p_port->clk_rate = read_clk; + + msm_write(i_p_port, csr, UART_DM_CSR); + + __msm_uartdm_set_stale_timeout(i_p_port); + + /* set RX watermark */ + watermark = 32;//(i_p_port->rx_fifo_size * 3) / 16; + msm_write(i_p_port, watermark, UART_DM_RFWR); + + /* set TX watermark */ + /* TODO: amir this is nice, find better value for Tx watermark if possible */ + msm_write(i_p_port, 32, UART_DM_TFWR); + + MSM_UARTDM_EXIT(); +} + + +/** +* +* Configure the UARTDM port to the requested baud-rate. +* +* @param[in] i_p_port - The UART port to configure. +* @param[in] baud - The requested baud rate. +* +* @return 0 for success -1/-ErrCode otherwise. 
+* +*/ +int +msm_uartdm_set_baud_rate(struct generic_uart_port* i_p_port, unsigned int baud) +{ + int ret = 0; + struct msm_uart_port* p_msm_port= GEN_UART_TO_MSM(i_p_port); + + unsigned long flags; + + MSM_UARTDM_ENTER(); + + if (NULL != i_p_port) { + spin_lock_irqsave(&(i_p_port->lock), flags); + /* + * Turn off the UART interrupts + */ + msm_write(i_p_port, 0, UART_DM_IMR); + + __msm_uartdm_set_baud_rate(i_p_port, baud); + + /* Reset UART */ + /* TODO: amir - move to a function to do more generic configuraion */ + msm_write(i_p_port, + UART_DM_MR2_BITS_PER_CHAR_8 | UART_DM_MR2_STOP_BIT_LEN_ONE | i_p_port->parity_data, + UART_DM_MR2); /* 8N1 */ + + msm_write(i_p_port, + p_msm_port->imr, + UART_DM_IMR); + + if ( RX_MODE_DM(p_msm_port)) { + msm_write( i_p_port, UART_DM_DMEN_RX_DM_EN, UART_DM_DMEN ); + } + + /* Reset UART */ + __msm_uartdm_reset(i_p_port); + + /* Enable Rx/Tx */ + msm_write(i_p_port, + UART_DM_CR_RX_ENABLE | UART_DM_CR_TX_ENABLE, + UART_DM_CR); + + spin_unlock_irqrestore(&(i_p_port->lock), flags); + } + else { + ret = -EINVAL; + MSM_UARTDM_ERR("%s: %s, invalid port ID 0x%x\n", + DRIVER_NAME, __FUNCTION__, (uint32_t)i_p_port); + } + + MSM_UARTDM_EXIT(); + return ret; +} +EXPORT_SYMBOL(msm_uartdm_set_baud_rate); + +/** +* +* Resets the UARTDM controller. +* +* @param[in] i_p_port - Pointer to a generic UART structure that +* corresponds to the port to reset. +* +* @return 0 for success -1 otherwise. 
+* +*/ +static void +__msm_uartdm_reset(struct generic_uart_port* i_p_port) +{ + struct msm_uart_port* p_msm_port = GEN_UART_TO_MSM(i_p_port); + + MSM_UARTDM_DEBUG("%s: %s enter, port 0x%x\n", + DRIVER_NAME, + __PRETTY_FUNCTION__, + (unsigned int)i_p_port); + + msm_write(i_p_port, UART_DM_CR_CMD_RESET_RX, UART_DM_CR); + /* Restore rx flow control */ + __msm_uartdm_set_rx_flow(i_p_port, p_msm_port->rx_flow_ctl, p_msm_port->rx_flow_state); + + msm_write(i_p_port, UART_DM_CR_CMD_RESET_TX, UART_DM_CR); + /* Restore tx flow control */ + __msm_uartdm_set_tx_flow(i_p_port, p_msm_port->tx_flow_ctl); + + msm_write(i_p_port, UART_DM_CR_CMD_RESET_ERR, UART_DM_CR); + msm_write(i_p_port, UART_DM_CR_CMD_RESET_BCI, UART_DM_CR); + msm_write(i_p_port, UART_DM_CR_CMD_CLR_CTS, UART_DM_CR); + msm_write(i_p_port, UART_DM_CR_CMD_CLR_STALE, UART_DM_CR); + msm_write(i_p_port, UART_DM_CR_CMD_CLR_TX_ERR, UART_DM_CR); + + + msm_write(i_p_port, UART_DM_CR_CMD_CLR_TX_ERR, UART_DM_CR); + msm_write(i_p_port, UART_DM_CR_CMD_CLR_TX_DONE, UART_DM_CR); + + MSM_UARTDM_EXIT(); +} + +#if 0 +static void +msm_uartdm_reset(struct generic_uart_port* i_p_port) +{ + unsigned long irq_flags; + + spin_lock_irqsave(&(i_p_port->lock), irq_flags); + + __msm_uartdm_reset(i_p_port); + + spin_unlock_irqrestore(&(i_p_port->lock), irq_flags); +} +#endif + +void +msm_uartdm_config_write_size(struct generic_uart_port* i_p_port, int num_bytes) +{ + MSM_UARTDM_ENTER(); + msm_write(i_p_port, num_bytes, UART_DM_NUM_CHARS_FOR_TX); + MSM_UARTDM_EXIT(); +} +EXPORT_SYMBOL(msm_uartdm_config_write_size); + +void +msm_uartdm_config_read_size(struct generic_uart_port* i_p_port, int num_bytes) +{ + MSM_UARTDM_ENTER(); + /* + * Clear stale event + */ + msm_write(i_p_port, UART_DM_CR_CMD_CLR_STALE, UART_DM_CR); + msm_write(i_p_port, num_bytes, UART_DM_DMRX); + MSM_UARTDM_EXIT(); +} +EXPORT_SYMBOL(msm_uartdm_config_read_size); + +int +msm_uartdm_get_received_byte_cnt(struct generic_uart_port* i_p_port) +{ + int ret; + 
MSM_UARTDM_ENTER(); + ret = msm_read(i_p_port, UART_DM_RX_TOTAL_SNAP); + MSM_UARTDM_DEBUG("%s: %s, exit, port 0x%x, rx bytes 0x%x\n", + DRIVER_NAME, + __PRETTY_FUNCTION__, + (unsigned int)i_p_port, + ret); + MSM_UARTDM_EXIT(); + return ret; +} +EXPORT_SYMBOL(msm_uartdm_get_received_byte_cnt); + +int +msm_uartdm_rx_dm_config(struct generic_uart_port* i_p_port, uint32_t dst_phys_addr, size_t read_size ) +{ + int ret = 0; + struct msm_dmov_cmd *rx_xfer_ptr; + + MSM_UARTDM_ENTER(); + + BUG_ON(!i_p_port); + i_p_port->rx_dm.command_ptr->num_rows = ((read_size >> 4) << 16) | (read_size >> 4); + + i_p_port->rx_dm.command_ptr->dst_row_addr = dst_phys_addr; + + //BUG_ON(p_context->p_rx_buffer->write_index != 0 ); + + *(i_p_port->rx_dm.command_ptr_ptr) = CMD_PTR_LP | DMOV_CMD_ADDR(i_p_port->rx_dm.command_ptr_phys); + + rx_xfer_ptr = &(i_p_port->rx_xfer); + + rx_xfer_ptr->cmdptr = DMOV_CMD_PTR_LIST | DMOV_CMD_ADDR(i_p_port->rx_dm.command_ptr_ptr_phys); + + msm_dmov_enqueue_cmd(i_p_port->dma_rx_channel, rx_xfer_ptr); + + msm_uartdm_config_read_size( i_p_port, read_size); + + MSM_UARTDM_EXIT(); + return ret; +} +EXPORT_SYMBOL(msm_uartdm_rx_dm_config); + +void +msm_uartdm_rx_dm_flush(struct generic_uart_port* i_p_port) +{ + MSM_UARTDM_ENTER(); + + msm_dmov_flush(i_p_port->dma_rx_channel); + + MSM_UARTDM_EXIT(); +} +EXPORT_SYMBOL(msm_uartdm_rx_dm_flush); + +int +msm_uartdm_get_rx_fifo_fullness(struct generic_uart_port* i_p_port, int *o_p_packing_bytes) +{ + int ret; + + MSM_UARTDM_ENTER(); + + ret = msm_read(i_p_port, UART_DM_RXFS); + + if ( o_p_packing_bytes ) { + *o_p_packing_bytes = (ret & UART_DM_RX_BUFFER_STATE_MASK) >> UART_DM_RX_BUFFER_STATE_SHIFT; + } + + ret = (ret & UART_DM_RX_FIFO_STATE_LSB) | ((ret & UART_DM_RX_FIFO_STATE_MSB) >> 2); + + + MSM_UARTDM_EXIT(); + return ret; +} +EXPORT_SYMBOL(msm_uartdm_get_rx_fifo_fullness); +void +msm_uartdm_send_dword(struct generic_uart_port* i_p_port, unsigned int data) +{ + //TODO: see if we can remove the check... 
+// while (!(msm_read(i_p_port, UART_DM_SR) & UART_DM_SR_TX_READY))
+// ;
+ msm_write(i_p_port, data, UART_DM_TF);
+}
+EXPORT_SYMBOL(msm_uartdm_send_dword);
+
+unsigned int
+msm_uartdm_get_dword(struct generic_uart_port* i_p_port)
+{
+ if (!(msm_read(i_p_port, UART_DM_SR) & UART_DM_SR_RX_READY)) {
+ return -1;
+ }
+ return msm_read(i_p_port, UART_DM_RF);
+}
+EXPORT_SYMBOL(msm_uartdm_get_dword);
+
+/**
+*
+* Sets the callback function to be called in case that tx-level event has occurred.
+* This means that the level in the TX FIFO is below the pre-configured threshold.
+*
+* @param[in][out] io_p_port - The UART port to configure.
+* @param[in] callback - callback function.
+*
+* @return 0 for success, -EINVAL for invalid input and -EPERM otherwise.
+*
+* @Note Currently we support only one callback function per event at any
+* given point in time. Note that passing NULL as callback parameter will
+* clear the previous callback.
+* Calling this function with non-NULL 2 consecutive times without
+* clearing the callback function in between will cause the
+* function to fail.
+* +*/ +int +msm_uartdm_set_tx_level_cbk(struct generic_uart_port* io_p_port, + void (* pcallback)(void *pdata), + void* pdata) +{ + int ret = 0; + unsigned long irq_flags; + + MSM_UARTDM_DEBUG("%s: %s, enter port 0x%x\n", + DRIVER_NAME, + __PRETTY_FUNCTION__, + (unsigned int)io_p_port); + + spin_lock_irqsave(&(io_p_port->lock), irq_flags); + if (NULL == io_p_port) { + MSM_UARTDM_ERR("%s, invalid port handle", __PRETTY_FUNCTION__); + ret = -EINVAL; + } + else if ((NULL != pcallback) && + (NULL != io_p_port->p_tx_level_callback) && + (NULL != io_p_port->p_tx_level_data)) { + MSM_UARTDM_ERR("%s, setting cbk while another cbk is valid is not allowed", + __PRETTY_FUNCTION__); + ret = -EPERM; + } + else { + io_p_port->p_tx_level_callback = pcallback; + io_p_port->p_tx_level_data = pdata; + } + + spin_unlock_irqrestore(&(io_p_port->lock), irq_flags); + MSM_UARTDM_DEBUG("%s: %s, exit, port 0x%x, ret %d\n", + DRIVER_NAME, + __PRETTY_FUNCTION__, + (unsigned int)io_p_port, + ret); + + return ret; +} + +EXPORT_SYMBOL(msm_uartdm_set_tx_level_cbk); + +/** +* +* Sets the callback function to be called in case that tx-ready event has occured. +* This means that the level in the TX FIFO is empty or if we sent the number of +* characters that were prog before the write transaction. +* +* @param[in][out] io_p_port - The UART port to configure. +* @param[in] callback - callback function. +* +* @return 0 for success, -EINVAL for invalid input and -EPERM otherwise. +* +* @Note Currently we support only one callback function per event at any +* given point in time. Note that passing NULL as callback parameter will +* clear the previous callback. +* Calling this function with non-NULL 2 consecutive times without +* clearing the callback function in between will cause the +* function to fail. 
+* +*/ +int +msm_uartdm_set_tx_rdy_cbk(struct generic_uart_port* io_p_port, + void (* pcallback)(void *pdata), + void* pdata) +{ + int ret = 0; + unsigned long irq_flags; + + MSM_UARTDM_DEBUG("%s: %s, enter port 0x%x\n", + DRIVER_NAME, + __PRETTY_FUNCTION__, + (unsigned int)io_p_port); + + spin_lock_irqsave(&(io_p_port->lock), irq_flags); + if (NULL == io_p_port) { + MSM_UARTDM_ERR("%s, invalid port handle", __PRETTY_FUNCTION__); + ret = -EINVAL; + } + else if ((NULL != pcallback) && + (NULL != io_p_port->p_tx_rdy_callback) && + (NULL != io_p_port->p_tx_rdy_data)) { + MSM_UARTDM_ERR("%s, setting cbk while another cbk is valid is not allowed", + __PRETTY_FUNCTION__); + ret = -EPERM; + } + else { + io_p_port->p_tx_rdy_callback = pcallback; + io_p_port->p_tx_rdy_data = pdata; + } + + spin_unlock_irqrestore(&(io_p_port->lock), irq_flags); + MSM_UARTDM_DEBUG("%s: %s, exit, port 0x%x, ret %d\n", + DRIVER_NAME, + __PRETTY_FUNCTION__, + (unsigned int)io_p_port, + ret); + + return ret; +} + +EXPORT_SYMBOL(msm_uartdm_set_tx_rdy_cbk); + +/** +* +* Sets the callback function to be called in case that rx level +* event has occured. +* +* @param[in][out] io_p_port - The UART port to configure. +* @param[in] callback - callback function. +* +* @return 0 for success, -EINVAL for invalid input and -EPERM otherwise. +* +* @Note Currently we support only one callback function per event at any +* given point in time. Note that passing NULL as callback parameter will +* clear the previous callback. +* Calling this function with non-NULL 2 consecutive times without +* clearing the callback function in between will cause the +* function to fail. 
+* +*/ +int +msm_uartdm_set_rx_level_cbk(struct generic_uart_port* io_p_port, + void (* pcallback)(void *pdata), + void* pdata) +{ + int ret = 0; + unsigned long irq_flags; + + MSM_UARTDM_DEBUG("%s: %s, enter port 0x%x\n", + DRIVER_NAME, + __PRETTY_FUNCTION__, + (unsigned int)io_p_port); + + spin_lock_irqsave(&(io_p_port->lock), irq_flags); + if (NULL == io_p_port) { + MSM_UARTDM_ERR("%s, invalid port handle", __PRETTY_FUNCTION__); + ret = -EINVAL; + } + else if ((NULL != pcallback) && + (NULL != io_p_port->p_rx_level_callback) && + (NULL != io_p_port->p_rx_level_data)) { + MSM_UARTDM_ERR("%s, setting cbk while another cbk is valid is not allowed", + __PRETTY_FUNCTION__); + ret = -EPERM; + } + else { + io_p_port->p_rx_level_callback = pcallback; + io_p_port->p_rx_level_data = pdata; + } + + spin_unlock_irqrestore(&(io_p_port->lock), irq_flags); + MSM_UARTDM_DEBUG("%s: %s, exit, port 0x%x, ret %d\n", + DRIVER_NAME, + __PRETTY_FUNCTION__, + (unsigned int)io_p_port, + ret); + + return ret; +} + +EXPORT_SYMBOL(msm_uartdm_set_rx_level_cbk); + +/** +* +* Sets the callback function to be called in case that rx data mover ( dma ) +* event has occured. +* +* @param[in][out] io_p_port - The UART port to configure. +* @param[in] callback - callback function. +* +* @return 0 for success, -EINVAL for invalid input and -EPERM otherwise. +* +* @Note Currently we support only one callback function per event at any +* given point in time. Note that passing NULL as callback parameter will +* clear the previous callback. +* Calling this function with non-NULL 2 consecutive times without +* clearing the callback function in between will cause the +* function to fail. 
+* +*/ +int +msm_uartdm_set_rx_dm_cbk(struct generic_uart_port* io_p_port, + void (* pcallback)(void *pdata), + void* pdata) +{ + int ret = 0; + unsigned long irq_flags; + + MSM_UARTDM_DEBUG("%s: %s, enter port 0x%x\n", + DRIVER_NAME, + __PRETTY_FUNCTION__, + (unsigned int)io_p_port); + + spin_lock_irqsave(&(io_p_port->lock), irq_flags); + if (NULL == io_p_port) { + MSM_UARTDM_ERR("%s, invalid port handle", __PRETTY_FUNCTION__); + ret = -EINVAL; + } + else if ((NULL != pcallback) && + (NULL != io_p_port->p_rx_dm_callback) && + (NULL != io_p_port->p_rx_dm_data)) { + MSM_UARTDM_ERR("%s, setting cbk while another cbk is valid is not allowed", + __PRETTY_FUNCTION__); + ret = -EPERM; + } + else { + io_p_port->p_rx_dm_callback = pcallback; + io_p_port->p_rx_dm_data = pdata; + } + + spin_unlock_irqrestore(&(io_p_port->lock), irq_flags); + MSM_UARTDM_DEBUG("%s: %s, exit, port 0x%x, ret %d\n", + DRIVER_NAME, + __PRETTY_FUNCTION__, + (unsigned int)io_p_port, + ret); + + return ret; +} + +EXPORT_SYMBOL(msm_uartdm_set_rx_dm_cbk); + +/** +* +* Sets the callback function to be called in case that rx stale event +* has occured. +* +* @param[in][out] io_p_port - The UART port to configure. +* @param[in] callback - callback function. +* +* @return 0 for success, -EINVAL for invalid input and -EPERM otherwise. +* +* @Note Currently we support only one callback function per event at any +* given point in time. Note that passing NULL as callback parameter will +* clear the previous callback. +* Calling this function with non-NULL 2 consecutive times without +* clearing the callback function in between will cause the +* function to fail. 
+* +*/ +int +msm_uartdm_set_rx_stale_cbk(struct generic_uart_port* io_p_port, + void (* pcallback)(void *pdata), + void* pdata) +{ + int ret = 0; + unsigned long irq_flags; + + MSM_UARTDM_DEBUG("%s: %s, enter port 0x%x\n", + DRIVER_NAME, + __PRETTY_FUNCTION__, + (unsigned int)io_p_port); + + spin_lock_irqsave(&(io_p_port->lock), irq_flags); + if (NULL == io_p_port) { + MSM_UARTDM_ERR("%s, invalid port handle", __PRETTY_FUNCTION__); + ret = -EINVAL; + } + else if ((NULL != pcallback) && + (NULL != io_p_port->p_rx_stale_callback) && + (NULL != io_p_port->p_rx_stale_data)) { + MSM_UARTDM_ERR("%s, setting cbk while another cbk is valid is not allowed", + __PRETTY_FUNCTION__); + ret = -EPERM; + } + else { + io_p_port->p_rx_stale_callback = pcallback; + io_p_port->p_rx_stale_data = pdata; + } + + spin_unlock_irqrestore(&(io_p_port->lock), irq_flags); + MSM_UARTDM_DEBUG("%s: %s, exit, port 0x%x, ret %d\n", + DRIVER_NAME, + __PRETTY_FUNCTION__, + (unsigned int)io_p_port, + ret); + + return ret; +} + +EXPORT_SYMBOL(msm_uartdm_set_rx_stale_cbk); + +static int __devinit +msm_uartdm_probe(struct platform_device *pdev) +{ + struct msm_uart_port* p_msm_port; + struct resource* p_resource; + struct generic_uart_port* p_port = NULL; + int ret = 0; + int line_number = 0; + int i; + + MSM_UARTDM_ENTER(); + + if (unlikely(pdev->id < 0 || pdev->id >= UART_NR)) { + ret = -ENXIO; + line_number = __LINE__; + goto probe_err_invalid_param; + } + + MSM_UARTDM_INFO("%s: detected port #%d\n", DRIVER_NAME, pdev->id); +//TODO: amir, add alternative function to get the UART control structure from the id + for (i = 0; i < UARTDM_NUM_PORTS; i++) { + if (pdev->id == ports_db[i].port.id) { + p_port = &(ports_db[i].port); + break; + } + } + + if (NULL == p_port) { + ret = -ENXIO; + line_number = __LINE__; + goto probe_err_invalid_param; + } + + snprintf(p_port->name, sizeof(p_port->name), + "msm_uartdm%d", p_port->id+1); + + p_port->p_device = &pdev->dev; + p_msm_port = GEN_UART_TO_MSM(p_port); + + 
p_msm_port->p_clk = clk_get(&pdev->dev, p_msm_port->p_clk_name); + if (IS_ERR(p_msm_port->p_clk)) { + printk("Cannot get clock %s\n", p_msm_port->p_clk_name); + ret = -ENXIO; + line_number = __LINE__; + goto probe_err_invalid_param; + } + + p_msm_port->p_pclk = clk_get(&pdev->dev, p_msm_port->p_pclk_name); + if (IS_ERR(p_msm_port->p_pclk)) + p_msm_port->p_pclk = NULL; + + + /* + * Get and save the memory mapped area for the UART + */ + p_resource = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (unlikely(NULL == p_resource)) { + ret = -ENXIO; + line_number = __LINE__; + goto probe_err_invalid_param; + } + p_port->mapbase = p_resource->start; + p_port->mem_size = p_resource->end - p_resource->start + 1; + + p_resource = platform_get_resource_byname( pdev, IORESOURCE_DMA, "uartdm_channels" ); + if (likely(NULL != p_resource)) { + p_port->dma_tx_channel = p_resource->start; + p_port->dma_rx_channel = p_resource->end; + } + + p_resource = platform_get_resource_byname( pdev, IORESOURCE_DMA, "uartdm_crci" ); + if (likely(NULL != p_resource)) { + p_port->dma_tx_crci = p_resource->start; + p_port->dma_rx_crci = p_resource->end; + } + + MSM_UARTDM_DEBUG("%s: %s mapbase start 0x%x, end 0x%x, size %d\n", + DRIVER_NAME, + __PRETTY_FUNCTION__, + (unsigned int)p_resource->start, + (unsigned int)p_resource->end, + p_port->mem_size); + /* + * Map phy to virt UART memory mapped area + */ + if (unlikely(!request_mem_region(p_port->mapbase, p_port->mem_size, "uartdm"))) { + ret = -EBUSY; + line_number = __LINE__; + goto probe_err_invalid_param; + } +_uartdm_print_port(p_port,__LINE__); + p_port->p_membase = ioremap(p_port->mapbase, p_port->mem_size); + if (NULL == p_port->p_membase) { + release_mem_region(p_port->mapbase, p_port->mem_size); + ret = -EBUSY; + line_number = __LINE__; + goto probe_err_invalid_param; + } +_uartdm_print_port(p_port, __LINE__); + + p_port->irq = platform_get_irq(pdev, 0); + + MSM_UARTDM_DEBUG("%s: %s, irq %d\n", + DRIVER_NAME, + __PRETTY_FUNCTION__, 
+ p_port->irq); + + if (unlikely(p_port->irq < 0)) { + ret = -ENXIO; + line_number = __LINE__; + goto probe_err_invalid_param; + } + + MSM_UARTDM_EXIT(); + + return ret; +probe_err_invalid_param: + MSM_UARTDM_ERR("%s, %s invalid param at line %d", + DRIVER_NAME, + __PRETTY_FUNCTION__, + line_number ); + ret = -ENXIO; + + MSM_UARTDM_EXIT(); + return ret; + + +} + +static int __devexit +msm_uartdm_remove(struct platform_device *pdev) +{ + struct msm_uart_port *msm_uart_port = NULL; + int i; + + if (unlikely(pdev->id < 0 || pdev->id >= UART_NR)) + return -ENXIO; + + for (i = 0; i < UARTDM_NUM_PORTS; i++) { + if (pdev->id == ports_db[i].port.id) { + msm_uart_port = GEN_UART_TO_MSM(&(ports_db[i].port)); + break; + } + } + + if (!msm_uart_port) { + return -ENODEV; + } + + clk_put(msm_uart_port->p_clk); + + if (msm_uart_port->p_pclk) { + clk_put(msm_uart_port->p_pclk); + } + + return 0; +} + + +static struct platform_driver msm_platform_driver = { + .probe = msm_uartdm_probe, + .remove = msm_uartdm_remove, + .driver = { + .name = DRIVER_NAME, + .owner = THIS_MODULE, + }, +}; + +static int __init msm_uartdm_init(void) +{ + int ret; + + MSM_UARTDM_ENTER(); + + ret = platform_driver_probe(&msm_platform_driver, msm_uartdm_probe); + if (unlikely(ret)) { + MSM_UARTDM_ERR("%s: failed initialization\n", DRIVER_NAME); + } + else { + MSM_UARTDM_INFO("%s: initialized\n", DRIVER_NAME); + } + MSM_UARTDM_EXIT(); + return ret; +} + +static void __exit msm_uartdm_exit(void) +{ + platform_driver_unregister(&msm_platform_driver); +} + +module_init(msm_uartdm_init); +module_exit(msm_uartdm_exit); + +MODULE_AUTHOR("Amir Frenkel "); +MODULE_DESCRIPTION("Driver for msm7x UART DM device"); +MODULE_LICENSE("GPL"); diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig index 87cdb0215bd..fef928281e9 100644 --- a/drivers/input/touchscreen/Kconfig +++ b/drivers/input/touchscreen/Kconfig @@ -817,4 +817,16 @@ config TOUCHSCREEN_CYTTSP_I2C To compile this driver as a 
module, choose M here: the module will be called cyttsp-i2c. + +config TOUCHSCREEN_CY8CTMA395 + tristate "Cypress TMA395 capacitive touchscreen controller" + default n + help + Say Y here if you have a CY8CTMA395 based touchscreen controller + + If unsure, say N. + + To comile this driver as a module, choose M here: the module + will be called cy8ctma395 + endif diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile index 1d67427e275..44407734781 100644 --- a/drivers/input/touchscreen/Makefile +++ b/drivers/input/touchscreen/Makefile @@ -69,3 +69,4 @@ obj-$(CONFIG_TOUCHSCREEN_TPS6507X) += tps6507x-ts.o obj-$(CONFIG_TOUCHSCREEN_MSM_LEGACY) += msm_touch.o obj-$(CONFIG_TOUCHSCREEN_CY8C_TS) += cy8c_ts.o obj-$(CONFIG_TOUCHSCREEN_CYTTSP_I2C) += cyttsp-i2c.o +obj-$(CONFIG_TOUCHSCREEN_CY8CTMA395) += cy8ctma395.o diff --git a/drivers/input/touchscreen/cy8ctma395.c b/drivers/input/touchscreen/cy8ctma395.c new file mode 100644 index 00000000000..119361bab14 --- /dev/null +++ b/drivers/input/touchscreen/cy8ctma395.c @@ -0,0 +1,1189 @@ +//#define DEBUG + +#include +#include +#include +#include +#include +#include + +#define BLOCK_LEN 256 +#define ECC_LEN 32 +#define DEVICE_CONFIG_ECCEN 0x08 + +#define DATA_RECORD_ADDR 0x00000000 +#define DATA_RECORD_LEN 64 +#define ECC_RECORD_ADDR 0x80000000 +#define ECC_RECORD_LEN 64 +#define NVL_RECORD_ADDR 0x90000000 +#define NVL_RECORD_LEN 4 + +#define APACC_ADDR_WRITE 0x8b +#define APACC_DATA_READ 0x9f +#define APACC_DATA_WRITE 0xbb +#define DPACC_DATA_WRITE 0x99 +#define DPACC_DP_CONFIG_WRITE 0xa9 +#define DPACC_IDCODE_READ 0xa5 + +#define RESPONSE_OK 0x1 +#define RESPONSE_WAIT 0x2 +#define RESPONSE_FAULT 0x4 + +struct addr_data_pair { + u32 addr; + u32 data; +}; + +struct program_row { + const struct ihex_binrec *dat_rec; + u16 dat_rec_off; + const struct ihex_binrec *ecc_rec; + u16 ecc_rec_off; + const struct ihex_binrec *nvl_rec; + int ecc_enabled; + u32 sram_addr; + u32 temp[2]; + u32 nr; + u32 
phub_ch_status_addr; + u32 phub_ch_status_data; + u32 phub_ch_basic_cfg_addr; + u32 phub_cfgmem_cfg0_addr; + u32 phub_cfgmem_cfg1_addr; + u32 phub_tdmem_orig_td0_addr; + u32 phub_tdmem_orig_td1_addr; + u32 phub_ch_action_addr; +}; + +struct cy8ctma395_device_data { + int last_swdio_bit; +}; + +static int swd_read_bit(struct device *dev, int fast) +{ + int bit; + struct cy8ctma395_platform_data *pdat = dev->platform_data; + + gpio_set_value(pdat->swdck, 0); + if (!fast) udelay(1); + bit = gpio_get_value(pdat->swdio); + gpio_set_value(pdat->swdck, 1); + + return (bit); +} + +static void swd_write_bit(struct device *dev, int bit, int fast) +{ + struct cy8ctma395_device_data *dat = dev_get_drvdata(dev); + struct cy8ctma395_platform_data *pdat = dev->platform_data; + + if (bit == dat->last_swdio_bit) { + gpio_set_value(pdat->swdck, 0); + gpio_set_value(pdat->swdck, 1); + } + + else { + gpio_set_value(pdat->swdck, 0); + gpio_set_value(pdat->swdio, dat->last_swdio_bit = bit); + if (!fast) udelay(1); + gpio_set_value(pdat->swdck, 1); + } +} + +static void swd_turnaround(struct device *dev, int out) +{ + struct cy8ctma395_device_data *dat = dev_get_drvdata(dev); + struct cy8ctma395_platform_data *pdat = dev->platform_data; + + if (out) + gpio_direction_output(pdat->swdio, dat->last_swdio_bit = 1); + + else + gpio_direction_input(pdat->swdio); + + gpio_set_value(pdat->swdck, 0); + gpio_set_value(pdat->swdck, 1); +} + +static u8 swd_read_response(struct device *dev, int fast) +{ + u8 response; + + response = swd_read_bit(dev, fast); + response |= swd_read_bit(dev, fast) << 1; + response |= swd_read_bit(dev, fast) << 2; + + return (response); +} + +static u8 swd_read_byte(struct device *dev) +{ + u8 byte; + + byte = swd_read_bit(dev, 0); + byte |= swd_read_bit(dev, 0) << 1; + byte |= swd_read_bit(dev, 0) << 2; + byte |= swd_read_bit(dev, 0) << 3; + byte |= swd_read_bit(dev, 0) << 4; + byte |= swd_read_bit(dev, 0) << 5; + byte |= swd_read_bit(dev, 0) << 6; + byte |= 
swd_read_bit(dev, 0) << 7; + + return (byte); +} + +static void swd_write_byte(struct device *dev, u8 byte, int fast) +{ + swd_write_bit(dev, (byte >> 0) & 0x01, fast); + swd_write_bit(dev, (byte >> 1) & 0x01, fast); + swd_write_bit(dev, (byte >> 2) & 0x01, fast); + swd_write_bit(dev, (byte >> 3) & 0x01, fast); + swd_write_bit(dev, (byte >> 4) & 0x01, fast); + swd_write_bit(dev, (byte >> 5) & 0x01, fast); + swd_write_bit(dev, (byte >> 6) & 0x01, fast); + swd_write_bit(dev, (byte >> 7) & 0x01, fast); +} + +static int even_parity(u32 data) +{ + int parity = 0; + + for (; data; data >>= 1) + parity ^= data; + + return (parity & 0x1); +} + +static int swd_read_data(struct device *dev, u32 *data) +{ + int rc; + int parity; + + *data = swd_read_byte(dev); + *data |= (u32)swd_read_byte(dev) << 8; + *data |= (u32)swd_read_byte(dev) << 16; + *data |= (u32)swd_read_byte(dev) << 24; + parity = swd_read_bit(dev, 0); + + if (parity != even_parity(*data)) { + dev_err(dev, "swd data parity error, data=0x%08x parity=%x\n", + *data, parity); + rc = -EIO; + goto exit; + } + + rc = 0; +exit: + return (rc); +} + +static void swd_write_data(struct device *dev, u32 data, int fast) +{ + swd_write_byte(dev, data >> 0, fast); + swd_write_byte(dev, data >> 8, fast); + swd_write_byte(dev, data >> 16, fast); + swd_write_byte(dev, data >> 24, fast); + swd_write_bit(dev, even_parity(data), fast); +} + +static int swd_read(struct device *dev, u8 command, u32 *data) +{ + u8 response; + int rc; + int retries = 0; + struct cy8ctma395_platform_data *pdat = dev->platform_data; + +retry: + swd_write_byte(dev, command, 0); + swd_turnaround(dev, 0); + response = swd_read_response(dev, 0); + + rc = swd_read_data(dev, data); + if (rc < 0) + goto exit; + + swd_turnaround(dev, 1); + + if ((response == RESPONSE_WAIT) && (retries++ < pdat->swd_wait_retries)) + goto retry; + + if (response != RESPONSE_OK) { + dev_err(dev, "swd read failed, command=%02x response=%x\n", + command, response); + rc = -EIO; + goto 
exit; + } + + rc = 0; +exit: + return (rc); +} + +static u8 __swd_write(struct device *dev, u8 command, u32 data, int fast) +{ + u8 response; + + swd_write_byte(dev, command, fast); + swd_turnaround(dev, 0); + response = swd_read_response(dev, fast); + swd_turnaround(dev, 1); + swd_write_data(dev, data, fast); + + return (response); +} + +static int swd_write(struct device *dev, u8 command, u32 data) +{ + u8 response; + int rc; + int retries = 0; + struct cy8ctma395_platform_data *pdat = dev->platform_data; + +retry: + response = __swd_write(dev, command, data, 0); + + if ((response == RESPONSE_WAIT) && (retries++ < pdat->swd_wait_retries)) + goto retry; + + if (response != RESPONSE_OK) { + dev_err(dev, "swd write failed, command=%02x data=%08x response=%x\n", + command, data, response); + rc = -EIO; + goto exit; + } + + rc = 0; +exit: + return (rc); +} + +static int apacc_addr_write(struct device *dev, u32 addr) +{ + dev_dbg(dev, "apacc addr write [%08x]\n", addr); + + return (swd_write(dev, APACC_ADDR_WRITE, addr)); +} + +static int apacc_data_read(struct device *dev, u32 *data, int nr) +{ + int i; + int rc; + u32 unused; + + rc = swd_read(dev, APACC_DATA_READ, &unused); + if (rc < 0) + goto exit; + + dev_dbg(dev, "apacc data read (dummy) [%08x]\n", unused); + + for (i = 0; i < nr; i++) { + rc = swd_read(dev, APACC_DATA_READ, &data[i]); + if (rc < 0) + goto exit; + + dev_dbg(dev, "apacc data read [%08x]\n", data[i]); + } + +exit: + return (rc); +} + +static int apacc_addr_write_data_read(struct device *dev, u32 addr, u32 *data, + int nr) +{ + int rc; + + rc = apacc_addr_write(dev, addr); + if (rc < 0) + goto exit; + + rc = apacc_data_read(dev, data, nr); +exit: + return (rc); +} + +static inline int apacc_data_write(struct device *dev, u32 data) +{ + dev_dbg(dev, "apacc data write [%08x]\n", data); + + return (swd_write(dev, APACC_DATA_WRITE, data)); +} + +static int apacc_addr_data_write(struct device *dev, u32 addr, u32 data) +{ + int rc; + + rc = 
apacc_addr_write(dev, addr); + if (rc < 0) + goto exit; + + rc = apacc_data_write(dev, data); +exit: + return (rc); +} + +static int apacc_addr_data_write_seq(struct device *dev, + struct addr_data_pair *seq, int nr) +{ + int i; + int rc; + + for (i = 0; i < nr; i++) { + rc = apacc_addr_data_write(dev, seq[i].addr, seq[i].data); + if (rc < 0) + goto exit; + } + + rc = 0; +exit: + return (rc); +} + +static int port_acquire(struct device *dev, u32 *id, u8 *rev) +{ + int rc; + u32 data[2]; + struct cy8ctma395_device_data *dat = dev_get_drvdata(dev); + struct cy8ctma395_platform_data *pdat = dev->platform_data; + + rc = pdat->swdck_request(1); + if (rc < 0) + goto request_swdck_failed; + + rc = pdat->swdio_request(1); + if (rc < 0) + goto request_swdio_failed; + + gpio_set_value(pdat->swdck, 1); + gpio_set_value(pdat->swdio, dat->last_swdio_bit = 1); + gpio_set_value(pdat->xres, 0); + usleep(pdat->xres_us); + + local_irq_disable(); + { + u8 response; + int retries = 0; + + gpio_set_value(pdat->xres, 1); +retry: + response = __swd_write(dev, DPACC_DATA_WRITE, 0x7B0C06DB, 1); + if (response != RESPONSE_OK) { + if (retries++ < pdat->port_acquire_retries) + goto retry; + + rc = -EIO; + goto enable; + } + + rc = apacc_addr_data_write(dev, 0x00050210, 0xEA7E30A9); + } + +enable: + local_irq_enable(); + + if (rc < 0) { + dev_err(dev, "failed to acquire port\n"); + goto acquire_failed; + } + + { + struct addr_data_pair seq[] = { + {0x00050220, 0x000000B3}, + {0x000046EA, 0x00000001}, + {0x000043A0, 0x000000BF}, + {0x00004200, 0x00000000}, + }; + + rc = apacc_addr_data_write_seq(dev, seq, ARRAY_SIZE(seq)); + if (rc < 0) + goto acquire_failed; + } + + dev_dbg(dev, "dpacc idcode read\n"); + rc = swd_read(dev, DPACC_IDCODE_READ, &data[0]); + if (rc < 0) + goto acquire_failed; + + rc = apacc_addr_write_data_read(dev, 0x000046EC, &data[1], 1); + if (rc < 0) + goto acquire_failed; + + if (id) + *id = data[0]; + + if (rev) + *rev = data[1]; + + return (0); + +acquire_failed: + 
gpio_set_value(pdat->xres, 0); + usleep(pdat->xres_us); + gpio_set_value(pdat->xres, 1); + (void)pdat->swdio_request(0); +request_swdio_failed: + (void)pdat->swdck_request(0); +request_swdck_failed: + + return (rc); +} + +static void port_release(struct device *dev) +{ + struct cy8ctma395_platform_data *pdat = dev->platform_data; + + gpio_set_value(pdat->xres, 0); + usleep(pdat->xres_us); + gpio_set_value(pdat->xres, 1); + (void)pdat->swdio_request(0); + (void)pdat->swdck_request(0); +} + +static int poll_status_reg(struct device *dev, u8 expected, const char *step) +{ + int rc; +#ifdef DEBUG + u64 ms; +#endif /* DEBUG */ + u32 data; + struct timespec now; + struct timespec expiry; + struct cy8ctma395_platform_data *pdat = dev->platform_data; + + ktime_get_ts(&expiry); + timespec_add_ns(&expiry, pdat->status_reg_timeout_ms * NSEC_PER_MSEC); + + for (;;) { + rc = apacc_addr_write_data_read(dev, 0x00004722, &data, 1); + if (rc < 0) + goto exit; + + /* only low byte is relevant in status reg */ + if ((data & 0xff) == expected) + break; + + ktime_get_ts(&now); + if (timespec_compare(&now, &expiry) >= 0) { + dev_err(dev, "timed out waiting for '%s'\n", step); + rc = -ETIME; + goto exit; + } + + msleep(jiffies_to_msecs(1)); + } + +#ifdef DEBUG + ktime_get_ts(&now); + timespec_add_ns(&now, NSEC_PER_SEC); + ms = timespec_to_ns(&now) - timespec_to_ns(&expiry); + do_div(ms, NSEC_PER_MSEC); + dev_dbg(dev, "'%s' time=%lldms\n", step, ms); +#endif /* DEBUG */ + + rc = 0; +exit: + return (rc); +} + +static int find_record(struct device *dev, const struct ihex_binrec *beg, + u32 addr, u16 len, const struct ihex_binrec **rec) +{ + int rc; + + for (*rec = beg; be32_to_cpu((*rec)->addr) != addr;) { + *rec = ihex_next_binrec(*rec); + if (!*rec) { + dev_err(dev, "no record with address 0x%08x\n", addr); + rc = -EINVAL; + goto exit; + } + } + + if (be16_to_cpu((*rec)->len) != len) { + dev_err(dev, "record 0x%08x is %hu bytes, expected %hu\n", addr, + be16_to_cpu((*rec)->len), len); + 
rc = -EINVAL; + goto exit; + } + + rc = 0; +exit: + return (rc); +} + +static int load_row(struct device *dev, const struct ihex_binrec **rec, + u16 *rec_off, u32 beg, u32 end) +{ + int rc; + u32 data; + + for (; beg < end; beg += 4) { + if (*rec_off >= be16_to_cpu((*rec)->len)) { + rc = find_record(dev, *rec, be32_to_cpu((*rec)->addr) + + be16_to_cpu((*rec)->len), + be16_to_cpu((*rec)->len), rec); + if (rc < 0) + goto exit; + + *rec_off = 0; + } + + data = (*rec)->data[*rec_off] << 0; + data |= (*rec)->data[*rec_off+1] << 8; + data |= (*rec)->data[*rec_off+2] << 16; + data |= (*rec)->data[*rec_off+3] << 24; + rc = apacc_addr_data_write(dev, beg+4, data); + if (rc < 0) + goto exit; + + *rec_off += 4; + } + + rc = 0; +exit: + return (rc); +} + +static int program_row(struct device *dev, struct program_row *row) +{ + int rc; + int len = BLOCK_LEN; + + rc = apacc_addr_data_write(dev, row->sram_addr, 0x0002D5B6); + if (rc < 0) + goto exit; + + rc = load_row(dev, &row->dat_rec, &row->dat_rec_off, row->sram_addr, + row->sram_addr + BLOCK_LEN); + if (rc < 0) + goto exit; + + if (!row->ecc_enabled) { + rc = load_row(dev, &row->ecc_rec, &row->ecc_rec_off, + row->sram_addr + BLOCK_LEN, + row->sram_addr + BLOCK_LEN + ECC_LEN); + if (rc < 0) + goto exit; + + len += ECC_LEN; + } + + { + struct addr_data_pair seq[] = { + {row->sram_addr + len + 0x4, 0xB6000000}, + {row->sram_addr + len + 0x8, 0x000007DA}, + {row->sram_addr + len + 0xC, + ((row->temp[1] & 0xff) << 16) + | ((row->temp[0] & 0xff) << 8) + | (row->nr & 0xff)}, + {row->phub_ch_status_addr, row->phub_ch_status_data}, + {row->phub_ch_basic_cfg_addr, 0x00000021}, + {row->phub_cfgmem_cfg0_addr, 0x00000080}, + {row->phub_cfgmem_cfg1_addr, 0x00000000}, + {row->phub_tdmem_orig_td0_addr, 0x01FF0000 + len + 0xf}, + {row->phub_tdmem_orig_td1_addr, 0x47200000 + row->sram_addr}, + }; + + rc = apacc_addr_data_write_seq(dev, seq, ARRAY_SIZE(seq)); + if (rc < 0) + goto exit; + } + + rc = poll_status_reg(dev, 0x2, "dma"); + if (rc < 
0) + goto exit; + + rc = apacc_addr_data_write(dev, row->phub_ch_action_addr, 0x00000001); +exit: + if (rc < 0) + dev_err(dev, "failure programming row %u\n", row->nr); + + return (rc); +} + +static int checksum(struct device *dev, u16 *sum) +{ + int rc; + u32 data[4]; + struct cy8ctma395_platform_data *pdat = dev->platform_data; + + { + struct addr_data_pair seq[] = { + {0x00004720, 0x000000B6}, + {0x00004720, 0x000000DF}, + {0x00004720, 0x0000000C}, + {0x00004720, 0x00000000}, + {0x00004720, 0x00000000}, + {0x00004720, 0x00000000}, + {0x00004720, 0x00000000}, + {0x00004720, pdat->nr_blocks - 1}, + }; + + rc = apacc_addr_data_write_seq(dev, seq, ARRAY_SIZE(seq)); + if (rc < 0) + goto exit; + } + + rc = poll_status_reg(dev, 0x1, "checksum"); + if (rc < 0) + goto exit; + + rc = apacc_addr_write_data_read(dev, 0x00004720, data, 4); + if (rc < 0) + goto exit; + + rc = poll_status_reg(dev, 0x2, "idle"); + if (rc < 0) + goto exit; + + *sum = ((data[2] & 0xff) << 8) | (data[3] & 0xff); +exit: + return (rc); +} + +static ssize_t cy8ctma395_attr_checksum_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + u16 sum; + ssize_t rc; + + rc = port_acquire(dev, NULL, NULL); + if (rc < 0) + goto exit; + + rc = checksum(dev, &sum); + if (rc < 0) + goto release; + + rc = snprintf(buf, PAGE_SIZE, "%04x\n", sum); +release: + port_release(dev); +exit: + return (rc); +} + +static struct device_attribute cy8ctma395_attr_checksum = { + .attr = { + .name = "checksum", + .mode = S_IRUSR, + }, + .show = cy8ctma395_attr_checksum_show, +}; + +static ssize_t cy8ctma395_attr_id_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + u8 rev; + u32 id; + ssize_t rc; + + rc = port_acquire(dev, &id, &rev); + if (rc < 0) + goto exit; + + rc = snprintf(buf, PAGE_SIZE, "%08x %02x\n", id, rev); + port_release(dev); +exit: + return (rc); +} + +static struct device_attribute cy8ctma395_attr_id = { + .attr = { + .name = "id", + .mode = S_IRUSR, + }, + .show = 
cy8ctma395_attr_id_show, +}; + +static int read_device_config(struct device *dev, u8 *regs) +{ + int i; + int rc; + u32 data[4]; + + { + struct addr_data_pair seq[] = { + {0x00005112, 0x00000000}, + {0x00005113, 0x00000004}, + {0x00005114, 0x00000000}, + {0x00005110, 0x00000004}, + }; + + rc = apacc_addr_data_write_seq(dev, seq, ARRAY_SIZE(seq)); + if (rc < 0) + goto exit; + } + + for (i = 0; i < 4; i++) { + struct addr_data_pair seq[] = { + {0x00004720, 0x000000B6}, + {0x00004720, 0x000000D6}, + {0x00004720, 0x00000003}, + {0x00004720, 0x00000080}, + {0x00004720, i}, + }; + + rc = apacc_addr_data_write_seq(dev, seq, ARRAY_SIZE(seq)); + if (rc < 0) + goto exit; + + rc = poll_status_reg(dev, 0x1, "device config"); + if (rc < 0) + goto exit; + + rc = apacc_addr_write_data_read(dev, 0x00004720, &data[i], 1); + if (rc < 0) + goto exit; + + regs[i] = data[i]; + + rc = poll_status_reg(dev, 0x2, "idle"); + if (rc < 0) + goto exit; + } + +exit: + return (rc); +} + +static int write_device_config(struct device *dev, u8 *regs) +{ + int i; + int rc; + + for (i = 0; i < 4; i++) { + struct addr_data_pair seq[] = { + {0x00004720, 0x000000B6}, + {0x00004720, 0x000000D3}, + {0x00004720, 0x00000000}, + {0x00004720, 0x00000080}, + {0x00004720, i}, + {0x00004720, regs[i]}, + }; + + rc = apacc_addr_data_write_seq(dev, seq, ARRAY_SIZE(seq)); + if (rc < 0) + goto exit; + + rc = poll_status_reg(dev, 0x2, "device config"); + if (rc < 0) + goto exit; + } + + { + struct addr_data_pair seq[] = { + {0x00004720, 0x000000B6}, + {0x00004720, 0x000000D9}, + {0x00004720, 0x00000006}, + {0x00004720, 0x00000080}, + }; + + rc = apacc_addr_data_write_seq(dev, seq, ARRAY_SIZE(seq)); + if (rc < 0) + goto exit; + } + + rc = poll_status_reg(dev, 0x2, "device config"); + if (rc < 0) + goto exit; + + exit: + return (rc); + +} + +static ssize_t cy8ctma395_attr_device_config_show( + struct device *dev, struct device_attribute *attr, + char *buf) +{ + u8 regs[4]; + ssize_t rc; + + rc = port_acquire(dev, NULL, 
NULL); + if (rc < 0) + goto exit; + + rc = read_device_config(dev, regs); + if (rc < 0) + goto release; + + rc = snprintf(buf, PAGE_SIZE, "%02x %02x %02x %02x\n", regs[0], regs[1], + regs[2], regs[3]); +release: + port_release(dev); +exit: + return (rc); +} + +static ssize_t cy8ctma395_attr_device_config_store( + struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + u8 regs[4]; + ssize_t rc; + + rc = sscanf(buf, "%hhx %hhx %hhx %hhx", ®s[0], ®s[1], ®s[2], + ®s[3]); + if (rc < 4) { + rc = -EINVAL; + goto exit; + } + + rc = port_acquire(dev, NULL, NULL); + if (rc < 0) + goto exit; + + rc = write_device_config(dev, regs); + if (rc < 0) + goto release; + + rc = count; +release: + port_release(dev); +exit: + return (rc); +} + +static struct device_attribute cy8ctma395_attr_device_config = { + .attr = { + .name = "device_config", + .mode = S_IRUSR|S_IWUSR, + }, + .show = cy8ctma395_attr_device_config_show, + .store = cy8ctma395_attr_device_config_store, +}; + +static ssize_t cy8ctma395_attr_flash_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + u8 rev; + u8 regs[4]; + int i; + u16 sum; + u32 id; + ssize_t rc; + struct program_row row; + const struct firmware *fw = NULL; + struct cy8ctma395_platform_data *pdat = dev->platform_data; + + rc = request_ihex_firmware(&fw, buf, dev); + if (rc < 0) { + dev_err(dev, "error %d requesting firmware %s\n", rc, buf); + goto exit; + } + + row.dat_rec_off = 0; + rc = find_record(dev, (struct ihex_binrec *)fw->data, DATA_RECORD_ADDR, + DATA_RECORD_LEN, &row.dat_rec); + if (rc < 0) + goto release_firmware; + + row.ecc_rec_off = 0; + rc = find_record(dev, (struct ihex_binrec *)fw->data, ECC_RECORD_ADDR, + ECC_RECORD_LEN, &row.ecc_rec); + if (rc < 0) + goto release_firmware; + + rc = find_record(dev, (struct ihex_binrec *)fw->data, NVL_RECORD_ADDR, + NVL_RECORD_LEN, &row.nvl_rec); + if (rc < 0) + goto release_firmware; + + rc = port_acquire(dev, &id, &rev); + 
if (rc < 0) + goto release_firmware; + + dev_info(dev, "jtag id=%08x revision=%08x\n", id, rev); + + /* erase all */ + { + struct addr_data_pair seq[] = { + {0x00004720, 0x000000B6}, + {0x00004720, 0x000000DC}, + {0x00004720, 0x00000009}, + }; + + rc = apacc_addr_data_write_seq(dev, seq, ARRAY_SIZE(seq)); + if (rc < 0) + goto port_release; + } + + rc = poll_status_reg(dev, 0x2, "erase all"); + if (rc < 0) + goto port_release; + + /* read_device config */ + rc = read_device_config(dev, regs); + if (rc < 0) + goto port_release; + + /* check if we need to flash device_config by comparing */ + /* the NVL bytes from target device with those in hex file */ + if (memcmp(regs, row.nvl_rec->data, NVL_RECORD_LEN) != 0) { + rc = write_device_config(dev, (u8 *) row.nvl_rec->data); + if (rc < 0) + goto port_release; + } + + /* program */ + for (i = 0; i < 2; i++) { + struct addr_data_pair seq[] = { + {0x00004720, 0x000000B6}, + {0x00004720, 0x000000E1}, + {0x00004720, 0x0000000E}, + {0x00004720, 0x00000003}, + }; + + rc = apacc_addr_data_write_seq(dev, seq, ARRAY_SIZE(seq)); + if (rc < 0) + goto port_release; + + rc = poll_status_reg(dev, 0x1, "temperature data"); + if (rc < 0) + goto port_release; + + rc = apacc_addr_write_data_read(dev, 0x00004720, row.temp, 2); + if (rc < 0) + goto port_release; + + rc = poll_status_reg(dev, 0x2, "idle"); + if (rc < 0) + goto port_release; + } + + dev_info(dev, "temperature sign=%08x magnitude=%08x\n", row.temp[0], + row.temp[1]); + + dev_dbg(dev, "dpacc dp config write [%08x]\n", 0x00000004); + rc = swd_write(dev, DPACC_DP_CONFIG_WRITE, 0x00000004); + if (rc < 0) + goto port_release; + + row.ecc_enabled = !!(regs[3] & DEVICE_CONFIG_ECCEN); + dev_info(dev, "ecc is %s\n", row.ecc_enabled ? 
"enabled" : "disabled"); + + for (i = 0; i < pdat->nr_blocks;) { + row.sram_addr = 0x000; + row.nr = i++; + row.phub_ch_status_addr = 0x00007018; + row.phub_ch_status_data = 0x00000000; + row.phub_ch_basic_cfg_addr = 0x00007010; + row.phub_cfgmem_cfg0_addr = 0x00007600; + row.phub_cfgmem_cfg1_addr = 0x00007604; + row.phub_tdmem_orig_td0_addr = 0x00007800; + row.phub_tdmem_orig_td1_addr = 0x00007804; + row.phub_ch_action_addr = 0x00007014; + rc = program_row(dev, &row); + if (rc < 0) + goto port_release; + + row.sram_addr = 0x200; + row.nr = i++; + row.phub_ch_status_addr = 0x00007028; + row.phub_ch_status_data = 0x00000100; + row.phub_ch_basic_cfg_addr = 0x00007020; + row.phub_cfgmem_cfg0_addr = 0x00007608; + row.phub_cfgmem_cfg1_addr = 0x0000760C; + row.phub_tdmem_orig_td0_addr = 0x00007808; + row.phub_tdmem_orig_td1_addr = 0x0000780C; + row.phub_ch_action_addr = 0x00007024; + rc = program_row(dev, &row); + if (rc < 0) + goto port_release; + } + + rc = poll_status_reg(dev, 0x2, "idle"); + if (rc < 0) + goto port_release; + + rc = checksum(dev, &sum); + if (rc < 0) + goto port_release; + + dev_info(dev, "checksum %04x\n", sum); + rc = count; +port_release: + port_release(dev); +release_firmware: + release_firmware(fw); +exit: + return (rc); +} + +static struct device_attribute cy8ctma395_attr_flash = { + .attr = { + .name = "flash", + .mode = S_IWUSR, + }, + .store = cy8ctma395_attr_flash_store, +}; + +static ssize_t cy8ctma395_attr_vdd_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct cy8ctma395_platform_data *pdat = dev->platform_data; + + pdat->vdd_enable(!!simple_strtoul(buf, NULL, 10)); + + return (count); +} + +static struct device_attribute cy8ctma395_attr_vdd = { + .attr = { + .name = "vdd", + .mode = S_IWUSR, + }, + .store = cy8ctma395_attr_vdd_store, +}; + +static void cy8ctma395_xres_assert(struct cy8ctma395_platform_data *pdat, int assert) +{ + if (assert) { + gpio_set_value(pdat->xres, 0); + 
udelay(pdat->xres_us); + } + else + gpio_set_value(pdat->xres, 1); + +} + +static ssize_t cy8ctma395_attr_xres_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + int assert = !!simple_strtoul(buf, NULL, 10); + struct cy8ctma395_platform_data *pdat = dev->platform_data; + + cy8ctma395_xres_assert(pdat, assert); + + return (count); +} + +static struct device_attribute cy8ctma395_attr_xres = { + .attr = { + .name = "xres", + .mode = S_IWUGO, + }, + .store = cy8ctma395_attr_xres_store, +}; + +static int cy8ctma395_device_probe(struct platform_device *pdev) +{ + int rc; + struct cy8ctma395_device_data *dat; + struct cy8ctma395_platform_data *pdat = pdev->dev.platform_data; + + if (!pdat) { + rc = -ENODEV; + goto failed; + } + + dat = kzalloc(sizeof(*dat), 0); + if (!dat) { + rc = -ENOMEM; + goto failed; + } + + dev_set_drvdata(&pdev->dev, dat); + + rc = device_create_file(&pdev->dev, &cy8ctma395_attr_checksum); + if (rc < 0) + goto attr_checksum_failed; + + rc = device_create_file(&pdev->dev, &cy8ctma395_attr_flash); + if (rc < 0) + goto attr_flash_failed; + + rc = device_create_file(&pdev->dev, &cy8ctma395_attr_id); + if (rc < 0) + goto attr_id_failed; + + rc = device_create_file(&pdev->dev, &cy8ctma395_attr_device_config); + if (rc < 0) + goto attr_device_config_failed; + + rc = device_create_file(&pdev->dev, &cy8ctma395_attr_xres); + if (rc < 0) + goto attr_xres_failed; + + if (pdat->vdd_enable) { + cy8ctma395_xres_assert(pdat, 1); + pdat->vdd_enable(1); + cy8ctma395_xres_assert(pdat, 0); + + rc = device_create_file(&pdev->dev, &cy8ctma395_attr_vdd); + if (rc < 0) + goto attr_vdd_failed; + } + + rc = 0; + goto exit; + +attr_vdd_failed: + pdat->vdd_enable(0); + + device_remove_file(&pdev->dev, &cy8ctma395_attr_xres); +attr_xres_failed: + device_remove_file(&pdev->dev, &cy8ctma395_attr_device_config); +attr_device_config_failed: + device_remove_file(&pdev->dev, &cy8ctma395_attr_id); +attr_id_failed: + 
device_remove_file(&pdev->dev, &cy8ctma395_attr_flash); +attr_flash_failed: + device_remove_file(&pdev->dev, &cy8ctma395_attr_checksum); +attr_checksum_failed: + kfree(dat); +failed: + dev_err(&pdev->dev, "probe failed with %d\n", rc); +exit: + return (rc); +} + +static int cy8ctma395_device_remove(struct platform_device *pdev) +{ + struct cy8ctma395_device_data *dat = dev_get_drvdata(&pdev->dev); + struct cy8ctma395_platform_data *pdat = pdev->dev.platform_data; + + if (pdat->vdd_enable) + device_remove_file(&pdev->dev, &cy8ctma395_attr_vdd); + + device_remove_file(&pdev->dev, &cy8ctma395_attr_xres); + device_remove_file(&pdev->dev, &cy8ctma395_attr_device_config); + device_remove_file(&pdev->dev, &cy8ctma395_attr_id); + device_remove_file(&pdev->dev, &cy8ctma395_attr_flash); + device_remove_file(&pdev->dev, &cy8ctma395_attr_checksum); + kfree(dat); + + return (0); +} + +static struct platform_driver cy8ctma395_driver = { + .driver = { + .name = CY8CTMA395_DRIVER, + }, + .probe = cy8ctma395_device_probe, + .remove = __devexit_p(cy8ctma395_device_remove), +}; + +static int __init cy8ctma395_module_init(void) +{ + int rc; + + rc = platform_driver_register(&cy8ctma395_driver); + + return (rc); +} + +static void __exit cy8ctma395_module_exit(void) +{ + platform_driver_unregister(&cy8ctma395_driver); +} + +module_init(cy8ctma395_module_init); +module_exit(cy8ctma395_module_exit); + +MODULE_DESCRIPTION("cy8ctma395 driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig index e303e47afd0..6bb50733769 100644 --- a/drivers/misc/Kconfig +++ b/drivers/misc/Kconfig @@ -229,6 +229,17 @@ config ATMEL_SSC If unsure, say N. +config HSUART + bool "High Speed UART engine" + default n + ---help--- + Generic HSUART engine, which calls platform specific code to facilitate High speed UART driver". + +config USER_PINS + tristate "Sysfs GPIO pins control" + ---help--- + Select Y if you want to expose some gpio pins through sysfs. 
+ config ENCLOSURE_SERVICES tristate "Enclosure Services" default n diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile index 21e8e707910..cd79a060557 100644 --- a/drivers/misc/Makefile +++ b/drivers/misc/Makefile @@ -66,3 +66,5 @@ obj-$(CONFIG_PMIC8058_XOADC) += pmic8058-xoadc.o obj-$(CONFIG_TZCOM) += tzcom.o obj-$(CONFIG_QFP_FUSE) += qfp_fuse.o obj-$(CONFIG_KERNEL_LOG) += klog.o +obj-$(CONFIG_HSUART) += hsuart.o +obj-$(CONFIG_USER_PINS) += user-pins.o diff --git a/drivers/misc/hsuart.c b/drivers/misc/hsuart.c new file mode 100644 index 00000000000..d45c67465fb --- /dev/null +++ b/drivers/misc/hsuart.c @@ -0,0 +1,2464 @@ +/* + * linux/drivers/misc/hsuart.c - High speed UART driver + * + * Copyright (C) 2008 Palm Inc, + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED + * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. + * + * Author: Amir Frenkel (amir.frenkel@palm.com) + * Based on drivers/misc/omap-misc-hsuart.c + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include + + +#define DRIVER_NAME "hsuart" +#define DRIVER_VERSION (0x100) + +/* + * global switch to enable debug msgs in the module + */ +static int _dbg_lvl_ = 0x1; +#define HSUART_DEBUG_LEVEL_ERR (0x1) +#define HSUART_DEBUG_LEVEL_INFO (0x2) +#define HSUART_DEBUG_LEVEL_DEBUG (0x4) +#define HSUART_DEBUG_LEVEL_ENTER (0x8) +#define HSUART_DEBUG_LEVEL_EXIT (0x10) + + +#define HSUART_DEBUG_ENABLE 0 +#define HSUART_FUNC_LOG_ENABLE 0 +#if HSUART_DEBUG_ENABLE +#define HSUART_DEBUG(args...) 
{if (_dbg_lvl_ & HSUART_DEBUG_LEVEL_DEBUG) \ + printk(KERN_ERR args);} +#define HSUART_INFO(args...) {if (_dbg_lvl_ & HSUART_DEBUG_LEVEL_INFO) \ + printk(KERN_ERR args);} +#define HSUART_ERR(args...) {if (_dbg_lvl_ & HSUART_DEBUG_LEVEL_ERR) \ + printk(KERN_ERR args);} +#else +#define HSUART_INFO(args...) +#define HSUART_DEBUG(args...) +#define HSUART_ERR(args...) +#endif // HSUART_DEBUG_ENABLE + +#if HSUART_FUNC_LOG_ENABLE + +#define HSUART_ENTER() {if (_dbg_lvl_ & HSUART_DEBUG_LEVEL_ENTER) \ + printk(KERN_INFO"%s: %s, %u[msec] enter\n", \ + DRIVER_NAME, __PRETTY_FUNCTION__, jiffies_to_msecs(jiffies));} +#define HSUART_EXIT() {if (_dbg_lvl_ & HSUART_DEBUG_LEVEL_EXIT) \ + printk(KERN_INFO"%s: %s, %u[msec] exit\n",\ + DRIVER_NAME, __PRETTY_FUNCTION__, jiffies_to_msecs(jiffies));} +#define HSUART_EXIT_RET(ret) {if (_dbg_lvl_ & HSUART_DEBUG_LEVEL_EXIT) \ + printk(KERN_INFO"%s: %s, ret %d %u[msec] exit\n",\ + DRIVER_NAME, __PRETTY_FUNCTION__, ret, jiffies_to_msecs(jiffies));} + +#else +#define HSUART_ENTER() +#define HSUART_EXIT() +#define HSUART_EXIT_RET(ret) + +#endif + +#define HSUART_DEBUG_TIMING 0 +#define HSUART_DEBUG_TIMING_PORT 1 + +#if HSUART_DEBUG_TIMING +#include + +static char* dbg_strings[] = { "hsuart rx get buff enter", + "hsuart rx get buff exit", + "hsuart rx put buff enter", + "hsuart rx put buff exit", + + "hsuart write enter", + "hsuart write exit", + "hsuart read enter", + "hsuart read exit", + + "hsuart tx get buff evt", + "hsuart tx put buff evt", + }; + +#define HS_UART_GB_ENT 0 +#define HS_UART_GB_EXT 1 +#define HS_UART_PB_ENT 2 +#define HS_UART_PB_EXT 3 +#define HS_UART_WRITE_ENT 4 +#define HS_UART_WRITE_EXT 5 +#define HS_UART_READ_ENT 6 +#define HS_UART_READ_EXT 7 +#define HS_UART_TX_GET_BUFF_EVT 8 +#define HS_UART_TX_PUT_BUFF_EVT 9 + + + #define HSUART_LOG(p_context, eventid, arg1, arg2 ) \ + if ( p_context->uart_port_number == HSUART_DEBUG_TIMING_PORT ) { \ + hres_event(dbg_strings[eventid], arg1, arg2 ); \ + } + +#else + #define 
+ * Event logging support.
+	 * inputs from the board file (stored in the context)
+*	-EINVAL in case of error.
+* @return 1 to indicate that we have a free buffer, 0 in case we don't and
+*	-EINVAL in case of invalid parameters.
+* and if so return the first (head) of the list, otherwise, it will check
+* +* +*/ +static int +__hsuart_rxtx_get_next_full_buf(struct rxtx_lists* io_p_lists, + struct buffer_item** o_pp_buffer) +{ + int ret = 0; + int empty; + struct buffer_item* p_buffer = NULL; + + HSUART_ENTER(); + + if ((NULL != io_p_lists) && (NULL != o_pp_buffer)) { + empty = list_empty(&(io_p_lists->full)); + if (empty) { + empty = list_empty(&(io_p_lists->used)); + if (!empty) { + p_buffer = list_first_entry(&(io_p_lists->used), + struct buffer_item, + list_item); + } +// else { +// panic("%s\n %d",__FUNCTION__, __LINE__); +// } + } + else { + p_buffer = list_first_entry(&(io_p_lists->full), + struct buffer_item, + list_item); + } + + if (NULL == p_buffer) { + HSUART_DEBUG("%s: %s, can't find full buffer, empty %d.\n", + DRIVER_NAME, + __PRETTY_FUNCTION__, + empty); + ret = -ENOMEM; + } + } + else { + HSUART_ERR("%s: %s, invalid params\n", + DRIVER_NAME, + __PRETTY_FUNCTION__); + ret = -EINVAL; + } + + if (!ret) { + (*o_pp_buffer) = p_buffer; + + /* + * Remove the buffer from the list it used to be in. + */ + list_del(&(p_buffer->list_item)); + + HSUART_DEBUG("%s: %s, p_buffer 0x%x\n", + DRIVER_NAME, + __FUNCTION__, + (unsigned int)p_buffer); + } + + HSUART_EXIT(); + return ret; +} + +static int +hsuart_rxtx_get_next_full_buf(struct rxtx_lists* io_p_lists, + struct buffer_item** o_pp_buffer) +{ + int ret = 0; + unsigned long flags; + + spin_lock_irqsave(&(io_p_lists->lock), flags); + + ret = __hsuart_rxtx_get_next_full_buf(io_p_lists, o_pp_buffer); + + spin_unlock_irqrestore(&(io_p_lists->lock), flags); + + return ret; +} + +/** +* +* Helper function, returns the next buffer which has at least the +* specified vacancy. +* As the current implementation of the lists is FIFO, the function +* will first try to check whether the buffer in the 'used' list exist +* and if so, check whether it has enough free space. 
+* In case that there is no 'used' buffer the function will get the +* first buffer from the 'empty' list +* +* @param[in][out] io_p_lists - The lists structure to look +* for the buffer in. +* @param[in] min_free_bytes - The minimal amount of free bytes +* in the buffer. +* @param[out] o_pp_buffer - Pointer to container to fill +* with pointer to the buffer, in case of +* failure to find empty buffer, will be +* set to NULL. +* +* @return 0 for success; -ENOMEM in case that we failed to find a +* matching buffer. +* +* +*/ +static int +hsuart_tx_buf_get_empty(struct rxtx_lists* io_p_lists, + int min_free_bytes, + struct buffer_item** o_pp_buffer) +{ + struct buffer_item* p_buffer = NULL; + int ret = 0; + int empty; + unsigned long flags; + + HSUART_ENTER(); + + if ((NULL != io_p_lists) && (NULL != o_pp_buffer)) { + spin_lock_irqsave(&(io_p_lists->lock), flags); +/* + * TODO: optimize by adding support for 'used' buffers when writing. + empty = list_empty(&(io_p_lists->used)); + if (!empty) { + p_buffer = list_first_entry(&(io_p_lists->used), + struct buffer_item, + list_item); + list_del(&(p_buffer->list_item)); + if (min_free_bytes > (p_buffer->size - p_buffer->fullness)) { +panic("%s %d\n",__FUNCTION__, __LINE__); + list_add_tail(&(p_buffer->list_item), + &(io_p_lists->full)); + p_buffer = NULL; + } + } +*/ + if (NULL == p_buffer) { + empty = list_empty(&(io_p_lists->empty)); + if (!empty) { + p_buffer = list_first_entry(&(io_p_lists->empty), + struct buffer_item, + list_item); + list_del(&(p_buffer->list_item)); + io_p_lists->vacant_buffers--; + BUG_ON(io_p_lists->vacant_buffers < 0); + BUG_ON(io_p_lists->vacant_buffers > io_p_lists->buffer_cnt); + } + } + + if (NULL == p_buffer) { + HSUART_ERR("%s: %s, can't find empty buffer, empty%d\n", + DRIVER_NAME, + __PRETTY_FUNCTION__, + empty); + + ret = -ENOMEM; + } + else { + (*o_pp_buffer) = p_buffer; + } + spin_unlock_irqrestore(&(io_p_lists->lock), flags); + } + else { + HSUART_ERR("%s: %s, invalid params\n", + 
DRIVER_NAME, + __PRETTY_FUNCTION__); + ret = -EINVAL; + } + HSUART_EXIT(); + + return ret; +} + +/** +* +* Helper function, returns the next buffer which has at least the +* specified vacancy. +* Will try to first find a buffer in the empty list and if failed +* reuse the buffer in the used list +* +* @param[in][out] io_p_lists - The lists structure to look +* for the buffer in. +* @param[in] min_free_bytes - The minimal amount of free bytes +* in the buffer. +* @param[out] o_pp_buffer - Pointer to container to fill +* with pointer to the buffer, in case of +* failure to find empty buffer, will be +* set to NULL. +* +* @return 0 for success; -ENOMEM in case that we failed to find a +* matching buffer. +* +* +*/ +static int +hsuart_rx_buf_get_empty(struct rxtx_lists* io_p_lists, + int min_free_bytes, + struct buffer_item** o_pp_buffer) +{ + struct buffer_item* p_buffer = NULL; + int ret = 0; + int empty; + int item_from_empty = 0; + + HSUART_ENTER(); + if ((NULL != io_p_lists) && (NULL != o_pp_buffer)) { + empty = list_empty(&(io_p_lists->empty)); + if (empty) { + empty = list_empty(&(io_p_lists->used)); + if (!empty) { + p_buffer = list_first_entry(&(io_p_lists->used), + struct buffer_item, + list_item); + } + } + else { + p_buffer = list_first_entry(&(io_p_lists->empty), + struct buffer_item, + list_item); + item_from_empty = 1; + } + + if (empty || (min_free_bytes > (p_buffer->size - p_buffer->fullness))) { + HSUART_ERR("%s: %s, can't find empty buffer, empty%d, req_size%d, sz%d, fullness%d\n", + DRIVER_NAME, + __PRETTY_FUNCTION__, + empty, + min_free_bytes, + p_buffer ? p_buffer->size : 666, + p_buffer ? p_buffer->fullness: 666); + ret = -ENOMEM; + } + else { + (*o_pp_buffer) = p_buffer; + /* + * Remove the buffer from the list it used to be in. 
+ */ + list_del(&(p_buffer->list_item)); + if (item_from_empty) { + io_p_lists->vacant_buffers--; + BUG_ON(io_p_lists->vacant_buffers < 0); + BUG_ON(io_p_lists->vacant_buffers > io_p_lists->buffer_cnt); + } + } + + if (ret) { + (*o_pp_buffer) = NULL; + } + } + else { + HSUART_ERR("%s: %s, invalid params\n", + DRIVER_NAME, + __PRETTY_FUNCTION__); + ret = -EINVAL; + } + HSUART_EXIT(); + + return ret; +} + + +/** +* +* Helper function, initialize the rxtx_lists structure: +* - initialize the lists within the rxtx_lists structure, +* - init the lock protecting the lists, +* - allocate and initialize the nodes pointing to the chunks of memory +* that are served as buffers for rx/tx. +* +* @param[in][out] io_p_lists - The lists structure to initialize. +* @param[in] num_buffers - The number of buffers (memory chunks) +* to initialize. +* @param[in] buffer_size - The size (in bytes) of a buffer. +* @param[in] phys_addr_start - The start address (physical) of the +* buffer to be managed by the rxtx_list we are +* about to init in this function. +* @param[in] i_p_vaddr_start - The start address (virtual) of the +* buffer to be managed by the rxtx_list we are +* about to initialize in this function +* +* @return 0 for success; -ENOMEM in case that we failed to allocate memory. +* +*/ +static int +hsuart_rxtx_lists_init( struct rxtx_lists* io_p_lists, + int num_buffers, + int buffer_size, + dma_addr_t phys_addr_start, + char* i_p_vaddr_start) +{ + int ret = 0; + struct buffer_item* p_buffer = NULL; + int i; + + /* + * Initialize Rx lists: + * - Init the spin lock + * - set full and used buffer list to be empty. + * - set empty buffer list to hold all the allocated buffers. 
+ */ + spin_lock_init(&(io_p_lists->lock)); + + INIT_LIST_HEAD(&(io_p_lists->full)); + INIT_LIST_HEAD(&(io_p_lists->empty)); + INIT_LIST_HEAD(&(io_p_lists->used)); + + io_p_lists->p_buffer_pool = + kzalloc(num_buffers * sizeof(struct buffer_item), + GFP_KERNEL); + + if (NULL == io_p_lists->p_buffer_pool) { + ret = -ENOMEM; + HSUART_ERR("%s:%s, failed allocating buffer pool, size %d\n", + DRIVER_NAME, + __PRETTY_FUNCTION__, + num_buffers * sizeof(struct buffer_item)); + } + else { + HSUART_DEBUG("%s:%s, allocated buff 0x%x\n", + DRIVER_NAME, + __PRETTY_FUNCTION__, + (unsigned int)io_p_lists->p_buffer_pool); + } + + /* + * It's all free. + */ + io_p_lists->vacant_buffers = num_buffers; + io_p_lists->buffer_cnt = num_buffers; + + p_buffer = io_p_lists->p_buffer_pool; + for (i = 0; i < num_buffers; i++) { + INIT_LIST_HEAD(&(p_buffer->list_item)); + p_buffer->size = buffer_size; + p_buffer->read_index = 0; + p_buffer->write_index = 0; + p_buffer->fullness = 0; + p_buffer->phys_addr = phys_addr_start + (i * buffer_size); + p_buffer->p_vaddr = i_p_vaddr_start + (i * buffer_size); + + list_add_tail(&(p_buffer->list_item), &(io_p_lists->empty)); + /* + * Point to the next element in the buffer pool + */ + p_buffer++; + } + return ret; +} + + +/** +* +* Constructor, creates, allocate and initialize the RxTx related buffers. +* +* @param[in][out] io_p_contxt - The device context to use. +* +* @return 0 for success; -ENOMEM in case that we can't allocate buffers. 
+* +*/ +static int +hsuart_rxtx_buf_init(struct dev_ctxt* io_p_contxt) +{ + int ret = 0; + + HSUART_ENTER(); + + ret = hsuart_alloc_rx_dma_buf(io_p_contxt); + if (0 == ret) { + ret = hsuart_rxtx_lists_init( + &(io_p_contxt->rx_lists), + io_p_contxt->rx_buf_num, + io_p_contxt->rx_buf_size, + io_p_contxt->rx.phys_addr, + io_p_contxt->rx.p_vaddr); + } + if (0 == ret) { + ret = hsuart_alloc_tx_dma_buf(io_p_contxt); + } + if (0 == ret) { + ret = hsuart_rxtx_lists_init( + &(io_p_contxt->tx_lists), + io_p_contxt->tx_buf_num, + io_p_contxt->tx_buf_size, + io_p_contxt->tx.phys_addr, + io_p_contxt->tx.p_vaddr); + } + + /* + * TODO: add proper cleanup in case of failure. + */ + HSUART_EXIT(); + + return ret; +} + +static struct buffer_item* +__rx_get_buffer_cbk(void* p_data, int free_bytes) +{ + struct dev_ctxt* p_context; + int empty = 0; + int err; + unsigned long flags; + struct buffer_item* p_buffer = NULL; + /* + * In case that we will reuse an existing driver, we will use this + * var to register how many bytes were "dropped". + */ + int dropped = 0; + + p_context = (struct dev_ctxt*)p_data; + + HSUART_ENTER(); + if (NULL != p_context) { + HSUART_LOG(p_context, HS_UART_GB_ENT, 0 , 0 ); + /* + * Find a free buffer and ask the platform-specific hsuart + * code to fill it. 
+ */ + spin_lock_irqsave(&(p_context->rx_lists.lock), flags); + + /* TODO: handle the case that free_bytes is large number, more + than the buffer size.....*/ + err = hsuart_rx_buf_get_empty(&(p_context->rx_lists), + free_bytes, + &p_buffer); + /* + * If we failed to get vacant buffer, reuse the last one + */ + if ((!p_buffer) || err) { + int available_bytes; + int required_bytes; + + empty = list_empty(&(p_context->rx_lists.used)); + if (!empty) { + p_buffer = list_first_entry( + &(p_context->rx_lists.used), + struct buffer_item, + list_item); + } + else { + panic("%s line %d, err %d\n",__FUNCTION__, __LINE__, err); + } + list_del(&(p_buffer->list_item)); + + if ( p_buffer->size < free_bytes ) { + panic("%s:%d, not enough bytes to reuse %d %d\n" + ,__FUNCTION__, + __LINE__, + p_buffer->size, + free_bytes); + } + + available_bytes = p_buffer->size - p_buffer->fullness; + + if ( free_bytes > available_bytes ) { + required_bytes = free_bytes - available_bytes; + p_buffer->fullness -= required_bytes; + dropped = required_bytes; + p_buffer->write_index -= required_bytes; + } + } + spin_unlock_irqrestore(&(p_context->rx_lists.lock), flags); + + spin_lock_irqsave(&(p_context->lock), flags); + p_context->rx_dropped += dropped; + spin_unlock_irqrestore(&(p_context->lock), flags); + + HSUART_DEBUG("%s: %s, got new buffer? - %s 0x%x\n", + p_context->dev_name, + __PRETTY_FUNCTION__, + empty? "false": "true", + p_buffer ? 
(uint32_t)p_buffer: 0x666); + if(p_buffer){ +//printk(KERN_ERR"%s p_buffer 0x%x read_index %d write_index %d fullness %d\n", __FUNCTION__,(uint32_t)p_buffer, p_buffer->read_index, p_buffer->write_index, p_buffer->fullness); + } + else { + panic("%s %d\n",__FUNCTION__, __LINE__); + } + + HSUART_LOG(p_context, HS_UART_GB_EXT, p_buffer->fullness , p_buffer->size ); + } + + HSUART_DEBUG("%s: %s, exit, p_buffer 0x%x, p_data 0x%x\n", + DRIVER_NAME, + __PRETTY_FUNCTION__, + (uint32_t)p_buffer, + (uint32_t)p_data); + + return p_buffer; +} +static void +__rx_put_buffer_cbk(void* p_data, struct buffer_item* p_buffer) +{ + struct dev_ctxt* p_context; + int empty; + int fullness; + struct buffer_item* p_last_buffer = NULL; + unsigned long flags; + + + p_context = (struct dev_ctxt*)p_data; + + HSUART_DEBUG("%s: %s, enter, p_buffer 0x%x, p_data 0x%x\n", + DRIVER_NAME, + __PRETTY_FUNCTION__, + (uint32_t)p_buffer, + (uint32_t)p_data); +//printk(KERN_ERR"%s p_buffer 0x%x read_index %d write_index %d fullness %d\n", __FUNCTION__,(uint32_t)p_buffer, p_buffer->read_index, p_buffer->write_index, p_buffer->fullness); + + if ((NULL != p_context) && (NULL != p_buffer)) { + HSUART_LOG(p_context, HS_UART_PB_ENT, p_buffer->fullness , 0 ); + + spin_lock_irqsave(&(p_context->lock), flags); + p_context->rx_ttl += p_buffer->fullness; + spin_unlock_irqrestore(&(p_context->lock), flags); + /* + * Check the used list, if it is not empty, move + * the buffer from it to the full list. + * Put the new buffer into the tail of the used list. 
+ */ + spin_lock_irqsave(&(p_context->rx_lists.lock), flags); + fullness = p_buffer->fullness; + + if (fullness) { + empty = list_empty(&(p_context->rx_lists.used)); + if (!empty) { + p_last_buffer = list_first_entry(&(p_context->rx_lists.used), + struct buffer_item, + list_item); + list_del(&(p_last_buffer->list_item)); + BUG_ON(NULL == p_last_buffer); + HSUART_DEBUG("we have buffer 0x%x in the used list, push it to full\n",(uint32_t)p_last_buffer); + list_add_tail(&(p_last_buffer->list_item), &(p_context->rx_lists.full)); + } + + list_add_tail(&(p_buffer->list_item), &(p_context->rx_lists.used)); + } + + else { + list_add_tail(&(p_buffer->list_item), &(p_context->rx_lists.empty)); + p_context->rx_lists.vacant_buffers++; + } + + spin_unlock_irqrestore(&(p_context->rx_lists.lock), flags); + + if (fullness) + wake_up_interruptible(&(p_context->got_rx_buffer)); + + HSUART_LOG(p_context, HS_UART_PB_EXT, 0 , 0 ); + } + else { + HSUART_ERR("%s: %s, invalid parameters p_context 0x%x, p_buffer 0x%x!!!\n", + DRIVER_NAME, + __PRETTY_FUNCTION__, + (uint32_t) p_context, + (uint32_t) p_buffer); + } + + HSUART_EXIT(); +} + +static struct buffer_item* +__tx_get_buffer_cbk(void* p_data ) +{ + + struct dev_ctxt* p_context; + int ret; + struct buffer_item* p_buffer = NULL; + unsigned long flags; + + p_context = (struct dev_ctxt*)p_data; + + HSUART_LOG(p_context, HS_UART_TX_GET_BUFF_EVT, 0 , 0 ); + + spin_lock_irqsave(&(p_context->tx_lists.lock), flags); + ret = __hsuart_rxtx_get_next_full_buf(&(p_context->tx_lists), &p_buffer); + spin_unlock_irqrestore(&(p_context->tx_lists.lock), flags); + + HSUART_LOG(p_context, HS_UART_TX_GET_BUFF_EVT, 1 , ret ); + + return p_buffer; +} +static void +__tx_put_buffer_cbk(void* p_data, struct buffer_item* p_buffer, int transaction_size) +{ + struct dev_ctxt* p_context; + unsigned long flags; + HSUART_ENTER(); + + p_context = (struct dev_ctxt*)p_data; + + HSUART_LOG(p_context, HS_UART_TX_PUT_BUFF_EVT, 0 , transaction_size ); + + 
spin_lock_irqsave(&(p_context->lock), flags); + p_context->tx_ttl += transaction_size; + spin_unlock_irqrestore(&(p_context->lock), flags); + + spin_lock_irqsave(&(p_context->tx_lists.lock), flags); + /* + * No more data in the buffer...add it to the empty list + */ + list_add(&(p_buffer->list_item), + &p_context->tx_lists.empty); + p_context->tx_lists.vacant_buffers++; + BUG_ON(p_context->tx_lists.vacant_buffers < 0); + BUG_ON(p_context->tx_lists.vacant_buffers > p_context->tx_lists.buffer_cnt); + + spin_unlock_irqrestore(&(p_context->tx_lists.lock), + flags); + + HSUART_LOG(p_context, HS_UART_TX_PUT_BUFF_EVT, 1 , 0 ); + + HSUART_EXIT(); +} + +/** +* +* Helper function, allocate and initialize the HSUART port. +* +* @param[in][out] io_p_contxt - The device context to use. +* +* @return 0 for success -1 otherwise. +* +* @Note call the platform specific function to open a uart +* port and set it up with the default configuration. +* +*/ +static int +hsuart_uart_port_init(struct dev_ctxt* io_p_contxt) +{ + int ret = 0; + struct hsuart_config cfg = {0}; + struct buffer_item* p_buffer = NULL; + int max_packet_size = io_p_contxt->pdata->max_packet_size; + + HSUART_ENTER(); + + /* + * TODO:amir + * Right now, we call the MSM specific code, need to change it so it will + * call virtual function which will be filled in the board file. + */ + /* + * Get the desired UART port ID from the context into + * the configuration request. 
+ */ + cfg.port_id = io_p_contxt->uart_port_number; + + if(io_p_contxt->pdata->options & HSUART_OPTION_TX_PIO) + { + cfg.flags |= HSUART_CFG_TX_PIO; + } + + if(io_p_contxt->pdata->options & HSUART_OPTION_RX_PIO) + { + cfg.flags |= HSUART_CFG_RX_PIO; + } + + if(io_p_contxt->pdata->options & HSUART_OPTION_TX_DM) + { + cfg.flags |= HSUART_CFG_TX_DM; + } + + if(io_p_contxt->pdata->options & HSUART_OPTION_RX_DM) + { + cfg.flags |= HSUART_CFG_RX_DM; + } + + if(io_p_contxt->pdata->options & HSUART_OPTION_SCHED_RT) + { + cfg.flags |= HSUART_CFG_SCHED_RT; + } + + cfg.rx_get_buffer.p_cbk = __rx_get_buffer_cbk; + cfg.rx_get_buffer.p_data = io_p_contxt; + cfg.rx_put_buffer.p_cbk = __rx_put_buffer_cbk; + cfg.rx_put_buffer.p_data = io_p_contxt; + + cfg.tx_get_buffer.p_cbk = __tx_get_buffer_cbk; + cfg.tx_get_buffer.p_data = io_p_contxt; + cfg.tx_put_buffer.p_cbk = __tx_put_buffer_cbk; + cfg.tx_put_buffer.p_data = io_p_contxt; + + cfg.max_packet_size = max_packet_size; + cfg.min_packet_size = io_p_contxt->pdata->min_packet_size; + cfg.rx_latency = io_p_contxt->pdata->rx_latency; + + cfg.p_board_pin_mux_cb = io_p_contxt->pdata->p_board_pin_mux_cb; + cfg.p_board_gsbi_config_cb = io_p_contxt->pdata->p_board_config_gsbi_cb; + cfg.p_board_rts_pin_deassert_cb = io_p_contxt->pdata->p_board_rts_pin_deassert_cb; + + + ret = msm_hsuart_open_context(&cfg, &(io_p_contxt->hsuart_id)); + /*TODO: consider flushing the existing fifo*/ + HSUART_DEBUG("%s: %s, allocated platform hsuart, handle_0x%x\n", + io_p_contxt->dev_name, + __PRETTY_FUNCTION__, + (unsigned int)io_p_contxt->hsuart_id); + + msm_hsuart_set_flow( + io_p_contxt->hsuart_id, + io_p_contxt->uart_flags & HSUART_MODE_FLOW_CTRL_MASK); + + msm_hsuart_set_parity( + io_p_contxt->hsuart_id, + io_p_contxt->uart_flags & HSUART_MODE_PARITY_MASK); + + msm_hsuart_set_baud_rate( + io_p_contxt->hsuart_id, + io_p_contxt->uart_speed); + /* + * Start the reader, to make sure we start listening on the port + * and collect all incoming data. 
+ */ + p_buffer = __rx_get_buffer_cbk(io_p_contxt, max_packet_size); + + //TODO: should be called enable/initiate read or something like that + msm_hsuart_read(io_p_contxt->hsuart_id, p_buffer); + + HSUART_EXIT(); + + return ret; +} + +/** +* +* Helper function, release allocated handle of the HSUART port. +* +* @param[in][out] io_p_contxt - The device context to use. +* +* @return 0 for success -1 otherwise. +* +* @Note call the platform specific function to close uart +* port. +*/ +static int +hsuart_uart_port_release(struct dev_ctxt* io_p_contxt) +{ + int ret = 0; + + HSUART_ENTER(); + + ret = msm_hsuart_close_context(io_p_contxt->hsuart_id); + + HSUART_EXIT(); + + return ret; +} + + +/************************************************************************ + * + * IO calls + * + ************************************************************************/ +/* +* +* Helper function, copy from the specified buffer_item to user-space pointer. +* +* @param[in][out] io_p_buffer - The buffer to copy from. +* @param[out] o_p_buf - The destination buffer. +* @param[in] count - The number of bytes to copy. +* +* @return negative number - error code, +* 0 or positive - the number of bytes that we copied. 
+* +*/ +static ssize_t +hsuart_copy_buf_to_user(struct buffer_item* io_p_buffer, + char* o_p_buf, int count) +{ + int ret = 0; + int ret_cnt = 0; + struct buffer_item* p_buffer = io_p_buffer; + HSUART_ENTER(); + + BUG_ON(count > p_buffer->fullness); + if (!p_buffer->fullness) { + ret_cnt = 0; + } + else if (p_buffer->read_index < p_buffer->write_index) { + HSUART_DEBUG("%s: %s, about to copy %d bytes read_index %d to 0x%x\n", + DRIVER_NAME, + __PRETTY_FUNCTION__, + count, + p_buffer->read_index, + (unsigned int)o_p_buf); + + ret = copy_to_user( + o_p_buf, + &(p_buffer->p_vaddr[p_buffer->read_index]), + count); + if (ret) { + HSUART_ERR("%s: %s, failed copying data to user err 0x%x line %d.\n", + DRIVER_NAME, + __PRETTY_FUNCTION__, + __LINE__, + ret); + } + else { + ret_cnt = count; + p_buffer->fullness -= ret_cnt; + p_buffer->read_index += ret_cnt; + if (p_buffer->read_index >= p_buffer->size) { + p_buffer->read_index -= p_buffer->size; + } + } + } + else { + /* + * In this case, we have chunks to copy.... + */ + int length1; + int length2; + + length1 = p_buffer->size - p_buffer->read_index; + length2 = p_buffer->write_index; + + /* + * In case we need to read from both chunks, make sure + * to adjust the number of bytes to read from the 2nd + * chunk, to account the data we read in the first. 
+	 * In case we didn't face any error, return the number of bytes copied.
+ */ + if (!ret) { + ret = ret_cnt; + } + HSUART_EXIT(); + return ret; +} +static ssize_t +hsuart_read(struct file *file, char __user* o_p_buf, size_t count, + loff_t *ppos) +{ + struct dev_ctxt* p_contxt; + unsigned long flags; + int ret; + struct buffer_item* p_buffer = NULL; + int copied_cnt = 0; + +// p_contxt = container_of(file->f_op, struct dev_ctxt, fops); + p_contxt = file->private_data; + + HSUART_ENTER(); + HSUART_DEBUG("%s: %s called, count %d\n", + p_contxt->dev_name, __PRETTY_FUNCTION__, count); + + HSUART_LOG(p_contxt, HS_UART_READ_ENT, count , 0 ); + + for (;count > copied_cnt;) { + int bytes_to_copy; + /* + * Try to get the next buffer which contains data. + */ + HSUART_DEBUG("%s, count %d, copy_cnt %d\n",__FUNCTION__, count, copied_cnt); + ret = hsuart_rxtx_get_next_full_buf(&(p_contxt->rx_lists), + &p_buffer); + HSUART_DEBUG("ret %d, pbuffer0x%x\n",ret, (unsigned int)p_buffer); + if ((0 == ret) && (NULL != p_buffer)) { + bytes_to_copy = min(p_buffer->fullness, (int)(count - copied_cnt)); + + ret = hsuart_copy_buf_to_user( + p_buffer, + o_p_buf + copied_cnt, + bytes_to_copy); + if (0 <= ret) { + BUG_ON(ret != bytes_to_copy); + copied_cnt += ret; + } + else { + HSUART_ERR("%s:%s, copy to user failed p_buffer0x%x, rd_idx0x%x, o_p_buf0x%x, copied_cnt%d\n", + p_contxt->dev_name, + __PRETTY_FUNCTION__, + (unsigned int)p_buffer, + (unsigned int)p_buffer->read_index, + (unsigned int)o_p_buf, + copied_cnt); + ret = -EFAULT; + goto done_read_copy; + } + /* + * The buffer is now empty, move it over to the + * empty list. + */ + //TODO: push me into a function... 
+ spin_lock_irqsave(&(p_contxt->rx_lists.lock), flags); + + if (0 == p_buffer->fullness) { + p_buffer->read_index = 0; + p_buffer->write_index = 0; + list_add_tail(&(p_buffer->list_item), + &(p_contxt->rx_lists.empty)); + p_contxt->rx_lists.vacant_buffers++; + } + else { + /* + * Put the buffer to the head of the full list + */ + list_add(&(p_buffer->list_item), + &p_contxt->rx_lists.full); + } + spin_unlock_irqrestore(&(p_contxt->rx_lists.lock), flags); + } + else { + /* + * No buffer is available + */ + if( file->f_flags & O_NONBLOCK) { + if (0 == copied_cnt) { + copied_cnt = -EAGAIN; + } + goto done_read_copy; + } + ret = wait_event_interruptible( + p_contxt->got_rx_buffer, + (1 == hsuart_rx_data_exist(&(p_contxt->rx_lists)))); + HSUART_DEBUG("%s, got 'got-rx-buffer' event\n",__FUNCTION__); + if (ret) { + printk(KERN_ERR"%s %d ret %d\n",__func__, __LINE__, ret); + // TODO add better cleanup. + goto done_read_copy; + } + } + } + +done_read_copy: + HSUART_DEBUG("--->%s exit %d<---\n",__FUNCTION__, copied_cnt); + + HSUART_LOG(p_contxt, HS_UART_READ_EXT, copied_cnt , 0 ); + + return copied_cnt; +} + + +/** +* +* Helper function, copy from user-space to the provided buffer_item. +* +* @param[in][out] io_p_buffer - The buffer to copy to. +* @param[out] i_p_buf - The source buffer. +* @param[in] count - The number of bytes to copy. +* +* @return negative number - error code, +* 0 or positive - the number of bytes that we copied. 
+* +*/ +static ssize_t +hsuart_copy_user_to_buf(struct buffer_item* io_p_buffer, const char* i_p_buf, int count) +{ + struct buffer_item* p_buffer = io_p_buffer; + int ret = 0; + int ret_cnt = 0; + int err = 0; + + HSUART_DEBUG("%s: %s called, io_p_buffer 0x%x, i_p_buf 0x%x, count %d\n", + DRIVER_NAME, + __PRETTY_FUNCTION__, + (uint32_t)io_p_buffer, + (uint32_t)i_p_buf, + count); + HSUART_DEBUG("%s: %s called, read_index %d, write_index %d, fullness %d\n", + DRIVER_NAME, + __PRETTY_FUNCTION__, + p_buffer->read_index, + p_buffer->write_index, + p_buffer->fullness); + + BUG_ON(NULL == io_p_buffer); + BUG_ON(NULL == i_p_buf); + /* + * Write pointer is smaller than the read pointer, we can copy + * the whole thing... + */ + if (p_buffer->read_index <= p_buffer->write_index) { + err = copy_from_user( + &(p_buffer->p_vaddr[p_buffer->write_index]), + i_p_buf, + count); + if (err) { + HSUART_ERR("%s:%s, copy from user failed p_vaddr0x%x, write_index0x%x, i_p_buf0x%x, count%d\n", + DRIVER_NAME, + __PRETTY_FUNCTION__, + (unsigned int)p_buffer->p_vaddr, + (unsigned int)p_buffer->write_index, + (unsigned int)i_p_buf, + count); + ret = -EFAULT; + goto done_copy; + } + else { + p_buffer->write_index += count; + p_buffer->fullness += count; + ret_cnt += count; + } + } + /* + * We may need to copy in two chunks... 
+ */ + else { + int length1 = p_buffer->size - p_buffer->write_index; + err = copy_from_user( + &(p_buffer->p_vaddr[p_buffer->write_index]), + i_p_buf, + length1); + if (!err) { + err = copy_from_user( + p_buffer->p_vaddr, + i_p_buf + length1, + count - length1); + if (err) { + HSUART_ERR("%s:%s, copy from user failed p_vaddr0x%x, write_index0x%x, i_p_buf0x%x, count%d\n", + DRIVER_NAME, + __PRETTY_FUNCTION__, + (unsigned int)p_buffer->p_vaddr, + (unsigned int)p_buffer->write_index, + (unsigned int)i_p_buf + length1, + count - length1); + ret = -EFAULT; + goto done_copy; + + } + else { + p_buffer->write_index = count - length1; + p_buffer->fullness += count; + ret_cnt += count; + } + } + else { + HSUART_ERR("%s:%s, copy from user failed p_vaddr0x%x, write_index0x%x, i_p_buf0x%x, count%d\n", + DRIVER_NAME, + __PRETTY_FUNCTION__, + (unsigned int)p_buffer->p_vaddr, + (unsigned int)p_buffer->write_index, + (unsigned int)i_p_buf, + count); + ret = -EFAULT; + } + } +done_copy: + if (!ret) { + ret = ret_cnt; + } + HSUART_EXIT(); + + return ret; +} + +static ssize_t +hsuart_write(struct file *file, const char __user* i_p_buf, size_t count, + loff_t *ppos) +{ + struct dev_ctxt* p_context; + ssize_t ret = 0; + int copied_cnt = 0; + + /* + * The number of available bytes in the current buffer. + */ + int available_bytes; + /* + * The num of remaining bytes to copy. 
+ */ + int bytes_to_copy = count; + int empty; + struct buffer_item* p_buffer = NULL; + unsigned long flags; + +// p_context = container_of(file->f_op, struct dev_ctxt, fops); + p_context = file->private_data; + HSUART_DEBUG("%s: %s called, count %d\n", + p_context->dev_name, __PRETTY_FUNCTION__, count); + + HSUART_LOG(p_context, HS_UART_WRITE_ENT, count , 0 ); + + for (;bytes_to_copy > 0;) { + int cnt; + empty = hsuart_tx_buf_get_empty(&(p_context->tx_lists), 0, &p_buffer); + if ((!empty) && (NULL != p_buffer)) { + /* + * Now, lets copy the data + */ + available_bytes = p_buffer->size - p_buffer->fullness; + cnt = min(bytes_to_copy, available_bytes); + ret = hsuart_copy_user_to_buf( + p_buffer, + i_p_buf + copied_cnt, + cnt); + if (ret >= 0) { + BUG_ON(cnt != ret); + copied_cnt += ret; + bytes_to_copy -= ret; + /* + * Place the buffer back into the appropriate list + */ + //TODO: push me toa function. + spin_lock_irqsave(&(p_context->tx_lists.lock), + flags); + /* + * future optimization... 
+ if (p_buffer->fullness == p_buffer->size) {
+ printk(KERN_ERR"the buffer is full\n");
+ list_add_tail(&(p_buffer->list_item), &(p_contxt->tx_lists.full));
+ }
+ else {
+ printk(KERN_ERR"the buffer is not full yet - move it to used list\n");
+ list_add_tail(&(p_buffer->list_item), &(p_contxt->tx_lists.used));
+ }
+ */
+ list_add_tail(&(p_buffer->list_item),
+ &(p_context->tx_lists.full));
+ spin_unlock_irqrestore(&(p_context->tx_lists.lock),
+ flags);
+
+ msm_hsuart_write(p_context->hsuart_id);
+ }
+ else {
+ printk(KERN_ERR"%s, error %d\n",__FUNCTION__, ret);
+ }
+ }
+ else {
+ if (file->f_flags & O_NONBLOCK) {
+ HSUART_DEBUG("%s:%s, no more free space, try again later...\n",
+ p_context->dev_name,
+ __PRETTY_FUNCTION__);
+ ret = -EAGAIN;
+ printk(KERN_ERR"%s, %d, p_buffer 0x%x, empty %d\n",__FUNCTION__, __LINE__, (uint32_t)p_buffer, empty);
+ goto hsuart_write_done;
+ }
+ ret = wait_event_interruptible(
+ p_context->got_tx_buffer,
+ hsuart_vacant_tx_buf_exist(&(p_context->tx_lists)));
+ if (ret) {
+ HSUART_ERR("%s:%s, failed getting got_tx_buffer event\n",
+ DRIVER_NAME,
+ __PRETTY_FUNCTION__);
+ // TODO: add err handling and some cleanup
+ goto hsuart_write_done;
+ }
+ }
+ }
+
+hsuart_write_done:
+ /*
+ * In case that we didn't get any error (ret is not -ErrCode)
+ * assign the number of bytes that were copied as the ret value.
+ */
+ if (ret >= 0) {
+ ret = copied_cnt;
+ }
+
+ HSUART_DEBUG("---> %s ret %d<---\n",__FUNCTION__,ret);
+
+ HSUART_EXIT();
+ return ret;
+}
+
+static unsigned int
+hsuart_poll(struct file* file, struct poll_table_struct* wait)
+{
+ unsigned long flags;
+ unsigned int mask = 0;
+ struct dev_ctxt* p_context;
+ int rx_rdy;
+ int tx_rdy;
+
+ HSUART_ENTER();
+// p_context = container_of(file->f_op, struct dev_ctxt, fops);
+ p_context = file->private_data;
+
+ poll_wait(file, &(p_context->got_tx_buffer), wait);
+ poll_wait(file, &(p_context->got_rx_buffer), wait);
+
+//FIXME: context lock is useless... 
need to use lists lock + spin_lock_irqsave(&(p_context->lock), flags); + tx_rdy = hsuart_vacant_tx_buf_exist(&(p_context->tx_lists)); + if(tx_rdy > 0) { + mask |= POLLOUT | POLLWRNORM; + } + rx_rdy = hsuart_rx_data_exist(&(p_context->rx_lists)); + if(rx_rdy > 0) { + mask |= POLLIN | POLLRDNORM; + } + + HSUART_DEBUG("%s, tx_rdy %d, rx_rdy %d\n",__FUNCTION__, tx_rdy, rx_rdy); +// HSUART_ERR("%s, tx_rdy %d, rx_rdy %d\n",__FUNCTION__, tx_rdy, rx_rdy); + + spin_unlock_irqrestore(&(p_context->lock), flags); + + HSUART_EXIT(); + + return mask; + +} + +static void +hsuart_flush_rx_queue(struct dev_ctxt* io_p_context) +{ + unsigned long flags; + struct buffer_item* p_buffer = NULL; + int ret = 0; + + /* + * Stop RX + */ + spin_lock_irqsave(&(io_p_context->rx_lists.lock), flags); + + for (;;) { + ret = __hsuart_rxtx_get_next_full_buf( + &(io_p_context->rx_lists), + &p_buffer); + HSUART_ERR("%s, %d ret %d, p_buffer 0x%x\n", + __FUNCTION__, __LINE__,ret, (uint32_t)p_buffer); + + if (ret) { + break; + } + p_buffer->read_index = 0; + p_buffer->write_index = 0; + p_buffer->fullness = 0; + list_add(&(p_buffer->list_item), + &(io_p_context->rx_lists.empty)); + io_p_context->rx_lists.vacant_buffers++; + } + + spin_unlock_irqrestore(&(io_p_context->rx_lists.lock), flags); +} + +static void +hsuart_flush_tx_queue(struct dev_ctxt* io_p_context) +{ + unsigned long flags; + struct buffer_item* p_buffer = NULL; + int ret = 0; + + /* + * Disable TX + */ + spin_lock_irqsave(&(io_p_context->tx_lists.lock), flags); + + for (;;) { + ret = __hsuart_rxtx_get_next_full_buf( + &(io_p_context->tx_lists), + &p_buffer); + HSUART_ERR("%s, %d ret %d, p_buffer 0x%x\n", + __FUNCTION__, __LINE__,ret, (uint32_t)p_buffer); + + if (ret) { + break; + } + p_buffer->read_index = 0; + p_buffer->write_index = 0; + p_buffer->fullness = 0; + list_add(&(p_buffer->list_item), + &(io_p_context->tx_lists.empty)); + io_p_context->tx_lists.vacant_buffers++; + } + + spin_unlock_irqrestore(&(io_p_context->tx_lists.lock), 
flags); + +} + +static int +hsuart_ioctl_flush(struct dev_ctxt* io_p_context, int args) +{ + HSUART_DEBUG("%s called, args 0x%x\n",__FUNCTION__, args); + + if (args & (HSUART_TX_QUEUE | HSUART_TX_FIFO)) { + + if(args & HSUART_TX_QUEUE) { + hsuart_flush_tx_queue(io_p_context); + } + + if(args & HSUART_TX_FIFO) { + //TODO: implement + } + } + if (args & (HSUART_RX_QUEUE | HSUART_RX_FIFO)) { + + if (args & HSUART_RX_FIFO) { + /* + * The followign suspend function flushes teh current rx transaction then + * waits till the current tx transaction is complete + * and then suspends HS UART HW port. Hence tx transactions + * are not affected by suspend + */ + msm_hsuart_suspend(io_p_context->hsuart_id); + } + + if (args & HSUART_RX_QUEUE) { + hsuart_flush_rx_queue(io_p_context); + } + + if (args & HSUART_RX_FIFO) { + msm_hsuart_resume(io_p_context->hsuart_id); + } + + } + + HSUART_EXIT(); + return 0; +} + + +static int +hsuart_tx_do_drain(struct dev_ctxt* i_p_context, unsigned long timeout) +{ + int ret = 0; + + if( timeout == 0 ) { + // non blocking case + if(!hsuart_rxtx_is_empty(&(i_p_context->tx_lists))) + ret |= 2; + +// if(!hsuart_tx_fifo_is_empty ( ctxt )) +// rc |= 1; + + return ret; + } + + // timeout in jiffies + timeout = msecs_to_jiffies(timeout); + + ret = wait_event_interruptible_timeout( i_p_context->got_tx_buffer, + hsuart_rxtx_is_empty(&(i_p_context->tx_lists)), + timeout); + if (ret < 0) { + return ret; // interrupted by signal or error + } + + if (ret == 0) { + return 2; // expired but condition was not reached + } + + timeout = jiffies + ret; + while (time_before(jiffies, timeout)) { + if (hsuart_rxtx_is_empty(&(i_p_context->tx_lists))) { + return 0; + } + msleep (1); + } + return 1; +} + +static int +hsuart_ioctl_tx_drain(struct dev_ctxt* i_p_context, unsigned long timeout ) +{ + int ret; + HSUART_ENTER(); + ret = hsuart_tx_do_drain(i_p_context, timeout); + HSUART_EXIT_RET(ret); + return 0; +} + +static int +hsuart_ioctl_rx_bytes(struct dev_ctxt* 
i_p_context) +{ + int ret = 0; + int rx_data_exists = 0; + HSUART_ENTER(); + + rx_data_exists = hsuart_rx_data_exist(&(i_p_context->rx_lists)); + + if (rx_data_exists) { + //Buffers have data + ret |= 1; + } + + if ( msm_hsuart_rx_fifo_has_bytes(i_p_context->hsuart_id) ){ + //fifo has data + ret |= 2; + } + + HSUART_EXIT(); + return ret; +} + +static int +hsuart_ioctl_rx_flow ( struct dev_ctxt *ctxt, int opcode ) +{ + int ret = 0; + + HSUART_ENTER(); + + printk( KERN_ERR "hsuart_ioctl_rx_flow %X\n", opcode ); + + //Make sure we change rx flow only + opcode &= ~(HSUART_MODE_FLOW_DIRECTION_MASK); + opcode |= HSUART_MODE_FLOW_DIRECTION_RX_ONLY; + + ret = msm_hsuart_set_flow( + ctxt->hsuart_id, + opcode & HSUART_MODE_FLOW_CTRL_MASK); + + HSUART_EXIT(); + return ret; +} + + +static int +hsuart_ioctl_set_uart_mode(struct dev_ctxt* io_p_context, + void *usr_ptr, int usr_bytes ) +{ + + int ret = 0; + unsigned int changed; + struct hsuart_mode mode; + + HSUART_ENTER(); + if( copy_from_user ( &mode, usr_ptr, usr_bytes )) { + ret = -EFAULT; + goto Done; + } + HSUART_DEBUG("%s, speed 0x%x, flags 0x%x\n", + __FUNCTION__,mode.speed, mode.flags); + + if(mode.speed != io_p_context->uart_speed ) { + ret = msm_hsuart_set_baud_rate(io_p_context->hsuart_id, + mode.speed ); + if(ret != 0){ + goto Done; + } + io_p_context->uart_speed = mode.speed; + } + + changed = io_p_context->uart_flags ^ mode.flags; + + if (changed & HSUART_MODE_FLOW_CTRL_MASK) { + /* + * flow control changed + */ + msm_hsuart_set_flow(io_p_context->hsuart_id, + mode.flags & HSUART_MODE_FLOW_CTRL_MASK); + io_p_context->uart_flags &= ~HSUART_MODE_FLOW_CTRL_MASK; + io_p_context->uart_flags |= + mode.flags & HSUART_MODE_FLOW_CTRL_MASK; + } + + if(changed & HSUART_MODE_PARITY_MASK) { + /* + * parity changed + */ + msm_hsuart_set_parity( io_p_context->hsuart_id, + (mode.flags & HSUART_MODE_PARITY_MASK)); + io_p_context->uart_flags &= ~HSUART_MODE_PARITY_MASK; + io_p_context->uart_flags |= mode.flags & 
HSUART_MODE_PARITY_MASK; + } +Done: + HSUART_EXIT(); + return ret; + +} + +static long +hsuart_ioctl(struct file *file, + unsigned int cmd, unsigned long args) +{ + int ret = 0; + struct dev_ctxt* p_contxt; + void * usr_ptr = (void*) (args); + int usr_bytes = _IOC_SIZE(cmd); + + p_contxt = container_of(file->f_op, struct dev_ctxt, fops); + + HSUART_ENTER(); + HSUART_DEBUG("%s, cmd 0x%x, args 0x%lx\n", + __FUNCTION__, cmd, args); + switch ( cmd ) { + case HSUART_IOCTL_GET_VERSION: + { + int ver = DRIVER_VERSION; + if( copy_to_user(usr_ptr, &ver, usr_bytes)) { + ret = -EFAULT; + goto Done; + } + } break; + + case HSUART_IOCTL_GET_BUF_INF: + { + struct hsuart_buf_inf binf; + + binf.rx_buf_num = p_contxt->rx_buf_num; + binf.tx_buf_num = p_contxt->tx_buf_num; + binf.rx_buf_size = p_contxt->rx_buf_size; + binf.tx_buf_size = p_contxt->tx_buf_size; + if( copy_to_user (usr_ptr, &binf, usr_bytes)) { + ret = -EFAULT; + goto Done; + } + } + break; + + case HSUART_IOCTL_GET_STATS: + { + struct hsuart_stat stat; + stat.tx_bytes = p_contxt->tx_ttl; + stat.rx_bytes = p_contxt->rx_ttl; + stat.rx_dropped = p_contxt->rx_dropped; + if( copy_to_user ( usr_ptr, &stat, usr_bytes )) { + ret = -EFAULT; + goto Done; + } + } break; + + case HSUART_IOCTL_GET_UARTMODE: + { + struct hsuart_mode mode; + mode.speed = p_contxt->uart_speed; + mode.flags = p_contxt->uart_flags; + if( copy_to_user ( usr_ptr, &mode, usr_bytes )) { + ret = -EFAULT; + goto Done; + } + } break; + + case HSUART_IOCTL_SET_RXLAT: + p_contxt->pdata->rx_latency = args; + // hsuart_recalc_timeout( ctxt ); + break; + + case HSUART_IOCTL_SET_UARTMODE: + ret = hsuart_ioctl_set_uart_mode(p_contxt, usr_ptr, usr_bytes ); + break; + + case HSUART_IOCTL_CLEAR_FIFO: + case HSUART_IOCTL_FLUSH: + ret = hsuart_ioctl_flush(p_contxt, args); + break; + + case HSUART_IOCTL_TX_DRAIN: + ret = hsuart_ioctl_tx_drain(p_contxt, args); + break; + + case HSUART_IOCTL_RX_BYTES: + ret = hsuart_ioctl_rx_bytes(p_contxt); + break; + + case 
HSUART_IOCTL_RX_FLOW: + ret = hsuart_ioctl_rx_flow(p_contxt, args); + break; + + case HSUART_IOCTL_RESET_UART: + HSUART_INFO("%s: reset_uart\n", __FUNCTION__ ); + break; + + } +Done: + return ret; +} + +static int +hsuart_open(struct inode *inode, struct file *file) +{ + struct dev_ctxt* p_context; + int ret; + + HSUART_ENTER(); + + p_context = container_of(file->f_op, struct dev_ctxt, fops); + + HSUART_DEBUG( " Hsuart open id %d opened %d initialized %d\n" , + p_context->hsuart_id, + (int) p_context->is_opened, + (int) p_context->is_initilized ); + /* + * check if it is in use + */ + if (test_and_set_bit (0, &(p_context->is_opened))) { + return -EBUSY; + } + + if (0 == p_context->is_initilized) { + + p_context->uart_flags = p_context->pdata->uart_mode; + p_context->uart_speed = p_context->pdata->uart_speed; + + ret = hsuart_uart_port_init(p_context); + + + if (ret) { + clear_bit(0, &(p_context->is_opened)); + return ret; + } + + p_context->is_initilized = 1; + + HSUART_DEBUG( " Hsuart port init id %d opened %d initialized %d\n" , + p_context->hsuart_id, + (int) p_context->is_opened, + (int) p_context->is_initilized ); + // hsuart_start_rx_xfer ( ctxt ); + + } + /* + * attach private data to the file handle for future use + */ + file->private_data = p_context; + p_context->tx_ttl = 0; + p_context->rx_ttl = 0; + p_context->rx_dropped = 0; + + ret = nonseekable_open(inode, file); + + HSUART_EXIT(); + + return ret; +} + +static int +hsuart_close(struct inode *inode, struct file *file) +{ + struct dev_ctxt* p_contxt; + int ret = 0; + + HSUART_ENTER(); + + p_contxt = container_of(file->f_op, struct dev_ctxt, fops); + + HSUART_DEBUG( " Hsuart close id %d opened %d initialized %d\n" , + p_contxt->hsuart_id, + (int)p_contxt->is_opened, + (int)p_contxt->is_initilized ); + + if ( 0 != p_contxt->is_initilized ) { + ret = hsuart_uart_port_release(p_contxt); + p_contxt->is_initilized = 0; + } + + /* mark it as unused */ + clear_bit(0, &(p_contxt)->is_opened); + + HSUART_EXIT(); 
+ + return ret; +} + +static struct file_operations hsuart_fops = { + .llseek = no_llseek, + .read = hsuart_read, + .write = hsuart_write, +// .fsync = hsuart_fsync, + .poll = hsuart_poll, + .unlocked_ioctl = hsuart_ioctl, + .open = hsuart_open, + .release = hsuart_close, +}; + + + + +static int __devexit +hsuart_remove ( struct platform_device *dev ) +{ + struct dev_ctxt* p_contxt; + int ret = 0; + + HSUART_ENTER(); + + /* + * TODO: fixme, add proper cleanups!!! + */ + + /* + * Remove sysfs entries. + */ + device_remove_file(&(dev->dev), &dev_attr_dbg_lvl); + + p_contxt = platform_get_drvdata(dev); + if(NULL != p_contxt) { + platform_set_drvdata (dev, NULL); + } + return ret; +} + + +static int __devinit +hsuart_probe(struct platform_device *dev) +{ + struct hsuart_platform_data* p_data; + struct dev_ctxt* p_contxt = NULL; + int ret; + + HSUART_ENTER(); + + p_data = dev->dev.platform_data; + if(p_data == NULL) { + HSUART_ERR("%s: no platform data\n", DRIVER_NAME); + return -ENODEV; + } + + p_contxt = kzalloc (sizeof(struct dev_ctxt), GFP_KERNEL); + if(NULL == p_contxt) { + return -ENOMEM; + } + /* Attach the context to its device */ + platform_set_drvdata(dev, p_contxt); + + /* Attach platfor-device and data to the context */ + p_contxt->pdev = dev; + p_contxt->pdata = p_data; + + ret = device_create_file(&(dev->dev), &dev_attr_dbg_lvl); + if(ret) + goto probe_cleanup; + + /* + * Init main spin lock + */ + spin_lock_init(&(p_contxt->lock)); + + /* + * Import data from the board file + */ + + /* Get the name */ + if (p_data->dev_name) { + p_contxt->dev_name = p_data->dev_name; + } + else { + p_contxt->dev_name = dev->name; + } + + /* Get the port number */ + p_contxt->uart_port_number = dev->id; + + /* Init Rx/Tx buffer sub-system */ + p_contxt->rx_buf_size = p_data->rx_buf_size; + p_contxt->rx_buf_num = p_data->rx_buf_num; + + p_contxt->tx_buf_size = p_data->tx_buf_size; + p_contxt->tx_buf_num = p_data->tx_buf_num; + + p_contxt->min_packet_sz = 
 p_data->min_packet_size;
+
+ ret = hsuart_rxtx_buf_init(p_contxt);
+ if (ret) {
+ goto probe_cleanup;
+ }
+
+ memcpy(&p_contxt->fops, &hsuart_fops, sizeof(struct file_operations));
+
+ /* Init & register misc device */
+ p_contxt->mdev.name = p_contxt->dev_name;
+ p_contxt->mdev.minor = MISC_DYNAMIC_MINOR;
+ p_contxt->mdev.fops = &p_contxt->fops;
+
+ ret = misc_register(&p_contxt->mdev);
+ if (ret) {
+ goto probe_cleanup;
+ }
+
+ p_contxt->uart_flags = p_data->uart_mode;
+ p_contxt->uart_speed = p_data->uart_speed;
+
+ init_waitqueue_head(&(p_contxt->got_rx_buffer));
+ init_waitqueue_head(&(p_contxt->got_tx_buffer));
+
+ HSUART_INFO("%s:created '%s' device on UART %d\n",
+ DRIVER_NAME,
+ p_contxt->dev_name,
+ p_contxt->uart_port_number );
+
+ /* Handle non-deferred initialization */
+ if(!(p_contxt->pdata->options & HSUART_OPTION_DEFERRED_LOAD)) {
+/*
+ ret = hsuart_init_uart ( ctxt );
+ if( ret ) {
+ goto err_misc_unregister;
+ }
+ hsuart_start_rx_xfer ( ctxt );
+*/
+ }
+
+ return 0;
+
+probe_cleanup:
+ /* TODO: break this into multiple steps of clean up and jump to each
+ step based on where we failed */
+ HSUART_ERR("%s: FIXME\n", DRIVER_NAME);
+ HSUART_ERR("%s: Failed (%d) to initialize device\n",
+ DRIVER_NAME, ret );
+
+ return ret;
+}
+
+#ifdef CONFIG_PM
+static int
+hsuart_suspend(struct platform_device *dev, pm_message_t state)
+{
+ int ret = 0;
+
+ struct dev_ctxt* io_p_contxt = (struct dev_ctxt*) platform_get_drvdata(dev);
+
+ HSUART_ENTER();
+
+ HSUART_DEBUG( " Hsuart suspend id %d opened %d initialized %d\n" ,
+ io_p_contxt->hsuart_id,
+ (int)io_p_contxt->is_opened,
+ (int)io_p_contxt->is_initilized );
+
+ if ( !io_p_contxt->is_opened || !io_p_contxt->is_initilized ) {
+ goto out;
+ }
+
+ ret = msm_hsuart_suspend(io_p_contxt->hsuart_id);
+
+ if (io_p_contxt->pdata->options & HSUART_OPTION_RX_FLUSH_QUEUE_ON_SUSPEND ) {
+ hsuart_flush_rx_queue(io_p_contxt);
+ }
+
+ if (io_p_contxt->pdata->options & HSUART_OPTION_TX_FLUSH_QUEUE_ON_SUSPEND ) {
+ 
hsuart_flush_tx_queue(io_p_contxt); + } + +out: + HSUART_EXIT(); + + return ret; +} + +static int +hsuart_resume (struct platform_device *dev) +{ + int ret = 0; + + struct dev_ctxt* io_p_contxt = (struct dev_ctxt*) platform_get_drvdata(dev); + + HSUART_ENTER(); + + HSUART_DEBUG( " Hsuart resume id %d opened %d initialized %d\n" , + io_p_contxt->hsuart_id, + (int)io_p_contxt->is_opened, + (int)io_p_contxt->is_initilized ); + + if ( !io_p_contxt->is_opened || !io_p_contxt->is_initilized ) { + goto out; + } + + ret = msm_hsuart_resume(io_p_contxt->hsuart_id); +out: + HSUART_EXIT(); + + return ret; +} +#else +#define hsuart_suspend NULL +#define hsuart_resume NULL +#endif /* CONFIG_PM */ + +static struct platform_driver hsuart_driver = { + .driver = { + .name = DRIVER_NAME, + }, + .probe = hsuart_probe, + .remove = __devexit_p(hsuart_remove), + .suspend = hsuart_suspend, + .resume = hsuart_resume, +}; + +/* + * + */ +static int __init +hsuart_init(void) +{ + return platform_driver_register(&hsuart_driver); +} + +/* + * + */ +static void __exit +hsuart_exit(void) +{ + platform_driver_unregister ( &hsuart_driver ); +} + +module_init(hsuart_init); +module_exit(hsuart_exit); + diff --git a/drivers/misc/user-pins.c b/drivers/misc/user-pins.c new file mode 100644 index 00000000000..4d35cc201c5 --- /dev/null +++ b/drivers/misc/user-pins.c @@ -0,0 +1,1014 @@ +/* + * drivers/user-pins.c + * + * Copyright (C) 2008 Palm, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef CONFIG_PINMUX +#include +#endif + +#include +#include +#include +#include + +#undef MODDEBUG +//#define MODDEBUG 1 + +#ifdef MODDEBUG +#define PDBG(args...) printk(args) +#else +#define PDBG(args...) 
+#endif + +#define DRIVER_NAME "user-pins" + +enum event { + USER_PIN_EVENT_IRQ, + USER_PIN_EVENT_IRQ_READ, +}; + +struct user_pins_log_event { + ktime_t timestamp; + enum event event; + int gpio; +}; + +#if defined(CONFIG_DEBUG_FS) +#define NR_LOG_ENTRIES 512 + +static struct user_pins_log_event user_pins_log[NR_LOG_ENTRIES]; +static int user_pins_log_idx; + +static DEFINE_SPINLOCK(debug_lock); + +static char debug_buffer[PAGE_SIZE]; + +static void user_pins_log_event(int gpio, enum event event) +{ + unsigned long flags; + spin_lock_irqsave(&debug_lock, flags); + user_pins_log[user_pins_log_idx].timestamp = ktime_get(); + user_pins_log[user_pins_log_idx].event = event; + user_pins_log[user_pins_log_idx].gpio = gpio; + user_pins_log_idx += 1; + if (user_pins_log_idx == NR_LOG_ENTRIES) { + user_pins_log_idx = 0; + } + spin_unlock_irqrestore(&debug_lock, flags); +} + +static int debug_open(struct inode *inode, struct file *file) +{ + file->private_data = inode->i_private; + return 0; +} + +static ssize_t debug_show_log(struct file *file, char __user *ubuf, + size_t count, loff_t *ppos) +{ + char *buf = debug_buffer; + int i, n = 0; + const char *event; + unsigned long flags; + + spin_lock_irqsave(&debug_lock, flags); + + for (i = 0; i < user_pins_log_idx; i++) { + switch (user_pins_log[i].event) { + case USER_PIN_EVENT_IRQ: + event = "IRQ"; + break; + case USER_PIN_EVENT_IRQ_READ: + event = "IRQ_READ"; + break; + default: + event = ""; + break; + } + n += scnprintf(buf + n, PAGE_SIZE - n, + "%010llu: %-8d %s\n", + ktime_to_ns(user_pins_log[i].timestamp), + user_pins_log[i].gpio, event); + } + + user_pins_log_idx = 0; + + spin_unlock_irqrestore(&debug_lock, flags); + + return simple_read_from_buffer(ubuf, count, ppos, buf, n); +} + +static const struct file_operations debug_log_fops = { + .open = debug_open, + .read = debug_show_log, +}; + +static void user_pins_debug_init(void) +{ + struct dentry *dent; + dent = debugfs_create_dir(DRIVER_NAME, 0); + if (IS_ERR(dent)) 
{ + return; + } + debugfs_create_file("log", 0444, dent, NULL, &debug_log_fops); + user_pins_log_idx = 0; +} +#else +static void user_pins_log_event(int gpio, enum event event) { } +static void user_pins_debug_init(void) { } +#endif + +static struct kobject *user_hw_kobj; +static struct kobject *pins_kobj; + +DEFINE_SPINLOCK(pins_lock); + +static int __init user_hw_init(void) +{ + user_hw_kobj = kobject_create_and_add("user_hw", NULL); + if (user_hw_kobj == NULL) { + return -ENOMEM; + } + return 0; +} + +arch_initcall(user_hw_init); + +struct pin_attribute { + struct attribute attr; + ssize_t (*show) (struct pin_attribute *attr, char *buf); + ssize_t (*store)(struct pin_attribute *attr, const char *buf, size_t count); +}; + +#define to_pin_attr(_attr) container_of(_attr, struct pin_attribute, attr) + +static ssize_t +pin_attr_show(struct kobject *kobj, struct attribute *attr, char *buf) +{ + struct pin_attribute * pin_attr = to_pin_attr(attr); + if (pin_attr->show) { + return pin_attr->show(pin_attr, buf); + } else { + return -EIO; + } + return 0; +} + +static ssize_t +pin_attr_store(struct kobject *kobj, struct attribute *attr, const char *buf, + size_t count) +{ + struct pin_attribute * pin_attr = to_pin_attr(attr); + if (pin_attr->store) { + return pin_attr->store (pin_attr, buf, count); + } else { + return -EIO; + } +} + +static struct sysfs_ops pin_sysfs_ops = { + .show = pin_attr_show, + .store = pin_attr_store, +}; + +static struct kobj_type ktype_pin = { + .release = NULL, + .sysfs_ops = &pin_sysfs_ops, +}; + +struct gpio_pin { + int gpio; + int options; + int direction; + int act_level; + int def_level; + int active_power_collapse; + irqreturn_t (*irq_handler)(int irq, void *data); + int (*pinmux)(int gpio, int mode); + atomic_t irq_count; + int irq_config; + int irq_masked; + int irq_requested; + const char * name; + int irq_handle_mode; + atomic_t irqs_during_suspend; + struct sysfs_dirent *sd; + struct pin_attribute attr_gpio; + struct pin_attribute 
attr_level; + struct pin_attribute attr_active; + struct pin_attribute attr_direction; + struct pin_attribute attr_irq; + struct pin_attribute attr_irqconfig; + struct pin_attribute attr_irqrequest; + struct pin_attribute attr_irqmask; + struct pin_attribute attr_irq_handle_mode; + struct pin_attribute attr_active_power_collapse; + struct attribute *attr_ptr_arr[11]; +}; + +struct gpio_pin_set_item { + struct attribute_group attr_grp; + struct gpio_pin pin; +}; + +struct gpio_pin_set { + const char *set_name; + struct kobject kobj; + int num_pins; + struct gpio_pin_set_item pins[]; +}; + +struct gpio_pin_dev_ctxt { + int num_sets; + struct gpio_pin_set *sets[]; +}; + +/* + * Show irq handle mode + * + * If AUTO, irq will be handled by irq_handler + */ +static int +pin_show_irq_mode ( struct pin_attribute *attr, char *buf) +{ + struct gpio_pin *pin = container_of(attr, struct gpio_pin, attr_irq_handle_mode ); + return sprintf(buf, "%d\n", pin->irq_handle_mode ); +} + +/* + * Set irq handle mode for specified pin + * + */ +static ssize_t +pin_store_irq_mode( struct pin_attribute *attr, const char * buf, size_t count) +{ + int irq_handle_mode; + unsigned long flags; + struct gpio_pin *pin = container_of(attr, struct gpio_pin, attr_irq_handle_mode ); + sscanf(buf, "%d", &irq_handle_mode); + + spin_lock_irqsave(&pins_lock, flags); + + // reset irq count to detect user suspend and kernel suspend + pin->irq_handle_mode = irq_handle_mode; + if(pin->irq_handle_mode == IRQ_HANDLE_OFF) + atomic_set(&pin->irqs_during_suspend, 0); + + spin_unlock_irqrestore(&pins_lock, flags); + + printk(KERN_INFO"USERPIN: setting irq handle mode of pin gpio %d to %d\n", + pin->gpio, irq_handle_mode); + + return count; +} + +static irqreturn_t user_pins_irq(int irq, void *data) +{ + unsigned long flags; + struct gpio_pin *pin = (struct gpio_pin *)data; + + user_pins_log_event(pin->gpio, USER_PIN_EVENT_IRQ); + + atomic_inc(&pin->irq_count); + + spin_lock_irqsave(&pins_lock, flags); + + if 
(pin->irq_handle_mode & IRQ_HANDLE_OFF) + atomic_inc(&pin->irqs_during_suspend); + + spin_unlock_irqrestore(&pins_lock, flags); + + if (pin->sd != NULL) { + sysfs_notify_dirent(pin->sd); + } + + if (pin->irq_handler != NULL) { + pin->irq_handler(irq, NULL); + } + + return IRQ_HANDLED; +} + +static int user_pins_irq_request(struct gpio_pin *pin) +{ + int rc = 0; + + printk("user-pins: configuring irq for gpio %d\n", pin->gpio); + + rc = request_irq(gpio_to_irq(pin->gpio), user_pins_irq, + pin->irq_config, "userpins", pin); + if (rc) { + printk("user-pins: failed to request irq\n"); + } + + return rc; +} + +/* + * Show gpio direction + */ +static int pin_show_direction(struct pin_attribute *attr, char *buf) +{ + struct gpio_pin *pin = container_of(attr, struct gpio_pin, attr_direction); + return sprintf(buf, "%d\n", pin->direction); +} + +/* + * Show active level for specified pin + */ +static int pin_show_active(struct pin_attribute *attr, char *buf) +{ + struct gpio_pin *pin = container_of(attr, struct gpio_pin, attr_active); + return sprintf(buf, "%d\n", pin->act_level); +} + +static int pin_show_active_power_collapse(struct pin_attribute *attr, char *buf) +{ + struct gpio_pin *pin = container_of(attr, struct gpio_pin, attr_active_power_collapse); + return sprintf(buf, "%d\n", pin->active_power_collapse); +} + +/* + * Show gpio number + */ +static int pin_show_gpio(struct pin_attribute *attr, char *buf) +{ + struct gpio_pin *pin = container_of(attr, struct gpio_pin, attr_gpio); + return sprintf(buf, "%d\n", pin->gpio); +} + +/* + * Show current for specified pin + */ +static int pin_show_level(struct pin_attribute *attr, char *buf) +{ + int val; + struct gpio_pin *pin = container_of(attr, struct gpio_pin, attr_level); + + val = gpio_get_value(pin->gpio); + PDBG("get: gpio[%d] = %d\n", pin->gpio, val); + if (val) { + return sprintf(buf, "1\n" ); + } else { + return sprintf(buf, "0\n" ); + } +} + +/* + * Set level for specified pin + */ +static ssize_t 
+pin_store_level(struct pin_attribute *attr, const char *buf, size_t count) +{ + int i = 0, len, val = -1; + struct gpio_pin *pin = container_of(attr, struct gpio_pin, attr_level); + + if (pin->options & PIN_READ_ONLY) { + return count; // just ignore writes + } + + /* skip leading white spaces */ + while (i < count && isspace(buf[i])) { + i++; + } + + len = count - i; + if (len >= 1 && strncmp(buf+i, "1", 1) == 0) { + val = 1; + goto set; + } + + if (len >= 1 && strncmp(buf+i, "0", 1) == 0) { + val = 0; + goto set; + } + + if (len >= 4 && strncmp(buf+i, "high", 4) == 0) { + val = 1; + goto set; + } + + if (len >= 3 && strncmp(buf+i, "low", 3) == 0) { + val = 0; + goto set; + } + + return count; + +set: + PDBG("set: gpio[%d] = %d\n", pin->gpio, val); + gpio_set_value(pin->gpio, val); + return count; +} + + +static ssize_t +pin_store_active_power_collapse(struct pin_attribute *attr, const char *buf, size_t count) +{ + int i = 0, len, val = -1; + struct gpio_pin *pin = container_of(attr, struct gpio_pin, attr_active_power_collapse); + + if (pin->options & PIN_READ_ONLY) { + return count; // just ignore writes + } + + /* skip leading white spaces */ + while (i < count && isspace(buf[i])) { + i++; + } + + len = count - i; + if (len >= 1 && strncmp(buf+i, "1", 1) == 0) { + val = 1; + } else if (len >= 1 && strncmp(buf+i, "0", 1) == 0) { + val = 0; + } else { /* invalid input */ + goto end; + } + + PDBG("set: active_power_collapse[%d] = %d\n", pin->gpio, val); + +#ifdef CONFIG_PINMUX + // This is an old legacy mechanism for muxing pins, it's not that clean + // and creates dependency on pinmux driver that is not really + // used on some systems. 
+ pinmux_set_power_collapse(pin->name, val); +#endif + + pin->active_power_collapse = val; + +end: + return count; + +} + +static int pin_show_irq(struct pin_attribute *attr, char *buf) +{ + struct gpio_pin *pin = container_of(attr, struct gpio_pin, attr_irq); + int count = atomic_xchg(&pin->irq_count, 0); + user_pins_log_event(pin->gpio, USER_PIN_EVENT_IRQ_READ); + return sprintf(buf, "%d\n", count); +} + +static int pin_show_irqconfig(struct pin_attribute *attr, char *buf) +{ + struct gpio_pin *pin = container_of(attr, struct gpio_pin, attr_irqconfig); + return sprintf(buf, "%d\n", pin->irq_config); +} + +static ssize_t +pin_store_irqconfig(struct pin_attribute *attr, const char *buf, size_t count) +{ + struct gpio_pin *pin = container_of(attr, struct gpio_pin, attr_irqconfig); + unsigned long flags; + int config; + + config = simple_strtoul(buf, NULL, 10) & IRQF_TRIGGER_MASK; + + spin_lock_irqsave(&pins_lock, flags); + pin->irq_config = config; + spin_unlock_irqrestore(&pins_lock, flags); + + return count; +} + +static int pin_show_irqrequest(struct pin_attribute *attr, char *buf) +{ + struct gpio_pin *pin = container_of(attr, struct gpio_pin, attr_irqrequest); + return sprintf(buf, "%d\n", !!pin->irq_requested); +} + +static ssize_t +pin_store_irqrequest(struct pin_attribute *attr, const char *buf, size_t count) +{ + struct gpio_pin *pin = container_of(attr, struct gpio_pin, attr_irqrequest); + unsigned long flags; + int request; + int rc = 0; + + if ((count > 0) && (buf[0] == '1')) { + request = 1; + } else { + request = 0; + } + + if (request != pin->irq_requested) { + if (request) { + rc = user_pins_irq_request(pin); + if (rc) { + goto fail; + } + } else { + free_irq(gpio_to_irq(pin->gpio), pin); + } + + spin_lock_irqsave(&pins_lock, flags); + pin->irq_requested = request; + pin->irq_masked = 0; + spin_unlock_irqrestore(&pins_lock, flags); + } + return count; +fail: + return rc; +} + +static int pin_show_irqmask(struct pin_attribute *attr, char *buf) +{ + 
struct gpio_pin *pin = container_of(attr, struct gpio_pin, attr_irqmask);
+	return sprintf(buf, "%d\n", !!pin->irq_masked);
+}
+
+static ssize_t
+pin_store_irqmask(struct pin_attribute *attr, const char *buf, size_t count)
+{
+	struct gpio_pin *pin = container_of(attr, struct gpio_pin, attr_irqmask);
+	unsigned long flags;
+	int mask;
+
+	if ((count > 0) && (buf[0] == '1')) {
+		mask = 1;
+	} else {
+		mask = 0;
+	}
+
+	spin_lock_irqsave(&pins_lock, flags);
+	if (mask != pin->irq_masked) {
+		if (mask) {
+			/*
+			 * NOTE(review): disable_irq() can sleep waiting for a
+			 * running handler to complete, but we hold
+			 * spin_lock_irqsave() here (atomic context). Confirm
+			 * whether disable_irq_nosync() is intended, or move
+			 * the enable/disable outside the lock.
+			 */
+			disable_irq(gpio_to_irq(pin->gpio));
+		} else {
+			enable_irq(gpio_to_irq(pin->gpio));
+		}
+		pin->irq_masked = mask;
+	}
+	spin_unlock_irqrestore(&pins_lock, flags);
+
+	return count;
+}
+
+/*
+ * Copy one board-supplied user_pin description into a gpio_pin_set_item
+ * and wire up its sysfs attributes (gpio/level/active/direction/
+ * active_power_collapse, plus the irq* attributes when PIN_IRQ is set).
+ */
+static void
+pin_set_item_init(struct gpio_pin_set_item *psi, struct user_pin *up)
+{
+	psi->pin.name = up->name;
+	psi->pin.gpio = up->gpio;
+	psi->pin.options = up->options;
+	psi->pin.direction = up->direction;
+	psi->pin.act_level = up->act_level;
+	psi->pin.def_level = up->def_level;
+	psi->pin.irq_handler = up->irq_handler;
+	psi->pin.pinmux = up->pinmux;
+	psi->pin.irq_config = up->irq_config;
+	psi->pin.irq_handle_mode = up->irq_handle_mode;
+	atomic_set(&psi->pin.irqs_during_suspend, 0);
+	atomic_set(&psi->pin.irq_count, 0);
+	psi->pin.irq_requested = 0;
+	psi->pin.irq_masked = 0;
+
+	// gpio attr
+	psi->pin.attr_gpio.attr.name = "gpio";
+	psi->pin.attr_gpio.attr.mode = 0444;
+	psi->pin.attr_gpio.show = pin_show_gpio;
+
+	// level attr
+	psi->pin.attr_level.attr.name = "level";
+	psi->pin.attr_level.attr.mode = 0644;
+	psi->pin.attr_level.show = pin_show_level;
+	psi->pin.attr_level.store = pin_store_level;
+
+	// active attr
+	psi->pin.attr_active.attr.name = "active";
+	psi->pin.attr_active.attr.mode = 0444;
+	psi->pin.attr_active.show = pin_show_active;
+
+	// direction
+	psi->pin.attr_direction.attr.name = "direction";
+	psi->pin.attr_direction.attr.mode = 0444;
+	psi->pin.attr_direction.show = pin_show_direction;
+
+	psi->pin.attr_active_power_collapse.attr.name =
"active_power_collapse"; + psi->pin.attr_active_power_collapse.attr.mode = 0644; + psi->pin.attr_active_power_collapse.show = pin_show_active_power_collapse; + psi->pin.attr_active_power_collapse.store = pin_store_active_power_collapse; + + if (psi->pin.options & PIN_IRQ) { + // irq + psi->pin.attr_irq.attr.name = "irq"; + psi->pin.attr_irq.attr.mode = 0444; + psi->pin.attr_irq.show = pin_show_irq; + + // irqconfig + psi->pin.attr_irqconfig.attr.name = "irqconfig"; + psi->pin.attr_irqconfig.attr.mode = 0666; + psi->pin.attr_irqconfig.show = pin_show_irqconfig; + psi->pin.attr_irqconfig.store = pin_store_irqconfig; + + // irqrequest + psi->pin.attr_irqrequest.attr.name = "irqrequest"; + psi->pin.attr_irqrequest.attr.mode = 0666; + psi->pin.attr_irqrequest.show = pin_show_irqrequest; + psi->pin.attr_irqrequest.store = pin_store_irqrequest; + + // irqmask + psi->pin.attr_irqmask.attr.name = "irqmask"; + psi->pin.attr_irqmask.attr.mode = 0666; + psi->pin.attr_irqmask.show = pin_show_irqmask; + psi->pin.attr_irqmask.store = pin_store_irqmask; + + // irq handle mode + psi->pin.attr_irq_handle_mode.attr.name = "irq_handle_mode"; + psi->pin.attr_irq_handle_mode.attr.mode = 0644; + psi->pin.attr_irq_handle_mode.show = pin_show_irq_mode; + psi->pin.attr_irq_handle_mode.store = pin_store_irq_mode; + } + + // setup attr pointer array + { + int i = 0; + psi->pin.attr_ptr_arr[i++] = &psi->pin.attr_gpio.attr; + psi->pin.attr_ptr_arr[i++] = &psi->pin.attr_level.attr; + psi->pin.attr_ptr_arr[i++] = &psi->pin.attr_active.attr; + psi->pin.attr_ptr_arr[i++] = &psi->pin.attr_direction.attr; + psi->pin.attr_ptr_arr[i++] = &psi->pin.attr_active_power_collapse.attr; + if (psi->pin.options & PIN_IRQ) { + psi->pin.attr_ptr_arr[i++] = &psi->pin.attr_irq.attr; + psi->pin.attr_ptr_arr[i++] = &psi->pin.attr_irqconfig.attr; + psi->pin.attr_ptr_arr[i++] = &psi->pin.attr_irqrequest.attr; + psi->pin.attr_ptr_arr[i++] = &psi->pin.attr_irqmask.attr; + psi->pin.attr_ptr_arr[i++] = 
&psi->pin.attr_irq_handle_mode.attr; + } + psi->pin.attr_ptr_arr[i++] = NULL; + + /* if this is triggered, then we need a larger attr_ptr_arr array */ + BUG_ON(i > ARRAY_SIZE(psi->pin.attr_ptr_arr)); + } + + // setup attribute group + psi->attr_grp.name = psi->pin.name; + psi->attr_grp.attrs = psi->pin.attr_ptr_arr; + + return; +} + +/* + * + */ +static struct gpio_pin_set * pin_set_alloc(struct user_pin_set *ups) +{ + int i; + struct gpio_pin_set *gps = NULL; + + gps = kzalloc(sizeof(struct gpio_pin_set) + + ups->num_pins * sizeof(struct gpio_pin_set_item), GFP_KERNEL); + if (gps == NULL) { + return NULL; + } + + gps->num_pins = ups->num_pins; + gps->set_name = ups->set_name; + + for (i = 0; i < gps->num_pins; i++) { + pin_set_item_init(&gps->pins[i], &ups->pins[i]); + } + + return gps; +} + +/* + * Registers specified pin set + */ +static int pin_set_register(struct gpio_pin_set *s) +{ + int rc, i; + struct sysfs_dirent *grp_sd; + + if (s == NULL) { + return -EINVAL; + } + + rc = kobject_init_and_add(&s->kobj, &ktype_pin, pins_kobj, s->set_name); + if (rc) { + printk (KERN_ERR "Failed to register kobject (%s)\n", s->set_name); + return -ENODEV; + } + + /* for all pins */ + for (i = 0; i < s->num_pins; i++) { + rc = gpio_request(s->pins[i].pin.gpio, "gpio"); + if (rc) { + printk(KERN_ERR "Failed to request gpio (%d)\n", + s->pins[i].pin.gpio); + continue; + } + + if (s->pins[i].pin.direction != -1) { // direction is set + if (s->pins[i].pin.direction == 0) { // an output + /* A setting of def_level == -1 means that we + * keep the current level of the GPIO. + * Otherwise we set def_level. + */ + int level = (-1 == s->pins[i].pin.def_level) ? 
+				gpio_get_value(s->pins[i].pin.gpio) :
+				s->pins[i].pin.def_level;
+
+			gpio_direction_output(s->pins[i].pin.gpio, level);
+		} else { // an input
+			gpio_direction_input(s->pins[i].pin.gpio);
+		}
+	}
+
+	// create attribute group
+	rc = sysfs_create_group(&s->kobj, &s->pins[i].attr_grp);
+	if (rc) {
+		printk(KERN_ERR "Failed to create sysfs attr group (%s)\n",
+			s->pins[i].pin.name );
+	}
+
+	grp_sd = sysfs_get_dirent(s->kobj.sd, NULL, s->pins[i].attr_grp.name);
+	if (grp_sd == NULL) {
+		printk(KERN_ERR "user-pins: failed to get sd for %s\n",
+			s->pins[i].attr_grp.name);
+	} else {
+		s->pins[i].pin.sd = sysfs_get_dirent(grp_sd, NULL, "irq");
+		if ((s->pins[i].pin.sd == NULL) &&
+		    (s->pins[i].pin.options & PIN_IRQ)) {
+			printk(KERN_ERR "user-pins: failed to get sd for %s/irq\n",
+				s->pins[i].attr_grp.name);
+		}
+		/* FIX: drop the group dirent reference taken above; only the
+		 * "irq" dirent is kept (for sysfs_notify_dirent). Previously
+		 * the grp_sd reference was leaked on every pin. */
+		sysfs_put(grp_sd);
+	}
+	}
+
+	return 0;
+}
+
+/*
+ * Tear down a pin set: release any requested irqs, remove the sysfs
+ * attribute groups, free the gpios, and free the set itself.
+ */
+static void pin_set_unregister(struct gpio_pin_set *s)
+{
+	int i;
+
+	if (s == NULL) {
+		return;
+	}
+
+	/* for all pins */
+	/* BUGFIX: loop condition was "s->num_pins" (always true for any
+	 * non-empty set), so the loop ran past the end of the array. */
+	for (i = 0; i < s->num_pins; i++) {
+		if ((s->pins[i].pin.options & PIN_IRQ) &&
+		    (s->pins[i].pin.irq_requested != 0)) {
+			free_irq(gpio_to_irq(s->pins[i].pin.gpio), &s->pins[i].pin);
+		}
+
+		sysfs_remove_group(&s->kobj, &s->pins[i].attr_grp);
+		gpio_free(s->pins[i].pin.gpio);
+	}
+	kobject_del(&s->kobj);
+	kfree(s);
+}
+
+static int user_pins_probe(struct platform_device *pdev)
+{
+	int i, rc;
+	struct user_pins_platform_data *pdata;
+	struct gpio_pin_dev_ctxt *dev_ctxt;
+
+	pdata = pdev->dev.platform_data;
+	if( pdata == NULL ) {
+		return -ENODEV;
+	}
+
+	dev_ctxt = kzalloc(sizeof (struct gpio_pin_dev_ctxt) +
+		pdata->num_sets * sizeof(struct gpio_pin_set *), GFP_KERNEL );
+	if (dev_ctxt == NULL) {
+		return -ENOMEM;
+	}
+	dev_ctxt->num_sets = pdata->num_sets;
+
+	for (i = 0; i < dev_ctxt->num_sets; i++) {
+		dev_ctxt->sets[i] = pin_set_alloc (pdata->sets + i);
+		if (dev_ctxt->sets[i] == NULL) {
+			printk(KERN_ERR "Failed to init pin set '%s'\n",
+				pdata->sets[i].set_name );
+			continue;
+		}
+		rc =
pin_set_register(dev_ctxt->sets[i]); + if (rc) { + printk(KERN_ERR "Failed to register pin set '%s'\n", + pdata->sets[i].set_name ); + } + } + + dev_set_drvdata(&pdev->dev, dev_ctxt); + + return 0; +} + +/* + * + */ +static int user_pins_remove(struct platform_device *pdev) +{ + int i; + struct gpio_pin_dev_ctxt *dev_ctxt; + + dev_ctxt = dev_get_drvdata(&pdev->dev); + if (dev_ctxt == NULL) { + return 0; + } + + for (i = 0; i < dev_ctxt->num_sets; i++) { + pin_set_unregister(dev_ctxt->sets[i]); + } + + dev_set_drvdata(&pdev->dev, NULL); + kfree(dev_ctxt); + + return 0; +} + +#ifdef CONFIG_PM + +static int user_pins_suspend(struct platform_device *pdev, pm_message_t state) +{ + int i, j; + struct gpio_pin_dev_ctxt *dev_ctxt; + struct gpio_pin_set *pset; + struct gpio_pin *pin; + unsigned long flags; + + dev_ctxt = platform_get_drvdata(pdev); + if (dev_ctxt == NULL) { + return 0; + } + + spin_lock_irqsave ( &pins_lock, flags ); + + /* + * The first loop is to check for any pending interrupts from + * the time starting IRQ_HANDLE_OFF is set (from user space). + * If so, fail the suspend, and let the system to go back up + * to userspace. 
+	 */
+
+	for( i = 0; i < dev_ctxt->num_sets; i++ ) {
+		pset = dev_ctxt->sets[i];
+		for( j = 0; j < pset->num_pins; j++ ) {
+			pin = &(pset->pins[j].pin);
+
+			if ((pin->options & PIN_IRQ) &&
+			    (pin->irq_handle_mode & IRQ_HANDLE_OFF) &&
+			    (atomic_read(&pin->irqs_during_suspend) != 0)) {
+
+				atomic_set(&pin->irqs_during_suspend, 0);
+				spin_unlock_irqrestore ( &pins_lock, flags );
+
+				printk(KERN_INFO"%s: not suspending due to pending irqs for gpio %d\n",
+					__func__, pin->gpio);
+
+				return -EBUSY;
+			}
+		}
+	}
+
+	for (i = 0; i < dev_ctxt->num_sets; i++) {
+		pset = dev_ctxt->sets[i];
+		for (j = 0; j < pset->num_pins; j++) {
+			pin = &(pset->pins[j].pin);
+
+			if (pin->options & PIN_WAKEUP_SOURCE) {
+				int irq = gpio_to_irq(pin->gpio);
+
+				if ((pin->options & PIN_IRQ) &&
+				    pin->irq_requested &&
+				    !pin->irq_masked) {
+					/*
+					 * NOTE(review): disable_irq() may sleep
+					 * waiting for in-flight handlers, and we
+					 * are inside spin_lock_irqsave() here.
+					 * Confirm whether disable_irq_nosync()
+					 * is the intended call.
+					 */
+					disable_irq(gpio_to_irq(pin->gpio));
+				}
+				enable_irq_wake(irq);
+			}
+
+			// If machine installed pinmux hook for the pin, call it
+			// to mux it into suspended mode. Exception is made for
+			// pins that are specifically configured to stay active
+			// even during suspend periods.
+			if (pin->pinmux && !pin->active_power_collapse)
+				pin->pinmux(pin->gpio, PIN_MODE_SUSPENDED);
+		}
+	}
+
+	spin_unlock_irqrestore ( &pins_lock, flags );
+
+
+	return 0;
+}
+
+/* Undo user_pins_suspend(): remux pins active, disable irq wake sources
+ * and re-enable any irqs that were disabled on the way down. */
+static int user_pins_resume(struct platform_device *pdev)
+{
+	int i, j;
+	unsigned long flags;
+	struct gpio_pin_dev_ctxt *dev_ctxt;
+	struct gpio_pin_set *pset;
+	struct gpio_pin *pin;
+
+	dev_ctxt = platform_get_drvdata(pdev);
+	if (dev_ctxt == NULL) {
+		return 0;
+	}
+
+	spin_lock_irqsave(&pins_lock, flags);
+
+	for (i = 0; i < dev_ctxt->num_sets; i++) {
+		pset = dev_ctxt->sets[i];
+		for (j = 0; j < pset->num_pins; j++) {
+			pin = &(pset->pins[j].pin);
+
+			// If machine installed pinmux hook for the pin, call it
+			// to mux it into active mode. If the pin is configured
+			// to be active during suspend periods we assume we don't
+			// need to remux it into active state again.
+			if (pin->pinmux && !pin->active_power_collapse)
+				pin->pinmux(pin->gpio, PIN_MODE_ACTIVE);
+
+			if (pin->options & PIN_WAKEUP_SOURCE) {
+				int irq = gpio_to_irq(pin->gpio);
+				disable_irq_wake(irq);
+				if ((pin->options & PIN_IRQ) &&
+				    pin->irq_requested &&
+				    !pin->irq_masked) {
+					enable_irq(gpio_to_irq(pin->gpio));
+				}
+			}
+			atomic_set(&pin->irqs_during_suspend, 0);
+		}
+	}
+
+	spin_unlock_irqrestore(&pins_lock, flags);
+
+	return 0;
+}
+
+#else
+#define user_pins_suspend  NULL
+#define user_pins_resume   NULL
+#endif
+
+static struct platform_driver user_pins_driver = {
+	.driver = {
+		.name = DRIVER_NAME,
+	},
+	.probe = user_pins_probe,
+	.remove = __devexit_p(user_pins_remove),
+	.suspend = user_pins_suspend,
+	.resume = user_pins_resume,
+};
+
+static int __init user_pins_init(void)
+{
+	int rc;
+
+	if (user_hw_kobj == NULL) {
+		return -ENOMEM;
+	}
+
+	pins_kobj = kobject_create_and_add("pins", user_hw_kobj);
+	if (pins_kobj == NULL) {
+		return -ENOMEM;
+	}
+
+	/* register pins platform device */
+	rc = platform_driver_register(&user_pins_driver);
+	if (rc) {
+		kobject_del(pins_kobj);
+	}
+
+	user_pins_debug_init();
+
+	return rc;
+}
+
+static void __exit
+user_pins_exit(void)
+{
+	/* BUGFIX: the platform driver registered in user_pins_init() was
+	 * never unregistered, leaving a dangling driver (and potential
+	 * use-after-free of module text) after module unload. */
+	platform_driver_unregister(&user_pins_driver);
+	kobject_del(pins_kobj);
+}
+
+module_init(user_pins_init);
+module_exit(user_pins_exit);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
diff --git a/include/linux/cy8ctma395.h b/include/linux/cy8ctma395.h
new file mode 100644
index 00000000000..2ab4afaddd8
--- /dev/null
+++ b/include/linux/cy8ctma395.h
@@ -0,0 +1,16 @@
+#define CY8CTMA395_DEVICE	"cy8ctma395"
+#define CY8CTMA395_DRIVER	"cy8ctma395"
+
+struct cy8ctma395_platform_data {
+	int (*swdck_request)(int request);
+	int (*swdio_request)(int request);
+	void (*vdd_enable)(int enable);
+	unsigned xres;
+	unsigned long xres_us;
+	unsigned swdck;
+	unsigned swdio;
+	int swd_wait_retries;
+	int port_acquire_retries;
+	int status_reg_timeout_ms;
+	int nr_blocks;
+};
diff --git a/include/linux/hsuart.h
b/include/linux/hsuart.h
new file mode 100644
index 00000000000..2901aa0a58d
--- /dev/null
+++ b/include/linux/hsuart.h
@@ -0,0 +1,153 @@
+/*
+ * include/linux/hsuart.h - High speed UART driver APIs
+ *
+ * Copyright (C) 2008 Palm Inc,
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Author: Amir Frenkel (amir.frenkel@palm.com)
+ * Based on existing Palm HSUART driver interface
+ *
+ */
+#ifndef __HSUART_INCLUDED__
+#define __HSUART_INCLUDED__
+
+/* driver version packing: major in bits 15..8, minor in bits 7..0 */
+#define HSUART_VER_MAJOR(v)	(((v)>>8) & 0xFF)
+#define HSUART_VER_MINOR(v)	(((v)) & 0xFF)
+
+/* known set of uart speed settings */
+#define HSUART_SPEED_38K	38400
+#define HSUART_SPEED_115K	115200
+#define HSUART_SPEED_1228K	1228800
+#define HSUART_SPEED_3686K	3686400
+
+/*
+   Specifies target HSUART_IOCTL_CLEAR_FIFO/HSUART_IOCTL_FLUSH
+*/
+#define HSUART_RX_FIFO	(1 << 0)  // UART RX FIFO
+#define HSUART_TX_FIFO	(1 << 1)  // UART TX FIFO
+#define HSUART_TX_QUEUE	(1 << 2)  // TX BUFFERS
+#define HSUART_RX_QUEUE	(1 << 3)  // RX BUFFERS
+
+/*
+ * Rx flow control
+ */
+#define HSUART_RX_FLOW_OFF	0  // DEPRECATED
+#define HSUART_RX_FLOW_AUTO	0
+#define HSUART_RX_FLOW_ON	1
+
+/* buffer pool geometry reported by HSUART_IOCTL_GET_BUF_INF */
+struct hsuart_buf_inf {
+	int rx_buf_num;		// total number of rx buffers
+	int rx_buf_size;	// size of rx buffer
+	int tx_buf_num;		// total number of tx buffers
+	int tx_buf_size;	// size of tx buffer
+};
+
+struct hsuart_mode {
+	int speed;
+	int flags;
+};
+
+struct hsuart_stat {
+	unsigned long tx_bytes;
+	unsigned long rx_bytes;
+	unsigned long rx_dropped;
+};
+
+#define HSUART_MODE_LOOPBACK		(1 << 8)
+
+#define HSUART_MODE_FLOW_CTRL_BIT	(0)
+#define HSUART_MODE_FLOW_CTRL_MODE_MASK	(3 <<
HSUART_MODE_FLOW_CTRL_BIT) +#define HSUART_MODE_FLOW_CTRL_NONE (0 << HSUART_MODE_FLOW_CTRL_BIT) +#define HSUART_MODE_FLOW_CTRL_HW (1 << HSUART_MODE_FLOW_CTRL_BIT) +#define HSUART_MODE_FLOW_CTRL_SW (2 << HSUART_MODE_FLOW_CTRL_BIT) + +#define HSUART_MODE_FLOW_STATE_BIT (9) +#define HSUART_MODE_FLOW_STATE_MASK (1 << HSUART_MODE_FLOW_STATE_BIT) +#define HSUART_MODE_FLOW_STATE_ASSERT (0 << HSUART_MODE_FLOW_STATE_BIT) +#define HSUART_MODE_FLOW_STATE_DEASSERT (1 << HSUART_MODE_FLOW_STATE_BIT) + +#define HSUART_MODE_FLOW_DIRECTION_BIT (10) +#define HSUART_MODE_FLOW_DIRECTION_MASK (3 << HSUART_MODE_FLOW_DIRECTION_BIT) +#define HSUART_MODE_FLOW_DIRECTION_RX_TX (0 << HSUART_MODE_FLOW_DIRECTION_BIT) +#define HSUART_MODE_FLOW_DIRECTION_RX_ONLY (1 << HSUART_MODE_FLOW_DIRECTION_BIT) +#define HSUART_MODE_FLOW_DIRECTION_TX_ONLY (2 << HSUART_MODE_FLOW_DIRECTION_BIT) + +#define HSUART_MODE_FLOW_CTRL_MASK ( HSUART_MODE_FLOW_CTRL_MODE_MASK | HSUART_MODE_FLOW_STATE_MASK | HSUART_MODE_FLOW_DIRECTION_MASK ) + +#define HSUART_MODE_PARITY_BIT (2) +#define HSUART_MODE_PARITY_MASK (3 << HSUART_MODE_PARITY_BIT) +#define HSUART_MODE_PARITY_NONE (0 << HSUART_MODE_PARITY_BIT) +#define HSUART_MODE_PARITY_ODD (1 << HSUART_MODE_PARITY_BIT) +#define HSUART_MODE_PARITY_EVEN (2 << HSUART_MODE_PARITY_BIT) + + +/* IOCTLs */ +#define HSUART_IOCTL_GET_VERSION _IOR('h', 0x01, int) +#define HSUART_IOCTL_GET_BUF_INF _IOR('h', 0x02, struct hsuart_buf_inf ) +#define HSUART_IOCTL_GET_UARTMODE _IOR('h', 0x04, struct hsuart_mode ) +#define HSUART_IOCTL_SET_UARTMODE _IOW('h', 0x05, struct hsuart_mode ) +#define HSUART_IOCTL_RESET_UART _IO ('h', 0x06) +#define HSUART_IOCTL_CLEAR_FIFO _IOW('h', 0x07, int) // DEPRECATED use HSUART_IOCTL_FLUSH instead +#define HSUART_IOCTL_GET_STATS _IOW('h', 0x08, struct hsuart_stat ) +#define HSUART_IOCTL_SET_RXLAT _IOW('h', 0x09, int) +#define HSUART_IOCTL_TX_DRAIN _IOW('h', 0x0b, int) +#define HSUART_IOCTL_RX_BYTES _IOW('h', 0x0c, int) +#define HSUART_IOCTL_RX_FLOW _IOW('h', 0x0d, 
int) +#define HSUART_IOCTL_FLUSH _IOW('h', 0x0e, int) + +#ifdef __KERNEL__ + + +/* + * The UART port initialization and start receiving data is not done + * automatically when the driver is loaded but delayed to when the 'open' + * is called. + */ +#define HSUART_OPTION_DEFERRED_LOAD (1 << 0) +#define HSUART_OPTION_MODEM_DEVICE (1 << 1) +#define HSUART_OPTION_TX_PIO (1 << 2) +#define HSUART_OPTION_RX_PIO (1 << 3) +#define HSUART_OPTION_TX_DM (1 << 4) +#define HSUART_OPTION_RX_DM (1 << 5) +#define HSUART_OPTION_RX_FLUSH_QUEUE_ON_SUSPEND (1 << 6) +#define HSUART_OPTION_TX_FLUSH_QUEUE_ON_SUSPEND (1 << 7) +#define HSUART_OPTION_SCHED_RT (1 << 8) + +struct hsuart_platform_data { + const char *dev_name; + int uart_mode; // default uart mode + int uart_speed; // default uart speed + int options; // operation options + int tx_buf_size; // size of tx buffer + int tx_buf_num; // number of preallocated tx buffers + int rx_buf_size; // size of rx buffer + int rx_buf_num; // number of preallocated rx buffers + int max_packet_size; // max packet size + int min_packet_size; // min packet size + int rx_latency; // in bytes at current speed + int rts_pin; // uart rts line pin + char *rts_act_mode; // uart rts line active mode + char *rts_gpio_mode; // uart rts line gpio mode + int idle_timeout; // idle timeout + int idle_poll_timeout; // idle poll timeout + int dbg_level; // default debug level. 
+ + int (*p_board_pin_mux_cb) ( int on ); + int (*p_board_config_gsbi_cb) ( void ); + int (*p_board_rts_pin_deassert_cb) ( int deassert ); +}; + +#endif + +#endif // __HSUART_INCLUDED__ + + diff --git a/include/linux/user-pins.h b/include/linux/user-pins.h new file mode 100644 index 00000000000..499b250c11f --- /dev/null +++ b/include/linux/user-pins.h @@ -0,0 +1,44 @@ +#ifndef __USER_PINS_INCLUDED__ +#define __USER_PINS_INCLUDED__ + +typedef enum { + PIN_MODE_ACTIVE, + PIN_MODE_SUSPENDED +} PIN_MODE; + +struct user_pin { + const char *name; // pin name + int gpio; // gpio num/id + int options; // options + int act_level; // active level + int direction; // 1 - an input, 0 - output + int def_level; // default level: 0, 1 or -1 if undefined + int sysfs_mask; // sysfs file mode + char *pin_mode; // board specific pin mode + irqreturn_t (*irq_handler)(int irq, void *data); + int (*pinmux)(int gpio, int mode); + int irq_config; + int irq_handle_mode; +}; + +struct user_pin_set { + const char *set_name; // pin set name + int num_pins; // number of pins in the group + struct user_pin *pins; // pins array. +}; + +struct user_pins_platform_data { + int num_sets; // number of pin sets + struct user_pin_set *sets; // pin sets. +}; + +/* Pin option constants */ +#define PIN_READ_ONLY (1 << 0) // pin is read only +#define PIN_WAKEUP_SOURCE (1 << 1) // pin is a wakeup source +#define PIN_IRQ (1 << 2) // pin generates irq + +#define IRQ_HANDLE_NONE (0) // IRQ handling is not defined +#define IRQ_HANDLE_AUTO (1 << 0) // IRQ handling is automatic +#define IRQ_HANDLE_OFF (1 << 1) // IRQ handling is off + +#endif // __USER_PINS_INCLUDED__