Diffstat (limited to 'drivers/video')
29 files changed, 9714 insertions, 63 deletions
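The diff below adds the core ADF objects (devices, interfaces, overlay engines) and their registration entry points. As a reading aid, here is a minimal, hypothetical driver-side sketch (not part of the commit) of how those entry points fit together. All my_* identifiers are invented for illustration; the callback signatures are inferred from the calls made in adf.c and adf_client.c below, and the full ops/structure definitions live in <video/adf.h>, which is outside this drivers/video hunk. Module ownership, real validation, and hardware programming are omitted.

/*
 * Illustrative sketch only: registering a display device, one DSI
 * interface, and one overlay engine with ADF, then declaring that the
 * engine may scan out to the interface.
 */
#include <linux/module.h>
#include <linux/platform_device.h>
#include <drm/drm_fourcc.h>
#include <video/adf.h>

static struct adf_device my_adf_dev;
static struct adf_interface my_intf;
static struct adf_overlay_engine my_eng;

static const u32 my_formats[] = { DRM_FORMAT_XRGB8888 };

/* accept every configuration; a real driver checks cfg against hw limits */
static int my_validate(struct adf_device *dev, struct adf_post *cfg,
		void **driver_state)
{
	return 0;
}

/* program the scanout hardware from cfg->bufs[] / cfg->mappings[] */
static void my_post(struct adf_device *dev, struct adf_post *cfg,
		void *driver_state)
{
}

static const struct adf_device_ops my_dev_ops = {
	.validate = my_validate,	/* required by adf_device_init() */
	.post = my_post,		/* required by adf_device_init() */
};

static const struct adf_overlay_engine_ops my_eng_ops = {
	.supported_formats = my_formats,
	.n_supported_formats = ARRAY_SIZE(my_formats),
};

static int my_display_probe(struct platform_device *pdev)
{
	int err;

	err = adf_device_init(&my_adf_dev, &pdev->dev, &my_dev_ops, "my-dev");
	if (err < 0)
		return err;

	/* interface ops are optional; adf_interface_init() accepts NULL */
	err = adf_interface_init(&my_intf, &my_adf_dev, ADF_INTF_DSI, 0,
			ADF_INTF_FLAG_PRIMARY, NULL, "dsi.%u", 0);
	if (err < 0)
		goto err_intf;

	err = adf_overlay_engine_init(&my_eng, &my_adf_dev, &my_eng_ops, "ovl");
	if (err < 0)
		goto err_eng;

	/* declare that my_eng's output may be scanned out through my_intf */
	return adf_attachment_allow(&my_adf_dev, &my_eng, &my_intf);

err_eng:
	adf_interface_destroy(&my_intf);
err_intf:
	adf_device_destroy(&my_adf_dev);
	return err;
}

Once registered, in-kernel clients flip buffers with adf_device_post() or adf_interface_simple_post() from adf_client.c, and userspace reaches the same objects through the ioctl interface implemented in adf_fops.c and the sysfs nodes created by adf_sysfs.c.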
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig index 2e937bdace6..0838d4402dc 100644 --- a/drivers/video/Kconfig +++ b/drivers/video/Kconfig @@ -23,6 +23,8 @@ source "drivers/gpu/drm/Kconfig" source "drivers/gpu/host1x/Kconfig" +source "drivers/gpu/pvr/Kconfig" + config VGASTATE tristate default n @@ -2475,6 +2477,7 @@ source "drivers/video/omap2/Kconfig" source "drivers/video/exynos/Kconfig" source "drivers/video/mmp/Kconfig" source "drivers/video/backlight/Kconfig" +source "drivers/video/adf/Kconfig" if VT source "drivers/video/console/Kconfig" diff --git a/drivers/video/Makefile b/drivers/video/Makefile index e8bae8dd480..2babdef5fe0 100644 --- a/drivers/video/Makefile +++ b/drivers/video/Makefile @@ -12,6 +12,7 @@ fb-y := fbmem.o fbmon.o fbcmap.o fbsysfs.o \ modedb.o fbcvt.o fb-objs := $(fb-y) +obj-$(CONFIG_ADF) += adf/ obj-$(CONFIG_VT) += console/ obj-$(CONFIG_LOGO) += logo/ obj-y += backlight/ diff --git a/drivers/video/adf/Kconfig b/drivers/video/adf/Kconfig new file mode 100644 index 00000000000..33858b73d8b --- /dev/null +++ b/drivers/video/adf/Kconfig @@ -0,0 +1,14 @@ +menuconfig ADF + depends on SYNC + depends on DMA_SHARED_BUFFER + tristate "Atomic Display Framework" + +menuconfig ADF_FBDEV + depends on ADF + depends on FB + tristate "Helper for implementing the fbdev API in ADF drivers" + +menuconfig ADF_MEMBLOCK + depends on ADF + depends on HAVE_MEMBLOCK + tristate "Helper for using memblocks as buffers in ADF drivers" diff --git a/drivers/video/adf/Makefile b/drivers/video/adf/Makefile new file mode 100644 index 00000000000..78d0915122f --- /dev/null +++ b/drivers/video/adf/Makefile @@ -0,0 +1,15 @@ +ccflags-y := -Idrivers/staging/android + +CFLAGS_adf.o := -I$(src) + +obj-$(CONFIG_ADF) += adf.o \ + adf_client.o \ + adf_fops.o \ + adf_format.o \ + adf_sysfs.o + +obj-$(CONFIG_COMPAT) += adf_fops32.o + +obj-$(CONFIG_ADF_FBDEV) += adf_fbdev.o + +obj-$(CONFIG_ADF_MEMBLOCK) += adf_memblock.o diff --git a/drivers/video/adf/adf.c b/drivers/video/adf/adf.c new file mode 100644 index 00000000000..933e74ac809 --- /dev/null +++ b/drivers/video/adf/adf.c @@ -0,0 +1,1166 @@ +/* + * Copyright (C) 2013 Google, Inc. + * adf_modeinfo_{set_name,set_vrefresh} modified from + * drivers/gpu/drm/drm_modes.c + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include <linux/device.h> +#include <linux/idr.h> +#include <linux/highmem.h> +#include <linux/memblock.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/slab.h> + +#include <video/adf_format.h> + +#include "sw_sync.h" +#include "sync.h" + +#include "adf.h" +#include "adf_fops.h" +#include "adf_sysfs.h" + +#define CREATE_TRACE_POINTS +#include "adf_trace.h" + +#define ADF_SHORT_FENCE_TIMEOUT (1 * MSEC_PER_SEC) +#define ADF_LONG_FENCE_TIMEOUT (10 * MSEC_PER_SEC) + +static DEFINE_IDR(adf_devices); + +static void adf_fence_wait(struct adf_device *dev, struct sync_fence *fence) +{ + /* sync_fence_wait() dumps debug information on timeout. Experience + has shown that if the pipeline gets stuck, a short timeout followed + by a longer one provides useful information for debugging. 
*/ + int err = sync_fence_wait(fence, ADF_SHORT_FENCE_TIMEOUT); + if (err >= 0) + return; + + if (err == -ETIME) + err = sync_fence_wait(fence, ADF_LONG_FENCE_TIMEOUT); + + if (err < 0) + dev_warn(&dev->base.dev, "error waiting on fence: %d\n", err); +} + +void adf_buffer_cleanup(struct adf_buffer *buf) +{ + size_t i; + for (i = 0; i < ARRAY_SIZE(buf->dma_bufs); i++) + if (buf->dma_bufs[i]) + dma_buf_put(buf->dma_bufs[i]); + + if (buf->acquire_fence) + sync_fence_put(buf->acquire_fence); +} + +void adf_buffer_mapping_cleanup(struct adf_buffer_mapping *mapping, + struct adf_buffer *buf) +{ + /* calling adf_buffer_mapping_cleanup() is safe even if mapping is + uninitialized or partially-initialized, as long as it was + zeroed on allocation */ + size_t i; + for (i = 0; i < ARRAY_SIZE(mapping->sg_tables); i++) { + if (mapping->sg_tables[i]) + dma_buf_unmap_attachment(mapping->attachments[i], + mapping->sg_tables[i], DMA_TO_DEVICE); + if (mapping->attachments[i]) + dma_buf_detach(buf->dma_bufs[i], + mapping->attachments[i]); + } +} + +void adf_post_cleanup(struct adf_device *dev, struct adf_pending_post *post) +{ + size_t i; + + if (post->state) + dev->ops->state_free(dev, post->state); + + for (i = 0; i < post->config.n_bufs; i++) { + adf_buffer_mapping_cleanup(&post->config.mappings[i], + &post->config.bufs[i]); + adf_buffer_cleanup(&post->config.bufs[i]); + } + + kfree(post->config.custom_data); + kfree(post->config.mappings); + kfree(post->config.bufs); + kfree(post); +} + +static void adf_sw_advance_timeline(struct adf_device *dev) +{ +#ifdef CONFIG_SW_SYNC + sw_sync_timeline_inc(dev->timeline, 1); +#else + BUG(); +#endif +} + +static void adf_post_work_func(struct kthread_work *work) +{ + struct adf_device *dev = + container_of(work, struct adf_device, post_work); + struct adf_pending_post *post, *next; + struct list_head saved_list; + + mutex_lock(&dev->post_lock); + memcpy(&saved_list, &dev->post_list, sizeof(saved_list)); + list_replace_init(&dev->post_list, &saved_list); + mutex_unlock(&dev->post_lock); + + list_for_each_entry_safe(post, next, &saved_list, head) { + int i; + + for (i = 0; i < post->config.n_bufs; i++) { + struct sync_fence *fence = + post->config.bufs[i].acquire_fence; + if (fence) + adf_fence_wait(dev, fence); + } + + dev->ops->post(dev, &post->config, post->state); + + if (dev->ops->advance_timeline) + dev->ops->advance_timeline(dev, &post->config, + post->state); + else + adf_sw_advance_timeline(dev); + + list_del(&post->head); + if (dev->onscreen) + adf_post_cleanup(dev, dev->onscreen); + dev->onscreen = post; + } +} + +void adf_attachment_free(struct adf_attachment_list *attachment) +{ + list_del(&attachment->head); + kfree(attachment); +} + +struct adf_event_refcount *adf_obj_find_event_refcount(struct adf_obj *obj, + enum adf_event_type type) +{ + struct rb_root *root = &obj->event_refcount; + struct rb_node **new = &(root->rb_node); + struct rb_node *parent = NULL; + struct adf_event_refcount *refcount; + + while (*new) { + refcount = container_of(*new, struct adf_event_refcount, node); + parent = *new; + + if (refcount->type > type) + new = &(*new)->rb_left; + else if (refcount->type < type) + new = &(*new)->rb_right; + else + return refcount; + } + + refcount = kzalloc(sizeof(*refcount), GFP_KERNEL); + if (!refcount) + return NULL; + refcount->type = type; + + rb_link_node(&refcount->node, parent, new); + rb_insert_color(&refcount->node, root); + return refcount; +} + +/** + * adf_event_get - increase the refcount for an event + * + * @obj: the object that 
produces the event + * @type: the event type + * + * ADF will call the object's set_event() op if needed. ops are allowed + * to sleep, so adf_event_get() must NOT be called from an atomic context. + * + * Returns 0 if successful, or -%EINVAL if the object does not support the + * requested event type. + */ +int adf_event_get(struct adf_obj *obj, enum adf_event_type type) +{ + struct adf_event_refcount *refcount; + int old_refcount; + int ret; + + ret = adf_obj_check_supports_event(obj, type); + if (ret < 0) + return ret; + + mutex_lock(&obj->event_lock); + + refcount = adf_obj_find_event_refcount(obj, type); + if (!refcount) { + ret = -ENOMEM; + goto done; + } + + old_refcount = refcount->refcount++; + + if (old_refcount == 0) { + obj->ops->set_event(obj, type, true); + trace_adf_event_enable(obj, type); + } + +done: + mutex_unlock(&obj->event_lock); + return ret; +} +EXPORT_SYMBOL(adf_event_get); + +/** + * adf_event_put - decrease the refcount for an event + * + * @obj: the object that produces the event + * @type: the event type + * + * ADF will call the object's set_event() op if needed. ops are allowed + * to sleep, so adf_event_put() must NOT be called from an atomic context. + * + * Returns 0 if successful, -%EINVAL if the object does not support the + * requested event type, or -%EALREADY if the refcount is already 0. + */ +int adf_event_put(struct adf_obj *obj, enum adf_event_type type) +{ + struct adf_event_refcount *refcount; + int old_refcount; + int ret; + + ret = adf_obj_check_supports_event(obj, type); + if (ret < 0) + return ret; + + + mutex_lock(&obj->event_lock); + + refcount = adf_obj_find_event_refcount(obj, type); + if (!refcount) { + ret = -ENOMEM; + goto done; + } + + old_refcount = refcount->refcount--; + + if (WARN_ON(old_refcount == 0)) { + refcount->refcount++; + ret = -EALREADY; + } else if (old_refcount == 1) { + obj->ops->set_event(obj, type, false); + trace_adf_event_disable(obj, type); + } + +done: + mutex_unlock(&obj->event_lock); + return ret; +} +EXPORT_SYMBOL(adf_event_put); + +/** + * adf_vsync_wait - wait for a vsync event on a display interface + * + * @intf: the display interface + * @timeout: timeout in jiffies (0 = wait indefinitely) + * + * adf_vsync_wait() may sleep, so it must NOT be called from an atomic context. + * + * This function returns -%ERESTARTSYS if it is interrupted by a signal. + * If @timeout == 0 then this function returns 0 on vsync. If @timeout > 0 then + * this function returns the number of remaining jiffies or -%ETIMEDOUT on + * timeout. 
+ */ +int adf_vsync_wait(struct adf_interface *intf, long timeout) +{ + ktime_t timestamp; + int ret; + unsigned long flags; + + read_lock_irqsave(&intf->vsync_lock, flags); + timestamp = intf->vsync_timestamp; + read_unlock_irqrestore(&intf->vsync_lock, flags); + + adf_vsync_get(intf); + if (timeout) { + ret = wait_event_interruptible_timeout(intf->vsync_wait, + !ktime_equal(timestamp, + intf->vsync_timestamp), + msecs_to_jiffies(timeout)); + if (ret == 0 && ktime_equal(timestamp, intf->vsync_timestamp)) + ret = -ETIMEDOUT; + } else { + ret = wait_event_interruptible(intf->vsync_wait, + !ktime_equal(timestamp, + intf->vsync_timestamp)); + } + adf_vsync_put(intf); + + return ret; +} +EXPORT_SYMBOL(adf_vsync_wait); + +static void adf_event_queue(struct adf_obj *obj, struct adf_event *event) +{ + struct adf_file *file; + unsigned long flags; + + trace_adf_event(obj, event->type); + + spin_lock_irqsave(&obj->file_lock, flags); + + list_for_each_entry(file, &obj->file_list, head) + if (test_bit(event->type, file->event_subscriptions)) + adf_file_queue_event(file, event); + + spin_unlock_irqrestore(&obj->file_lock, flags); +} + +/** + * adf_event_notify - notify userspace of a driver-private event + * + * @obj: the ADF object that produced the event + * @event: the event + * + * adf_event_notify() may be called safely from an atomic context. It will + * copy @event if needed, so @event may point to a variable on the stack. + * + * Drivers must NOT call adf_event_notify() for vsync and hotplug events. + * ADF provides adf_vsync_notify() and + * adf_hotplug_notify_{connected,disconnected}() for these events. + */ +int adf_event_notify(struct adf_obj *obj, struct adf_event *event) +{ + if (WARN_ON(event->type == ADF_EVENT_VSYNC || + event->type == ADF_EVENT_HOTPLUG)) + return -EINVAL; + + adf_event_queue(obj, event); + return 0; +} +EXPORT_SYMBOL(adf_event_notify); + +/** + * adf_vsync_notify - notify ADF of a display interface's vsync event + * + * @intf: the display interface + * @timestamp: the time the vsync occurred + * + * adf_vsync_notify() may be called safely from an atomic context. 
+ */ +void adf_vsync_notify(struct adf_interface *intf, ktime_t timestamp) +{ + unsigned long flags; + struct adf_vsync_event event; + + write_lock_irqsave(&intf->vsync_lock, flags); + intf->vsync_timestamp = timestamp; + write_unlock_irqrestore(&intf->vsync_lock, flags); + + wake_up_interruptible_all(&intf->vsync_wait); + + event.base.type = ADF_EVENT_VSYNC; + event.base.length = sizeof(event); + event.timestamp = ktime_to_ns(timestamp); + adf_event_queue(&intf->base, &event.base); +} +EXPORT_SYMBOL(adf_vsync_notify); + +void adf_hotplug_notify(struct adf_interface *intf, bool connected, + struct drm_mode_modeinfo *modelist, size_t n_modes) +{ + unsigned long flags; + struct adf_hotplug_event event; + struct drm_mode_modeinfo *old_modelist; + + write_lock_irqsave(&intf->hotplug_modelist_lock, flags); + old_modelist = intf->modelist; + intf->hotplug_detect = connected; + intf->modelist = modelist; + intf->n_modes = n_modes; + write_unlock_irqrestore(&intf->hotplug_modelist_lock, flags); + + kfree(old_modelist); + + event.base.length = sizeof(event); + event.base.type = ADF_EVENT_HOTPLUG; + event.connected = connected; + adf_event_queue(&intf->base, &event.base); +} + +/** + * adf_hotplug_notify_connected - notify ADF of a display interface being + * connected to a display + * + * @intf: the display interface + * @modelist: hardware modes supported by display + * @n_modes: length of modelist + * + * @modelist is copied as needed, so it may point to a variable on the stack. + * + * adf_hotplug_notify_connected() may NOT be called safely from an atomic + * context. + * + * Returns 0 on success or error code (<0) on error. + */ +int adf_hotplug_notify_connected(struct adf_interface *intf, + struct drm_mode_modeinfo *modelist, size_t n_modes) +{ + struct drm_mode_modeinfo *modelist_copy; + + if (n_modes > ADF_MAX_MODES) + return -ENOMEM; + + modelist_copy = kzalloc(sizeof(modelist_copy[0]) * n_modes, + GFP_KERNEL); + if (!modelist_copy) + return -ENOMEM; + memcpy(modelist_copy, modelist, sizeof(modelist_copy[0]) * n_modes); + + adf_hotplug_notify(intf, true, modelist_copy, n_modes); + return 0; +} +EXPORT_SYMBOL(adf_hotplug_notify_connected); + +/** + * adf_hotplug_notify_disconnected - notify ADF of a display interface being + * disconnected from a display + * + * @intf: the display interface + * + * adf_hotplug_notify_disconnected() may be called safely from an atomic + * context. 
+ */ +void adf_hotplug_notify_disconnected(struct adf_interface *intf) +{ + adf_hotplug_notify(intf, false, NULL, 0); +} +EXPORT_SYMBOL(adf_hotplug_notify_disconnected); + +static int adf_obj_init(struct adf_obj *obj, enum adf_obj_type type, + struct idr *idr, struct adf_device *parent, + const struct adf_obj_ops *ops, const char *fmt, va_list args) +{ + int ret; + + if (ops && ops->supports_event && !ops->set_event) { + pr_err("%s: %s implements supports_event but not set_event\n", + __func__, adf_obj_type_str(type)); + return -EINVAL; + } + + ret = idr_alloc(idr, obj, 0, 0, GFP_KERNEL); + if (ret < 0) { + pr_err("%s: allocating object id failed: %d\n", __func__, ret); + return ret; + } + obj->id = ret; + + vscnprintf(obj->name, sizeof(obj->name), fmt, args); + + obj->type = type; + obj->ops = ops; + obj->parent = parent; + mutex_init(&obj->event_lock); + obj->event_refcount = RB_ROOT; + spin_lock_init(&obj->file_lock); + INIT_LIST_HEAD(&obj->file_list); + return 0; +} + +static void adf_obj_destroy(struct adf_obj *obj, struct idr *idr) +{ + struct rb_node *node = rb_first(&obj->event_refcount); + + while (node) { + struct adf_event_refcount *refcount = + container_of(node, struct adf_event_refcount, + node); + kfree(refcount); + node = rb_first(&obj->event_refcount); + } + + mutex_destroy(&obj->event_lock); + idr_remove(idr, obj->id); +} + +/** + * adf_device_init - initialize ADF-internal data for a display device + * and create sysfs entries + * + * @dev: the display device + * @parent: the device's parent device + * @ops: the device's associated ops + * @fmt: formatting string for the display device's name + * + * @fmt specifies the device's sysfs filename and the name returned to + * userspace through the %ADF_GET_DEVICE_DATA ioctl. + * + * Returns 0 on success or error code (<0) on failure. + */ +int adf_device_init(struct adf_device *dev, struct device *parent, + const struct adf_device_ops *ops, const char *fmt, ...) 
+{ + int ret; + va_list args; + + if (!ops->validate || !ops->post) { + pr_err("%s: device must implement validate and post\n", + __func__); + return -EINVAL; + } + + if (!ops->complete_fence && !ops->advance_timeline) { + if (!IS_ENABLED(CONFIG_SW_SYNC)) { + pr_err("%s: device requires sw_sync but it is not enabled in the kernel\n", + __func__); + return -EINVAL; + } + } else if (!(ops->complete_fence && ops->advance_timeline)) { + pr_err("%s: device must implement both complete_fence and advance_timeline, or implement neither\n", + __func__); + return -EINVAL; + } + + memset(dev, 0, sizeof(*dev)); + + va_start(args, fmt); + ret = adf_obj_init(&dev->base, ADF_OBJ_DEVICE, &adf_devices, dev, + &ops->base, fmt, args); + va_end(args); + if (ret < 0) + return ret; + + dev->dev = parent; + dev->ops = ops; + idr_init(&dev->overlay_engines); + idr_init(&dev->interfaces); + mutex_init(&dev->client_lock); + INIT_LIST_HEAD(&dev->post_list); + mutex_init(&dev->post_lock); + init_kthread_worker(&dev->post_worker); + INIT_LIST_HEAD(&dev->attached); + INIT_LIST_HEAD(&dev->attach_allowed); + + dev->post_thread = kthread_run(kthread_worker_fn, + &dev->post_worker, dev->base.name); + if (IS_ERR(dev->post_thread)) { + ret = PTR_ERR(dev->post_thread); + dev->post_thread = NULL; + + pr_err("%s: failed to run config posting thread: %d\n", + __func__, ret); + goto err; + } + init_kthread_work(&dev->post_work, adf_post_work_func); + + ret = adf_device_sysfs_init(dev); + if (ret < 0) + goto err; + + return 0; + +err: + adf_device_destroy(dev); + return ret; +} +EXPORT_SYMBOL(adf_device_init); + +/** + * adf_device_destroy - clean up ADF-internal data for a display device + * + * @dev: the display device + */ +void adf_device_destroy(struct adf_device *dev) +{ + struct adf_attachment_list *entry, *next; + + idr_destroy(&dev->interfaces); + idr_destroy(&dev->overlay_engines); + + if (dev->post_thread) { + flush_kthread_worker(&dev->post_worker); + kthread_stop(dev->post_thread); + } + + if (dev->onscreen) + adf_post_cleanup(dev, dev->onscreen); + adf_device_sysfs_destroy(dev); + list_for_each_entry_safe(entry, next, &dev->attach_allowed, head) { + adf_attachment_free(entry); + } + list_for_each_entry_safe(entry, next, &dev->attached, head) { + adf_attachment_free(entry); + } + mutex_destroy(&dev->post_lock); + mutex_destroy(&dev->client_lock); + adf_obj_destroy(&dev->base, &adf_devices); +} +EXPORT_SYMBOL(adf_device_destroy); + +/** + * adf_interface_init - initialize ADF-internal data for a display interface + * and create sysfs entries + * + * @intf: the display interface + * @dev: the interface's "parent" display device + * @type: interface type (see enum @adf_interface_type) + * @idx: which interface of type @type; + * e.g. interface DSI.1 -> @type=%ADF_INTF_TYPE_DSI, @idx=1 + * @flags: informational flags (bitmask of %ADF_INTF_FLAG_* values) + * @ops: the interface's associated ops + * @fmt: formatting string for the display interface's name + * + * @dev must have previously been initialized with adf_device_init(). + * + * @fmt affects the name returned to userspace through the + * %ADF_GET_INTERFACE_DATA ioctl. It does not affect the sysfs filename, + * which is derived from @dev's name. + * + * Returns 0 on success or error code (<0) on failure. + */ +int adf_interface_init(struct adf_interface *intf, struct adf_device *dev, + enum adf_interface_type type, u32 idx, u32 flags, + const struct adf_interface_ops *ops, const char *fmt, ...) 
+{ + int ret; + va_list args; + const u32 allowed_flags = ADF_INTF_FLAG_PRIMARY | + ADF_INTF_FLAG_EXTERNAL; + + if (dev->n_interfaces == ADF_MAX_INTERFACES) { + pr_err("%s: parent device %s has too many interfaces\n", + __func__, dev->base.name); + return -ENOMEM; + } + + if (type >= ADF_INTF_MEMORY && type <= ADF_INTF_TYPE_DEVICE_CUSTOM) { + pr_err("%s: invalid interface type %u\n", __func__, type); + return -EINVAL; + } + + if (flags & ~allowed_flags) { + pr_err("%s: invalid interface flags 0x%X\n", __func__, + flags & ~allowed_flags); + return -EINVAL; + } + + memset(intf, 0, sizeof(*intf)); + + va_start(args, fmt); + ret = adf_obj_init(&intf->base, ADF_OBJ_INTERFACE, &dev->interfaces, + dev, ops ? &ops->base : NULL, fmt, args); + va_end(args); + if (ret < 0) + return ret; + + intf->type = type; + intf->idx = idx; + intf->flags = flags; + intf->ops = ops; + intf->dpms_state = DRM_MODE_DPMS_OFF; + init_waitqueue_head(&intf->vsync_wait); + rwlock_init(&intf->vsync_lock); + rwlock_init(&intf->hotplug_modelist_lock); + + ret = adf_interface_sysfs_init(intf); + if (ret < 0) + goto err; + dev->n_interfaces++; + + return 0; + +err: + adf_obj_destroy(&intf->base, &dev->interfaces); + return ret; +} +EXPORT_SYMBOL(adf_interface_init); + +/** + * adf_interface_destroy - clean up ADF-internal data for a display interface + * + * @intf: the display interface + */ +void adf_interface_destroy(struct adf_interface *intf) +{ + struct adf_device *dev = adf_interface_parent(intf); + struct adf_attachment_list *entry, *next; + + mutex_lock(&dev->client_lock); + list_for_each_entry_safe(entry, next, &dev->attach_allowed, head) { + if (entry->attachment.interface == intf) { + adf_attachment_free(entry); + dev->n_attach_allowed--; + } + } + list_for_each_entry_safe(entry, next, &dev->attached, head) { + if (entry->attachment.interface == intf) { + adf_device_detach_op(dev, + entry->attachment.overlay_engine, intf); + adf_attachment_free(entry); + dev->n_attached--; + } + } + kfree(intf->modelist); + adf_interface_sysfs_destroy(intf); + adf_obj_destroy(&intf->base, &dev->interfaces); + dev->n_interfaces--; + mutex_unlock(&dev->client_lock); +} +EXPORT_SYMBOL(adf_interface_destroy); + +static bool adf_overlay_engine_has_custom_formats( + const struct adf_overlay_engine_ops *ops) +{ + size_t i; + for (i = 0; i < ops->n_supported_formats; i++) + if (!adf_format_is_standard(ops->supported_formats[i])) + return true; + return false; +} + +/** + * adf_overlay_engine_init - initialize ADF-internal data for an + * overlay engine and create sysfs entries + * + * @eng: the overlay engine + * @dev: the overlay engine's "parent" display device + * @ops: the overlay engine's associated ops + * @fmt: formatting string for the overlay engine's name + * + * @dev must have previously been initialized with adf_device_init(). + * + * @fmt affects the name returned to userspace through the + * %ADF_GET_OVERLAY_ENGINE_DATA ioctl. It does not affect the sysfs filename, + * which is derived from @dev's name. + * + * Returns 0 on success or error code (<0) on failure. + */ +int adf_overlay_engine_init(struct adf_overlay_engine *eng, + struct adf_device *dev, + const struct adf_overlay_engine_ops *ops, const char *fmt, ...) 
+{ + int ret; + va_list args; + + if (!ops->supported_formats) { + pr_err("%s: overlay engine must support at least one format\n", + __func__); + return -EINVAL; + } + + if (ops->n_supported_formats > ADF_MAX_SUPPORTED_FORMATS) { + pr_err("%s: overlay engine supports too many formats\n", + __func__); + return -EINVAL; + } + + if (adf_overlay_engine_has_custom_formats(ops) && + !dev->ops->validate_custom_format) { + pr_err("%s: overlay engine has custom formats but parent device %s does not implement validate_custom_format\n", + __func__, dev->base.name); + return -EINVAL; + } + + memset(eng, 0, sizeof(*eng)); + + va_start(args, fmt); + ret = adf_obj_init(&eng->base, ADF_OBJ_OVERLAY_ENGINE, + &dev->overlay_engines, dev, &ops->base, fmt, args); + va_end(args); + if (ret < 0) + return ret; + + eng->ops = ops; + + ret = adf_overlay_engine_sysfs_init(eng); + if (ret < 0) + goto err; + + return 0; + +err: + adf_obj_destroy(&eng->base, &dev->overlay_engines); + return ret; +} +EXPORT_SYMBOL(adf_overlay_engine_init); + +/** + * adf_interface_destroy - clean up ADF-internal data for an overlay engine + * + * @eng: the overlay engine + */ +void adf_overlay_engine_destroy(struct adf_overlay_engine *eng) +{ + struct adf_device *dev = adf_overlay_engine_parent(eng); + struct adf_attachment_list *entry, *next; + + mutex_lock(&dev->client_lock); + list_for_each_entry_safe(entry, next, &dev->attach_allowed, head) { + if (entry->attachment.overlay_engine == eng) { + adf_attachment_free(entry); + dev->n_attach_allowed--; + } + } + list_for_each_entry_safe(entry, next, &dev->attached, head) { + if (entry->attachment.overlay_engine == eng) { + adf_device_detach_op(dev, eng, + entry->attachment.interface); + adf_attachment_free(entry); + dev->n_attached--; + } + } + adf_overlay_engine_sysfs_destroy(eng); + adf_obj_destroy(&eng->base, &dev->overlay_engines); + mutex_unlock(&dev->client_lock); +} +EXPORT_SYMBOL(adf_overlay_engine_destroy); + +struct adf_attachment_list *adf_attachment_find(struct list_head *list, + struct adf_overlay_engine *eng, struct adf_interface *intf) +{ + struct adf_attachment_list *entry; + list_for_each_entry(entry, list, head) { + if (entry->attachment.interface == intf && + entry->attachment.overlay_engine == eng) + return entry; + } + return NULL; +} + +int adf_attachment_validate(struct adf_device *dev, + struct adf_overlay_engine *eng, struct adf_interface *intf) +{ + struct adf_device *intf_dev = adf_interface_parent(intf); + struct adf_device *eng_dev = adf_overlay_engine_parent(eng); + + if (intf_dev != dev) { + dev_err(&dev->base.dev, "can't attach interface %s belonging to device %s\n", + intf->base.name, intf_dev->base.name); + return -EINVAL; + } + + if (eng_dev != dev) { + dev_err(&dev->base.dev, "can't attach overlay engine %s belonging to device %s\n", + eng->base.name, eng_dev->base.name); + return -EINVAL; + } + + return 0; +} + +/** + * adf_attachment_allow - add a new entry to the list of allowed + * attachments + * + * @dev: the parent device + * @eng: the overlay engine + * @intf: the interface + * + * adf_attachment_allow() indicates that the underlying display hardware allows + * @intf to scan out @eng's output. It is intended to be called at + * driver initialization for each supported overlay engine + interface pair. + * + * Returns 0 on success, -%EALREADY if the entry already exists, or -errno on + * any other failure. 
+ */ +int adf_attachment_allow(struct adf_device *dev, + struct adf_overlay_engine *eng, struct adf_interface *intf) +{ + int ret; + struct adf_attachment_list *entry = NULL; + + ret = adf_attachment_validate(dev, eng, intf); + if (ret < 0) + return ret; + + mutex_lock(&dev->client_lock); + + if (dev->n_attach_allowed == ADF_MAX_ATTACHMENTS) { + ret = -ENOMEM; + goto done; + } + + if (adf_attachment_find(&dev->attach_allowed, eng, intf)) { + ret = -EALREADY; + goto done; + } + + entry = kzalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) { + ret = -ENOMEM; + goto done; + } + + entry->attachment.interface = intf; + entry->attachment.overlay_engine = eng; + list_add_tail(&entry->head, &dev->attach_allowed); + dev->n_attach_allowed++; + +done: + mutex_unlock(&dev->client_lock); + if (ret < 0) + kfree(entry); + + return ret; +} + +/** + * adf_obj_type_str - string representation of an adf_obj_type + * + * @type: the object type + */ +const char *adf_obj_type_str(enum adf_obj_type type) +{ + switch (type) { + case ADF_OBJ_OVERLAY_ENGINE: + return "overlay engine"; + + case ADF_OBJ_INTERFACE: + return "interface"; + + case ADF_OBJ_DEVICE: + return "device"; + + default: + return "unknown"; + } +} +EXPORT_SYMBOL(adf_obj_type_str); + +/** + * adf_interface_type_str - string representation of an adf_interface's type + * + * @intf: the interface + */ +const char *adf_interface_type_str(struct adf_interface *intf) +{ + switch (intf->type) { + case ADF_INTF_DSI: + return "DSI"; + + case ADF_INTF_eDP: + return "eDP"; + + case ADF_INTF_DPI: + return "DPI"; + + case ADF_INTF_VGA: + return "VGA"; + + case ADF_INTF_DVI: + return "DVI"; + + case ADF_INTF_HDMI: + return "HDMI"; + + case ADF_INTF_MEMORY: + return "memory"; + + default: + if (intf->type >= ADF_INTF_TYPE_DEVICE_CUSTOM) { + if (intf->ops && intf->ops->type_str) + return intf->ops->type_str(intf); + return "custom"; + } + return "unknown"; + } +} +EXPORT_SYMBOL(adf_interface_type_str); + +/** + * adf_event_type_str - string representation of an adf_event_type + * + * @obj: ADF object that produced the event + * @type: event type + */ +const char *adf_event_type_str(struct adf_obj *obj, enum adf_event_type type) +{ + switch (type) { + case ADF_EVENT_VSYNC: + return "vsync"; + + case ADF_EVENT_HOTPLUG: + return "hotplug"; + + default: + if (type >= ADF_EVENT_DEVICE_CUSTOM) { + if (obj->ops && obj->ops->event_type_str) + return obj->ops->event_type_str(obj, type); + return "custom"; + } + return "unknown"; + } +} +EXPORT_SYMBOL(adf_event_type_str); + +/** + * adf_format_str - string representation of an ADF/DRM fourcc format + * + * @format: format fourcc + * @buf: target buffer for the format's string representation + */ +void adf_format_str(u32 format, char buf[ADF_FORMAT_STR_SIZE]) +{ + buf[0] = format & 0xFF; + buf[1] = (format >> 8) & 0xFF; + buf[2] = (format >> 16) & 0xFF; + buf[3] = (format >> 24) & 0xFF; + buf[4] = '\0'; +} +EXPORT_SYMBOL(adf_format_str); + +/** + * adf_format_validate_yuv - validate the number and size of planes in buffers + * with a custom YUV format. + * + * @dev: ADF device performing the validation + * @buf: buffer to validate + * @num_planes: expected number of planes + * @hsub: expected horizontal chroma subsampling factor, in pixels + * @vsub: expected vertical chroma subsampling factor, in pixels + * @cpp: expected bytes per pixel for each plane (length @num_planes) + * + * adf_format_validate_yuv() is intended to be called as a helper from @dev's + * validate_custom_format() op. 
+ * + * Returns 0 if @buf has the expected number of planes and each plane + * has sufficient size, or -EINVAL otherwise. + */ +int adf_format_validate_yuv(struct adf_device *dev, struct adf_buffer *buf, + u8 num_planes, u8 hsub, u8 vsub, u8 cpp[]) +{ + u8 i; + + if (num_planes != buf->n_planes) { + char format_str[ADF_FORMAT_STR_SIZE]; + adf_format_str(buf->format, format_str); + dev_err(&dev->base.dev, "%u planes expected for format %s but %u planes provided\n", + num_planes, format_str, buf->n_planes); + return -EINVAL; + } + + if (buf->w == 0 || buf->w % hsub) { + dev_err(&dev->base.dev, "bad buffer width %u\n", buf->w); + return -EINVAL; + } + + if (buf->h == 0 || buf->h % vsub) { + dev_err(&dev->base.dev, "bad buffer height %u\n", buf->h); + return -EINVAL; + } + + for (i = 0; i < num_planes; i++) { + u32 width = buf->w / (i != 0 ? hsub : 1); + u32 height = buf->h / (i != 0 ? vsub : 1); + u8 cpp = adf_format_plane_cpp(buf->format, i); + + if (buf->pitch[i] < (u64) width * cpp) { + dev_err(&dev->base.dev, "plane %u pitch is shorter than buffer width (pitch = %u, width = %u, bpp = %u)\n", + i, buf->pitch[i], width, cpp * 8); + return -EINVAL; + } + + if ((u64) height * buf->pitch[i] + buf->offset[i] > + buf->dma_bufs[i]->size) { + dev_err(&dev->base.dev, "plane %u buffer too small (height = %u, pitch = %u, offset = %u, size = %zu)\n", + i, height, buf->pitch[i], + buf->offset[i], buf->dma_bufs[i]->size); + return -EINVAL; + } + } + + return 0; +} +EXPORT_SYMBOL(adf_format_validate_yuv); + +/** + * adf_modeinfo_set_name - sets the name of a mode from its display resolution + * + * @mode: mode + * + * adf_modeinfo_set_name() fills in @mode->name in the format + * "[hdisplay]x[vdisplay](i)". It is intended to help drivers create + * ADF/DRM-style modelists from other mode formats. + */ +void adf_modeinfo_set_name(struct drm_mode_modeinfo *mode) +{ + bool interlaced = mode->flags & DRM_MODE_FLAG_INTERLACE; + + snprintf(mode->name, DRM_DISPLAY_MODE_LEN, "%dx%d%s", + mode->hdisplay, mode->vdisplay, + interlaced ? "i" : ""); +} +EXPORT_SYMBOL(adf_modeinfo_set_name); + +/** + * adf_modeinfo_set_vrefresh - sets the vrefresh of a mode from its other + * timing data + * + * @mode: mode + * + * adf_modeinfo_set_vrefresh() calculates @mode->vrefresh from + * @mode->{h,v}display and @mode->flags. It is intended to help drivers + * create ADF/DRM-style modelists from other mode formats. + */ +void adf_modeinfo_set_vrefresh(struct drm_mode_modeinfo *mode) +{ + int refresh = 0; + unsigned int calc_val; + + if (mode->vrefresh > 0) + return; + + if (mode->htotal <= 0 || mode->vtotal <= 0) + return; + + /* work out vrefresh the value will be x1000 */ + calc_val = (mode->clock * 1000); + calc_val /= mode->htotal; + refresh = (calc_val + mode->vtotal / 2) / mode->vtotal; + + if (mode->flags & DRM_MODE_FLAG_INTERLACE) + refresh *= 2; + if (mode->flags & DRM_MODE_FLAG_DBLSCAN) + refresh /= 2; + if (mode->vscan > 1) + refresh /= mode->vscan; + + mode->vrefresh = refresh; +} +EXPORT_SYMBOL(adf_modeinfo_set_vrefresh); + +static int __init adf_init(void) +{ + int err; + + err = adf_sysfs_init(); + if (err < 0) + return err; + + return 0; +} + +static void __exit adf_exit(void) +{ + adf_sysfs_destroy(); +} + +module_init(adf_init); +module_exit(adf_exit); diff --git a/drivers/video/adf/adf.h b/drivers/video/adf/adf.h new file mode 100644 index 00000000000..3bcf1fabc23 --- /dev/null +++ b/drivers/video/adf/adf.h @@ -0,0 +1,71 @@ +/* + * Copyright (C) 2013 Google, Inc. 
+ * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef __VIDEO_ADF_ADF_H +#define __VIDEO_ADF_ADF_H + +#include <linux/idr.h> +#include <linux/list.h> +#include <video/adf.h> +#include "sync.h" + +struct adf_event_refcount { + struct rb_node node; + enum adf_event_type type; + int refcount; +}; + +void adf_buffer_cleanup(struct adf_buffer *buf); +void adf_buffer_mapping_cleanup(struct adf_buffer_mapping *mapping, + struct adf_buffer *buf); +void adf_post_cleanup(struct adf_device *dev, struct adf_pending_post *post); + +struct adf_attachment_list *adf_attachment_find(struct list_head *list, + struct adf_overlay_engine *eng, struct adf_interface *intf); +int adf_attachment_validate(struct adf_device *dev, + struct adf_overlay_engine *eng, struct adf_interface *intf); +void adf_attachment_free(struct adf_attachment_list *attachment); + +struct adf_event_refcount *adf_obj_find_event_refcount(struct adf_obj *obj, + enum adf_event_type type); + +static inline int adf_obj_check_supports_event(struct adf_obj *obj, + enum adf_event_type type) +{ + if (!obj->ops || !obj->ops->supports_event) + return -EOPNOTSUPP; + if (!obj->ops->supports_event(obj, type)) + return -EINVAL; + return 0; +} + +static inline int adf_device_attach_op(struct adf_device *dev, + struct adf_overlay_engine *eng, struct adf_interface *intf) +{ + if (!dev->ops->attach) + return 0; + + return dev->ops->attach(dev, eng, intf); +} + +static inline int adf_device_detach_op(struct adf_device *dev, + struct adf_overlay_engine *eng, struct adf_interface *intf) +{ + if (!dev->ops->detach) + return 0; + + return dev->ops->detach(dev, eng, intf); +} + +#endif /* __VIDEO_ADF_ADF_H */ diff --git a/drivers/video/adf/adf_client.c b/drivers/video/adf/adf_client.c new file mode 100644 index 00000000000..8061d8e6b9f --- /dev/null +++ b/drivers/video/adf/adf_client.c @@ -0,0 +1,811 @@ +/* + * Copyright (C) 2013 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include <linux/kthread.h> +#include <linux/mutex.h> +#include <linux/slab.h> + +#include "sw_sync.h" + +#include <video/adf.h> +#include <video/adf_client.h> +#include <video/adf_format.h> + +#include "adf.h" + +static inline bool vsync_active(u8 state) +{ + return state == DRM_MODE_DPMS_ON || state == DRM_MODE_DPMS_STANDBY; +} + +/** + * adf_interface_blank - set interface's DPMS state + * + * @intf: the interface + * @state: one of %DRM_MODE_DPMS_* + * + * Returns 0 on success or -errno on failure. 
+ */ +int adf_interface_blank(struct adf_interface *intf, u8 state) +{ + struct adf_device *dev = adf_interface_parent(intf); + u8 prev_state; + bool disable_vsync; + bool enable_vsync; + int ret = 0; + struct adf_event_refcount *vsync_refcount; + + if (!intf->ops || !intf->ops->blank) + return -EOPNOTSUPP; + + if (state > DRM_MODE_DPMS_OFF) + return -EINVAL; + + mutex_lock(&dev->client_lock); + if (state != DRM_MODE_DPMS_ON) + flush_kthread_worker(&dev->post_worker); + mutex_lock(&intf->base.event_lock); + + vsync_refcount = adf_obj_find_event_refcount(&intf->base, + ADF_EVENT_VSYNC); + if (!vsync_refcount) { + ret = -ENOMEM; + goto done; + } + + prev_state = intf->dpms_state; + if (prev_state == state) { + ret = -EBUSY; + goto done; + } + + disable_vsync = vsync_active(prev_state) && + !vsync_active(state) && + vsync_refcount->refcount; + enable_vsync = !vsync_active(prev_state) && + vsync_active(state) && + vsync_refcount->refcount; + + if (disable_vsync) + intf->base.ops->set_event(&intf->base, ADF_EVENT_VSYNC, + false); + + ret = intf->ops->blank(intf, state); + if (ret < 0) { + if (disable_vsync) + intf->base.ops->set_event(&intf->base, ADF_EVENT_VSYNC, + true); + goto done; + } + + if (enable_vsync) + intf->base.ops->set_event(&intf->base, ADF_EVENT_VSYNC, + true); + + intf->dpms_state = state; +done: + mutex_unlock(&intf->base.event_lock); + mutex_unlock(&dev->client_lock); + return ret; +} +EXPORT_SYMBOL(adf_interface_blank); + +/** + * adf_interface_blank - get interface's current DPMS state + * + * @intf: the interface + * + * Returns one of %DRM_MODE_DPMS_*. + */ +u8 adf_interface_dpms_state(struct adf_interface *intf) +{ + struct adf_device *dev = adf_interface_parent(intf); + u8 dpms_state; + + mutex_lock(&dev->client_lock); + dpms_state = intf->dpms_state; + mutex_unlock(&dev->client_lock); + + return dpms_state; +} +EXPORT_SYMBOL(adf_interface_dpms_state); + +/** + * adf_interface_current_mode - get interface's current display mode + * + * @intf: the interface + * @mode: returns the current mode + */ +void adf_interface_current_mode(struct adf_interface *intf, + struct drm_mode_modeinfo *mode) +{ + struct adf_device *dev = adf_interface_parent(intf); + + mutex_lock(&dev->client_lock); + memcpy(mode, &intf->current_mode, sizeof(*mode)); + mutex_unlock(&dev->client_lock); +} +EXPORT_SYMBOL(adf_interface_current_mode); + +/** + * adf_interface_modelist - get interface's modelist + * + * @intf: the interface + * @modelist: storage for the modelist (optional) + * @n_modes: length of @modelist + * + * If @modelist is not NULL, adf_interface_modelist() will copy up to @n_modes + * modelist entries into @modelist. + * + * Returns the length of the modelist. + */ +size_t adf_interface_modelist(struct adf_interface *intf, + struct drm_mode_modeinfo *modelist, size_t n_modes) +{ + unsigned long flags; + size_t retval; + + read_lock_irqsave(&intf->hotplug_modelist_lock, flags); + if (modelist) + memcpy(modelist, intf->modelist, sizeof(modelist[0]) * + min(n_modes, intf->n_modes)); + retval = intf->n_modes; + read_unlock_irqrestore(&intf->hotplug_modelist_lock, flags); + + return retval; +} +EXPORT_SYMBOL(adf_interface_modelist); + +/** + * adf_interface_set_mode - set interface's display mode + * + * @intf: the interface + * @mode: the new mode + * + * Returns 0 on success or -errno on failure. 
+ */ +int adf_interface_set_mode(struct adf_interface *intf, + struct drm_mode_modeinfo *mode) +{ + struct adf_device *dev = adf_interface_parent(intf); + int ret = 0; + + if (!intf->ops || !intf->ops->modeset) + return -EOPNOTSUPP; + + mutex_lock(&dev->client_lock); + flush_kthread_worker(&dev->post_worker); + + ret = intf->ops->modeset(intf, mode); + if (ret < 0) + goto done; + + memcpy(&intf->current_mode, mode, sizeof(*mode)); +done: + mutex_unlock(&dev->client_lock); + return ret; +} +EXPORT_SYMBOL(adf_interface_set_mode); + +/** + * adf_interface_screen_size - get size of screen connected to interface + * + * @intf: the interface + * @width_mm: returns the screen width in mm + * @height_mm: returns the screen width in mm + * + * Returns 0 on success or -errno on failure. + */ +int adf_interface_get_screen_size(struct adf_interface *intf, u16 *width_mm, + u16 *height_mm) +{ + struct adf_device *dev = adf_interface_parent(intf); + int ret; + + if (!intf->ops || !intf->ops->screen_size) + return -EOPNOTSUPP; + + mutex_lock(&dev->client_lock); + ret = intf->ops->screen_size(intf, width_mm, height_mm); + mutex_unlock(&dev->client_lock); + + return ret; +} +EXPORT_SYMBOL(adf_interface_get_screen_size); + +/** + * adf_overlay_engine_supports_format - returns whether a format is in an + * overlay engine's supported list + * + * @eng: the overlay engine + * @format: format fourcc + */ +bool adf_overlay_engine_supports_format(struct adf_overlay_engine *eng, + u32 format) +{ + size_t i; + for (i = 0; i < eng->ops->n_supported_formats; i++) + if (format == eng->ops->supported_formats[i]) + return true; + + return false; +} +EXPORT_SYMBOL(adf_overlay_engine_supports_format); + +static int adf_buffer_validate(struct adf_buffer *buf) +{ + struct adf_overlay_engine *eng = buf->overlay_engine; + struct device *dev = &eng->base.dev; + struct adf_device *parent = adf_overlay_engine_parent(eng); + u8 hsub, vsub, num_planes, cpp[ADF_MAX_PLANES], i; + + if (!adf_overlay_engine_supports_format(eng, buf->format)) { + char format_str[ADF_FORMAT_STR_SIZE]; + adf_format_str(buf->format, format_str); + dev_err(dev, "unsupported format %s\n", format_str); + return -EINVAL; + } + + if (!adf_format_is_standard(buf->format)) + return parent->ops->validate_custom_format(parent, buf); + + hsub = adf_format_horz_chroma_subsampling(buf->format); + vsub = adf_format_vert_chroma_subsampling(buf->format); + num_planes = adf_format_num_planes(buf->format); + for (i = 0; i < num_planes; i++) + cpp[i] = adf_format_plane_cpp(buf->format, i); + + return adf_format_validate_yuv(parent, buf, num_planes, hsub, vsub, + cpp); +} + +static int adf_buffer_map(struct adf_device *dev, struct adf_buffer *buf, + struct adf_buffer_mapping *mapping) +{ + int ret = 0; + size_t i; + + for (i = 0; i < buf->n_planes; i++) { + struct dma_buf_attachment *attachment; + struct sg_table *sg_table; + + attachment = dma_buf_attach(buf->dma_bufs[i], dev->dev); + if (IS_ERR(attachment)) { + ret = PTR_ERR(attachment); + dev_err(&dev->base.dev, "attaching plane %zu failed: %d\n", + i, ret); + goto done; + } + mapping->attachments[i] = attachment; + + sg_table = dma_buf_map_attachment(attachment, DMA_TO_DEVICE); + if (IS_ERR(sg_table)) { + ret = PTR_ERR(sg_table); + dev_err(&dev->base.dev, "mapping plane %zu failed: %d", + i, ret); + goto done; + } else if (!sg_table) { + ret = -ENOMEM; + dev_err(&dev->base.dev, "mapping plane %zu failed\n", + i); + goto done; + } + mapping->sg_tables[i] = sg_table; + } + +done: + if (ret < 0) + 
adf_buffer_mapping_cleanup(mapping, buf); + + return ret; +} + +static struct sync_fence *adf_sw_complete_fence(struct adf_device *dev) +{ + struct sync_pt *pt; + struct sync_fence *complete_fence; + + if (!dev->timeline) { + dev->timeline = sw_sync_timeline_create(dev->base.name); + if (!dev->timeline) + return ERR_PTR(-ENOMEM); + dev->timeline_max = 1; + } + + dev->timeline_max++; + pt = sw_sync_pt_create(dev->timeline, dev->timeline_max); + if (!pt) + goto err_pt_create; + complete_fence = sync_fence_create(dev->base.name, pt); + if (!complete_fence) + goto err_fence_create; + + return complete_fence; + +err_fence_create: + sync_pt_free(pt); +err_pt_create: + dev->timeline_max--; + return ERR_PTR(-ENOSYS); +} + +/** + * adf_device_post - flip to a new set of buffers + * + * @dev: device targeted by the flip + * @intfs: interfaces targeted by the flip + * @n_intfs: number of targeted interfaces + * @bufs: description of buffers displayed + * @n_bufs: number of buffers displayed + * @custom_data: driver-private data + * @custom_data_size: size of driver-private data + * + * adf_device_post() will copy @intfs, @bufs, and @custom_data, so they may + * point to variables on the stack. adf_device_post() also takes its own + * reference on each of the dma-bufs in @bufs. The adf_device_post_nocopy() + * variant transfers ownership of these resources to ADF instead. + * + * On success, returns a sync fence which signals when the buffers are removed + * from the screen. On failure, returns ERR_PTR(-errno). + */ +struct sync_fence *adf_device_post(struct adf_device *dev, + struct adf_interface **intfs, size_t n_intfs, + struct adf_buffer *bufs, size_t n_bufs, void *custom_data, + size_t custom_data_size) +{ + struct adf_interface **intfs_copy = NULL; + struct adf_buffer *bufs_copy = NULL; + void *custom_data_copy = NULL; + struct sync_fence *ret; + size_t i; + + intfs_copy = kzalloc(sizeof(intfs_copy[0]) * n_intfs, GFP_KERNEL); + if (!intfs_copy) + return ERR_PTR(-ENOMEM); + + bufs_copy = kzalloc(sizeof(bufs_copy[0]) * n_bufs, GFP_KERNEL); + if (!bufs_copy) { + ret = ERR_PTR(-ENOMEM); + goto err_alloc; + } + + custom_data_copy = kzalloc(custom_data_size, GFP_KERNEL); + if (!custom_data_copy) { + ret = ERR_PTR(-ENOMEM); + goto err_alloc; + } + + for (i = 0; i < n_bufs; i++) { + size_t j; + for (j = 0; j < bufs[i].n_planes; j++) + get_dma_buf(bufs[i].dma_bufs[j]); + } + + memcpy(intfs_copy, intfs, sizeof(intfs_copy[0]) * n_intfs); + memcpy(bufs_copy, bufs, sizeof(bufs_copy[0]) * n_bufs); + memcpy(custom_data_copy, custom_data, custom_data_size); + + ret = adf_device_post_nocopy(dev, intfs_copy, n_intfs, bufs_copy, + n_bufs, custom_data_copy, custom_data_size); + if (IS_ERR(ret)) + goto err_post; + + return ret; + +err_post: + for (i = 0; i < n_bufs; i++) { + size_t j; + for (j = 0; j < bufs[i].n_planes; j++) + dma_buf_put(bufs[i].dma_bufs[j]); + } +err_alloc: + kfree(custom_data_copy); + kfree(bufs_copy); + kfree(intfs_copy); + return ret; +} +EXPORT_SYMBOL(adf_device_post); + +/** + * adf_device_post_nocopy - flip to a new set of buffers + * + * adf_device_post_nocopy() has the same behavior as adf_device_post(), + * except ADF does not copy @intfs, @bufs, or @custom_data, and it does + * not take an extra reference on the dma-bufs in @bufs. + * + * @intfs, @bufs, and @custom_data must point to buffers allocated by + * kmalloc(). On success, ADF takes ownership of these buffers and the dma-bufs + * in @bufs, and will kfree()/dma_buf_put() them when they are no longer needed. 
+ * On failure, adf_device_post_nocopy() does NOT take ownership of these + * buffers or the dma-bufs, and the caller must clean them up. + * + * adf_device_post_nocopy() is mainly intended for implementing ADF's ioctls. + * Clients may find the nocopy variant useful in limited cases, but most should + * call adf_device_post() instead. + */ +struct sync_fence *adf_device_post_nocopy(struct adf_device *dev, + struct adf_interface **intfs, size_t n_intfs, + struct adf_buffer *bufs, size_t n_bufs, + void *custom_data, size_t custom_data_size) +{ + struct adf_pending_post *cfg; + struct adf_buffer_mapping *mappings; + struct sync_fence *ret; + size_t i; + int err; + + cfg = kzalloc(sizeof(*cfg), GFP_KERNEL); + if (!cfg) + return ERR_PTR(-ENOMEM); + + mappings = kzalloc(sizeof(mappings[0]) * n_bufs, GFP_KERNEL); + if (!mappings) { + ret = ERR_PTR(-ENOMEM); + goto err_alloc; + } + + mutex_lock(&dev->client_lock); + + for (i = 0; i < n_bufs; i++) { + err = adf_buffer_validate(&bufs[i]); + if (err < 0) { + ret = ERR_PTR(err); + goto err_buf; + } + + err = adf_buffer_map(dev, &bufs[i], &mappings[i]); + if (err < 0) { + ret = ERR_PTR(err); + goto err_buf; + } + } + + INIT_LIST_HEAD(&cfg->head); + cfg->config.n_bufs = n_bufs; + cfg->config.bufs = bufs; + cfg->config.mappings = mappings; + cfg->config.custom_data = custom_data; + cfg->config.custom_data_size = custom_data_size; + + err = dev->ops->validate(dev, &cfg->config, &cfg->state); + if (err < 0) { + ret = ERR_PTR(err); + goto err_buf; + } + + mutex_lock(&dev->post_lock); + + if (dev->ops->complete_fence) + ret = dev->ops->complete_fence(dev, &cfg->config, + cfg->state); + else + ret = adf_sw_complete_fence(dev); + + if (IS_ERR(ret)) + goto err_fence; + + list_add_tail(&cfg->head, &dev->post_list); + queue_kthread_work(&dev->post_worker, &dev->post_work); + mutex_unlock(&dev->post_lock); + mutex_unlock(&dev->client_lock); + kfree(intfs); + return ret; + +err_fence: + mutex_unlock(&dev->post_lock); + +err_buf: + for (i = 0; i < n_bufs; i++) + adf_buffer_mapping_cleanup(&mappings[i], &bufs[i]); + + mutex_unlock(&dev->client_lock); + kfree(mappings); + +err_alloc: + kfree(cfg); + return ret; +} +EXPORT_SYMBOL(adf_device_post_nocopy); + +static void adf_attachment_list_to_array(struct adf_device *dev, + struct list_head *src, struct adf_attachment *dst, size_t size) +{ + struct adf_attachment_list *entry; + size_t i = 0; + + if (!dst) + return; + + list_for_each_entry(entry, src, head) { + if (i == size) + return; + dst[i] = entry->attachment; + i++; + } +} + +/** + * adf_device_attachments - get device's list of active attachments + * + * @dev: the device + * @attachments: storage for the attachment list (optional) + * @n_attachments: length of @attachments + * + * If @attachments is not NULL, adf_device_attachments() will copy up to + * @n_attachments entries into @attachments. + * + * Returns the length of the active attachment list. 
+ */ +size_t adf_device_attachments(struct adf_device *dev, + struct adf_attachment *attachments, size_t n_attachments) +{ + size_t retval; + + mutex_lock(&dev->client_lock); + adf_attachment_list_to_array(dev, &dev->attached, attachments, + n_attachments); + retval = dev->n_attached; + mutex_unlock(&dev->client_lock); + + return retval; +} +EXPORT_SYMBOL(adf_device_attachments); + +/** + * adf_device_attachments_allowed - get device's list of allowed attachments + * + * @dev: the device + * @attachments: storage for the attachment list (optional) + * @n_attachments: length of @attachments + * + * If @attachments is not NULL, adf_device_attachments_allowed() will copy up to + * @n_attachments entries into @attachments. + * + * Returns the length of the allowed attachment list. + */ +size_t adf_device_attachments_allowed(struct adf_device *dev, + struct adf_attachment *attachments, size_t n_attachments) +{ + size_t retval; + + mutex_lock(&dev->client_lock); + adf_attachment_list_to_array(dev, &dev->attach_allowed, attachments, + n_attachments); + retval = dev->n_attach_allowed; + mutex_unlock(&dev->client_lock); + + return retval; +} +EXPORT_SYMBOL(adf_device_attachments_allowed); + +/** + * adf_device_attached - return whether an overlay engine and interface are + * attached + * + * @dev: the parent device + * @eng: the overlay engine + * @intf: the interface + */ +bool adf_device_attached(struct adf_device *dev, struct adf_overlay_engine *eng, + struct adf_interface *intf) +{ + struct adf_attachment_list *attachment; + + mutex_lock(&dev->client_lock); + attachment = adf_attachment_find(&dev->attached, eng, intf); + mutex_unlock(&dev->client_lock); + + return attachment != NULL; +} +EXPORT_SYMBOL(adf_device_attached); + +/** + * adf_device_attach_allowed - return whether the ADF device supports attaching + * an overlay engine and interface + * + * @dev: the parent device + * @eng: the overlay engine + * @intf: the interface + */ +bool adf_device_attach_allowed(struct adf_device *dev, + struct adf_overlay_engine *eng, struct adf_interface *intf) +{ + struct adf_attachment_list *attachment; + + mutex_lock(&dev->client_lock); + attachment = adf_attachment_find(&dev->attach_allowed, eng, intf); + mutex_unlock(&dev->client_lock); + + return attachment != NULL; +} +EXPORT_SYMBOL(adf_device_attach_allowed); +/** + * adf_device_attach - attach an overlay engine to an interface + * + * @dev: the parent device + * @eng: the overlay engine + * @intf: the interface + * + * Returns 0 on success, -%EINVAL if attaching @intf and @eng is not allowed, + * -%EALREADY if @intf and @eng are already attached, or -errno on any other + * failure. 
+ */ +int adf_device_attach(struct adf_device *dev, struct adf_overlay_engine *eng, + struct adf_interface *intf) +{ + int ret; + struct adf_attachment_list *attachment = NULL; + + ret = adf_attachment_validate(dev, eng, intf); + if (ret < 0) + return ret; + + mutex_lock(&dev->client_lock); + + if (dev->n_attached == ADF_MAX_ATTACHMENTS) { + ret = -ENOMEM; + goto done; + } + + if (!adf_attachment_find(&dev->attach_allowed, eng, intf)) { + ret = -EINVAL; + goto done; + } + + if (adf_attachment_find(&dev->attached, eng, intf)) { + ret = -EALREADY; + goto done; + } + + ret = adf_device_attach_op(dev, eng, intf); + if (ret < 0) + goto done; + + attachment = kzalloc(sizeof(*attachment), GFP_KERNEL); + if (!attachment) { + ret = -ENOMEM; + goto done; + } + + attachment->attachment.interface = intf; + attachment->attachment.overlay_engine = eng; + list_add_tail(&attachment->head, &dev->attached); + dev->n_attached++; + +done: + mutex_unlock(&dev->client_lock); + if (ret < 0) + kfree(attachment); + + return ret; +} +EXPORT_SYMBOL(adf_device_attach); + +/** + * adf_device_detach - detach an overlay engine from an interface + * + * @dev: the parent device + * @eng: the overlay engine + * @intf: the interface + * + * Returns 0 on success, -%EINVAL if @intf and @eng are not attached, + * or -errno on any other failure. + */ +int adf_device_detach(struct adf_device *dev, struct adf_overlay_engine *eng, + struct adf_interface *intf) +{ + int ret; + struct adf_attachment_list *attachment; + + ret = adf_attachment_validate(dev, eng, intf); + if (ret < 0) + return ret; + + mutex_lock(&dev->client_lock); + + attachment = adf_attachment_find(&dev->attached, eng, intf); + if (!attachment) { + ret = -EINVAL; + goto done; + } + + ret = adf_device_detach_op(dev, eng, intf); + if (ret < 0) + goto done; + + adf_attachment_free(attachment); + dev->n_attached--; +done: + mutex_unlock(&dev->client_lock); + return ret; +} +EXPORT_SYMBOL(adf_device_detach); + +/** + * adf_interface_simple_buffer_alloc - allocate a simple buffer + * + * @intf: target interface + * @w: width in pixels + * @h: height in pixels + * @format: format fourcc + * @dma_buf: returns the allocated buffer + * @offset: returns the byte offset of the allocated buffer's first pixel + * @pitch: returns the allocated buffer's pitch + * + * See &struct adf_simple_buffer_alloc for a description of simple buffers and + * their limitations. + * + * Returns 0 on success or -errno on failure. + */ +int adf_interface_simple_buffer_alloc(struct adf_interface *intf, u16 w, u16 h, + u32 format, struct dma_buf **dma_buf, u32 *offset, u32 *pitch) +{ + if (!intf->ops || !intf->ops->alloc_simple_buffer) + return -EOPNOTSUPP; + + if (!adf_format_is_rgb(format)) + return -EINVAL; + + return intf->ops->alloc_simple_buffer(intf, w, h, format, dma_buf, + offset, pitch); +} +EXPORT_SYMBOL(adf_interface_simple_buffer_alloc); + +/** + * adf_interface_simple_post - flip to a single buffer + * + * @intf: interface targeted by the flip + * @buf: buffer to display + * + * adf_interface_simple_post() can be used generically for simple display + * configurations, since the client does not need to provide any driver-private + * configuration data. + * + * adf_interface_simple_post() has the same copying semantics as + * adf_device_post(). + * + * On success, returns a sync fence which signals when the buffer is removed + * from the screen. On failure, returns ERR_PTR(-errno). 
+ */ +struct sync_fence *adf_interface_simple_post(struct adf_interface *intf, + struct adf_buffer *buf) +{ + size_t custom_data_size = 0; + void *custom_data = NULL; + struct sync_fence *ret; + + if (intf->ops && intf->ops->describe_simple_post) { + int err; + + custom_data = kzalloc(ADF_MAX_CUSTOM_DATA_SIZE, GFP_KERNEL); + if (!custom_data) { + ret = ERR_PTR(-ENOMEM); + goto done; + } + + err = intf->ops->describe_simple_post(intf, buf, custom_data, + &custom_data_size); + if (err < 0) { + ret = ERR_PTR(err); + goto done; + } + } + + ret = adf_device_post(adf_interface_parent(intf), &intf, 1, buf, 1, + custom_data, custom_data_size); +done: + kfree(custom_data); + return ret; +} +EXPORT_SYMBOL(adf_interface_simple_post); diff --git a/drivers/video/adf/adf_fbdev.c b/drivers/video/adf/adf_fbdev.c new file mode 100644 index 00000000000..cac34d14cbc --- /dev/null +++ b/drivers/video/adf/adf_fbdev.c @@ -0,0 +1,651 @@ +/* + * Copyright (C) 2013 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include <linux/vmalloc.h> + +#include <video/adf.h> +#include <video/adf_client.h> +#include <video/adf_fbdev.h> +#include <video/adf_format.h> + +#include "adf.h" + +struct adf_fbdev_format { + u32 fourcc; + u32 bpp; + u32 r_length; + u32 g_length; + u32 b_length; + u32 a_length; + u32 r_offset; + u32 g_offset; + u32 b_offset; + u32 a_offset; +}; + +static const struct adf_fbdev_format format_table[] = { + {DRM_FORMAT_RGB332, 8, 3, 3, 2, 0, 5, 2, 0, 0}, + {DRM_FORMAT_BGR233, 8, 3, 3, 2, 0, 0, 3, 5, 0}, + + {DRM_FORMAT_XRGB4444, 16, 4, 4, 4, 0, 8, 4, 0, 0}, + {DRM_FORMAT_XBGR4444, 16, 4, 4, 4, 0, 0, 4, 8, 0}, + {DRM_FORMAT_RGBX4444, 16, 4, 4, 4, 0, 12, 8, 4, 0}, + {DRM_FORMAT_BGRX4444, 16, 4, 4, 4, 0, 0, 4, 8, 0}, + + {DRM_FORMAT_ARGB4444, 16, 4, 4, 4, 4, 8, 4, 0, 12}, + {DRM_FORMAT_ABGR4444, 16, 4, 4, 4, 4, 0, 4, 8, 12}, + {DRM_FORMAT_RGBA4444, 16, 4, 4, 4, 4, 12, 8, 4, 0}, + {DRM_FORMAT_BGRA4444, 16, 4, 4, 4, 4, 0, 4, 8, 0}, + + {DRM_FORMAT_XRGB1555, 16, 5, 5, 5, 0, 10, 5, 0, 0}, + {DRM_FORMAT_XBGR1555, 16, 5, 5, 5, 0, 0, 5, 10, 0}, + {DRM_FORMAT_RGBX5551, 16, 5, 5, 5, 0, 11, 6, 1, 0}, + {DRM_FORMAT_BGRX5551, 16, 5, 5, 5, 0, 1, 6, 11, 0}, + + {DRM_FORMAT_ARGB1555, 16, 5, 5, 5, 1, 10, 5, 0, 15}, + {DRM_FORMAT_ABGR1555, 16, 5, 5, 5, 1, 0, 5, 10, 15}, + {DRM_FORMAT_RGBA5551, 16, 5, 5, 5, 1, 11, 6, 1, 0}, + {DRM_FORMAT_BGRA5551, 16, 5, 5, 5, 1, 1, 6, 11, 0}, + + {DRM_FORMAT_RGB565, 16, 5, 6, 5, 0, 11, 5, 0, 0}, + {DRM_FORMAT_BGR565, 16, 5, 6, 5, 0, 0, 5, 11, 0}, + + {DRM_FORMAT_RGB888, 24, 8, 8, 8, 0, 16, 8, 0, 0}, + {DRM_FORMAT_BGR888, 24, 8, 8, 8, 0, 0, 8, 16, 0}, + + {DRM_FORMAT_XRGB8888, 32, 8, 8, 8, 0, 16, 8, 0, 0}, + {DRM_FORMAT_XBGR8888, 32, 8, 8, 8, 0, 0, 8, 16, 0}, + {DRM_FORMAT_RGBX8888, 32, 8, 8, 8, 0, 24, 16, 8, 0}, + {DRM_FORMAT_BGRX8888, 32, 8, 8, 8, 0, 8, 16, 24, 0}, + + {DRM_FORMAT_ARGB8888, 32, 8, 8, 8, 8, 16, 8, 0, 24}, + {DRM_FORMAT_ABGR8888, 32, 8, 8, 8, 8, 0, 8, 16, 24}, + {DRM_FORMAT_RGBA8888, 32, 8, 8, 8, 8, 24, 16, 8, 0}, + {DRM_FORMAT_BGRA8888, 32, 8, 8, 8, 8, 8, 16, 24, 0}, + + {DRM_FORMAT_XRGB2101010, 32, 10, 10, 10, 0, 20, 10, 0, 0}, + 
{DRM_FORMAT_XBGR2101010, 32, 10, 10, 10, 0, 0, 10, 20, 0}, + {DRM_FORMAT_RGBX1010102, 32, 10, 10, 10, 0, 22, 12, 2, 0}, + {DRM_FORMAT_BGRX1010102, 32, 10, 10, 10, 0, 2, 12, 22, 0}, + + {DRM_FORMAT_ARGB2101010, 32, 10, 10, 10, 2, 20, 10, 0, 30}, + {DRM_FORMAT_ABGR2101010, 32, 10, 10, 10, 2, 0, 10, 20, 30}, + {DRM_FORMAT_RGBA1010102, 32, 10, 10, 10, 2, 22, 12, 2, 0}, + {DRM_FORMAT_BGRA1010102, 32, 10, 10, 10, 2, 2, 12, 22, 0}, +}; + +static u32 drm_fourcc_from_fb_var(struct fb_var_screeninfo *var) +{ + size_t i; + for (i = 0; i < ARRAY_SIZE(format_table); i++) { + const struct adf_fbdev_format *f = &format_table[i]; + if (var->red.length == f->r_length && + var->red.offset == f->r_offset && + var->green.length == f->g_length && + var->green.offset == f->g_offset && + var->blue.length == f->b_length && + var->blue.offset == f->b_offset && + var->transp.length == f->a_length && + (var->transp.length == 0 || + var->transp.offset == f->a_offset)) + return f->fourcc; + } + + return 0; +} + +static const struct adf_fbdev_format *fbdev_format_info(u32 format) +{ + size_t i; + for (i = 0; i < ARRAY_SIZE(format_table); i++) { + const struct adf_fbdev_format *f = &format_table[i]; + if (f->fourcc == format) + return f; + } + + BUG(); +} + +void adf_modeinfo_to_fb_videomode(const struct drm_mode_modeinfo *mode, + struct fb_videomode *vmode) +{ + memset(vmode, 0, sizeof(*vmode)); + + vmode->refresh = mode->vrefresh; + + vmode->xres = mode->hdisplay; + vmode->yres = mode->vdisplay; + + vmode->pixclock = mode->clock ? KHZ2PICOS(mode->clock) : 0; + vmode->left_margin = mode->htotal - mode->hsync_end; + vmode->right_margin = mode->hsync_start - mode->hdisplay; + vmode->upper_margin = mode->vtotal - mode->vsync_end; + vmode->lower_margin = mode->vsync_start - mode->vdisplay; + vmode->hsync_len = mode->hsync_end - mode->hsync_start; + vmode->vsync_len = mode->vsync_end - mode->vsync_start; + + vmode->sync = 0; + if (mode->flags & DRM_MODE_FLAG_PHSYNC) + vmode->sync |= FB_SYNC_HOR_HIGH_ACT; + if (mode->flags & DRM_MODE_FLAG_PVSYNC) + vmode->sync |= FB_SYNC_VERT_HIGH_ACT; + if (mode->flags & DRM_MODE_FLAG_PCSYNC) + vmode->sync |= FB_SYNC_COMP_HIGH_ACT; + if (mode->flags & DRM_MODE_FLAG_BCAST) + vmode->sync |= FB_SYNC_BROADCAST; + + vmode->vmode = 0; + if (mode->flags & DRM_MODE_FLAG_INTERLACE) + vmode->vmode |= FB_VMODE_INTERLACED; + if (mode->flags & DRM_MODE_FLAG_DBLSCAN) + vmode->vmode |= FB_VMODE_DOUBLE; +} +EXPORT_SYMBOL(adf_modeinfo_to_fb_videomode); + +void adf_modeinfo_from_fb_videomode(const struct fb_videomode *vmode, + struct drm_mode_modeinfo *mode) +{ + memset(mode, 0, sizeof(*mode)); + + mode->hdisplay = vmode->xres; + mode->hsync_start = mode->hdisplay + vmode->right_margin; + mode->hsync_end = mode->hsync_start + vmode->hsync_len; + mode->htotal = mode->hsync_end + vmode->left_margin; + + mode->vdisplay = vmode->yres; + mode->vsync_start = mode->vdisplay + vmode->lower_margin; + mode->vsync_end = mode->vsync_start + vmode->vsync_len; + mode->vtotal = mode->vsync_end + vmode->upper_margin; + + mode->clock = vmode->pixclock ?
PICOS2KHZ(vmode->pixclock) : 0; + + mode->flags = 0; + if (vmode->sync & FB_SYNC_HOR_HIGH_ACT) + mode->flags |= DRM_MODE_FLAG_PHSYNC; + if (vmode->sync & FB_SYNC_VERT_HIGH_ACT) + mode->flags |= DRM_MODE_FLAG_PVSYNC; + if (vmode->sync & FB_SYNC_COMP_HIGH_ACT) + mode->flags |= DRM_MODE_FLAG_PCSYNC; + if (vmode->sync & FB_SYNC_BROADCAST) + mode->flags |= DRM_MODE_FLAG_BCAST; + if (vmode->vmode & FB_VMODE_INTERLACED) + mode->flags |= DRM_MODE_FLAG_INTERLACE; + if (vmode->vmode & FB_VMODE_DOUBLE) + mode->flags |= DRM_MODE_FLAG_DBLSCAN; + + if (vmode->refresh) + mode->vrefresh = vmode->refresh; + else + adf_modeinfo_set_vrefresh(mode); + + if (vmode->name) + strlcpy(mode->name, vmode->name, sizeof(mode->name)); + else + adf_modeinfo_set_name(mode); +} +EXPORT_SYMBOL(adf_modeinfo_from_fb_videomode); + +static int adf_fbdev_post(struct adf_fbdev *fbdev) +{ + struct adf_buffer buf; + struct sync_fence *complete_fence; + int ret = 0; + + memset(&buf, 0, sizeof(buf)); + buf.overlay_engine = fbdev->eng; + buf.w = fbdev->info->var.xres; + buf.h = fbdev->info->var.yres; + buf.format = fbdev->format; + buf.dma_bufs[0] = fbdev->dma_buf; + buf.offset[0] = fbdev->offset + + fbdev->info->var.yoffset * fbdev->pitch + + fbdev->info->var.xoffset * + (fbdev->info->var.bits_per_pixel / 8); + buf.pitch[0] = fbdev->pitch; + buf.n_planes = 1; + + complete_fence = adf_interface_simple_post(fbdev->intf, &buf); + if (IS_ERR(complete_fence)) { + ret = PTR_ERR(complete_fence); + goto done; + } + + sync_fence_put(complete_fence); +done: + return ret; +} + +static const u16 vga_palette[][3] = { + {0x0000, 0x0000, 0x0000}, + {0x0000, 0x0000, 0xAAAA}, + {0x0000, 0xAAAA, 0x0000}, + {0x0000, 0xAAAA, 0xAAAA}, + {0xAAAA, 0x0000, 0x0000}, + {0xAAAA, 0x0000, 0xAAAA}, + {0xAAAA, 0x5555, 0x0000}, + {0xAAAA, 0xAAAA, 0xAAAA}, + {0x5555, 0x5555, 0x5555}, + {0x5555, 0x5555, 0xFFFF}, + {0x5555, 0xFFFF, 0x5555}, + {0x5555, 0xFFFF, 0xFFFF}, + {0xFFFF, 0x5555, 0x5555}, + {0xFFFF, 0x5555, 0xFFFF}, + {0xFFFF, 0xFFFF, 0x5555}, + {0xFFFF, 0xFFFF, 0xFFFF}, +}; + +static int adf_fb_alloc(struct adf_fbdev *fbdev) +{ + int ret; + + ret = adf_interface_simple_buffer_alloc(fbdev->intf, + fbdev->default_xres_virtual, + fbdev->default_yres_virtual, + fbdev->default_format, + &fbdev->dma_buf, &fbdev->offset, &fbdev->pitch); + if (ret < 0) { + dev_err(fbdev->info->dev, "allocating fb failed: %d\n", ret); + return ret; + } + + fbdev->vaddr = dma_buf_vmap(fbdev->dma_buf); + if (!fbdev->vaddr) { + ret = -ENOMEM; + dev_err(fbdev->info->dev, "vmapping fb failed\n"); + goto err_vmap; + } + fbdev->info->fix.line_length = fbdev->pitch; + fbdev->info->var.xres_virtual = fbdev->default_xres_virtual; + fbdev->info->var.yres_virtual = fbdev->default_yres_virtual; + fbdev->info->fix.smem_len = fbdev->dma_buf->size; + fbdev->info->screen_base = fbdev->vaddr; + + return 0; + +err_vmap: + dma_buf_put(fbdev->dma_buf); + return ret; +} + +static void adf_fb_destroy(struct adf_fbdev *fbdev) +{ + dma_buf_vunmap(fbdev->dma_buf, fbdev->vaddr); + dma_buf_put(fbdev->dma_buf); +} + +static void adf_fbdev_set_format(struct adf_fbdev *fbdev, u32 format) +{ + size_t i; + const struct adf_fbdev_format *info = fbdev_format_info(format); + for (i = 0; i < ARRAY_SIZE(vga_palette); i++) { + u16 r = vga_palette[i][0]; + u16 g = vga_palette[i][1]; + u16 b = vga_palette[i][2]; + + r >>= (16 - info->r_length); + g >>= (16 - info->g_length); + b >>= (16 - info->b_length); + + fbdev->pseudo_palette[i] = + (r << info->r_offset) | + (g << info->g_offset) | + (b << info->b_offset); + + if 
(info->a_length) { + u16 a = BIT(info->a_length) - 1; + fbdev->pseudo_palette[i] |= (a << info->a_offset); + } + } + + fbdev->info->var.bits_per_pixel = adf_format_bpp(format); + fbdev->info->var.red.length = info->r_length; + fbdev->info->var.red.offset = info->r_offset; + fbdev->info->var.green.length = info->g_length; + fbdev->info->var.green.offset = info->g_offset; + fbdev->info->var.blue.length = info->b_length; + fbdev->info->var.blue.offset = info->b_offset; + fbdev->info->var.transp.length = info->a_length; + fbdev->info->var.transp.offset = info->a_offset; + fbdev->format = format; +} + +static void adf_fbdev_fill_modelist(struct adf_fbdev *fbdev) +{ + struct drm_mode_modeinfo *modelist; + struct fb_videomode fbmode; + size_t n_modes, i; + int ret = 0; + + n_modes = adf_interface_modelist(fbdev->intf, NULL, 0); + modelist = kzalloc(sizeof(modelist[0]) * n_modes, GFP_KERNEL); + if (!modelist) { + dev_warn(fbdev->info->dev, "allocating new modelist failed; keeping old modelist\n"); + return; + } + adf_interface_modelist(fbdev->intf, modelist, n_modes); + + fb_destroy_modelist(&fbdev->info->modelist); + + for (i = 0; i < n_modes; i++) { + adf_modeinfo_to_fb_videomode(&modelist[i], &fbmode); + ret = fb_add_videomode(&fbmode, &fbdev->info->modelist); + if (ret < 0) + dev_warn(fbdev->info->dev, "adding mode %s to modelist failed: %d\n", + modelist[i].name, ret); + } + + kfree(modelist); +} + +/** + * adf_fbdev_open - default implementation of fbdev open op + */ +int adf_fbdev_open(struct fb_info *info, int user) +{ + struct adf_fbdev *fbdev = info->par; + int ret; + + if (!fbdev->open) { + struct drm_mode_modeinfo mode; + struct fb_videomode fbmode; + struct adf_device *dev = adf_interface_parent(fbdev->intf); + + ret = adf_device_attach(dev, fbdev->eng, fbdev->intf); + if (ret < 0 && ret != -EALREADY) + return ret; + + ret = adf_fb_alloc(fbdev); + if (ret < 0) + return ret; + + adf_interface_current_mode(fbdev->intf, &mode); + adf_modeinfo_to_fb_videomode(&mode, &fbmode); + fb_videomode_to_var(&fbdev->info->var, &fbmode); + + adf_fbdev_set_format(fbdev, fbdev->default_format); + adf_fbdev_fill_modelist(fbdev); + } + + ret = adf_fbdev_post(fbdev); + if (ret < 0) { + if (!fbdev->open) + adf_fb_destroy(fbdev); + return ret; + } + + fbdev->open = true; + return 0; +} +EXPORT_SYMBOL(adf_fbdev_open); + +/** + * adf_fbdev_release - default implementation of fbdev release op + */ +int adf_fbdev_release(struct fb_info *info, int user) +{ + struct adf_fbdev *fbdev = info->par; + adf_fb_destroy(fbdev); + fbdev->open = false; + return 0; +} +EXPORT_SYMBOL(adf_fbdev_release); + +/** + * adf_fbdev_check_var - default implementation of fbdev check_var op + */ +int adf_fbdev_check_var(struct fb_var_screeninfo *var, struct fb_info *info) +{ + struct adf_fbdev *fbdev = info->par; + bool valid_format = true; + u32 format = drm_fourcc_from_fb_var(var); + u32 pitch = var->xres_virtual * var->bits_per_pixel / 8; + + if (!format) { + dev_dbg(info->dev, "%s: unrecognized format\n", __func__); + valid_format = false; + } + + if (valid_format && var->grayscale) { + dev_dbg(info->dev, "%s: grayscale modes not supported\n", + __func__); + valid_format = false; + } + + if (valid_format && var->nonstd) { + dev_dbg(info->dev, "%s: nonstandard formats not supported\n", + __func__); + valid_format = false; + } + + if (valid_format && !adf_overlay_engine_supports_format(fbdev->eng, + format)) { + char format_str[ADF_FORMAT_STR_SIZE]; + adf_format_str(format, format_str); + dev_dbg(info->dev, "%s: format %s not 
supported by overlay engine %s\n", + __func__, format_str, fbdev->eng->base.name); + valid_format = false; + } + + if (valid_format && pitch > fbdev->pitch) { + dev_dbg(info->dev, "%s: fb pitch too small for var (pitch = %u, xres_virtual = %u, bits_per_pixel = %u)\n", + __func__, fbdev->pitch, var->xres_virtual, + var->bits_per_pixel); + valid_format = false; + } + + if (valid_format && var->yres_virtual > fbdev->default_yres_virtual) { + dev_dbg(info->dev, "%s: fb height too small for var (h = %u, yres_virtual = %u)\n", + __func__, fbdev->default_yres_virtual, + var->yres_virtual); + valid_format = false; + } + + if (valid_format) { + var->activate = info->var.activate; + var->height = info->var.height; + var->width = info->var.width; + var->accel_flags = info->var.accel_flags; + var->rotate = info->var.rotate; + var->colorspace = info->var.colorspace; + /* userspace can't change these */ + } else { + /* if any part of the format is invalid then fixing it up is + impractical, so save just the modesetting bits and + overwrite everything else */ + struct fb_videomode mode; + fb_var_to_videomode(&mode, var); + memcpy(var, &info->var, sizeof(*var)); + fb_videomode_to_var(var, &mode); + } + + return 0; +} +EXPORT_SYMBOL(adf_fbdev_check_var); + +/** + * adf_fbdev_set_par - default implementation of fbdev set_par op + */ +int adf_fbdev_set_par(struct fb_info *info) +{ + struct adf_fbdev *fbdev = info->par; + struct adf_interface *intf = fbdev->intf; + struct fb_videomode vmode; + struct drm_mode_modeinfo mode; + int ret; + u32 format = drm_fourcc_from_fb_var(&info->var); + + fb_var_to_videomode(&vmode, &info->var); + adf_modeinfo_from_fb_videomode(&vmode, &mode); + ret = adf_interface_set_mode(intf, &mode); + if (ret < 0) + return ret; + + ret = adf_fbdev_post(fbdev); + if (ret < 0) + return ret; + + if (format != fbdev->format) + adf_fbdev_set_format(fbdev, format); + + return 0; +} +EXPORT_SYMBOL(adf_fbdev_set_par); + +/** + * adf_fbdev_blank - default implementation of fbdev blank op + */ +int adf_fbdev_blank(int blank, struct fb_info *info) +{ + struct adf_fbdev *fbdev = info->par; + struct adf_interface *intf = fbdev->intf; + u8 dpms_state; + + switch (blank) { + case FB_BLANK_UNBLANK: + dpms_state = DRM_MODE_DPMS_ON; + break; + case FB_BLANK_NORMAL: + dpms_state = DRM_MODE_DPMS_STANDBY; + break; + case FB_BLANK_VSYNC_SUSPEND: + dpms_state = DRM_MODE_DPMS_SUSPEND; + break; + case FB_BLANK_HSYNC_SUSPEND: + dpms_state = DRM_MODE_DPMS_STANDBY; + break; + case FB_BLANK_POWERDOWN: + dpms_state = DRM_MODE_DPMS_OFF; + break; + default: + return -EINVAL; + } + + return adf_interface_blank(intf, dpms_state); +} +EXPORT_SYMBOL(adf_fbdev_blank); + +/** + * adf_fbdev_pan_display - default implementation of fbdev pan_display op + */ +int adf_fbdev_pan_display(struct fb_var_screeninfo *var, struct fb_info *info) +{ + struct adf_fbdev *fbdev = info->par; + return adf_fbdev_post(fbdev); +} +EXPORT_SYMBOL(adf_fbdev_pan_display); + +/** + * adf_fbdev_mmap - default implementation of fbdev mmap op + */ +int adf_fbdev_mmap(struct fb_info *info, struct vm_area_struct *vma) +{ + struct adf_fbdev *fbdev = info->par; + + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + return dma_buf_mmap(fbdev->dma_buf, vma, 0); +} +EXPORT_SYMBOL(adf_fbdev_mmap); + +/** + * adf_fbdev_init - initialize helper to wrap ADF device in fbdev API + * + * @fbdev: the fbdev helper + * @interface: the ADF interface that will display the framebuffer + * @eng: the ADF overlay engine that will scan out the framebuffer + * 
@xres_virtual: the virtual width of the framebuffer + * @yres_virtual: the virtual height of the framebuffer + * @format: the format of the framebuffer + * @fbops: the device's fbdev ops + * @fmt: formatting for the framebuffer identification string + * @...: variable arguments + * + * @format must be a standard, non-indexed RGB format, i.e., + * adf_format_is_rgb(@format) && @format != @DRM_FORMAT_C8. + * + * Returns 0 on success or -errno on failure. + */ +int adf_fbdev_init(struct adf_fbdev *fbdev, struct adf_interface *interface, + struct adf_overlay_engine *eng, + u16 xres_virtual, u16 yres_virtual, u32 format, + struct fb_ops *fbops, const char *fmt, ...) +{ + struct adf_device *parent = adf_interface_parent(interface); + struct device *dev = &parent->base.dev; + u16 width_mm, height_mm; + va_list args; + int ret; + + if (!adf_format_is_rgb(format) || + format == DRM_FORMAT_C8) { + dev_err(dev, "fbdev helper does not support format %u\n", + format); + return -EINVAL; + } + + memset(fbdev, 0, sizeof(*fbdev)); + fbdev->intf = interface; + fbdev->eng = eng; + fbdev->info = framebuffer_alloc(0, dev); + if (!fbdev->info) { + dev_err(dev, "allocating framebuffer device failed\n"); + return -ENOMEM; + } + fbdev->default_xres_virtual = xres_virtual; + fbdev->default_yres_virtual = yres_virtual; + fbdev->default_format = format; + + fbdev->info->flags = FBINFO_FLAG_DEFAULT; + ret = adf_interface_get_screen_size(interface, &width_mm, &height_mm); + if (ret < 0) { + width_mm = 0; + height_mm = 0; + } + fbdev->info->var.width = width_mm; + fbdev->info->var.height = height_mm; + fbdev->info->var.activate = FB_ACTIVATE_VBL; + va_start(args, fmt); + vsnprintf(fbdev->info->fix.id, sizeof(fbdev->info->fix.id), fmt, args); + va_end(args); + fbdev->info->fix.type = FB_TYPE_PACKED_PIXELS; + fbdev->info->fix.visual = FB_VISUAL_TRUECOLOR; + fbdev->info->fix.xpanstep = 1; + fbdev->info->fix.ypanstep = 1; + INIT_LIST_HEAD(&fbdev->info->modelist); + fbdev->info->fbops = fbops; + fbdev->info->pseudo_palette = fbdev->pseudo_palette; + fbdev->info->par = fbdev; + + ret = register_framebuffer(fbdev->info); + if (ret < 0) { + dev_err(dev, "registering framebuffer failed: %d\n", ret); + return ret; + } + + return 0; +} +EXPORT_SYMBOL(adf_fbdev_init); + +/** + * adf_fbdev_destroy - destroy helper to wrap ADF device in fbdev API + * + * @fbdev: the fbdev helper + */ +void adf_fbdev_destroy(struct adf_fbdev *fbdev) +{ + unregister_framebuffer(fbdev->info); + if (WARN_ON(fbdev->open)) + adf_fb_destroy(fbdev); + framebuffer_release(fbdev->info); +} +EXPORT_SYMBOL(adf_fbdev_destroy); diff --git a/drivers/video/adf/adf_fops.c b/drivers/video/adf/adf_fops.c new file mode 100644 index 00000000000..abec58ea2ed --- /dev/null +++ b/drivers/video/adf/adf_fops.c @@ -0,0 +1,957 @@ +/* + * Copyright (C) 2013 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include <linux/bitops.h> +#include <linux/circ_buf.h> +#include <linux/fs.h> +#include <linux/module.h> +#include <linux/poll.h> +#include <linux/slab.h> +#include <linux/uaccess.h> + +#include <video/adf_client.h> +#include <video/adf_format.h> + +#include "sw_sync.h" +#include "sync.h" + +#include "adf.h" +#include "adf_fops.h" +#include "adf_sysfs.h" + +#ifdef CONFIG_COMPAT +#include "adf_fops32.h" +#endif + +static int adf_obj_set_event(struct adf_obj *obj, struct adf_file *file, + struct adf_set_event __user *arg) +{ + struct adf_set_event data; + bool enabled; + unsigned long flags; + int err; + + if (copy_from_user(&data, arg, sizeof(data))) + return -EFAULT; + + err = adf_obj_check_supports_event(obj, data.type); + if (err < 0) + return err; + + spin_lock_irqsave(&obj->file_lock, flags); + if (data.enabled) + enabled = test_and_set_bit(data.type, + file->event_subscriptions); + else + enabled = test_and_clear_bit(data.type, + file->event_subscriptions); + spin_unlock_irqrestore(&obj->file_lock, flags); + + if (data.enabled == enabled) + return -EALREADY; + + if (data.enabled) + adf_event_get(obj, data.type); + else + adf_event_put(obj, data.type); + + return 0; +} + +static int adf_obj_copy_custom_data_to_user(struct adf_obj *obj, + void __user *dst, size_t *dst_size) +{ + void *custom_data; + size_t custom_data_size; + int ret; + + if (!obj->ops || !obj->ops->custom_data) { + dev_dbg(&obj->dev, "%s: no custom_data op\n", __func__); + return 0; + } + + custom_data = kzalloc(ADF_MAX_CUSTOM_DATA_SIZE, GFP_KERNEL); + if (!custom_data) + return -ENOMEM; + + ret = obj->ops->custom_data(obj, custom_data, &custom_data_size); + if (ret < 0) + goto done; + + if (copy_to_user(dst, custom_data, min(*dst_size, custom_data_size))) { + ret = -EFAULT; + goto done; + } + *dst_size = custom_data_size; + +done: + kfree(custom_data); + return ret; +} + +static int adf_eng_get_data(struct adf_overlay_engine *eng, + struct adf_overlay_engine_data __user *arg) +{ + struct adf_device *dev = adf_overlay_engine_parent(eng); + struct adf_overlay_engine_data data; + size_t n_supported_formats; + u32 *supported_formats = NULL; + int ret = 0; + + if (copy_from_user(&data, arg, sizeof(data))) + return -EFAULT; + + strlcpy(data.name, eng->base.name, sizeof(data.name)); + + if (data.n_supported_formats > ADF_MAX_SUPPORTED_FORMATS) + return -EINVAL; + + n_supported_formats = data.n_supported_formats; + data.n_supported_formats = eng->ops->n_supported_formats; + + if (n_supported_formats) { + supported_formats = kzalloc(n_supported_formats * + sizeof(supported_formats[0]), GFP_KERNEL); + if (!supported_formats) + return -ENOMEM; + } + + memcpy(supported_formats, eng->ops->supported_formats, + sizeof(u32) * min(n_supported_formats, + eng->ops->n_supported_formats)); + + mutex_lock(&dev->client_lock); + ret = adf_obj_copy_custom_data_to_user(&eng->base, arg->custom_data, + &data.custom_data_size); + mutex_unlock(&dev->client_lock); + + if (ret < 0) + goto done; + + if (copy_to_user(arg, &data, sizeof(data))) { + ret = -EFAULT; + goto done; + } + + if (supported_formats && copy_to_user(arg->supported_formats, + supported_formats, + n_supported_formats * sizeof(supported_formats[0]))) + ret = -EFAULT; + +done: + kfree(supported_formats); + return ret; +} + +static int adf_buffer_import(struct adf_device *dev, + struct adf_buffer_config __user *cfg, struct adf_buffer *buf) +{ + struct adf_buffer_config user_buf; + size_t i; + int ret = 0; + + if (copy_from_user(&user_buf, cfg, sizeof(user_buf))) + return 
-EFAULT; + + memset(buf, 0, sizeof(*buf)); + + if (user_buf.n_planes > ADF_MAX_PLANES) { + dev_err(&dev->base.dev, "invalid plane count %u\n", + user_buf.n_planes); + return -EINVAL; + } + + buf->overlay_engine = idr_find(&dev->overlay_engines, + user_buf.overlay_engine); + if (!buf->overlay_engine) { + dev_err(&dev->base.dev, "invalid overlay engine id %u\n", + user_buf.overlay_engine); + return -ENOENT; + } + + buf->w = user_buf.w; + buf->h = user_buf.h; + buf->format = user_buf.format; + for (i = 0; i < user_buf.n_planes; i++) { + buf->dma_bufs[i] = dma_buf_get(user_buf.fd[i]); + if (IS_ERR(buf->dma_bufs[i])) { + ret = PTR_ERR(buf->dma_bufs[i]); + dev_err(&dev->base.dev, "importing dma_buf fd %llu failed: %d\n", + user_buf.fd[i], ret); + buf->dma_bufs[i] = NULL; + goto done; + } + buf->offset[i] = user_buf.offset[i]; + buf->pitch[i] = user_buf.pitch[i]; + } + buf->n_planes = user_buf.n_planes; + + if (user_buf.acquire_fence >= 0) { + buf->acquire_fence = sync_fence_fdget(user_buf.acquire_fence); + if (!buf->acquire_fence) { + dev_err(&dev->base.dev, "getting fence fd %lld failed\n", + user_buf.acquire_fence); + ret = -EINVAL; + goto done; + } + } + +done: + if (ret < 0) + adf_buffer_cleanup(buf); + return ret; +} + +static int adf_device_post_config(struct adf_device *dev, + struct adf_post_config __user *arg) +{ + struct sync_fence *complete_fence; + int complete_fence_fd; + struct adf_buffer *bufs = NULL; + struct adf_interface **intfs = NULL; + size_t n_intfs, n_bufs, i; + void *custom_data = NULL; + size_t custom_data_size; + int ret = 0; + + complete_fence_fd = get_unused_fd(); + if (complete_fence_fd < 0) + return complete_fence_fd; + + if (get_user(n_intfs, &arg->n_interfaces)) { + ret = -EFAULT; + goto err_get_user; + } + + if (n_intfs > ADF_MAX_INTERFACES) { + ret = -EINVAL; + goto err_get_user; + } + + if (get_user(n_bufs, &arg->n_bufs)) { + ret = -EFAULT; + goto err_get_user; + } + + if (n_bufs > ADF_MAX_BUFFERS) { + ret = -EINVAL; + goto err_get_user; + } + + if (get_user(custom_data_size, &arg->custom_data_size)) { + ret = -EFAULT; + goto err_get_user; + } + + if (custom_data_size > ADF_MAX_CUSTOM_DATA_SIZE) { + ret = -EINVAL; + goto err_get_user; + } + + if (n_intfs) { + intfs = kmalloc(sizeof(intfs[0]) * n_intfs, GFP_KERNEL); + if (!intfs) { + ret = -ENOMEM; + goto err_get_user; + } + } + + for (i = 0; i < n_intfs; i++) { + u32 intf_id; + if (get_user(intf_id, &arg->interfaces[i])) { + ret = -EFAULT; + goto err_get_user; + } + + intfs[i] = idr_find(&dev->interfaces, intf_id); + if (!intfs[i]) { + ret = -EINVAL; + goto err_get_user; + } + } + + if (n_bufs) { + bufs = kzalloc(sizeof(bufs[0]) * n_bufs, GFP_KERNEL); + if (!bufs) { + ret = -ENOMEM; + goto err_get_user; + } + } + + for (i = 0; i < n_bufs; i++) { + ret = adf_buffer_import(dev, &arg->bufs[i], &bufs[i]); + if (ret < 0) { + memset(&bufs[i], 0, sizeof(bufs[i])); + goto err_import; + } + } + + if (custom_data_size) { + custom_data = kzalloc(custom_data_size, GFP_KERNEL); + if (!custom_data) { + ret = -ENOMEM; + goto err_import; + } + + if (copy_from_user(custom_data, arg->custom_data, + custom_data_size)) { + ret = -EFAULT; + goto err_import; + } + } + + if (put_user(complete_fence_fd, &arg->complete_fence)) { + ret = -EFAULT; + goto err_import; + } + + complete_fence = adf_device_post_nocopy(dev, intfs, n_intfs, bufs, + n_bufs, custom_data, custom_data_size); + if (IS_ERR(complete_fence)) { + ret = PTR_ERR(complete_fence); + goto err_import; + } + + sync_fence_install(complete_fence, complete_fence_fd); + return 0; 
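+
+	/*
+	 * Error unwinding: err_import releases any dma-bufs and acquire
+	 * fences already imported above, then falls through to err_get_user,
+	 * which frees the temporary interface/buffer/custom-data allocations
+	 * and puts back the reserved completion-fence fd.
+	 */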
+ +err_import: + for (i = 0; i < n_bufs; i++) + adf_buffer_cleanup(&bufs[i]); + +err_get_user: + kfree(custom_data); + kfree(bufs); + kfree(intfs); + put_unused_fd(complete_fence_fd); + return ret; +} + +static int adf_intf_simple_post_config(struct adf_interface *intf, + struct adf_simple_post_config __user *arg) +{ + struct adf_device *dev = intf->base.parent; + struct sync_fence *complete_fence; + int complete_fence_fd; + struct adf_buffer buf; + int ret = 0; + + complete_fence_fd = get_unused_fd(); + if (complete_fence_fd < 0) + return complete_fence_fd; + + ret = adf_buffer_import(dev, &arg->buf, &buf); + if (ret < 0) + goto err_import; + + if (put_user(complete_fence_fd, &arg->complete_fence)) { + ret = -EFAULT; + goto err_put_user; + } + + complete_fence = adf_interface_simple_post(intf, &buf); + if (IS_ERR(complete_fence)) { + ret = PTR_ERR(complete_fence); + goto err_put_user; + } + + sync_fence_install(complete_fence, complete_fence_fd); + return 0; + +err_put_user: + adf_buffer_cleanup(&buf); +err_import: + put_unused_fd(complete_fence_fd); + return ret; +} + +static int adf_intf_simple_buffer_alloc(struct adf_interface *intf, + struct adf_simple_buffer_alloc __user *arg) +{ + struct adf_simple_buffer_alloc data; + struct dma_buf *dma_buf; + int ret = 0; + + if (copy_from_user(&data, arg, sizeof(data))) + return -EFAULT; + + data.fd = get_unused_fd_flags(O_CLOEXEC); + if (data.fd < 0) + return data.fd; + + ret = adf_interface_simple_buffer_alloc(intf, data.w, data.h, + data.format, &dma_buf, &data.offset, &data.pitch); + if (ret < 0) + goto err_alloc; + + if (copy_to_user(arg, &data, sizeof(*arg))) { + ret = -EFAULT; + goto err_copy; + } + + fd_install(data.fd, dma_buf->file); + return 0; + +err_copy: + dma_buf_put(dma_buf); + +err_alloc: + put_unused_fd(data.fd); + return ret; +} + +static int adf_copy_attachment_list_to_user( + struct adf_attachment_config __user *to, size_t n_to, + struct adf_attachment *from, size_t n_from) +{ + struct adf_attachment_config *temp; + size_t n = min(n_to, n_from); + size_t i; + int ret = 0; + + if (!n) + return 0; + + temp = kzalloc(n * sizeof(temp[0]), GFP_KERNEL); + if (!temp) + return -ENOMEM; + + for (i = 0; i < n; i++) { + temp[i].interface = from[i].interface->base.id; + temp[i].overlay_engine = from[i].overlay_engine->base.id; + } + + if (copy_to_user(to, temp, n * sizeof(to[0]))) { + ret = -EFAULT; + goto done; + } + +done: + kfree(temp); + return ret; +} + +static int adf_device_get_data(struct adf_device *dev, + struct adf_device_data __user *arg) +{ + struct adf_device_data data; + size_t n_attach; + struct adf_attachment *attach = NULL; + size_t n_allowed_attach; + struct adf_attachment *allowed_attach = NULL; + int ret = 0; + + if (copy_from_user(&data, arg, sizeof(data))) + return -EFAULT; + + if (data.n_attachments > ADF_MAX_ATTACHMENTS || + data.n_allowed_attachments > ADF_MAX_ATTACHMENTS) + return -EINVAL; + + strlcpy(data.name, dev->base.name, sizeof(data.name)); + + if (data.n_attachments) { + attach = kzalloc(data.n_attachments * sizeof(attach[0]), + GFP_KERNEL); + if (!attach) + return -ENOMEM; + } + n_attach = adf_device_attachments(dev, attach, data.n_attachments); + + if (data.n_allowed_attachments) { + allowed_attach = kzalloc(data.n_allowed_attachments * + sizeof(allowed_attach[0]), GFP_KERNEL); + if (!allowed_attach) { + ret = -ENOMEM; + goto done; + } + } + n_allowed_attach = adf_device_attachments_allowed(dev, allowed_attach, + data.n_allowed_attachments); + + mutex_lock(&dev->client_lock); + ret = 
adf_obj_copy_custom_data_to_user(&dev->base, arg->custom_data, + &data.custom_data_size); + mutex_unlock(&dev->client_lock); + + if (ret < 0) + goto done; + + ret = adf_copy_attachment_list_to_user(arg->attachments, + data.n_attachments, attach, n_attach); + if (ret < 0) + goto done; + + ret = adf_copy_attachment_list_to_user(arg->allowed_attachments, + data.n_allowed_attachments, allowed_attach, + n_allowed_attach); + if (ret < 0) + goto done; + + data.n_attachments = n_attach; + data.n_allowed_attachments = n_allowed_attach; + + if (copy_to_user(arg, &data, sizeof(data))) + ret = -EFAULT; + +done: + kfree(allowed_attach); + kfree(attach); + return ret; +} + +static int adf_device_handle_attachment(struct adf_device *dev, + struct adf_attachment_config __user *arg, bool attach) +{ + struct adf_attachment_config data; + struct adf_overlay_engine *eng; + struct adf_interface *intf; + + if (copy_from_user(&data, arg, sizeof(data))) + return -EFAULT; + + eng = idr_find(&dev->overlay_engines, data.overlay_engine); + if (!eng) { + dev_err(&dev->base.dev, "invalid overlay engine id %u\n", + data.overlay_engine); + return -EINVAL; + } + + intf = idr_find(&dev->interfaces, data.interface); + if (!intf) { + dev_err(&dev->base.dev, "invalid interface id %u\n", + data.interface); + return -EINVAL; + } + + if (attach) + return adf_device_attach(dev, eng, intf); + else + return adf_device_detach(dev, eng, intf); +} + +static int adf_intf_set_mode(struct adf_interface *intf, + struct drm_mode_modeinfo __user *arg) +{ + struct drm_mode_modeinfo mode; + + if (copy_from_user(&mode, arg, sizeof(mode))) + return -EFAULT; + + return adf_interface_set_mode(intf, &mode); +} + +static int adf_intf_get_data(struct adf_interface *intf, + struct adf_interface_data __user *arg) +{ + struct adf_device *dev = adf_interface_parent(intf); + struct adf_interface_data data; + struct drm_mode_modeinfo *modelist; + size_t modelist_size; + int err; + int ret = 0; + unsigned long flags; + + if (copy_from_user(&data, arg, sizeof(data))) + return -EFAULT; + + strlcpy(data.name, intf->base.name, sizeof(data.name)); + + data.type = intf->type; + data.id = intf->idx; + data.flags = intf->flags; + + err = adf_interface_get_screen_size(intf, &data.width_mm, + &data.height_mm); + if (err < 0) { + data.width_mm = 0; + data.height_mm = 0; + } + + modelist = kmalloc(sizeof(modelist[0]) * ADF_MAX_MODES, GFP_KERNEL); + if (!modelist) + return -ENOMEM; + + mutex_lock(&dev->client_lock); + read_lock_irqsave(&intf->hotplug_modelist_lock, flags); + data.hotplug_detect = intf->hotplug_detect; + modelist_size = min(data.n_available_modes, intf->n_modes) * + sizeof(intf->modelist[0]); + memcpy(modelist, intf->modelist, modelist_size); + data.n_available_modes = intf->n_modes; + read_unlock_irqrestore(&intf->hotplug_modelist_lock, flags); + + if (copy_to_user(arg->available_modes, modelist, modelist_size)) { + ret = -EFAULT; + goto done; + } + + data.dpms_state = intf->dpms_state; + memcpy(&data.current_mode, &intf->current_mode, + sizeof(intf->current_mode)); + + ret = adf_obj_copy_custom_data_to_user(&intf->base, arg->custom_data, + &data.custom_data_size); +done: + mutex_unlock(&dev->client_lock); + kfree(modelist); + + if (ret < 0) + return ret; + + if (copy_to_user(arg, &data, sizeof(data))) + ret = -EFAULT; + + return ret; +} + +static inline long adf_obj_custom_ioctl(struct adf_obj *obj, unsigned int cmd, + unsigned long arg) +{ + if (obj->ops && obj->ops->ioctl) + return obj->ops->ioctl(obj, cmd, arg); + return -ENOTTY; +} + +static long 
adf_overlay_engine_ioctl(struct adf_overlay_engine *eng, + struct adf_file *file, unsigned int cmd, unsigned long arg) +{ + switch (cmd) { + case ADF_SET_EVENT: + return adf_obj_set_event(&eng->base, file, + (struct adf_set_event __user *)arg); + + case ADF_GET_OVERLAY_ENGINE_DATA: + return adf_eng_get_data(eng, + (struct adf_overlay_engine_data __user *)arg); + + case ADF_BLANK: + case ADF_POST_CONFIG: + case ADF_SET_MODE: + case ADF_GET_DEVICE_DATA: + case ADF_GET_INTERFACE_DATA: + case ADF_SIMPLE_POST_CONFIG: + case ADF_SIMPLE_BUFFER_ALLOC: + case ADF_ATTACH: + case ADF_DETACH: + return -EINVAL; + + default: + return adf_obj_custom_ioctl(&eng->base, cmd, arg); + } +} + +static long adf_interface_ioctl(struct adf_interface *intf, + struct adf_file *file, unsigned int cmd, unsigned long arg) +{ + switch (cmd) { + case ADF_SET_EVENT: + return adf_obj_set_event(&intf->base, file, + (struct adf_set_event __user *)arg); + + case ADF_BLANK: + return adf_interface_blank(intf, arg); + + case ADF_SET_MODE: + return adf_intf_set_mode(intf, + (struct drm_mode_modeinfo __user *)arg); + + case ADF_GET_INTERFACE_DATA: + return adf_intf_get_data(intf, + (struct adf_interface_data __user *)arg); + + case ADF_SIMPLE_POST_CONFIG: + return adf_intf_simple_post_config(intf, + (struct adf_simple_post_config __user *)arg); + + case ADF_SIMPLE_BUFFER_ALLOC: + return adf_intf_simple_buffer_alloc(intf, + (struct adf_simple_buffer_alloc __user *)arg); + + case ADF_POST_CONFIG: + case ADF_GET_DEVICE_DATA: + case ADF_GET_OVERLAY_ENGINE_DATA: + case ADF_ATTACH: + case ADF_DETACH: + return -EINVAL; + + default: + return adf_obj_custom_ioctl(&intf->base, cmd, arg); + } +} + +static long adf_device_ioctl(struct adf_device *dev, struct adf_file *file, + unsigned int cmd, unsigned long arg) +{ + switch (cmd) { + case ADF_SET_EVENT: + return adf_obj_set_event(&dev->base, file, + (struct adf_set_event __user *)arg); + + case ADF_POST_CONFIG: + return adf_device_post_config(dev, + (struct adf_post_config __user *)arg); + + case ADF_GET_DEVICE_DATA: + return adf_device_get_data(dev, + (struct adf_device_data __user *)arg); + + case ADF_ATTACH: + return adf_device_handle_attachment(dev, + (struct adf_attachment_config __user *)arg, + true); + + case ADF_DETACH: + return adf_device_handle_attachment(dev, + (struct adf_attachment_config __user *)arg, + false); + + case ADF_BLANK: + case ADF_SET_MODE: + case ADF_GET_INTERFACE_DATA: + case ADF_GET_OVERLAY_ENGINE_DATA: + case ADF_SIMPLE_POST_CONFIG: + case ADF_SIMPLE_BUFFER_ALLOC: + return -EINVAL; + + default: + return adf_obj_custom_ioctl(&dev->base, cmd, arg); + } +} + +static int adf_file_open(struct inode *inode, struct file *file) +{ + struct adf_obj *obj; + struct adf_file *fpriv = NULL; + unsigned long flags; + int ret = 0; + + obj = adf_obj_sysfs_find(iminor(inode)); + if (!obj) + return -ENODEV; + + dev_dbg(&obj->dev, "opening %s\n", dev_name(&obj->dev)); + + if (!try_module_get(obj->parent->ops->owner)) { + dev_err(&obj->dev, "getting owner module failed\n"); + return -ENODEV; + } + + fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL); + if (!fpriv) { + ret = -ENOMEM; + goto done; + } + + INIT_LIST_HEAD(&fpriv->head); + fpriv->obj = obj; + init_waitqueue_head(&fpriv->event_wait); + + file->private_data = fpriv; + + if (obj->ops && obj->ops->open) { + ret = obj->ops->open(obj, inode, file); + if (ret < 0) + goto done; + } + + spin_lock_irqsave(&obj->file_lock, flags); + list_add_tail(&fpriv->head, &obj->file_list); + spin_unlock_irqrestore(&obj->file_lock, flags); + +done: + if 
(ret < 0) { + kfree(fpriv); + module_put(obj->parent->ops->owner); + } + return ret; +} + +static int adf_file_release(struct inode *inode, struct file *file) +{ + struct adf_file *fpriv = file->private_data; + struct adf_obj *obj = fpriv->obj; + enum adf_event_type event_type; + unsigned long flags; + + if (obj->ops && obj->ops->release) + obj->ops->release(obj, inode, file); + + spin_lock_irqsave(&obj->file_lock, flags); + list_del(&fpriv->head); + spin_unlock_irqrestore(&obj->file_lock, flags); + + for_each_set_bit(event_type, fpriv->event_subscriptions, + ADF_EVENT_TYPE_MAX) { + adf_event_put(obj, event_type); + } + + kfree(fpriv); + module_put(obj->parent->ops->owner); + + dev_dbg(&obj->dev, "released %s\n", dev_name(&obj->dev)); + return 0; +} + +long adf_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + struct adf_file *fpriv = file->private_data; + struct adf_obj *obj = fpriv->obj; + long ret = -EINVAL; + + dev_dbg(&obj->dev, "%s ioctl %u\n", dev_name(&obj->dev), _IOC_NR(cmd)); + + switch (obj->type) { + case ADF_OBJ_OVERLAY_ENGINE: + ret = adf_overlay_engine_ioctl(adf_obj_to_overlay_engine(obj), + fpriv, cmd, arg); + break; + + case ADF_OBJ_INTERFACE: + ret = adf_interface_ioctl(adf_obj_to_interface(obj), fpriv, cmd, + arg); + break; + + case ADF_OBJ_DEVICE: + ret = adf_device_ioctl(adf_obj_to_device(obj), fpriv, cmd, arg); + break; + } + + return ret; +} + +static inline bool adf_file_event_available(struct adf_file *fpriv) +{ + int head = fpriv->event_head; + int tail = fpriv->event_tail; + return CIRC_CNT(head, tail, sizeof(fpriv->event_buf)) != 0; +} + +void adf_file_queue_event(struct adf_file *fpriv, struct adf_event *event) +{ + int head = fpriv->event_head; + int tail = fpriv->event_tail; + size_t space = CIRC_SPACE(head, tail, sizeof(fpriv->event_buf)); + size_t space_to_end = + CIRC_SPACE_TO_END(head, tail, sizeof(fpriv->event_buf)); + + if (space < event->length) { + dev_dbg(&fpriv->obj->dev, + "insufficient buffer space for event %u\n", + event->type); + return; + } + + if (space_to_end >= event->length) { + memcpy(fpriv->event_buf + head, event, event->length); + } else { + memcpy(fpriv->event_buf + head, event, space_to_end); + memcpy(fpriv->event_buf, (u8 *)event + space_to_end, + event->length - space_to_end); + } + + smp_wmb(); + fpriv->event_head = (fpriv->event_head + event->length) & + (sizeof(fpriv->event_buf) - 1); + wake_up_interruptible_all(&fpriv->event_wait); +} + +static ssize_t adf_file_copy_to_user(struct adf_file *fpriv, + char __user *buffer, size_t buffer_size) +{ + int head, tail; + u8 *event_buf; + size_t cnt, cnt_to_end, copy_size = 0; + ssize_t ret = 0; + unsigned long flags; + + event_buf = kmalloc(min(buffer_size, sizeof(fpriv->event_buf)), + GFP_KERNEL); + if (!event_buf) + return -ENOMEM; + + spin_lock_irqsave(&fpriv->obj->file_lock, flags); + + if (!adf_file_event_available(fpriv)) + goto out; + + head = fpriv->event_head; + tail = fpriv->event_tail; + + cnt = CIRC_CNT(head, tail, sizeof(fpriv->event_buf)); + cnt_to_end = CIRC_CNT_TO_END(head, tail, sizeof(fpriv->event_buf)); + copy_size = min(buffer_size, cnt); + + if (cnt_to_end >= copy_size) { + memcpy(event_buf, fpriv->event_buf + tail, copy_size); + } else { + memcpy(event_buf, fpriv->event_buf + tail, cnt_to_end); + memcpy(event_buf + cnt_to_end, fpriv->event_buf, + copy_size - cnt_to_end); + } + + fpriv->event_tail = (fpriv->event_tail + copy_size) & + (sizeof(fpriv->event_buf) - 1); + +out: + spin_unlock_irqrestore(&fpriv->obj->file_lock, flags); + if 
(copy_size) { + if (copy_to_user(buffer, event_buf, copy_size)) + ret = -EFAULT; + else + ret = copy_size; + } + kfree(event_buf); + return ret; +} + +ssize_t adf_file_read(struct file *filp, char __user *buffer, + size_t count, loff_t *offset) +{ + struct adf_file *fpriv = filp->private_data; + int err; + + err = wait_event_interruptible(fpriv->event_wait, + adf_file_event_available(fpriv)); + if (err < 0) + return err; + + return adf_file_copy_to_user(fpriv, buffer, count); +} + +unsigned int adf_file_poll(struct file *filp, struct poll_table_struct *wait) +{ + struct adf_file *fpriv = filp->private_data; + unsigned int mask = 0; + + poll_wait(filp, &fpriv->event_wait, wait); + + if (adf_file_event_available(fpriv)) + mask |= POLLIN | POLLRDNORM; + + return mask; +} + +const struct file_operations adf_fops = { + .owner = THIS_MODULE, + .unlocked_ioctl = adf_file_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = adf_file_compat_ioctl, +#endif + .open = adf_file_open, + .release = adf_file_release, + .llseek = default_llseek, + .read = adf_file_read, + .poll = adf_file_poll, +}; diff --git a/drivers/video/adf/adf_fops.h b/drivers/video/adf/adf_fops.h new file mode 100644 index 00000000000..90a3a74796d --- /dev/null +++ b/drivers/video/adf/adf_fops.h @@ -0,0 +1,37 @@ +/* + * Copyright (C) 2013 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef __VIDEO_ADF_ADF_FOPS_H +#define __VIDEO_ADF_ADF_FOPS_H + +#include <linux/bitmap.h> +#include <linux/fs.h> + +extern const struct file_operations adf_fops; + +struct adf_file { + struct list_head head; + struct adf_obj *obj; + + DECLARE_BITMAP(event_subscriptions, ADF_EVENT_TYPE_MAX); + u8 event_buf[4096]; + int event_head; + int event_tail; + wait_queue_head_t event_wait; +}; + +void adf_file_queue_event(struct adf_file *file, struct adf_event *event); +long adf_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg); + +#endif /* __VIDEO_ADF_ADF_FOPS_H */ diff --git a/drivers/video/adf/adf_fops32.c b/drivers/video/adf/adf_fops32.c new file mode 100644 index 00000000000..d299a816149 --- /dev/null +++ b/drivers/video/adf/adf_fops32.c @@ -0,0 +1,217 @@ +/* + * Copyright (C) 2013 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include <linux/uaccess.h> +#include <video/adf.h> + +#include "adf_fops.h" +#include "adf_fops32.h" + +long adf_compat_post_config(struct file *file, + struct adf_post_config32 __user *arg) +{ + struct adf_post_config32 cfg32; + struct adf_post_config __user *cfg; + int ret; + + if (copy_from_user(&cfg32, arg, sizeof(cfg32))) + return -EFAULT; + + cfg = compat_alloc_user_space(sizeof(*cfg)); + if (!access_ok(VERIFY_WRITE, cfg, sizeof(*cfg))) + return -EFAULT; + + if (put_user(cfg32.n_interfaces, &cfg->n_interfaces) || + put_user(compat_ptr(cfg32.interfaces), + &cfg->interfaces) || + put_user(cfg32.n_bufs, &cfg->n_bufs) || + put_user(compat_ptr(cfg32.bufs), &cfg->bufs) || + put_user(cfg32.custom_data_size, + &cfg->custom_data_size) || + put_user(compat_ptr(cfg32.custom_data), + &cfg->custom_data)) + return -EFAULT; + + ret = adf_file_ioctl(file, ADF_POST_CONFIG, (unsigned long)cfg); + if (ret < 0) + return ret; + + if (copy_in_user(&arg->complete_fence, &cfg->complete_fence, + sizeof(cfg->complete_fence))) + return -EFAULT; + + return 0; +} + +long adf_compat_get_device_data(struct file *file, + struct adf_device_data32 __user *arg) +{ + struct adf_device_data32 data32; + struct adf_device_data __user *data; + int ret; + + if (copy_from_user(&data32, arg, sizeof(data32))) + return -EFAULT; + + data = compat_alloc_user_space(sizeof(*data)); + if (!access_ok(VERIFY_WRITE, data, sizeof(*data))) + return -EFAULT; + + if (put_user(data32.n_attachments, &data->n_attachments) || + put_user(compat_ptr(data32.attachments), + &data->attachments) || + put_user(data32.n_allowed_attachments, + &data->n_allowed_attachments) || + put_user(compat_ptr(data32.allowed_attachments), + &data->allowed_attachments) || + put_user(data32.custom_data_size, + &data->custom_data_size) || + put_user(compat_ptr(data32.custom_data), + &data->custom_data)) + return -EFAULT; + + ret = adf_file_ioctl(file, ADF_GET_DEVICE_DATA, (unsigned long)data); + if (ret < 0) + return ret; + + if (copy_in_user(arg->name, data->name, sizeof(arg->name)) || + copy_in_user(&arg->n_attachments, &data->n_attachments, + sizeof(arg->n_attachments)) || + copy_in_user(&arg->n_allowed_attachments, + &data->n_allowed_attachments, + sizeof(arg->n_allowed_attachments)) || + copy_in_user(&arg->custom_data_size, + &data->custom_data_size, + sizeof(arg->custom_data_size))) + return -EFAULT; + + return 0; +} + +long adf_compat_get_interface_data(struct file *file, + struct adf_interface_data32 __user *arg) +{ + struct adf_interface_data32 data32; + struct adf_interface_data __user *data; + int ret; + + if (copy_from_user(&data32, arg, sizeof(data32))) + return -EFAULT; + + data = compat_alloc_user_space(sizeof(*data)); + if (!access_ok(VERIFY_WRITE, data, sizeof(*data))) + return -EFAULT; + + if (put_user(data32.n_available_modes, &data->n_available_modes) || + put_user(compat_ptr(data32.available_modes), + &data->available_modes) || + put_user(data32.custom_data_size, + &data->custom_data_size) || + put_user(compat_ptr(data32.custom_data), + &data->custom_data)) + return -EFAULT; + + ret = adf_file_ioctl(file, ADF_GET_INTERFACE_DATA, (unsigned long)data); + if (ret < 0) + return ret; + + if (copy_in_user(arg->name, data->name, sizeof(arg->name)) || + copy_in_user(&arg->type, &data->type, + sizeof(arg->type)) || + copy_in_user(&arg->id, &data->id, sizeof(arg->id)) || + copy_in_user(&arg->flags, &data->flags, + sizeof(arg->flags)) || + copy_in_user(&arg->dpms_state, &data->dpms_state, + sizeof(arg->dpms_state)) || + 
copy_in_user(&arg->hotplug_detect, + &data->hotplug_detect, + sizeof(arg->hotplug_detect)) || + copy_in_user(&arg->width_mm, &data->width_mm, + sizeof(arg->width_mm)) || + copy_in_user(&arg->height_mm, &data->height_mm, + sizeof(arg->height_mm)) || + copy_in_user(&arg->current_mode, &data->current_mode, + sizeof(arg->current_mode)) || + copy_in_user(&arg->n_available_modes, + &data->n_available_modes, + sizeof(arg->n_available_modes)) || + copy_in_user(&arg->custom_data_size, + &data->custom_data_size, + sizeof(arg->custom_data_size))) + return -EFAULT; + + return 0; +} + +long adf_compat_get_overlay_engine_data(struct file *file, + struct adf_overlay_engine_data32 __user *arg) +{ + struct adf_overlay_engine_data32 data32; + struct adf_overlay_engine_data __user *data; + int ret; + + if (copy_from_user(&data32, arg, sizeof(data32))) + return -EFAULT; + + data = compat_alloc_user_space(sizeof(*data)); + if (!access_ok(VERIFY_WRITE, data, sizeof(*data))) + return -EFAULT; + + if (put_user(data32.n_supported_formats, &data->n_supported_formats) || + put_user(compat_ptr(data32.supported_formats), + &data->supported_formats) || + put_user(data32.custom_data_size, + &data->custom_data_size) || + put_user(compat_ptr(data32.custom_data), + &data->custom_data)) + return -EFAULT; + + ret = adf_file_ioctl(file, ADF_GET_OVERLAY_ENGINE_DATA, + (unsigned long)data); + if (ret < 0) + return ret; + + if (copy_in_user(arg->name, data->name, sizeof(arg->name)) || + copy_in_user(&arg->n_supported_formats, + &data->n_supported_formats, + sizeof(arg->n_supported_formats)) || + copy_in_user(&arg->custom_data_size, + &data->custom_data_size, + sizeof(arg->custom_data_size))) + return -EFAULT; + + return 0; +} + +long adf_file_compat_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + switch (cmd) { + case ADF_POST_CONFIG32: + return adf_compat_post_config(file, compat_ptr(arg)); + + case ADF_GET_DEVICE_DATA32: + return adf_compat_get_device_data(file, compat_ptr(arg)); + + case ADF_GET_INTERFACE_DATA32: + return adf_compat_get_interface_data(file, compat_ptr(arg)); + + case ADF_GET_OVERLAY_ENGINE_DATA32: + return adf_compat_get_overlay_engine_data(file, + compat_ptr(arg)); + + default: + return adf_file_ioctl(file, cmd, arg); + } +} diff --git a/drivers/video/adf/adf_fops32.h b/drivers/video/adf/adf_fops32.h new file mode 100644 index 00000000000..53d43f01020 --- /dev/null +++ b/drivers/video/adf/adf_fops32.h @@ -0,0 +1,78 @@ +#ifndef __VIDEO_ADF_ADF_FOPS32_H +#define __VIDEO_ADF_ADF_FOPS32_H + +#include <linux/compat.h> +#include <linux/ioctl.h> + +#include <video/adf.h> + +#define ADF_POST_CONFIG32 \ + _IOW(ADF_IOCTL_TYPE, 2, struct adf_post_config32) +#define ADF_GET_DEVICE_DATA32 \ + _IOR(ADF_IOCTL_TYPE, 4, struct adf_device_data32) +#define ADF_GET_INTERFACE_DATA32 \ + _IOR(ADF_IOCTL_TYPE, 5, struct adf_interface_data32) +#define ADF_GET_OVERLAY_ENGINE_DATA32 \ + _IOR(ADF_IOCTL_TYPE, 6, struct adf_overlay_engine_data32) + +struct adf_post_config32 { + compat_size_t n_interfaces; + compat_uptr_t interfaces; + + compat_size_t n_bufs; + compat_uptr_t bufs; + + compat_size_t custom_data_size; + compat_uptr_t custom_data; + + __s64 complete_fence; +}; + +struct adf_device_data32 { + char name[ADF_NAME_LEN]; + + compat_size_t n_attachments; + compat_uptr_t attachments; + + compat_size_t n_allowed_attachments; + compat_uptr_t allowed_attachments; + + compat_size_t custom_data_size; + compat_uptr_t custom_data; +}; + +struct adf_interface_data32 { + char name[ADF_NAME_LEN]; + + __u8 type; + __u32 
id; + /* e.g. type=ADF_INTF_TYPE_DSI, id=1 => DSI.1 */ + __u32 flags; + + __u8 dpms_state; + __u8 hotplug_detect; + __u16 width_mm; + __u16 height_mm; + + struct drm_mode_modeinfo current_mode; + compat_size_t n_available_modes; + compat_uptr_t available_modes; + + compat_size_t custom_data_size; + compat_uptr_t custom_data; +}; + +struct adf_overlay_engine_data32 { + char name[ADF_NAME_LEN]; + + compat_size_t n_supported_formats; + compat_uptr_t supported_formats; + + compat_size_t custom_data_size; + compat_uptr_t custom_data; +}; + +long adf_file_compat_ioctl(struct file *file, unsigned int cmd, + unsigned long arg); + +#endif /* __VIDEO_ADF_ADF_FOPS32_H */ diff --git a/drivers/video/adf/adf_format.c b/drivers/video/adf/adf_format.c new file mode 100644 index 00000000000..e3f22c7c85d --- /dev/null +++ b/drivers/video/adf/adf_format.c @@ -0,0 +1,280 @@ +/* + * Copyright (C) 2013 Google, Inc. + * modified from drivers/gpu/drm/drm_crtc.c + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include <linux/export.h> +#include <linux/kernel.h> +#include <drm/drm_fourcc.h> +#include <video/adf_format.h> + +bool adf_format_is_standard(u32 format) +{ + switch (format) { + case DRM_FORMAT_C8: + case DRM_FORMAT_RGB332: + case DRM_FORMAT_BGR233: + case DRM_FORMAT_XRGB4444: + case DRM_FORMAT_XBGR4444: + case DRM_FORMAT_RGBX4444: + case DRM_FORMAT_BGRX4444: + case DRM_FORMAT_ARGB4444: + case DRM_FORMAT_ABGR4444: + case DRM_FORMAT_RGBA4444: + case DRM_FORMAT_BGRA4444: + case DRM_FORMAT_XRGB1555: + case DRM_FORMAT_XBGR1555: + case DRM_FORMAT_RGBX5551: + case DRM_FORMAT_BGRX5551: + case DRM_FORMAT_ARGB1555: + case DRM_FORMAT_ABGR1555: + case DRM_FORMAT_RGBA5551: + case DRM_FORMAT_BGRA5551: + case DRM_FORMAT_RGB565: + case DRM_FORMAT_BGR565: + case DRM_FORMAT_RGB888: + case DRM_FORMAT_BGR888: + case DRM_FORMAT_XRGB8888: + case DRM_FORMAT_XBGR8888: + case DRM_FORMAT_RGBX8888: + case DRM_FORMAT_BGRX8888: + case DRM_FORMAT_ARGB8888: + case DRM_FORMAT_ABGR8888: + case DRM_FORMAT_RGBA8888: + case DRM_FORMAT_BGRA8888: + case DRM_FORMAT_XRGB2101010: + case DRM_FORMAT_XBGR2101010: + case DRM_FORMAT_RGBX1010102: + case DRM_FORMAT_BGRX1010102: + case DRM_FORMAT_ARGB2101010: + case DRM_FORMAT_ABGR2101010: + case DRM_FORMAT_RGBA1010102: + case DRM_FORMAT_BGRA1010102: + case DRM_FORMAT_YUYV: + case DRM_FORMAT_YVYU: + case DRM_FORMAT_UYVY: + case DRM_FORMAT_VYUY: + case DRM_FORMAT_AYUV: + case DRM_FORMAT_NV12: + case DRM_FORMAT_NV21: + case DRM_FORMAT_NV16: + case DRM_FORMAT_NV61: + case DRM_FORMAT_YUV410: + case DRM_FORMAT_YVU410: + case DRM_FORMAT_YUV411: + case DRM_FORMAT_YVU411: + case DRM_FORMAT_YUV420: + case DRM_FORMAT_YVU420: + case DRM_FORMAT_YUV422: + case DRM_FORMAT_YVU422: + case DRM_FORMAT_YUV444: + case DRM_FORMAT_YVU444: + return true; + default: + return false; + } +} +EXPORT_SYMBOL(adf_format_is_standard); + +bool adf_format_is_rgb(u32 format) +{ + switch (format) { + case DRM_FORMAT_C8: + case DRM_FORMAT_RGB332: + case DRM_FORMAT_BGR233: + case DRM_FORMAT_XRGB1555: + case DRM_FORMAT_XBGR1555: + case DRM_FORMAT_RGBX5551: + case DRM_FORMAT_BGRX5551: + case DRM_FORMAT_ARGB1555: + case 
DRM_FORMAT_ABGR1555: + case DRM_FORMAT_RGBA5551: + case DRM_FORMAT_BGRA5551: + case DRM_FORMAT_RGB565: + case DRM_FORMAT_BGR565: + case DRM_FORMAT_RGB888: + case DRM_FORMAT_BGR888: + case DRM_FORMAT_XRGB8888: + case DRM_FORMAT_XBGR8888: + case DRM_FORMAT_RGBX8888: + case DRM_FORMAT_BGRX8888: + case DRM_FORMAT_XRGB2101010: + case DRM_FORMAT_XBGR2101010: + case DRM_FORMAT_RGBX1010102: + case DRM_FORMAT_BGRX1010102: + case DRM_FORMAT_ARGB2101010: + case DRM_FORMAT_ABGR2101010: + case DRM_FORMAT_RGBA1010102: + case DRM_FORMAT_BGRA1010102: + case DRM_FORMAT_ARGB8888: + case DRM_FORMAT_ABGR8888: + case DRM_FORMAT_RGBA8888: + case DRM_FORMAT_BGRA8888: + return true; + + default: + return false; + } +} +EXPORT_SYMBOL(adf_format_is_rgb); + +u8 adf_format_num_planes(u32 format) +{ + switch (format) { + case DRM_FORMAT_YUV410: + case DRM_FORMAT_YVU410: + case DRM_FORMAT_YUV411: + case DRM_FORMAT_YVU411: + case DRM_FORMAT_YUV420: + case DRM_FORMAT_YVU420: + case DRM_FORMAT_YUV422: + case DRM_FORMAT_YVU422: + case DRM_FORMAT_YUV444: + case DRM_FORMAT_YVU444: + return 3; + case DRM_FORMAT_NV12: + case DRM_FORMAT_NV21: + case DRM_FORMAT_NV16: + case DRM_FORMAT_NV61: + return 2; + default: + return 1; + } +} +EXPORT_SYMBOL(adf_format_num_planes); + +u8 adf_format_bpp(u32 format) +{ + switch (format) { + case DRM_FORMAT_C8: + case DRM_FORMAT_RGB332: + case DRM_FORMAT_BGR233: + return 8; + + case DRM_FORMAT_XRGB1555: + case DRM_FORMAT_XBGR1555: + case DRM_FORMAT_RGBX5551: + case DRM_FORMAT_BGRX5551: + case DRM_FORMAT_ARGB1555: + case DRM_FORMAT_ABGR1555: + case DRM_FORMAT_RGBA5551: + case DRM_FORMAT_BGRA5551: + case DRM_FORMAT_RGB565: + case DRM_FORMAT_BGR565: + return 16; + + case DRM_FORMAT_RGB888: + case DRM_FORMAT_BGR888: + return 24; + + case DRM_FORMAT_XRGB8888: + case DRM_FORMAT_XBGR8888: + case DRM_FORMAT_RGBX8888: + case DRM_FORMAT_BGRX8888: + case DRM_FORMAT_XRGB2101010: + case DRM_FORMAT_XBGR2101010: + case DRM_FORMAT_RGBX1010102: + case DRM_FORMAT_BGRX1010102: + case DRM_FORMAT_ARGB2101010: + case DRM_FORMAT_ABGR2101010: + case DRM_FORMAT_RGBA1010102: + case DRM_FORMAT_BGRA1010102: + case DRM_FORMAT_ARGB8888: + case DRM_FORMAT_ABGR8888: + case DRM_FORMAT_RGBA8888: + case DRM_FORMAT_BGRA8888: + return 32; + + default: + pr_debug("%s: unsupported pixel format %u\n", __func__, format); + return 0; + } +} +EXPORT_SYMBOL(adf_format_bpp); + +u8 adf_format_plane_cpp(u32 format, int plane) +{ + if (plane >= adf_format_num_planes(format)) + return 0; + + switch (format) { + case DRM_FORMAT_YUYV: + case DRM_FORMAT_YVYU: + case DRM_FORMAT_UYVY: + case DRM_FORMAT_VYUY: + return 2; + case DRM_FORMAT_NV12: + case DRM_FORMAT_NV21: + case DRM_FORMAT_NV16: + case DRM_FORMAT_NV61: + return plane ? 
2 : 1; + case DRM_FORMAT_YUV410: + case DRM_FORMAT_YVU410: + case DRM_FORMAT_YUV411: + case DRM_FORMAT_YVU411: + case DRM_FORMAT_YUV420: + case DRM_FORMAT_YVU420: + case DRM_FORMAT_YUV422: + case DRM_FORMAT_YVU422: + case DRM_FORMAT_YUV444: + case DRM_FORMAT_YVU444: + return 1; + default: + return adf_format_bpp(format) / 8; + } +} +EXPORT_SYMBOL(adf_format_plane_cpp); + +u8 adf_format_horz_chroma_subsampling(u32 format) +{ + switch (format) { + case DRM_FORMAT_YUV411: + case DRM_FORMAT_YVU411: + case DRM_FORMAT_YUV410: + case DRM_FORMAT_YVU410: + return 4; + case DRM_FORMAT_YUYV: + case DRM_FORMAT_YVYU: + case DRM_FORMAT_UYVY: + case DRM_FORMAT_VYUY: + case DRM_FORMAT_NV12: + case DRM_FORMAT_NV21: + case DRM_FORMAT_NV16: + case DRM_FORMAT_NV61: + case DRM_FORMAT_YUV422: + case DRM_FORMAT_YVU422: + case DRM_FORMAT_YUV420: + case DRM_FORMAT_YVU420: + return 2; + default: + return 1; + } +} +EXPORT_SYMBOL(adf_format_horz_chroma_subsampling); + +u8 adf_format_vert_chroma_subsampling(u32 format) +{ + switch (format) { + case DRM_FORMAT_YUV410: + case DRM_FORMAT_YVU410: + return 4; + case DRM_FORMAT_YUV420: + case DRM_FORMAT_YVU420: + case DRM_FORMAT_NV12: + case DRM_FORMAT_NV21: + return 2; + default: + return 1; + } +} +EXPORT_SYMBOL(adf_format_vert_chroma_subsampling); diff --git a/drivers/video/adf/adf_memblock.c b/drivers/video/adf/adf_memblock.c new file mode 100644 index 00000000000..3c99f27388d --- /dev/null +++ b/drivers/video/adf/adf_memblock.c @@ -0,0 +1,149 @@ +/* + * Copyright (C) 2013 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include <linux/dma-buf.h> +#include <linux/highmem.h> +#include <linux/memblock.h> +#include <linux/slab.h> + +struct adf_memblock_pdata { + phys_addr_t base; +}; + +static struct sg_table *adf_memblock_map(struct dma_buf_attachment *attach, + enum dma_data_direction direction) +{ + struct adf_memblock_pdata *pdata = attach->dmabuf->priv; + unsigned long pfn = PFN_DOWN(pdata->base); + struct page *page = pfn_to_page(pfn); + struct sg_table *table; + int ret; + + table = kzalloc(sizeof(*table), GFP_KERNEL); + if (!table) + return ERR_PTR(-ENOMEM); + + ret = sg_alloc_table(table, 1, GFP_KERNEL); + if (ret < 0) + goto err; + + sg_set_page(table->sgl, page, attach->dmabuf->size, 0); + return table; + +err: + kfree(table); + return ERR_PTR(ret); +} + +static void adf_memblock_unmap(struct dma_buf_attachment *attach, + struct sg_table *table, enum dma_data_direction direction) +{ + sg_free_table(table); +} + +static void __init_memblock adf_memblock_release(struct dma_buf *buf) +{ + struct adf_memblock_pdata *pdata = buf->priv; + int err = memblock_free(pdata->base, buf->size); + + if (err < 0) + pr_warn("%s: freeing memblock failed: %d\n", __func__, err); + kfree(pdata); +} + +static void *adf_memblock_do_kmap(struct dma_buf *buf, unsigned long pgoffset, + bool atomic) +{ + struct adf_memblock_pdata *pdata = buf->priv; + unsigned long pfn = PFN_DOWN(pdata->base) + pgoffset; + struct page *page = pfn_to_page(pfn); + + if (atomic) + return kmap_atomic(page); + else + return kmap(page); +} + +static void *adf_memblock_kmap_atomic(struct dma_buf *buf, + unsigned long pgoffset) +{ + return adf_memblock_do_kmap(buf, pgoffset, true); +} + +static void adf_memblock_kunmap_atomic(struct dma_buf *buf, + unsigned long pgoffset, void *vaddr) +{ + kunmap_atomic(vaddr); +} + +static void *adf_memblock_kmap(struct dma_buf *buf, unsigned long pgoffset) +{ + return adf_memblock_do_kmap(buf, pgoffset, false); +} + +static void adf_memblock_kunmap(struct dma_buf *buf, unsigned long pgoffset, + void *vaddr) +{ + struct adf_memblock_pdata *pdata = buf->priv; + + /* kunmap() takes the struct page that was mapped, not the kernel vaddr */ + kunmap(pfn_to_page(PFN_DOWN(pdata->base) + pgoffset)); +} + +static int adf_memblock_mmap(struct dma_buf *buf, struct vm_area_struct *vma) +{ + struct adf_memblock_pdata *pdata = buf->priv; + + return remap_pfn_range(vma, vma->vm_start, PFN_DOWN(pdata->base), + vma->vm_end - vma->vm_start, vma->vm_page_prot); +} + +struct dma_buf_ops adf_memblock_ops = { + .map_dma_buf = adf_memblock_map, + .unmap_dma_buf = adf_memblock_unmap, + .release = adf_memblock_release, + .kmap_atomic = adf_memblock_kmap_atomic, + .kunmap_atomic = adf_memblock_kunmap_atomic, + .kmap = adf_memblock_kmap, + .kunmap = adf_memblock_kunmap, + .mmap = adf_memblock_mmap, +}; + +/** + * adf_memblock_export - export a memblock reserved area as a dma-buf + * + * @base: base physical address + * @size: memblock size + * @flags: mode flags for the dma-buf's file + * + * @base and @size must be page-aligned. + * + * Returns a dma-buf on success or ERR_PTR(-errno) on failure.
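+ *
+ * Example sketch (illustrative only; the memblock_alloc() call, the 8 MiB
+ * size and the O_RDWR mode are assumptions, not part of this patch):
+ *
+ *	phys_addr_t base = memblock_alloc(SZ_8M, PAGE_SIZE);
+ *	struct dma_buf *buf = adf_memblock_export(base, SZ_8M, O_RDWR);
+ *
+ *	if (IS_ERR(buf))
+ *		return PTR_ERR(buf);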
+ */ +struct dma_buf *adf_memblock_export(phys_addr_t base, size_t size, int flags) +{ + struct adf_memblock_pdata *pdata; + struct dma_buf *buf; + + if (PAGE_ALIGN(base) != base || PAGE_ALIGN(size) != size) + return ERR_PTR(-EINVAL); + + pdata = kzalloc(sizeof(*pdata), GFP_KERNEL); + if (!pdata) + return ERR_PTR(-ENOMEM); + + pdata->base = base; + buf = dma_buf_export(pdata, &adf_memblock_ops, size, flags); + if (IS_ERR(buf)) + kfree(pdata); + + return buf; +} diff --git a/drivers/video/adf/adf_sysfs.c b/drivers/video/adf/adf_sysfs.c new file mode 100644 index 00000000000..8c659c71ffa --- /dev/null +++ b/drivers/video/adf/adf_sysfs.c @@ -0,0 +1,296 @@ +/* + * Copyright (C) 2013 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include <video/adf_client.h> + +#include "adf.h" +#include "adf_fops.h" +#include "adf_sysfs.h" + +static struct class *adf_class; +static int adf_major; +static DEFINE_IDR(adf_minors); + +#define dev_to_adf_interface(p) \ + adf_obj_to_interface(container_of(p, struct adf_obj, dev)) + +static ssize_t dpms_state_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct adf_interface *intf = dev_to_adf_interface(dev); + return scnprintf(buf, PAGE_SIZE, "%u\n", + adf_interface_dpms_state(intf)); +} + +static ssize_t dpms_state_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct adf_interface *intf = dev_to_adf_interface(dev); + u8 dpms_state; + int err; + + err = kstrtou8(buf, 0, &dpms_state); + if (err < 0) + return err; + + err = adf_interface_blank(intf, dpms_state); + if (err < 0) + return err; + + return count; +} + +static ssize_t current_mode_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct adf_interface *intf = dev_to_adf_interface(dev); + struct drm_mode_modeinfo mode; + + adf_interface_current_mode(intf, &mode); + + if (mode.name[0]) { + return scnprintf(buf, PAGE_SIZE, "%s\n", mode.name); + } else { + bool interlaced = !!(mode.flags & DRM_MODE_FLAG_INTERLACE); + return scnprintf(buf, PAGE_SIZE, "%ux%u%s\n", mode.hdisplay, + mode.vdisplay, interlaced ? 
"i" : ""); + } +} + +static ssize_t type_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct adf_interface *intf = dev_to_adf_interface(dev); + return scnprintf(buf, PAGE_SIZE, "%s\n", + adf_interface_type_str(intf)); +} + +static ssize_t vsync_timestamp_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct adf_interface *intf = dev_to_adf_interface(dev); + ktime_t timestamp; + unsigned long flags; + + read_lock_irqsave(&intf->vsync_lock, flags); + memcpy(×tamp, &intf->vsync_timestamp, sizeof(timestamp)); + read_unlock_irqrestore(&intf->vsync_lock, flags); + + return scnprintf(buf, PAGE_SIZE, "%llu\n", ktime_to_ns(timestamp)); +} + +static ssize_t hotplug_detect_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct adf_interface *intf = dev_to_adf_interface(dev); + return scnprintf(buf, PAGE_SIZE, "%u\n", intf->hotplug_detect); +} + +static struct device_attribute adf_interface_attrs[] = { + __ATTR(dpms_state, S_IRUGO|S_IWUSR, dpms_state_show, dpms_state_store), + __ATTR_RO(current_mode), + __ATTR_RO(hotplug_detect), + __ATTR_RO(type), + __ATTR_RO(vsync_timestamp), +}; + +int adf_obj_sysfs_init(struct adf_obj *obj, struct device *parent) +{ + int ret = idr_alloc(&adf_minors, obj, 0, 0, GFP_KERNEL); + if (ret < 0) { + pr_err("%s: allocating adf minor failed: %d\n", __func__, + ret); + return ret; + } + + obj->minor = ret; + obj->dev.parent = parent; + obj->dev.class = adf_class; + obj->dev.devt = MKDEV(adf_major, obj->minor); + + ret = device_register(&obj->dev); + if (ret < 0) { + pr_err("%s: registering adf object failed: %d\n", __func__, + ret); + goto err_device_register; + } + + return 0; + +err_device_register: + idr_remove(&adf_minors, obj->minor); + return ret; +} + +static char *adf_device_devnode(struct device *dev, umode_t *mode, + kuid_t *uid, kgid_t *gid) +{ + struct adf_obj *obj = container_of(dev, struct adf_obj, dev); + return kasprintf(GFP_KERNEL, "adf%d", obj->id); +} + +static char *adf_interface_devnode(struct device *dev, umode_t *mode, + kuid_t *uid, kgid_t *gid) +{ + struct adf_obj *obj = container_of(dev, struct adf_obj, dev); + struct adf_interface *intf = adf_obj_to_interface(obj); + struct adf_device *parent = adf_interface_parent(intf); + return kasprintf(GFP_KERNEL, "adf-interface%d.%d", + parent->base.id, intf->base.id); +} + +static char *adf_overlay_engine_devnode(struct device *dev, umode_t *mode, + kuid_t *uid, kgid_t *gid) +{ + struct adf_obj *obj = container_of(dev, struct adf_obj, dev); + struct adf_overlay_engine *eng = adf_obj_to_overlay_engine(obj); + struct adf_device *parent = adf_overlay_engine_parent(eng); + return kasprintf(GFP_KERNEL, "adf-overlay-engine%d.%d", + parent->base.id, eng->base.id); +} + +static void adf_noop_release(struct device *dev) +{ +} + +static struct device_type adf_device_type = { + .name = "adf_device", + .devnode = adf_device_devnode, + .release = adf_noop_release, +}; + +static struct device_type adf_interface_type = { + .name = "adf_interface", + .devnode = adf_interface_devnode, + .release = adf_noop_release, +}; + +static struct device_type adf_overlay_engine_type = { + .name = "adf_overlay_engine", + .devnode = adf_overlay_engine_devnode, + .release = adf_noop_release, +}; + +int adf_device_sysfs_init(struct adf_device *dev) +{ + dev->base.dev.type = &adf_device_type; + dev_set_name(&dev->base.dev, "%s", dev->base.name); + return adf_obj_sysfs_init(&dev->base, dev->dev); +} + +int adf_interface_sysfs_init(struct adf_interface *intf) +{ 
+ struct adf_device *parent = adf_interface_parent(intf); + size_t i, j; + int ret; + + intf->base.dev.type = &adf_interface_type; + dev_set_name(&intf->base.dev, "%s-interface%d", parent->base.name, + intf->base.id); + + ret = adf_obj_sysfs_init(&intf->base, &parent->base.dev); + if (ret < 0) + return ret; + + for (i = 0; i < ARRAY_SIZE(adf_interface_attrs); i++) { + ret = device_create_file(&intf->base.dev, + &adf_interface_attrs[i]); + if (ret < 0) { + dev_err(&intf->base.dev, "creating sysfs attribute %s failed: %d\n", + adf_interface_attrs[i].attr.name, ret); + goto err; + } + } + + return 0; + +err: + for (j = 0; j < i; j++) + device_remove_file(&intf->base.dev, &adf_interface_attrs[j]); + return ret; +} + +int adf_overlay_engine_sysfs_init(struct adf_overlay_engine *eng) +{ + struct adf_device *parent = adf_overlay_engine_parent(eng); + + eng->base.dev.type = &adf_overlay_engine_type; + dev_set_name(&eng->base.dev, "%s-overlay-engine%d", parent->base.name, + eng->base.id); + + return adf_obj_sysfs_init(&eng->base, &parent->base.dev); +} + +struct adf_obj *adf_obj_sysfs_find(int minor) +{ + return idr_find(&adf_minors, minor); +} + +void adf_obj_sysfs_destroy(struct adf_obj *obj) +{ + idr_remove(&adf_minors, obj->minor); + device_unregister(&obj->dev); +} + +void adf_device_sysfs_destroy(struct adf_device *dev) +{ + adf_obj_sysfs_destroy(&dev->base); +} + +void adf_interface_sysfs_destroy(struct adf_interface *intf) +{ + size_t i; + + for (i = 0; i < ARRAY_SIZE(adf_interface_attrs); i++) + device_remove_file(&intf->base.dev, &adf_interface_attrs[i]); + adf_obj_sysfs_destroy(&intf->base); +} + +void adf_overlay_engine_sysfs_destroy(struct adf_overlay_engine *eng) +{ + adf_obj_sysfs_destroy(&eng->base); +} + +int adf_sysfs_init(void) +{ + struct class *class; + int ret; + + class = class_create(THIS_MODULE, "adf"); + if (IS_ERR(class)) { + ret = PTR_ERR(class); + pr_err("%s: creating class failed: %d\n", __func__, ret); + return ret; + } + + ret = register_chrdev(0, "adf", &adf_fops); + if (ret < 0) { + pr_err("%s: registering device failed: %d\n", __func__, ret); + goto err_chrdev; + } + + adf_class = class; + adf_major = ret; + return 0; + +err_chrdev: + class_destroy(adf_class); + return ret; +} + +void adf_sysfs_destroy(void) +{ + idr_destroy(&adf_minors); + class_destroy(adf_class); +} diff --git a/drivers/video/adf/adf_sysfs.h b/drivers/video/adf/adf_sysfs.h new file mode 100644 index 00000000000..0613ac364f8 --- /dev/null +++ b/drivers/video/adf/adf_sysfs.h @@ -0,0 +1,33 @@ +/* + * Copyright (C) 2013 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#ifndef __VIDEO_ADF_ADF_SYSFS_H +#define __VIDEO_ADF_ADF_SYSFS_H + +struct adf_device; +struct adf_interface; +struct adf_overlay_engine; + +int adf_device_sysfs_init(struct adf_device *dev); +void adf_device_sysfs_destroy(struct adf_device *dev); +int adf_interface_sysfs_init(struct adf_interface *intf); +void adf_interface_sysfs_destroy(struct adf_interface *intf); +int adf_overlay_engine_sysfs_init(struct adf_overlay_engine *eng); +void adf_overlay_engine_sysfs_destroy(struct adf_overlay_engine *eng); +struct adf_obj *adf_obj_sysfs_find(int minor); + +int adf_sysfs_init(void); +void adf_sysfs_destroy(void); + +#endif /* __VIDEO_ADF_ADF_SYSFS_H */ diff --git a/drivers/video/adf/adf_trace.h b/drivers/video/adf/adf_trace.h new file mode 100644 index 00000000000..3cb2a84d728 --- /dev/null +++ b/drivers/video/adf/adf_trace.h @@ -0,0 +1,93 @@ +/* + * Copyright (C) 2013 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM adf + +#if !defined(__VIDEO_ADF_ADF_TRACE_H) || defined(TRACE_HEADER_MULTI_READ) +#define __VIDEO_ADF_ADF_TRACE_H + +#include <linux/tracepoint.h> +#include <video/adf.h> + +TRACE_EVENT(adf_event, + TP_PROTO(struct adf_obj *obj, enum adf_event_type type), + TP_ARGS(obj, type), + + TP_STRUCT__entry( + __string(name, obj->name) + __field(enum adf_event_type, type) + __array(char, type_str, 32) + ), + TP_fast_assign( + __assign_str(name, obj->name); + __entry->type = type; + strlcpy(__entry->type_str, adf_event_type_str(obj, type), + sizeof(__entry->type_str)); + ), + TP_printk("obj=%s type=%u (%s)", + __get_str(name), + __entry->type, + __entry->type_str) +); + +TRACE_EVENT(adf_event_enable, + TP_PROTO(struct adf_obj *obj, enum adf_event_type type), + TP_ARGS(obj, type), + + TP_STRUCT__entry( + __string(name, obj->name) + __field(enum adf_event_type, type) + __array(char, type_str, 32) + ), + TP_fast_assign( + __assign_str(name, obj->name); + __entry->type = type; + strlcpy(__entry->type_str, adf_event_type_str(obj, type), + sizeof(__entry->type_str)); + ), + TP_printk("obj=%s type=%u (%s)", + __get_str(name), + __entry->type, + __entry->type_str) +); + +TRACE_EVENT(adf_event_disable, + TP_PROTO(struct adf_obj *obj, enum adf_event_type type), + TP_ARGS(obj, type), + + TP_STRUCT__entry( + __string(name, obj->name) + __field(enum adf_event_type, type) + __array(char, type_str, 32) + ), + TP_fast_assign( + __assign_str(name, obj->name); + __entry->type = type; + strlcpy(__entry->type_str, adf_event_type_str(obj, type), + sizeof(__entry->type_str)); + ), + TP_printk("obj=%s type=%u (%s)", + __get_str(name), + __entry->type, + __entry->type_str) +); + +#endif /* __VIDEO_ADF_ADF_TRACE_H */ + +#undef TRACE_INCLUDE_PATH +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_PATH . 
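+/*
+ * Standard driver-local tracepoint boilerplate: the one .c file that defines
+ * CREATE_TRACE_POINTS before including this header causes the
+ * <trace/define_trace.h> include below to re-read adf_trace.h and emit the
+ * tracepoint definitions; TRACE_INCLUDE_PATH "." makes define_trace.h look
+ * for the header in the driver directory instead of include/trace/events.
+ */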
+#define TRACE_INCLUDE_FILE adf_trace +#include <trace/define_trace.h> diff --git a/drivers/video/omap2/displays/Kconfig b/drivers/video/omap2/displays/Kconfig index c3853c92279..92f8d32b63e 100644 --- a/drivers/video/omap2/displays/Kconfig +++ b/drivers/video/omap2/displays/Kconfig @@ -16,7 +16,13 @@ config PANEL_TFP410 help Driver for TFP410 DPI-to-DVI chip. The driver uses i2c to read EDID information from the monitor. - + +config PANEL_ILI_9342 + tristate "ili9342 display controller" + depends on OMAP2_DSS_DPI && SPI + help + LCD Display controller used on the Olio H1 + config PANEL_LGPHILIPS_LB035Q02 tristate "LG.Philips LB035Q02 LCD Panel" depends on OMAP2_DSS_DPI && SPI @@ -72,4 +78,11 @@ config PANEL_N8X0 depends on BACKLIGHT_CLASS_DEVICE help This is the LCD panel used on Nokia N8x0 + +config PANEL_MINNOW + tristate "Minnow DSI Command Mode Panel" + depends on OMAP2_DSS_DSI + help + Driver for Minnow DSI command mode panels. + endmenu diff --git a/drivers/video/omap2/displays/Makefile b/drivers/video/omap2/displays/Makefile index 58a5176b07b..3fb5d3ac750 100644 --- a/drivers/video/omap2/displays/Makefile +++ b/drivers/video/omap2/displays/Makefile @@ -1,5 +1,6 @@ obj-$(CONFIG_PANEL_GENERIC_DPI) += panel-generic-dpi.o obj-$(CONFIG_PANEL_TFP410) += panel-tfp410.o +obj-$(CONFIG_PANEL_ILI_9342) += panel-ili9342.o obj-$(CONFIG_PANEL_LGPHILIPS_LB035Q02) += panel-lgphilips-lb035q02.o obj-$(CONFIG_PANEL_SHARP_LS037V7DW01) += panel-sharp-ls037v7dw01.o obj-$(CONFIG_PANEL_NEC_NL8048HL11_01B) += panel-nec-nl8048hl11-01b.o @@ -9,3 +10,4 @@ obj-$(CONFIG_PANEL_PICODLP) += panel-picodlp.o obj-$(CONFIG_PANEL_TPO_TD043MTEA1) += panel-tpo-td043mtea1.o obj-$(CONFIG_PANEL_ACX565AKM) += panel-acx565akm.o obj-$(CONFIG_PANEL_N8X0) += panel-n8x0.o +obj-$(CONFIG_PANEL_MINNOW) += panel-minnow.o diff --git a/drivers/video/omap2/displays/panel-ili9342.c b/drivers/video/omap2/displays/panel-ili9342.c new file mode 100644 index 00000000000..d7bf8b42164 --- /dev/null +++ b/drivers/video/omap2/displays/panel-ili9342.c @@ -0,0 +1,459 @@ +#define DEBUG +/* + * Driver for ili9342 display driver + * + * Copyright (C) 2014 Olio Devices Inc. + * Author: Evan Wilson <evan@oliodevices.com> + * + * Adapted from panel-generic-dpi.c, panel-nec-nl8048hl11-01b.c + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. 
+ */ + +#include <linux/module.h> +#include <linux/delay.h> +#include <linux/slab.h> +#include <linux/spi/spi.h> +#include <linux/gpio.h> +#include <video/omapdss.h> + +struct panel_config { + struct omap_video_timings timings; + + int power_on_delay; + int power_off_delay; + + /* + * Used to match device to panel configuration + * when use generic panel driver + */ + const char *name; +}; + +/* Panel configurations */ +static struct panel_config ili9342_panels[] = { + /* Olio H1 panel */ + { + { + .x_res = 320, + .y_res = 240, + + .pixel_clock = 5333, + + .hsw = 10, + .hfp = 10, + .hbp = 20, + + .vsw = 2, + .vfp = 4, + .vbp = 1, + + .vsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .hsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE, + .de_level = OMAPDSS_SIG_ACTIVE_HIGH, + .sync_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE, + }, + .name = "olio_h1_panel", + }, +}; + +struct panel_drv_data { + + struct omap_dss_device *dssdev; + + struct panel_config *panel_config; + + struct mutex lock; +}; + +static int ili9342_panel_power_on(struct omap_dss_device *dssdev) +{ + int r; + struct panel_drv_data *drv_data = dev_get_drvdata(&dssdev->dev); + struct panel_config *panel_config = drv_data->panel_config; + + if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) + return 0; + + omapdss_dpi_set_timings(dssdev, &dssdev->panel.timings); + omapdss_dpi_set_data_lines(dssdev, dssdev->phy.dpi.data_lines); + + r = omapdss_dpi_display_enable(dssdev); + if (r) + goto err0; + + /* wait couple of vsyncs until enabling the LCD */ + if (panel_config->power_on_delay) + msleep(panel_config->power_on_delay); + + if (dssdev->platform_enable) { + r = dssdev->platform_enable(dssdev); + if (r) + goto err1; + } + + return 0; +err1: + omapdss_dpi_display_disable(dssdev); +err0: + return r; +} + +static void ili9342_panel_power_off(struct omap_dss_device *dssdev) +{ + struct panel_drv_data *drv_data = dev_get_drvdata(&dssdev->dev); + struct panel_config *panel_config = drv_data->panel_config; + + if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE) + return; + + if (dssdev->platform_disable) + dssdev->platform_disable(dssdev); + + /* wait couple of vsyncs after disabling the LCD */ + if (panel_config->power_off_delay) + msleep(panel_config->power_off_delay); + + omapdss_dpi_display_disable(dssdev); +} + +static int ili9342_panel_probe(struct omap_dss_device *dssdev) +{ + struct panel_config *panel_config = NULL; + struct panel_drv_data *drv_data = NULL; + int i; + + dev_dbg(&dssdev->dev, "probe\n"); + + if (!dssdev || !dssdev->name) + return -EINVAL; + + for (i = 0; i < ARRAY_SIZE(ili9342_panels); i++) { + if (strcmp(dssdev->name, ili9342_panels[i].name) == 0) { + panel_config = &ili9342_panels[i]; + break; + } + } + + if (!panel_config) { + dev_err(&dssdev->dev, "Could not find %s in ili9342 panel configs\n", dssdev->name); + return -EINVAL; + } + + dssdev->panel.timings = panel_config->timings; + + drv_data = kzalloc(sizeof(*drv_data), GFP_KERNEL); + if (!drv_data) + return -ENOMEM; + + drv_data->dssdev = dssdev; + drv_data->panel_config = panel_config; + + mutex_init(&drv_data->lock); + + dev_set_drvdata(&dssdev->dev, drv_data); + + return 0; +} + +static void __exit ili9342_panel_remove(struct omap_dss_device *dssdev) +{ + dev_dbg(&dssdev->dev, "remove\n"); + + dev_set_drvdata(&dssdev->dev, NULL); +} + +static int ili9342_panel_enable(struct omap_dss_device *dssdev) +{ + struct panel_drv_data *drv_data = dev_get_drvdata(&dssdev->dev); + int r; + + mutex_lock(&drv_data->lock); + + r = 
ili9342_panel_power_on(dssdev); + if (r) + goto err; + + dssdev->state = OMAP_DSS_DISPLAY_ACTIVE; +err: + mutex_unlock(&drv_data->lock); + + return r; +} + +static void ili9342_panel_disable(struct omap_dss_device *dssdev) +{ + struct panel_drv_data *drv_data = dev_get_drvdata(&dssdev->dev); + + mutex_lock(&drv_data->lock); + + ili9342_panel_power_off(dssdev); + + dssdev->state = OMAP_DSS_DISPLAY_DISABLED; + + mutex_unlock(&drv_data->lock); +} + +static void ili9342_panel_set_timings(struct omap_dss_device *dssdev, + struct omap_video_timings *timings) +{ + struct panel_drv_data *drv_data = dev_get_drvdata(&dssdev->dev); + + mutex_lock(&drv_data->lock); + + omapdss_dpi_set_timings(dssdev, timings); + + dssdev->panel.timings = *timings; + + mutex_unlock(&drv_data->lock); +} + +static void ili9342_panel_get_timings(struct omap_dss_device *dssdev, + struct omap_video_timings *timings) +{ + struct panel_drv_data *drv_data = dev_get_drvdata(&dssdev->dev); + + mutex_lock(&drv_data->lock); + + *timings = dssdev->panel.timings; + + mutex_unlock(&drv_data->lock); +} + +static int ili9342_panel_check_timings(struct omap_dss_device *dssdev, + struct omap_video_timings *timings) +{ + struct panel_drv_data *drv_data = dev_get_drvdata(&dssdev->dev); + int r; + + mutex_lock(&drv_data->lock); + + r = dpi_check_timings(dssdev, timings); + + mutex_unlock(&drv_data->lock); + + return r; +} + +static struct omap_dss_driver ili9342_driver = { + .probe = ili9342_panel_probe, + .remove = __exit_p(ili9342_panel_remove), + + .enable = ili9342_panel_enable, + .disable = ili9342_panel_disable, + + .set_timings = ili9342_panel_set_timings, + .get_timings = ili9342_panel_get_timings, + .check_timings = ili9342_panel_check_timings, + + .driver = { + .name = "ili9342_panel", + .owner = THIS_MODULE, + }, +}; + +static int ili9342_spi_write(struct spi_device *spi, bool cmd, unsigned char val) { + unsigned short buf; + struct spi_message m; + struct spi_transfer t = { + .tx_buf = &buf, + .len = 2, + .bits_per_word = 9, + }; + int r; + + if(cmd) { + buf = 0; + } else { + buf = 1 << 8; + } + buf |= val; + + dev_dbg(&spi->dev, "SPI sync: %x", buf); + spi_message_init(&m); + spi_message_add_tail(&t, &m); + r = spi_sync(spi, &m); + if(r < 0) { + dev_err(&spi->dev, "SPI sync failed."); + return -EINVAL; + } + return 0; +} + +static int ili9342_write_cmd(struct spi_device *spi, unsigned char val) { + return ili9342_spi_write(spi, 1, val); +} + +static int ili9342_write_data(struct spi_device *spi, unsigned char val) { + return ili9342_spi_write(spi, 0, val); +} + +static inline void ili9342_init_seq(struct spi_device *spi) { + ili9342_write_cmd(spi, 0xC8); + ili9342_write_data(spi, 0xFF); + ili9342_write_data(spi, 0x93); + ili9342_write_data(spi, 0x42); + + ili9342_write_cmd(spi, 0xB0); + ili9342_write_data(spi, 0xE0); + + ili9342_write_cmd(spi, 0xB5); + ili9342_write_data(spi, 0x04); + ili9342_write_data(spi, 0x01); + ili9342_write_data(spi, 0x0a); + ili9342_write_data(spi, 0x14); + + //ili9342_write_cmd(spi, 0xB1); + //ili9342_write_data(spi, 0x00); + //ili9342_write_data(spi, 0x10); + + ili9342_write_cmd(spi, 0x0B); + ili9342_write_data(spi, 0x20); + + ili9342_write_cmd(spi, 0xF6); + ili9342_write_data(spi, 0x01); + ili9342_write_data(spi, 0x00); + ili9342_write_data(spi, 0x06); + + + ili9342_write_cmd(spi, 0xB4); + ili9342_write_data(spi, 0x02); + + ili9342_write_cmd(spi, 0xC0); + ili9342_write_data(spi, 0x16); + ili9342_write_data(spi, 0x11); + + ili9342_write_cmd(spi, 0xC1); + ili9342_write_data(spi, 0x01); + + 
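+	/*
+	 * The remaining writes follow the usual ILI93xx bring-up pattern
+	 * (register meanings per the ILI9342 datasheet, not verified here):
+	 * 0x36 (MADCTL) and 0x3A (COLMOD, 0x66 = 18 bpp) are standard MIPI
+	 * DCS commands, followed by the gamma tables (0xE0/0xE1),
+	 * tearing-effect on (0x35), sleep-out (0x11) with the required
+	 * 120 ms delay, and display-on (0x29).
+	 */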
ili9342_write_cmd(spi, 0xC5); + ili9342_write_data(spi, 0xF0); + + ili9342_write_cmd(spi, 0x36); + ili9342_write_data(spi, 0x08); + + ili9342_write_cmd(spi, 0xB6); + ili9342_write_data(spi, 0x0A); + ili9342_write_data(spi, 0x00); + ili9342_write_data(spi, 0x1D); + ili9342_write_data(spi, 0x04); + + ili9342_write_cmd(spi, 0xE0); + ili9342_write_data(spi, 0x00); //00 + ili9342_write_data(spi, 0x0A); //01 + ili9342_write_data(spi, 0x11); //02 + ili9342_write_data(spi, 0x06); //04 + ili9342_write_data(spi, 0x13); //06 + ili9342_write_data(spi, 0x09); //13 + ili9342_write_data(spi, 0x37); //20 + ili9342_write_data(spi, 0x79); //27 + ili9342_write_data(spi, 0x4A); //43 + ili9342_write_data(spi, 0x09); //50 + ili9342_write_data(spi, 0x0F); //57 + ili9342_write_data(spi, 0x0B); //59 + ili9342_write_data(spi, 0x19); //61 + ili9342_write_data(spi, 0x1D); //62 + ili9342_write_data(spi, 0x0F); //63 + + ili9342_write_cmd(spi, 0xE1); + ili9342_write_data(spi, 0x00); //63 + ili9342_write_data(spi, 0x22); //62 + ili9342_write_data(spi, 0x26); //61 + ili9342_write_data(spi, 0x04); //59 + ili9342_write_data(spi, 0x10); //57 + ili9342_write_data(spi, 0x05); //50 + ili9342_write_data(spi, 0x3B); //43 + ili9342_write_data(spi, 0x34); //26 + ili9342_write_data(spi, 0x49); //20 + ili9342_write_data(spi, 0x06); //13 + ili9342_write_data(spi, 0x0C); //06 + ili9342_write_data(spi, 0x09); //04 + ili9342_write_data(spi, 0x2e); //02 + ili9342_write_data(spi, 0x35); //01 + ili9342_write_data(spi, 0x0F); //00 + + ili9342_write_cmd(spi, 0x35); + ili9342_write_data(spi, 0x00); + + ili9342_write_cmd(spi, 0x3A); + ili9342_write_data(spi, 0x66); + + ili9342_write_cmd(spi, 0x11); + msleep(120); + ili9342_write_cmd(spi, 0x29); +} + +static inline int init_ili9342_spi(struct spi_device *spi) { + struct omap_dss_device *panel = spi->dev.platform_data; + int reset_gpio; + + if(!panel->reset_gpio) { + dev_err(&spi->dev, "platform data requires reset\n"); + return -EINVAL; + } + + reset_gpio = panel->reset_gpio; + + if (!panel) { + dev_err(&spi->dev, "no platform data\n"); + return -EINVAL; + } + + if(gpio_request_one(reset_gpio, GPIOF_OUT_INIT_LOW, "ili9342-reset")) { + dev_err(&spi->dev, "Could not request reset gpio %d", panel->reset_gpio); + return -EINVAL; + } + + if(gpio_export(reset_gpio, 0)) { + dev_err(&spi->dev, "Could not export reset gpio %d", panel->reset_gpio); + return -EINVAL; + } + + gpio_set_value(panel->reset_gpio, 1); + mdelay(1); + gpio_set_value(panel->reset_gpio, 0); + mdelay(50); + gpio_set_value(panel->reset_gpio, 1); + mdelay(120); + + ili9342_init_seq(spi); + + return 0; +} +static int ili9342_spi_probe(struct spi_device *spi) +{ + init_ili9342_spi(spi); + return omap_dss_register_driver(&ili9342_driver); +} + +static int ili9342_spi_remove(struct spi_device *spi) +{ + omap_dss_unregister_driver(&ili9342_driver); + return 0; +} + +static struct spi_driver ili9342_spi_driver = { + .probe = ili9342_spi_probe, + .remove = ili9342_spi_remove, + .driver = { + .name = "ili9342-spi", + .owner = THIS_MODULE, + }, +}; + +module_spi_driver(ili9342_spi_driver); + +MODULE_AUTHOR("Evan Wilson <evan@oliodevice.com"); +MODULE_DESCRIPTION("ili9342 Display Controller Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/video/omap2/displays/panel-minnow-common.h b/drivers/video/omap2/displays/panel-minnow-common.h new file mode 100644 index 00000000000..1001cc67f46 --- /dev/null +++ b/drivers/video/omap2/displays/panel-minnow-common.h @@ -0,0 +1,335 @@ +/* + * Minnow DSI command mode panel - common initialization data 
header + * + * Copyright (C) 2013-2014 Motorola Mobility LLC. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#ifndef _MINNOW_PANEL_COMMON_HEADER_ + +#define INIT_DATA_VERSION (0x072314) /*MM/DD/YY*/ +/* This header file is used to sync Bootloader and Kernel Display Initialize + * Structure/Data, please make sure sync it for both Bootloader/Kernel when + * it changes some settings for Solomon/Orise. Bootloader should pass + * INIT_DATA_VERSION to kernel that make sure settings are same on both side. + */ + +#ifndef u8 +typedef unsigned char u8; +#endif + + +enum minnow_panel_type { + PANEL_INIT = -1, /* Not Initialize */ + PANEL_DUMMY, /* None panel detected */ + OTM3201_1_0, /* Initial Orise Panel 1.0, it's not stable */ + OTM3201_2_0, /* Orise Panel 2.0, only for test purpose */ + OTM3201_2_1, /* Orise Panel 2.1 */ +}; + +/* Panel initialize command type description: + * DCS_WRITE_SYNC, DCS_WRITE: + * standard DCS type command with/without BTA sync + * GENERIC_WRITE_SYNC, GENERIC_WRITE: + * standard Generic type command with/without BTA sync + * BTA_SYNC: + * standard BTA sync command + * WAIT_MS: + * sleep for given milliseconds + * SSD2848_CMD: + * special command for SSD2848 bridge register, it has fixed 6 bytes format, + * the fist 2 bytes is the register address, the last is 32 bits register + * OTM3201_CMD: + * special command for OTM3201 register, it has flexible register length, + * the fist byte is the register address + * SWITCH_TO_PANEL: + * turn on/off bridge retransmission mode by follow 1 byte + * this is a indicator that separate initialize sequence for bridge and panel. + * + * There's different requirement for bridge and panel initialization, + * for bridge, it needs initialize first and it should verify all registers + * only after write all the registers. when the verification is failed, + * it must reset hardware to retry initialize. + * for panel, it needs initialize after bridge, and it should verify each + * register after each write. when the verify is failed, it could retry to + * re-write the failed register. + */ +enum minnow_cmd_type { + DCS_WRITE_SYNC, + GENERIC_WRITE_SYNC, + DCS_WRITE, + GENERIC_WRITE, + BTA_SYNC, + WAIT_MS, + SSD2848_CMD, + OTM3201_CMD, + SWITCH_TO_PANEL, + CMD_TYPE_MAX +}; +/* Special register id to indicate the verification is needed */ +#define CMD_VERIFY_REG 0xFF + +/* Panel initialize command buffer description: + * it uses compact buffer to store all initialize commands, the first + * byte of each command is the command length in byte + */ +static u8 panel_init_ssd2848_320x320[] = { +/*n, type, data_0, data_1 ... 
data_n-1*/ +1, SWITCH_TO_PANEL, 0, +/* SCM PLL Register[0x0008] + * POSTDIV = 5, MULT = 50, PLLOUT = 26 x 50 / (5+1) = 216.6 MHz + */ +6, SSD2848_CMD, 0x00, 0x08, 0x01, 0xF4, 0x05, 0x32, +/* SCM Clock Control Register[0x000C] + * MTXDIV = 0, MIPITX speed = 216.6 / (0 + 1) = 216.6 Mbps + * MIPITX clock = 216.6 / 2 = 108.3 MHz + * SYSDIV - 11, system clock = 216.6 / 2 / (11 + 1) = 9.0 MHz + */ +6, SSD2848_CMD, 0x00, 0x0C, 0x00, 0x00, 0x00, 0x0B, +/* SCM Miscellaneous Control Register[0x0014] + * MTXVPF = 3, MIPITX Video Pixel Format = 24bpp + * MRXLS = 0, MIPIRX Lane Select = 1 lane + * MRXECC = MRXCRC = MRXEOT = MRXEE = 1 + * enable MIPIRX ECC, CRC, EOR, Error Check + */ +6, SSD2848_CMD, 0x00, 0x14, 0x0C, 0x07, 0x80, 0x0F, +/* Sleep out and Waiting for SSD2848 PLL locked */ +1, DCS_WRITE_SYNC, MIPI_DCS_EXIT_SLEEP_MODE, +1, WAIT_MS, 1, +/* MIPIRX Delay Configuration Register[0x0008] */ +6, SSD2848_CMD, 0x10, 0x08, 0x01, 0x20, 0x01, 0x45, +/* VTCM Configuration Register[0x000C] + * TE_SEL = 1, Tear signal from display interface unit + * TEP = 0, vtcm_rgb_te signal Active high + */ +6, SSD2848_CMD, 0x20, 0x0C, 0x00, 0x00, 0x00, 0x02, +/* VTCM Pixel Clock Frequency Ratio Register[0x0010] + * PCLKDEN = 95, PCLKNUM = 31, + * Pixel clock = 9.0 x 31 / 95 = 2.94 MHz + * since SSD2848 uses 48 bits bus, the actual pixel clock is + * depend on current pixel format setting(24 bpp now) + * actual pix_clk = 2.94 * 48 / 24 = 5.88 MHz + */ +6, SSD2848_CMD, 0x20, 0x10, 0x00, 0x5F, 0x00, 0x1F, +/* VTCM Display Horizontal Configuration Register[0x0014] + * Horizontal Total = 392 = 20 + 42 + 320 + 10 + * Horizontal Display Period Start = 52 (10 + 42) + */ +6, SSD2848_CMD, 0x20, 0x14, 0x01, 0x88, 0x00, 0x34, +/* VTCM Vertical Display Configuration Register[0x0018] + * Vertical Total = 334 = 2 + 10 + 320 + 2 + * Vertical Display Period Start = 12 (2 + 10) + */ +6, SSD2848_CMD, 0x20, 0x18, 0x01, 0x4E, 0x00, 0x0C, +/* VTCM Display Size Register[0x0020] + * Display Width = 320 + * Display Height = 320 + */ +6, SSD2848_CMD, 0x20, 0x20, 0x01, 0x40, 0x01, 0x40, +/* VTCM Panel Size Register[0x0024] + * Panel Width = 320 + * Panel Height = 320 + */ +6, SSD2848_CMD, 0x20, 0x24, 0x01, 0x40, 0x01, 0x40, +/* VTCM URAM Control Register[0x0030] */ +6, SSD2848_CMD, 0x20, 0x30, 0x00, 0x00, 0x00, 0x15, +/* VTCM Panel Offset Start Register[0x0034] */ +6, SSD2848_CMD, 0x20, 0x34, 0x00, 0x00, 0x00, 0x00, +/* VTCM Panel Offset End Register[0x0038] */ +6, SSD2848_CMD, 0x20, 0x38, 0x01, 0x3F, 0x01, 0x3F, +/* VTCM Image Size Register[0x003C] */ +6, SSD2848_CMD, 0x20, 0x3C, 0x01, 0x40, 0x01, 0x40, +/* VTCM Qualcomm FBC Control Register[0x00A0] + * DEC_MODE = COM_MODE = 0 Bypass + */ +6, SSD2848_CMD, 0x20, 0xA0, 0x00, 0x00, 0x05, 0x00, +5, DCS_WRITE_SYNC, 0x2A, 0x00, 0x00, 0x01, 0x3F, +5, DCS_WRITE_SYNC, 0x2B, 0x00, 0x00, 0x01, 0x3F, +/* DSITX Control Register[0x0008] + * LPD = 4, LP clock = 216.6 / 8 / (4 + 1) = 5.4 MHz + * EOT = 1, EOT Packet Enable + */ +6, SSD2848_CMD, 0x60, 0x08, 0x00, 0x04, 0x00, 0x0A, +/* DSITX Video Timing Control Register[0x000C] + * VBP = 12, HBP = 52, VSA = 2, HSA = 10 + */ +6, SSD2848_CMD, 0x60, 0x0C, 0x0C, 0x34, 0x02, 0x0A, +/* DSITX Video Timing Control 2 Register[0x0010] + * VACT = 320, VFP = 2, HFP = 20 + */ +6, SSD2848_CMD, 0x60, 0x10, 0x01, 0x40, 0x02, 0x14, +/* DSITX Video Configuration Register[0x0014] + * VM = 10, burst mode + * VEC = 1, Command packet will be sent after video packet + are sent during Vertical blanking period for Non burst + video transfer or Vertical/Horizontal blanking period + for 
Burst video transfer + */ +6, SSD2848_CMD, 0x60, 0x14, 0x01, 0x00, 0x01, 0x4E, +/* DSITX Delay Adjustment 1 Register[0x0040] + * HPD = 1, HZD = 10, CPD = 1, CZD = 19 + * byte_clk = 1000 / (216.6 / 8) = 36.9 ns + * Ths-prepare-HPD = 36.9 * (1 + 3) = 147.7 ns + * Ths-zero-HZD = 36.9 * (10 + 1.25) = 415.5 ns + * Tclk-prepare-CPD = 36.9 * (1 + 3) = 147.7 ns + * Tclk-zero-CZD = 36.9 * (19 + 1.25) = 747.9 ns + */ +6, SSD2848_CMD, 0x60, 0x40, 0x13, 0x01, 0x0A, 0x01, +/* DSITX Delay Adjustment 2 Register[0x0044] + * CPTD = 10, CPED = 4, HTD = 5, CTD = 5 + * byte_clk = 1000 / (216.6 / 8) = 36.9 ns + * Tclk-post-CPTD = 36.9 * (10 + 2) = 443.2 ns + * Tclk-pre-CPED = 36.9 * 4 = 147.7 ns + * Ths-trail-HTD = 36.9 * (5 - 1.25) = 138.5 ns + * Tclk-trail-CTD = 36.9 * (5 + 1) - 4 = 217.6 ns + */ +6, SSD2848_CMD, 0x60, 0x44, 0x05, 0x05, 0x04, 0x0A, +/* DSITX DSIn Video Register[0x0080+(n*32) + 0x004] + * HACT = 320 + */ +6, SSD2848_CMD, 0x60, 0x84, 0x00, 0x00, 0x01, 0x40, +1, SSD2848_CMD, CMD_VERIFY_REG, /* command for verify ssd2848 registers */ +1, SWITCH_TO_PANEL, 1, +/* Orise Engineering Mode Enable (RF0h) + * Enable Engineering Mode + */ +3, OTM3201_CMD, 0xF0, 0x54, 0x47, +/* Register Read Mode Enable (RA0h) + * Enable to write + */ +2, OTM3201_CMD, 0xA0, 0x00, +/* Mux1 to 9 CKH timing structure register (RBDH) */ +4, OTM3201_CMD, 0xBD, 0x00, 0x11, 0x31, +/* Landscape MIPI Video Mode One Line Clock Number (RE9h) */ +2, OTM3201_CMD, 0xE9, 0x46, +/* Display Inversion Control (RB1h) */ +2, OTM3201_CMD, 0xB1, 0x12, +/* ??? undefined */ +2, OTM3201_CMD, 0xE2, 0xF0, +/* Display Waveform Cycle setting (RBAh) */ +5, OTM3201_CMD, 0xBA, 0x06, 0x15, 0x2B, 0x01, +/* RGB Interface Blanking Porch setting (RB3h) + * VFP = 2, VBP = 10, HFP = 20, HBP = 42, VSW = 2, HSW = 10 + */ +6, OTM3201_CMD, 0xB3, 0x02, 0x0A, 0x14, 0x2A, 0x2A, +/* Gamma Voltage adjust Control (RB5h) */ +5, OTM3201_CMD, 0xB5, 0x78, 0x78, 0x76, 0xF6, +/* Gamma (‘+’polarity) Correction Characteristics Setting R gamma (RC0h) */ +18, OTM3201_CMD, 0xC0, 0x00, 0x06, 0x17, 0x11, 0x16, 0x25, 0x0E, + 0x0C, 0x0C, 0x0E, 0x0C, 0x2F, 0x07, 0x0A, 0x3F, 0x3F, 0x3F, +/* Gamma (‘-’polarity) Correction Characteristics Setting R gamma (RC1h) */ +18, OTM3201_CMD, 0xC1, 0x00, 0x06, 0x17, 0x11, 0x16, 0x25, 0x0E, + 0x0C, 0x0C, 0x0E, 0x0C, 0x2F, 0x07, 0x0A, 0x3F, 0x3F, 0x3F, +/* Gamma (‘+’polarity) Correction Characteristics Setting G gamma (RC2h) */ +18, OTM3201_CMD, 0xC2, 0x00, 0x06, 0x17, 0x11, 0x16, 0x25, 0x0E, + 0x0C, 0x0C, 0x0E, 0x0C, 0x2F, 0x07, 0x0A, 0x3F, 0x3F, 0x3F, +/* Gamma (‘-’polarity) Correction Characteristics Setting G gamma (RC3h) */ +18, OTM3201_CMD, 0xC3, 0x00, 0x06, 0x17, 0x11, 0x16, 0x25, 0x0E, + 0x0C, 0x0C, 0x0E, 0x0C, 0x2F, 0x07, 0x0A, 0x3F, 0x3F, 0x3F, +/* Gamma (‘+’polarity) Correction Characteristics Setting B gamma (RC4h) */ +18, OTM3201_CMD, 0xC4, 0x00, 0x06, 0x17, 0x11, 0x16, 0x25, 0x0E, + 0x0C, 0x0C, 0x0E, 0x0C, 0x2F, 0x07, 0x0A, 0x3F, 0x3F, 0x3F, +/* Gamma (‘-’polarity) Correction Characteristics Setting B gamma (RC5h) */ +18, OTM3201_CMD, 0xC5, 0x00, 0x06, 0x17, 0x11, 0x16, 0x25, 0x0E, + 0x0C, 0x0C, 0x0E, 0x0C, 0x2F, 0x07, 0x0A, 0x3F, 0x3F, 0x3F, +1, OTM3201_CMD, CMD_VERIFY_REG, /* command for verify otm3201 registers */ +/* Register Read Mode Enable (RA0h) + * Enable to read, locked for write + */ +2, OTM3201_CMD, 0xA0, 0x80, +/* Orise Engineering Mode Enable (RF0h) + * Disable Engineering Mode, locked for second group register + */ +3, OTM3201_CMD, 0xF0, 0x00, 0x00, +1, DCS_WRITE_SYNC, MIPI_DCS_EXIT_SLEEP_MODE, +1, WAIT_MS, 120, +1, 
DCS_WRITE_SYNC, MIPI_DCS_SET_DISPLAY_ON, +1, SWITCH_TO_PANEL, 0, +0 +}; + +/* Special settings for OTM3201 PANEL revision 1.0 + * Orise panel 1.0 has known issue that don't meet MIPI timing requirement + * it needs the different timing setting that apply for panel 2.0 or above + */ +static u8 panel_init_ssd2848_320x320_1[] = { +/*n, type, data_0, data_1 ... data_n-1*/ +/* VTCM Pixel Clock Frequency Ratio Register[0x0010] + * PCLKDEN = 247, PCLKNUM = 108, + * Pixel clock = 9.0 x 108 / 247 = 3.93 MHz + * since SSD2848 uses 48 bits bus, the actual pixel clock is + * depend on current pixel format setting(24 bpp now) + * actual pix_clk = 3.93 * 48 / 24 = 7.87 MHz + */ +6, SSD2848_CMD, 0x20, 0x10, 0x00, 0xF7, 0x00, 0x6C, +/* VTCM Display Horizontal Configuration Register[0x0014] + * Horizontal Total = 392 = 20 + 42 + 320 + 10 + * Horizontal Display Period Start = 42 + */ +6, SSD2848_CMD, 0x20, 0x14, 0x01, 0x88, 0x00, 0x2A, +/* VTCM Vertical Display Configuration Register[0x0018] + * Vertical Total = 334 = 2 + 10 + 320 + 2 + * Vertical Display Period Start = 10 + */ +6, SSD2848_CMD, 0x20, 0x18, 0x01, 0x4E, 0x00, 0x0A, +/* DSITX Video Timing Control Register[0x000C] + * VBP = 10, HBP = 42, VSA = 2, HSA = 10 + */ +6, SSD2848_CMD, 0x60, 0x0C, 0x0A, 0x2A, 0x02, 0x0A, +/* DSITX Video Configuration Register[0x0014] + * VM = 00, Non burst mode with sync pulses + * VEC = 1, Command packet will be sent after video packet + are sent during Vertical blanking period for Non burst + video transfer or Vertical/Horizontal blanking period + for Burst video transfer + */ +6, SSD2848_CMD, 0x60, 0x14, 0x01, 0x00, 0x01, 0x40, +/* DSITX Delay Adjustment 2 Register[0x0044] + * CPTD = 22, CPED = 4, HTD = 10, CTD = 10 + * byte_clk = 1000 / (216.6 / 8) = 36.9 ns + * Tclk-post-CPTD = 36.9 * (22 + 2) = 886.4 ns + * Tclk-pre-CPED = 36.9 * 4 = 147.7 ns + * Ths-trail-HTD = 36.9 * (10 - 1.25) = 323.1 ns + * Tclk-trail-CTD = 36.9 * (10 + 1) - 4 = 402.2 ns + */ +6, SSD2848_CMD, 0x60, 0x44, 0x0A, 0x0A, 0x04, 0x16, +0 +}; + +static u8 panel_off_ssd2848_320x320[] = { +/*n, type, data_0, data_1 ... data_n-1*/ +1, SWITCH_TO_PANEL, 1, +1, DCS_WRITE_SYNC, MIPI_DCS_SET_DISPLAY_OFF, +1, DCS_WRITE, MIPI_DCS_ENTER_SLEEP_MODE, +1, SWITCH_TO_PANEL, 0, +1, DCS_WRITE_SYNC, MIPI_DCS_SET_DISPLAY_OFF, +1, WAIT_MS, 50, +1, DCS_WRITE, MIPI_DCS_ENTER_SLEEP_MODE, +1, WAIT_MS, 20, +/* MIPIRX Power Cut Register[0x0028] + * PWC - This bit will enable power cut to the whole chip and only global + * reset can restore the power supply to the chip. + */ +6, GENERIC_WRITE, 0x10, 0x28, 0x00, 0x00, 0x00, 0x01, +1, WAIT_MS, 5, +0 +}; + +/* Special code to process Orise internal register */ +static u8 otm3201_eng_mode[] = { 0xF0, 0x54, 0x47 }; +static u8 otm3201_write_mode[] = { 0xA0, 0x00 }; +static u8 otm3201_read_mode[] = { 0xA0, 0x80 }; + +#define _MINNOW_PANEL_COMMON_HEADER_ +#endif diff --git a/drivers/video/omap2/displays/panel-minnow.c b/drivers/video/omap2/displays/panel-minnow.c new file mode 100644 index 00000000000..8d127ae401f --- /dev/null +++ b/drivers/video/omap2/displays/panel-minnow.c @@ -0,0 +1,3761 @@ +/* + * Minnow DSI command mode panel + * + * Copyright (C) 2013-2014 Motorola Mobility LLC. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. + */ + +/*#define PANEL_DEBUG*/ +#define PANEL_PERF_TIME + +#include <linux/module.h> +#include <linux/delay.h> +#include <linux/err.h> +#include <linux/jiffies.h> +#include <linux/sched.h> +#include <linux/backlight.h> +#include <linux/fb.h> +#include <linux/interrupt.h> +#include <linux/gpio.h> +#include <linux/of_gpio.h> +#include <linux/workqueue.h> +#include <linux/slab.h> +#include <linux/mutex.h> +#include <linux/regulator/consumer.h> +#include <linux/clk.h> +#include <linux/wakelock.h> +#include <linux/leds.h> +#include <linux/alarmtimer.h> +#include <linux/m4sensorhub.h> +#include <linux/m4sensorhub/MemMapUserSettings.h> + +#include <video/omapdss.h> +#include <video/omap-panel-data.h> +#include <video/mipi_display.h> +#include <linux/notifier.h> +#include <linux/wakeup_source_notify.h> + +#include "../dss/dss.h" + +#include "panel-minnow-common.h" + +/* DSI Virtual channel. Hardcoded for now. */ +#define TCH 0 + +#define DCS_READ_NUM_ERRORS 0x05 +#define DCS_BRIGHTNESS 0x51 +#define DCS_CTRL_DISPLAY 0x53 +#define DCS_WRITE_CABC 0x55 +#define DCS_READ_CABC 0x56 +#define DCS_GET_ID1 0xda +#define DCS_GET_ID2 0xdb +#define DCS_GET_ID3 0xdc +#define DIM_BACKLIGHT_ALS 5 + +enum minnow_panel_component { + MINNOW_PANEL, + MINNOW_BRIDGE, + MINNOW_COMPONENT_MAX +}; + +enum minnow_panel_id { + MINNOW_PANEL_CM_220X176, + MINNOW_PANEL_CM_220X220, + MINNOW_PANEL_CM_BRIDGE_320X320, + MINNOW_PANEL_MAX +}; + +static u8 panel_init_220x176[] = { +/*n, type, data_0, data_1 ... 
data_n-1*/ +1, DCS_WRITE_SYNC, MIPI_DCS_EXIT_SLEEP_MODE, +1, WAIT_MS, 5, +3, DCS_WRITE_SYNC, 0xF0, 0x5A, 0x5A, +3, DCS_WRITE_SYNC, 0xF1, 0x5A, 0x5A, +18, DCS_WRITE_SYNC, 0xF2, 0x16, 0xDC, 0x03, 0x28, 0x28, 0x10, 0x00, 0x60, 0xF8, + 0x00, 0x07, 0x02, 0x00, 0x00, 0xDC, 0x28, 0x28, +15, DCS_WRITE_SYNC, 0xF4, 0x0A, 0x00, 0x00, 0x00, 0x77, 0x7F, 0x07, 0x22, 0x2A, + 0x43, 0x07, 0x2A, 0x43, 0x07, +11, DCS_WRITE_SYNC, 0xF5, 0x00, 0x50, 0x28, 0x00, 0x00, 0x09, 0x00, 0x00, 0x00, + 0x00, +10, DCS_WRITE_SYNC, 0xF6, 0x07, 0x00, 0x07, 0x00, 0x0B, 0x04, 0x04, 0x04, 0x07, +5, DCS_WRITE_SYNC, 0xF7, 0x00, 0x00, 0x00, 0x00, +3, DCS_WRITE_SYNC, 0xF8, 0x44, 0x08, +2, DCS_WRITE_SYNC, 0xF9, 0x04, +17, DCS_WRITE_SYNC, 0xFA, 0x0F, 0x0F, 0x1E, 0x23, 0x26, 0x2D, 0x21, 0x2B, 0x33, + 0x32, 0x2E, 0x00, 0x00, 0x00, 0x00, 0x00, +17, DCS_WRITE_SYNC, 0xFB, 0x0F, 0x0F, 0x1E, 0x23, 0x26, 0x2D, 0x21, 0x2B, 0x33, + 0x32, 0x2E, 0x00, 0x00, 0x00, 0x00, 0x00, +2, DCS_WRITE_SYNC, 0xF9, 0x02, +17, DCS_WRITE_SYNC, 0xFA, 0x00, 0x00, 0x0A, 0x16, 0x1D, 0x27, 0x1C, 0x30, 0x38, + 0x37, 0x32, 0x00, 0x00, 0x00, 0x00, 0x00, +17, DCS_WRITE_SYNC, 0xFB, 0x00, 0x00, 0x0A, 0x16, 0x1D, 0x27, 0x1C, 0x30, 0x38, + 0x37, 0x32, 0x00, 0x00, 0x00, 0x00, 0x00, +2, DCS_WRITE_SYNC, 0xF9, 0x01, +17, DCS_WRITE_SYNC, 0xFA, 0x00, 0x00, 0x13, 0x14, 0x19, 0x24, 0x1A, 0x31, 0x39, + 0x38, 0x33, 0x00, 0x00, 0x00, 0x00, 0x00, +17, DCS_WRITE_SYNC, 0xFB, 0x00, 0x00, 0x13, 0x14, 0x19, 0x24, 0x1A, 0x31, 0x39, + 0x38, 0x33, 0x00, 0x00, 0x00, 0x00, 0x00, +3, DCS_WRITE_SYNC, 0xF0, 0x00, 0x00, +3, DCS_WRITE_SYNC, 0xF1, 0x00, 0x00, +2, DCS_WRITE_SYNC, 0x36, 0xD8, +2, DCS_WRITE_SYNC, 0x3A, 0x06, +0 +}; + +static u8 panel_init_220x220[] = { +/*n, type, data_0, data_1 ... data_n-1*/ +1, DCS_WRITE_SYNC, MIPI_DCS_EXIT_SLEEP_MODE, +1, WAIT_MS, 5, +3, DCS_WRITE_SYNC, 0xF0, 0x5A, 0x5A, +3, DCS_WRITE_SYNC, 0xF1, 0x5A, 0x5A, +18, DCS_WRITE_SYNC, 0xF2, 0x1C, 0xDC, 0x03, 0x28, 0x28, 0x10, 0x00, 0x60, 0xF8, + 0x00, 0x07, 0x02, 0x00, 0x00, 0xDC, 0x28, 0x28, +15, DCS_WRITE_SYNC, 0xF4, 0x0A, 0x00, 0x00, 0x00, 0x77, 0x7F, 0x07, 0x22, 0x2A, + 0x43, 0x07, 0x2A, 0x43, 0x07, +11, DCS_WRITE_SYNC, 0xF5, 0x00, 0x50, 0x28, 0x00, 0x00, 0x09, 0x00, 0x00, 0x01, + 0x01, +10, DCS_WRITE_SYNC, 0xF6, 0x07, 0x00, 0x07, 0x00, 0x0B, 0x04, 0x04, 0x04, 0x07, +5, DCS_WRITE_SYNC, 0xF7, 0x00, 0x00, 0x00, 0x00, +3, DCS_WRITE_SYNC, 0xF8, 0x44, 0x02, +2, DCS_WRITE_SYNC, 0xF9, 0x04, +17, DCS_WRITE_SYNC, 0xFA, 0x1E, 0x1E, 0x0D, 0x1D, 0x21, 0x2C, 0x23, 0x28, 0x2C, + 0x28, 0x28, 0x00, 0x00, 0x00, 0x00, 0x00, +17, DCS_WRITE_SYNC, 0xFB, 0x1E, 0x1E, 0x0D, 0x1D, 0x21, 0x2C, 0x23, 0x28, 0x2C, + 0x28, 0x28, 0x00, 0x00, 0x00, 0x00, 0x00, +2, DCS_WRITE_SYNC, 0xF9, 0x02, +17, DCS_WRITE_SYNC, 0xFA, 0x19, 0x18, 0x08, 0x0F, 0x18, 0x26, 0x1E, 0x2C, 0x30, + 0x2C, 0x2C, 0x00, 0x00, 0x00, 0x00, 0x00, +17, DCS_WRITE_SYNC, 0xFB, 0x19, 0x18, 0x08, 0x0F, 0x18, 0x26, 0x1E, 0x2C, 0x30, + 0x2C, 0x2C, 0x00, 0x00, 0x00, 0x00, 0x00, +2, DCS_WRITE_SYNC, 0xF9, 0x01, +17, DCS_WRITE_SYNC, 0xFA, 0x19, 0x19, 0x09, 0x0D, 0x12, 0x21, 0x1B, 0x2E, 0x31, + 0x2E, 0x2E, 0x00, 0x00, 0x00, 0x00, 0x00, +17, DCS_WRITE_SYNC, 0xFB, 0x19, 0x19, 0x09, 0x0D, 0x12, 0x21, 0x1B, 0x2E, 0x31, + 0x2E, 0x2E, 0x00, 0x00, 0x00, 0x00, 0x00, +3, DCS_WRITE_SYNC, 0xF0, 0x00, 0x00, +3, DCS_WRITE_SYNC, 0xF1, 0x00, 0x00, +2, DCS_WRITE_SYNC, 0x36, 0xD8, +2, DCS_WRITE_SYNC, 0x3A, 0x06, +0 +}; + +static u8 panel_off_common[] = { +/*n, type, data_0, data_1 ... 
data_n-1*/ +1, DCS_WRITE_SYNC, MIPI_DCS_SET_DISPLAY_OFF, +1, DCS_WRITE, MIPI_DCS_ENTER_SLEEP_MODE, +1, WAIT_MS, 20, +0 +}; + +#define BRIDGE_WIDTH 320 +#define BRIDGE_HEIGHT 320 +#define PANEL_WIDTH BRIDGE_WIDTH +#define PANEL_HEIGHT 290 +#define UNUSED_LINES (BRIDGE_HEIGHT-PANEL_HEIGHT) +#define DRIVER_NAME "minnow-panel" + +struct minnow_panel_clk_range { + int min; + int max; +}; + +enum minnow_panel_active_level { + ACTIVE_LOW = 0, + ACTIVE_HIGH, + ACTIVE_MAX +}; +struct minnow_panel_hw_reset { + enum minnow_panel_active_level active; + int reset_ms; + int wait_ms; +}; + +struct minnow_panel_cmd_buf { + int count; + u8 *cmdbuf; +}; + +struct minnow_panel_attr { + int mode; + int xres; + int yres; + int pixel_clock; + int pixel_format; + int xoffset; + int yoffset; + struct minnow_panel_cmd_buf power_on; + struct minnow_panel_cmd_buf power_off; + struct minnow_panel_clk_range hs; + struct minnow_panel_clk_range lp; + struct minnow_panel_hw_reset panel_reset; + struct minnow_panel_hw_reset bridge_reset; +}; + +#define INIT_CMD_BUF(type, buf) .power_##type = {\ + .count = sizeof(buf), .cmdbuf = (buf) } +static struct minnow_panel_attr panel_attr_table[MINNOW_PANEL_MAX] = { + [MINNOW_PANEL_CM_220X176] = { + .mode = OMAP_DSS_DSI_CMD_MODE, + .xres = 220, + .yres = 176, + .pixel_clock = 4608, + .pixel_format = OMAP_DSS_DSI_FMT_RGB666, + .xoffset = 0x32, + .yoffset = 0, + INIT_CMD_BUF(on, panel_init_220x176), + INIT_CMD_BUF(off, panel_off_common), + .hs = { 100000000, 150000000 }, + .lp = { 7000000, 9000000 }, + .panel_reset = { ACTIVE_LOW, 1, 5 }, + .bridge_reset = { ACTIVE_LOW, 0, 0 }, + }, + [MINNOW_PANEL_CM_220X220] = { + .mode = OMAP_DSS_DSI_CMD_MODE, + .xres = 220, + .yres = 220, + .pixel_clock = 4608, + .pixel_format = OMAP_DSS_DSI_FMT_RGB666, + .xoffset = 0x32, + .yoffset = 0x4, + INIT_CMD_BUF(on, panel_init_220x220), + INIT_CMD_BUF(off, panel_off_common), + .hs = { 100000000, 150000000 }, + .lp = { 7000000, 9000000 }, + .panel_reset = { ACTIVE_LOW, 1, 5 }, + .bridge_reset = { ACTIVE_LOW, 0, 0 }, + }, + [MINNOW_PANEL_CM_BRIDGE_320X320] = { + .mode = OMAP_DSS_DSI_CMD_MODE, + .xres = PANEL_WIDTH, + .yres = PANEL_HEIGHT, + .pixel_clock = DIV_ROUND_UP(PANEL_WIDTH * + PANEL_HEIGHT * 45, 1000), + .pixel_format = OMAP_DSS_DSI_FMT_RGB888, + .xoffset = 0, + .yoffset = 0, + INIT_CMD_BUF(on, panel_init_ssd2848_320x320), + INIT_CMD_BUF(off, panel_off_ssd2848_320x320), + .hs = { 104000000, 150000000 }, + .lp = { 7000000, 9000000 }, + .panel_reset = { ACTIVE_LOW, 5, 10 }, + .bridge_reset = { ACTIVE_LOW, 20, 10 } + }, +}; + +#ifdef PANEL_PERF_TIME +#define GET_ELAPSE_TIME(last) jiffies_to_msecs((unsigned long)jiffies-last) +#endif + +enum display_state { + DISPLAY_DISABLE = SCREEN_STATUS_NORMAL_OFF, + DISPLAY_ENABLE = SCREEN_STATUS_NORMAL_ON, +#ifdef CONFIG_HAS_AMBIENTMODE + DISPLAY_AMBIENT_OFF = SCREEN_STATUS_AMBIENT_OFF, + DISPLAY_AMBIENT_ON = SCREEN_STATUS_AMBIENT_ON, +#endif +}; + +struct minnow_panel_data { + struct mutex lock; /* mutex */ + /* wake_lock for common function, it should be used in same thread */ + struct wake_lock wake_lock; + /* wake_lock for update function, it's used in different thread */ + struct wake_lock update_wake_lock; + + struct omap_dss_device *dssdev; + + /* panel HW configuration from DT or platform data */ + int reset_gpio[MINNOW_COMPONENT_MAX]; + int ext_te_gpio; + int vio_en_gpio; + struct pinctrl *vio_pctrl; + struct pinctrl_state *vio_state_output; + struct pinctrl_state *vio_state_pulldown; + int mem_en_gpio; + struct minnow_panel_hw_reset 
hw_reset[MINNOW_COMPONENT_MAX]; + struct regulator *regulators[MINNOW_COMPONENT_MAX]; + struct clk *clk_in; + bool clk_in_en; + +#ifdef CONFIG_PANEL_BACKLIGHT + bool use_dsi_backlight; + struct backlight_device *bldev; +#endif + + struct omap_dsi_pin_config pin_config; + struct omap_dss_dsi_config dsi_config; + struct minnow_panel_cmd_buf power_on; + struct minnow_panel_cmd_buf power_off; + u8 *last_init_data; + + int id_panel; + int x_offset; + int y_offset; + int reset_ms; + int release_ms; + + /* runtime variables */ + enum display_state state; + enum display_state m4_state; + bool enabled; + bool interactive; + bool output_enabled; + enum minnow_panel_type panel_type; + + bool te_enabled; + + atomic_t do_update; + int channel; + + struct delayed_work te_timeout_work; + +#ifdef PANEL_DEBUG + unsigned cabc_mode; +#endif + + bool first_enable; + bool skip_first_init; + int panel_retry_count; + int esd_errors; + + struct workqueue_struct *workqueue; + + struct delayed_work esd_work; + unsigned esd_interval; +#ifdef CONFIG_HAS_AMBIENTMODE + struct timespec esd_start_time; +#endif + + bool ulps_enabled; + unsigned ulps_timeout; + struct delayed_work ulps_work; + + int total_update; + int total_error; + int total_esd_reset; +#ifdef PANEL_PERF_TIME + unsigned long time_power_on; + unsigned long time_ulps; + unsigned long time_update; + unsigned long time_update_min; + unsigned long time_update_max; + unsigned long last_power_on; + unsigned long last_ulps; + unsigned long last_update; +#endif + int vsync_events_gpio; + struct sysfs_dirent *vsync_events_sysfs; + bool vsync_events_enabled; + ktime_t vsync_events_timestamp; +#ifdef CONFIG_WAKEUP_SOURCE_NOTIFY + bool early_inited; + enum display_state last_state; + struct notifier_block displayenable_nb; + struct work_struct early_init_work; + struct delayed_work early_init_timeout_work; +#endif /* CONFIG_WAKEUP_SOURCE_NOTIFY */ +#ifdef CONFIG_HAS_AMBIENTMODE + struct completion resume_completion; + struct work_struct dock_work; + struct work_struct ambient_wake_work; + struct alarm ambient_timeout_alarm; + bool smart_ambient; + int ambient_timeout; /* time out in seconds */ + struct work_struct ambient_timeout_work; + bool is_docked; + bool is_gesture_view_on; +#endif +}; + +#define DECLARE_MPD_FROM_CONTAINER(ptr, member) \ + struct minnow_panel_data *mpd = \ + container_of(ptr, struct minnow_panel_data, member) + +/* panel parameter passed from boot-loader */ +static char *def_panel_param; +module_param_named(panel_param, def_panel_param, charp, 0); + +static irqreturn_t minnow_panel_te_isr(int irq, void *data); +static void minnow_panel_te_timeout_work_callback(struct work_struct *work); +static int _minnow_panel_enable_te(struct minnow_panel_data *mpd, bool enable); + +static int minnow_panel_wake_up_locked(struct minnow_panel_data *mpd); +static void minnow_panel_framedone_cb(int err, void *data); +static int minnow_panel_enable_locked(struct minnow_panel_data *mpd); +static void minnow_panel_disable_locked(struct minnow_panel_data *mpd, + bool fast_power_off); +static int minnow_panel_update_locked(struct minnow_panel_data *mpd); + +static void minnow_panel_esd_work(struct work_struct *work); +static void minnow_panel_ulps_work(struct work_struct *work); + +static int minnow_panel_enable_mlocked(struct minnow_panel_data *mpd); +static void minnow_panel_disable_mlocked(struct minnow_panel_data *mpd); +static int minnow_panel_change_state_mlocked(struct minnow_panel_data *mpd, + int state); + +static void minnow_panel_sync_resume_mlocked(struct 
minnow_panel_data *mpd) +{ +#if defined(CONFIG_HAS_AMBIENTMODE) + /* check if there is already resumed */ + if (completion_done(&mpd->resume_completion)) + return; + /* wait 500ms for resume completed */ + if (wait_for_completion_timeout(&mpd->resume_completion, + msecs_to_jiffies(500))) + return; + WARN(1, "%s: failed sync with resume\n", __func__); +#else + (void)mpd; +#endif +} + +#ifdef CONFIG_HAS_AMBIENTMODE +/* the smart ambient feature enabled when + * 1) support_smart_ambient + * 2) and it's not on dock + */ +#define is_smart_ambient_feature_enabled(mpd) \ + (mpd->smart_ambient && !mpd->is_docked) + +/* the smart ambient timeout enabled when + * 1) support_smart_ambient + * 2) and defined ambient timeout + * 3) and it's not on dock + */ +#define is_smart_ambient_timeout_enabled(mpd) \ + (mpd->smart_ambient && mpd->ambient_timeout && !mpd->is_docked) +#endif + +#ifdef CONFIG_WAKEUP_SOURCE_NOTIFY +static int omapdss_displayenable_notify(struct notifier_block *self, + unsigned long action, void *dev) +{ + DECLARE_MPD_FROM_CONTAINER(self, displayenable_nb); + + /* don't case non-display wakeup event */ + if (GET_WAKEUP_EVENT_TYPE(action) != WAKEUP_DISPLAY) + return NOTIFY_OK; + + dev_info(&mpd->dssdev->dev, "%s, action is %lu", __func__, action); + + switch (action) { + case DISPLAY_WAKE_EVENT_POWERKEY: + case DISPLAY_WAKE_EVENT_TOUCH: + case DISPLAY_WAKE_EVENT_GESTURE: + /* Queue work to early enable the display */ + queue_work(mpd->workqueue, &mpd->early_init_work); + break; + case DISPLAY_WAKE_EVENT_GESTURE_VIEWON: + case DISPLAY_WAKE_EVENT_GESTURE_VIEWOFF: +#ifdef CONFIG_HAS_AMBIENTMODE + mpd->is_gesture_view_on = + action == DISPLAY_WAKE_EVENT_GESTURE_VIEWON; + /* Queue work to enable the smart ambient display mode */ + if (is_smart_ambient_feature_enabled(mpd)) + queue_work(mpd->workqueue, &mpd->ambient_wake_work); +#endif + break; + case DISPLAY_WAKE_EVENT_DOCKON: + case DISPLAY_WAKE_EVENT_DOCKOFF: +#ifdef CONFIG_HAS_AMBIENTMODE + mpd->is_docked = action == DISPLAY_WAKE_EVENT_DOCKON; + /* Queue work to dock the display */ + queue_work(mpd->workqueue, &mpd->dock_work); +#endif + break; + default: + dev_err(&mpd->dssdev->dev, + "%s: ignore unknown action(%lu)!\n", __func__, action); + break; + } + + return NOTIFY_OK; +} + +static void minnow_panel_early_init_func(struct work_struct *work) +{ + DECLARE_MPD_FROM_CONTAINER(work, early_init_work); + int r; + mutex_lock(&mpd->lock); + if (!mpd->enabled) { + /* record last state for later switch back */ + if (!mpd->early_inited) { + if (mpd->state == DISPLAY_AMBIENT_OFF) + mpd->last_state = DISPLAY_AMBIENT_OFF; + else + mpd->last_state = DISPLAY_DISABLE; + } + r = minnow_panel_change_state_mlocked(mpd, DISPLAY_ENABLE); + if (r) { + dev_err(&mpd->dssdev->dev, + "%s: minnow_panel_enable failed: %d\n", + __func__, r); + } else { + /* it will turn off display if it's not enabled + * by android within 500ms or kernel suspend + */ + mpd->early_inited = true; + queue_delayed_work(mpd->workqueue, + &mpd->early_init_timeout_work, + msecs_to_jiffies(500)); + } + } + mutex_unlock(&mpd->lock); +} + +static void minnow_panel_early_init_timeout_func(struct work_struct *work) +{ + DECLARE_MPD_FROM_CONTAINER(work, early_init_timeout_work.work); + mutex_lock(&mpd->lock); + if (mpd->early_inited) { + /* switch back to last state when early init been called */ + minnow_panel_change_state_mlocked(mpd, mpd->last_state); + dev_dbg(&mpd->dssdev->dev, "%s: cancelled previous early" + " initialize works\n", __func__); + } + mutex_unlock(&mpd->lock); +} +#endif 
/* CONFIG_WAKEUP_SOURCE_NOTIFY */ + +#ifdef CONFIG_HAS_AMBIENTMODE +static void minnow_panel_dock_func(struct work_struct *work) +{ + DECLARE_MPD_FROM_CONTAINER(work, dock_work); + bool update_state = false; + /* to handler DOCK event is for blocking the smart ambient + * timeout when it's on dock, so the only thing is needed + * just update state to DISPLAY_AMBIENT_ON as: + * 1) when ambient timeout (DISPLAY_AMBIENT_OFF), put device + * dock, need wake up it to ambient mode and never timeout + * 2) when it's on ambient mode, leave device from dock, set + * DISPLAY_AMBIENT_ON that will restart ambient timeout + * for all other cases, just ignore this event + */ + mutex_lock(&mpd->lock); + if (mpd->is_docked) { + if (mpd->state == DISPLAY_AMBIENT_OFF) + update_state = true; + } else { + if (mpd->state == DISPLAY_AMBIENT_ON) + update_state = true; + } + if (update_state) + minnow_panel_change_state_mlocked(mpd, DISPLAY_AMBIENT_ON); + mutex_unlock(&mpd->lock); +} + +static void minnow_panel_ambient_wake_func(struct work_struct *work) +{ + DECLARE_MPD_FROM_CONTAINER(work, ambient_wake_work); + mutex_lock(&mpd->lock); + /* we should only care view gesture when it's in ambient mode */ + if ((mpd->state == DISPLAY_AMBIENT_ON) || + (mpd->state == DISPLAY_AMBIENT_OFF)) { + int state = mpd->is_gesture_view_on + ? DISPLAY_AMBIENT_ON : DISPLAY_AMBIENT_OFF; + minnow_panel_change_state_mlocked(mpd, state); + } + mutex_unlock(&mpd->lock); +} + +static enum alarmtimer_restart minnow_panel_ambient_alarm_callback( + struct alarm *alarm, ktime_t now) +{ + DECLARE_MPD_FROM_CONTAINER(alarm, ambient_timeout_alarm); + dev_dbg(&mpd->dssdev->dev, "%s: turn off display\n", __func__); + /* Queue work to turn off the display */ + queue_work(mpd->workqueue, &(mpd->ambient_timeout_work)); + + return ALARMTIMER_NORESTART; +} + +static void minnow_panel_ambient_timeout_func(struct work_struct *work) +{ + DECLARE_MPD_FROM_CONTAINER(work, ambient_timeout_work); + mutex_lock(&mpd->lock); + minnow_panel_change_state_mlocked(mpd, DISPLAY_AMBIENT_OFF); + mutex_unlock(&mpd->lock); +} + +static void minnow_panel_start_ambient_alarm(struct minnow_panel_data *mpd) +{ + alarm_cancel(&mpd->ambient_timeout_alarm); + if (is_smart_ambient_timeout_enabled(mpd)) + alarm_start_relative(&mpd->ambient_timeout_alarm, + ktime_set(mpd->ambient_timeout, 0)); +} + +enum refresh_rate { + REFRESH_RATE_30HZ, + REFRESH_RATE_45HZ, + REFRESH_RATE_60HZ, +}; +#define minnow_panel_set_lowest_fps(mpd) \ + minnow_panel_set_refresh_rate_mlocked(mpd, REFRESH_RATE_30HZ) +#define minnow_panel_set_default_fps(mpd) \ + minnow_panel_set_refresh_rate_mlocked(mpd, REFRESH_RATE_45HZ) +#define minnow_panel_set_dock_fps(mpd) \ + minnow_panel_set_refresh_rate_mlocked(mpd, REFRESH_RATE_60HZ) +static int minnow_panel_set_refresh_rate_mlocked(struct minnow_panel_data *mpd, + enum refresh_rate rate) +{ + static u8 ssd2848_vtcm_pcfrr[][6] = { + [REFRESH_RATE_30HZ] = {0x20, 0x10, 0x00, 0xEF, 0x00, 0x34}, + [REFRESH_RATE_45HZ] = {0x20, 0x10, 0x00, 0x5F, 0x00, 0x1F}, + [REFRESH_RATE_60HZ] = {0x20, 0x10, 0x00, 0xF7, 0x00, 0x6C}, + }; + int r = 0; + + if (mpd->panel_type < OTM3201_2_0) + return r; + + dsi_bus_lock(mpd->dssdev); + r = minnow_panel_wake_up_locked(mpd); + if (!r) + r = dsi_vc_generic_write(mpd->dssdev, mpd->channel, + ssd2848_vtcm_pcfrr[rate], 6); + dsi_bus_unlock(mpd->dssdev); + if (r) + dev_err(&mpd->dssdev->dev, + "Failed to set refresh rate(%d)\n", rate); + + return r; +} +#endif /* CONFIG_HAS_AMBIENTMODE */ + +#ifdef CONFIG_OMAP2_DSS_DEBUGFS +static void 
minnow_panel_dump_regs(struct seq_file *s) +{ + static struct {char name[8]; int addr; int endreg; } regs[] = { + {"SCM", 0x0000, 0x30}, + {"MIPIRX", 0x1000, 0x30}, + {"VTCM", 0x2000, 0xB4}, + {"VCU", 0x4000, 0x20}, + {"GPIO", 0x5000, 0x04}, + {"MIPITX", 0x6000, 0x54}, + {"TX-DSI0", 0x6080, 0x14}, + }; + struct omap_dss_output *out = omap_dss_get_output(OMAP_DSS_OUTPUT_DSI1); + struct omap_dss_device *dssdev = out->device; + struct minnow_panel_data *mpd = dev_get_drvdata(&dssdev->dev); + int i, j, r; + + mutex_lock(&mpd->lock); + if (!mpd->enabled) { + seq_puts(s, "display is disabled!"); + goto exit1; + } + + dsi_bus_lock(dssdev); + r = minnow_panel_wake_up_locked(mpd); + if (r) { + seq_printf(s, "display wake up failed(%d)!\n", r); + goto exit; + } + + if (mpd->dssdev != dssdev) { + seq_puts(s, "dssdev mis-matched!"); + goto exit; + } + + if (dsi_vc_set_max_rx_packet_size(dssdev, mpd->channel, 4)) { + seq_puts(s, "failed set max rx_packet_size 4"); + goto exit; + } + + for (i = 0; i < sizeof(regs)/sizeof(regs[0]); i++) { + seq_printf(s, "%s Registers:\n", regs[i].name); + for (j = 0; j <= regs[i].endreg; j += 4) { + u8 reg[4]; + u16 addr = j + regs[i].addr; + seq_printf(s, " %04X: ", addr); + r = dsi_vc_generic_read_2(dssdev, mpd->channel, + addr>>8, addr&0xFF, reg, 4); + if (r) + seq_printf(s, "read failed ret = %d\n", r); + else + seq_printf(s, "%02X%02X%02X%02X\n", + reg[0], reg[1], reg[2], reg[3]); + } + } + + dsi_vc_set_max_rx_packet_size(dssdev, mpd->channel, 1); +exit: + dsi_bus_unlock(dssdev); +exit1: + mutex_unlock(&mpd->lock); +} +#endif + +static void minnow_panel_delay(int delay_ms) +{ + if (delay_ms > 5) + msleep(delay_ms); + else + usleep_range(1000 * delay_ms, 1000 * delay_ms + 100); +} + +static int panel_ssd2848_set_retransmit(struct minnow_panel_data *mpd, + int enable) +{ + u8 data[2] = {0xFF, enable ? 
0x01 : 0x00}; + return dsi_vc_generic_write(mpd->dssdev, mpd->channel, data, 2); +} + +static int panel_ssd2848_read_reg(struct minnow_panel_data *mpd, + u16 addr, u8 *read) +{ + int r; + dsi_vc_set_max_rx_packet_size(mpd->dssdev, mpd->channel, 4); + r = dsi_vc_generic_read_2(mpd->dssdev, mpd->channel, + addr>>8, addr&0xFF, read, 4); + dsi_vc_set_max_rx_packet_size(mpd->dssdev, mpd->channel, 1); + return r; +} + +static int panel_otm3201_read_reg(struct minnow_panel_data *mpd, + u8 addr, u8 *read, u8 len) +{ + int r; + dsi_vc_set_max_rx_packet_size(mpd->dssdev, mpd->channel, len); + r = dsi_vc_dcs_read(mpd->dssdev, mpd->channel, addr, read, len); + dsi_vc_set_max_rx_packet_size(mpd->dssdev, mpd->channel, 1); + return r; +} + +#define WRITE_OTM3201(mpd, cmd) \ + dsi_vc_dcs_write(mpd->dssdev, mpd->channel, cmd, sizeof(cmd)) +static int panel_otm3201_rewrite_reg(struct minnow_panel_data *mpd, + u8 *data, u8 len, u8 *read) +{ + /* retry to write only when it could read back */ + int r, retry; + + for (retry = 3; retry--; ) { + r = WRITE_OTM3201(mpd, otm3201_eng_mode); + if (r) + break; + r = WRITE_OTM3201(mpd, otm3201_write_mode); + if (r) + break; + r = dsi_vc_dcs_write(mpd->dssdev, mpd->channel, data, len); + if (r) + break; + r = WRITE_OTM3201(mpd, otm3201_read_mode); + if (r) + break; + r = panel_otm3201_read_reg(mpd, data[0], read, len-1); + if (r) + break; + /* special register B5h */ + if (data[0] == 0xB5) + read[3] |= data[4]&0x80; + if (!memcmp(read, data+1, len-1)) + break; + } + + return r; +} + +static int minnow_panel_dcs_read_1(struct minnow_panel_data *mpd, + u8 dcs_cmd, u8 *data) +{ + int r; + u8 buf[1]; + + r = dsi_vc_dcs_read(mpd->dssdev, mpd->channel, dcs_cmd, buf, 1); + + if (r < 0) + return r; + + *data = buf[0]; + + return 0; +} + +static int minnow_panel_dcs_write_0(struct minnow_panel_data *mpd, u8 dcs_cmd) +{ + return dsi_vc_dcs_write(mpd->dssdev, mpd->channel, &dcs_cmd, 1); +} + +static int minnow_panel_dcs_write_1(struct minnow_panel_data *mpd, + u8 dcs_cmd, u8 param) +{ + u8 buf[2]; + buf[0] = dcs_cmd; + buf[1] = param; + return dsi_vc_dcs_write(mpd->dssdev, mpd->channel, buf, 2); +} + +static int minnow_panel_get_id(struct minnow_panel_data *mpd, + u8 *id1, u8 *id2, u8 *id3) +{ + int r; + + if (mpd->id_panel == MINNOW_PANEL_CM_BRIDGE_320X320) { + u8 data[4]; + r = panel_ssd2848_read_reg(mpd, 0, data); + if (!r) { + *id1 = data[0]; + *id2 = data[1]; + *id3 = mpd->panel_type; + } + return r; + } + + r = minnow_panel_dcs_read_1(mpd, DCS_GET_ID1, id1); + if (r) + return r; + r = minnow_panel_dcs_read_1(mpd, DCS_GET_ID2, id2); + if (r) + return r; + r = minnow_panel_dcs_read_1(mpd, DCS_GET_ID3, id3); + if (r) + return r; + + + return 0; +} + +static int minnow_panel_set_update_window(struct minnow_panel_data *mpd, + u16 x, u16 y, u16 w, u16 h) +{ + int r; + u16 x1 = x + mpd->x_offset; + u16 x2 = x + mpd->x_offset + w - 1; + u16 y1 = y + mpd->y_offset; + u16 y2 = y + mpd->y_offset + h - 1; + u8 buf[5]; + + buf[0] = MIPI_DCS_SET_COLUMN_ADDRESS; + buf[1] = (x1 >> 8) & 0xff; + buf[2] = (x1 >> 0) & 0xff; + buf[3] = (x2 >> 8) & 0xff; + buf[4] = (x2 >> 0) & 0xff; + + r = dsi_vc_dcs_write_nosync(mpd->dssdev, mpd->channel, + buf, sizeof(buf)); + if (!r) { + buf[0] = MIPI_DCS_SET_PAGE_ADDRESS; + buf[1] = (y1 >> 8) & 0xff; + buf[2] = (y1 >> 0) & 0xff; + buf[3] = (y2 >> 8) & 0xff; + buf[4] = (y2 >> 0) & 0xff; + + r = dsi_vc_dcs_write_nosync(mpd->dssdev, mpd->channel, + buf, sizeof(buf)); + if (!r) + r = dsi_vc_send_bta_sync(mpd->dssdev, mpd->channel); + } + + return r; +} + +/* 
since the SSD2848 frame buffer is 320 x 320 but the actual panel is 320 x 290, + * the unused bottom 30 lines need to be cleared to save power + */ +static int minnow_panel_clear_bottom_line(struct minnow_panel_data *mpd, + unsigned int delay_ms) +{ + int r = 0; +#if UNUSED_LINES + unsigned int last_ms = jiffies_to_msecs(jiffies); + int plen, total; + u8 buf[124]; /* maximum packet size */ + + memset(buf, 0, sizeof(buf)); + buf[0] = MIPI_DCS_WRITE_MEMORY_START; + + r = panel_ssd2848_set_retransmit(mpd, false); + if (r) + return r; + + omapdss_dsi_vc_enable_hs(mpd->dssdev, mpd->channel, true); + + plen = dsi_get_pixel_size(mpd->dssdev->panel.dsi_pix_fmt); + plen = DIV_ROUND_UP(plen, 8); + total = PANEL_WIDTH * UNUSED_LINES * plen; + plen = (sizeof(buf) - 1) / plen * plen; + r = minnow_panel_set_update_window(mpd, 0, PANEL_HEIGHT, + PANEL_WIDTH, UNUSED_LINES); + for (; total && !r; buf[0] = MIPI_DCS_WRITE_MEMORY_CONTINUE) { + if (plen > total) + plen = total; + total -= plen; + r = dsi_vc_dcs_write(mpd->dssdev, mpd->channel, buf, plen+1); + } + + if (!r) + r = minnow_panel_set_update_window(mpd, 0, 0, + mpd->dssdev->panel.timings.x_res, + mpd->dssdev->panel.timings.y_res); + + omapdss_dsi_vc_enable_hs(mpd->dssdev, mpd->channel, false); + + if (r) { + /* wait out the rest of the requested delay */ + last_ms = jiffies_to_msecs(jiffies) - last_ms; + if (last_ms < delay_ms) + minnow_panel_delay(delay_ms - last_ms); + } +#else + minnow_panel_delay(delay_ms); +#endif + return r; +} + +static int minnow_panel_process_cmd(struct minnow_panel_data *mpd, + u8 cmd, u8 *data, int len, bool retrans) +{ + int r = -EINVAL; + + if (retrans) { + r = panel_ssd2848_set_retransmit(mpd, true); + if (r) + return r; + } + + switch (cmd) { + case DCS_WRITE_SYNC: + case OTM3201_CMD: + r = dsi_vc_dcs_write(mpd->dssdev, + mpd->channel, data, len); + break; + case GENERIC_WRITE_SYNC: + case SSD2848_CMD: + r = dsi_vc_generic_write(mpd->dssdev, + mpd->channel, data, len); + break; + case DCS_WRITE: + r = dsi_vc_dcs_write_nosync(mpd->dssdev, + mpd->channel, data, len); + break; + case GENERIC_WRITE: + r = dsi_vc_generic_write_nosync(mpd->dssdev, + mpd->channel, data, len); + break; + case BTA_SYNC: + r = dsi_vc_send_bta_sync(mpd->dssdev, mpd->channel); + break; + case WAIT_MS: + r = (int)(len == 1 ? 
(u32)(*data) : *(u16 *)data); + /* It needs to clean bottom line for Solomon+Orise, + * and to save time it uses the waiting time(120 ms) of + * Orise's initialization process + */ + if ((r > 100) && + (mpd->id_panel == MINNOW_PANEL_CM_BRIDGE_320X320)) + r = minnow_panel_clear_bottom_line(mpd, r); + else { + minnow_panel_delay(r); + r = 0; + } + break; + default: + r = -EINVAL; + } + + if (retrans && !r) + r = panel_ssd2848_set_retransmit(mpd, false); + + return r; +} + +static int minnow_panel_detect_type(struct minnow_panel_data *mpd) +{ + u8 rev; + int r = panel_otm3201_read_reg(mpd, 0xB0, &rev, sizeof(rev)); + if (!r) { + switch (rev) { + case 0x12: + mpd->panel_type = OTM3201_1_0; + break; + case 0x1A: + mpd->panel_type = OTM3201_2_0; + break; + case 0x1C: + default: + mpd->panel_type = OTM3201_2_1; + break; + } + } + return r; +} + +static void print_reg_mismatch(struct device *dev, u8 *src, u8 *read, int len) +{ + char *pstr, str[100]; + int i; + for (i = 0, pstr = str; i < len; i++, pstr += 3) + sprintf(pstr, " %02X", src[i]); + dev_dbg(dev, " Init:%s\n", str); + for (i = 0, pstr = str; i < len; i++, pstr += 3) + sprintf(pstr, " %02X", read[i]); + dev_dbg(dev, " Read:%s\n", str); +} + +static int minnow_panel_verify_ssd2848(struct minnow_panel_data *mpd, + u8 *cmdbuf) +{ + u8 *data, read[4]; + int r = panel_ssd2848_set_retransmit(mpd, false); + for (data = cmdbuf; *data && !r; data += *data+2) { + u16 addr; + if (data[1] != SSD2848_CMD) + continue; + if (data[2] == CMD_VERIFY_REG) + break; + addr = ((u16)data[2]<<8) | data[3]; + r = panel_ssd2848_read_reg(mpd, addr, read); + if (r) + break; + if (!memcmp(&read, data+4, 4)) + continue; + dev_err(&mpd->dssdev->dev, "Failed verify ssd2848" + " register: %04X\n", addr); + print_reg_mismatch(&mpd->dssdev->dev, data+4, read, 4); + break; + } + + return r; +} + +static bool _minnow_panel_replace_cmdbuf(u8 *cmdbuf, u8 *data) +{ + for (; *cmdbuf; cmdbuf += *cmdbuf+2) { + if ((data[0] != cmdbuf[0]) || (data[1] != cmdbuf[1])) + continue; + if (data[1] == SSD2848_CMD) { + if ((data[2] != cmdbuf[2]) || (data[3] != cmdbuf[3])) + continue; + memcpy(cmdbuf+4, data+4, 4); + return true; + } else if (data[1] == OTM3201_CMD) { + if (data[2] != cmdbuf[2]) + continue; + memcpy(cmdbuf+3, data+3, cmdbuf[0]-1); + return true; + } + } + return false; +} + +static bool minnow_panel_replace_cmdbuf(u8 *cmdbuf, u8 *data) +{ + bool ret = false; + for (; *data; data += *data+2) { + if (_minnow_panel_replace_cmdbuf(cmdbuf, data)) + ret = true; + } + return ret; +} + +static int minnow_panel_verify_otm3201(struct minnow_panel_data *mpd, + u8 *cmdbuf) +{ + u8 *data, addr, read[20]; + int r = panel_ssd2848_set_retransmit(mpd, true); + for (data = cmdbuf; *data && !r; data += *data+2) { + if (data[1] != OTM3201_CMD) + continue; + addr = data[2]; + if (addr == CMD_VERIFY_REG) { + break; + } else if (addr == 0xF0) { + /* resend command to make sure enable engineer mode */ + r = dsi_vc_dcs_write(mpd->dssdev, mpd->channel, + data+2, data[0]); + continue; + } else if (addr == 0xA0) { + /* for initialization, it needs set write mode, + * but for verification, it needs set read mode + */ + r = WRITE_OTM3201(mpd, otm3201_read_mode); + if (!r && (mpd->panel_type == PANEL_INIT)) { + r = minnow_panel_detect_type(mpd); + if (r) + break; + if (mpd->panel_type != OTM3201_1_0) + continue; + /* turn of ESD for panel 1.0 */ + mpd->esd_interval = 0; + /* it needs replace some settings for 1.0 */ + if (minnow_panel_replace_cmdbuf + (cmdbuf, panel_init_ssd2848_320x320_1)) { + 
dev_info(&mpd->dssdev->dev, + "Force reset for new settings" + " of panel 1.0!\n"); + mpd->panel_retry_count = 0; + mpd->total_error--; + r = -EIO; + break; + } + } + continue; + } + r = panel_otm3201_read_reg(mpd, addr, read, data[0]-1); + if (r) + break; + /* special register B5h */ + if (addr == 0xB5) + read[3] |= data[6]&0x80; + if (!memcmp(&read, data+3, data[0]-1)) + continue; + /* trying to rewrite register */ + r = panel_otm3201_rewrite_reg(mpd, data+2, data[0], read); + if (r) { + print_reg_mismatch(&mpd->dssdev->dev, + data+3, read, data[0]-1); + break; + } + } + if (r) + dev_err(&mpd->dssdev->dev, "Failed verify otm3201" + " register: %02X\n", addr); + else + r = panel_ssd2848_set_retransmit(mpd, false); + + return r; +} + +static int minnow_panel_check_cmdbuf(struct minnow_panel_data *mpd, + u8 *data, int count) +{ + int i, r = 0; + + for (i = count; *data && (i > 0); ) { + if (data[1] >= CMD_TYPE_MAX) + break; + i -= ((u32)*data + 2); + data += (*data + 2); + } + + /* command data shall end with 0 */ + if (*data || (i != 1)) { + dev_err(&mpd->dssdev->dev, "Invalid command data(0x%02x) " + "found at offset %d", *data, count - i); + r = -EINVAL; + } + + return r; +} + +static int minnow_panel_process_cmdbuf(struct minnow_panel_data *mpd, + struct minnow_panel_cmd_buf *cmd_buf, + bool verify) +{ + u8 *data; + int i, r; + bool retrans = false; + + /* be safe to check command data every time before sent to driver */ + r = minnow_panel_check_cmdbuf(mpd, cmd_buf->cmdbuf, cmd_buf->count); + if (r) + return r; + + for (i = 0, data = cmd_buf->cmdbuf; *data; i++, data += *data+2) { + if ((data[1] == SSD2848_CMD) && + (data[0] == 1) && (data[2] == CMD_VERIFY_REG)) { + if (!verify) + continue; + r = minnow_panel_verify_ssd2848(mpd, cmd_buf->cmdbuf); + } else if ((data[1] == OTM3201_CMD) && (data[0] == 1) && + (data[2] == CMD_VERIFY_REG)) { + if (!verify) + continue; + r = minnow_panel_verify_otm3201(mpd, cmd_buf->cmdbuf); + } else if (data[1] == SWITCH_TO_PANEL) { + retrans = !!data[2]; + } else + r = minnow_panel_process_cmd(mpd, data[1], data+2, + data[0], retrans); + if (r) { + dev_err(&mpd->dssdev->dev, "Failed process initialize" + " command[%d] len=%d type=%d ret=%d\n", + i, data[0], data[1], r); + break; + } + } + + return r; +} + +static void minnow_panel_queue_esd_work(struct minnow_panel_data *mpd) +{ + if (!mpd->esd_interval) + return; + queue_delayed_work(mpd->workqueue, &mpd->esd_work, + msecs_to_jiffies(mpd->esd_interval)); +#ifdef CONFIG_HAS_AMBIENTMODE + /* store start time of delayed work */ + read_persistent_clock(&mpd->esd_start_time); +#endif +} + +static void minnow_panel_cancel_esd_work(struct minnow_panel_data *mpd) +{ + cancel_delayed_work(&mpd->esd_work); +} + +static void minnow_panel_queue_ulps_work(struct minnow_panel_data *mpd) +{ + if (!mpd->ulps_timeout) + return; + queue_delayed_work(mpd->workqueue, &mpd->ulps_work, + msecs_to_jiffies(mpd->ulps_timeout)); +} + +static void minnow_panel_cancel_ulps_work(struct minnow_panel_data *mpd) +{ + cancel_delayed_work(&mpd->ulps_work); +} + +static int minnow_panel_dsi_recovery_locked(struct minnow_panel_data *mpd) +{ + struct omap_dss_device *dssdev = mpd->dssdev; + int r; + + /* true/true for fast disable dsi */ + omapdss_dsi_display_disable(mpd->dssdev, true, true); + mpd->ulps_enabled = false; + r = omapdss_dsi_display_enable(dssdev); + if (r) { + dev_err(&dssdev->dev, "DSI recovery failed to enable DSI\n"); + goto _ret_r_; + } + omapdss_dsi_vc_enable_hs(dssdev, mpd->channel, true); + r = 
_minnow_panel_enable_te(mpd, mpd->te_enabled); + /* for some reason, reset DSI may occur "false control" with bridge + * that will cause first command failed, so work around to try resend + * that check if it's still failed or not + */ + if (r) + r = _minnow_panel_enable_te(mpd, mpd->te_enabled); + if (r) + dev_err(&dssdev->dev, "DSI recovery failed to re-init TE"); +_ret_r_: + return r; +} + +static int minnow_panel_recovery_locked(struct minnow_panel_data *mpd) +{ + int r; + dev_err(&mpd->dssdev->dev, "performing LCD reset\n"); + mpd->total_error++; + mpd->total_esd_reset++; + minnow_panel_disable_locked(mpd, true); + msleep(20); + r = minnow_panel_enable_locked(mpd); + dev_err(&mpd->dssdev->dev, "LCD reset done(%d)\n", r); + return r; +} + +static int minnow_panel_enter_ulps_locked(struct minnow_panel_data *mpd) +{ + int r; + + if (mpd->ulps_enabled) + return 0; + + minnow_panel_cancel_ulps_work(mpd); + + r = _minnow_panel_enable_te(mpd, false); + if (r) { + /* try once to recovery DSI */ + r = minnow_panel_dsi_recovery_locked(mpd); + if (r) + goto err; + r = _minnow_panel_enable_te(mpd, false); + if (r) + goto err; + } + + if (gpio_is_valid(mpd->ext_te_gpio)) + disable_irq(gpio_to_irq(mpd->ext_te_gpio)); + + omapdss_dsi_display_disable(mpd->dssdev, false, true); + + mpd->ulps_enabled = true; + dev_dbg(&mpd->dssdev->dev, "entered ULPS mode\n"); +#ifdef PANEL_PERF_TIME + mpd->last_ulps = jiffies; +#endif + + return 0; + +err: + dev_err(&mpd->dssdev->dev, "enter ULPS failed\n"); + + mpd->ulps_enabled = false; + minnow_panel_queue_ulps_work(mpd); + + return r; +} + +static int minnow_panel_exit_ulps_locked(struct minnow_panel_data *mpd) +{ + struct omap_dss_device *dssdev = mpd->dssdev; + int r; + + if (!mpd->ulps_enabled) + return 0; + + r = omapdss_dsi_display_enable(dssdev); + if (r) { + /* try once to recovery DSI */ + r = minnow_panel_dsi_recovery_locked(mpd); + if (!r) + goto next; + dev_err(&dssdev->dev, "failed to enable DSI\n"); + goto err; + } + + omapdss_dsi_vc_enable_hs(dssdev, mpd->channel, true); + + r = _minnow_panel_enable_te(mpd, mpd->te_enabled); + if (r) { + /* try once to recovery DSI */ + r = minnow_panel_dsi_recovery_locked(mpd); + if (r) + goto err; + } + +next: + if (gpio_is_valid(mpd->ext_te_gpio)) + enable_irq(gpio_to_irq(mpd->ext_te_gpio)); + + minnow_panel_queue_ulps_work(mpd); + + mpd->ulps_enabled = false; + + dev_dbg(&dssdev->dev, "exited ULPS mode\n"); + +#ifdef PANEL_PERF_TIME + mpd->time_ulps += GET_ELAPSE_TIME(mpd->last_ulps); +#endif + return 0; + +err: + dev_err(&dssdev->dev, "failed to exit ULPS\n"); + return r; +} + +static int minnow_panel_wake_up_locked(struct minnow_panel_data *mpd) +{ + if (mpd->ulps_enabled) + return minnow_panel_exit_ulps_locked(mpd); + + minnow_panel_cancel_ulps_work(mpd); + minnow_panel_queue_ulps_work(mpd); + return 0; +} + +#ifdef CONFIG_PANEL_BACKLIGHT +static int minnow_panel_bl_update_status(struct backlight_device *dev) +{ + struct omap_dss_device *dssdev = dev_get_drvdata(&dev->dev); + struct minnow_panel_data *mpd = dev_get_drvdata(&dssdev->dev); + int r; + int level; + + if (dev->props.fb_blank == FB_BLANK_UNBLANK && + dev->props.power == FB_BLANK_UNBLANK) + level = dev->props.brightness; + else + level = 0; + + dev_dbg(&dssdev->dev, "update brightness to %d\n", level); + + mutex_lock(&mpd->lock); + + if (mpd->enabled) { + dsi_bus_lock(dssdev); + + r = minnow_panel_wake_up_locked(mpd); + if (!r) + r = minnow_panel_dcs_write_1(mpd, DCS_BRIGHTNESS, + level); + + dsi_bus_unlock(dssdev); + } else { + r = 0; + } + + 
mutex_unlock(&mpd->lock); + + return r; +} + +static int minnow_panel_bl_get_intensity(struct backlight_device *dev) +{ + if (dev->props.fb_blank == FB_BLANK_UNBLANK && + dev->props.power == FB_BLANK_UNBLANK) + return dev->props.brightness; + + return 0; +} + +static const struct backlight_ops minnow_panel_bl_ops = { + .get_brightness = minnow_panel_bl_get_intensity, + .update_status = minnow_panel_bl_update_status, +}; +#endif + +static void minnow_panel_get_resolution(struct omap_dss_device *dssdev, + u16 *xres, u16 *yres) +{ + *xres = dssdev->panel.timings.x_res; + *yres = dssdev->panel.timings.y_res; +} + +static ssize_t minnow_panel_errors_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct omap_dss_device *dssdev = to_dss_device(dev); + struct minnow_panel_data *mpd = dev_get_drvdata(&dssdev->dev); + static struct { u16 addr; const char *name; } dump[] = { + {0x1018, "MIPIRX-Phy Error"}, + {0x102C, "MIPIRX-DSI Error"}, + {0x1030, "MIPIRX-DSI Error Count"}, + {0x608C, "MIPITX-DSI0 Status"}, + }; + u8 read[6]; + int i, r, len = 0; + + mutex_lock(&mpd->lock); + len += snprintf(buf+len, PAGE_SIZE-len, "Updates: %d\n", + mpd->total_update); + len += snprintf(buf+len, PAGE_SIZE-len, "Errors: %d\n", + mpd->total_error); + len += snprintf(buf+len, PAGE_SIZE-len, "ESD RST: %d\n", + mpd->total_esd_reset); + dsi_bus_lock(dssdev); + + if (!mpd->enabled || (mpd->id_panel != MINNOW_PANEL_CM_BRIDGE_320X320)) + goto _ret_; + + r = minnow_panel_wake_up_locked(mpd); + if (r) { + len += snprintf(buf+len, PAGE_SIZE-len, "Failed to wakeup!\n"); + goto _ret_; + } + + for (i = 0; i < sizeof(dump)/sizeof(dump[0]); i++) { + r = panel_ssd2848_read_reg(mpd, dump[i].addr, read+2); + if (r) + len += snprintf(buf+len, PAGE_SIZE-len, + "Failed read register %s\n", + dump[i].name); + else + len += snprintf(buf+len, PAGE_SIZE-len, + "%s:\t%02X%02X%02X%02X\n", + dump[i].name, read[2], read[3], + read[4], read[5]); + if (dump[i].addr != 0x608C) + continue; + /* Cleaning MIPITX-DSI0 Status */ + read[0] = 0x60; + read[1] = 0x8C; + dsi_vc_generic_write(mpd->dssdev, mpd->channel, read, 6); + } + +_ret_: + dsi_bus_unlock(dssdev); + mutex_unlock(&mpd->lock); + + return len; +} + +static ssize_t minnow_panel_hw_revision_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct omap_dss_device *dssdev = to_dss_device(dev); + struct minnow_panel_data *mpd = dev_get_drvdata(&dssdev->dev); + u8 id1, id2, id3; + int r; + + mutex_lock(&mpd->lock); + + if (mpd->enabled) { + dsi_bus_lock(dssdev); + + r = minnow_panel_wake_up_locked(mpd); + if (!r) + r = minnow_panel_get_id(mpd, &id1, &id2, &id3); + + dsi_bus_unlock(dssdev); + } else { + r = -ENODEV; + } + + mutex_unlock(&mpd->lock); + + if (r) + return r; + + return snprintf(buf, PAGE_SIZE, "%02x.%02x.%02x\n", id1, id2, id3); +} + +#ifdef PANEL_DEBUG +static const char *cabc_modes[] = { + "off", /* used also always when CABC is not supported */ + "ui", + "still-image", + "moving-image", +}; + +static ssize_t show_cabc_mode(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct omap_dss_device *dssdev = to_dss_device(dev); + struct minnow_panel_data *mpd = dev_get_drvdata(&dssdev->dev); + const char *mode_str; + int mode; + + mode = mpd->cabc_mode; + + mode_str = "unknown"; + if (mode >= 0 && mode < ARRAY_SIZE(cabc_modes)) + mode_str = cabc_modes[mode]; + + return snprintf(buf, PAGE_SIZE, "%s\n", mode_str); +} + +static ssize_t store_cabc_mode(struct device *dev, + struct device_attribute *attr, const char *buf, 
size_t count) +{ + struct omap_dss_device *dssdev = to_dss_device(dev); + struct minnow_panel_data *mpd = dev_get_drvdata(&dssdev->dev); + int i; + int r; + + for (i = 0; i < ARRAY_SIZE(cabc_modes); i++) { + if (sysfs_streq(cabc_modes[i], buf)) + break; + } + + if (i == ARRAY_SIZE(cabc_modes)) + return -EINVAL; + + mutex_lock(&mpd->lock); + + if (mpd->enabled) { + dsi_bus_lock(dssdev); + + r = minnow_panel_wake_up_locked(mpd); + if (r) + goto err; + + r = minnow_panel_dcs_write_1(mpd, DCS_WRITE_CABC, i); + if (r) + goto err; + + dsi_bus_unlock(dssdev); + } + + mpd->cabc_mode = i; + + mutex_unlock(&mpd->lock); + + return count; +err: + dsi_bus_unlock(dssdev); + mutex_unlock(&mpd->lock); + return r; +} + +static ssize_t show_cabc_available_modes(struct device *dev, + struct device_attribute *attr, char *buf) +{ + int i, len; + + for (i = 0, len = 0; + len < PAGE_SIZE && i < ARRAY_SIZE(cabc_modes); i++) + len += snprintf(&buf[len], PAGE_SIZE-len, "%s%s%s", + i ? " " : "", cabc_modes[i], + i == ARRAY_SIZE(cabc_modes) - 1 ? "\n" : ""); + + return len; +} +#endif + +static ssize_t minnow_panel_store_esd_interval(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct omap_dss_device *dssdev = to_dss_device(dev); + struct minnow_panel_data *mpd = dev_get_drvdata(&dssdev->dev); + + unsigned long t; + int r; + + r = strict_strtoul(buf, 10, &t); + if (r) + return r; + + mutex_lock(&mpd->lock); + minnow_panel_cancel_esd_work(mpd); + /* special settings for test purpose */ + switch (t) { + case 1: case 2: + /* active panel/bridge reset to force ESD */ + t--; + dev_info(&mpd->dssdev->dev, "ESD test to force %s reset\n", + t == MINNOW_PANEL ? "panel" : "bridge"); + gpio_set_value(mpd->reset_gpio[t], + mpd->hw_reset[t].active ? 
1 : 0); + break; + case 3: + dsi_bus_lock(dssdev); + dev_info(&mpd->dssdev->dev, "ESD test for DSI recovery\n"); + r = minnow_panel_exit_ulps_locked(mpd); + dev_info(&mpd->dssdev->dev, + "minnow_panel_exit_ulps_locked = %d\n", r); + r = minnow_panel_dsi_recovery_locked(mpd); + dev_info(&mpd->dssdev->dev, + "minnow_panel_dsi_recovery_locked = %d\n", r); + r = minnow_panel_enter_ulps_locked(mpd); + dev_info(&mpd->dssdev->dev, + "minnow_panel_enter_ulps_locked = %d\n", r); + r = minnow_panel_dsi_recovery_locked(mpd); + dev_info(&mpd->dssdev->dev, + "minnow_panel_dsi_recovery_locked = %d\n", r); + dsi_bus_unlock(dssdev); + break; + case 4: + dsi_bus_lock(dssdev); + dev_info(&mpd->dssdev->dev, "ESD test for panel recovery\n"); + minnow_panel_disable_locked(mpd, true); + dev_info(&mpd->dssdev->dev, + "minnow_panel_disable_locked done\n"); + msleep(20); + dev_info(&mpd->dssdev->dev, + "minnow_panel_enable_locked start\n"); + r = minnow_panel_enable_locked(mpd); + dev_info(&mpd->dssdev->dev, + "minnow_panel_enable_locked = %d\n", r); + if (!r) { + r = minnow_panel_update_locked(mpd); + /* no dsi_bus_unlock when update start successfully */ + if (!r) + break; + } + dsi_bus_unlock(dssdev); + default: + mpd->esd_interval = t; + break; + } + if (mpd->enabled) + minnow_panel_queue_esd_work(mpd); + mutex_unlock(&mpd->lock); + + return count; +} + +static ssize_t minnow_panel_show_esd_interval(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct omap_dss_device *dssdev = to_dss_device(dev); + struct minnow_panel_data *mpd = dev_get_drvdata(&dssdev->dev); + unsigned t; + + mutex_lock(&mpd->lock); + t = mpd->esd_interval; + mutex_unlock(&mpd->lock); + + return snprintf(buf, PAGE_SIZE, "%u\n", t); +} + +static ssize_t minnow_panel_store_ulps(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct omap_dss_device *dssdev = to_dss_device(dev); + struct minnow_panel_data *mpd = dev_get_drvdata(&dssdev->dev); + unsigned long t; + int r; + + r = strict_strtoul(buf, 10, &t); + if (r) + return r; + + mutex_lock(&mpd->lock); + + if (mpd->enabled) { + dsi_bus_lock(dssdev); + + if (t) + r = minnow_panel_enter_ulps_locked(mpd); + else + r = minnow_panel_wake_up_locked(mpd); + + dsi_bus_unlock(dssdev); + } + + mutex_unlock(&mpd->lock); + + return r ? r : count; +} + +static ssize_t minnow_panel_show_ulps(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct omap_dss_device *dssdev = to_dss_device(dev); + struct minnow_panel_data *mpd = dev_get_drvdata(&dssdev->dev); + unsigned t; + + mutex_lock(&mpd->lock); + t = mpd->ulps_enabled; + mutex_unlock(&mpd->lock); + + return snprintf(buf, PAGE_SIZE, "%u\n", t); +} + +static ssize_t minnow_panel_store_ulps_timeout(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct omap_dss_device *dssdev = to_dss_device(dev); + struct minnow_panel_data *mpd = dev_get_drvdata(&dssdev->dev); + unsigned long t; + int r; + + r = strict_strtoul(buf, 10, &t); + if (r) + return r; + + mutex_lock(&mpd->lock); + mpd->ulps_timeout = t; + + if (mpd->enabled) { + /* minnow_panel_wake_up_locked will restart the timer */ + dsi_bus_lock(dssdev); + r = minnow_panel_wake_up_locked(mpd); + dsi_bus_unlock(dssdev); + } + + mutex_unlock(&mpd->lock); + + return r ? 
r : count; +} + +static ssize_t minnow_panel_show_ulps_timeout(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct omap_dss_device *dssdev = to_dss_device(dev); + struct minnow_panel_data *mpd = dev_get_drvdata(&dssdev->dev); + unsigned t; + + mutex_lock(&mpd->lock); + t = mpd->ulps_timeout; + mutex_unlock(&mpd->lock); + + return snprintf(buf, PAGE_SIZE, "%u\n", t); +} + +#ifdef PANEL_DEBUG +static ssize_t minnow_panel_store_init_data(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct omap_dss_device *dssdev = to_dss_device(dev); + struct minnow_panel_data *mpd = dev_get_drvdata(&dssdev->dev); + u8 *data; + int r; + + r = minnow_panel_check_cmdbuf(mpd, (u8 *)buf, count); + if (r) + return r; + + data = devm_kzalloc(&dssdev->dev, count, GFP_KERNEL); + if (!data) + return -ENOMEM; + memcpy(data, buf, count); + + mutex_lock(&mpd->lock); + mpd->power_on.count = count; + mpd->power_on.cmdbuf = data; + if (mpd->last_init_data) + devm_kfree(&dssdev->dev, mpd->last_init_data); + mpd->last_init_data = data; + mutex_unlock(&mpd->lock); + + return count; +} + +static ssize_t minnow_panel_show_init_data(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct omap_dss_device *dssdev = to_dss_device(dev); + struct minnow_panel_data *mpd = dev_get_drvdata(&dssdev->dev); + int i, j; + u8 *data; + + mutex_lock(&mpd->lock); + data = mpd->power_on.cmdbuf; + mutex_unlock(&mpd->lock); + + for (i = 0; i < PAGE_SIZE && *data; ) { + i += snprintf(buf+i, PAGE_SIZE-i, + "%02d %02d:", data[0], data[1]); + for (j = 0; j < *data && i < PAGE_SIZE; j++) { + i += snprintf(buf+i, PAGE_SIZE-i, " %02X", data[2+j]); + } + snprintf(buf+i, PAGE_SIZE-i, "\n"); + i++; + data += *data + 2; + } + + return i; +} +#endif /* PANEL_DEBUG */ + +#ifdef CONFIG_HAS_AMBIENTMODE +static ssize_t minnow_panel_show_interactivemode(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct omap_dss_device *dssdev = to_dss_device(dev); + struct minnow_panel_data *mpd = dev_get_drvdata(&dssdev->dev); + unsigned t; + + mutex_lock(&mpd->lock); + t = mpd->interactive; + mutex_unlock(&mpd->lock); + + return snprintf(buf, PAGE_SIZE, "%u\n", t); +} + +static ssize_t minnow_panel_store_interactivemode(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct omap_dss_device *dssdev = to_dss_device(dev); + struct minnow_panel_data *mpd = dev_get_drvdata(&dssdev->dev); + unsigned long t; + int r; + + r = strict_strtoul(buf, 10, &t); + if (!r) { + bool enable = !!t; + mutex_lock(&mpd->lock); +#ifdef CONFIG_WAKEUP_SOURCE_NOTIFY + /* clean early init timeout as someone handle it also */ + if (mpd->early_inited) { + mpd->early_inited = false; + cancel_delayed_work(&mpd->early_init_timeout_work); + } +#endif + if (mpd->interactive != enable) { + int state = mpd->state; + if (enable) + state = DISPLAY_ENABLE; + else if (state != DISPLAY_DISABLE) + state = DISPLAY_AMBIENT_ON; + r = minnow_panel_change_state_mlocked(mpd, state); + if (!r) + mpd->interactive = enable; + } + mutex_unlock(&mpd->lock); + if (r) + dev_err(&dssdev->dev, "%s interactive mode failed %d\n", + enable ? "enable" : "disable", r); + else + dev_dbg(&dssdev->dev, "%s interactive mode succeeded\n", + enable ? "enable" : "disable"); + } + + return r ? 
r : count; +} + +static ssize_t minnow_panel_show_ambient_timeout(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct omap_dss_device *dssdev = to_dss_device(dev); + struct minnow_panel_data *mpd = dev_get_drvdata(&dssdev->dev); + unsigned int t; + + mutex_lock(&mpd->lock); + t = mpd->ambient_timeout; + mutex_unlock(&mpd->lock); + + return snprintf(buf, PAGE_SIZE, "%u\n", t); +} + +static ssize_t minnow_panel_store_ambient_timeout(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct omap_dss_device *dssdev = to_dss_device(dev); + struct minnow_panel_data *mpd = dev_get_drvdata(&dssdev->dev); + unsigned long t; + int r; + + r = kstrtoul(buf, 10, &t); + if (!r) { + mutex_lock(&mpd->lock); + mpd->ambient_timeout = (int)t; + mutex_unlock(&mpd->lock); + } + + return r ? r : count; +} +#endif /* CONFIG_HAS_AMBIENTMODE */ + +#ifdef PANEL_PERF_TIME +static ssize_t minnow_panel_perftime_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct omap_dss_device *dssdev = to_dss_device(dev); + struct minnow_panel_data *mpd = dev_get_drvdata(&dssdev->dev); + int len = 0; + + mutex_lock(&mpd->lock); + if (mpd->enabled) { + mpd->time_power_on += GET_ELAPSE_TIME(mpd->last_power_on); + mpd->last_power_on = jiffies; + } + len += snprintf(buf+len, PAGE_SIZE-len, "Power On: %lu ms\n", + mpd->time_power_on); + len += snprintf(buf+len, PAGE_SIZE-len, "Enter ULPS: %lu ms\n", + mpd->time_ulps); + len += snprintf(buf+len, PAGE_SIZE-len, "Update Frame: %lu ms\n", + mpd->time_update); + len += snprintf(buf+len, PAGE_SIZE-len, " Frame Min: %lu ms\n", + mpd->time_update_min); + len += snprintf(buf+len, PAGE_SIZE-len, " Frame Max: %lu ms\n", + mpd->time_update_max); + len += snprintf(buf+len, PAGE_SIZE-len, " Frame Avg: %lu ms\n", + mpd->time_update / mpd->total_update); + mutex_unlock(&mpd->lock); + + return len; +} +#endif + +static ssize_t minnow_panel_vsync_events_enabled_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct omap_dss_device *dssdev = to_dss_device(dev); + struct minnow_panel_data *mpd = dev_get_drvdata(&dssdev->dev); + bool vsync_events_enabled; + + mutex_lock(&mpd->lock); + vsync_events_enabled = mpd->vsync_events_enabled; + mutex_unlock(&mpd->lock); + + return snprintf(buf, PAGE_SIZE, "%d\n", vsync_events_enabled); +} + +static int minnow_panel_vsync_events_init(struct minnow_panel_data *mpd) +{ + int r = 0; + if (gpio_is_valid(mpd->vsync_events_gpio)) { + r = devm_gpio_request_one(&mpd->dssdev->dev, + mpd->vsync_events_gpio, GPIOF_IN, + "minnow-panel vsync_events"); + mpd->vsync_events_sysfs = sysfs_get_dirent( + mpd->dssdev->dev.kobj.sd, NULL, "vsync_events"); + } + return r; +} + +static irqreturn_t minnow_panel_vsync_events_isr(int irq, void *dev_id) +{ + struct minnow_panel_data *mpd = dev_id; + mpd->vsync_events_timestamp = ktime_get(); + sysfs_notify_dirent(mpd->vsync_events_sysfs); + return IRQ_HANDLED; +} + +static int minnow_panel_enable_vsync_events_mlocked( + struct minnow_panel_data *mpd, bool enabled) +{ + int r = 0; + if (!gpio_is_valid(mpd->vsync_events_gpio)) { + dev_err(&mpd->dssdev->dev, + "enable_vsync_events: store: gpio not valid"); + r = -EINVAL; + } else if (enabled != mpd->vsync_events_enabled) { + if (enabled) { + r = devm_request_irq(&mpd->dssdev->dev, + gpio_to_irq(mpd->vsync_events_gpio), + minnow_panel_vsync_events_isr, + IRQF_TRIGGER_RISING, + "minnow-panel vsync_events", + mpd); + } else { + devm_free_irq(&mpd->dssdev->dev, + 
gpio_to_irq(mpd->vsync_events_gpio), + mpd); + } + mpd->vsync_events_enabled = enabled; + } + return r; +} + +static ssize_t minnow_panel_vsync_events_enabled_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct omap_dss_device *dssdev = to_dss_device(dev); + struct minnow_panel_data *mpd = dev_get_drvdata(&dssdev->dev); + unsigned long t; + int r; + + r = kstrtoul(buf, 10, &t); + if (!r) { + bool enabled = !!t; + mutex_lock(&mpd->lock); + r = minnow_panel_enable_vsync_events_mlocked(mpd, enabled); + mutex_unlock(&mpd->lock); + } + + return r ? r : count; +} + +static ssize_t minnow_panel_vsync_events_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct omap_dss_device *dssdev = to_dss_device(dev); + struct minnow_panel_data *mpd = dev_get_drvdata(&dssdev->dev); + return snprintf(buf, PAGE_SIZE, "%llu\n", + ktime_to_ns(mpd->vsync_events_timestamp)); +} + +static DEVICE_ATTR(errors, S_IRUGO, minnow_panel_errors_show, NULL); +static DEVICE_ATTR(hw_revision, S_IRUGO, minnow_panel_hw_revision_show, NULL); +static DEVICE_ATTR(esd_interval, S_IRUGO | S_IWUSR, + minnow_panel_show_esd_interval, + minnow_panel_store_esd_interval); +static DEVICE_ATTR(ulps, S_IRUGO | S_IWUSR, + minnow_panel_show_ulps, minnow_panel_store_ulps); +static DEVICE_ATTR(ulps_timeout, S_IRUGO | S_IWUSR, + minnow_panel_show_ulps_timeout, + minnow_panel_store_ulps_timeout); +#ifdef PANEL_DEBUG +static DEVICE_ATTR(cabc_mode, S_IRUGO | S_IWUSR, + show_cabc_mode, store_cabc_mode); +static DEVICE_ATTR(cabc_available_modes, S_IRUGO, + show_cabc_available_modes, NULL); +static DEVICE_ATTR(init_data, S_IRUGO | S_IWUSR, + minnow_panel_show_init_data, + minnow_panel_store_init_data); +#endif +#ifdef CONFIG_HAS_AMBIENTMODE +static DEVICE_ATTR(interactivemode, S_IRUGO | S_IWUSR, + minnow_panel_show_interactivemode, + minnow_panel_store_interactivemode); +static DEVICE_ATTR(ambient_timeout, S_IRUGO | S_IWUSR, + minnow_panel_show_ambient_timeout, + minnow_panel_store_ambient_timeout); +#endif +#ifdef PANEL_PERF_TIME +static DEVICE_ATTR(perftime, S_IRUGO, minnow_panel_perftime_show, NULL); +#endif +static DEVICE_ATTR(vsync_events_enabled, S_IRUGO | S_IWUSR, + minnow_panel_vsync_events_enabled_show, + minnow_panel_vsync_events_enabled_store); +static DEVICE_ATTR(vsync_events, S_IRUGO, + minnow_panel_vsync_events_show, NULL); + +static struct attribute *minnow_panel_attrs[] = { + &dev_attr_errors.attr, + &dev_attr_hw_revision.attr, + &dev_attr_esd_interval.attr, + &dev_attr_ulps.attr, + &dev_attr_ulps_timeout.attr, +#ifdef PANEL_DEBUG + &dev_attr_cabc_mode.attr, + &dev_attr_cabc_available_modes.attr, + &dev_attr_init_data.attr, +#endif +#ifdef CONFIG_HAS_AMBIENTMODE + &dev_attr_interactivemode.attr, + &dev_attr_ambient_timeout.attr, +#endif +#ifdef PANEL_PERF_TIME + &dev_attr_perftime.attr, +#endif + &dev_attr_vsync_events_enabled.attr, + &dev_attr_vsync_events.attr, + NULL, +}; + +static struct attribute_group minnow_panel_attr_group = { + .attrs = minnow_panel_attrs, +}; + +static void _minnow_panel_hw_active_reset(struct minnow_panel_data *mpd) +{ + int i; + if (mpd->reset_ms < 0) + return; + + /* reset the device */ + for (i = 0; i < MINNOW_COMPONENT_MAX; i++) { + if (!gpio_is_valid(mpd->reset_gpio[i])) + continue; + gpio_set_value(mpd->reset_gpio[i], + mpd->hw_reset[i].active ? 
1 : 0); + } + + /* wait device reset */ + minnow_panel_delay(mpd->reset_ms); +} + +static void _minnow_panel_hw_reset(struct minnow_panel_data *mpd) +{ + int i; + + _minnow_panel_hw_active_reset(mpd); + + /* assert reset */ + for (i = 0; i < MINNOW_COMPONENT_MAX; i++) { + if (!gpio_is_valid(mpd->reset_gpio[i])) + continue; + gpio_set_value(mpd->reset_gpio[i], + mpd->hw_reset[i].active ? 0 : 1); + } + + /* wait after releasing reset */ + if (mpd->release_ms > 0) + minnow_panel_delay(mpd->release_ms); +} + +static int minnow_panel_set_regulators(struct minnow_panel_data *mpd, + int (*func)(struct regulator *regulator)) +{ + int i; + + for (i = 0; i < MINNOW_COMPONENT_MAX; i++) { + if (!mpd->regulators[i]) + continue; + if (func(mpd->regulators[i])) + return -ENODEV; + } + + return 0; +} + +static void minnow_panel_enable_vio(struct minnow_panel_data *mpd, bool enable) +{ + if (enable) { + if (gpio_is_valid(mpd->vio_en_gpio)) { + /* This is workaround to fix unexpected M4 reset issue + * select pulldown mode to enable switch will tuen on + * 1.8v power supply slowly, that will help reduce the + * dip of 1.8v supply + */ + int r = pinctrl_select_state(mpd->vio_pctrl, + mpd->vio_state_pulldown); + if (r) + dev_err(&mpd->dssdev->dev, "failed to activate" + " vio_state_pulldown!"); + usleep_range(250, 300); + /* go back to output low mode to keep switch enabled */ + gpio_set_value(mpd->vio_en_gpio, 0); + r = pinctrl_select_state(mpd->vio_pctrl, + mpd->vio_state_output); + if (r) + dev_err(&mpd->dssdev->dev, "failed to activate" + " vio_state_output!"); + } + if (gpio_is_valid(mpd->mem_en_gpio)) + gpio_set_value(mpd->mem_en_gpio, 1); + } else { + if (gpio_is_valid(mpd->mem_en_gpio)) + gpio_set_value(mpd->mem_en_gpio, 0); + if (gpio_is_valid(mpd->vio_en_gpio)) + gpio_set_value(mpd->vio_en_gpio, 1); + } +} + +static int minnow_panel_enable_clkin(struct minnow_panel_data *mpd, + bool enable) +{ + int r = 0; + if ((mpd->clk_in_en != enable) && (mpd->clk_in != NULL)) { + if (enable) + r = clk_prepare_enable(mpd->clk_in); + else + clk_disable_unprepare(mpd->clk_in); + if (!r) + mpd->clk_in_en = enable; + } + return r; +} + +#define DEBUG_DT +#ifdef DEBUG_DT +#define DTINFO(fmt, ...) \ + printk(KERN_INFO "minnow-panel DT: " fmt, ## __VA_ARGS__) +#define DTINFO_PIXFMT(msg, pix) \ +{ char *fmt[4] = {"RGB888", "RGB666", "RGB666_PACKED", "RGB565"};\ + DTINFO(msg"%s\n", fmt[pix]);\ +} +#define DTINFO_ARRAY(msg, a, n, fmt, blen) \ +{ int i; char str[blen], *p = str;\ + for (i = 0; i < n; i++) {\ + sprintf(p, fmt, a[i]);\ + p += strlen(p); \ + } \ + DTINFO(msg"%s\n", str);\ +} +#else /* DEBUG_DT */ +#define DTINFO(fmt, ...) 
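+/* descriptive note (added): with DEBUG_DT undefined, the DT logging helpers below compile to empty stubs */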
+#define DTINFO_PIXFMT(msg, pix) +#define DTINFO_ARRAY(msg, a, n, fmt, blen) +#endif + +static struct of_device_id minnow_panel_ids[] = { + { .compatible = "mot,minnow-panel-dsi-cm" }, + { /*sentinel*/ } +}; + +static int minnow_panel_dt_init(struct minnow_panel_data *mpd) +{ + u32 range[2], value = 0; + struct minnow_panel_attr *panel_attr; + struct device_node *dt_node; + char *clkin; + + dt_node = of_find_matching_node(NULL, minnow_panel_ids); + if (dt_node == NULL) { + dev_err(&mpd->dssdev->dev, "No dt_node found!\n"); + return -ENODEV; + } + + /* Save the dt node entry to the device */ + mpd->dssdev->dev.of_node = dt_node; + + if (of_property_read_u32(dt_node, "id_panel", &value) \ + || (value >= MINNOW_PANEL_MAX)) { + dev_err(&mpd->dssdev->dev, \ + "Invalid id_panel = %u!\n", value); + return -EINVAL; + } + mpd->id_panel = value; + DTINFO("id_panel = %d\n", mpd->id_panel); + + panel_attr = &panel_attr_table[mpd->id_panel]; + mpd->power_on = panel_attr->power_on; + mpd->power_off = panel_attr->power_off; + mpd->dssdev->caps = OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE | + OMAP_DSS_DISPLAY_CAP_TEAR_ELIM; + mpd->dssdev->panel.timings.x_res = panel_attr->xres; + mpd->dssdev->panel.timings.y_res = panel_attr->yres; + mpd->dssdev->panel.timings.pixel_clock = panel_attr->pixel_clock; + mpd->dssdev->panel.dsi_pix_fmt = panel_attr->pixel_format; + mpd->dsi_config.mode = panel_attr->mode; + mpd->dsi_config.pixel_format = panel_attr->pixel_format; + mpd->dsi_config.hs_clk_min = panel_attr->hs.min; + mpd->dsi_config.hs_clk_max = panel_attr->hs.max; + mpd->dsi_config.lp_clk_min = panel_attr->lp.min; + mpd->dsi_config.lp_clk_max = panel_attr->lp.max; + mpd->x_offset = panel_attr->xoffset; + mpd->y_offset = panel_attr->yoffset; + + mpd->hw_reset[MINNOW_PANEL] = panel_attr->panel_reset; + mpd->hw_reset[MINNOW_BRIDGE] = panel_attr->bridge_reset; + mpd->reset_gpio[MINNOW_PANEL] = + of_get_named_gpio(dt_node, "gpio_panel_reset", 0); + DTINFO("gpio_panel_reset = %d\n", mpd->reset_gpio[MINNOW_PANEL]); + mpd->reset_gpio[MINNOW_BRIDGE] = + of_get_named_gpio(dt_node, "gpio_bridge_reset", 0); + DTINFO("gpio_bridge_reset = %d\n", + mpd->reset_gpio[MINNOW_BRIDGE]); + mpd->ext_te_gpio = of_get_named_gpio(dt_node, "gpio_te", 0); + DTINFO("gpio_te = %d\n", mpd->ext_te_gpio); + mpd->vio_en_gpio = of_get_named_gpio(dt_node, "gpio_vio_en", 0); + DTINFO("gpio_vio_en = %d\n", mpd->vio_en_gpio); + if (gpio_is_valid(mpd->vio_en_gpio)) { + mpd->vio_pctrl = devm_pinctrl_get(&mpd->dssdev->dev); + if (IS_ERR(mpd->vio_pctrl)) { + dev_err(&mpd->dssdev->dev, "no vio pinctrl handle\n"); + return PTR_ERR(mpd->vio_pctrl); + } + mpd->vio_state_pulldown = + pinctrl_lookup_state(mpd->vio_pctrl, "viopulldown"); + if (IS_ERR(mpd->vio_state_pulldown)) { + dev_err(&mpd->dssdev->dev, "no vio pulldown state\n"); + return PTR_ERR(mpd->vio_state_pulldown); + } + mpd->vio_state_output = + pinctrl_lookup_state(mpd->vio_pctrl, "viooutput"); + if (IS_ERR(mpd->vio_state_output)) { + dev_err(&mpd->dssdev->dev, "no vio output state\n"); + return PTR_ERR(mpd->vio_state_output); + } + } + mpd->mem_en_gpio = of_get_named_gpio(dt_node, "gpio_mem_en", 0); + DTINFO("gpio_mem_en = %d\n", mpd->mem_en_gpio); + mpd->vsync_events_gpio = of_get_named_gpio(dt_node, + "gpio_vsync_events", 0); + DTINFO("gpio_vsync_events = %d\n", mpd->vsync_events_gpio); + clkin = (char *)of_get_property(dt_node, "clk_in", NULL); + if (clkin) { + mpd->clk_in = clk_get(NULL, clkin); + if (IS_ERR(mpd->clk_in)) { + int r = PTR_ERR(mpd->clk_in); + dev_err(&mpd->dssdev->dev, + "Failed get 
external clock %s!\n", clkin); + mpd->clk_in = NULL; + return r; + } + } + mpd->esd_interval = 0; + if (!of_property_read_u32(dt_node, "esd_interval", &value)) { + mpd->esd_interval = value; + DTINFO("esd_interval = %d\n", mpd->esd_interval); + } +#ifdef CONFIG_HAS_AMBIENTMODE + mpd->smart_ambient = + of_property_read_bool(dt_node, "support_smart_ambient"); + DTINFO("support_smart_ambient = %d\n", mpd->smart_ambient); + mpd->ambient_timeout = 0; + if (!of_property_read_u32(dt_node, "ambient_timeout", &value)) { + mpd->ambient_timeout = value; + DTINFO("ambient_timeout = %d\n", mpd->ambient_timeout); + } +#endif + /* automatically go to ULPS mode for none-update within 250ms */ + mpd->ulps_timeout = 250; +#ifdef CONFIG_PANEL_BACKLIGHT + mpd->use_dsi_backlight = false; +#endif + + mpd->pin_config.num_pins = 4; + mpd->pin_config.pins[0] = 0; + mpd->pin_config.pins[1] = 1; + mpd->pin_config.pins[2] = 2; + mpd->pin_config.pins[3] = 3; + if (of_get_property(dt_node, "pins", &value)) { + u32 pins[OMAP_DSS_MAX_DSI_PINS]; + u32 num_pins = value / sizeof(u32); + if (!num_pins || (num_pins > OMAP_DSS_MAX_DSI_PINS)) { + dev_err(&mpd->dssdev->dev, \ + "Invalid DSI pins count = %u!\n", num_pins); + return -EINVAL; + } + value = 0; + if (!of_property_read_u32_array(dt_node, \ + "pins", pins, num_pins)) { + for (; value < num_pins; value++) { + if (pins[value] >= OMAP_DSS_MAX_DSI_PINS) + break; + mpd->pin_config.pins[value]\ + = pins[value]; + } + } + if (value < num_pins) { + dev_err(&mpd->dssdev->dev, \ + "Invalid DSI pins config!\n"); + return -EINVAL; + } + mpd->pin_config.num_pins = num_pins; + DTINFO("num_pins = %d\n", \ + mpd->pin_config.num_pins); + DTINFO_ARRAY("pins =", mpd->pin_config.pins,\ + mpd->pin_config.num_pins, " %u", 64); + } + + if (!of_property_read_u32(dt_node, "pixel_clock", &value)) { + if (value < mpd->dssdev->panel.timings.pixel_clock) { + dev_err(&mpd->dssdev->dev, \ + "Invalid pixel_clock = %u!\n", value); + return -EINVAL; + } + mpd->dssdev->panel.timings.pixel_clock = value; + DTINFO("pixel_clock = %u\n", \ + mpd->dssdev->panel.timings.pixel_clock); + } + + if (!of_property_read_u32(dt_node, "pixel_format", &value)) { + switch (value) { + case OMAP_DSS_DSI_FMT_RGB888: + case OMAP_DSS_DSI_FMT_RGB666: + case OMAP_DSS_DSI_FMT_RGB666_PACKED: + case OMAP_DSS_DSI_FMT_RGB565: + break; + default: + dev_err(&mpd->dssdev->dev, \ + "Invalid pixel_format = %u!\n", value); + return -EINVAL; + } + mpd->dssdev->panel.dsi_pix_fmt = \ + mpd->dsi_config.pixel_format = value; + DTINFO_PIXFMT("pixel_format = ", \ + mpd->dssdev->panel.dsi_pix_fmt); + } + + if (!of_property_read_u32_array(dt_node, "hs_clk", range, 2)) { + mpd->dsi_config.hs_clk_min = range[0]; + mpd->dsi_config.hs_clk_max = range[1]; + DTINFO("hs_clk_min = %lu, hs_clk_max = %lu\n", \ + mpd->dsi_config.hs_clk_min, \ + mpd->dsi_config.hs_clk_max); + } + + if (!of_property_read_u32_array(dt_node, "lp_clk", range, 2)) { + mpd->dsi_config.lp_clk_min = range[0]; + mpd->dsi_config.lp_clk_max = range[1]; + DTINFO("lp_clk_min = %lu, lp_clk_max = %lu\n", \ + mpd->dsi_config.lp_clk_min, \ + mpd->dsi_config.lp_clk_max); + } + + return 0; +} + +static int minnow_panel_parse_panel_param(char *param, int *ptype, int *pver) +{ + char *p, *start = (char *)param; + int ver, type; + + if (!param) + return -EINVAL; + type = simple_strtoul(start, &p, 10); + if (start == p) + return -EINVAL; + if (*p != '#') + return -EINVAL; + start = p + 1; + ver = simple_strtoul(start, &p, 10); + if (start == p) + return -EINVAL; + if (*p != '\0') + return -EINVAL; 
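+ /* descriptive note (added): both fields parsed, so report them to the caller; + * the param string has the form "<type>#<version>", e.g. "4#1" + * (values here are only illustrative) + */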
+ *ptype = type; + *pver = ver; + return 0; +} + +static int minnow_panel_probe(struct omap_dss_device *dssdev) +{ + struct minnow_panel_data *mpd; +#ifdef CONFIG_PANEL_BACKLIGHT + struct backlight_device *bldev = NULL; +#endif + int i, r; + + dev_dbg(&dssdev->dev, "probe\n"); + + mpd = devm_kzalloc(&dssdev->dev, sizeof(*mpd), GFP_KERNEL); + if (!mpd) + return -ENOMEM; + + dev_set_drvdata(&dssdev->dev, mpd); + mpd->dssdev = dssdev; + mpd->first_enable = true; + mpd->m4_state = DISPLAY_ENABLE; + + r = minnow_panel_dt_init(mpd); + if (r) + return r; + + mutex_init(&mpd->lock); + + atomic_set(&mpd->do_update, 0); +#ifdef PANEL_PERF_TIME + mpd->time_update_min = (unsigned long)(-1); + mpd->time_update_max = 0; +#endif + /* it will reset bridge/panel if boot-loader does not initialize it */ + mpd->skip_first_init = false; + if (minnow_panel_parse_panel_param(def_panel_param, &i, &r)) { + dev_err(&dssdev->dev, "wrong panel parameter %s\n", + def_panel_param); + i = PANEL_INIT; + } + mpd->panel_type = i; + switch (mpd->panel_type) { + case PANEL_INIT: + case PANEL_DUMMY: + dev_info(&dssdev->dev, + "There is not panel id coming from boot-loader\n"); + break; + case OTM3201_1_0: + /* turn of ESD for panel 1.0 */ + mpd->esd_interval = 0; + /* it needs replace the settings for panel 1.0 */ + if (minnow_panel_replace_cmdbuf(mpd->power_on.cmdbuf, + panel_init_ssd2848_320x320_1)){ + dev_info(&dssdev->dev, "Replaced for the settings " + "of panel 1.0!\n"); + } + case OTM3201_2_0: + case OTM3201_2_1: + if (r == INIT_DATA_VERSION) + mpd->skip_first_init = true; + else + dev_err(&dssdev->dev, "Initialize version mismatch" + " (%d-%d)!\n", r, INIT_DATA_VERSION); + break; + default: + dev_err(&dssdev->dev, + "Wrong panel id(%d) got from boot-loader\n", + mpd->panel_type); + break; + } + dev_info(&dssdev->dev, "skip first time initialization is %s\n", + mpd->skip_first_init ? "enabled" : "disabled"); + + if (gpio_is_valid(mpd->vio_en_gpio)) { + r = devm_gpio_request_one(&dssdev->dev, mpd->vio_en_gpio, + mpd->skip_first_init + ? GPIOF_OUT_INIT_LOW + : GPIOF_OUT_INIT_HIGH, + "minnow-panel vio_en"); + if (r) { + dev_err(&dssdev->dev, + "failed to request panel vio_en gpio\n"); + return r; + } + r = pinctrl_select_state(mpd->vio_pctrl, + mpd->vio_state_output); + if (r) { + dev_err(&mpd->dssdev->dev, + "failed to activate vio output state!"); + return r; + } + } + + if (gpio_is_valid(mpd->mem_en_gpio)) { + r = devm_gpio_request_one(&dssdev->dev, mpd->mem_en_gpio, + mpd->skip_first_init + ? GPIOF_OUT_INIT_HIGH + : GPIOF_OUT_INIT_LOW, + "minnow-panel mem_en"); + if (r) { + dev_err(&dssdev->dev, + "failed to request panel mem_en gpio %d\n", + mpd->mem_en_gpio); + return r; + } + } + + mpd->reset_ms = -1; + mpd->release_ms = -1; + for (i = 0; i < MINNOW_COMPONENT_MAX; i++) { + static const char * const name[MINNOW_COMPONENT_MAX] = { + "minnow-panel reset", + "minnow-bridge reset" + }; + bool active_low = (mpd->hw_reset[i].active == ACTIVE_LOW); + if (!gpio_is_valid(mpd->reset_gpio[i])) + continue; + /* skip_first_init hw_reset.active gpio_init + * Y ACTIVE_LOW INIT_HIGH + * Y ACTIVE_HIGH INIT_LOW + * N ACTIVE_LOW INIT_LOW + * N ACTIVE_HIGH INIT_HIGH + */ + r = devm_gpio_request_one(&dssdev->dev, mpd->reset_gpio[i], + (mpd->skip_first_init ^ active_low) + ? 
GPIOF_OUT_INIT_LOW + : GPIOF_OUT_INIT_HIGH, + name[i]); + if (r) { + dev_err(&dssdev->dev, + "failed to request %s gpio\n", name[i]); + return r; + } + if (mpd->reset_ms < mpd->hw_reset[i].reset_ms) + mpd->reset_ms = mpd->hw_reset[i].reset_ms; + if (mpd->release_ms < mpd->hw_reset[i].wait_ms) + mpd->release_ms = mpd->hw_reset[i].wait_ms; + } + + if (gpio_is_valid(mpd->ext_te_gpio)) { + r = devm_gpio_request_one(&dssdev->dev, mpd->ext_te_gpio, + GPIOF_IN, "minnow-panel irq"); + if (r) { + dev_err(&dssdev->dev, + "failed to request ext_te gpio\n"); + return r; + } + + r = devm_request_irq(&dssdev->dev, + gpio_to_irq(mpd->ext_te_gpio), + minnow_panel_te_isr, + IRQF_TRIGGER_RISING, + "minnow-panel vsync", dssdev); + + if (r) { + dev_err(&dssdev->dev, "IRQ request failed\n"); + return r; + } + + INIT_DEFERRABLE_WORK(&mpd->te_timeout_work, + minnow_panel_te_timeout_work_callback); + + dev_dbg(&dssdev->dev, "Using GPIO TE\n"); + } + + for (i = 0; i < MINNOW_COMPONENT_MAX; i++) { + static const char * const name[MINNOW_COMPONENT_MAX] = { + "panel", + "bridge" + }; + struct regulator *rt; + rt = devm_regulator_get(&dssdev->dev, name[i]); + if (IS_ERR(rt)) { + rt = NULL; + dev_info(&dssdev->dev, "Could not get %s regulator\n", + name[i]); + } + mpd->regulators[i] = rt; + if (!mpd->skip_first_init) { + /* toggle enable/disable to force disable */ + r = regulator_enable(rt); + r = regulator_disable(rt); + } + } + + mpd->workqueue = create_singlethread_workqueue("minnow_panel_esd"); + if (mpd->workqueue == NULL) { + dev_err(&dssdev->dev, "can't create ESD workqueue\n"); + return -ENOMEM; + } + INIT_DEFERRABLE_WORK(&mpd->esd_work, minnow_panel_esd_work); + INIT_DELAYED_WORK(&mpd->ulps_work, minnow_panel_ulps_work); + +#ifdef CONFIG_PANEL_BACKLIGHT + if (mpd->use_dsi_backlight) { + struct backlight_properties props; + memset(&props, 0, sizeof(struct backlight_properties)); + props.max_brightness = 255; + + props.type = BACKLIGHT_RAW; + bldev = backlight_device_register(dev_name(&dssdev->dev), + &dssdev->dev, dssdev, + &minnow_panel_bl_ops, + &props); + if (IS_ERR(bldev)) { + r = PTR_ERR(bldev); + goto err_bl; + } + + mpd->bldev = bldev; + + bldev->props.fb_blank = FB_BLANK_UNBLANK; + bldev->props.power = FB_BLANK_UNBLANK; + bldev->props.brightness = 255; + + minnow_panel_bl_update_status(bldev); + } +#endif + + r = omap_dsi_request_vc(dssdev, &mpd->channel); + if (r) { + dev_err(&dssdev->dev, "failed to get virtual channel\n"); + goto err_req_vc; + } + + r = omap_dsi_set_vc_id(dssdev, mpd->channel, TCH); + if (r) { + dev_err(&dssdev->dev, "failed to set VC_ID\n"); + goto err_vc_id; + } + + r = sysfs_create_group(&dssdev->dev.kobj, &minnow_panel_attr_group); + if (r) { + dev_err(&dssdev->dev, "failed to create sysfs files\n"); + goto err_vc_id; + } + + r = minnow_panel_vsync_events_init(mpd); + if (r) { + dev_err(&dssdev->dev, "failed to init vsync_events\n"); + goto err_vc_id; + } + +#ifdef CONFIG_OMAP2_DSS_DEBUGFS + if (mpd->id_panel == MINNOW_PANEL_CM_BRIDGE_320X320) + dss_debugfs_create_file("panel_regs", minnow_panel_dump_regs); +#endif + wake_lock_init(&mpd->wake_lock, WAKE_LOCK_SUSPEND, "minnow-panel"); + wake_lock_init(&mpd->update_wake_lock, WAKE_LOCK_SUSPEND, + "minnow-panel-update"); + +#ifdef CONFIG_WAKEUP_SOURCE_NOTIFY + INIT_WORK(&mpd->early_init_work, minnow_panel_early_init_func); + INIT_DELAYED_WORK(&mpd->early_init_timeout_work, + minnow_panel_early_init_timeout_func); + mpd->displayenable_nb.notifier_call = omapdss_displayenable_notify; + 
wakeup_source_register_notify(&mpd->displayenable_nb); +#endif /* CONFIG_WAKEUP_SOURCE_NOTIFY */ + +#ifdef CONFIG_HAS_AMBIENTMODE + init_completion(&mpd->resume_completion); + complete_all(&mpd->resume_completion); + INIT_WORK(&mpd->dock_work, minnow_panel_dock_func); + INIT_WORK(&mpd->ambient_wake_work, minnow_panel_ambient_wake_func); + INIT_WORK(&mpd->ambient_timeout_work, + minnow_panel_ambient_timeout_func); + alarm_init(&mpd->ambient_timeout_alarm, ALARM_REALTIME, + minnow_panel_ambient_alarm_callback); +#endif + + return 0; + +err_vc_id: + omap_dsi_release_vc(dssdev, mpd->channel); +err_req_vc: +#ifdef CONFIG_PANEL_BACKLIGHT + if (bldev != NULL) + backlight_device_unregister(bldev); +err_bl: +#endif + destroy_workqueue(mpd->workqueue); + return r; +} + +static void __exit minnow_panel_remove(struct omap_dss_device *dssdev) +{ + struct minnow_panel_data *mpd = dev_get_drvdata(&dssdev->dev); + + dev_dbg(&dssdev->dev, "remove\n"); + + sysfs_remove_group(&dssdev->dev.kobj, &minnow_panel_attr_group); + omap_dsi_release_vc(dssdev, mpd->channel); + +#ifdef CONFIG_PANEL_BACKLIGHT + if (mpd->bldev != NULL) { + struct backlight_device *bldev = mpd->bldev; + bldev->props.power = FB_BLANK_POWERDOWN; + minnow_panel_bl_update_status(bldev); + backlight_device_unregister(bldev); + } +#endif + +#ifdef CONFIG_WAKEUP_SOURCE_NOTIFY + wakeup_source_unregister_notify(&mpd->displayenable_nb); + cancel_work_sync(&mpd->early_init_work); +#endif /* CONFIG_WAKEUP_SOURCE_NOTIFY */ +#ifdef CONFIG_HAS_AMBIENTMODE + alarm_cancel(&mpd->ambient_timeout_alarm); +#endif + minnow_panel_cancel_ulps_work(mpd); + minnow_panel_cancel_esd_work(mpd); + destroy_workqueue(mpd->workqueue); + + /* reset, to be sure that the panel is in a valid state */ + _minnow_panel_hw_reset(mpd); +} + +#define DCS_POWER_MODE_NORMAL 0x1C +#define ERROR_UNEXPECT_MODE(mode) (0x12345600 | (mode)) +/* panel 1.0 has timing issue that may occur missing command or ECC error + * then it will receive 00 from Solomon, so for this case, retry to read + * might be better + */ +#define IS_ERR_UNEXPECT_MODE_00(err) ((err) == 0x12345600) +static int minnow_panel_check_panel_status(struct minnow_panel_data *mpd) +{ + u8 mode; + int r; + + if (mpd->id_panel != MINNOW_PANEL_CM_BRIDGE_320X320) + return 0; + + r = panel_ssd2848_set_retransmit(mpd, true); + if (!r) { + r = panel_otm3201_read_reg(mpd, MIPI_DCS_GET_POWER_MODE, + &mode, 1); + if (!r) + r = panel_ssd2848_set_retransmit(mpd, false); + } + + if (r) + dev_dbg(&mpd->dssdev->dev, "unable to get panel power mode\n"); + else if (mode != DCS_POWER_MODE_NORMAL) { + dev_err(&mpd->dssdev->dev, + "panel is not On, power mode is 0x%02x\n", mode); + r = ERROR_UNEXPECT_MODE(mode); + } + + return r; +} + +#define POWER_ON_RETRY_TIMES 3 +static int minnow_panel_power_on(struct minnow_panel_data *mpd) +{ + struct omap_dss_device *dssdev = mpd->dssdev; + u8 id1, id2, id3; + int r; + bool need_verify = true; + + mpd->panel_retry_count = 0; + mpd->esd_errors = 0; + +init_start: + mpd->panel_retry_count++; + r = omapdss_dsi_configure_pins(dssdev, &mpd->pin_config); + if (r) { + dev_err(&dssdev->dev, "failed to configure DSI pins\n"); + goto err0; + }; + + mpd->dsi_config.timings = &dssdev->panel.timings; + r = omapdss_dsi_set_config(dssdev, &mpd->dsi_config); + if (r) { + dev_err(&dssdev->dev, "failed to configure DSI\n"); + goto err0; + } + + r = omapdss_dsi_display_enable(dssdev); + if (r) { + dev_err(&dssdev->dev, "failed to enable DSI\n"); + goto err0; + } + if (mpd->output_enabled) { + 
dsi_disable_video_output(mpd->dssdev, mpd->channel); + mpd->output_enabled = false; + } + + omapdss_dsi_vc_enable_hs(dssdev, mpd->channel, false); + + /* for the first time power on, do not reset h/w to keep logo on */ + if (mpd->first_enable && mpd->skip_first_init) { + dsi_vc_send_bta_sync(dssdev, mpd->channel); + } else { + _minnow_panel_hw_reset(mpd); + r = minnow_panel_process_cmdbuf(mpd, &mpd->power_on, + need_verify); + if (!r && (mpd->panel_type != PANEL_DUMMY)) { + /* check if panel power on correctly, it may + * get power on mode with 0 sometimes, but it + * could not prove panel does not work, needs + * retry to check it again. + */ + int retry = 3; + do { + r = minnow_panel_check_panel_status(mpd); + } while (retry-- && IS_ERR_UNEXPECT_MODE_00(r)); + } + if (r) { + if (mpd->panel_retry_count >= POWER_ON_RETRY_TIMES) { + if (mpd->panel_type == PANEL_INIT) { + dev_err(&dssdev->dev, + "No panel to be detected, " + "using dummy panel instead\n"); + mpd->panel_type = PANEL_DUMMY; + } else { + if (!need_verify) + goto err; + } + /* try without read back check */ + need_verify = false; + } + mpd->total_error++; + dev_err(&dssdev->dev, "Reset hardware to retry ...\n"); + /* true/true for fast disable dsi */ + omapdss_dsi_display_disable(dssdev, true, true); + goto init_start; + } + } + + /* it needed enable TE to force update after display enabled */ + mpd->te_enabled = true; + r = _minnow_panel_enable_te(mpd, mpd->te_enabled); + if (r) + goto err; + + if (mpd->first_enable) { + r = minnow_panel_get_id(mpd, &id1, &id2, &id3); + if (r) + goto err; + } + + r = minnow_panel_dcs_write_0(mpd, MIPI_DCS_SET_DISPLAY_ON); + if (r) + goto err; + + omapdss_dsi_vc_enable_hs(dssdev, mpd->channel, true); + + r = dsi_enable_video_output(dssdev, mpd->channel); + if (r) + goto err; + + mpd->enabled = true; + mpd->output_enabled = true; + + if (mpd->first_enable) + dev_info(&dssdev->dev, "panel revision %02x.%02x.%02x\n", + id1, id2, id3); +#ifdef PANEL_PERF_TIME + mpd->last_power_on = jiffies; +#endif + return 0; +err: + mpd->total_error++; + dev_err(&dssdev->dev, + "error while enabling panel, issuing HW reset\n"); + _minnow_panel_hw_active_reset(mpd); + + omapdss_dsi_display_disable(dssdev, true, false); +err0: + return r; +} + +static void minnow_panel_power_off(struct minnow_panel_data *mpd, + bool fast_power_off) +{ + int r; + +#ifdef PANEL_PERF_TIME + if (mpd->enabled) + mpd->time_power_on += GET_ELAPSE_TIME(mpd->last_power_on); +#endif + if (!fast_power_off) { + dsi_disable_video_output(mpd->dssdev, mpd->channel); + mpd->output_enabled = false; + + r = minnow_panel_process_cmdbuf(mpd, &mpd->power_off, false); + if (r) + dev_err(&mpd->dssdev->dev, + "error disabling panel, return %d\n", r); + } + /* true/true for fast disable DSI */ + omapdss_dsi_display_disable(mpd->dssdev, true, fast_power_off); + + mpd->enabled = false; + mpd->ulps_enabled = false; +} + +static void minnow_panel_disable_locked(struct minnow_panel_data *mpd, + bool fast_power_off) +{ + minnow_panel_cancel_ulps_work(mpd); + minnow_panel_power_off(mpd, fast_power_off); + mpd->dssdev->state = OMAP_DSS_DISPLAY_DISABLED; + + _minnow_panel_hw_active_reset(mpd); + minnow_panel_enable_clkin(mpd, false); + minnow_panel_enable_vio(mpd, false); + minnow_panel_set_regulators(mpd, regulator_disable); +} + +static int minnow_panel_enable_locked(struct minnow_panel_data *mpd) +{ + int r; + + r = minnow_panel_set_regulators(mpd, regulator_enable); + if (r) + goto err; + /* By default, there's a leakage current coming from peripheral + * 26 MHz 
clock, so turn on 1.8v VIO first, then turn on clk_in + * will help avoid the step issue of enable 1.8v VIO. + * But unfortunately, there is a big problem of the hardware now, + * when turn off VIO for long time(above 12 minutes), it occurred + * the capacitance fully discharged, when enable VIO later, it will + * pull down 1.8V power supply momently as the capacitance charging, + * that occurred M4 reset since power supply dip. + * So turn on peripheral 26 MHz clock first and waiting for 6ms, + * let leakage current fully charge the capacitance, then turn on + * VIO, that will help avoid unexpected M4 reset (it still has dip + * on 1.8V power supply but smaller than worse case) + * For the step issue of 1.8v VIO, it's not a big deal as the reset + * pin has been hold and will release after it reaches 1.8v for 20ms, + * this matches the requirement of Solomon power on sequence. + * This is just software work around to fix the hardware issue + */ + r = minnow_panel_enable_clkin(mpd, true); + if (r) + goto err; + usleep_range(6000, 6300); + minnow_panel_enable_vio(mpd, true); + + r = minnow_panel_power_on(mpd); + if (r) + goto err; + + minnow_panel_queue_esd_work(mpd); + mpd->dssdev->state = OMAP_DSS_DISPLAY_ACTIVE; + + return 0; + +err: + dev_err(&mpd->dssdev->dev, "Display enable failed, err = %d\n", r); + + /* clean up clk/power */ + minnow_panel_disable_locked(mpd, true); + return r; +} + +static int minnow_panel_update_locked(struct minnow_panel_data *mpd) +{ + int r = 0; + + /* hold wake_lock to avoid kernel suspend */ + wake_lock(&mpd->update_wake_lock); + + /* XXX no need to send this every frame, but dsi break if not done */ + r = minnow_panel_set_update_window(mpd, 0, 0, + mpd->dssdev->panel.timings.x_res, + mpd->dssdev->panel.timings.y_res); + if (r) + goto err; + + if (mpd->te_enabled && gpio_is_valid(mpd->ext_te_gpio)) { + schedule_delayed_work(&mpd->te_timeout_work, + msecs_to_jiffies(250)); + atomic_set(&mpd->do_update, 1); + } else { + r = omap_dsi_update(mpd->dssdev, mpd->channel, + minnow_panel_framedone_cb, mpd->dssdev); + if (r) + goto err; + } + +#ifdef PANEL_PERF_TIME + mpd->last_update = jiffies; +#endif + /* No wake_unlock here, unlock will be done in framedone_cb */ + return r; +err: + wake_unlock(&mpd->update_wake_lock); + return r; +} + +static int minnow_panel_enable_mlocked(struct minnow_panel_data *mpd) +{ + struct omap_dss_device *dssdev = mpd->dssdev; + bool update; + int r = 0; + + dev_info(&dssdev->dev, "%s: current state = %d\n", + __func__, dssdev->state); + + if (dssdev->state == OMAP_DSS_DISPLAY_DISABLED) { + wake_lock(&mpd->wake_lock); + minnow_panel_cancel_ulps_work(mpd); + minnow_panel_cancel_esd_work(mpd); + minnow_panel_sync_resume_mlocked(mpd); + + dsi_bus_lock(dssdev); + r = minnow_panel_enable_locked(mpd); + /* do not force update at first time to keep boot logo on */ + update = !r && !(mpd->first_enable && mpd->skip_first_init); + mpd->first_enable = false; + if (update) + update = !minnow_panel_update_locked(mpd); + /* it will release dsi_bus_unlock in frame done callback when + * update start successfully + */ + if (!update) + dsi_bus_unlock(dssdev); + if (!r) { + minnow_panel_queue_ulps_work(mpd); + minnow_panel_queue_esd_work(mpd); + dev_dbg(&dssdev->dev, "Display enabled successfully " + "%s update!\n", update ? 
"with" : "without"); + } + wake_unlock(&mpd->wake_lock); + } + + return r; +} + +static void minnow_panel_disable_mlocked(struct minnow_panel_data *mpd) +{ + struct omap_dss_device *dssdev = mpd->dssdev; + + dev_info(&dssdev->dev, "%s: current state = %d\n", + __func__, dssdev->state); + + wake_lock(&mpd->wake_lock); + mpd->early_inited = false; + cancel_delayed_work(&mpd->early_init_timeout_work); + minnow_panel_cancel_ulps_work(mpd); + minnow_panel_cancel_esd_work(mpd); + minnow_panel_sync_resume_mlocked(mpd); + + dsi_bus_lock(dssdev); + if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) { + /* if it can not wakeup, do fast disable */ + bool fast = minnow_panel_wake_up_locked(mpd) != 0; + minnow_panel_disable_locked(mpd, fast); + } + dsi_bus_unlock(dssdev); + + wake_unlock(&mpd->wake_lock); +} + +static void minnow_panel_sync_display_status_mlocked( + struct minnow_panel_data *mpd) +{ + struct m4sensorhub_data *m4sensorhub; + enum display_state m4_state = mpd->state; + /* special case for dock mode, set to DISPLAY_ENABLE + * to block all wakeup gestures + */ + if (mpd->is_docked) + m4_state = DISPLAY_ENABLE; + if (mpd->m4_state == m4_state) + return; + + /* be safety to sync resume states first */ + minnow_panel_sync_resume_mlocked(mpd); + + m4sensorhub = m4sensorhub_client_get_drvdata(); + if (m4sensorhub->mode != NORMALMODE) { + dev_err(&mpd->dssdev->dev, + "M4 is not ready, unable to set screen status(%d)\n", + m4_state); + return; + } + if (m4sensorhub_reg_write_1byte(m4sensorhub, + M4SH_REG_USERSETTINGS_SCREENSTATUS, + m4_state, 0xFF) != 1) { + dev_err(&mpd->dssdev->dev, + "Unable to set screen status(%d) to M4\n", + m4_state); + return; + } + dev_dbg(&mpd->dssdev->dev, + "Set screen status(%d) to M4 success!\n", m4_state); + mpd->m4_state = m4_state; +} + +#ifdef CONFIG_HAS_AMBIENTMODE +static void led_set_dim_brightness(struct device *dev) +{ + struct m4sensorhub_data *m4sensorhub; + uint16_t als = DIM_BACKLIGHT_ALS; /* default value */ + int size; + + m4sensorhub = m4sensorhub_client_get_drvdata(); + size = m4sensorhub_reg_getsize(m4sensorhub, + M4SH_REG_LIGHTSENSOR_SIGNAL); + if (size != sizeof(als)) + dev_err(dev, "can't get M4 reg size for ALS\n"); + else if (size != m4sensorhub_reg_read(m4sensorhub, + M4SH_REG_LIGHTSENSOR_SIGNAL, + (char *)&als)) + dev_err(dev, "error reading M4 ALS value\n"); + + led_set_brightness_raw_als(led_get_default_dev(), als); +} +#endif /* CONFIG_HAS_AMBIENTMODE */ + +static int minnow_panel_change_state_mlocked(struct minnow_panel_data *mpd, + int state) +{ + int r = 0; + + dev_info(&mpd->dssdev->dev, + "change state %d ==> %d\n", mpd->state, state); + + /* already in state, return success */ + if (state == mpd->state) { +#ifdef CONFIG_HAS_AMBIENTMODE + if (state == DISPLAY_AMBIENT_ON) { + /* reset smart ambient timeout if it's needed */ + minnow_panel_start_ambient_alarm(mpd); + } +#endif + goto _ret_; + } + +#ifdef CONFIG_HAS_AMBIENTMODE + alarm_cancel(&mpd->ambient_timeout_alarm); +#endif + /* be safety to sync resume states first */ + minnow_panel_sync_resume_mlocked(mpd); + + switch (state) { + case DISPLAY_DISABLE: + if (mpd->enabled) + minnow_panel_disable_mlocked(mpd); + break; + case DISPLAY_ENABLE: + if (!mpd->enabled) + r = minnow_panel_enable_mlocked(mpd); +#ifdef CONFIG_HAS_AMBIENTMODE + /* switch back default refresh rate when last state is + * ambient mode + */ + else if (mpd->state == DISPLAY_AMBIENT_ON) + minnow_panel_set_default_fps(mpd); +#endif + break; +#ifdef CONFIG_HAS_AMBIENTMODE + case DISPLAY_AMBIENT_OFF: + /* it can't go to 
ambient mode when display already disabled. + * this is normal case that turn off display first, then set + * interactive off later, do nothing just return success + */ + if (mpd->state == DISPLAY_DISABLE) + goto _ret_; + if (is_smart_ambient_feature_enabled(mpd)) { + /* Turn off the back light */ + led_set_brightness(led_get_default_dev(), 0); + minnow_panel_disable_mlocked(mpd); + } + break; + case DISPLAY_AMBIENT_ON: + /* it can't go to ambient mode when display already disabled. + * this is normal case that turn off display first, then set + * interactive off later, do nothing just return success + */ + if (mpd->state == DISPLAY_DISABLE) + goto _ret_; + /* check if it's ambient mode on dock */ + if ((mpd->state == DISPLAY_ENABLE) && mpd->is_docked) { + /* switch to dock refresh rate */ + minnow_panel_set_dock_fps(mpd); + break; + } + /* check if smart ambient mode feature enabled */ + if (!is_smart_ambient_feature_enabled(mpd)) + break; + /* turn on display when it's off before */ + if (!mpd->enabled) + r = minnow_panel_enable_mlocked(mpd); + if (!r) { + /* switch to lowest refresh rate */ + minnow_panel_set_lowest_fps(mpd); + /* Dim the back light */ + led_set_dim_brightness(&mpd->dssdev->dev); + minnow_panel_start_ambient_alarm(mpd); + } + break; +#endif /* CONFIG_HAS_AMBIENTMODE */ + default: + r = -EINVAL; + } + if (!r) + mpd->state = state; + else + dev_err(&mpd->dssdev->dev, "failed(%d) set state(%d)," + " current state(%d)\n", r, state, mpd->state); +_ret_: + minnow_panel_sync_display_status_mlocked(mpd); + return r; +} + +#ifdef CONFIG_HAS_AMBIENTMODE +static int minnow_panel_suspend(struct omap_dss_device *dssdev) +{ + struct minnow_panel_data *mpd = dev_get_drvdata(&dssdev->dev); + dev_dbg(&dssdev->dev, "%s: current state = %d, wake_lock:%d\n", + __func__, dssdev->state, wake_lock_active(&mpd->wake_lock)); + + mutex_lock(&mpd->lock); +#ifdef CONFIG_WAKEUP_SOURCE_NOTIFY + /* it needs turn off display if it's early initialized */ + if (mpd->early_inited) { + /* switch back to last state when early init been called */ + minnow_panel_change_state_mlocked(mpd, mpd->last_state); + dev_dbg(&mpd->dssdev->dev, "%s: cancelled previous early" + " initialize works\n", __func__); + } +#endif + if (mpd->enabled) { + dsi_bus_lock(dssdev); + minnow_panel_enter_ulps_locked(mpd); + dsi_bus_unlock(dssdev); + /* cancel queued esd work in suspend mode */ + if (mpd->esd_interval) + minnow_panel_cancel_esd_work(mpd); + } + /* block all threads waiting on resume */ + INIT_COMPLETION(mpd->resume_completion); + mutex_unlock(&mpd->lock); + + return 0; +} + +static int minnow_panel_resume(struct omap_dss_device *dssdev) +{ + struct minnow_panel_data *mpd = dev_get_drvdata(&dssdev->dev); + dev_dbg(&dssdev->dev, "%s: current state = %d, wake_lock:%d\n", + __func__, dssdev->state, wake_lock_active(&mpd->wake_lock)); + + /* wake up all threads waiting on resume + * don't mutex_lock this as it may dead lock + */ + complete_all(&mpd->resume_completion); + + mutex_lock(&mpd->lock); + /* calculate delay time to queue esd work again */ + if (mpd->esd_interval && mpd->enabled) { + int ms; + struct timespec ts; + read_persistent_clock(&ts); + ts = timespec_sub(ts, mpd->esd_start_time); + ms = ts.tv_sec * MSEC_PER_SEC + + ts.tv_nsec / NSEC_PER_MSEC; + if (ms >= mpd->esd_interval) + ms = 0; + else + ms = mpd->esd_interval - ms; + queue_delayed_work(mpd->workqueue, &mpd->esd_work, + msecs_to_jiffies(ms)); + } + mutex_unlock(&mpd->lock); + + return 0; +} +#endif /* CONFIG_HAS_AMBIENTMODE */ + +static void 
minnow_panel_framedone_cb(int err, void *data) +{ + struct omap_dss_device *dssdev = data; + struct minnow_panel_data *mpd = dev_get_drvdata(&dssdev->dev); + dev_dbg(&dssdev->dev, "framedone, err %d\n", err); + mpd->total_update++; +#ifdef PANEL_PERF_TIME +{ + unsigned long ms = GET_ELAPSE_TIME(mpd->last_update); + if (ms < mpd->time_update_min) + mpd->time_update_min = ms; + if (ms > mpd->time_update_max) + mpd->time_update_max = ms; + mpd->time_update += ms; +} +#endif + dsi_bus_unlock(dssdev); + wake_unlock(&mpd->update_wake_lock); +} + +static irqreturn_t minnow_panel_te_isr(int irq, void *data) +{ + struct omap_dss_device *dssdev = data; + struct minnow_panel_data *mpd = dev_get_drvdata(&dssdev->dev); + int old; + int r; + + old = atomic_cmpxchg(&mpd->do_update, 1, 0); + + if (old) { + cancel_delayed_work(&mpd->te_timeout_work); + r = omap_dsi_update(dssdev, mpd->channel, minnow_panel_framedone_cb, + dssdev); + if (r) + goto err; + } + + return IRQ_HANDLED; +err: + dev_err(&dssdev->dev, "start update failed\n"); + dsi_bus_unlock(dssdev); + return IRQ_HANDLED; +} + +static void minnow_panel_te_timeout_work_callback(struct work_struct *work) +{ + DECLARE_MPD_FROM_CONTAINER(work, te_timeout_work.work); + struct omap_dss_device *dssdev = mpd->dssdev; + + dev_err(&dssdev->dev, "TE not received for 250ms!\n"); + + atomic_set(&mpd->do_update, 0); + dsi_bus_unlock(dssdev); +} + +static int minnow_panel_enable(struct omap_dss_device *dssdev) +{ + struct minnow_panel_data *mpd = dev_get_drvdata(&dssdev->dev); + int r; + + mutex_lock(&mpd->lock); +#ifdef CONFIG_WAKEUP_SOURCE_NOTIFY + if (mpd->early_inited) { + /* clean early init timeout as someone handle it also */ + mpd->early_inited = false; + cancel_delayed_work(&mpd->early_init_timeout_work); + } +#endif + r = minnow_panel_change_state_mlocked(mpd, DISPLAY_ENABLE); + mutex_unlock(&mpd->lock); + return r; +} + +static void minnow_panel_disable(struct omap_dss_device *dssdev) +{ + struct minnow_panel_data *mpd = dev_get_drvdata(&dssdev->dev); + + mutex_lock(&mpd->lock); + minnow_panel_change_state_mlocked(mpd, DISPLAY_DISABLE); + mutex_unlock(&mpd->lock); +} + +static int minnow_panel_update(struct omap_dss_device *dssdev, + u16 x, u16 y, u16 w, u16 h) +{ + struct minnow_panel_data *mpd = dev_get_drvdata(&dssdev->dev); + int r = 0; + + /* if driver is disabled ot it's video mode, do not manual update */ + mutex_lock(&mpd->lock); + wake_lock(&mpd->wake_lock); + if (mpd->enabled && (mpd->dsi_config.mode!=OMAP_DSS_DSI_VIDEO_MODE)) { + int recovered = 0; + dev_dbg(&dssdev->dev, "update %d, %d, %d-%d\n", x, y, w, h); + minnow_panel_sync_resume_mlocked(mpd); + + dsi_bus_lock(dssdev); + r = minnow_panel_wake_up_locked(mpd); + if (!r) + goto _update_; + _recovery_: + /* try recovery panel if it can't wake up */ + r = minnow_panel_recovery_locked(mpd); + if (r) + goto _dsi_unlock_; + _update_: + r = minnow_panel_update_locked(mpd); + /* no dsi_bus_unlock when update start successfully */ + if (!r) + goto _mutex_unlock_; + /* try if it need recovery once */ + if (recovered++) + goto _dsi_unlock_; + /* be safety, check the panel status to make sure it's stuck now */ + r = minnow_panel_check_panel_status(mpd); + if (r) + goto _recovery_; + _dsi_unlock_: + dev_err(&dssdev->dev, "update %d, %d, %d-%d failed(%d)\n", x, y, w, h, r); + dsi_bus_unlock(dssdev); + } +_mutex_unlock_: + wake_unlock(&mpd->wake_lock); + mutex_unlock(&mpd->lock); + return r; +} + +static int minnow_panel_sync(struct omap_dss_device *dssdev) +{ + struct minnow_panel_data *mpd = 
dev_get_drvdata(&dssdev->dev); + + dev_dbg(&dssdev->dev, "sync\n"); + + mutex_lock(&mpd->lock); + dsi_bus_lock(dssdev); + dsi_bus_unlock(dssdev); + mutex_unlock(&mpd->lock); + + dev_dbg(&dssdev->dev, "sync done\n"); + + return 0; +} + +static int _minnow_panel_enable_te(struct minnow_panel_data *mpd, bool enable) +{ + int r; + + if (enable) + r = minnow_panel_dcs_write_1(mpd, MIPI_DCS_SET_TEAR_ON, 0); + else + r = minnow_panel_dcs_write_0(mpd, MIPI_DCS_SET_TEAR_OFF); + + if (!gpio_is_valid(mpd->ext_te_gpio)) + omapdss_dsi_enable_te(mpd->dssdev, enable); + + return r; +} + +static int minnow_panel_enable_te(struct omap_dss_device *dssdev, bool enable) +{ + struct minnow_panel_data *mpd = dev_get_drvdata(&dssdev->dev); + int r; + + mutex_lock(&mpd->lock); + + if (mpd->te_enabled == enable) + goto end; + + dsi_bus_lock(dssdev); + + if (mpd->enabled) { + r = minnow_panel_wake_up_locked(mpd); + if (r) + goto err; + + r = _minnow_panel_enable_te(mpd, enable); + if (r) + goto err; + } + + mpd->te_enabled = enable; + + dsi_bus_unlock(dssdev); +end: + mutex_unlock(&mpd->lock); + + return 0; +err: + dsi_bus_unlock(dssdev); + mutex_unlock(&mpd->lock); + + return r; +} + +static int minnow_panel_get_te(struct omap_dss_device *dssdev) +{ + struct minnow_panel_data *mpd = dev_get_drvdata(&dssdev->dev); + int r; + + mutex_lock(&mpd->lock); + r = mpd->te_enabled; + mutex_unlock(&mpd->lock); + + return r; +} + +#ifdef PANEL_DEBUG +static int minnow_panel_run_test(struct omap_dss_device *dssdev, int test_num) +{ + struct minnow_panel_data *mpd = dev_get_drvdata(&dssdev->dev); + u8 id1, id2, id3; + int r; + + mutex_lock(&mpd->lock); + + if (!mpd->enabled) { + r = -ENODEV; + goto err1; + } + + dsi_bus_lock(dssdev); + + r = minnow_panel_wake_up_locked(mpd); + if (r) + goto err2; + + r = minnow_panel_dcs_read_1(mpd, DCS_GET_ID1, &id1); + if (r) + goto err2; + r = minnow_panel_dcs_read_1(mpd, DCS_GET_ID2, &id2); + if (r) + goto err2; + r = minnow_panel_dcs_read_1(mpd, DCS_GET_ID3, &id3); + if (r) + goto err2; + + dsi_bus_unlock(dssdev); + mutex_unlock(&mpd->lock); + return 0; +err2: + dsi_bus_unlock(dssdev); +err1: + mutex_unlock(&mpd->lock); + return r; +} + +static int minnow_panel_memory_read(struct omap_dss_device *dssdev, + void *buf, size_t size, u16 x, u16 y, u16 w, u16 h) +{ + int r; + int plen; + u32 buf_used = 0; + struct minnow_panel_data *mpd = dev_get_drvdata(&dssdev->dev); + u8 dcs_cmd = MIPI_DCS_READ_MEMORY_START; + + plen = dsi_get_pixel_size(mpd->dssdev->panel.dsi_pix_fmt); + plen = DIV_ROUND_UP(plen, 8); + if (size < w * h * plen) + return -ENOMEM; + + mutex_lock(&mpd->lock); + + if (!mpd->enabled) { + r = -ENODEV; + goto err1; + } + + size = min(w * h * plen, + dssdev->panel.timings.x_res * + dssdev->panel.timings.y_res * plen); + + dsi_bus_lock(dssdev); + + r = minnow_panel_wake_up_locked(mpd); + if (r) + goto err2; + + minnow_panel_set_update_window(mpd, x, y, w, h); + + /* each read 8 pixel as SSD2848 has minimum read pixels */ + plen *= 8; + r = dsi_vc_set_max_rx_packet_size(dssdev, mpd->channel, plen); + if (r) + goto err2; + + size = size / plen * plen; + for (; buf_used < size; dcs_cmd = MIPI_DCS_READ_MEMORY_CONTINUE) { + r = dsi_vc_dcs_read(dssdev, mpd->channel, dcs_cmd, + buf + buf_used, plen); + if (r) { + dev_err(&dssdev->dev, + "read failed at %u err=%d\n", buf_used, r); + goto err3; + } + buf_used += plen; + if (signal_pending(current)) { + dev_err(&dssdev->dev, "signal pending, " + "aborting memory read\n"); + r = -ERESTARTSYS; + goto err3; + } + } + r = buf_used; + +err3: + 
dsi_vc_set_max_rx_packet_size(dssdev, mpd->channel, 1); +err2: + dsi_bus_unlock(dssdev); +err1: + mutex_unlock(&mpd->lock); + return r; +} +#endif + +static void minnow_panel_ulps_work(struct work_struct *work) +{ + DECLARE_MPD_FROM_CONTAINER(work, ulps_work.work); + struct omap_dss_device *dssdev = mpd->dssdev; + + mutex_lock(&mpd->lock); + + if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE || !mpd->enabled) { + mutex_unlock(&mpd->lock); + return; + } + + dsi_bus_lock(dssdev); + minnow_panel_enter_ulps_locked(mpd); + dsi_bus_unlock(dssdev); + + mutex_unlock(&mpd->lock); +} + +static void minnow_panel_esd_work(struct work_struct *work) +{ + DECLARE_MPD_FROM_CONTAINER(work, esd_work.work); + struct omap_dss_device *dssdev = mpd->dssdev; + int r; + + mutex_lock(&mpd->lock); + + if (!mpd->enabled) { + mutex_unlock(&mpd->lock); + return; + } + + wake_lock(&mpd->wake_lock); + minnow_panel_sync_resume_mlocked(mpd); + + dsi_bus_lock(dssdev); + + r = minnow_panel_wake_up_locked(mpd); + if (r) { + dev_err(&dssdev->dev, "failed to exit ULPS\n"); + goto _reset_; + } + + omapdss_dsi_vc_enable_hs(dssdev, mpd->channel, false); + r = minnow_panel_check_panel_status(mpd); + if (r) { + if (IS_ERR_UNEXPECT_MODE_00(r)) { + /* workaround to assume panel is good till it gets + * power state 00 for 3 continue times + */ + if (++mpd->esd_errors < 3) + goto _next_; + } + dev_err(&dssdev->dev, "failed to read minnow-panel status\n"); + goto _reset_; + } + mpd->esd_errors = 0; + +_next_: + omapdss_dsi_vc_enable_hs(dssdev, mpd->channel, true); + if (!mpd->interactive) + minnow_panel_enter_ulps_locked(mpd); + else + minnow_panel_queue_ulps_work(mpd); + + dsi_bus_unlock(dssdev); + minnow_panel_queue_esd_work(mpd); + wake_unlock(&mpd->wake_lock); + mutex_unlock(&mpd->lock); + return; + +_reset_: + r = minnow_panel_recovery_locked(mpd); + if (!r) { + r = minnow_panel_update_locked(mpd); + /* it will release dsi_bus_unlock in frame done callback when + * update start successfully + */ + if (!r) + goto _munlock_; + if (!mpd->interactive) + minnow_panel_enter_ulps_locked(mpd); + else + minnow_panel_queue_ulps_work(mpd); + } + dsi_bus_unlock(dssdev); +_munlock_: + wake_unlock(&mpd->wake_lock); + mutex_unlock(&mpd->lock); +} + +static struct omap_dss_driver minnow_panel_driver = { + .probe = minnow_panel_probe, + .remove = __exit_p(minnow_panel_remove), + + .enable = minnow_panel_enable, + .disable = minnow_panel_disable, +#ifdef CONFIG_HAS_AMBIENTMODE + .suspend = minnow_panel_suspend, + .resume = minnow_panel_resume, +#endif + + .update = minnow_panel_update, + .sync = minnow_panel_sync, + + .get_resolution = minnow_panel_get_resolution, + .get_recommended_bpp = omapdss_default_get_recommended_bpp, + + .enable_te = minnow_panel_enable_te, + .get_te = minnow_panel_get_te, + +#ifdef PANEL_DEBUG + .run_test = minnow_panel_run_test, + .memory_read = minnow_panel_memory_read, +#endif + + .driver = { + .name = DRIVER_NAME, + .owner = THIS_MODULE, + }, +}; + +static int __init minnow_panel_init(void) +{ + omap_dss_register_driver(&minnow_panel_driver); + + return 0; +} + +static void __exit minnow_panel_exit(void) +{ + omap_dss_unregister_driver(&minnow_panel_driver); +} + +module_init(minnow_panel_init); +module_exit(minnow_panel_exit); + +MODULE_DESCRIPTION("Minnow Panel DSI Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/video/omap2/dss/Kconfig b/drivers/video/omap2/dss/Kconfig index cb0f145c707..2bf82e2e15a 100644 --- a/drivers/video/omap2/dss/Kconfig +++ b/drivers/video/omap2/dss/Kconfig @@ -116,4 +116,8 @@ config 
OMAP2_DSS_SLEEP_AFTER_VENC_RESET This option enables the sleep, and is enabled by default. You can disable the sleep if it doesn't cause problems on your platform. +config OMAP2_DSS_RESET + bool "Reset OMAP Display Subsystem" + default n + endif diff --git a/drivers/video/omap2/dss/core.c b/drivers/video/omap2/dss/core.c index c9c2252e371..e08d71fef88 100644 --- a/drivers/video/omap2/dss/core.c +++ b/drivers/video/omap2/dss/core.c @@ -53,6 +53,11 @@ static char *def_disp_name; module_param_named(def_disp, def_disp_name, charp, 0); MODULE_PARM_DESC(def_disp, "default display name"); +#ifdef DEBUG +bool dss_debug; +module_param_named(debug, dss_debug, bool, 0644); +#endif + static bool dss_initialized; const char *omapdss_get_default_display_name(void) diff --git a/drivers/video/omap2/dss/display.c b/drivers/video/omap2/dss/display.c index 0aa8ad8f966..1a45e4e77ba 100644 --- a/drivers/video/omap2/dss/display.c +++ b/drivers/video/omap2/dss/display.c @@ -80,6 +80,10 @@ static int dss_suspend_device(struct device *dev, void *data) { struct omap_dss_device *dssdev = to_dss_device(dev); +#if defined(CONFIG_HAS_AMBIENTMODE) + if (dssdev->driver->suspend) + return dssdev->driver->suspend(dssdev); +#else if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE) { dssdev->activate_after_resume = false; return 0; @@ -88,6 +92,7 @@ static int dss_suspend_device(struct device *dev, void *data) dssdev->driver->disable(dssdev); dssdev->activate_after_resume = true; +#endif return 0; } @@ -109,16 +114,20 @@ int dss_suspend_all_devices(void) static int dss_resume_device(struct device *dev, void *data) { - int r; struct omap_dss_device *dssdev = to_dss_device(dev); +#if defined(CONFIG_HAS_AMBIENTMODE) + if (dssdev->driver->resume) + return dssdev->driver->resume(dssdev); +#else if (dssdev->activate_after_resume) { - r = dssdev->driver->enable(dssdev); + int r = dssdev->driver->enable(dssdev); if (r) return r; } dssdev->activate_after_resume = false; +#endif return 0; } diff --git a/drivers/video/omap2/dss/dsi.c b/drivers/video/omap2/dss/dsi.c index a73dedc3310..1e1da1bc88e 100644 --- a/drivers/video/omap2/dss/dsi.c +++ b/drivers/video/omap2/dss/dsi.c @@ -46,6 +46,7 @@ #include "dss_features.h" #define DSI_CATCH_MISSING_TE +#define DSI_DISABLE_LP_RX_TO struct dsi_reg { u16 idx; }; @@ -338,6 +339,8 @@ struct dsi_data { #endif int debug_read; int debug_write; + atomic_t update_pending; + atomic_t runtime_active; #ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS spinlock_t irq_stats_lock; @@ -1097,9 +1100,14 @@ int dsi_runtime_get(struct platform_device *dsidev) struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); DSSDBG("dsi_runtime_get\n"); - - r = pm_runtime_get_sync(&dsi->pdev->dev); - WARN_ON(r < 0); + r = atomic_xchg(&dsi->runtime_active, 1); + if (!r) { + r = pm_runtime_get_sync(&dsi->pdev->dev); + if (r < 0) { + atomic_set(&dsi->runtime_active, 0); + WARN(1, "dsi_runtime_get ret = %d\n", r); + } + } return r < 0 ? r : 0; } @@ -1109,9 +1117,11 @@ void dsi_runtime_put(struct platform_device *dsidev) int r; DSSDBG("dsi_runtime_put\n"); - - r = pm_runtime_put_sync(&dsi->pdev->dev); - WARN_ON(r < 0 && r != -ENOSYS); + r = atomic_xchg(&dsi->runtime_active, 0); + if (r) { + r = pm_runtime_put_sync(&dsi->pdev->dev); + WARN(r < 0 && r != -ENOSYS, "dsi_runtime_put ret = %d\n", r); + } } /* source clock for DSI PLL. 
this could also be PCLKFREE */ @@ -2523,21 +2533,23 @@ static int dsi_sync_vc_vp(struct platform_device *dsidev, int channel) /* Wait for completion only if TE_EN/TE_START is still set */ if (REG_GET(dsidev, DSI_VC_TE(channel), bit, bit)) { - if (wait_for_completion_timeout(&completion, + u32 ts_last = REG_GET(dsidev, DSI_VC_TE(channel), 23, 0); + while (wait_for_completion_timeout(&completion, msecs_to_jiffies(10)) == 0) { - DSSERR("Failed to complete previous frame transfer\n"); - r = -EIO; - goto err1; + u32 ts = REG_GET(dsidev, DSI_VC_TE(channel), 23, 0); + if (!ts) + break; + if (ts == ts_last) { + DSSERR("Failed to complete previous frame transfer\n"); + r = -EIO; + break; + } + ts_last = ts; } } dsi_unregister_isr_vc(dsidev, channel, dsi_packet_sent_handler_vp, &vp_data, DSI_VC_IRQ_PACKET_SENT); - - return 0; -err1: - dsi_unregister_isr_vc(dsidev, channel, dsi_packet_sent_handler_vp, - &vp_data, DSI_VC_IRQ_PACKET_SENT); err0: return r; } @@ -2829,6 +2841,8 @@ int dsi_vc_send_bta_sync(struct omap_dss_device *dssdev, int channel) int r = 0; u32 err; + dsi_sync_vc(dsidev, channel); + r = dsi_register_isr_vc(dsidev, channel, dsi_completion_handler, &completion, DSI_VC_IRQ_BTA); if (r) @@ -2844,7 +2858,7 @@ int dsi_vc_send_bta_sync(struct omap_dss_device *dssdev, int channel) goto err2; if (wait_for_completion_timeout(&completion, - msecs_to_jiffies(500)) == 0) { + msecs_to_jiffies(50)) == 0) { DSSERR("Failed to receive BTA\n"); r = -EIO; goto err2; @@ -3523,7 +3537,11 @@ static void dsi_set_lp_rx_timeout(struct platform_device *dsidev, fck = dsi_fclk_rate(dsidev); r = dsi_read_reg(dsidev, DSI_TIMING2); +#ifdef DSI_DISABLE_LP_RX_TO + r = FLD_MOD(r, 0, 15, 15); /* LP_RX_TO */ +#else r = FLD_MOD(r, 1, 15, 15); /* LP_RX_TO */ +#endif r = FLD_MOD(r, x16 ? 1 : 0, 14, 14); /* LP_RX_TO_X16 */ r = FLD_MOD(r, x4 ? 1 : 0, 13, 13); /* LP_RX_TO_X4 */ r = FLD_MOD(r, ticks, 12, 0); /* LP_RX_COUNTER */ @@ -4230,6 +4248,9 @@ static void dsi_update_screen_dispc(struct platform_device *dsidev) DSSDBG("dsi_update_screen_dispc(%dx%d)\n", w, h); + WARN_ON(atomic_read(&dsi->update_pending) != 0); + atomic_inc(&dsi->update_pending); + dsi_vc_config_source(dsidev, channel, DSI_VC_SOURCE_VP); bytespp = dsi_get_pixel_size(dsi->pix_fmt) / 8; @@ -4272,18 +4293,16 @@ static void dsi_update_screen_dispc(struct platform_device *dsidev) dsi_perf_mark_start(dsidev); - r = schedule_delayed_work(&dsi->framedone_timeout_work, - msecs_to_jiffies(250)); - BUG_ON(r == 0); - dss_mgr_set_timings(mgr, &dsi->timings); dss_mgr_start_update(mgr); if (dsi->te_enabled) { +#ifndef DSI_DISABLE_LP_RX_TO /* disable LP_RX_TO, so that we can receive TE. Time to wait * for TE is longer than the timer allows */ REG_FLD_MOD(dsidev, DSI_TIMING2, 0, 15, 15); /* LP_RX_TO */ +#endif dsi_vc_send_bta(dsidev, channel); @@ -4291,6 +4310,19 @@ static void dsi_update_screen_dispc(struct platform_device *dsidev) mod_timer(&dsi->te_timer, jiffies + msecs_to_jiffies(250)); #endif } + /* When kernel is busy, it will be delayed between update and timer. + * So it moves start timer after update started to avoid wrong time out + * of frame updating, but it still occurs another problem as the frame + * event might be called before start timer called, either it will miss + * cacel_delayed_work in frame done isr, or get wrong time out if there + * is not follow updates. To fix it, call cancel_delayed_work first. 
+ */ + if (atomic_read(&dsi->update_pending) > 0) { + cancel_delayed_work(&dsi->framedone_timeout_work); + r = schedule_delayed_work(&dsi->framedone_timeout_work, + msecs_to_jiffies(250)); + WARN_ON(r == 0); + } } #ifdef DSI_CATCH_MISSING_TE @@ -4304,12 +4336,20 @@ static void dsi_handle_framedone(struct platform_device *dsidev, int error) { struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + if (atomic_dec_return(&dsi->update_pending) < 0) { + atomic_inc(&dsi->update_pending); + DSSERR("Unmatched Frame Done!\n"); + return; + } + /* SIDLEMODE back to smart-idle */ dispc_enable_sidle(); if (dsi->te_enabled) { +#ifndef DSI_DISABLE_LP_RX_TO /* enable LP_RX_TO again after the TE */ REG_FLD_MOD(dsidev, DSI_TIMING2, 1, 15, 15); /* LP_RX_TO */ +#endif } dsi->framedone_callback(error, dsi->framedone_data); @@ -4569,6 +4609,25 @@ static void dsi_display_uninit_dsi(struct platform_device *dsidev, dsi_pll_uninit(dsidev, disconnect_lanes); } +static int dsi_soft_reset(struct platform_device *dsidev) +{ + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + int i = 10; + + dss_select_dsi_clk_source(dsi->module_id, OMAP_DSS_CLK_SRC_FCK); + /* enable DSI soft reset */ + REG_FLD_MOD(dsidev, DSI_SYSCONFIG, 1, 1, 1); + /* waiting for DSI soft reset done*/ + while (REG_GET(dsidev, DSI_SYSSTATUS, 0, 0) == 0) { + if (!i--) { + DSSERR("DSI soft reset failed!\n"); + return -ENODEV; + } + udelay(1); + } + return 0; +} + int omapdss_dsi_display_enable(struct omap_dss_device *dssdev) { struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); @@ -4593,6 +4652,8 @@ int omapdss_dsi_display_enable(struct omap_dss_device *dssdev) dsi_enable_pll_clock(dsidev, 1); + dsi_soft_reset(dsidev); + _dsi_initialize_irq(dsidev); r = dsi_display_init_dsi(dsidev); @@ -4626,19 +4687,30 @@ void omapdss_dsi_display_disable(struct omap_dss_device *dssdev, WARN_ON(!dsi_bus_is_locked(dsidev)); mutex_lock(&dsi->lock); + /*do nothing if it did not enabled */ + if (!atomic_read(&dsi->runtime_active)) + goto exit; - dsi_sync_vc(dsidev, 0); - dsi_sync_vc(dsidev, 1); - dsi_sync_vc(dsidev, 2); - dsi_sync_vc(dsidev, 3); - - dsi_display_uninit_dsi(dsidev, disconnect_lanes, enter_ulps); + /* special fast rest DSI for disconnect_lanes/enter_ulps all true + * it will save 150 ms + */ + if (disconnect_lanes && enter_ulps) { + dsi_soft_reset(dsidev); + dsi_cio_uninit(dsidev); + dsi_pll_uninit(dsidev, true); + } else { + dsi_sync_vc(dsidev, 0); + dsi_sync_vc(dsidev, 1); + dsi_sync_vc(dsidev, 2); + dsi_sync_vc(dsidev, 3); + dsi_display_uninit_dsi(dsidev, disconnect_lanes, enter_ulps); + } dsi_runtime_put(dsidev); dsi_enable_pll_clock(dsidev, 0); omap_dss_stop_device(dssdev); - +exit: mutex_unlock(&dsi->lock); } EXPORT_SYMBOL(omapdss_dsi_display_disable); @@ -5521,13 +5593,6 @@ static int omap_dsihw_probe(struct platform_device *dsidev) return -ENODEV; } - r = devm_request_irq(&dsidev->dev, dsi->irq, omap_dsi_irq_handler, - IRQF_SHARED, dev_name(&dsidev->dev), dsi->pdev); - if (r < 0) { - DSSERR("request_irq failed\n"); - return r; - } - /* DSI VCs initialization */ for (i = 0; i < ARRAY_SIZE(dsi->vc); i++) { dsi->vc[i].source = DSI_VC_SOURCE_L4; @@ -5547,6 +5612,13 @@ static int omap_dsihw_probe(struct platform_device *dsidev) if (r) goto err_runtime_get; + r = devm_request_irq(&dsidev->dev, dsi->irq, omap_dsi_irq_handler, + IRQF_SHARED, dev_name(&dsidev->dev), dsi->pdev); + if (r < 0) { + DSSERR("request_irq failed\n"); + goto err_runtime_get; + } + rev = dsi_read_reg(dsidev, DSI_REVISION); dev_dbg(&dsidev->dev, "OMAP DSI rev %d.%d\n", 
FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0)); diff --git a/drivers/video/omap2/dss/dss.h b/drivers/video/omap2/dss/dss.h index 84758936429..9e577d1dfa4 100644 --- a/drivers/video/omap2/dss/dss.h +++ b/drivers/video/omap2/dss/dss.h @@ -35,8 +35,14 @@ #define pr_fmt(fmt) fmt #endif +#ifdef DEBUG +extern bool dss_debug; #define DSSDBG(format, ...) \ - pr_debug(format, ## __VA_ARGS__) + if (dss_debug) \ + pr_debug(format, ## __VA_ARGS__) +#else +#define DSSDBG(format, ...) +#endif #ifdef DSS_SUBSYS_NAME #define DSSERR(format, ...) \ diff --git a/drivers/video/omap2/omapfb/omapfb-ioctl.c b/drivers/video/omap2/omapfb/omapfb-ioctl.c index d30b45d7264..c9879f3a204 100644 --- a/drivers/video/omap2/omapfb/omapfb-ioctl.c +++ b/drivers/video/omap2/omapfb/omapfb-ioctl.c @@ -303,6 +303,18 @@ static int omapfb_update_window(struct fb_info *fbi, return display->driver->update(display, x, y, w, h); } +static int omapfb_update_display(struct fb_info *fbi) +{ + struct omap_dss_device *display = fb2display(fbi); + u16 dw, dh; + + if (!display) + return 0; + + display->driver->get_resolution(display, &dw, &dh); + return display->driver->update(display, 0, 0, dw, dh); +} + int omapfb_set_update_mode(struct fb_info *fbi, enum omapfb_update_mode mode) { @@ -604,6 +616,10 @@ int omapfb_ioctl(struct fb_info *fbi, unsigned int cmd, unsigned long arg) int r = 0; switch (cmd) { + case FBIO_UPDATE_DISPLAY: + DBG("ioctl FBIO_UPDATE_DISPLAY\n"); + r = omapfb_update_display(fbi); + break; case OMAPFB_SYNC_GFX: DBG("ioctl SYNC_GFX\n"); if (!display || !display->driver->sync) { diff --git a/drivers/video/omap2/omapfb/omapfb-main.c b/drivers/video/omap2/omapfb/omapfb-main.c index 856917b3361..18a9429d686 100644 --- a/drivers/video/omap2/omapfb/omapfb-main.c +++ b/drivers/video/omap2/omapfb/omapfb-main.c @@ -29,6 +29,7 @@ #include <linux/device.h> #include <linux/platform_device.h> #include <linux/omapfb.h> +#include <linux/uaccess.h> #include <video/omapdss.h> #include <video/omapvrfb.h> @@ -392,7 +393,7 @@ static int check_fb_res_bounds(struct fb_var_screeninfo *var) var->xres_virtual = var->xres; if (var->yres_virtual == 0) - var->yres_virtual = var->yres; + var->yres_virtual = 2 * var->yres; if (var->xres_virtual < xres_min || var->yres_virtual < yres_min) return -EINVAL; @@ -906,6 +907,8 @@ int omapfb_setup_overlay(struct fb_info *fbi, struct omap_overlay *ovl, mirror = ofbi->mirror; info.paddr = data_start_p; + info.p_uv_addr = (u32)omapfb_get_region_vaddr(ofbi) + + (data_start_p - (u32)omapfb_get_region_paddr(ofbi)); info.screen_width = screen_width; info.width = xres; info.height = yres; @@ -1048,27 +1051,34 @@ static int omapfb_set_par(struct fb_info *fbi) static int omapfb_pan_display(struct fb_var_screeninfo *var, struct fb_info *fbi) { + struct omap_dss_device *display = fb2display(fbi); struct omapfb_info *ofbi = FB2OFB(fbi); struct fb_var_screeninfo new_var; int r; DBG("pan_display(%d)\n", FB2OFB(fbi)->id); - if (var->xoffset == fbi->var.xoffset && - var->yoffset == fbi->var.yoffset) - return 0; + if (var->xoffset != fbi->var.xoffset || + var->yoffset != fbi->var.yoffset) { - new_var = fbi->var; - new_var.xoffset = var->xoffset; - new_var.yoffset = var->yoffset; + new_var = fbi->var; + new_var.xoffset = var->xoffset; + new_var.yoffset = var->yoffset; - fbi->var = new_var; + fbi->var = new_var; - omapfb_get_mem_region(ofbi->region); + omapfb_get_mem_region(ofbi->region); - r = omapfb_apply_changes(fbi, 0); + r = omapfb_apply_changes(fbi, 0); - omapfb_put_mem_region(ofbi->region); + omapfb_put_mem_region(ofbi->region); 
+ } + + if (display && display->driver->update && display->driver->sync) { + DBG("sync_update(%d, %d)\n", var->xres, var->yres); + display->driver->sync(display); + display->driver->update(display, 0, 0, var->xres, var->yres); + } return r; } @@ -1241,8 +1251,10 @@ static int omapfb_blank(int blank, struct fb_info *fbi) switch (blank) { case FB_BLANK_UNBLANK: +#if !defined(CONFIG_WAKEUP_SOURCE_NOTIFY) && !defined(CONFIG_HAS_AMBIENTMODE) if (display->state == OMAP_DSS_DISPLAY_ACTIVE) goto exit; +#endif r = display->driver->enable(display); @@ -1259,8 +1271,10 @@ static int omapfb_blank(int blank, struct fb_info *fbi) case FB_BLANK_VSYNC_SUSPEND: case FB_BLANK_HSYNC_SUSPEND: case FB_BLANK_POWERDOWN: +#if !defined(CONFIG_WAKEUP_SOURCE_NOTIFY) && !defined(CONFIG_HAS_AMBIENTMODE) if (display->state != OMAP_DSS_DISPLAY_ACTIVE) goto exit; +#endif if (d->auto_update_work_enabled) omapfb_stop_auto_update(fbdev, display); @@ -1273,7 +1287,9 @@ static int omapfb_blank(int blank, struct fb_info *fbi) r = -EINVAL; } +#if !defined(CONFIG_WAKEUP_SOURCE_NOTIFY) && !defined(CONFIG_HAS_AMBIENTMODE) exit: +#endif omapfb_unlock(fbdev); return r; @@ -1290,6 +1306,97 @@ ssize_t omapfb_write(struct fb_info *info, const char __user *buf, } #endif +ssize_t omapfb_write(struct fb_info *info, const char __user *buf, + size_t count, loff_t *ppos) +{ + struct omapfb_info *ofbi = FB2OFB(info); + struct omapfb2_device *fbdev = ofbi->fbdev; + struct omap_dss_device *display = fb2display(info); + struct omapfb_display_data *d; + struct fb_var_screeninfo *var = &info->var; + unsigned long p = *ppos; + u8 *buffer, *src; + u8 __iomem *dst; + int c, cnt, err = 0; + unsigned long total_size; + + DBG("omapfb_write %d, %lu\n", count, (unsigned long)*ppos); + + total_size = var->xres * var->yres * (var->bits_per_pixel>>3); + if (p > total_size) + return -EFBIG; + if (count > total_size) { + err = -EFBIG; + count = total_size; + } + if (count + p > total_size) { + if (!err) + err = -ENOSPC; + + count = total_size - p; + } + + DBG("omapfb_write %d, %lu, %lu\n", count, p, total_size); + + buffer = kmalloc((count > PAGE_SIZE) ? PAGE_SIZE : count, + GFP_KERNEL); + if (!buffer) + return -ENOMEM; + + dst = (u8 __iomem *)(info->screen_base + p); + c = var->bits_per_pixel >> 3; + cnt = var->xres_virtual * c; + dst += var->yoffset*cnt + var->xoffset*c; + DBG("omapfb_write offset = %d, scr_size = %lu\n", + (int)(dst) - (int)(info->screen_base + p), info->screen_size); + + if (info->fbops->fb_sync) + info->fbops->fb_sync(info); + + for (cnt = 0; count; ) { + c = (count > PAGE_SIZE) ? 
PAGE_SIZE : count; + src = buffer; + + if (copy_from_user(src, buf, c)) { + err = -EFAULT; + break; + } + + fb_memcpy_tofb(dst, src, c); + dst += c; + src += c; + *ppos += c; + buf += c; + cnt += c; + count -= c; + } + + kfree(buffer); + + if (total_size <= *ppos && display && + display->driver->update && display->driver->sync) { + DBG("fb_var_screen info:\n"); + DBG("\txres = %u\n", var->xres); + DBG("\tyres = %u\n", var->yres); + DBG("\txres_virtual = %u\n", var->xres_virtual); + DBG("\tyres_virtual = %u\n", var->yres_virtual); + DBG("\txoffset = %u\n", var->xoffset); + DBG("\tyoffset = %u\n", var->yoffset); + DBG("\tbits_per_pixel = %u\n", var->bits_per_pixel); + omapfb_lock(fbdev); + d = get_display_data(fbdev, display); + if (d->update_mode == OMAPFB_MANUAL_UPDATE) { + DBG("sync_update(%d, %d)\n", var->xres, var->yres); + display->driver->sync(display); + display->driver->update(display, + 0, 0, var->xres, var->yres); + } + omapfb_unlock(fbdev); + } + + return (cnt) ? cnt : err; +} + static struct fb_ops omapfb_ops = { .owner = THIS_MODULE, .fb_open = omapfb_open, @@ -1305,7 +1412,7 @@ static struct fb_ops omapfb_ops = { .fb_mmap = omapfb_mmap, .fb_setcolreg = omapfb_setcolreg, .fb_setcmap = omapfb_setcmap, - /*.fb_write = omapfb_write,*/ + .fb_write = omapfb_write, }; static void omapfb_free_fbmem(struct fb_info *fbi) @@ -1464,7 +1571,7 @@ static int omapfb_alloc_fbmem_display(struct fb_info *fbi, unsigned long size, DBG("adjusting fb mem size for VRFB, %u -> %lu\n", w * h * bytespp, size); } else { - size = w * h * bytespp; + size = 2 * w * h * bytespp; } } @@ -1775,7 +1882,7 @@ static int omapfb_fb_init(struct omapfb2_device *fbdev, struct fb_info *fbi) } var->xres_virtual = var->xres; - var->yres_virtual = var->yres; + var->yres_virtual = 2 * var->yres; if (!var->bits_per_pixel) { switch (omapfb_get_recommended_bpp(fbdev, display)) { @@ -1796,7 +1903,7 @@ static int omapfb_fb_init(struct omapfb2_device *fbdev, struct fb_info *fbi) var->xres = 320; var->yres = 240; var->xres_virtual = var->xres; - var->yres_virtual = var->yres; + var->yres_virtual = 2 * var->yres; if (!var->bits_per_pixel) var->bits_per_pixel = 16; } @@ -2326,8 +2433,6 @@ static int omapfb_init_display(struct omapfb2_device *fbdev, d->fbdev = fbdev; if (dssdev->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE) { - u16 w, h; - if (auto_update) { omapfb_start_auto_update(fbdev, dssdev); d->update_mode = OMAPFB_AUTO_UPDATE; @@ -2342,14 +2447,6 @@ static int omapfb_init_display(struct omapfb2_device *fbdev, return r; } } - - dssdrv->get_resolution(dssdev, &w, &h); - r = dssdrv->update(dssdev, 0, 0, w, h); - if (r) { - dev_err(fbdev->dev, - "Failed to update display\n"); - return r; - } } else { d->update_mode = OMAPFB_AUTO_UPDATE; } |
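
The dsi.c hunk above wraps pm_runtime_get_sync()/pm_runtime_put_sync() in an atomic flag (`runtime_active`) exchanged with atomic_xchg(), so repeated get/put calls toggle the underlying runtime-PM state at most once, and omapdss_dsi_display_disable() can bail out early when the interface was never powered. The following is a minimal, self-contained userspace sketch of that exchange-guarded pattern using C11 atomics; enable_hw()/disable_hw() are hypothetical stand-ins for the pm_runtime calls and are not part of the driver.

```c
/* exchange_guard.c - illustrative sketch of the atomic_xchg() guard used
 * around pm_runtime_get_sync()/pm_runtime_put_sync() in the dsi.c hunk
 * above; enable_hw()/disable_hw() are hypothetical stand-ins. */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int runtime_active;

static int  enable_hw(void)  { puts("hw enabled");  return 0; }
static void disable_hw(void) { puts("hw disabled"); }

/* Only the caller that flips 0 -> 1 actually enables the hardware. */
static int runtime_get(void)
{
	if (atomic_exchange(&runtime_active, 1) == 0)
		return enable_hw();
	return 0;	/* already active: nothing more to do */
}

/* Only the caller that flips 1 -> 0 actually disables the hardware. */
static void runtime_put(void)
{
	if (atomic_exchange(&runtime_active, 0) == 1)
		disable_hw();
}

int main(void)
{
	runtime_get();	/* enables      */
	runtime_get();	/* no-op        */
	runtime_put();	/* disables     */
	runtime_put();	/* no-op        */
	return 0;
}
```

As a reading of the patch, the point of the exchange rather than a plain flag test is that get and put become idempotent: an unbalanced put (for example, a disable path entered before the display was ever enabled) never drops the runtime-PM usage count below zero, which matches the "do nothing if it did not enabled" early exit added to omapdss_dsi_display_disable().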