Diffstat (limited to 'drivers/gpu/drm/imx/dpu')
-rw-r--r--  drivers/gpu/drm/imx/dpu/Kconfig     |    6
-rw-r--r--  drivers/gpu/drm/imx/dpu/Makefile    |    8
-rw-r--r--  drivers/gpu/drm/imx/dpu/dpu-blit.c  |  323
-rw-r--r--  drivers/gpu/drm/imx/dpu/dpu-blit.h  |   18
-rw-r--r--  drivers/gpu/drm/imx/dpu/dpu-crc.c   |  385
-rw-r--r--  drivers/gpu/drm/imx/dpu/dpu-crc.h   |   75
-rw-r--r--  drivers/gpu/drm/imx/dpu/dpu-crtc.c  | 1451
-rw-r--r--  drivers/gpu/drm/imx/dpu/dpu-crtc.h  |  115
-rw-r--r--  drivers/gpu/drm/imx/dpu/dpu-kms.c   |  728
-rw-r--r--  drivers/gpu/drm/imx/dpu/dpu-kms.h   |   20
-rw-r--r--  drivers/gpu/drm/imx/dpu/dpu-plane.c | 1024
-rw-r--r--  drivers/gpu/drm/imx/dpu/dpu-plane.h |  210
12 files changed, 4363 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/drm/imx/dpu/Kconfig b/drivers/gpu/drm/imx/dpu/Kconfig
new file mode 100644
index 000000000000..ad480cdbe503
--- /dev/null
+++ b/drivers/gpu/drm/imx/dpu/Kconfig
@@ -0,0 +1,6 @@
+config DRM_IMX_DPU
+ tristate "Freescale i.MX DPU DRM support"
+ depends on DRM_IMX
+ depends on IMX_DPU_CORE
+ default y if DRM_IMX=y
+ default m if DRM_IMX=m
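With DRM_IMX=m, for example, the DPU support defaults to being built as a module as well; a hypothetical resulting .config fragment for illustration:

    CONFIG_DRM_IMX=m
    CONFIG_IMX_DPU_CORE=m
    CONFIG_DRM_IMX_DPU=m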
diff --git a/drivers/gpu/drm/imx/dpu/Makefile b/drivers/gpu/drm/imx/dpu/Makefile
new file mode 100644
index 000000000000..22ff7e916b6f
--- /dev/null
+++ b/drivers/gpu/drm/imx/dpu/Makefile
@@ -0,0 +1,8 @@
+ccflags-y += -I $(srctree)/$(src)/../
+
+imx-dpu-crtc-objs := dpu-crtc.o dpu-kms.o dpu-plane.o
+imx-dpu-crtc-$(CONFIG_DEBUG_FS) += dpu-crc.o
+obj-$(CONFIG_DRM_IMX_DPU) += imx-dpu-crtc.o
+
+imx-dpu-render-objs := dpu-blit.o
+obj-$(CONFIG_DRM_IMX_DPU) += imx-dpu-render.o
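kbuild links a composite module from both the <name>-objs and the <name>-y lists, so with CONFIG_DEBUG_FS=y the CRTC module is effectively assembled as (an illustrative expansion, not literal Makefile output):

    imx-dpu-crtc.ko <- dpu-crtc.o dpu-kms.o dpu-plane.o dpu-crc.o

Without CONFIG_DEBUG_FS, dpu-crc.o is simply dropped and the stubs in dpu-crc.h take over.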
diff --git a/drivers/gpu/drm/imx/dpu/dpu-blit.c b/drivers/gpu/drm/imx/dpu/dpu-blit.c
new file mode 100644
index 000000000000..8573adbdcea0
--- /dev/null
+++ b/drivers/gpu/drm/imx/dpu/dpu-blit.c
@@ -0,0 +1,323 @@
+/*
+ * Copyright 2017,2021 NXP
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include <drm/drm_vblank.h>
+#include <drm/drm_print.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_ioctl.h>
+#include <drm/imx_drm.h>
+#include <linux/component.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/export.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <video/dpu.h>
+
+#include "imx-drm.h"
+
+struct imx_drm_dpu_bliteng {
+ struct dpu_bliteng *dpu_be;
+ struct list_head list;
+};
+
+static DEFINE_MUTEX(imx_drm_dpu_bliteng_lock);
+static LIST_HEAD(imx_drm_dpu_bliteng_list);
+
+static int imx_dpu_num;
+
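+/*
+ * Prototypes for helpers implemented by the DPU core driver; they are
+ * apparently not part of the video/dpu.h interface, hence the local
+ * declarations here.
+ */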
+int dpu_be_get(struct dpu_bliteng *dpu_be);
+void dpu_be_put(struct dpu_bliteng *dpu_be);
+s32 dpu_bliteng_get_id(struct dpu_bliteng *dpu_be);
+void dpu_be_configure_prefetch(struct dpu_bliteng *dpu_be,
+ u32 width, u32 height,
+ u32 x_offset, u32 y_offset,
+ u32 stride, u32 format, u64 modifier,
+ u64 baddr, u64 uv_addr);
+u32 *dpu_bliteng_get_cmd_list(struct dpu_bliteng *dpu_be);
+void dpu_be_wait(struct dpu_bliteng *dpu_be);
+int dpu_bliteng_get_empty_instance(struct dpu_bliteng **dpu_be,
+ struct device *dev);
+void dpu_bliteng_set_id(struct dpu_bliteng *dpu_be, int id);
+void dpu_bliteng_set_dev(struct dpu_bliteng *dpu_be, struct device *dev);
+int dpu_bliteng_init(struct dpu_bliteng *dpu_bliteng);
+void dpu_bliteng_fini(struct dpu_bliteng *dpu_bliteng);
+int dpu_be_blit(struct dpu_bliteng *dpu_be,
+ u32 *cmdlist, u32 cmdnum);
+
+static struct imx_drm_dpu_bliteng *imx_drm_dpu_bliteng_find_by_id(s32 id)
+{
+ struct imx_drm_dpu_bliteng *bliteng;
+
+ mutex_lock(&imx_drm_dpu_bliteng_lock);
+
+ list_for_each_entry(bliteng, &imx_drm_dpu_bliteng_list, list) {
+ if (id == dpu_bliteng_get_id(bliteng->dpu_be)) {
+ mutex_unlock(&imx_drm_dpu_bliteng_lock);
+ return bliteng;
+ }
+ }
+
+ mutex_unlock(&imx_drm_dpu_bliteng_lock);
+
+ return NULL;
+}
+
+static int imx_drm_dpu_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_imx_dpu_set_cmdlist *req;
+ struct imx_drm_dpu_bliteng *bliteng;
+ struct dpu_bliteng *dpu_be;
+ u32 cmd_nr, *cmd, *cmd_list;
+ void *user_data;
+ s32 id = 0;
+ struct drm_imx_dpu_frame_info frame_info;
+ int ret;
+
+ req = data;
+ user_data = (void *)(unsigned long)req->user_data;
+ if (copy_from_user(&id, (void __user *)user_data,
+ sizeof(id))) {
+ return -EFAULT;
+ }
+
+ if (id != 0 && id != 1)
+ return -EINVAL;
+
+ user_data += sizeof(id);
+ if (copy_from_user(&frame_info, (void __user *)user_data,
+ sizeof(frame_info))) {
+ return -EFAULT;
+ }
+
+ bliteng = imx_drm_dpu_bliteng_find_by_id(id);
+ if (!bliteng) {
+ DRM_ERROR("Failed to get dpu_bliteng\n");
+ return -ENODEV;
+ }
+
+ dpu_be = bliteng->dpu_be;
+
+ ret = dpu_be_get(dpu_be);
+ if (ret)
+ return ret;
+
+ cmd_nr = req->cmd_nr;
+ cmd = (u32 *)(unsigned long)req->cmd;
+ cmd_list = dpu_bliteng_get_cmd_list(dpu_be);
+
+ if (copy_from_user(cmd_list, (void __user *)cmd,
+ sizeof(*cmd) * cmd_nr)) {
+ ret = -EFAULT;
+ goto err;
+ }
+
+ dpu_be_configure_prefetch(dpu_be, frame_info.width, frame_info.height,
+ frame_info.x_offset, frame_info.y_offset,
+ frame_info.stride, frame_info.format,
+ frame_info.modifier, frame_info.baddr,
+ frame_info.uv_addr);
+
+ ret = dpu_be_blit(dpu_be, cmd_list, cmd_nr);
+
+err:
+ dpu_be_put(dpu_be);
+
+ return ret;
+}
+
+static int imx_drm_dpu_wait_ioctl(struct drm_device *drm_dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_imx_dpu_wait *wait;
+ struct imx_drm_dpu_bliteng *bliteng;
+ struct dpu_bliteng *dpu_be;
+ void *user_data;
+ s32 id = 0;
+ int ret;
+
+ wait = data;
+ user_data = (void *)(unsigned long)wait->user_data;
+ if (copy_from_user(&id, (void __user *)user_data,
+ sizeof(id))) {
+ return -EFAULT;
+ }
+
+ if (id != 0 && id != 1)
+ return -EINVAL;
+
+ bliteng = imx_drm_dpu_bliteng_find_by_id(id);
+ if (!bliteng) {
+ DRM_ERROR("Failed to get dpu_bliteng\n");
+ return -ENODEV;
+ }
+
+ dpu_be = bliteng->dpu_be;
+
+ ret = dpu_be_get(dpu_be);
+ if (ret)
+ return ret;
+
+ dpu_be_wait(dpu_be);
+
+ dpu_be_put(dpu_be);
+
+ return 0;
+}
+
+static int imx_drm_dpu_get_param_ioctl(struct drm_device *drm_dev, void *data,
+ struct drm_file *file)
+{
+ enum drm_imx_dpu_param *param = data;
+ int ret;
+
+ switch (*param) {
+ case (DRM_IMX_MAX_DPUS):
+ ret = imx_dpu_num;
+ break;
+ default:
+ ret = -EINVAL;
+ DRM_ERROR("Unknown param![%d]\n", *param);
+ break;
+ }
+
+ return ret;
+}
+
+const struct drm_ioctl_desc imx_drm_dpu_ioctls[3] = {
+ DRM_IOCTL_DEF_DRV(IMX_DPU_SET_CMDLIST, imx_drm_dpu_set_cmdlist_ioctl,
+ DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(IMX_DPU_WAIT, imx_drm_dpu_wait_ioctl,
+ DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(IMX_DPU_GET_PARAM, imx_drm_dpu_get_param_ioctl,
+ DRM_RENDER_ALLOW),
+};
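+/*
+ * A minimal userspace sketch of driving these ioctls, for illustration
+ * only. The struct fields mirror their use in this file; the exact uapi
+ * layout and the DRM_IOCTL_IMX_DPU_* request macros live in
+ * <drm/imx_drm.h> and are assumed here rather than verified. Note that
+ * the handlers read the frame info at offset sizeof(id) inside
+ * user_data, so the two are packed back to back:
+ *
+ * int32_t id = 0; // DPU instance, 0 or 1
+ * struct drm_imx_dpu_frame_info info = { 0 }; // fill in dst frame fields
+ * char buf[sizeof(id) + sizeof(info)];
+ *
+ * memcpy(buf, &id, sizeof(id));
+ * memcpy(buf + sizeof(id), &info, sizeof(info));
+ *
+ * struct drm_imx_dpu_set_cmdlist req = {
+ * .cmd = (uint64_t)(uintptr_t)cmds, // command words to run
+ * .cmd_nr = cmd_nr, // number of command words
+ * .user_data = (uint64_t)(uintptr_t)buf,
+ * };
+ * struct drm_imx_dpu_wait wait = { .user_data = (uint64_t)(uintptr_t)buf };
+ *
+ * ioctl(drm_fd, DRM_IOCTL_IMX_DPU_SET_CMDLIST, &req); // queue the blit
+ * ioctl(drm_fd, DRM_IOCTL_IMX_DPU_WAIT, &wait); // wait for completion
+ */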
+
+static int dpu_bliteng_bind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct imx_drm_dpu_bliteng *bliteng;
+ struct dpu_bliteng *dpu_bliteng = NULL;
+ int ret;
+
+ bliteng = devm_kzalloc(dev, sizeof(*bliteng), GFP_KERNEL);
+ if (!bliteng)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&bliteng->list);
+
+ ret = dpu_bliteng_get_empty_instance(&dpu_bliteng, dev);
+ if (ret)
+ return ret;
+
+ dpu_bliteng_set_id(dpu_bliteng, imx_dpu_num);
+ dpu_bliteng_set_dev(dpu_bliteng, dev);
+
+ ret = dpu_bliteng_init(dpu_bliteng);
+ if (ret)
+ return ret;
+
+ mutex_lock(&imx_drm_dpu_bliteng_lock);
+ bliteng->dpu_be = dpu_bliteng;
+ list_add_tail(&bliteng->list, &imx_drm_dpu_bliteng_list);
+ mutex_unlock(&imx_drm_dpu_bliteng_lock);
+
+ dev_set_drvdata(dev, dpu_bliteng);
+
+ imx_dpu_num++;
+
+ return 0;
+}
+
+static void dpu_bliteng_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct imx_drm_dpu_bliteng *bliteng;
+ struct dpu_bliteng *dpu_bliteng = dev_get_drvdata(dev);
+ s32 id = dpu_bliteng_get_id(dpu_bliteng);
+
+ bliteng = imx_drm_dpu_bliteng_find_by_id(id);
+
+ mutex_lock(&imx_drm_dpu_bliteng_lock);
+ list_del(&bliteng->list);
+ mutex_unlock(&imx_drm_dpu_bliteng_lock);
+
+ dpu_bliteng_fini(dpu_bliteng);
+ dev_set_drvdata(dev, NULL);
+
+ imx_dpu_num--;
+}
+
+static const struct component_ops dpu_bliteng_ops = {
+ .bind = dpu_bliteng_bind,
+ .unbind = dpu_bliteng_unbind,
+};
+
+static int dpu_bliteng_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+
+ if (!dev->platform_data)
+ return -EINVAL;
+
+ return component_add(dev, &dpu_bliteng_ops);
+}
+
+static int dpu_bliteng_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &dpu_bliteng_ops);
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int dpu_bliteng_suspend(struct device *dev)
+{
+ struct dpu_bliteng *dpu_bliteng = dev_get_drvdata(dev);
+ int ret;
+
+ if (dpu_bliteng == NULL)
+ return 0;
+
+ ret = dpu_be_get(dpu_bliteng);
+
+ dpu_be_wait(dpu_bliteng);
+
+ dpu_be_put(dpu_bliteng);
+
+ dpu_bliteng_fini(dpu_bliteng);
+
+ return 0;
+}
+
+static int dpu_bliteng_resume(struct device *dev)
+{
+ struct dpu_bliteng *dpu_bliteng = dev_get_drvdata(dev);
+
+ if (dpu_bliteng != NULL)
+ dpu_bliteng_init(dpu_bliteng);
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(dpu_bliteng_pm_ops,
+ dpu_bliteng_suspend, dpu_bliteng_resume);
+
+struct platform_driver dpu_bliteng_driver = {
+ .driver = {
+ .name = "imx-drm-dpu-bliteng",
+ .pm = &dpu_bliteng_pm_ops,
+ },
+ .probe = dpu_bliteng_probe,
+ .remove = dpu_bliteng_remove,
+};
+
+module_platform_driver(dpu_bliteng_driver);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("NXP Semiconductor");
+MODULE_DESCRIPTION("i.MX DRM DPU BLITENG");
diff --git a/drivers/gpu/drm/imx/dpu/dpu-blit.h b/drivers/gpu/drm/imx/dpu/dpu-blit.h
new file mode 100644
index 000000000000..cf429086cdf4
--- /dev/null
+++ b/drivers/gpu/drm/imx/dpu/dpu-blit.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+/*
+ * Copyright 2021 NXP
+ */
+
+#ifndef _DPU_DRM_BLIT_H_
+#define _DPU_DRM_BLIT_H_
+
+#include <drm/drm_ioctl.h>
+
+#ifdef CONFIG_DRM_IMX_DPU
+extern const struct drm_ioctl_desc imx_drm_dpu_ioctls[3];
+#else
+static const struct drm_ioctl_desc imx_drm_dpu_ioctls[] = {};
+#endif
+
+#endif /* _DPU_DRM_BLIT_H_ */
diff --git a/drivers/gpu/drm/imx/dpu/dpu-crc.c b/drivers/gpu/drm/imx/dpu/dpu-crc.c
new file mode 100644
index 000000000000..cba4e1705e1c
--- /dev/null
+++ b/drivers/gpu/drm/imx/dpu/dpu-crc.c
@@ -0,0 +1,385 @@
+/*
+ * Copyright 2019-2021 NXP
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_device.h>
+#include <drm/drm_plane.h>
+#include <linux/interrupt.h>
+#include <linux/types.h>
+#include <video/dpu.h>
+#include "dpu-crc.h"
+#include "dpu-crtc.h"
+
+static inline void get_left(struct drm_rect *r, struct drm_display_mode *m)
+{
+ r->x1 = 0;
+ r->y1 = 0;
+ r->x2 = m->hdisplay >> 1;
+ r->y2 = m->vdisplay;
+}
+
+static inline void get_right(struct drm_rect *r, struct drm_display_mode *m)
+{
+ r->x1 = m->hdisplay >> 1;
+ r->y1 = 0;
+ r->x2 = m->hdisplay;
+ r->y2 = m->vdisplay;
+}
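+/*
+ * Worked example: for a 3840x2160 mode driven through the pixel combiner,
+ * get_left() yields the rect (0, 0)-(1920, 2160) and get_right() yields
+ * (1920, 0)-(3840, 2160), with x2/y2 exclusive as in struct drm_rect.
+ */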
+
+static void
+dpu_enable_signature_roi(struct dpu_signature *sig, struct drm_rect *roi)
+{
+ signature_continuous_mode(sig, true);
+ signature_win(sig, 0, roi->x1, roi->y1, roi->x2, roi->y2);
+ signature_eval_win(sig, 0, true);
+ signature_shdldreq(sig, 0x1);
+}
+
+static void dpu_disable_signature(struct dpu_signature *sig)
+{
+ signature_continuous_mode(sig, false);
+ signature_wait_for_idle(sig);
+ signature_eval_win(sig, 0, false);
+}
+
+/*
+ * Supported modes and source names:
+ * 1) auto mode:
+ * "auto" should be selected as the source name.
+ * The evaluation window is the same as the display region
+ * indicated by drm_crtc_state->adjusted_mode.
+ *
+ * 2) region of interest (ROI) mode:
+ * "roi:x1,y1,x2,y2" should be selected as the source name.
+ * The region of interest is defined by the inclusive upper-left
+ * position at (x1, y1) and the exclusive lower-right position
+ * at (x2, y2); see struct drm_rect for the same idea.
+ * The evaluation window is the region of interest.
+ */
+static int
+dpu_crc_parse_source(const char *source_name, enum dpu_crc_source *s,
+ struct drm_rect *roi)
+{
+ static const char roi_prefix[] = "roi:";
+
+ if (!source_name) {
+ *s = DPU_CRC_SRC_NONE;
+ } else if (!strcmp(source_name, "auto")) {
+ *s = DPU_CRC_SRC_FRAMEGEN;
+ } else if (strstarts(source_name, roi_prefix)) {
+ char *options, *opt, *orig;
+ int len = strlen(roi_prefix);
+ unsigned int params[4];
+ int i = 0, ret = 0;
+
+ orig = options = kstrdup(source_name + len, GFP_KERNEL);
+ if (!options)
+ return -ENOMEM;
+
+ while ((opt = strsep(&options, ",")) != NULL) {
+ if (i > 3) {
+ ret = -EINVAL;
+ break;
+ }
+
+ ret = kstrtouint(opt, 10, &params[i]);
+ if (ret < 0)
+ break;
+
+ i++;
+ }
+
+ kfree(orig);
+
+ if (ret < 0)
+ return ret;
+
+ if (i != 4)
+ return -EINVAL;
+
+ roi->x1 = params[0];
+ roi->y1 = params[1];
+ roi->x2 = params[2];
+ roi->y2 = params[3];
+
+ if (!drm_rect_visible(roi))
+ return -EINVAL;
+
+ *s = DPU_CRC_SRC_FRAMEGEN_ROI;
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
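+/*
+ * For example, through the standard DRM debugfs CRC interface (the
+ * debugfs path below is illustrative):
+ *
+ * # CRC over the full frame
+ * echo auto > /sys/kernel/debug/dri/0/crtc-0/crc/control
+ * # CRC over a 640x480 window anchored at the top-left corner
+ * echo roi:0,0,640,480 > /sys/kernel/debug/dri/0/crtc-0/crc/control
+ */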
+
+int dpu_crtc_verify_crc_source(struct drm_crtc *crtc, const char *source_name,
+ size_t *values_cnt)
+{
+ struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
+ struct imx_crtc_state *imx_crtc_state;
+ struct dpu_crtc_state *dcstate;
+ struct drm_rect roi;
+ enum dpu_crc_source source;
+ int ret;
+
+ if (dpu_crc_parse_source(source_name, &source, &roi) < 0) {
+ dev_dbg(dpu_crtc->dev, "unknown source %s\n", source_name);
+ return -EINVAL;
+ }
+
+ ret = drm_modeset_lock_single_interruptible(&crtc->mutex);
+ if (ret)
+ return ret;
+
+ imx_crtc_state = to_imx_crtc_state(crtc->state);
+ dcstate = to_dpu_crtc_state(imx_crtc_state);
+ *values_cnt = dcstate->use_pc ? 6 : 3;
+
+ drm_modeset_unlock(&crtc->mutex);
+
+ return ret;
+}
+
+int dpu_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name)
+{
+ struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
+ struct drm_modeset_acquire_ctx ctx;
+ struct drm_crtc_state *crtc_state;
+ struct drm_atomic_state *state;
+ struct drm_rect roi = {0, 0, 0, 0};
+ enum dpu_crc_source source;
+ int ret;
+
+ if (dpu_crc_parse_source(source_name, &source, &roi) < 0) {
+ dev_dbg(dpu_crtc->dev, "unknown source %s\n", source_name);
+ return -EINVAL;
+ }
+
+ /* Perform an atomic commit to set the CRC source. */
+ drm_modeset_acquire_init(&ctx, 0);
+
+ state = drm_atomic_state_alloc(crtc->dev);
+ if (!state) {
+ ret = -ENOMEM;
+ goto unlock;
+ }
+
+ state->acquire_ctx = &ctx;
+
+retry:
+ crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ if (!IS_ERR(crtc_state)) {
+ struct imx_crtc_state *imx_crtc_state;
+ struct dpu_crtc_state *dcstate;
+
+ imx_crtc_state = to_imx_crtc_state(crtc_state);
+ dcstate = to_dpu_crtc_state(imx_crtc_state);
+
+ if ((dcstate->use_pc && crtc->crc.values_cnt != 6) ||
+ (!dcstate->use_pc && crtc->crc.values_cnt != 3)) {
+ ret = -EINVAL;
+ goto put;
+ }
+
+ dcstate->crc.source = source;
+ dpu_copy_roi(&roi, &dcstate->crc.roi);
+ dpu_crtc->use_dual_crc = dcstate->use_pc;
+
+ ret = drm_atomic_commit(state);
+ } else {
+ ret = PTR_ERR(crtc_state);
+ }
+
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ drm_modeset_backoff(&ctx);
+ goto retry;
+ }
+
+put:
+ drm_atomic_state_put(state);
+
+unlock:
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+
+ return ret;
+}
+
+irqreturn_t dpu_crc_valid_irq_threaded_handler(int irq, void *dev_id)
+{
+ struct dpu_crtc *dpu_crtc = dev_id;
+ struct dpu_signature *sig = dpu_crtc->sig;
+ struct dpu_crtc *aux_dpu_crtc = dpu_crtc_get_aux_dpu_crtc(dpu_crtc);
+ bool dual_crc = dpu_crtc->use_dual_crc;
+ unsigned long ret;
+ uint32_t crcs[6] = {0, 0, 0, 0, 0, 0};
+
+ dev_dbg(dpu_crtc->dev, "CRC valid irq threaded handler\n");
+
+ signature_crc_value(sig, 0, &dpu_crtc->crc_red,
+ &dpu_crtc->crc_green,
+ &dpu_crtc->crc_blue);
+
+ if (dual_crc && dpu_crtc->stream_id == 1) {
+ complete(&aux_dpu_crtc->aux_crc_done);
+ return IRQ_HANDLED;
+ }
+
+ if (!dual_crc ||
+ dpu_crtc->dual_crc_flag != DPU_DUAL_CRC_FLAG_RIGHT) {
+ crcs[2] = dpu_crtc->crc_red;
+ crcs[1] = dpu_crtc->crc_green;
+ crcs[0] = dpu_crtc->crc_blue;
+ }
+
+ if (dual_crc && dpu_crtc->stream_id == 0) {
+ ret = wait_for_completion_timeout(&dpu_crtc->aux_crc_done,
+ HZ / 20);
+ if (ret == 0)
+ dev_warn(dpu_crtc->dev,
+ "wait for auxiliary CRC done timeout\n");
+
+ if (dpu_crtc->dual_crc_flag != DPU_DUAL_CRC_FLAG_LEFT) {
+ crcs[5] = aux_dpu_crtc->crc_red;
+ crcs[4] = aux_dpu_crtc->crc_green;
+ crcs[3] = aux_dpu_crtc->crc_blue;
+ }
+ }
+
+ drm_crtc_add_crc_entry(&dpu_crtc->base, false, 0, crcs);
+
+ return IRQ_HANDLED;
+}
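+/*
+ * Resulting entry layout in the CRC frame record built above:
+ * crcs[0..2] = blue/green/red of this stream and, when dual CRC is in
+ * use, crcs[3..5] = blue/green/red of the auxiliary stream; entries
+ * masked out by dual_crc_flag stay zero.
+ */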
+
+void dpu_crtc_enable_crc_source(struct drm_crtc *crtc,
+ enum dpu_crc_source source,
+ struct drm_rect *roi)
+{
+ struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
+ struct dpu_crtc *aux_dpu_crtc = dpu_crtc_get_aux_dpu_crtc(dpu_crtc);
+ struct imx_crtc_state *imx_crtc_state = to_imx_crtc_state(crtc->state);
+ struct dpu_crtc_state *dcstate = to_dpu_crtc_state(imx_crtc_state);
+ struct drm_display_mode *mode = &crtc->state->adjusted_mode;
+ struct completion *shdld_done;
+ struct drm_rect left, right;
+ struct drm_rect r, aux_r, clip;
+ bool dual_crc = dpu_crtc->use_dual_crc;
+ bool use_left, use_right;
+ int half_hdisplay;
+ unsigned long ret;
+
+ if (source == DPU_CRC_SRC_NONE)
+ return;
+
+ if (dual_crc != dcstate->use_pc)
+ return;
+
+ if (dpu_crtc->crc_is_enabled)
+ return;
+
+ if (dual_crc) {
+ half_hdisplay = mode->hdisplay >> 1;
+
+ get_left(&left, mode);
+ get_right(&right, mode);
+
+ dpu_copy_roi(&left, &clip);
+ if (drm_rect_intersect(&clip, roi)) {
+ dpu_copy_roi(&clip, &r);
+ use_left = true;
+ } else {
+ dpu_copy_roi(&left, &r);
+ use_left = false;
+ }
+
+ if (drm_rect_intersect(&right, roi)) {
+ right.x1 -= half_hdisplay;
+ right.x2 -= half_hdisplay;
+ dpu_copy_roi(&right, &aux_r);
+ use_right = true;
+ } else {
+ dpu_copy_roi(&left, &aux_r);
+ use_right = false;
+ }
+
+ if (use_left && !use_right) {
+ dpu_crtc->dual_crc_flag = DPU_DUAL_CRC_FLAG_LEFT;
+ } else if (!use_left && use_right) {
+ dpu_crtc->dual_crc_flag = DPU_DUAL_CRC_FLAG_RIGHT;
+ } else if (use_left && use_right) {
+ dpu_crtc->dual_crc_flag = DPU_DUAL_CRC_FLAG_DUAL;
+ } else {
+ dpu_crtc->dual_crc_flag = DPU_DUAL_CRC_FLAG_ERR_NONE;
+ dev_err(dpu_crtc->dev, "error flag for dual CRC\n");
+ return;
+ }
+ } else {
+ dpu_copy_roi(roi, &r);
+ }
+
+ enable_irq(dpu_crtc->crc_valid_irq);
+ enable_irq(dpu_crtc->crc_shdld_irq);
+ disengcfg_sig_select(dpu_crtc->dec, DEC_SIG_SEL_FRAMEGEN);
+ dpu_enable_signature_roi(dpu_crtc->sig, &r);
+
+ if (dual_crc) {
+ aux_dpu_crtc->use_dual_crc = dual_crc;
+ enable_irq(aux_dpu_crtc->crc_valid_irq);
+ enable_irq(aux_dpu_crtc->crc_shdld_irq);
+ disengcfg_sig_select(dpu_crtc->aux_dec, DEC_SIG_SEL_FRAMEGEN);
+ dpu_enable_signature_roi(dpu_crtc->aux_sig, &aux_r);
+ }
+
+ shdld_done = &dpu_crtc->crc_shdld_done;
+ ret = wait_for_completion_timeout(shdld_done, HZ);
+ if (ret == 0)
+ dev_warn(dpu_crtc->dev, "wait for CRC shdld done timeout\n");
+
+ if (dual_crc) {
+ shdld_done = &aux_dpu_crtc->crc_shdld_done;
+ ret = wait_for_completion_timeout(shdld_done, HZ);
+ if (ret == 0)
+ dev_warn(dpu_crtc->dev,
+ "wait for auxiliary CRC shdld done timeout\n");
+ }
+
+ disable_irq(dpu_crtc->crc_shdld_irq);
+ if (dual_crc)
+ disable_irq(aux_dpu_crtc->crc_shdld_irq);
+
+ dpu_crtc->crc_is_enabled = true;
+
+ dev_dbg(dpu_crtc->dev, "enable CRC source %d, ROI:" DRM_RECT_FMT "\n",
+ source, DRM_RECT_ARG(roi));
+}
+
+void dpu_crtc_disable_crc_source(struct drm_crtc *crtc, bool dual_crc)
+{
+ struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
+ struct dpu_crtc *aux_dpu_crtc = dpu_crtc_get_aux_dpu_crtc(dpu_crtc);
+
+ if (!dpu_crtc->crc_is_enabled)
+ return;
+
+ dpu_disable_signature(dpu_crtc->sig);
+ if (dual_crc)
+ dpu_disable_signature(dpu_crtc->aux_sig);
+
+ disable_irq(dpu_crtc->crc_valid_irq);
+ if (dual_crc) {
+ disable_irq(aux_dpu_crtc->crc_valid_irq);
+ reinit_completion(&dpu_crtc->aux_crc_done);
+ }
+
+ dpu_crtc->crc_is_enabled = false;
+
+ dev_dbg(dpu_crtc->dev, "disable CRC source\n");
+}
diff --git a/drivers/gpu/drm/imx/dpu/dpu-crc.h b/drivers/gpu/drm/imx/dpu/dpu-crc.h
new file mode 100644
index 000000000000..25c03937470e
--- /dev/null
+++ b/drivers/gpu/drm/imx/dpu/dpu-crc.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright 2019,2020 NXP
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#ifndef _DPU_CRC_H_
+#define _DPU_CRC_H_
+
+#include "dpu-crtc.h"
+
+enum {
+ DPU_DUAL_CRC_FLAG_DUAL,
+ DPU_DUAL_CRC_FLAG_LEFT,
+ DPU_DUAL_CRC_FLAG_RIGHT,
+ DPU_DUAL_CRC_FLAG_ERR_NONE,
+};
+
+static inline bool to_enable_dpu_crc(struct dpu_crtc_state *new_dcstate,
+ struct dpu_crtc_state *old_dcstate)
+{
+ return old_dcstate->crc.source == DPU_CRC_SRC_NONE &&
+ new_dcstate->crc.source != DPU_CRC_SRC_NONE;
+}
+
+static inline bool to_disable_dpu_crc(struct dpu_crtc_state *new_dcstate,
+ struct dpu_crtc_state *old_dcstate)
+{
+ return old_dcstate->crc.source != DPU_CRC_SRC_NONE &&
+ new_dcstate->crc.source == DPU_CRC_SRC_NONE;
+}
+
+static inline void dpu_copy_roi(struct drm_rect *from, struct drm_rect *to)
+{
+ to->x1 = from->x1;
+ to->y1 = from->y1;
+ to->x2 = from->x2;
+ to->y2 = from->y2;
+}
+
+#ifdef CONFIG_DEBUG_FS
+int dpu_crtc_verify_crc_source(struct drm_crtc *crtc, const char *source_name,
+ size_t *values_cnt);
+int dpu_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name);
+irqreturn_t dpu_crc_valid_irq_threaded_handler(int irq, void *dev_id);
+void dpu_crtc_enable_crc_source(struct drm_crtc *crtc,
+ enum dpu_crc_source source,
+ struct drm_rect *roi);
+void dpu_crtc_disable_crc_source(struct drm_crtc *crtc, bool dual_crc);
+#else
+#define dpu_crtc_verify_crc_source NULL
+#define dpu_crtc_set_crc_source NULL
+static inline irqreturn_t
+dpu_crc_valid_irq_threaded_handler(int irq, void *dev_id)
+{
+ return IRQ_HANDLED;
+}
+static inline void dpu_crtc_enable_crc_source(struct drm_crtc *crtc,
+ enum dpu_crc_source source,
+ struct drm_rect *roi)
+{
+}
+static inline void dpu_crtc_disable_crc_source(struct drm_crtc *crtc,
+ bool dual_crc)
+{
+}
+#endif
+
+#endif
diff --git a/drivers/gpu/drm/imx/dpu/dpu-crtc.c b/drivers/gpu/drm/imx/dpu/dpu-crtc.c
new file mode 100644
index 000000000000..94a60bce889d
--- /dev/null
+++ b/drivers/gpu/drm/imx/dpu/dpu-crtc.c
@@ -0,0 +1,1451 @@
+/*
+ * Copyright 2017-2022 NXP
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include <drm/drm_vblank.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <linux/component.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/export.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <video/dpu.h>
+#include <video/imx8-pc.h>
+#include <video/imx8-prefetch.h>
+#include "dpu-crc.h"
+#include "dpu-crtc.h"
+#include "dpu-kms.h"
+#include "dpu-plane.h"
+#include "../imx-drm.h"
+
+static inline struct dpu_plane_state **
+alloc_dpu_plane_states(struct dpu_crtc *dpu_crtc)
+{
+ struct dpu_plane_state **states;
+
+ states = kcalloc(dpu_crtc->hw_plane_num, sizeof(*states), GFP_KERNEL);
+ if (!states)
+ return ERR_PTR(-ENOMEM);
+
+ return states;
+}
+
+static void dpu_crtc_queue_state_event(struct drm_crtc *crtc)
+{
+ struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
+
+ spin_lock_irq(&crtc->dev->event_lock);
+ if (crtc->state->event) {
+ WARN_ON(drm_crtc_vblank_get(crtc));
+ WARN_ON(dpu_crtc->event);
+ dpu_crtc->event = crtc->state->event;
+ crtc->state->event = NULL;
+ }
+ spin_unlock_irq(&crtc->dev->event_lock);
+}
+
+struct dpu_plane_state **
+crtc_state_get_dpu_plane_states(struct drm_crtc_state *state)
+{
+ struct imx_crtc_state *imx_crtc_state = to_imx_crtc_state(state);
+ struct dpu_crtc_state *dcstate = to_dpu_crtc_state(imx_crtc_state);
+
+ return dcstate->dpu_plane_states;
+}
+
+struct dpu_crtc *dpu_crtc_get_aux_dpu_crtc(struct dpu_crtc *dpu_crtc)
+{
+ struct drm_crtc *crtc = &dpu_crtc->base, *tmp_crtc;
+ struct drm_device *dev = crtc->dev;
+ struct dpu_crtc *aux_dpu_crtc = NULL;
+
+ drm_for_each_crtc(tmp_crtc, dev) {
+ if (tmp_crtc == crtc)
+ continue;
+
+ aux_dpu_crtc = to_dpu_crtc(tmp_crtc);
+
+ if (dpu_crtc->crtc_grp_id == aux_dpu_crtc->crtc_grp_id)
+ break;
+
+ /* keep NULL if no CRTC of the same group is found */
+ aux_dpu_crtc = NULL;
+ }
+
+ BUG_ON(!aux_dpu_crtc);
+
+ return aux_dpu_crtc;
+}
+
+static void dpu_crtc_atomic_enable(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
+ struct dpu_crtc *aux_dpu_crtc = dpu_crtc_get_aux_dpu_crtc(dpu_crtc);
+ struct imx_crtc_state *imx_crtc_state = to_imx_crtc_state(crtc->state);
+ struct dpu_crtc_state *dcstate = to_dpu_crtc_state(imx_crtc_state);
+ struct dpu_plane *dplane = to_dpu_plane(crtc->primary);
+ struct dpu_plane_res *res = &dplane->grp->res;
+ struct dpu_extdst *plane_ed = res->ed[dplane->stream_id];
+ struct dpu_extdst *aux_plane_ed = dpu_aux_ed_peek(plane_ed);
+ struct dpu_extdst *m_plane_ed = NULL, *s_plane_ed;
+ struct completion *shdld_done;
+ struct completion *m_safety_shdld_done, *s_safety_shdld_done;
+ struct completion *m_content_shdld_done, *s_content_shdld_done;
+ struct completion *m_dec_shdld_done, *s_dec_shdld_done;
+ unsigned long ret, flags;
+
+ drm_crtc_vblank_on(crtc);
+
+ if (dcstate->use_pc) {
+ tcon_enable_pc(dpu_crtc->tcon);
+
+ if (extdst_is_master(plane_ed)) {
+ m_plane_ed = plane_ed;
+ s_plane_ed = aux_plane_ed;
+ } else {
+ m_plane_ed = aux_plane_ed;
+ s_plane_ed = plane_ed;
+ }
+ extdst_pixengcfg_syncmode_master(m_plane_ed, true);
+ extdst_pixengcfg_syncmode_master(s_plane_ed, false);
+ } else {
+ extdst_pixengcfg_syncmode_master(plane_ed, false);
+ }
+
+ enable_irq(dpu_crtc->safety_shdld_irq);
+ enable_irq(dpu_crtc->content_shdld_irq);
+ enable_irq(dpu_crtc->dec_shdld_irq);
+ if (dcstate->use_pc) {
+ enable_irq(aux_dpu_crtc->safety_shdld_irq);
+ enable_irq(aux_dpu_crtc->content_shdld_irq);
+ enable_irq(aux_dpu_crtc->dec_shdld_irq);
+ }
+
+ if (dcstate->use_pc) {
+ framegen_enable_clock(dpu_crtc->stream_id ?
+ dpu_crtc->aux_fg : dpu_crtc->fg);
+ extdst_pixengcfg_sync_trigger(m_plane_ed);
+ framegen_shdtokgen(dpu_crtc->m_fg);
+
+ /* don't relinquish CPU until TCONs are set to operation mode */
+ local_irq_save(flags);
+ preempt_disable();
+ /* First turn on the slave stream, second the master stream. */
+ framegen_enable(dpu_crtc->s_fg);
+ framegen_enable(dpu_crtc->m_fg);
+ /*
+ * TKT320590:
+ * Switch the TCONs into operation mode as soon as the first
+ * dumb frame is generated by the DPU from the master stream
+ * (we don't relinquish the CPU, to ensure this). This lets
+ * the DPRs/PRGs of the two streams evade their respective
+ * dumb frames.
+ */
+ framegen_wait_for_frame_counter_moving(dpu_crtc->m_fg);
+ /* again, slave first, then master */
+ tcon_set_operation_mode(dpu_crtc->s_tcon);
+ tcon_set_operation_mode(dpu_crtc->m_tcon);
+ local_irq_restore(flags);
+ preempt_enable();
+
+ framegen_enable_pixel_link(dpu_crtc->s_fg);
+ framegen_enable_pixel_link(dpu_crtc->m_fg);
+
+ if (dpu_crtc->aux_is_master) {
+ m_safety_shdld_done = &aux_dpu_crtc->safety_shdld_done;
+ m_content_shdld_done = &aux_dpu_crtc->content_shdld_done;
+ m_dec_shdld_done = &aux_dpu_crtc->dec_shdld_done;
+ s_safety_shdld_done = &dpu_crtc->safety_shdld_done;
+ s_content_shdld_done = &dpu_crtc->content_shdld_done;
+ s_dec_shdld_done = &dpu_crtc->dec_shdld_done;
+ } else {
+ m_safety_shdld_done = &dpu_crtc->safety_shdld_done;
+ m_content_shdld_done = &dpu_crtc->content_shdld_done;
+ m_dec_shdld_done = &dpu_crtc->dec_shdld_done;
+ s_safety_shdld_done = &aux_dpu_crtc->safety_shdld_done;
+ s_content_shdld_done = &aux_dpu_crtc->content_shdld_done;
+ s_dec_shdld_done = &aux_dpu_crtc->dec_shdld_done;
+ }
+
+ ret = wait_for_completion_timeout(m_safety_shdld_done, HZ);
+ if (ret == 0)
+ DRM_WARN("[CRTC:%d:%s] %s: wait for master safety shdld done timeout\n",
+ crtc->base.id, crtc->name, __func__);
+ ret = wait_for_completion_timeout(m_content_shdld_done, HZ);
+ if (ret == 0)
+ DRM_WARN("[CRTC:%d:%s] %s: wait for master content shdld done timeout\n",
+ crtc->base.id, crtc->name, __func__);
+ ret = wait_for_completion_timeout(m_dec_shdld_done, HZ);
+ if (ret == 0)
+ DRM_WARN("[CRTC:%d:%s] %s: wait for master DEC shdld done timeout\n",
+ crtc->base.id, crtc->name, __func__);
+
+ ret = wait_for_completion_timeout(s_safety_shdld_done, HZ);
+ if (ret == 0)
+ DRM_WARN("[CRTC:%d:%s] %s: wait for slave safety shdld done timeout\n",
+ crtc->base.id, crtc->name, __func__);
+ ret = wait_for_completion_timeout(s_content_shdld_done, HZ);
+ if (ret == 0)
+ DRM_WARN("[CRTC:%d:%s] %s: wait for slave content shdld done timeout\n",
+ crtc->base.id, crtc->name, __func__);
+ ret = wait_for_completion_timeout(s_dec_shdld_done, HZ);
+ if (ret == 0)
+ DRM_WARN("[CRTC:%d:%s] %s: wait for slave DEC shdld done timeout\n",
+ crtc->base.id, crtc->name, __func__);
+ } else {
+ framegen_enable_clock(dpu_crtc->fg);
+ extdst_pixengcfg_sync_trigger(plane_ed);
+ extdst_pixengcfg_sync_trigger(dpu_crtc->ed);
+ framegen_shdtokgen(dpu_crtc->fg);
+
+ /* don't relinquish CPU until TCON is set to operation mode */
+ local_irq_save(flags);
+ preempt_disable();
+ framegen_enable(dpu_crtc->fg);
+ /*
+ * TKT320590:
+ * Switch the TCON into operation mode as soon as the first
+ * dumb frame is generated by the DPU (we don't relinquish
+ * the CPU, to ensure this). This lets the DPR/PRG evade
+ * that frame.
+ */
+ framegen_wait_for_frame_counter_moving(dpu_crtc->fg);
+ tcon_set_operation_mode(dpu_crtc->tcon);
+ local_irq_restore(flags);
+ preempt_enable();
+
+ framegen_enable_pixel_link(dpu_crtc->fg);
+
+ shdld_done = &dpu_crtc->safety_shdld_done;
+ ret = wait_for_completion_timeout(shdld_done, HZ);
+ if (ret == 0)
+ DRM_WARN("[CRTC:%d:%s] %s: wait for safety shdld done timeout\n",
+ crtc->base.id, crtc->name, __func__);
+ shdld_done = &dpu_crtc->content_shdld_done;
+ ret = wait_for_completion_timeout(shdld_done, HZ);
+ if (ret == 0)
+ DRM_WARN("[CRTC:%d:%s] %s: wait for content shdld done timeout\n",
+ crtc->base.id, crtc->name, __func__);
+ shdld_done = &dpu_crtc->dec_shdld_done;
+ ret = wait_for_completion_timeout(shdld_done, HZ);
+ if (ret == 0)
+ DRM_WARN("[CRTC:%d:%s] %s: wait for DEC shdld done timeout\n",
+ crtc->base.id, crtc->name, __func__);
+ }
+
+ disable_irq(dpu_crtc->safety_shdld_irq);
+ disable_irq(dpu_crtc->content_shdld_irq);
+ disable_irq(dpu_crtc->dec_shdld_irq);
+ if (dcstate->use_pc) {
+ disable_irq(aux_dpu_crtc->safety_shdld_irq);
+ disable_irq(aux_dpu_crtc->content_shdld_irq);
+ disable_irq(aux_dpu_crtc->dec_shdld_irq);
+ }
+
+ dpu_crtc_queue_state_event(crtc);
+
+ if (dcstate->use_pc) {
+ framegen_wait_for_secondary_syncup(dpu_crtc->m_fg);
+ framegen_wait_for_secondary_syncup(dpu_crtc->s_fg);
+
+ if (framegen_secondary_requests_to_read_empty_fifo(dpu_crtc->m_fg)) {
+ framegen_secondary_clear_channel_status(dpu_crtc->m_fg);
+ DRM_WARN("[CRTC:%d:%s] %s: master FrameGen requests to read empty FIFO\n",
+ crtc->base.id, crtc->name, __func__);
+ }
+ if (framegen_secondary_requests_to_read_empty_fifo(dpu_crtc->s_fg)) {
+ framegen_secondary_clear_channel_status(dpu_crtc->s_fg);
+ DRM_WARN("[CRTC:%d:%s] %s: slave FrameGen requests to read empty FIFO\n",
+ crtc->base.id, crtc->name, __func__);
+ }
+ } else {
+ framegen_wait_for_secondary_syncup(dpu_crtc->fg);
+
+ if (framegen_secondary_requests_to_read_empty_fifo(dpu_crtc->fg)) {
+ framegen_secondary_clear_channel_status(dpu_crtc->fg);
+ DRM_WARN("[CRTC:%d:%s] %s: FrameGen requests to read empty FIFO\n",
+ crtc->base.id, crtc->name, __func__);
+ }
+ }
+
+ if (dcstate->crc.source != DPU_CRC_SRC_NONE)
+ dpu_crtc_enable_crc_source(crtc,
+ dcstate->crc.source, &dcstate->crc.roi);
+}
+
+static void dpu_crtc_atomic_disable(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
+ struct drm_crtc_state *old_crtc_state =
+ drm_atomic_get_old_crtc_state(state, crtc);
+ struct imx_crtc_state *imx_crtc_state =
+ to_imx_crtc_state(old_crtc_state);
+ struct dpu_crtc_state *dcstate = to_dpu_crtc_state(imx_crtc_state);
+ struct drm_display_mode *adjusted_mode = &old_crtc_state->adjusted_mode;
+ struct dpu_plane *dplane = to_dpu_plane(crtc->primary);
+ struct dpu_plane_res *res = &dplane->grp->res;
+ struct dpu_plane_state *dpstate;
+ struct dpu_fetchunit *fu;
+ unsigned long flags;
+ int i;
+
+ if (dcstate->crc.source != DPU_CRC_SRC_NONE)
+ dpu_crtc_disable_crc_source(crtc, dcstate->use_pc);
+
+ if (dcstate->use_pc) {
+ tcon_disable_pc(dpu_crtc->tcon);
+
+ framegen_disable_pixel_link(dpu_crtc->m_fg);
+ framegen_disable_pixel_link(dpu_crtc->s_fg);
+
+ /* don't relinquish CPU until DPRC repeat_en is disabled */
+ local_irq_save(flags);
+ preempt_disable();
+ /*
+ * Sync to FrameGen frame counter moving so that
+ * FrameGen can be disabled in the next frame.
+ */
+ framegen_wait_for_frame_counter_moving(dpu_crtc->m_fg);
+
+ /* First turn off the master stream, second the slave stream. */
+ framegen_disable(dpu_crtc->m_fg);
+ framegen_disable(dpu_crtc->s_fg);
+
+ /*
+ * There is one frame leftover after FrameGen disablement.
+ * Sync to FrameGen frame counter moving so that
+ * DPRC repeat_en can be disabled in the next frame.
+ */
+ framegen_wait_for_frame_counter_moving(dpu_crtc->m_fg);
+
+ for (i = 0; i < dpu_crtc->hw_plane_num; i++) {
+ lb_sec_sel_t source;
+ bool aux_source_flag;
+ bool use_prefetch;
+
+ dpstate = dcstate->dpu_plane_states[i];
+ if (!dpstate)
+ continue;
+
+ aux_source_flag = false;
+again:
+ source = aux_source_flag ? dpstate->aux_source :
+ dpstate->source;
+ use_prefetch = aux_source_flag ?
+ dpstate->use_aux_prefetch :
+ dpstate->use_prefetch;
+ fu = source_to_fu(res, source);
+ if (!fu) {
+ local_irq_restore(flags);
+ preempt_enable();
+ return;
+ }
+
+ if (fu->dprc && use_prefetch)
+ dprc_disable_repeat_en(fu->dprc);
+
+ if (dpstate->need_aux_source && !aux_source_flag) {
+ aux_source_flag = true;
+ goto again;
+ }
+ }
+ local_irq_restore(flags);
+ preempt_enable();
+
+ framegen_wait_done(dpu_crtc->m_fg, adjusted_mode);
+ framegen_wait_done(dpu_crtc->s_fg, adjusted_mode);
+
+ framegen_disable_clock(dpu_crtc->stream_id ?
+ dpu_crtc->aux_fg : dpu_crtc->fg);
+ } else {
+ framegen_disable_pixel_link(dpu_crtc->fg);
+
+ /* don't relinquish CPU until DPRC repeat_en is disabled */
+ local_irq_save(flags);
+ preempt_disable();
+ /*
+ * Sync to FrameGen frame counter moving so that
+ * FrameGen can be disabled in the next frame.
+ */
+ framegen_wait_for_frame_counter_moving(dpu_crtc->fg);
+ framegen_disable(dpu_crtc->fg);
+ /*
+ * There is one frame leftover after FrameGen disablement.
+ * Sync to FrameGen frame counter moving so that
+ * DPRC repeat_en can be disabled in the next frame.
+ */
+ framegen_wait_for_frame_counter_moving(dpu_crtc->fg);
+
+ for (i = 0; i < dpu_crtc->hw_plane_num; i++) {
+ dpstate = dcstate->dpu_plane_states[i];
+ if (!dpstate)
+ continue;
+
+ fu = source_to_fu(res, dpstate->source);
+ if (!fu) {
+ local_irq_restore(flags);
+ preempt_enable();
+ return;
+ }
+
+ if (fu->dprc && dpstate->use_prefetch)
+ dprc_disable_repeat_en(fu->dprc);
+ }
+ local_irq_restore(flags);
+ preempt_enable();
+
+ framegen_wait_done(dpu_crtc->fg, adjusted_mode);
+ framegen_disable_clock(dpu_crtc->fg);
+ }
+
+ drm_crtc_vblank_off(crtc);
+
+ spin_lock_irq(&crtc->dev->event_lock);
+ if (crtc->state->event && !crtc->state->active) {
+ drm_crtc_send_vblank_event(crtc, crtc->state->event);
+ crtc->state->event = NULL;
+ }
+ spin_unlock_irq(&crtc->dev->event_lock);
+}
+
+static void dpu_drm_crtc_reset(struct drm_crtc *crtc)
+{
+ struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
+ struct imx_crtc_state *imx_crtc_state;
+ struct dpu_crtc_state *state;
+
+ if (crtc->state) {
+ __drm_atomic_helper_crtc_destroy_state(crtc->state);
+
+ imx_crtc_state = to_imx_crtc_state(crtc->state);
+ state = to_dpu_crtc_state(imx_crtc_state);
+ kfree(state->dpu_plane_states);
+ kfree(state);
+ crtc->state = NULL;
+ }
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (state) {
+ state->crc.source = DPU_CRC_SRC_NONE;
+ state->crc.roi.x1 = 0;
+ state->crc.roi.y1 = 0;
+ state->crc.roi.x2 = 0;
+ state->crc.roi.y2 = 0;
+
+ state->dpu_plane_states = alloc_dpu_plane_states(dpu_crtc);
+ if (IS_ERR(state->dpu_plane_states)) {
+ kfree(state);
+ return;
+ }
+
+ crtc->state = &state->imx_crtc_state.base;
+ crtc->state->crtc = crtc;
+ }
+}
+
+static struct drm_crtc_state *
+dpu_drm_crtc_duplicate_state(struct drm_crtc *crtc)
+{
+ struct imx_crtc_state *imx_crtc_state;
+ struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
+ struct dpu_crtc_state *state, *copy;
+
+ if (WARN_ON(!crtc->state))
+ return NULL;
+
+ copy = kzalloc(sizeof(*copy), GFP_KERNEL);
+ if (!copy)
+ return NULL;
+
+ copy->dpu_plane_states = alloc_dpu_plane_states(dpu_crtc);
+ if (IS_ERR(copy->dpu_plane_states)) {
+ kfree(copy);
+ return NULL;
+ }
+
+ __drm_atomic_helper_crtc_duplicate_state(crtc,
+ &copy->imx_crtc_state.base);
+ imx_crtc_state = to_imx_crtc_state(crtc->state);
+ state = to_dpu_crtc_state(imx_crtc_state);
+ copy->use_pc = state->use_pc;
+ copy->crc.source = state->crc.source;
+ dpu_copy_roi(&state->crc.roi, &copy->crc.roi);
+
+ return &copy->imx_crtc_state.base;
+}
+
+static void dpu_drm_crtc_destroy_state(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ struct imx_crtc_state *imx_crtc_state = to_imx_crtc_state(state);
+ struct dpu_crtc_state *dcstate;
+
+ if (state) {
+ __drm_atomic_helper_crtc_destroy_state(state);
+ dcstate = to_dpu_crtc_state(imx_crtc_state);
+ kfree(dcstate->dpu_plane_states);
+ kfree(dcstate);
+ }
+}
+
+static int dpu_enable_vblank(struct drm_crtc *crtc)
+{
+ struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
+
+ enable_irq(dpu_crtc->vbl_irq);
+
+ return 0;
+}
+
+static void dpu_disable_vblank(struct drm_crtc *crtc)
+{
+ struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
+
+ disable_irq_nosync(dpu_crtc->vbl_irq);
+}
+
+static const struct drm_crtc_funcs dpu_crtc_funcs = {
+ .set_config = drm_atomic_helper_set_config,
+ .destroy = drm_crtc_cleanup,
+ .page_flip = drm_atomic_helper_page_flip,
+ .reset = dpu_drm_crtc_reset,
+ .atomic_duplicate_state = dpu_drm_crtc_duplicate_state,
+ .atomic_destroy_state = dpu_drm_crtc_destroy_state,
+ .enable_vblank = dpu_enable_vblank,
+ .disable_vblank = dpu_disable_vblank,
+ .set_crc_source = dpu_crtc_set_crc_source,
+ .verify_crc_source = dpu_crtc_verify_crc_source,
+};
+
+static irqreturn_t dpu_vbl_irq_handler(int irq, void *dev_id)
+{
+ struct dpu_crtc *dpu_crtc = dev_id;
+ struct drm_crtc *crtc = &dpu_crtc->base;
+ unsigned long flags;
+
+ drm_crtc_handle_vblank(crtc);
+
+ spin_lock_irqsave(&crtc->dev->event_lock, flags);
+ if (dpu_crtc->event) {
+ drm_crtc_send_vblank_event(crtc, dpu_crtc->event);
+ dpu_crtc->event = NULL;
+ drm_crtc_vblank_put(crtc);
+ }
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t dpu_safety_shdld_irq_handler(int irq, void *dev_id)
+{
+ struct dpu_crtc *dpu_crtc = dev_id;
+
+ complete(&dpu_crtc->safety_shdld_done);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t dpu_content_shdld_irq_handler(int irq, void *dev_id)
+{
+ struct dpu_crtc *dpu_crtc = dev_id;
+
+ complete(&dpu_crtc->content_shdld_done);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t dpu_dec_shdld_irq_handler(int irq, void *dev_id)
+{
+ struct dpu_crtc *dpu_crtc = dev_id;
+
+ complete(&dpu_crtc->dec_shdld_done);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t dpu_crc_shdld_irq_handler(int irq, void *dev_id)
+{
+ struct dpu_crtc *dpu_crtc = dev_id;
+
+ complete(&dpu_crtc->crc_shdld_done);
+
+ return IRQ_HANDLED;
+}
+
+static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_encoder *encoder;
+ struct drm_plane *plane;
+ struct drm_plane_state *plane_state;
+ struct dpu_plane_state *dpstate;
+ struct drm_crtc_state *crtc_state =
+ drm_atomic_get_new_crtc_state(state, crtc);
+ struct imx_crtc_state *imx_crtc_state = to_imx_crtc_state(crtc_state);
+ struct dpu_crtc_state *dcstate = to_dpu_crtc_state(imx_crtc_state);
+ struct imx_crtc_state *old_imx_crtc_state =
+ to_imx_crtc_state(crtc->state);
+ struct dpu_crtc_state *old_dcstate =
+ to_dpu_crtc_state(old_imx_crtc_state);
+ struct drm_display_mode *mode = &crtc_state->adjusted_mode;
+ struct videomode vm;
+ unsigned long encoder_type = DRM_MODE_ENCODER_NONE;
+ u32 encoder_mask;
+ int i = 0;
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ encoder_mask = 1 << drm_encoder_index(encoder);
+
+ if (!(crtc_state->encoder_mask & encoder_mask))
+ continue;
+
+ encoder_type = encoder->encoder_type;
+ }
+
+ if (crtc_state->enable && dcstate->use_pc) {
+ if (encoder_type != DRM_MODE_ENCODER_TMDS) {
+ DRM_DEBUG_KMS("[CRTC:%d:%s] enc type %lu doesn't support pc\n",
+ crtc->base.id, crtc->name, encoder_type);
+ return -EINVAL;
+ }
+
+ drm_display_mode_to_videomode(mode, &vm);
+ if ((vm.hactive % 2) || (vm.hfront_porch % 2) ||
+ (vm.hsync_len % 2) || (vm.hback_porch % 2)) {
+ DRM_DEBUG_KMS("[CRTC:%d:%s] video mode is invalid\n",
+ crtc->base.id, crtc->name);
+ return -EINVAL;
+ }
+ }
+
+ /* disallow enabling CRC while the CRTC stays inactive */
+ if (!crtc->state->active && !crtc_state->enable &&
+ to_enable_dpu_crc(dcstate, old_dcstate))
+ return -EINVAL;
+
+ if (crtc_state->enable && dcstate->crc.source == DPU_CRC_SRC_FRAMEGEN) {
+ dcstate->crc.roi.x1 = 0;
+ dcstate->crc.roi.y1 = 0;
+ dcstate->crc.roi.x2 = mode->hdisplay;
+ dcstate->crc.roi.y2 = mode->vdisplay;
+ }
+
+ if (crtc_state->enable && dcstate->crc.source != DPU_CRC_SRC_NONE) {
+ if (dcstate->crc.roi.x1 < 0 || dcstate->crc.roi.y1 < 0)
+ return -EINVAL;
+
+ if (dcstate->crc.roi.x2 > mode->hdisplay ||
+ dcstate->crc.roi.y2 > mode->vdisplay)
+ return -EINVAL;
+
+ if (!drm_rect_visible(&dcstate->crc.roi))
+ return -EINVAL;
+ }
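+ /*
+ * For example, with a 1920x1080 adjusted mode, "roi:0,0,960,1080"
+ * passes the checks above, while "roi:0,0,2048,1080" is rejected
+ * because its x2 exceeds mode->hdisplay.
+ */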
+
+ /*
+ * cache the plane states so that the planes can be disabled in
+ * ->atomic_begin.
+ */
+ drm_for_each_plane_mask(plane, crtc->dev, crtc_state->plane_mask) {
+ plane_state =
+ drm_atomic_get_plane_state(crtc_state->state, plane);
+ if (IS_ERR(plane_state))
+ return PTR_ERR(plane_state);
+
+ dpstate = to_dpu_plane_state(plane_state);
+ dcstate->dpu_plane_states[i++] = dpstate;
+ }
+
+ return 0;
+}
+
+static void dpu_crtc_atomic_begin(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
+ struct drm_crtc_state *old_crtc_state =
+ drm_atomic_get_old_crtc_state(state, crtc);
+ struct imx_crtc_state *old_imx_crtc_state =
+ to_imx_crtc_state(old_crtc_state);
+ struct dpu_crtc_state *old_dcstate =
+ to_dpu_crtc_state(old_imx_crtc_state);
+ int i;
+
+ /*
+ * Disable all planes' resources in SHADOW only.
+ * Whether any of them would be disabled or kept running depends
+ * on new plane states' commit.
+ */
+ for (i = 0; i < dpu_crtc->hw_plane_num; i++) {
+ struct dpu_plane_state *old_dpstate;
+ struct drm_plane_state *plane_state;
+ struct dpu_plane *dplane;
+ struct dpu_plane_res *res;
+ struct dpu_fetchunit *fu;
+ struct dpu_fetchunit *fe = NULL;
+ struct dpu_hscaler *hs = NULL;
+ struct dpu_vscaler *vs = NULL;
+ struct dpu_layerblend *lb;
+ struct dpu_extdst *ed;
+ extdst_src_sel_t ed_src;
+ dpu_block_id_t blend;
+ lb_sec_sel_t source;
+ unsigned int stream_id;
+ int lb_id;
+ bool release_aux_source;
+
+ old_dpstate = old_dcstate->dpu_plane_states[i];
+ if (!old_dpstate)
+ continue;
+
+ plane_state = &old_dpstate->base;
+ dplane = to_dpu_plane(plane_state->plane);
+ res = &dplane->grp->res;
+
+ release_aux_source = false;
+again:
+ if (old_dcstate->use_pc) {
+ if (release_aux_source) {
+ source = old_dpstate->aux_source;
+ blend = old_dpstate->aux_blend;
+ stream_id = 1;
+ } else {
+ source = old_dpstate->source;
+ blend = old_dpstate->blend;
+ stream_id = old_dpstate->left_src_w ? 0 : 1;
+ }
+ } else {
+ source = old_dpstate->source;
+ blend = old_dpstate->blend;
+ stream_id = dplane->stream_id;
+ }
+
+ fu = source_to_fu(res, source);
+ if (!fu)
+ return;
+
+ lb_id = blend_to_id(blend);
+ if (lb_id < 0)
+ return;
+
+ lb = res->lb[lb_id];
+
+ layerblend_pixengcfg_clken(lb, CLKEN__DISABLE);
+ if (fetchunit_is_fetchdecode(fu)) {
+ fe = fetchdecode_get_fetcheco(fu);
+ hs = fetchdecode_get_hscaler(fu);
+ vs = fetchdecode_get_vscaler(fu);
+ hscaler_pixengcfg_clken(hs, CLKEN__DISABLE);
+ vscaler_pixengcfg_clken(vs, CLKEN__DISABLE);
+ hscaler_mode(hs, SCALER_NEUTRAL);
+ vscaler_mode(vs, SCALER_NEUTRAL);
+ }
+ if ((!old_dcstate->use_pc && old_dpstate->is_top) ||
+ (old_dcstate->use_pc &&
+ ((!stream_id && old_dpstate->is_left_top) ||
+ (stream_id && old_dpstate->is_right_top)))) {
+ ed = res->ed[stream_id];
+ ed_src = stream_id ?
+ ED_SRC_CONSTFRAME1 : ED_SRC_CONSTFRAME0;
+ extdst_pixengcfg_src_sel(ed, ed_src);
+ }
+
+ fu->ops->disable_src_buf(fu);
+ if (fetchunit_is_fetchdecode(fu)) {
+ fetchdecode_pixengcfg_dynamic_src_sel(fu,
+ FD_SRC_DISABLE);
+ fe->ops->disable_src_buf(fe);
+ }
+
+ if (old_dpstate->need_aux_source && !release_aux_source) {
+ release_aux_source = true;
+ goto again;
+ }
+ }
+}
+
+static void dpu_crtc_atomic_flush(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc), *aux_dpu_crtc = NULL;
+ struct imx_crtc_state *imx_crtc_state = to_imx_crtc_state(crtc->state);
+ struct drm_crtc_state *old_crtc_state =
+ drm_atomic_get_old_crtc_state(state, crtc);
+ struct imx_crtc_state *old_imx_crtc_state =
+ to_imx_crtc_state(old_crtc_state);
+ struct dpu_crtc_state *dcstate = to_dpu_crtc_state(imx_crtc_state);
+ struct dpu_crtc_state *old_dcstate =
+ to_dpu_crtc_state(old_imx_crtc_state);
+ struct dpu_plane *dplane = to_dpu_plane(crtc->primary);
+ struct dpu_plane_state *old_dpstate;
+ struct dpu_plane_res *res = &dplane->grp->res;
+ struct dpu_extdst *ed = res->ed[dplane->stream_id], *aux_ed;
+ struct dpu_fetchunit *fu;
+ lb_sec_sel_t source;
+ struct completion *shdld_done;
+ struct completion *m_content_shdld_done = NULL;
+ struct completion *s_content_shdld_done = NULL;
+ unsigned long ret;
+ int i;
+ bool need_modeset = drm_atomic_crtc_needs_modeset(crtc->state);
+ bool need_wait4fgfcm = false, need_aux_wait4fgfcm = false;
+ bool use_prefetch;
+
+ if (!crtc->state->active && !old_crtc_state->active)
+ return;
+
+ if (!need_modeset && to_disable_dpu_crc(dcstate, old_dcstate))
+ dpu_crtc_disable_crc_source(crtc, old_dcstate->use_pc);
+
+ /*
+ * Scan over the old plane fetchunits to determine whether,
+ * in the next loop, we need to wait for the FrameGen frame
+ * counter to move before disabling DPRC repeat_en.
+ */
+ for (i = 0; i < dpu_crtc->hw_plane_num; i++) {
+ bool aux_source_flag;
+
+ old_dpstate = old_dcstate->dpu_plane_states[i];
+ if (!old_dpstate)
+ continue;
+
+ aux_source_flag = false;
+again1:
+ source = aux_source_flag ?
+ old_dpstate->aux_source : old_dpstate->source;
+ use_prefetch = aux_source_flag ?
+ old_dpstate->use_aux_prefetch :
+ old_dpstate->use_prefetch;
+ fu = source_to_fu(res, source);
+ if (!fu)
+ return;
+
+ if (!fu->ops->is_enabled(fu) && use_prefetch && !need_modeset) {
+ if (aux_source_flag)
+ need_aux_wait4fgfcm = true;
+ else
+ need_wait4fgfcm = true;
+ }
+
+ if (old_dpstate->need_aux_source && !aux_source_flag) {
+ aux_source_flag = true;
+ goto again1;
+ }
+ }
+
+ /*
+ * Sync with the FrameGen frame counter moving so that
+ * we can disable DPRC repeat_en correctly.
+ * FIXME: disable preemption and IRQs to make sure
+ * DPRC repeat_en is disabled ASAP.
+ */
+ if (need_wait4fgfcm || need_aux_wait4fgfcm)
+ framegen_wait_for_frame_counter_moving(dcstate->use_pc ?
+ dpu_crtc->m_fg :
+ dpu_crtc->fg);
+
+ for (i = 0; i < dpu_crtc->hw_plane_num; i++) {
+ struct dpu_fetchunit *fe;
+ struct dpu_hscaler *hs;
+ struct dpu_vscaler *vs;
+ bool aux_source_disable;
+
+ old_dpstate = old_dcstate->dpu_plane_states[i];
+ if (!old_dpstate)
+ continue;
+
+ aux_source_disable = false;
+again2:
+ source = aux_source_disable ?
+ old_dpstate->aux_source : old_dpstate->source;
+ use_prefetch = aux_source_disable ?
+ old_dpstate->use_aux_prefetch :
+ old_dpstate->use_prefetch;
+ fu = source_to_fu(res, source);
+ if (!fu)
+ return;
+
+ if (!fu->ops->is_enabled(fu)) {
+ fu->ops->set_stream_id(fu, DPU_PLANE_SRC_DISABLED);
+ if (fu->dprc && use_prefetch)
+ dprc_disable_repeat_en(fu->dprc);
+ }
+
+ if (!fetchunit_is_fetchdecode(fu))
+ continue;
+
+ fe = fetchdecode_get_fetcheco(fu);
+ if (!fe->ops->is_enabled(fe))
+ fe->ops->set_stream_id(fe, DPU_PLANE_SRC_DISABLED);
+
+ hs = fetchdecode_get_hscaler(fu);
+ if (!hscaler_is_enabled(hs))
+ hscaler_set_stream_id(hs, DPU_PLANE_SRC_DISABLED);
+
+ vs = fetchdecode_get_vscaler(fu);
+ if (!vscaler_is_enabled(vs))
+ vscaler_set_stream_id(vs, DPU_PLANE_SRC_DISABLED);
+
+ if (old_dpstate->need_aux_source && !aux_source_disable) {
+ aux_source_disable = true;
+ goto again2;
+ }
+ }
+
+ if (dcstate->use_pc) {
+ aux_dpu_crtc = dpu_crtc_get_aux_dpu_crtc(dpu_crtc);
+
+ if (dpu_crtc->aux_is_master) {
+ m_content_shdld_done = &aux_dpu_crtc->content_shdld_done;
+ s_content_shdld_done = &dpu_crtc->content_shdld_done;
+ } else {
+ m_content_shdld_done = &dpu_crtc->content_shdld_done;
+ s_content_shdld_done = &aux_dpu_crtc->content_shdld_done;
+ }
+ }
+
+ if (!need_modeset) {
+ enable_irq(dpu_crtc->content_shdld_irq);
+ if (dcstate->use_pc)
+ enable_irq(aux_dpu_crtc->content_shdld_irq);
+
+ if (dcstate->use_pc) {
+ if (extdst_is_master(ed)) {
+ extdst_pixengcfg_sync_trigger(ed);
+ } else {
+ aux_ed = dpu_aux_ed_peek(ed);
+ extdst_pixengcfg_sync_trigger(aux_ed);
+ }
+ } else {
+ extdst_pixengcfg_sync_trigger(ed);
+ }
+
+ if (dcstate->use_pc) {
+ shdld_done = m_content_shdld_done;
+ ret = wait_for_completion_timeout(shdld_done, HZ);
+ if (ret == 0)
+ DRM_WARN("[CRTC:%d:%s] %s: wait for master content shdld done timeout\n",
+ crtc->base.id, crtc->name, __func__);
+
+ shdld_done = s_content_shdld_done;
+ ret = wait_for_completion_timeout(shdld_done, HZ);
+ if (ret == 0)
+ DRM_WARN("[CRTC:%d:%s] %s: wait for slave content shdld done timeout\n",
+ crtc->base.id, crtc->name, __func__);
+ } else {
+ shdld_done = &dpu_crtc->content_shdld_done;
+ ret = wait_for_completion_timeout(shdld_done, HZ);
+ if (ret == 0)
+ DRM_WARN("[CRTC:%d:%s] %s: wait for content shdld done timeout\n",
+ crtc->base.id, crtc->name, __func__);
+ }
+
+ disable_irq(dpu_crtc->content_shdld_irq);
+ if (dcstate->use_pc)
+ disable_irq(aux_dpu_crtc->content_shdld_irq);
+
+ if (dcstate->use_pc) {
+ if (framegen_secondary_requests_to_read_empty_fifo(dpu_crtc->m_fg)) {
+ framegen_secondary_clear_channel_status(dpu_crtc->m_fg);
+ DRM_WARN("[CRTC:%d:%s] %s: master FrameGen requests to read empty FIFO\n",
+ crtc->base.id, crtc->name, __func__);
+ }
+ if (framegen_secondary_requests_to_read_empty_fifo(dpu_crtc->s_fg)) {
+ framegen_secondary_clear_channel_status(dpu_crtc->s_fg);
+ DRM_WARN("[CRTC:%d:%s] %s: slave FrameGen requests to read empty FIFO\n",
+ crtc->base.id, crtc->name, __func__);
+ }
+ } else {
+ if (framegen_secondary_requests_to_read_empty_fifo(dpu_crtc->fg)) {
+ framegen_secondary_clear_channel_status(dpu_crtc->fg);
+ DRM_WARN("[CRTC:%d:%s] %s: FrameGen requests to read empty FIFO\n",
+ crtc->base.id, crtc->name, __func__);
+ }
+ }
+
+ dpu_crtc_queue_state_event(crtc);
+ } else if (!crtc->state->active) {
+ if (old_dcstate->use_pc) {
+ if (extdst_is_master(ed)) {
+ extdst_pixengcfg_sync_trigger(ed);
+ } else {
+ aux_ed = dpu_aux_ed_peek(ed);
+ extdst_pixengcfg_sync_trigger(aux_ed);
+ }
+ } else {
+ extdst_pixengcfg_sync_trigger(ed);
+ }
+ }
+
+ if (!need_modeset && to_enable_dpu_crc(dcstate, old_dcstate))
+ dpu_crtc_enable_crc_source(crtc,
+ dcstate->crc.source, &dcstate->crc.roi);
+}
+
+static void dpu_crtc_mode_set_nofb(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
+ struct dpu_crtc *aux_dpu_crtc = dpu_crtc_get_aux_dpu_crtc(dpu_crtc);
+ struct imx_crtc_state *imx_crtc_state = to_imx_crtc_state(crtc->state);
+ struct dpu_crtc_state *dcstate = to_dpu_crtc_state(imx_crtc_state);
+ struct drm_display_mode *mode = &crtc->state->adjusted_mode;
+ struct dpu_plane *dplane = to_dpu_plane(crtc->primary);
+ struct dpu_plane_res *res = &dplane->grp->res;
+ struct dpu_constframe *pa_cf, *sa_cf;
+ struct dpu_disengcfg *dec;
+ struct dpu_extdst *ed, *plane_ed;
+ struct dpu_framegen *fg;
+ struct dpu_tcon *tcon;
+ struct dpu_store *st;
+ struct drm_encoder *encoder;
+ unsigned long encoder_type = DRM_MODE_ENCODER_NONE;
+ unsigned int stream_id;
+ int crtc_hdisplay = dcstate->use_pc ?
+ (mode->crtc_hdisplay >> 1) : mode->crtc_hdisplay;
+ extdst_src_sel_t ed_src;
+ bool cfg_aux_pipe = false;
+
+ DRM_DEBUG_KMS("[CRTC:%d:%s] %s: mode->hdisplay: %d\n",
+ crtc->base.id, crtc->name, __func__, mode->hdisplay);
+ DRM_DEBUG_KMS("[CRTC:%d:%s] %s: mode->vdisplay: %d\n",
+ crtc->base.id, crtc->name, __func__, mode->vdisplay);
+ DRM_DEBUG_KMS("[CRTC:%d:%s] %s: mode->clock: %dKHz\n",
+ crtc->base.id, crtc->name, __func__, mode->clock);
+ DRM_DEBUG_KMS("[CRTC:%d:%s] %s: mode->vrefresh: %dHz\n",
+ crtc->base.id, crtc->name, __func__,
+ drm_mode_vrefresh(mode));
+ if (dcstate->use_pc)
+ DRM_DEBUG_KMS("[CRTC:%d:%s] %s: use pixel combiner\n",
+ crtc->base.id, crtc->name, __func__);
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ if (encoder->crtc == crtc) {
+ encoder_type = encoder->encoder_type;
+ break;
+ }
+ }
+
+again:
+ if (cfg_aux_pipe) {
+ pa_cf = dpu_crtc->aux_pa_cf;
+ sa_cf = dpu_crtc->aux_sa_cf;
+ dec = dpu_crtc->aux_dec;
+ ed = dpu_crtc->aux_ed;
+ fg = dpu_crtc->aux_fg;
+ tcon = dpu_crtc->aux_tcon;
+ st = aux_dpu_crtc->st;
+ stream_id = dpu_crtc->stream_id ^ 1;
+ } else {
+ pa_cf = dpu_crtc->pa_cf;
+ sa_cf = dpu_crtc->sa_cf;
+ dec = dpu_crtc->dec;
+ ed = dpu_crtc->ed;
+ fg = dpu_crtc->fg;
+ tcon = dpu_crtc->tcon;
+ st = dpu_crtc->st;
+ stream_id = dpu_crtc->stream_id;
+ }
+
+ if (dcstate->use_pc) {
+ store_pixengcfg_syncmode_fixup(st, true);
+ framegen_syncmode_fixup(fg, !framegen_is_master(fg));
+ framegen_syncmode(fg, framegen_is_master(fg) ?
+ FGSYNCMODE__MASTER : FGSYNCMODE__SLAVE_ONCE);
+ } else {
+ store_pixengcfg_syncmode_fixup(st, false);
+ framegen_syncmode_fixup(fg, false);
+ framegen_syncmode(fg, FGSYNCMODE__OFF);
+ }
+
+ framegen_cfg_videomode(fg, mode, dcstate->use_pc, encoder_type);
+ framegen_displaymode(fg, FGDM__SEC_ON_TOP);
+
+ framegen_panic_displaymode(fg, FGDM__TEST);
+
+ tcon_cfg_videomode(tcon, mode, dcstate->use_pc);
+ tcon_set_fmt(tcon, imx_crtc_state->bus_format);
+ tcon_configure_pc(tcon, stream_id, mode->crtc_hdisplay,
+ dcstate->use_pc ? PC_COMBINE : PC_BYPASS, 0);
+
+ constframe_framedimensions(pa_cf, crtc_hdisplay, mode->crtc_vdisplay);
+ constframe_framedimensions(sa_cf, crtc_hdisplay, mode->crtc_vdisplay);
+ constframe_constantcolor(sa_cf, 0, 0, 0, 0);
+
+ ed_src = stream_id ? ED_SRC_CONSTFRAME5 : ED_SRC_CONSTFRAME4;
+ extdst_pixengcfg_src_sel(ed, ed_src);
+
+ plane_ed = res->ed[stream_id];
+ ed_src = stream_id ? ED_SRC_CONSTFRAME1 : ED_SRC_CONSTFRAME0;
+ extdst_pixengcfg_src_sel(plane_ed, ed_src);
+
+ if (dcstate->use_pc && !cfg_aux_pipe) {
+ cfg_aux_pipe = true;
+ goto again;
+ }
+}
+
+static const struct drm_crtc_helper_funcs dpu_helper_funcs = {
+ .mode_set_nofb = dpu_crtc_mode_set_nofb,
+ .atomic_check = dpu_crtc_atomic_check,
+ .atomic_begin = dpu_crtc_atomic_begin,
+ .atomic_flush = dpu_crtc_atomic_flush,
+ .atomic_enable = dpu_crtc_atomic_enable,
+ .atomic_disable = dpu_crtc_atomic_disable,
+};
+
+static void dpu_crtc_put_resources(struct dpu_crtc *dpu_crtc)
+{
+ if (!IS_ERR_OR_NULL(dpu_crtc->pa_cf))
+ dpu_cf_put(dpu_crtc->pa_cf);
+ if (!IS_ERR_OR_NULL(dpu_crtc->sa_cf))
+ dpu_cf_put(dpu_crtc->sa_cf);
+ if (!IS_ERR_OR_NULL(dpu_crtc->dec))
+ dpu_dec_put(dpu_crtc->dec);
+ if (!IS_ERR_OR_NULL(dpu_crtc->ed))
+ dpu_ed_put(dpu_crtc->ed);
+ if (!IS_ERR_OR_NULL(dpu_crtc->fg))
+ dpu_fg_put(dpu_crtc->fg);
+ if (!IS_ERR_OR_NULL(dpu_crtc->sig))
+ dpu_sig_put(dpu_crtc->sig);
+ if (!IS_ERR_OR_NULL(dpu_crtc->tcon))
+ dpu_tcon_put(dpu_crtc->tcon);
+}
+
+static int dpu_crtc_get_resources(struct dpu_crtc *dpu_crtc)
+{
+ struct dpu_soc *dpu = dev_get_drvdata(dpu_crtc->dev->parent);
+ unsigned int stream_id = dpu_crtc->stream_id;
+ int ret;
+
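+	/*
+	 * Acquire the display units for this stream.  Each *_get() returns
+	 * an ERR_PTR on failure; the dpu_aux_*_peek() helpers look up the
+	 * sibling stream's units for dual-stream (pixel combiner) setups.
+	 */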
+ dpu_crtc->pa_cf = dpu_cf_get(dpu, stream_id + 4);
+ if (IS_ERR(dpu_crtc->pa_cf)) {
+ ret = PTR_ERR(dpu_crtc->pa_cf);
+ goto err_out;
+ }
+ dpu_crtc->aux_pa_cf = dpu_aux_cf_peek(dpu_crtc->pa_cf);
+
+ dpu_crtc->sa_cf = dpu_cf_get(dpu, stream_id);
+ if (IS_ERR(dpu_crtc->sa_cf)) {
+ ret = PTR_ERR(dpu_crtc->sa_cf);
+ goto err_out;
+ }
+ dpu_crtc->aux_sa_cf = dpu_aux_cf_peek(dpu_crtc->sa_cf);
+
+ dpu_crtc->dec = dpu_dec_get(dpu, stream_id);
+ if (IS_ERR(dpu_crtc->dec)) {
+ ret = PTR_ERR(dpu_crtc->dec);
+ goto err_out;
+ }
+ dpu_crtc->aux_dec = dpu_aux_dec_peek(dpu_crtc->dec);
+
+ dpu_crtc->ed = dpu_ed_get(dpu, stream_id + 4);
+ if (IS_ERR(dpu_crtc->ed)) {
+ ret = PTR_ERR(dpu_crtc->ed);
+ goto err_out;
+ }
+ dpu_crtc->aux_ed = dpu_aux_ed_peek(dpu_crtc->ed);
+
+ dpu_crtc->fg = dpu_fg_get(dpu, stream_id);
+ if (IS_ERR(dpu_crtc->fg)) {
+ ret = PTR_ERR(dpu_crtc->fg);
+ goto err_out;
+ }
+ dpu_crtc->aux_fg = dpu_aux_fg_peek(dpu_crtc->fg);
+
+ dpu_crtc->sig = dpu_sig_get(dpu, stream_id);
+ if (IS_ERR(dpu_crtc->sig)) {
+ ret = PTR_ERR(dpu_crtc->sig);
+ goto err_out;
+ }
+ dpu_crtc->aux_sig = dpu_aux_sig_peek(dpu_crtc->sig);
+
+ dpu_crtc->tcon = dpu_tcon_get(dpu, stream_id);
+ if (IS_ERR(dpu_crtc->tcon)) {
+ ret = PTR_ERR(dpu_crtc->tcon);
+ goto err_out;
+ }
+ dpu_crtc->aux_tcon = dpu_aux_tcon_peek(dpu_crtc->tcon);
+
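+	/* map the master/slave aliases onto the own and auxiliary units */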
+ if (dpu_crtc->aux_is_master) {
+ dpu_crtc->m_pa_cf = dpu_crtc->aux_pa_cf;
+ dpu_crtc->m_sa_cf = dpu_crtc->aux_sa_cf;
+ dpu_crtc->m_dec = dpu_crtc->aux_dec;
+ dpu_crtc->m_ed = dpu_crtc->aux_ed;
+ dpu_crtc->m_fg = dpu_crtc->aux_fg;
+ dpu_crtc->m_tcon = dpu_crtc->aux_tcon;
+
+ dpu_crtc->s_pa_cf = dpu_crtc->pa_cf;
+ dpu_crtc->s_sa_cf = dpu_crtc->sa_cf;
+ dpu_crtc->s_dec = dpu_crtc->dec;
+ dpu_crtc->s_ed = dpu_crtc->ed;
+ dpu_crtc->s_fg = dpu_crtc->fg;
+ dpu_crtc->s_tcon = dpu_crtc->tcon;
+ } else {
+ dpu_crtc->m_pa_cf = dpu_crtc->pa_cf;
+ dpu_crtc->m_sa_cf = dpu_crtc->sa_cf;
+ dpu_crtc->m_dec = dpu_crtc->dec;
+ dpu_crtc->m_ed = dpu_crtc->ed;
+ dpu_crtc->m_fg = dpu_crtc->fg;
+ dpu_crtc->m_tcon = dpu_crtc->tcon;
+
+ dpu_crtc->s_pa_cf = dpu_crtc->aux_pa_cf;
+ dpu_crtc->s_sa_cf = dpu_crtc->aux_sa_cf;
+ dpu_crtc->s_dec = dpu_crtc->aux_dec;
+ dpu_crtc->s_ed = dpu_crtc->aux_ed;
+ dpu_crtc->s_fg = dpu_crtc->aux_fg;
+ dpu_crtc->s_tcon = dpu_crtc->aux_tcon;
+ }
+
+ return 0;
+err_out:
+ dpu_crtc_put_resources(dpu_crtc);
+
+ return ret;
+}
+
+static int dpu_crtc_init(struct dpu_crtc *dpu_crtc,
+ struct dpu_client_platformdata *pdata, struct drm_device *drm)
+{
+ struct dpu_soc *dpu = dev_get_drvdata(dpu_crtc->dev->parent);
+ struct device *dev = dpu_crtc->dev;
+ struct drm_crtc *crtc = &dpu_crtc->base;
+ struct dpu_plane_grp *plane_grp = pdata->plane_grp;
+ unsigned int stream_id = pdata->stream_id;
+ int i, ret;
+
+ init_completion(&dpu_crtc->safety_shdld_done);
+ init_completion(&dpu_crtc->content_shdld_done);
+ init_completion(&dpu_crtc->dec_shdld_done);
+ init_completion(&dpu_crtc->crc_shdld_done);
+ init_completion(&dpu_crtc->aux_crc_done);
+
+ dpu_crtc->stream_id = stream_id;
+ dpu_crtc->crtc_grp_id = pdata->di_grp_id;
+ dpu_crtc->hw_plane_num = plane_grp->hw_plane_num;
+ dpu_crtc->syncmode_min_prate = dpu_get_syncmode_min_prate(dpu);
+ dpu_crtc->singlemode_max_width = dpu_get_singlemode_max_width(dpu);
+ dpu_crtc->master_stream_id = dpu_get_master_stream_id(dpu);
+	dpu_crtc->aux_is_master = dpu_crtc->master_stream_id != stream_id;
+ dpu_crtc->st = pdata->st9;
+
+ dpu_crtc->plane = devm_kcalloc(dev, dpu_crtc->hw_plane_num,
+ sizeof(*dpu_crtc->plane), GFP_KERNEL);
+ if (!dpu_crtc->plane)
+ return -ENOMEM;
+
+ ret = dpu_crtc_get_resources(dpu_crtc);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "getting resources failed with %d.\n", ret);
+ return ret;
+ }
+
+ plane_grp->res.fg[stream_id] = dpu_crtc->fg;
+ dpu_crtc->plane[0] = dpu_plane_create(drm, 0, stream_id, plane_grp,
+ DRM_PLANE_TYPE_PRIMARY);
+ if (IS_ERR(dpu_crtc->plane[0])) {
+ ret = PTR_ERR(dpu_crtc->plane[0]);
+ DRM_DEV_ERROR(dev,
+ "initializing plane0 failed with %d.\n", ret);
+ goto err_put_resources;
+ }
+
+ crtc->port = pdata->of_node;
+ drm_crtc_helper_add(crtc, &dpu_helper_funcs);
+ ret = drm_crtc_init_with_planes(drm, crtc, &dpu_crtc->plane[0]->base, NULL,
+ &dpu_crtc_funcs, NULL);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "adding crtc failed with %d.\n", ret);
+ goto err_put_resources;
+ }
+
+ for (i = 1; i < dpu_crtc->hw_plane_num; i++) {
+ dpu_crtc->plane[i] = dpu_plane_create(drm,
+ drm_crtc_mask(&dpu_crtc->base),
+ stream_id, plane_grp,
+ DRM_PLANE_TYPE_OVERLAY);
+ if (IS_ERR(dpu_crtc->plane[i])) {
+ ret = PTR_ERR(dpu_crtc->plane[i]);
+ DRM_DEV_ERROR(dev,
+ "initializing plane%d failed with %d.\n",
+ i, ret);
+ goto err_put_resources;
+ }
+ }
+
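+	/*
+	 * All interrupts are requested up front and kept disabled until
+	 * needed.  IRQ_DISABLE_UNLAZY makes disable_irq() mask the line at
+	 * the irqchip immediately instead of lazily on the next interrupt.
+	 */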
+ dpu_crtc->vbl_irq = dpu_map_irq(dpu, stream_id ?
+ IRQ_DISENGCFG_FRAMECOMPLETE1 :
+ IRQ_DISENGCFG_FRAMECOMPLETE0);
+ irq_set_status_flags(dpu_crtc->vbl_irq, IRQ_DISABLE_UNLAZY);
+ ret = devm_request_irq(dev, dpu_crtc->vbl_irq, dpu_vbl_irq_handler, 0,
+ "imx_drm", dpu_crtc);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dev, "vblank irq request failed with %d.\n", ret);
+ goto err_put_resources;
+ }
+ disable_irq(dpu_crtc->vbl_irq);
+
+ dpu_crtc->safety_shdld_irq = dpu_map_irq(dpu, stream_id ?
+ IRQ_EXTDST5_SHDLOAD : IRQ_EXTDST4_SHDLOAD);
+ irq_set_status_flags(dpu_crtc->safety_shdld_irq, IRQ_DISABLE_UNLAZY);
+ ret = devm_request_irq(dev, dpu_crtc->safety_shdld_irq,
+ dpu_safety_shdld_irq_handler, 0, "imx_drm",
+ dpu_crtc);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dev,
+ "safety shadow load irq request failed with %d.\n",
+ ret);
+ goto err_put_resources;
+ }
+ disable_irq(dpu_crtc->safety_shdld_irq);
+
+ dpu_crtc->content_shdld_irq = dpu_map_irq(dpu, stream_id ?
+ IRQ_EXTDST1_SHDLOAD : IRQ_EXTDST0_SHDLOAD);
+ irq_set_status_flags(dpu_crtc->content_shdld_irq, IRQ_DISABLE_UNLAZY);
+ ret = devm_request_irq(dev, dpu_crtc->content_shdld_irq,
+ dpu_content_shdld_irq_handler, 0, "imx_drm",
+ dpu_crtc);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dev,
+ "content shadow load irq request failed with %d.\n",
+ ret);
+ goto err_put_resources;
+ }
+ disable_irq(dpu_crtc->content_shdld_irq);
+
+ dpu_crtc->dec_shdld_irq = dpu_map_irq(dpu, stream_id ?
+ IRQ_DISENGCFG_SHDLOAD1 : IRQ_DISENGCFG_SHDLOAD0);
+ irq_set_status_flags(dpu_crtc->dec_shdld_irq, IRQ_DISABLE_UNLAZY);
+ ret = devm_request_irq(dev, dpu_crtc->dec_shdld_irq,
+ dpu_dec_shdld_irq_handler, 0, "imx_drm",
+ dpu_crtc);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dev,
+ "DEC shadow load irq request failed with %d.\n",
+ ret);
+ goto err_put_resources;
+ }
+ disable_irq(dpu_crtc->dec_shdld_irq);
+
+ dpu_crtc->crc_valid_irq = dpu_map_irq(dpu, stream_id ?
+ IRQ_SIG1_VALID : IRQ_SIG0_VALID);
+ irq_set_status_flags(dpu_crtc->crc_valid_irq, IRQ_DISABLE_UNLAZY);
+ ret = devm_request_threaded_irq(dev, dpu_crtc->crc_valid_irq, NULL,
+ dpu_crc_valid_irq_threaded_handler,
+ IRQF_ONESHOT, "imx_drm", dpu_crtc);
+ if (ret < 0) {
+		DRM_DEV_ERROR(dev,
+			      "CRC valid irq request failed with %d.\n",
+			      ret);
+ goto err_put_resources;
+ }
+ disable_irq(dpu_crtc->crc_valid_irq);
+
+ dpu_crtc->crc_shdld_irq = dpu_map_irq(dpu, stream_id ?
+ IRQ_SIG1_SHDLOAD : IRQ_SIG0_SHDLOAD);
+ irq_set_status_flags(dpu_crtc->crc_shdld_irq, IRQ_DISABLE_UNLAZY);
+ ret = devm_request_irq(dev, dpu_crtc->crc_shdld_irq,
+ dpu_crc_shdld_irq_handler, 0, "imx_drm",
+ dpu_crtc);
+ if (ret < 0) {
+		DRM_DEV_ERROR(dev,
+			      "CRC shadow load irq request failed with %d.\n",
+			      ret);
+ goto err_put_resources;
+ }
+ disable_irq(dpu_crtc->crc_shdld_irq);
+
+ return 0;
+
+err_put_resources:
+ dpu_crtc_put_resources(dpu_crtc);
+
+ return ret;
+}
+
+static int dpu_crtc_bind(struct device *dev, struct device *master, void *data)
+{
+ struct dpu_client_platformdata *pdata = dev->platform_data;
+ struct drm_device *drm = data;
+ struct dpu_crtc *dpu_crtc = dev_get_drvdata(dev);
+ int ret;
+
+ dpu_crtc->dev = dev;
+
+ drm->mode_config.max_width = 5120;
+ drm->mode_config.max_height = 4096;
+
+ ret = dpu_crtc_init(dpu_crtc, pdata, drm);
+ if (ret)
+ return ret;
+
+ if (!drm->mode_config.funcs)
+ drm->mode_config.funcs = &dpu_drm_mode_config_funcs;
+
+ return 0;
+}
+
+static void dpu_crtc_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct dpu_crtc *dpu_crtc = dev_get_drvdata(dev);
+
+ dpu_crtc_put_resources(dpu_crtc);
+}
+
+static const struct component_ops dpu_crtc_ops = {
+ .bind = dpu_crtc_bind,
+ .unbind = dpu_crtc_unbind,
+};
+
+static int dpu_crtc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct dpu_crtc *dpu_crtc;
+
+ if (!dev->platform_data)
+ return -EINVAL;
+
+ dpu_crtc = devm_kzalloc(dev, sizeof(*dpu_crtc), GFP_KERNEL);
+ if (!dpu_crtc)
+ return -ENOMEM;
+
+ dev_set_drvdata(dev, dpu_crtc);
+
+ return component_add(dev, &dpu_crtc_ops);
+}
+
+static int dpu_crtc_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &dpu_crtc_ops);
+ return 0;
+}
+
+static struct platform_driver dpu_crtc_driver = {
+ .driver = {
+ .name = "imx-dpu-crtc",
+ },
+ .probe = dpu_crtc_probe,
+ .remove = dpu_crtc_remove,
+};
+module_platform_driver(dpu_crtc_driver);
+
+MODULE_AUTHOR("NXP Semiconductor");
+MODULE_DESCRIPTION("i.MX DPU CRTC");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:imx-dpu-crtc");
diff --git a/drivers/gpu/drm/imx/dpu/dpu-crtc.h b/drivers/gpu/drm/imx/dpu/dpu-crtc.h
new file mode 100644
index 000000000000..423fe1339cf2
--- /dev/null
+++ b/drivers/gpu/drm/imx/dpu/dpu-crtc.h
@@ -0,0 +1,115 @@
+/*
+ * Copyright 2017-2020 NXP
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#ifndef _DPU_CRTC_H_
+#define _DPU_CRTC_H_
+
+#include <drm/drm_vblank.h>
+#include <video/dpu.h>
+#include "dpu-plane.h"
+#include "../imx-drm.h"
+
+struct dpu_crtc {
+ struct device *dev;
+ struct drm_crtc base;
+ struct imx_drm_crtc *imx_crtc;
+ struct dpu_constframe *pa_cf;
+ struct dpu_constframe *sa_cf;
+ struct dpu_disengcfg *dec;
+ struct dpu_extdst *ed;
+ struct dpu_framegen *fg;
+ struct dpu_signature *sig;
+ struct dpu_tcon *tcon;
+ struct dpu_store *st;
+ struct dpu_constframe *aux_pa_cf;
+ struct dpu_constframe *aux_sa_cf;
+ struct dpu_disengcfg *aux_dec;
+ struct dpu_extdst *aux_ed;
+ struct dpu_framegen *aux_fg;
+ struct dpu_signature *aux_sig;
+ struct dpu_tcon *aux_tcon;
+ /* master */
+ struct dpu_constframe *m_pa_cf;
+ struct dpu_constframe *m_sa_cf;
+ struct dpu_disengcfg *m_dec;
+ struct dpu_extdst *m_ed;
+ struct dpu_framegen *m_fg;
+ struct dpu_tcon *m_tcon;
+ /* slave */
+ struct dpu_constframe *s_pa_cf;
+ struct dpu_constframe *s_sa_cf;
+ struct dpu_disengcfg *s_dec;
+ struct dpu_extdst *s_ed;
+ struct dpu_framegen *s_fg;
+ struct dpu_tcon *s_tcon;
+ struct dpu_plane **plane;
+ unsigned int hw_plane_num;
+ unsigned int stream_id;
+ unsigned int crtc_grp_id;
+ unsigned int syncmode_min_prate;
+ unsigned int singlemode_max_width;
+ unsigned int master_stream_id;
+ int vbl_irq;
+ int safety_shdld_irq;
+ int content_shdld_irq;
+ int dec_shdld_irq;
+ int crc_valid_irq;
+ int crc_shdld_irq;
+
+ bool aux_is_master;
+ bool use_dual_crc;
+ bool crc_is_enabled;
+
+ struct completion safety_shdld_done;
+ struct completion content_shdld_done;
+ struct completion dec_shdld_done;
+ struct completion crc_shdld_done;
+ struct completion aux_crc_done;
+
+ struct drm_pending_vblank_event *event;
+
+ u32 crc_red;
+ u32 crc_green;
+ u32 crc_blue;
+ u32 dual_crc_flag;
+};
+
+struct dpu_crc {
+ enum dpu_crc_source source;
+ struct drm_rect roi;
+};
+
+struct dpu_crtc_state {
+ struct imx_crtc_state imx_crtc_state;
+ struct dpu_plane_state **dpu_plane_states;
+ struct dpu_crc crc;
+ bool use_pc;
+};
+
+static inline struct dpu_crtc_state *to_dpu_crtc_state(struct imx_crtc_state *s)
+{
+ return container_of(s, struct dpu_crtc_state, imx_crtc_state);
+}
+
+static inline struct dpu_crtc *to_dpu_crtc(struct drm_crtc *crtc)
+{
+ return container_of(crtc, struct dpu_crtc, base);
+}
+
+struct dpu_plane_state **
+crtc_state_get_dpu_plane_states(struct drm_crtc_state *state);
+
+struct dpu_crtc *dpu_crtc_get_aux_dpu_crtc(struct dpu_crtc *dpu_crtc);
+
+#endif
diff --git a/drivers/gpu/drm/imx/dpu/dpu-kms.c b/drivers/gpu/drm/imx/dpu/dpu-kms.c
new file mode 100644
index 000000000000..fd4897012810
--- /dev/null
+++ b/drivers/gpu/drm/imx/dpu/dpu-kms.c
@@ -0,0 +1,728 @@
+/*
+ * Copyright 2017-2020 NXP
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include <drm/drm_fourcc.h>
+#include <drm/drm_vblank.h>
+#include <drm/drm_print.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <linux/sort.h>
+#include <video/dpu.h>
+#include "dpu-crtc.h"
+#include "dpu-plane.h"
+#include "../imx-drm.h"
+
+static struct drm_plane_state **
+dpu_atomic_alloc_tmp_planes_per_crtc(struct drm_device *dev)
+{
+ int total_planes = dev->mode_config.num_total_plane;
+ struct drm_plane_state **states;
+
+ states = kmalloc_array(total_planes, sizeof(*states), GFP_KERNEL);
+ if (!states)
+ return ERR_PTR(-ENOMEM);
+
+ return states;
+}
+
+static int zpos_cmp(const void *a, const void *b)
+{
+ const struct drm_plane_state *sa = *(struct drm_plane_state **)a;
+ const struct drm_plane_state *sb = *(struct drm_plane_state **)b;
+
+ return sa->normalized_zpos - sb->normalized_zpos;
+}
+
+static int dpu_atomic_sort_planes_per_crtc(struct drm_crtc_state *crtc_state,
+ struct drm_plane_state **states)
+{
+ struct drm_atomic_state *state = crtc_state->state;
+ struct drm_device *dev = state->dev;
+ struct drm_plane *plane;
+ int n = 0;
+
+ drm_for_each_plane_mask(plane, dev, crtc_state->plane_mask) {
+ struct drm_plane_state *plane_state =
+ drm_atomic_get_plane_state(state, plane);
+ if (IS_ERR(plane_state))
+ return PTR_ERR(plane_state);
+ states[n++] = plane_state;
+ }
+
+ sort(states, n, sizeof(*states), zpos_cmp, NULL);
+
+ return n;
+}
+
+static void
+dpu_atomic_compute_plane_lrx_per_crtc(struct drm_crtc_state *crtc_state,
+ struct drm_plane_state **states, int n)
+{
+ struct dpu_plane_state *dpstate;
+ struct drm_plane_state *plane_state;
+ int i;
+ int half_hdisplay = crtc_state->adjusted_mode.hdisplay >> 1;
+ bool lo, ro, bo;
+
+ /* compute left/right_crtc_x if pixel combiner is needed */
+ for (i = 0; i < n; i++) {
+ plane_state = states[i];
+ dpstate = to_dpu_plane_state(plane_state);
+
+ lo = dpstate->left_src_w && !dpstate->right_src_w;
+ ro = !dpstate->left_src_w && dpstate->right_src_w;
+ bo = dpstate->left_src_w && dpstate->right_src_w;
+
+ if (lo || bo) {
+ dpstate->left_crtc_x = plane_state->crtc_x;
+ dpstate->right_crtc_x = 0;
+ } else if (ro) {
+ dpstate->left_crtc_x = 0;
+ dpstate->right_crtc_x =
+ plane_state->crtc_x - half_hdisplay;
+ }
+ }
+}
+
+static void
+dpu_atomic_set_top_plane_per_crtc(struct drm_plane_state **states, int n,
+ bool use_pc)
+{
+ struct dpu_plane_state *dpstate;
+ bool found_l_top = false, found_r_top = false;
+ int i;
+
+ for (i = n - 1; i >= 0; i--) {
+ dpstate = to_dpu_plane_state(states[i]);
+ if (use_pc) {
+ if (dpstate->left_src_w && !found_l_top) {
+ dpstate->is_left_top = true;
+ found_l_top = true;
+ } else {
+ dpstate->is_left_top = false;
+ }
+
+ if (dpstate->right_src_w && !found_r_top) {
+ dpstate->is_right_top = true;
+ found_r_top = true;
+ } else {
+ dpstate->is_right_top = false;
+ }
+ } else {
+			dpstate->is_top = (i == (n - 1));
+ }
+ }
+}
+
+static int
+dpu_atomic_assign_plane_source_per_crtc(struct drm_plane_state **states,
+ int n, bool use_pc)
+{
+ struct dpu_plane_state *dpstate;
+ struct dpu_plane *dplane;
+ struct dpu_plane_grp *grp;
+ struct drm_framebuffer *fb;
+ struct dpu_fetchunit *fu;
+ struct dpu_fetchunit *fe;
+ struct dpu_hscaler *hs;
+ struct dpu_vscaler *vs;
+ lb_prim_sel_t stage;
+ dpu_block_id_t blend;
+ unsigned int sid, src_sid;
+ unsigned int num_planes;
+	int i, j, k = 0, m;
+ int total_asrc_num;
+ int s0_layer_cnt = 0, s1_layer_cnt = 0;
+ int s0_n = 0, s1_n = 0;
+ u32 src_a_mask, cap_mask, fe_mask, hs_mask, vs_mask;
+ bool need_fetcheco, need_hscaler, need_vscaler;
+ bool fmt_is_yuv;
+ bool alloc_aux_source;
+
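+	/* count the active planes per display stream */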
+ if (use_pc) {
+ for (i = 0; i < n; i++) {
+ dpstate = to_dpu_plane_state(states[i]);
+
+ if (dpstate->left_src_w)
+ s0_n++;
+
+ if (dpstate->right_src_w)
+ s1_n++;
+ }
+ } else {
+ s0_n = n;
+ s1_n = n;
+ }
+
+ /* for active planes only */
+ for (i = 0; i < n; i++) {
+ dpstate = to_dpu_plane_state(states[i]);
+ dplane = to_dpu_plane(states[i]->plane);
+ fb = states[i]->fb;
+ num_planes = fb->format->num_planes;
+ fmt_is_yuv = drm_format_is_yuv(fb->format->format);
+ grp = dplane->grp;
+ alloc_aux_source = false;
+
+ if (use_pc)
+ sid = dpstate->left_src_w ? 0 : 1;
+ else
+ sid = dplane->stream_id;
+
+again:
+ if (alloc_aux_source)
+ sid ^= 1;
+
+ need_fetcheco = (num_planes > 1);
+ need_hscaler = (states[i]->src_w >> 16 != states[i]->crtc_w);
+ need_vscaler = (states[i]->src_h >> 16 != states[i]->crtc_h);
+
+		src_a_mask = grp->src_a_mask;
+		fe_mask = 0;
+		hs_mask = 0;
+		vs_mask = 0;
+
+		/* hweight32() avoids casting a u32 to unsigned long for bitops */
+		total_asrc_num = hweight32(src_a_mask);
+
+ /* assign source */
+ mutex_lock(&grp->mutex);
+		for (j = 0; j < total_asrc_num; j++) {
+			k = ffs(src_a_mask) - 1;
+			if (k < 0) {
+				mutex_unlock(&grp->mutex);
+				return -EINVAL;
+			}
+
+			fu = source_to_fu(&grp->res, sources[k]);
+			if (!fu) {
+				mutex_unlock(&grp->mutex);
+				return -EINVAL;
+			}
+
+ /* avoid on-the-fly/hot migration */
+ src_sid = fu->ops->get_stream_id(fu);
+ if (src_sid && src_sid != BIT(sid))
+ goto next;
+
+ if (fetchunit_is_fetchdecode(fu)) {
+ cap_mask = fetchdecode_get_vproc_mask(fu);
+
+ if (need_fetcheco) {
+ fe = fetchdecode_get_fetcheco(fu);
+
+ /* avoid on-the-fly/hot migration */
+					src_sid = fe->ops->get_stream_id(fe);
+ if (src_sid && src_sid != BIT(sid))
+ goto next;
+
+ /* fetch unit has the fetcheco cap? */
+ if (!dpu_vproc_has_fetcheco_cap(cap_mask))
+ goto next;
+
+ fe_mask =
+ dpu_vproc_get_fetcheco_cap(cap_mask);
+
+ /* fetcheco available? */
+ if (grp->src_use_vproc_mask & fe_mask)
+ goto next;
+ }
+
+ if (need_hscaler) {
+ hs = fetchdecode_get_hscaler(fu);
+
+ /* avoid on-the-fly/hot migration */
+ src_sid = hscaler_get_stream_id(hs);
+ if (src_sid && src_sid != BIT(sid))
+ goto next;
+
+					/* fetch unit has the hscale cap? */
+ if (!dpu_vproc_has_hscale_cap(cap_mask))
+ goto next;
+
+ hs_mask =
+ dpu_vproc_get_hscale_cap(cap_mask);
+
+ /* hscaler available? */
+ if (grp->src_use_vproc_mask & hs_mask)
+ goto next;
+ }
+
+ if (need_vscaler) {
+ vs = fetchdecode_get_vscaler(fu);
+
+ /* avoid on-the-fly/hot migration */
+ src_sid = vscaler_get_stream_id(vs);
+ if (src_sid && src_sid != BIT(sid))
+ goto next;
+
+ /* fetch unit has the vscale cap? */
+ if (!dpu_vproc_has_vscale_cap(cap_mask))
+ goto next;
+
+ vs_mask =
+ dpu_vproc_get_vscale_cap(cap_mask);
+
+ /* vscaler available? */
+ if (grp->src_use_vproc_mask & vs_mask)
+ goto next;
+ }
+ } else {
+ if (fmt_is_yuv || need_fetcheco ||
+ need_hscaler || need_vscaler)
+ goto next;
+ }
+
+ grp->src_a_mask &= ~BIT(k);
+ grp->src_use_vproc_mask |= fe_mask | hs_mask | vs_mask;
+ break;
+next:
+ src_a_mask &= ~BIT(k);
+ fe_mask = 0;
+ hs_mask = 0;
+ vs_mask = 0;
+ }
+ mutex_unlock(&grp->mutex);
+
+ if (j == total_asrc_num)
+ return -EINVAL;
+
+ if (alloc_aux_source)
+ dpstate->aux_source = sources[k];
+ else
+ dpstate->source = sources[k];
+
+ /* assign stage and blend */
+ if (sid) {
+ m = grp->hw_plane_num - (s1_n - s1_layer_cnt);
+ stage = s1_layer_cnt ? stages[m - 1] : cf_stages[sid];
+ blend = blends[m];
+
+ s1_layer_cnt++;
+ } else {
+ stage = s0_layer_cnt ?
+ stages[s0_layer_cnt - 1] : cf_stages[sid];
+ blend = blends[s0_layer_cnt];
+
+ s0_layer_cnt++;
+ }
+
+ if (alloc_aux_source) {
+ dpstate->aux_stage = stage;
+ dpstate->aux_blend = blend;
+ } else {
+ dpstate->stage = stage;
+ dpstate->blend = blend;
+ }
+
+ if (dpstate->need_aux_source && !alloc_aux_source) {
+ alloc_aux_source = true;
+ goto again;
+ }
+ }
+
+ return 0;
+}
+
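+/*
+ * A CRTC which is neither part of this atomic commit nor a possible
+ * CRTC of any plane in the commit may get its pipe states put (dropped
+ * from the commit) again later on.
+ */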
+static void
+dpu_atomic_mark_pipe_states_prone_to_put_per_crtc(struct drm_crtc *crtc,
+ u32 crtc_mask,
+ struct drm_atomic_state *state,
+ bool *puts)
+{
+ struct drm_plane *plane;
+ struct drm_plane_state *plane_state;
+ bool found_pstate = false;
+ int i;
+
+ if ((crtc_mask & drm_crtc_mask(crtc)) == 0) {
+ for_each_new_plane_in_state(state, plane, plane_state, i) {
+ if (plane->possible_crtcs & drm_crtc_mask(crtc)) {
+ found_pstate = true;
+ break;
+ }
+ }
+
+ if (!found_pstate)
+ puts[drm_crtc_index(crtc)] = true;
+ }
+}
+
+static void
+dpu_atomic_put_plane_state(struct drm_atomic_state *state,
+ struct drm_plane *plane)
+{
+ int index = drm_plane_index(plane);
+
+ plane->funcs->atomic_destroy_state(plane, state->planes[index].state);
+ state->planes[index].ptr = NULL;
+ state->planes[index].state = NULL;
+
+ drm_modeset_unlock(&plane->mutex);
+}
+
+static void
+dpu_atomic_put_crtc_state(struct drm_atomic_state *state,
+ struct drm_crtc *crtc)
+{
+ int index = drm_crtc_index(crtc);
+
+ crtc->funcs->atomic_destroy_state(crtc, state->crtcs[index].state);
+ state->crtcs[index].ptr = NULL;
+ state->crtcs[index].state = NULL;
+
+ drm_modeset_unlock(&crtc->mutex);
+}
+
+static void
+dpu_atomic_put_possible_states_per_crtc(struct drm_crtc_state *crtc_state)
+{
+ struct drm_atomic_state *state = crtc_state->state;
+ struct drm_crtc *crtc = crtc_state->crtc;
+ struct drm_crtc_state *old_crtc_state = crtc->state;
+ struct drm_plane *plane;
+ struct drm_plane_state *plane_state;
+ struct dpu_plane *dplane = to_dpu_plane(crtc->primary);
+ struct dpu_plane_state **old_dpstates;
+ struct dpu_plane_state *old_dpstate, *new_dpstate;
+ u32 active_mask = 0;
+ int i;
+
+ old_dpstates = crtc_state_get_dpu_plane_states(old_crtc_state);
+ if (WARN_ON(!old_dpstates))
+ return;
+
+ for (i = 0; i < dplane->grp->hw_plane_num; i++) {
+ old_dpstate = old_dpstates[i];
+ if (!old_dpstate)
+ continue;
+
+ active_mask |= BIT(i);
+
+ drm_atomic_crtc_state_for_each_plane(plane, crtc_state) {
+ if (drm_plane_index(plane) !=
+ drm_plane_index(old_dpstate->base.plane))
+ continue;
+
+ plane_state =
+ drm_atomic_get_existing_plane_state(state,
+ plane);
+ if (WARN_ON(!plane_state))
+ return;
+
+ new_dpstate = to_dpu_plane_state(plane_state);
+
+ active_mask &= ~BIT(i);
+
+ /*
+ * Should be enough to check the below real HW plane
+ * resources only.
+ * Things like vproc resources should be fine.
+ */
+ if (old_dpstate->stage != new_dpstate->stage ||
+ old_dpstate->source != new_dpstate->source ||
+ old_dpstate->blend != new_dpstate->blend ||
+ old_dpstate->aux_stage != new_dpstate->aux_stage ||
+ old_dpstate->aux_source != new_dpstate->aux_source ||
+ old_dpstate->aux_blend != new_dpstate->aux_blend)
+ return;
+ }
+ }
+
+ /* pure software check */
+ if (WARN_ON(active_mask))
+ return;
+
+ drm_atomic_crtc_state_for_each_plane(plane, crtc_state)
+ dpu_atomic_put_plane_state(state, plane);
+
+ dpu_atomic_put_crtc_state(state, crtc);
+}
+
+static int dpu_drm_atomic_check(struct drm_device *dev,
+ struct drm_atomic_state *state)
+{
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
+ struct drm_plane *plane;
+ struct dpu_plane *dpu_plane;
+ struct drm_plane_state *plane_state;
+ struct dpu_plane_state *dpstate;
+ struct drm_framebuffer *fb;
+ struct dpu_plane_grp *grp[MAX_DPU_PLANE_GRP];
+ int ret, i, grp_id;
+ int active_plane[MAX_DPU_PLANE_GRP];
+ int active_plane_fetcheco[MAX_DPU_PLANE_GRP];
+ int active_plane_hscale[MAX_DPU_PLANE_GRP];
+ int active_plane_vscale[MAX_DPU_PLANE_GRP];
+ int half_hdisplay = 0;
+ bool pipe_states_prone_to_put[MAX_CRTC];
+ bool use_pc[MAX_DPU_PLANE_GRP];
+ u32 crtc_mask_in_state = 0;
+
+ ret = drm_atomic_helper_check_modeset(dev, state);
+ if (ret) {
+ DRM_DEBUG_KMS("%s: failed to check modeset\n", __func__);
+ return ret;
+ }
+
+ for (i = 0; i < MAX_CRTC; i++)
+ pipe_states_prone_to_put[i] = false;
+
+ for (i = 0; i < MAX_DPU_PLANE_GRP; i++) {
+ active_plane[i] = 0;
+ active_plane_fetcheco[i] = 0;
+ active_plane_hscale[i] = 0;
+ active_plane_vscale[i] = 0;
+ use_pc[i] = false;
+ grp[i] = NULL;
+ }
+
+ for_each_new_crtc_in_state(state, crtc, crtc_state, i)
+ crtc_mask_in_state |= drm_crtc_mask(crtc);
+
+ drm_for_each_crtc(crtc, dev) {
+ struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
+ struct imx_crtc_state *imx_crtc_state;
+ struct dpu_crtc_state *dcstate;
+ bool need_left, need_right, need_aux_source, use_pc_per_crtc;
+
+ use_pc_per_crtc = false;
+
+ dpu_atomic_mark_pipe_states_prone_to_put_per_crtc(crtc,
+ crtc_mask_in_state, state,
+ pipe_states_prone_to_put);
+
+ crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+
+ imx_crtc_state = to_imx_crtc_state(crtc_state);
+ dcstate = to_dpu_crtc_state(imx_crtc_state);
+
+ if (crtc_state->enable) {
+ if (use_pc[dpu_crtc->crtc_grp_id]) {
+ DRM_DEBUG_KMS("other crtc needs pixel combiner\n");
+ return -EINVAL;
+ }
+
+ if (crtc_state->adjusted_mode.clock >
+ dpu_crtc->syncmode_min_prate ||
+ crtc_state->adjusted_mode.hdisplay >
+ dpu_crtc->singlemode_max_width)
+ use_pc_per_crtc = true;
+ }
+
+ if (use_pc_per_crtc) {
+ use_pc[dpu_crtc->crtc_grp_id] = true;
+ half_hdisplay = crtc_state->adjusted_mode.hdisplay >> 1;
+ }
+
+ dcstate->use_pc = use_pc_per_crtc;
+
+ drm_for_each_plane_mask(plane, dev, crtc_state->plane_mask) {
+ plane_state = drm_atomic_get_plane_state(state, plane);
+ if (IS_ERR(plane_state)) {
+ DRM_DEBUG_KMS("failed to get plane state\n");
+ return PTR_ERR(plane_state);
+ }
+
+ dpstate = to_dpu_plane_state(plane_state);
+ fb = plane_state->fb;
+ dpu_plane = to_dpu_plane(plane);
+ grp_id = dpu_plane->grp->id;
+ active_plane[grp_id]++;
+
+ need_left = false;
+ need_right = false;
+ need_aux_source = false;
+
+ if (use_pc_per_crtc) {
+ if (plane_state->crtc_x < half_hdisplay)
+ need_left = true;
+
+ if ((plane_state->crtc_w +
+ plane_state->crtc_x) > half_hdisplay)
+ need_right = true;
+
+ if (need_left && need_right) {
+ need_aux_source = true;
+ active_plane[grp_id]++;
+ }
+ }
+
+ if (need_left && need_right) {
+ dpstate->left_crtc_w = half_hdisplay;
+ dpstate->left_crtc_w -= plane_state->crtc_x;
+
+ dpstate->left_src_w = dpstate->left_crtc_w;
+ } else if (need_left) {
+ dpstate->left_crtc_w = plane_state->crtc_w;
+ dpstate->left_src_w = plane_state->src_w >> 16;
+ } else {
+ dpstate->left_crtc_w = 0;
+ dpstate->left_src_w = 0;
+ }
+
+ if (need_right && need_left) {
+ dpstate->right_crtc_w = plane_state->crtc_x +
+ plane_state->crtc_w;
+ dpstate->right_crtc_w -= half_hdisplay;
+
+ dpstate->right_src_w = dpstate->right_crtc_w;
+ } else if (need_right) {
+ dpstate->right_crtc_w = plane_state->crtc_w;
+ dpstate->right_src_w = plane_state->src_w >> 16;
+ } else {
+ dpstate->right_crtc_w = 0;
+ dpstate->right_src_w = 0;
+ }
+
+ if (fb->format->num_planes > 1) {
+ active_plane_fetcheco[grp_id]++;
+ if (need_aux_source)
+ active_plane_fetcheco[grp_id]++;
+ }
+
+ if (plane_state->src_w >> 16 != plane_state->crtc_w) {
+ if (use_pc_per_crtc)
+ return -EINVAL;
+
+ active_plane_hscale[grp_id]++;
+ }
+
+ if (plane_state->src_h >> 16 != plane_state->crtc_h) {
+ if (use_pc_per_crtc)
+ return -EINVAL;
+
+ active_plane_vscale[grp_id]++;
+ }
+
+ if (grp[grp_id] == NULL)
+ grp[grp_id] = dpu_plane->grp;
+
+ dpstate->need_aux_source = need_aux_source;
+ }
+ }
+
+ /* enough resources? */
+ for (i = 0; i < MAX_DPU_PLANE_GRP; i++) {
+ if (!grp[i])
+ continue;
+
+ if (active_plane[i] > grp[i]->hw_plane_num) {
+			DRM_DEBUG_KMS("not enough fetch units\n");
+ return -EINVAL;
+ }
+
+ if (active_plane_fetcheco[i] > grp[i]->hw_plane_fetcheco_num) {
+			DRM_DEBUG_KMS("not enough FetchEcos\n");
+ return -EINVAL;
+ }
+
+ if (active_plane_hscale[i] > grp[i]->hw_plane_hscaler_num) {
+			DRM_DEBUG_KMS("not enough Hscalers\n");
+ return -EINVAL;
+ }
+
+ if (active_plane_vscale[i] > grp[i]->hw_plane_vscaler_num) {
+			DRM_DEBUG_KMS("not enough Vscalers\n");
+ return -EINVAL;
+ }
+ }
+
+ /* initialize resource mask */
+ for (i = 0; i < MAX_DPU_PLANE_GRP; i++) {
+ if (!grp[i])
+ continue;
+
+ mutex_lock(&grp[i]->mutex);
+ grp[i]->src_a_mask = grp[i]->src_mask;
+ grp[i]->src_use_vproc_mask = 0;
+ mutex_unlock(&grp[i]->mutex);
+ }
+
+ ret = drm_atomic_normalize_zpos(dev, state);
+ if (ret)
+ return ret;
+
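+	/* sort each CRTC's planes by zpos and assign HW sources/stages/blends */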
+ for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
+ struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
+ struct drm_plane_state **states;
+ int n;
+
+ states = dpu_atomic_alloc_tmp_planes_per_crtc(dev);
+ if (IS_ERR(states)) {
+ DRM_DEBUG_KMS(
+ "[CRTC:%d:%s] cannot alloc plane state ptrs\n",
+ crtc->base.id, crtc->name);
+ return PTR_ERR(states);
+ }
+
+ n = dpu_atomic_sort_planes_per_crtc(crtc_state, states);
+ if (n < 0) {
+ DRM_DEBUG_KMS("[CRTC:%d:%s] failed to sort planes\n",
+ crtc->base.id, crtc->name);
+ kfree(states);
+ return n;
+ }
+
+ /* no active planes? */
+ if (n == 0) {
+ kfree(states);
+ continue;
+ }
+
+ if (use_pc[dpu_crtc->crtc_grp_id])
+ dpu_atomic_compute_plane_lrx_per_crtc(crtc_state,
+ states, n);
+
+ dpu_atomic_set_top_plane_per_crtc(states, n,
+ use_pc[dpu_crtc->crtc_grp_id]);
+
+ ret = dpu_atomic_assign_plane_source_per_crtc(states, n,
+ use_pc[dpu_crtc->crtc_grp_id]);
+ if (ret) {
+ DRM_DEBUG_KMS("[CRTC:%d:%s] cannot assign plane rscs\n",
+ crtc->base.id, crtc->name);
+ kfree(states);
+ return ret;
+ }
+
+ kfree(states);
+ }
+
+ drm_for_each_crtc(crtc, dev) {
+ if (pipe_states_prone_to_put[drm_crtc_index(crtc)]) {
+ crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ if (WARN_ON(IS_ERR(crtc_state)))
+ return PTR_ERR(crtc_state);
+
+ dpu_atomic_put_possible_states_per_crtc(crtc_state);
+ }
+ }
+
+ ret = drm_atomic_helper_check_planes(dev, state);
+ if (ret) {
+ DRM_DEBUG_KMS("%s: failed to check planes\n", __func__);
+ return ret;
+ }
+
+ return ret;
+}
+
+const struct drm_mode_config_funcs dpu_drm_mode_config_funcs = {
+ .fb_create = drm_gem_fb_create,
+ .atomic_check = dpu_drm_atomic_check,
+ .atomic_commit = drm_atomic_helper_commit,
+};
diff --git a/drivers/gpu/drm/imx/dpu/dpu-kms.h b/drivers/gpu/drm/imx/dpu/dpu-kms.h
new file mode 100644
index 000000000000..73723e500239
--- /dev/null
+++ b/drivers/gpu/drm/imx/dpu/dpu-kms.h
@@ -0,0 +1,20 @@
+/*
+ * Copyright 2017-2019 NXP
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#ifndef _DPU_KMS_H_
+#define _DPU_KMS_H_
+
+extern const struct drm_mode_config_funcs dpu_drm_mode_config_funcs;
+
+#endif
diff --git a/drivers/gpu/drm/imx/dpu/dpu-plane.c b/drivers/gpu/drm/imx/dpu/dpu-plane.c
new file mode 100644
index 000000000000..7dafd34d7edc
--- /dev/null
+++ b/drivers/gpu/drm/imx/dpu/dpu-plane.c
@@ -0,0 +1,1024 @@
+/*
+ * Copyright 2017-2019 NXP
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include <drm/drm_vblank.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_blend.h>
+#include <drm/drm_color_mgmt.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_gem_atomic_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_plane_helper.h>
+#include <video/dpu.h>
+#include <video/imx8-prefetch.h>
+#include "dpu-plane.h"
+#include "../imx-drm.h"
+
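+/* build a 16.16 fixed-point fraction, as used for plane scaling factors */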
+#define FRAC_16_16(mult, div) (((mult) << 16) / (div))
+
+static const uint32_t dpu_formats[] = {
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_RGBA8888,
+ DRM_FORMAT_RGBX8888,
+ DRM_FORMAT_BGRA8888,
+ DRM_FORMAT_BGRX8888,
+ DRM_FORMAT_RGB565,
+
+ DRM_FORMAT_YUYV,
+ DRM_FORMAT_UYVY,
+ DRM_FORMAT_NV12,
+ DRM_FORMAT_NV21,
+};
+
+static const uint64_t dpu_format_modifiers[] = {
+ DRM_FORMAT_MOD_VIVANTE_TILED,
+ DRM_FORMAT_MOD_VIVANTE_SUPER_TILED,
+ DRM_FORMAT_MOD_AMPHION_TILED,
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID,
+};
+
+static unsigned int dpu_plane_get_default_zpos(enum drm_plane_type type)
+{
+ if (type == DRM_PLANE_TYPE_PRIMARY)
+ return 0;
+ else if (type == DRM_PLANE_TYPE_OVERLAY)
+ return 1;
+
+ return 0;
+}
+
+static void dpu_plane_destroy(struct drm_plane *plane)
+{
+ struct dpu_plane *dpu_plane = to_dpu_plane(plane);
+
+ drm_plane_cleanup(plane);
+ kfree(dpu_plane);
+}
+
+static void dpu_plane_reset(struct drm_plane *plane)
+{
+ struct dpu_plane_state *state;
+
+ if (plane->state) {
+ __drm_atomic_helper_plane_destroy_state(plane->state);
+ kfree(to_dpu_plane_state(plane->state));
+ plane->state = NULL;
+ }
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return;
+
+ __drm_atomic_helper_plane_reset(plane, &state->base);
+
+ plane->state->zpos = dpu_plane_get_default_zpos(plane->type);
+ plane->state->color_encoding = DRM_COLOR_YCBCR_BT601;
+ plane->state->color_range = DRM_COLOR_YCBCR_FULL_RANGE;
+}
+
+static struct drm_plane_state *
+dpu_drm_atomic_plane_duplicate_state(struct drm_plane *plane)
+{
+ struct dpu_plane_state *state, *copy;
+
+ if (WARN_ON(!plane->state))
+ return NULL;
+
+	copy = kmalloc(sizeof(*copy), GFP_KERNEL);
+ if (!copy)
+ return NULL;
+
+ __drm_atomic_helper_plane_duplicate_state(plane, &copy->base);
+ state = to_dpu_plane_state(plane->state);
+ copy->stage = state->stage;
+ copy->source = state->source;
+ copy->blend = state->blend;
+ copy->aux_stage = state->aux_stage;
+ copy->aux_source = state->aux_source;
+ copy->aux_blend = state->aux_blend;
+ copy->is_top = state->is_top;
+ copy->use_prefetch = state->use_prefetch;
+ copy->use_aux_prefetch = state->use_aux_prefetch;
+ copy->need_aux_source = state->need_aux_source;
+ copy->left_src_w = state->left_src_w;
+ copy->left_crtc_w = state->left_crtc_w;
+ copy->left_crtc_x = state->left_crtc_x;
+ copy->right_src_w = state->right_src_w;
+ copy->right_crtc_w = state->right_crtc_w;
+ copy->right_crtc_x = state->right_crtc_x;
+ copy->is_left_top = state->is_left_top;
+ copy->is_right_top = state->is_right_top;
+
+ return &copy->base;
+}
+
+static bool dpu_drm_plane_format_mod_supported(struct drm_plane *plane,
+ uint32_t format,
+ uint64_t modifier)
+{
+ if (WARN_ON(modifier == DRM_FORMAT_MOD_INVALID))
+ return false;
+
+ switch (format) {
+ case DRM_FORMAT_YUYV:
+ case DRM_FORMAT_UYVY:
+ return modifier == DRM_FORMAT_MOD_LINEAR;
+ case DRM_FORMAT_ARGB8888:
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_ABGR8888:
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_RGBA8888:
+ case DRM_FORMAT_RGBX8888:
+ case DRM_FORMAT_BGRA8888:
+ case DRM_FORMAT_BGRX8888:
+ case DRM_FORMAT_RGB565:
+ return modifier == DRM_FORMAT_MOD_LINEAR ||
+ modifier == DRM_FORMAT_MOD_VIVANTE_TILED ||
+ modifier == DRM_FORMAT_MOD_VIVANTE_SUPER_TILED;
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV21:
+ return modifier == DRM_FORMAT_MOD_LINEAR ||
+ modifier == DRM_FORMAT_MOD_AMPHION_TILED;
+ default:
+ return false;
+ }
+}
+
+static void dpu_drm_atomic_plane_destroy_state(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ __drm_atomic_helper_plane_destroy_state(state);
+ kfree(to_dpu_plane_state(state));
+}
+
+static const struct drm_plane_funcs dpu_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = dpu_plane_destroy,
+ .reset = dpu_plane_reset,
+ .atomic_duplicate_state = dpu_drm_atomic_plane_duplicate_state,
+ .atomic_destroy_state = dpu_drm_atomic_plane_destroy_state,
+ .format_mod_supported = dpu_drm_plane_format_mod_supported,
+};
+
+static inline dma_addr_t
+drm_plane_state_to_baseaddr(struct drm_plane_state *state, bool aux_source)
+{
+ struct drm_framebuffer *fb = state->fb;
+ struct drm_gem_cma_object *cma_obj;
+ struct dpu_plane_state *dpstate = to_dpu_plane_state(state);
+ unsigned int x = (state->src.x1 >> 16) +
+ (aux_source ? dpstate->left_src_w : 0);
+ unsigned int y = state->src.y1 >> 16;
+
+ cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
+ BUG_ON(!cma_obj);
+
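+	/*
+	 * For tiled formats, the x/y offset is handled by the prefetch
+	 * engine, so only the start address of the buffer is relevant.
+	 */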
+ if (fb->modifier)
+ return cma_obj->paddr + fb->offsets[0];
+
+ if (fb->flags & DRM_MODE_FB_INTERLACED)
+ y /= 2;
+
+ return cma_obj->paddr + fb->offsets[0] + fb->pitches[0] * y +
+ fb->format->cpp[0] * x;
+}
+
+static inline dma_addr_t
+drm_plane_state_to_uvbaseaddr(struct drm_plane_state *state, bool aux_source)
+{
+ struct drm_framebuffer *fb = state->fb;
+ struct drm_gem_cma_object *cma_obj;
+ struct dpu_plane_state *dpstate = to_dpu_plane_state(state);
+ int x = (state->src.x1 >> 16) + (aux_source ? dpstate->left_src_w : 0);
+ int y = state->src.y1 >> 16;
+
+ cma_obj = drm_fb_cma_get_gem_obj(fb, 1);
+ BUG_ON(!cma_obj);
+
+ if (fb->modifier)
+ return cma_obj->paddr + fb->offsets[1];
+
+ x /= fb->format->hsub;
+ y /= fb->format->vsub;
+
+ if (fb->flags & DRM_MODE_FB_INTERLACED)
+ y /= 2;
+
+ return cma_obj->paddr + fb->offsets[1] + fb->pitches[1] * y +
+ fb->format->cpp[1] * x;
+}
+
+static inline bool dpu_plane_fb_format_is_yuv(u32 fmt)
+{
+ return fmt == DRM_FORMAT_YUYV || fmt == DRM_FORMAT_UYVY ||
+ fmt == DRM_FORMAT_NV12 || fmt == DRM_FORMAT_NV21;
+}
+
+static int dpu_plane_atomic_check(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct dpu_plane *dplane = to_dpu_plane(plane);
+ struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
+ plane);
+ struct dpu_plane_state *dpstate = to_dpu_plane_state(new_plane_state);
+ struct dpu_plane_res *res = &dplane->grp->res;
+ struct drm_crtc_state *crtc_state;
+ struct drm_framebuffer *fb = new_plane_state->fb;
+ struct dpu_fetchunit *fu;
+ struct dprc *dprc;
+ dma_addr_t baseaddr, uv_baseaddr = 0;
+ u32 src_w, src_h, src_x, src_y;
+ unsigned int frame_width;
+ int min_scale, bpp, ret;
+ bool fb_is_interlaced;
+ bool check_aux_source = false;
+
+ /* ok to disable */
+ if (!fb) {
+ dpstate->stage = LB_PRIM_SEL__DISABLE;
+ dpstate->source = LB_SEC_SEL__DISABLE;
+ dpstate->blend = ID_NONE;
+ dpstate->aux_stage = LB_PRIM_SEL__DISABLE;
+ dpstate->aux_source = LB_SEC_SEL__DISABLE;
+ dpstate->aux_blend = ID_NONE;
+ dpstate->is_top = false;
+ dpstate->use_prefetch = false;
+ dpstate->use_aux_prefetch = false;
+ dpstate->need_aux_source = false;
+ dpstate->left_src_w = 0;
+ dpstate->left_crtc_w = 0;
+ dpstate->left_crtc_x = 0;
+ dpstate->right_src_w = 0;
+ dpstate->right_crtc_w = 0;
+ dpstate->right_crtc_x = 0;
+ dpstate->is_left_top = false;
+ dpstate->is_right_top = false;
+ return 0;
+ }
+
+ if (!new_plane_state->crtc) {
+ DRM_DEBUG_KMS("[PLANE:%d:%s] has no CRTC in plane state\n",
+ plane->base.id, plane->name);
+ return -EINVAL;
+ }
+
+ src_w = drm_rect_width(&new_plane_state->src) >> 16;
+ src_h = drm_rect_height(&new_plane_state->src) >> 16;
+ src_x = new_plane_state->src.x1 >> 16;
+ src_y = new_plane_state->src.y1 >> 16;
+
+ fb_is_interlaced = !!(fb->flags & DRM_MODE_FB_INTERLACED);
+
+ if (fb->modifier &&
+ fb->modifier != DRM_FORMAT_MOD_AMPHION_TILED &&
+ fb->modifier != DRM_FORMAT_MOD_VIVANTE_TILED &&
+ fb->modifier != DRM_FORMAT_MOD_VIVANTE_SUPER_TILED) {
+ DRM_DEBUG_KMS("[PLANE:%d:%s] unsupported fb modifier\n",
+ plane->base.id, plane->name);
+ return -EINVAL;
+ }
+
+ crtc_state = drm_atomic_get_existing_crtc_state(state,
+ new_plane_state->crtc);
+ if (WARN_ON(!crtc_state))
+ return -EINVAL;
+
+ min_scale = dplane->grp->has_vproc ?
+ FRAC_16_16(min(src_w, src_h), 8192) :
+ DRM_PLANE_HELPER_NO_SCALING;
+ ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state,
+ min_scale,
+ DRM_PLANE_HELPER_NO_SCALING,
+ true, false);
+ if (ret) {
+ DRM_DEBUG_KMS("[PLANE:%d:%s] failed to check plane state\n",
+ plane->base.id, plane->name);
+ return ret;
+ }
+
+	/* the plane must not go off screen */
+	if (new_plane_state->dst.x1 < 0 || new_plane_state->dst.y1 < 0 ||
+	    (new_plane_state->dst.x2 > crtc_state->adjusted_mode.hdisplay) ||
+	    (new_plane_state->dst.y2 > crtc_state->adjusted_mode.vdisplay)) {
+		DRM_DEBUG_KMS("[PLANE:%d:%s] plane is off screen\n",
+			      plane->base.id, plane->name);
+ return -EINVAL;
+ }
+
+ /* pixel/line count and position parameters check */
+ if (fb->format->hsub == 2) {
+ if (dpstate->left_src_w || dpstate->right_src_w) {
+ if ((dpstate->left_src_w % 2) ||
+ (dpstate->right_src_w % 2) || (src_x % 2)) {
+ DRM_DEBUG_KMS("[PLANE:%d:%s] bad left/right uv width or xoffset\n",
+ plane->base.id, plane->name);
+ return -EINVAL;
+ }
+ } else {
+ if ((src_w % 2) || (src_x % 2)) {
+ DRM_DEBUG_KMS("[PLANE:%d:%s] bad uv width or xoffset\n",
+ plane->base.id, plane->name);
+ return -EINVAL;
+ }
+ }
+ }
+ if (fb->format->vsub == 2) {
+ if (src_h % (fb_is_interlaced ? 4 : 2)) {
+ DRM_DEBUG_KMS("[PLANE:%d:%s] bad uv height\n",
+ plane->base.id, plane->name);
+ return -EINVAL;
+ }
+ if (src_y % (fb_is_interlaced ? 4 : 2)) {
+ DRM_DEBUG_KMS("[PLANE:%d:%s] bad uv yoffset\n",
+ plane->base.id, plane->name);
+ return -EINVAL;
+ }
+ }
+
+ /* for tile formats, framebuffer has to be tile aligned */
+ switch (fb->modifier) {
+ case DRM_FORMAT_MOD_AMPHION_TILED:
+ if (fb->width % 8) {
+ DRM_DEBUG_KMS("[PLANE:%d:%s] bad fb width for AMPHION tile\n",
+ plane->base.id, plane->name);
+ return -EINVAL;
+ }
+ if (fb->height % 256) {
+ DRM_DEBUG_KMS("[PLANE:%d:%s] bad fb height for AMPHION tile\n",
+ plane->base.id, plane->name);
+ return -EINVAL;
+ }
+ break;
+ case DRM_FORMAT_MOD_VIVANTE_TILED:
+ if (fb->width % 4) {
+ DRM_DEBUG_KMS("[PLANE:%d:%s] bad fb width for VIVANTE tile\n",
+ plane->base.id, plane->name);
+ return -EINVAL;
+ }
+ if (fb->height % 4) {
+ DRM_DEBUG_KMS("[PLANE:%d:%s] bad fb height for VIVANTE tile\n",
+ plane->base.id, plane->name);
+ return -EINVAL;
+ }
+ break;
+ case DRM_FORMAT_MOD_VIVANTE_SUPER_TILED:
+ if (fb->width % 64) {
+ DRM_DEBUG_KMS("[PLANE:%d:%s] bad fb width for VIVANTE super tile\n",
+ plane->base.id, plane->name);
+ return -EINVAL;
+ }
+ if (fb->height % 64) {
+ DRM_DEBUG_KMS("[PLANE:%d:%s] bad fb height for VIVANTE super tile\n",
+ plane->base.id, plane->name);
+ return -EINVAL;
+ }
+ break;
+ default:
+ break;
+ }
+
+	/* BT.709 full range is not supported */
+	if (dpu_plane_fb_format_is_yuv(fb->format->format) &&
+	    new_plane_state->color_encoding == DRM_COLOR_YCBCR_BT709 &&
+	    new_plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE) {
+		DRM_DEBUG_KMS("[PLANE:%d:%s] no BT.709 full range support\n",
+			      plane->base.id, plane->name);
+		return -EINVAL;
+	}
+
+again:
+ fu = source_to_fu(res,
+ check_aux_source ? dpstate->aux_source : dpstate->source);
+ if (!fu) {
+ DRM_DEBUG_KMS("[PLANE:%d:%s] cannot get fetch unit\n",
+ plane->base.id, plane->name);
+ return -EINVAL;
+ }
+
+ dprc = fu->dprc;
+
+ if (dpstate->need_aux_source)
+ frame_width = check_aux_source ?
+ dpstate->right_src_w : dpstate->left_src_w;
+ else
+ frame_width = src_w;
+
+ if (dprc &&
+ dprc_format_supported(dprc, fb->format->format, fb->modifier) &&
+ dprc_stride_supported(dprc, fb->pitches[0], fb->pitches[1],
+ frame_width, fb->format->format)) {
+ if (check_aux_source)
+ dpstate->use_aux_prefetch = true;
+ else
+ dpstate->use_prefetch = true;
+ } else {
+ if (check_aux_source)
+ dpstate->use_aux_prefetch = false;
+ else
+ dpstate->use_prefetch = false;
+ }
+
+	/* tile resolving is only possible via the prefetch engine */
+	if (fb->modifier &&
+	    (check_aux_source ? !dpstate->use_aux_prefetch :
+				!dpstate->use_prefetch)) {
+		DRM_DEBUG_KMS("[PLANE:%d:%s] cannot do tile resolving w/o prefetch\n",
+			      plane->base.id, plane->name);
+		return -EINVAL;
+	}
+
+ /* base address alignment check */
+ baseaddr = drm_plane_state_to_baseaddr(new_plane_state,
+ check_aux_source);
+ switch (fb->format->format) {
+ case DRM_FORMAT_YUYV:
+ case DRM_FORMAT_UYVY:
+ bpp = 16;
+ break;
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV21:
+ bpp = 8;
+ break;
+ default:
+ bpp = fb->format->cpp[0] * 8;
+ break;
+ }
+ switch (bpp) {
+ case 32:
+ if (baseaddr & 0x3) {
+ DRM_DEBUG_KMS("[PLANE:%d:%s] 32bpp fb bad baddr alignment\n",
+ plane->base.id, plane->name);
+ return -EINVAL;
+ }
+ break;
+ case 16:
+ if (fb->modifier) {
+ if (baseaddr & 0x1) {
+ DRM_DEBUG_KMS("[PLANE:%d:%s] 16bpp tile fb bad baddr alignment\n",
+ plane->base.id, plane->name);
+ return -EINVAL;
+ }
+ } else {
+ if (check_aux_source) {
+ if (baseaddr &
+ (dpstate->use_aux_prefetch ? 0x7 : 0x1)) {
+ DRM_DEBUG_KMS("[PLANE:%d:%s] 16bpp fb bad baddr alignment\n",
+ plane->base.id, plane->name);
+ return -EINVAL;
+ }
+ } else {
+ if (baseaddr &
+ (dpstate->use_prefetch ? 0x7 : 0x1)) {
+ DRM_DEBUG_KMS("[PLANE:%d:%s] 16bpp fb bad baddr alignment\n",
+ plane->base.id, plane->name);
+ return -EINVAL;
+ }
+ }
+ }
+ break;
+ }
+
+ if (fb->pitches[0] > 0x10000) {
+ DRM_DEBUG_KMS("[PLANE:%d:%s] fb pitch[0] is too big\n",
+ plane->base.id, plane->name);
+ return -EINVAL;
+ }
+
+ /* UV base address alignment check, assuming 16bpp */
+ if (fb->format->num_planes > 1) {
+ uv_baseaddr = drm_plane_state_to_uvbaseaddr(new_plane_state,
+ check_aux_source);
+ if (fb->modifier) {
+ if (uv_baseaddr & 0x1) {
+ DRM_DEBUG_KMS("[PLANE:%d:%s] bad uv baddr alignment for tile fb\n",
+ plane->base.id, plane->name);
+ return -EINVAL;
+ }
+ } else {
+ if (check_aux_source) {
+ if (uv_baseaddr &
+ (dpstate->use_aux_prefetch ? 0x7 : 0x1)) {
+ DRM_DEBUG_KMS("[PLANE:%d:%s] bad uv baddr alignment\n",
+ plane->base.id, plane->name);
+ return -EINVAL;
+ }
+ } else {
+ if (uv_baseaddr &
+ (dpstate->use_prefetch ? 0x7 : 0x1)) {
+ DRM_DEBUG_KMS("[PLANE:%d:%s] bad uv baddr alignment\n",
+ plane->base.id, plane->name);
+ return -EINVAL;
+ }
+ }
+ }
+
+ if (fb->pitches[1] > 0x10000) {
+ DRM_DEBUG_KMS("[PLANE:%d:%s] fb pitch[1] is too big\n",
+ plane->base.id, plane->name);
+ return -EINVAL;
+ }
+ }
+
+ if (!check_aux_source && dpstate->use_prefetch &&
+ !dprc_stride_double_check(dprc, frame_width, src_x,
+ fb->format->format,
+ fb->modifier,
+ baseaddr, uv_baseaddr)) {
+ if (fb->modifier) {
+ DRM_DEBUG_KMS("[PLANE:%d:%s] bad pitch\n",
+ plane->base.id, plane->name);
+ return -EINVAL;
+ }
+
+ if (bpp == 16 && (baseaddr & 0x1)) {
+ DRM_DEBUG_KMS("[PLANE:%d:%s] bad baddr alignment\n",
+ plane->base.id, plane->name);
+ return -EINVAL;
+ }
+
+ if (uv_baseaddr & 0x1) {
+ DRM_DEBUG_KMS("[PLANE:%d:%s] bad uv baddr alignment\n",
+ plane->base.id, plane->name);
+ return -EINVAL;
+ }
+
+ dpstate->use_prefetch = false;
+ } else if (check_aux_source && dpstate->use_aux_prefetch &&
+ !dprc_stride_double_check(dprc, frame_width, src_x,
+ fb->format->format,
+ fb->modifier,
+ baseaddr, uv_baseaddr)) {
+ if (fb->modifier) {
+ DRM_DEBUG_KMS("[PLANE:%d:%s] bad pitch\n",
+ plane->base.id, plane->name);
+ return -EINVAL;
+ }
+
+ if (bpp == 16 && (baseaddr & 0x1)) {
+ DRM_DEBUG_KMS("[PLANE:%d:%s] bad baddr alignment\n",
+ plane->base.id, plane->name);
+ return -EINVAL;
+ }
+
+ if (uv_baseaddr & 0x1) {
+ DRM_DEBUG_KMS("[PLANE:%d:%s] bad uv baddr alignment\n",
+ plane->base.id, plane->name);
+ return -EINVAL;
+ }
+
+ dpstate->use_aux_prefetch = false;
+ }
+
+ if (dpstate->need_aux_source && !check_aux_source) {
+ check_aux_source = true;
+ goto again;
+ }
+
+ return 0;
+}
+
+static void dpu_plane_atomic_update(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct dpu_plane *dplane = to_dpu_plane(plane);
+ struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
+ plane);
+ struct dpu_plane_state *dpstate = to_dpu_plane_state(new_plane_state);
+ struct drm_framebuffer *fb = new_plane_state->fb;
+ struct dpu_plane_res *res = &dplane->grp->res;
+ struct dpu_fetchunit *fu;
+ struct dpu_fetchunit *fe = NULL;
+ struct dprc *dprc;
+ struct dpu_hscaler *hs = NULL;
+ struct dpu_vscaler *vs = NULL;
+ struct dpu_layerblend *lb;
+ struct dpu_extdst *ed;
+ struct dpu_framegen *fg;
+ dma_addr_t baseaddr, uv_baseaddr = 0;
+ dpu_block_id_t blend, fe_id, vs_id = ID_NONE, hs_id;
+ lb_sec_sel_t source;
+ lb_prim_sel_t stage;
+ unsigned int stream_id;
+ unsigned int src_w, src_h, src_x, src_y, dst_w, dst_h;
+ unsigned int crtc_x;
+ unsigned int mt_w = 0, mt_h = 0; /* w/h in a micro-tile */
+ int bpp, lb_id;
+ bool need_fetcheco, need_hscaler = false, need_vscaler = false;
+ bool prefetch_start, uv_prefetch_start;
+ bool crtc_use_pc = dpstate->left_src_w || dpstate->right_src_w;
+ bool update_aux_source = false;
+ bool use_prefetch;
+ bool need_modeset;
+ bool fb_is_interlaced;
+
+ /*
+ * Do nothing since the plane is disabled by
+ * crtc_func->atomic_begin/flush.
+ */
+ if (!fb)
+ return;
+
+ need_modeset =
+ drm_atomic_crtc_needs_modeset(new_plane_state->crtc->state);
+ fb_is_interlaced = !!(fb->flags & DRM_MODE_FB_INTERLACED);
+
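+	/*
+	 * Update the own source first; if the plane spans both halves of a
+	 * pixel-combined display, run through once more for the aux source.
+	 */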
+again:
+ need_fetcheco = false;
+ prefetch_start = false;
+ uv_prefetch_start = false;
+
+ source = update_aux_source ? dpstate->aux_source : dpstate->source;
+ blend = update_aux_source ? dpstate->aux_blend : dpstate->blend;
+ stage = update_aux_source ? dpstate->aux_stage : dpstate->stage;
+ use_prefetch = update_aux_source ?
+ dpstate->use_aux_prefetch : dpstate->use_prefetch;
+
+ if (crtc_use_pc) {
+ if (update_aux_source) {
+ stream_id = 1;
+ crtc_x = dpstate->right_crtc_x;
+ } else {
+ stream_id = dpstate->left_src_w ? 0 : 1;
+ crtc_x = dpstate->left_src_w ?
+ dpstate->left_crtc_x : dpstate->right_crtc_x;
+ }
+ } else {
+ stream_id = dplane->stream_id;
+ crtc_x = new_plane_state->crtc_x;
+ }
+
+ fg = res->fg[stream_id];
+
+ fu = source_to_fu(res, source);
+ if (!fu)
+ return;
+
+ dprc = fu->dprc;
+
+ lb_id = blend_to_id(blend);
+ if (lb_id < 0)
+ return;
+
+ lb = res->lb[lb_id];
+
+ if (crtc_use_pc) {
+ if (update_aux_source || !dpstate->left_src_w)
+ src_w = dpstate->right_src_w;
+ else
+ src_w = dpstate->left_src_w;
+ } else {
+ src_w = drm_rect_width(&new_plane_state->src) >> 16;
+ }
+ src_h = drm_rect_height(&new_plane_state->src) >> 16;
+ if (crtc_use_pc && update_aux_source) {
+ if (fb->modifier)
+ src_x = (new_plane_state->src_x >> 16) +
+ dpstate->left_src_w;
+ else
+ src_x = 0;
+ } else {
+ src_x = fb->modifier ? (new_plane_state->src_x >> 16) : 0;
+ }
+ src_y = fb->modifier ? (new_plane_state->src_y >> 16) : 0;
+ dst_w = drm_rect_width(&new_plane_state->dst);
+ dst_h = drm_rect_height(&new_plane_state->dst);
+
+ if (fetchunit_is_fetchdecode(fu)) {
+ if (fetchdecode_need_fetcheco(fu, fb->format->format)) {
+ need_fetcheco = true;
+ fe = fetchdecode_get_fetcheco(fu);
+ if (IS_ERR(fe))
+ return;
+ }
+
+		/* hscaling is only available when the pixel combiner is unused */
+ if ((src_w != dst_w) && !crtc_use_pc) {
+ need_hscaler = true;
+ hs = fetchdecode_get_hscaler(fu);
+ if (IS_ERR(hs))
+ return;
+ }
+
+ if ((src_h != dst_h) || fb_is_interlaced) {
+ need_vscaler = true;
+ vs = fetchdecode_get_vscaler(fu);
+ if (IS_ERR(vs))
+ return;
+ }
+ }
+
+ switch (fb->format->format) {
+ case DRM_FORMAT_YUYV:
+ case DRM_FORMAT_UYVY:
+ bpp = 16;
+ break;
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV21:
+ bpp = 8;
+ break;
+ default:
+ bpp = fb->format->cpp[0] * 8;
+ break;
+ }
+
+ switch (fb->modifier) {
+ case DRM_FORMAT_MOD_AMPHION_TILED:
+ mt_w = 8;
+ mt_h = 8;
+ break;
+ case DRM_FORMAT_MOD_VIVANTE_TILED:
+ case DRM_FORMAT_MOD_VIVANTE_SUPER_TILED:
+ mt_w = (bpp == 16) ? 8 : 4;
+ mt_h = 4;
+ break;
+ default:
+ break;
+ }
+
+ baseaddr = drm_plane_state_to_baseaddr(new_plane_state,
+ update_aux_source);
+ if (need_fetcheco)
+ uv_baseaddr = drm_plane_state_to_uvbaseaddr(new_plane_state,
+ update_aux_source);
+
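+	/* (re)start prefetch if the fetch unit was idle or a modeset occurs */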
+ if (use_prefetch &&
+ (fu->ops->get_stream_id(fu) == DPU_PLANE_SRC_DISABLED ||
+ need_modeset))
+ prefetch_start = true;
+
+ fu->ops->set_burstlength(fu, src_x, mt_w, bpp, baseaddr, use_prefetch);
+ fu->ops->set_src_bpp(fu, bpp);
+ fu->ops->set_src_stride(fu, src_w, src_w, mt_w, bpp, fb->pitches[0],
+ baseaddr, use_prefetch);
+ fu->ops->set_src_buf_dimensions(fu, src_w, src_h, 0, fb_is_interlaced);
+ fu->ops->set_pixel_blend_mode(fu, new_plane_state->pixel_blend_mode,
+ new_plane_state->alpha,
+ fb->format->format);
+ fu->ops->set_fmt(fu, fb->format->format,
+ new_plane_state->color_encoding,
+ new_plane_state->color_range, fb_is_interlaced);
+ fu->ops->enable_src_buf(fu);
+ fu->ops->set_framedimensions(fu, src_w, src_h, fb_is_interlaced);
+ fu->ops->set_baseaddress(fu, src_w, src_x, src_y, mt_w, mt_h, bpp,
+ baseaddr);
+ fu->ops->set_stream_id(fu, stream_id ?
+ DPU_PLANE_SRC_TO_DISP_STREAM1 :
+ DPU_PLANE_SRC_TO_DISP_STREAM0);
+
+ DRM_DEBUG_KMS("[PLANE:%d:%s] %s-0x%02x\n",
+ plane->base.id, plane->name, fu->name, fu->id);
+
+ if (need_fetcheco) {
+ fe_id = fetcheco_get_block_id(fe);
+ if (fe_id == ID_NONE)
+ return;
+
+ if (use_prefetch &&
+ (fe->ops->get_stream_id(fe) == DPU_PLANE_SRC_DISABLED ||
+ need_modeset))
+ uv_prefetch_start = true;
+
+ fetchdecode_pixengcfg_dynamic_src_sel(fu,
+ (fd_dynamic_src_sel_t)fe_id);
+ fe->ops->set_burstlength(fe, src_w, mt_w, bpp, uv_baseaddr,
+ use_prefetch);
+ fe->ops->set_src_bpp(fe, 16);
+ fe->ops->set_src_stride(fe, src_w, src_x, mt_w, bpp,
+ fb->pitches[1],
+ uv_baseaddr, use_prefetch);
+ fe->ops->set_fmt(fe, fb->format->format,
+ new_plane_state->color_encoding,
+ new_plane_state->color_range,
+ fb_is_interlaced);
+ fe->ops->set_src_buf_dimensions(fe, src_w, src_h,
+ fb->format->format,
+ fb_is_interlaced);
+ fe->ops->set_framedimensions(fe, src_w, src_h,
+ fb_is_interlaced);
+ fe->ops->set_baseaddress(fe, src_w, src_x, src_y / 2,
+ mt_w, mt_h, bpp, uv_baseaddr);
+ fe->ops->enable_src_buf(fe);
+ fe->ops->set_stream_id(fe, stream_id ?
+ DPU_PLANE_SRC_TO_DISP_STREAM1 :
+ DPU_PLANE_SRC_TO_DISP_STREAM0);
+
+ DRM_DEBUG_KMS("[PLANE:%d:%s] %s-0x%02x\n",
+ plane->base.id, plane->name, fe->name, fe_id);
+ } else {
+ if (fetchunit_is_fetchdecode(fu))
+ fetchdecode_pixengcfg_dynamic_src_sel(fu,
+ FD_SRC_DISABLE);
+ }
+
+ /* vscaler comes first */
+ if (need_vscaler) {
+ vs_id = vscaler_get_block_id(vs);
+ if (vs_id == ID_NONE)
+ return;
+
+ vscaler_pixengcfg_dynamic_src_sel(vs, (vs_src_sel_t)source);
+ vscaler_pixengcfg_clken(vs, CLKEN__AUTOMATIC);
+ vscaler_setup1(vs, src_h, new_plane_state->crtc_h,
+ fb_is_interlaced);
+ vscaler_setup2(vs, fb_is_interlaced);
+ vscaler_setup3(vs, fb_is_interlaced);
+ vscaler_output_size(vs, dst_h);
+ vscaler_field_mode(vs, fb_is_interlaced ?
+ SCALER_ALWAYS0 : SCALER_INPUT);
+ vscaler_filter_mode(vs, SCALER_LINEAR);
+ vscaler_scale_mode(vs, SCALER_UPSCALE);
+ vscaler_mode(vs, SCALER_ACTIVE);
+ vscaler_set_stream_id(vs, dplane->stream_id ?
+ DPU_PLANE_SRC_TO_DISP_STREAM1 :
+ DPU_PLANE_SRC_TO_DISP_STREAM0);
+
+ source = (lb_sec_sel_t)vs_id;
+
+ DRM_DEBUG_KMS("[PLANE:%d:%s] vscaler-0x%02x\n",
+ plane->base.id, plane->name, vs_id);
+ }
+
+ /* and then, hscaler */
+ if (need_hscaler) {
+ hs_id = hscaler_get_block_id(hs);
+ if (hs_id == ID_NONE)
+ return;
+
+ hscaler_pixengcfg_dynamic_src_sel(hs, need_vscaler ?
+ (hs_src_sel_t)vs_id :
+ (hs_src_sel_t)source);
+ hscaler_pixengcfg_clken(hs, CLKEN__AUTOMATIC);
+ hscaler_setup1(hs, src_w, dst_w);
+ hscaler_output_size(hs, dst_w);
+ hscaler_filter_mode(hs, SCALER_LINEAR);
+ hscaler_scale_mode(hs, SCALER_UPSCALE);
+ hscaler_mode(hs, SCALER_ACTIVE);
+ hscaler_set_stream_id(hs, dplane->stream_id ?
+ DPU_PLANE_SRC_TO_DISP_STREAM1 :
+ DPU_PLANE_SRC_TO_DISP_STREAM0);
+
+ source = (lb_sec_sel_t)hs_id;
+
+ DRM_DEBUG_KMS("[PLANE:%d:%s] hscaler-0x%02x\n",
+ plane->base.id, plane->name, hs_id);
+ }
+
+ if (use_prefetch) {
+ dprc_configure(dprc, stream_id,
+ src_w, src_h, src_x, src_y,
+ fb->pitches[0], fb->format->format,
+ fb->modifier, baseaddr, uv_baseaddr,
+ prefetch_start, uv_prefetch_start,
+ fb_is_interlaced);
+
+ dprc_enable(dprc);
+
+ dprc_reg_update(dprc);
+
+ if (prefetch_start || uv_prefetch_start) {
+ dprc_first_frame_handle(dprc);
+
+ if (!need_modeset &&
+ new_plane_state->normalized_zpos != 0)
+ framegen_wait_for_frame_counter_moving(fg);
+ }
+
+ if (update_aux_source)
+ DRM_DEBUG_KMS("[PLANE:%d:%s] use aux prefetch\n",
+ plane->base.id, plane->name);
+ else
+ DRM_DEBUG_KMS("[PLANE:%d:%s] use prefetch\n",
+ plane->base.id, plane->name);
+ } else if (dprc) {
+ dprc_disable(dprc);
+
+ if (update_aux_source)
+ DRM_DEBUG_KMS("[PLANE:%d:%s] bypass aux prefetch\n",
+ plane->base.id, plane->name);
+ else
+ DRM_DEBUG_KMS("[PLANE:%d:%s] bypass prefetch\n",
+ plane->base.id, plane->name);
+ }
+
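+	/*
+	 * Blend this plane's source on top of the primary input (stage);
+	 * the LayerBlend output becomes the stage for the next plane.
+	 */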
+ layerblend_pixengcfg_dynamic_prim_sel(lb, stage);
+ layerblend_pixengcfg_dynamic_sec_sel(lb, source);
+ layerblend_control(lb, LB_BLEND);
+ layerblend_blendcontrol(lb, new_plane_state->normalized_zpos,
+ new_plane_state->pixel_blend_mode,
+ new_plane_state->alpha);
+ layerblend_pixengcfg_clken(lb, CLKEN__AUTOMATIC);
+ layerblend_position(lb, crtc_x, new_plane_state->crtc_y);
+
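+	/*
+	 * Only the topmost plane's blend output feeds the ExtDst unit;
+	 * with the pixel combiner in use, each half of the screen has
+	 * its own topmost plane.
+	 */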
+ if (crtc_use_pc) {
+ if ((!stream_id && dpstate->is_left_top) ||
+ (stream_id && dpstate->is_right_top)) {
+ ed = res->ed[stream_id];
+ extdst_pixengcfg_src_sel(ed, (extdst_src_sel_t)blend);
+ }
+ } else {
+ if (dpstate->is_top) {
+ ed = res->ed[stream_id];
+ extdst_pixengcfg_src_sel(ed, (extdst_src_sel_t)blend);
+ }
+ }
+
+ if (update_aux_source)
+ DRM_DEBUG_KMS("[PLANE:%d:%s] *aux* source-0x%02x stage-0x%02x blend-0x%02x\n",
+ plane->base.id, plane->name,
+ source, dpstate->stage, dpstate->blend);
+ else
+ DRM_DEBUG_KMS("[PLANE:%d:%s] source-0x%02x stage-0x%02x blend-0x%02x\n",
+ plane->base.id, plane->name,
+ source, dpstate->stage, dpstate->blend);
+
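+	/*
+	 * A plane which spans both pixel combiner halves needs a second
+	 * (auxiliary) source, so run the setup above once more for it.
+	 */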
+ if (dpstate->need_aux_source && !update_aux_source) {
+ update_aux_source = true;
+ goto again;
+ }
+}
+
+static const struct drm_plane_helper_funcs dpu_plane_helper_funcs = {
+ .prepare_fb = drm_gem_plane_helper_prepare_fb,
+ .atomic_check = dpu_plane_atomic_check,
+ .atomic_update = dpu_plane_atomic_update,
+};
+
+struct dpu_plane *dpu_plane_create(struct drm_device *drm,
+ unsigned int possible_crtcs,
+ unsigned int stream_id,
+ struct dpu_plane_grp *grp,
+ enum drm_plane_type type)
+{
+ struct dpu_plane *dpu_plane;
+ struct drm_plane *plane;
+ unsigned int zpos = dpu_plane_get_default_zpos(type);
+ int ret;
+
+ dpu_plane = kzalloc(sizeof(*dpu_plane), GFP_KERNEL);
+ if (!dpu_plane)
+ return ERR_PTR(-ENOMEM);
+
+ dpu_plane->stream_id = stream_id;
+ dpu_plane->grp = grp;
+
+ plane = &dpu_plane->base;
+
+ ret = drm_universal_plane_init(drm, plane, possible_crtcs,
+ &dpu_plane_funcs,
+ dpu_formats, ARRAY_SIZE(dpu_formats),
+ dpu_format_modifiers, type, NULL);
+ if (ret)
+ goto err;
+
+ drm_plane_helper_add(plane, &dpu_plane_helper_funcs);
+
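+	/* zpos ranges over the hardware planes available in the group. */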
+ ret = drm_plane_create_zpos_property(plane,
+ zpos, 0, grp->hw_plane_num - 1);
+ if (ret)
+		goto err_cleanup;
+
+ ret = drm_plane_create_alpha_property(plane);
+ if (ret)
+		goto err_cleanup;
+
+ ret = drm_plane_create_blend_mode_property(plane,
+ BIT(DRM_MODE_BLEND_PIXEL_NONE) |
+ BIT(DRM_MODE_BLEND_PREMULTI) |
+ BIT(DRM_MODE_BLEND_COVERAGE));
+ if (ret)
+		goto err_cleanup;
+
+ ret = drm_plane_create_color_properties(plane,
+ BIT(DRM_COLOR_YCBCR_BT601) |
+ BIT(DRM_COLOR_YCBCR_BT709),
+ BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
+ BIT(DRM_COLOR_YCBCR_FULL_RANGE),
+ DRM_COLOR_YCBCR_BT601,
+ DRM_COLOR_YCBCR_FULL_RANGE);
+ if (ret)
+		goto err_cleanup;
+
+ return dpu_plane;
+
+err_cleanup:
+	drm_plane_cleanup(plane);
+err:
+ kfree(dpu_plane);
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/imx/dpu/dpu-plane.h b/drivers/gpu/drm/imx/dpu/dpu-plane.h
new file mode 100644
index 000000000000..8d42ad0c9ff4
--- /dev/null
+++ b/drivers/gpu/drm/imx/dpu/dpu-plane.h
@@ -0,0 +1,210 @@
+/*
+ * Copyright 2017-2019,2022 NXP
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#ifndef __DPU_PLANE_H__
+#define __DPU_PLANE_H__
+
+#include <video/dpu.h>
+#include "../imx-drm.h"
+
+#define MAX_DPU_PLANE_GRP (MAX_CRTC / 2)
+
+enum dpu_plane_src_type {
+ DPU_PLANE_SRC_FL,
+ DPU_PLANE_SRC_FW,
+ DPU_PLANE_SRC_FD,
+};
+
+struct dpu_plane {
+ struct drm_plane base;
+ struct dpu_plane_grp *grp;
+ struct list_head head;
+ unsigned int stream_id;
+};
+
+struct dpu_plane_state {
+ struct drm_plane_state base;
+ lb_prim_sel_t stage;
+ lb_sec_sel_t source;
+ dpu_block_id_t blend;
+ lb_prim_sel_t aux_stage;
+ lb_sec_sel_t aux_source;
+ dpu_block_id_t aux_blend;
+
+ bool is_top;
+ bool use_prefetch;
+ bool use_aux_prefetch;
+ bool need_aux_source;
+
+	/* per-half geometry, used when the pixel combiner is needed */
+ unsigned int left_src_w;
+ unsigned int left_crtc_w;
+ unsigned int left_crtc_x;
+ unsigned int right_src_w;
+ unsigned int right_crtc_w;
+ unsigned int right_crtc_x;
+
+ bool is_left_top;
+ bool is_right_top;
+};
+
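+/*
+ * Mappings from plane indices to DPU block selectors: ConstFrame units
+ * provide the background of a blend chain, LayerBlend units form its
+ * stages, and the fetch units act as pixel sources.
+ */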
+static const lb_prim_sel_t cf_stages[] = {LB_PRIM_SEL__CONSTFRAME0,
+ LB_PRIM_SEL__CONSTFRAME1};
+static const lb_prim_sel_t stages[] = {LB_PRIM_SEL__LAYERBLEND0,
+ LB_PRIM_SEL__LAYERBLEND1,
+ LB_PRIM_SEL__LAYERBLEND2,
+ LB_PRIM_SEL__LAYERBLEND3};
+/* TODO: Add source entries for subsidiary layers. */
+static const lb_sec_sel_t sources[] = {LB_SEC_SEL__FETCHLAYER0,
+ LB_SEC_SEL__FETCHWARP2,
+ LB_SEC_SEL__FETCHDECODE0,
+ LB_SEC_SEL__FETCHDECODE1};
+static const dpu_block_id_t blends[] = {ID_LAYERBLEND0, ID_LAYERBLEND1,
+ ID_LAYERBLEND2, ID_LAYERBLEND3};
+
+static inline struct dpu_plane *to_dpu_plane(struct drm_plane *plane)
+{
+ return container_of(plane, struct dpu_plane, base);
+}
+
+static inline struct dpu_plane_state *
+to_dpu_plane_state(struct drm_plane_state *plane_state)
+{
+ return container_of(plane_state, struct dpu_plane_state, base);
+}
+
+static inline int source_to_type(lb_sec_sel_t source)
+{
+ switch (source) {
+ case LB_SEC_SEL__FETCHLAYER0:
+ return DPU_PLANE_SRC_FL;
+ case LB_SEC_SEL__FETCHWARP2:
+ return DPU_PLANE_SRC_FW;
+ case LB_SEC_SEL__FETCHDECODE0:
+ case LB_SEC_SEL__FETCHDECODE1:
+ return DPU_PLANE_SRC_FD;
+ default:
+ break;
+ }
+
+ WARN_ON(1);
+ return -EINVAL;
+}
+
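+/*
+ * Map a source selector to its per-type instance index, e.g. both
+ * LB_SEC_SEL__FETCHLAYER0 and LB_SEC_SEL__FETCHDECODE0 map to 0.
+ */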
+static inline int source_to_id(lb_sec_sel_t source)
+{
+ int i, offset = 0;
+ int type = source_to_type(source);
+
+ for (i = 0; i < ARRAY_SIZE(sources); i++) {
+ if (source != sources[i])
+ continue;
+
+ /* FetchLayer */
+ if (type == DPU_PLANE_SRC_FL)
+ return i;
+
+ /* FetchWarp or FetchDecode */
+ while (offset < ARRAY_SIZE(sources)) {
+ if (source_to_type(sources[offset]) == type)
+ break;
+ offset++;
+ }
+ return i - offset;
+ }
+
+ WARN_ON(1);
+ return -EINVAL;
+}
+
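+/* Look up the fetchunit instance which backs a given source selector. */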
+static inline struct dpu_fetchunit *
+source_to_fu(struct dpu_plane_res *res, lb_sec_sel_t source)
+{
+ int fu_type = source_to_type(source);
+ int fu_id = source_to_id(source);
+
+ if (fu_type < 0 || fu_id < 0)
+ return NULL;
+
+ switch (fu_type) {
+ case DPU_PLANE_SRC_FD:
+ if (fu_id >= ARRAY_SIZE(res->fd)) {
+ WARN_ON(1);
+ return NULL;
+ }
+
+ return res->fd[fu_id];
+ case DPU_PLANE_SRC_FL:
+ if (fu_id >= ARRAY_SIZE(res->fl)) {
+ WARN_ON(1);
+ return NULL;
+ }
+
+ return res->fl[fu_id];
+ case DPU_PLANE_SRC_FW:
+ if (fu_id >= ARRAY_SIZE(res->fw)) {
+ WARN_ON(1);
+ return NULL;
+ }
+
+ return res->fw[fu_id];
+ }
+
+ return NULL;
+}
+
+static inline struct dpu_fetchunit *
+dpstate_to_fu(struct dpu_plane_state *dpstate)
+{
+ struct drm_plane *plane = dpstate->base.plane;
+ struct dpu_plane *dplane = to_dpu_plane(plane);
+ struct dpu_plane_res *res = &dplane->grp->res;
+
+ return source_to_fu(res, dpstate->source);
+}
+
+static inline int blend_to_id(dpu_block_id_t blend)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(blends); i++) {
+ if (blend == blends[i])
+ return i;
+ }
+
+ WARN_ON(1);
+ return -EINVAL;
+}
+
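+/* A local YUV check for the packed and 2-plane YUV formats handled here. */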
+static inline bool drm_format_is_yuv(uint32_t format)
+{
+ switch (format) {
+ case DRM_FORMAT_YUYV:
+ case DRM_FORMAT_UYVY:
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV21:
+ return true;
+ default:
+ break;
+ }
+
+ return false;
+}
+
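+/*
+ * Create a DPU plane for one display stream. A typical caller, e.g.
+ * the CRTC setup path, might do, roughly:
+ *
+ *	plane = dpu_plane_create(drm, possible_crtcs, stream_id, grp,
+ *				 DRM_PLANE_TYPE_PRIMARY);
+ *	if (IS_ERR(plane))
+ *		return PTR_ERR(plane);
+ */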
+struct dpu_plane *dpu_plane_create(struct drm_device *drm,
+ unsigned int possible_crtcs,
+ unsigned int stream_id,
+ struct dpu_plane_grp *grp,
+ enum drm_plane_type type);
+#endif