Merge tag 'drm-misc-next-2018-06-21' of git://anongit.freedesktop.org/drm/drm-misc into drm-next

drm-misc-next for 4.19:

UAPI Changes:
- Add writeback connector (Brian Starkey/Liviu Dudau)
- Add "content type" property to HDMI connectors (Stanislav Lisovskiy)

Cross-subsystem Changes:
- some devicetree documentation updates
- fix compile breakage on ION due to the dma-buf cleanups (Christian König)

Core Changes:
- Reject over-sized allocation requests early (Chris Wilson)
- gem-fb-helper: Always do implicit sync (Daniel Vetter)
- dma-buf cleanups (Christian König)

Driver Changes:
- Fixes for the otm8009a panel driver (Philippe Cornu)
- Add Innolux TV123WAM panel driver support (Sandeep Panda)
- Move GEM BO to drm_framebuffer in a few drivers (Daniel Stone)
- i915 pinning improvements (Chris Wilson)
- Stop consulting plane->fb/crtc in a few drivers (Ville Syrjälä)

Signed-off-by: Dave Airlie <airlied@redhat.com>

Link: https://patchwork.freedesktop.org/patch/msgid/20180621105428.GA20795@juma
Dave Airlie 2018-06-22 12:56:48 +10:00
commit f4366e44ef
131 changed files with 2282 additions and 1245 deletions


@ -0,0 +1,29 @@
AU Optronics Corporation 7.0" FHD (800 x 480) TFT LCD panel
Required properties:
- compatible: should be "auo,g070vvn01"
- backlight: phandle of the backlight device attached to the panel
- power-supply: single regulator to provide the supply voltage
Required nodes:
- port: Parallel port mapping to connect this display
This panel needs a single power supply voltage. Its backlight is controlled
via a PWM signal.
Example:
--------
Example device-tree definition when connected to an iMX6Q-based board
lcd_panel: lcd-panel {
compatible = "auo,g070vvn01";
backlight = <&backlight_lcd>;
power-supply = <&reg_display>;
port {
lcd_panel_in: endpoint {
remote-endpoint = <&lcd_display_out>;
};
};
};


@ -0,0 +1,20 @@
Innolux TV123WAM 12.3 inch eDP 2K display panel
This binding is compatible with the simple-panel binding, which is specified
in simple-panel.txt in this directory.
Required properties:
- compatible: should be "innolux,tv123wam"
- power-supply: regulator to provide the supply voltage
Optional properties:
- enable-gpios: GPIO pin to enable or disable the panel
- backlight: phandle of the backlight device attached to the panel
Example:
panel_edp: panel-edp {
compatible = "innolux,tv123wam";
enable-gpios = <&msmgpio 31 GPIO_ACTIVE_LOW>;
power-supply = <&pm8916_l2>;
backlight = <&backlight>;
};


@ -373,6 +373,15 @@ Connector Functions Reference
.. kernel-doc:: drivers/gpu/drm/drm_connector.c
:export:
Writeback Connectors
--------------------
.. kernel-doc:: drivers/gpu/drm/drm_writeback.c
:doc: overview
.. kernel-doc:: drivers/gpu/drm/drm_writeback.c
:export:
Encoder Abstraction
===================
@ -517,6 +526,12 @@ Standard Connector Properties
.. kernel-doc:: drivers/gpu/drm/drm_connector.c
:doc: standard connector properties
HDMI Specific Connector Properties
----------------------------------
.. kernel-doc:: drivers/gpu/drm/drm_connector.c
:doc: HDMI connector properties
Plane Composition Properties
----------------------------


@ -17,6 +17,7 @@ Owner Module/Drivers,Group,Property Name,Type,Property Values,Object attached,De
,Virtual GPU,“suggested X”,RANGE,"Min=0, Max=0xffffffff",Connector,property to suggest an X offset for a connector
,,“suggested Y”,RANGE,"Min=0, Max=0xffffffff",Connector,property to suggest a Y offset for a connector
,Optional,"""aspect ratio""",ENUM,"{ ""None"", ""4:3"", ""16:9"" }",Connector,TBD
,Optional,"""content type""",ENUM,"{ ""No Data"", ""Graphics"", ""Photo"", ""Cinema"", ""Game"" }",Connector,TBD
i915,Generic,"""Broadcast RGB""",ENUM,"{ ""Automatic"", ""Full"", ""Limited 16:235"" }",Connector,"When this property is set to Limited 16:235 and CTM is set, the hardware will be programmed with the result of the multiplication of CTM by the limited range matrix to ensure the pixels normally in the range 0..1.0 are remapped to the range 16/255..235/255."
,,“audio”,ENUM,"{ ""force-dvi"", ""off"", ""auto"", ""on"" }",Connector,TBD
,SDVO-TV,“mode”,ENUM,"{ ""NTSC_M"", ""NTSC_J"", ""NTSC_443"", ""PAL_B"" } etc.",Connector,TBD

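For illustration, a hedged userspace sketch of driving the new "content type" enum through the libdrm atomic API; the connector and property IDs are assumed to have been discovered beforehand (e.g. via drmModeObjectGetProperties()), and the helper name is hypothetical:

#include <errno.h>
#include <stdint.h>
#include <xf86drmMode.h>

/* Set the "content type" connector property in an atomic commit.
 * content_type_prop_id and the enum value index must already have been
 * looked up from the connector's property list. */
static int set_hdmi_content_type(int fd, uint32_t connector_id,
				 uint32_t content_type_prop_id, uint64_t value)
{
	drmModeAtomicReq *req = drmModeAtomicAlloc();
	int ret;

	if (!req)
		return -ENOMEM;

	drmModeAtomicAddProperty(req, connector_id, content_type_prop_id, value);
	ret = drmModeAtomicCommit(fd, req, 0, NULL);
	drmModeAtomicFree(req);
	return ret;
}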


@ -405,7 +405,6 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
|| !exp_info->ops->map_dma_buf
|| !exp_info->ops->unmap_dma_buf
|| !exp_info->ops->release
|| !exp_info->ops->map_atomic
|| !exp_info->ops->map
|| !exp_info->ops->mmap)) {
return ERR_PTR(-EINVAL);
@ -568,7 +567,7 @@ struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
mutex_lock(&dmabuf->lock);
if (dmabuf->ops->attach) {
ret = dmabuf->ops->attach(dmabuf, dev, attach);
ret = dmabuf->ops->attach(dmabuf, attach);
if (ret)
goto err_attach;
}
@ -687,26 +686,14 @@ EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);
* void \*dma_buf_kmap(struct dma_buf \*, unsigned long);
* void dma_buf_kunmap(struct dma_buf \*, unsigned long, void \*);
*
* There are also atomic variants of these interfaces. Like for kmap they
* facilitate non-blocking fast-paths. Neither the importer nor the exporter
* (in the callback) is allowed to block when using these.
*
* Interfaces::
* void \*dma_buf_kmap_atomic(struct dma_buf \*, unsigned long);
* void dma_buf_kunmap_atomic(struct dma_buf \*, unsigned long, void \*);
*
* For importers all the restrictions of using kmap apply, like the limited
* supply of kmap_atomic slots. Hence an importer shall only hold onto at
* max 2 atomic dma_buf kmaps at the same time (in any given process context).
* Implementing the functions is optional for exporters and for importers all
* the restrictions of using kmap apply.
*
* dma_buf kmap calls outside of the range specified in begin_cpu_access are
* undefined. If the range is not PAGE_SIZE aligned, kmap needs to succeed on
* the partial chunks at the beginning and end but may return stale or bogus
* data outside of the range (in these partial chunks).
*
* Note that these calls need to always succeed. The exporter needs to
* complete any preparations that might fail in begin_cpu_access.
*
 * For some cases the overhead of kmap can be too high, so a vmap interface
 * is introduced. This interface should be used very carefully, as vmalloc
 * space is a limited resource on many architectures.
@ -859,41 +846,6 @@ int dma_buf_end_cpu_access(struct dma_buf *dmabuf,
}
EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access);
/**
* dma_buf_kmap_atomic - Map a page of the buffer object into kernel address
* space. The same restrictions as for kmap_atomic and friends apply.
* @dmabuf: [in] buffer to map page from.
* @page_num: [in] page in PAGE_SIZE units to map.
*
* This call must always succeed, any necessary preparations that might fail
* need to be done in begin_cpu_access.
*/
void *dma_buf_kmap_atomic(struct dma_buf *dmabuf, unsigned long page_num)
{
WARN_ON(!dmabuf);
return dmabuf->ops->map_atomic(dmabuf, page_num);
}
EXPORT_SYMBOL_GPL(dma_buf_kmap_atomic);
/**
* dma_buf_kunmap_atomic - Unmap a page obtained by dma_buf_kmap_atomic.
* @dmabuf: [in] buffer to unmap page from.
* @page_num: [in] page in PAGE_SIZE units to unmap.
* @vaddr: [in] kernel space pointer obtained from dma_buf_kmap_atomic.
*
* This call must always succeed.
*/
void dma_buf_kunmap_atomic(struct dma_buf *dmabuf, unsigned long page_num,
void *vaddr)
{
WARN_ON(!dmabuf);
if (dmabuf->ops->unmap_atomic)
dmabuf->ops->unmap_atomic(dmabuf, page_num, vaddr);
}
EXPORT_SYMBOL_GPL(dma_buf_kunmap_atomic);
/**
* dma_buf_kmap - Map a page of the buffer object into kernel address space. The
* same restrictions as for kmap and friends apply.
@ -907,6 +859,8 @@ void *dma_buf_kmap(struct dma_buf *dmabuf, unsigned long page_num)
{
WARN_ON(!dmabuf);
if (!dmabuf->ops->map)
return NULL;
return dmabuf->ops->map(dmabuf, page_num);
}
EXPORT_SYMBOL_GPL(dma_buf_kmap);
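As a reference for the remaining (non-atomic) interface documented above, a minimal importer-side sketch, assuming the buffer is already attached and that CPU access is bracketed by begin/end_cpu_access as required; the helper name is hypothetical and error handling is trimmed:

#include <linux/dma-buf.h>
#include <linux/string.h>

/* Copy the first page of a dma-buf into 'dst' via the kmap interface. */
static int importer_copy_first_page(struct dma_buf *buf, void *dst)
{
	void *vaddr;
	int ret;

	ret = dma_buf_begin_cpu_access(buf, DMA_FROM_DEVICE);
	if (ret)
		return ret;

	vaddr = dma_buf_kmap(buf, 0);	/* page 0, in PAGE_SIZE units */
	if (vaddr) {
		memcpy(dst, vaddr, PAGE_SIZE);
		dma_buf_kunmap(buf, 0, vaddr);
	}

	dma_buf_end_cpu_access(buf, DMA_FROM_DEVICE);
	return vaddr ? 0 : -ENOMEM;
}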


@ -18,7 +18,7 @@ drm-y := drm_auth.o drm_bufs.o drm_cache.o \
drm_encoder.o drm_mode_object.o drm_property.o \
drm_plane.o drm_color_mgmt.o drm_print.o \
drm_dumb_buffers.o drm_mode_config.o drm_vblank.o \
drm_syncobj.o drm_lease.o
drm_syncobj.o drm_lease.o drm_writeback.o
drm-$(CONFIG_DRM_LIB_RANDOM) += lib/drm_random.o
drm-$(CONFIG_DRM_VM) += drm_vm.o


@ -133,7 +133,6 @@ error:
}
static int amdgpu_gem_map_attach(struct dma_buf *dma_buf,
struct device *target_dev,
struct dma_buf_attachment *attach)
{
struct drm_gem_object *obj = dma_buf->priv;
@ -141,7 +140,7 @@ static int amdgpu_gem_map_attach(struct dma_buf *dma_buf,
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
long r;
r = drm_gem_map_attach(dma_buf, target_dev, attach);
r = drm_gem_map_attach(dma_buf, attach);
if (r)
return r;
@ -245,9 +244,7 @@ static const struct dma_buf_ops amdgpu_dmabuf_ops = {
.release = drm_gem_dmabuf_release,
.begin_cpu_access = amdgpu_gem_begin_cpu_access,
.map = drm_gem_dmabuf_kmap,
.map_atomic = drm_gem_dmabuf_kmap_atomic,
.unmap = drm_gem_dmabuf_kunmap,
.unmap_atomic = drm_gem_dmabuf_kunmap_atomic,
.mmap = drm_gem_dmabuf_mmap,
.vmap = drm_gem_dmabuf_vmap,
.vunmap = drm_gem_dmabuf_vunmap,


@ -3914,8 +3914,6 @@ static void amdgpu_dm_do_flip(struct drm_crtc *crtc,
/* Flip */
spin_lock_irqsave(&crtc->dev->event_lock, flags);
/* update crtc fb */
crtc->primary->fb = fb;
WARN_ON(acrtc->pflip_status != AMDGPU_FLIP_NONE);
WARN_ON(!acrtc_state->stream);


@ -136,9 +136,6 @@ static void arc_pgu_crtc_atomic_disable(struct drm_crtc *crtc,
{
struct arcpgu_drm_private *arcpgu = crtc_to_arcpgu_priv(crtc);
if (!crtc->primary->fb)
return;
clk_disable_unprepare(arcpgu->clk);
arc_pgu_write(arcpgu, ARCPGU_REG_CTRL,
arc_pgu_read(arcpgu, ARCPGU_REG_CTRL) &


@ -7,30 +7,15 @@
*/
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include "armada_drm.h"
#include "armada_fb.h"
#include "armada_gem.h"
#include "armada_hw.h"
static void armada_fb_destroy(struct drm_framebuffer *fb)
{
struct armada_framebuffer *dfb = drm_fb_to_armada_fb(fb);
drm_framebuffer_cleanup(&dfb->fb);
drm_gem_object_put_unlocked(&dfb->obj->obj);
kfree(dfb);
}
static int armada_fb_create_handle(struct drm_framebuffer *fb,
struct drm_file *dfile, unsigned int *handle)
{
struct armada_framebuffer *dfb = drm_fb_to_armada_fb(fb);
return drm_gem_handle_create(dfile, &dfb->obj->obj, handle);
}
static const struct drm_framebuffer_funcs armada_fb_funcs = {
.destroy = armada_fb_destroy,
.create_handle = armada_fb_create_handle,
.destroy = drm_gem_fb_destroy,
.create_handle = drm_gem_fb_create_handle,
};
struct armada_framebuffer *armada_framebuffer_create(struct drm_device *dev,
@ -78,7 +63,7 @@ struct armada_framebuffer *armada_framebuffer_create(struct drm_device *dev,
dfb->fmt = format;
dfb->mod = config;
dfb->obj = obj;
dfb->fb.obj[0] = &obj->obj;
drm_helper_mode_fill_fb_struct(dev, &dfb->fb, mode);


@ -10,13 +10,12 @@
struct armada_framebuffer {
struct drm_framebuffer fb;
struct armada_gem_object *obj;
uint8_t fmt;
uint8_t mod;
};
#define drm_fb_to_armada_fb(dfb) \
container_of(dfb, struct armada_framebuffer, fb)
#define drm_fb_obj(fb) drm_fb_to_armada_fb(fb)->obj
#define drm_fb_obj(fb) drm_to_armada_gem((fb)->obj[0])
struct armada_framebuffer *armada_framebuffer_create(struct drm_device *,
const struct drm_mode_fb_cmd2 *, struct armada_gem_object *);


@ -490,8 +490,6 @@ static const struct dma_buf_ops armada_gem_prime_dmabuf_ops = {
.map_dma_buf = armada_gem_prime_map_dma_buf,
.unmap_dma_buf = armada_gem_prime_unmap_dma_buf,
.release = drm_gem_dmabuf_release,
.map_atomic = armada_gem_dmabuf_no_kmap,
.unmap_atomic = armada_gem_dmabuf_no_kunmap,
.map = armada_gem_dmabuf_no_kmap,
.unmap = armada_gem_dmabuf_no_kunmap,
.mmap = armada_gem_dmabuf_mmap,


@ -681,6 +681,7 @@ static void atmel_hlcdc_dc_unload(struct drm_device *dev)
drm_fb_cma_fbdev_fini(dev);
flush_workqueue(dc->wq);
drm_kms_helper_poll_fini(dev);
drm_atomic_helper_shutdown(dev);
drm_mode_config_cleanup(dev);
pm_runtime_get_sync(dev->dev);


@ -412,9 +412,10 @@ static void atmel_hlcdc_plane_update_format(struct atmel_hlcdc_plane *plane,
ATMEL_HLCDC_LAYER_FORMAT_CFG, cfg);
}
static void atmel_hlcdc_plane_update_clut(struct atmel_hlcdc_plane *plane)
static void atmel_hlcdc_plane_update_clut(struct atmel_hlcdc_plane *plane,
struct atmel_hlcdc_plane_state *state)
{
struct drm_crtc *crtc = plane->base.crtc;
struct drm_crtc *crtc = state->base.crtc;
struct drm_color_lut *lut;
int idx;
@ -779,7 +780,7 @@ static void atmel_hlcdc_plane_atomic_update(struct drm_plane *p,
atmel_hlcdc_plane_update_pos_and_size(plane, state);
atmel_hlcdc_plane_update_general_settings(plane, state);
atmel_hlcdc_plane_update_format(plane, state);
atmel_hlcdc_plane_update_clut(plane);
atmel_hlcdc_plane_update_clut(plane, state);
atmel_hlcdc_plane_update_buffers(plane, state);
atmel_hlcdc_plane_update_disc_area(plane, state);
@ -816,16 +817,6 @@ static void atmel_hlcdc_plane_atomic_disable(struct drm_plane *p,
atmel_hlcdc_layer_read_reg(&plane->layer, ATMEL_HLCDC_LAYER_ISR);
}
static void atmel_hlcdc_plane_destroy(struct drm_plane *p)
{
struct atmel_hlcdc_plane *plane = drm_plane_to_atmel_hlcdc_plane(p);
if (plane->base.fb)
drm_framebuffer_put(plane->base.fb);
drm_plane_cleanup(p);
}
static int atmel_hlcdc_plane_init_properties(struct atmel_hlcdc_plane *plane)
{
const struct atmel_hlcdc_layer_desc *desc = plane->layer.desc;
@ -1002,7 +993,7 @@ static void atmel_hlcdc_plane_atomic_destroy_state(struct drm_plane *p,
static const struct drm_plane_funcs layer_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = atmel_hlcdc_plane_destroy,
.destroy = drm_plane_cleanup,
.reset = atmel_hlcdc_plane_reset,
.atomic_duplicate_state = atmel_hlcdc_plane_atomic_duplicate_state,
.atomic_destroy_state = atmel_hlcdc_plane_atomic_destroy_state,


@ -82,9 +82,11 @@ config DRM_PARADE_PS8622
config DRM_SIL_SII8620
tristate "Silicon Image SII8620 HDMI/MHL bridge"
depends on OF && RC_CORE
depends on OF
select DRM_KMS_HELPER
imply EXTCON
select INPUT
select RC_CORE
help
Silicon Image SII8620 HDMI/MHL bridge chip driver.


@ -1337,7 +1337,7 @@ static const struct mipi_dsi_host_ops cdns_dsi_ops = {
.transfer = cdns_dsi_transfer,
};
static int cdns_dsi_resume(struct device *dev)
static int __maybe_unused cdns_dsi_resume(struct device *dev)
{
struct cdns_dsi *dsi = dev_get_drvdata(dev);
@ -1350,7 +1350,7 @@ static int cdns_dsi_resume(struct device *dev)
return 0;
}
static int cdns_dsi_suspend(struct device *dev)
static int __maybe_unused cdns_dsi_suspend(struct device *dev)
{
struct cdns_dsi *dsi = dev_get_drvdata(dev);


@ -92,7 +92,6 @@
#define to_cirrus_crtc(x) container_of(x, struct cirrus_crtc, base)
#define to_cirrus_encoder(x) container_of(x, struct cirrus_encoder, base)
#define to_cirrus_framebuffer(x) container_of(x, struct cirrus_framebuffer, base)
struct cirrus_crtc {
struct drm_crtc base;
@ -117,11 +116,6 @@ struct cirrus_connector {
struct drm_connector base;
};
struct cirrus_framebuffer {
struct drm_framebuffer base;
struct drm_gem_object *obj;
};
struct cirrus_mc {
resource_size_t vram_size;
resource_size_t vram_base;
@ -152,7 +146,7 @@ struct cirrus_device {
struct cirrus_fbdev {
struct drm_fb_helper helper;
struct cirrus_framebuffer gfb;
struct drm_framebuffer gfb;
void *sysram;
int size;
int x1, y1, x2, y2; /* dirty rect */
@ -198,7 +192,7 @@ int cirrus_dumb_create(struct drm_file *file,
struct drm_mode_create_dumb *args);
int cirrus_framebuffer_init(struct drm_device *dev,
struct cirrus_framebuffer *gfb,
struct drm_framebuffer *gfb,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object *obj);


@ -22,14 +22,14 @@ static void cirrus_dirty_update(struct cirrus_fbdev *afbdev,
struct drm_gem_object *obj;
struct cirrus_bo *bo;
int src_offset, dst_offset;
int bpp = afbdev->gfb.base.format->cpp[0];
int bpp = afbdev->gfb.format->cpp[0];
int ret = -EBUSY;
bool unmap = false;
bool store_for_later = false;
int x2, y2;
unsigned long flags;
obj = afbdev->gfb.obj;
obj = afbdev->gfb.obj[0];
bo = gem_to_cirrus_bo(obj);
/*
@ -82,7 +82,7 @@ static void cirrus_dirty_update(struct cirrus_fbdev *afbdev,
}
for (i = y; i < y + height; i++) {
/* assume equal stride for now */
src_offset = dst_offset = i * afbdev->gfb.base.pitches[0] + (x * bpp);
src_offset = dst_offset = i * afbdev->gfb.pitches[0] + (x * bpp);
memcpy_toio(bo->kmap.virtual + src_offset, afbdev->sysram + src_offset, width * bpp);
}
@ -204,7 +204,7 @@ static int cirrusfb_create(struct drm_fb_helper *helper,
gfbdev->sysram = sysram;
gfbdev->size = size;
fb = &gfbdev->gfb.base;
fb = &gfbdev->gfb;
if (!fb) {
DRM_INFO("fb is NULL\n");
return -EINVAL;
@ -246,19 +246,19 @@ static int cirrusfb_create(struct drm_fb_helper *helper,
static int cirrus_fbdev_destroy(struct drm_device *dev,
struct cirrus_fbdev *gfbdev)
{
struct cirrus_framebuffer *gfb = &gfbdev->gfb;
struct drm_framebuffer *gfb = &gfbdev->gfb;
drm_fb_helper_unregister_fbi(&gfbdev->helper);
if (gfb->obj) {
drm_gem_object_put_unlocked(gfb->obj);
gfb->obj = NULL;
if (gfb->obj[0]) {
drm_gem_object_put_unlocked(gfb->obj[0]);
gfb->obj[0] = NULL;
}
vfree(gfbdev->sysram);
drm_fb_helper_fini(&gfbdev->helper);
drm_framebuffer_unregister_private(&gfb->base);
drm_framebuffer_cleanup(&gfb->base);
drm_framebuffer_unregister_private(gfb);
drm_framebuffer_cleanup(gfb);
return 0;
}


@ -10,42 +10,25 @@
*/
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include "cirrus_drv.h"
static int cirrus_create_handle(struct drm_framebuffer *fb,
struct drm_file* file_priv,
unsigned int* handle)
{
struct cirrus_framebuffer *cirrus_fb = to_cirrus_framebuffer(fb);
return drm_gem_handle_create(file_priv, cirrus_fb->obj, handle);
}
static void cirrus_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
struct cirrus_framebuffer *cirrus_fb = to_cirrus_framebuffer(fb);
drm_gem_object_put_unlocked(cirrus_fb->obj);
drm_framebuffer_cleanup(fb);
kfree(fb);
}
static const struct drm_framebuffer_funcs cirrus_fb_funcs = {
.create_handle = cirrus_create_handle,
.destroy = cirrus_user_framebuffer_destroy,
.create_handle = drm_gem_fb_create_handle,
.destroy = drm_gem_fb_destroy,
};
int cirrus_framebuffer_init(struct drm_device *dev,
struct cirrus_framebuffer *gfb,
struct drm_framebuffer *gfb,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object *obj)
{
int ret;
drm_helper_mode_fill_fb_struct(dev, &gfb->base, mode_cmd);
gfb->obj = obj;
ret = drm_framebuffer_init(dev, &gfb->base, &cirrus_fb_funcs);
drm_helper_mode_fill_fb_struct(dev, gfb, mode_cmd);
gfb->obj[0] = obj;
ret = drm_framebuffer_init(dev, gfb, &cirrus_fb_funcs);
if (ret) {
DRM_ERROR("drm_framebuffer_init failed: %d\n", ret);
return ret;
@ -60,7 +43,7 @@ cirrus_user_framebuffer_create(struct drm_device *dev,
{
struct cirrus_device *cdev = dev->dev_private;
struct drm_gem_object *obj;
struct cirrus_framebuffer *cirrus_fb;
struct drm_framebuffer *fb;
u32 bpp;
int ret;
@ -74,19 +57,19 @@ cirrus_user_framebuffer_create(struct drm_device *dev,
if (obj == NULL)
return ERR_PTR(-ENOENT);
cirrus_fb = kzalloc(sizeof(*cirrus_fb), GFP_KERNEL);
if (!cirrus_fb) {
fb = kzalloc(sizeof(*fb), GFP_KERNEL);
if (!fb) {
drm_gem_object_put_unlocked(obj);
return ERR_PTR(-ENOMEM);
}
ret = cirrus_framebuffer_init(dev, cirrus_fb, mode_cmd, obj);
ret = cirrus_framebuffer_init(dev, fb, mode_cmd, obj);
if (ret) {
drm_gem_object_put_unlocked(obj);
kfree(cirrus_fb);
kfree(fb);
return ERR_PTR(ret);
}
return &cirrus_fb->base;
return fb;
}
static const struct drm_mode_config_funcs cirrus_mode_funcs = {


@ -101,17 +101,13 @@ static int cirrus_crtc_do_set_base(struct drm_crtc *crtc,
int x, int y, int atomic)
{
struct cirrus_device *cdev = crtc->dev->dev_private;
struct drm_gem_object *obj;
struct cirrus_framebuffer *cirrus_fb;
struct cirrus_bo *bo;
int ret;
u64 gpu_addr;
/* push the previous fb to system ram */
if (!atomic && fb) {
cirrus_fb = to_cirrus_framebuffer(fb);
obj = cirrus_fb->obj;
bo = gem_to_cirrus_bo(obj);
bo = gem_to_cirrus_bo(fb->obj[0]);
ret = cirrus_bo_reserve(bo, false);
if (ret)
return ret;
@ -119,9 +115,7 @@ static int cirrus_crtc_do_set_base(struct drm_crtc *crtc,
cirrus_bo_unreserve(bo);
}
cirrus_fb = to_cirrus_framebuffer(crtc->primary->fb);
obj = cirrus_fb->obj;
bo = gem_to_cirrus_bo(obj);
bo = gem_to_cirrus_bo(crtc->primary->fb->obj[0]);
ret = cirrus_bo_reserve(bo, false);
if (ret)
@ -133,7 +127,7 @@ static int cirrus_crtc_do_set_base(struct drm_crtc *crtc,
return ret;
}
if (&cdev->mode_info.gfbdev->gfb == cirrus_fb) {
if (&cdev->mode_info.gfbdev->gfb == crtc->primary->fb) {
/* if pushing console in kmap it */
ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
if (ret)


@ -30,6 +30,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_mode.h>
#include <drm/drm_print.h>
#include <drm/drm_writeback.h>
#include <linux/sync_file.h>
#include "drm_crtc_internal.h"
@ -325,6 +326,35 @@ static s32 __user *get_out_fence_for_crtc(struct drm_atomic_state *state,
return fence_ptr;
}
static int set_out_fence_for_connector(struct drm_atomic_state *state,
struct drm_connector *connector,
s32 __user *fence_ptr)
{
unsigned int index = drm_connector_index(connector);
if (!fence_ptr)
return 0;
if (put_user(-1, fence_ptr))
return -EFAULT;
state->connectors[index].out_fence_ptr = fence_ptr;
return 0;
}
static s32 __user *get_out_fence_for_connector(struct drm_atomic_state *state,
struct drm_connector *connector)
{
unsigned int index = drm_connector_index(connector);
s32 __user *fence_ptr;
fence_ptr = state->connectors[index].out_fence_ptr;
state->connectors[index].out_fence_ptr = NULL;
return fence_ptr;
}
/**
* drm_atomic_set_mode_for_crtc - set mode for CRTC
* @state: the CRTC whose incoming state to update
@ -339,6 +369,7 @@ static s32 __user *get_out_fence_for_crtc(struct drm_atomic_state *state,
int drm_atomic_set_mode_for_crtc(struct drm_crtc_state *state,
const struct drm_display_mode *mode)
{
struct drm_crtc *crtc = state->crtc;
struct drm_mode_modeinfo umode;
/* Early return for no change. */
@ -359,13 +390,13 @@ int drm_atomic_set_mode_for_crtc(struct drm_crtc_state *state,
drm_mode_copy(&state->mode, mode);
state->enable = true;
DRM_DEBUG_ATOMIC("Set [MODE:%s] for CRTC state %p\n",
mode->name, state);
DRM_DEBUG_ATOMIC("Set [MODE:%s] for [CRTC:%d:%s] state %p\n",
mode->name, crtc->base.id, crtc->name, state);
} else {
memset(&state->mode, 0, sizeof(state->mode));
state->enable = false;
DRM_DEBUG_ATOMIC("Set [NOMODE] for CRTC state %p\n",
state);
DRM_DEBUG_ATOMIC("Set [NOMODE] for [CRTC:%d:%s] state %p\n",
crtc->base.id, crtc->name, state);
}
return 0;
@ -388,6 +419,8 @@ EXPORT_SYMBOL(drm_atomic_set_mode_for_crtc);
int drm_atomic_set_mode_prop_for_crtc(struct drm_crtc_state *state,
struct drm_property_blob *blob)
{
struct drm_crtc *crtc = state->crtc;
if (blob == state->mode_blob)
return 0;
@ -397,19 +430,34 @@ int drm_atomic_set_mode_prop_for_crtc(struct drm_crtc_state *state,
memset(&state->mode, 0, sizeof(state->mode));
if (blob) {
if (blob->length != sizeof(struct drm_mode_modeinfo) ||
drm_mode_convert_umode(state->crtc->dev, &state->mode,
blob->data))
int ret;
if (blob->length != sizeof(struct drm_mode_modeinfo)) {
DRM_DEBUG_ATOMIC("[CRTC:%d:%s] bad mode blob length: %zu\n",
crtc->base.id, crtc->name,
blob->length);
return -EINVAL;
}
ret = drm_mode_convert_umode(crtc->dev,
&state->mode, blob->data);
if (ret) {
DRM_DEBUG_ATOMIC("[CRTC:%d:%s] invalid mode (ret=%d, status=%s):\n",
crtc->base.id, crtc->name,
ret, drm_get_mode_status_name(state->mode.status));
drm_mode_debug_printmodeline(&state->mode);
return -EINVAL;
}
state->mode_blob = drm_property_blob_get(blob);
state->enable = true;
DRM_DEBUG_ATOMIC("Set [MODE:%s] for CRTC state %p\n",
state->mode.name, state);
DRM_DEBUG_ATOMIC("Set [MODE:%s] for [CRTC:%d:%s] state %p\n",
state->mode.name, crtc->base.id, crtc->name,
state);
} else {
state->enable = false;
DRM_DEBUG_ATOMIC("Set [NOMODE] for CRTC state %p\n",
state);
DRM_DEBUG_ATOMIC("Set [NOMODE] for [CRTC:%d:%s] state %p\n",
crtc->base.id, crtc->name, state);
}
return 0;
@ -539,10 +587,14 @@ int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
return -EFAULT;
set_out_fence_for_crtc(state->state, crtc, fence_ptr);
} else if (crtc->funcs->atomic_set_property)
} else if (crtc->funcs->atomic_set_property) {
return crtc->funcs->atomic_set_property(crtc, state, property, val);
else
} else {
DRM_DEBUG_ATOMIC("[CRTC:%d:%s] unknown property [PROP:%d:%s]]\n",
crtc->base.id, crtc->name,
property->base.id, property->name);
return -EINVAL;
}
return 0;
}
@ -676,6 +728,51 @@ static void drm_atomic_crtc_print_state(struct drm_printer *p,
crtc->funcs->atomic_print_state(p, state);
}
/**
* drm_atomic_connector_check - check connector state
* @connector: connector to check
* @state: connector state to check
*
* Provides core sanity checks for connector state.
*
* RETURNS:
* Zero on success, error code on failure
*/
static int drm_atomic_connector_check(struct drm_connector *connector,
struct drm_connector_state *state)
{
struct drm_crtc_state *crtc_state;
struct drm_writeback_job *writeback_job = state->writeback_job;
if ((connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK) || !writeback_job)
return 0;
if (writeback_job->fb && !state->crtc) {
DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] framebuffer without CRTC\n",
connector->base.id, connector->name);
return -EINVAL;
}
if (state->crtc)
crtc_state = drm_atomic_get_existing_crtc_state(state->state,
state->crtc);
if (writeback_job->fb && !crtc_state->active) {
DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] has framebuffer, but [CRTC:%d] is off\n",
connector->base.id, connector->name,
state->crtc->base.id);
return -EINVAL;
}
if (writeback_job->out_fence && !writeback_job->fb) {
DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] requesting out-fence without framebuffer\n",
connector->base.id, connector->name);
return -EINVAL;
}
return 0;
}
/**
* drm_atomic_get_plane_state - get plane state
* @state: global atomic state object
@ -700,6 +797,11 @@ drm_atomic_get_plane_state(struct drm_atomic_state *state,
WARN_ON(!state->acquire_ctx);
/* the legacy pointers should never be set */
WARN_ON(plane->fb);
WARN_ON(plane->old_fb);
WARN_ON(plane->crtc);
plane_state = drm_atomic_get_existing_plane_state(state, plane);
if (plane_state)
return plane_state;
@ -794,8 +896,11 @@ static int drm_atomic_plane_set_property(struct drm_plane *plane,
} else if (property == plane->alpha_property) {
state->alpha = val;
} else if (property == plane->rotation_property) {
if (!is_power_of_2(val & DRM_MODE_ROTATE_MASK))
if (!is_power_of_2(val & DRM_MODE_ROTATE_MASK)) {
DRM_DEBUG_ATOMIC("[PLANE:%d:%s] bad rotation bitmask: 0x%llx\n",
plane->base.id, plane->name, val);
return -EINVAL;
}
state->rotation = val;
} else if (property == plane->zpos_property) {
state->zpos = val;
@ -807,6 +912,9 @@ static int drm_atomic_plane_set_property(struct drm_plane *plane,
return plane->funcs->atomic_set_property(plane, state,
property, val);
} else {
DRM_DEBUG_ATOMIC("[PLANE:%d:%s] unknown property [PROP:%d:%s]]\n",
plane->base.id, plane->name,
property->base.id, property->name);
return -EINVAL;
}
@ -914,10 +1022,12 @@ static int drm_atomic_plane_check(struct drm_plane *plane,
/* either *both* CRTC and FB must be set, or neither */
if (state->crtc && !state->fb) {
DRM_DEBUG_ATOMIC("CRTC set but no FB\n");
DRM_DEBUG_ATOMIC("[PLANE:%d:%s] CRTC set but no FB\n",
plane->base.id, plane->name);
return -EINVAL;
} else if (state->fb && !state->crtc) {
DRM_DEBUG_ATOMIC("FB set but no CRTC\n");
DRM_DEBUG_ATOMIC("[PLANE:%d:%s] FB set but no CRTC\n",
plane->base.id, plane->name);
return -EINVAL;
}
@ -927,7 +1037,9 @@ static int drm_atomic_plane_check(struct drm_plane *plane,
/* Check whether this plane is usable on this CRTC */
if (!(plane->possible_crtcs & drm_crtc_mask(state->crtc))) {
DRM_DEBUG_ATOMIC("Invalid crtc for plane\n");
DRM_DEBUG_ATOMIC("Invalid [CRTC:%d:%s] for [PLANE:%d:%s]\n",
state->crtc->base.id, state->crtc->name,
plane->base.id, plane->name);
return -EINVAL;
}
@ -936,7 +1048,8 @@ static int drm_atomic_plane_check(struct drm_plane *plane,
state->fb->modifier);
if (ret) {
struct drm_format_name_buf format_name;
DRM_DEBUG_ATOMIC("Invalid pixel format %s, modifier 0x%llx\n",
DRM_DEBUG_ATOMIC("[PLANE:%d:%s] invalid pixel format %s, modifier 0x%llx\n",
plane->base.id, plane->name,
drm_get_format_name(state->fb->format->format,
&format_name),
state->fb->modifier);
@ -948,7 +1061,8 @@ static int drm_atomic_plane_check(struct drm_plane *plane,
state->crtc_x > INT_MAX - (int32_t) state->crtc_w ||
state->crtc_h > INT_MAX ||
state->crtc_y > INT_MAX - (int32_t) state->crtc_h) {
DRM_DEBUG_ATOMIC("Invalid CRTC coordinates %ux%u+%d+%d\n",
DRM_DEBUG_ATOMIC("[PLANE:%d:%s] invalid CRTC coordinates %ux%u+%d+%d\n",
plane->base.id, plane->name,
state->crtc_w, state->crtc_h,
state->crtc_x, state->crtc_y);
return -ERANGE;
@ -962,8 +1076,9 @@ static int drm_atomic_plane_check(struct drm_plane *plane,
state->src_x > fb_width - state->src_w ||
state->src_h > fb_height ||
state->src_y > fb_height - state->src_h) {
DRM_DEBUG_ATOMIC("Invalid source coordinates "
DRM_DEBUG_ATOMIC("[PLANE:%d:%s] invalid source coordinates "
"%u.%06ux%u.%06u+%u.%06u+%u.%06u (fb %ux%u)\n",
plane->base.id, plane->name,
state->src_w >> 16, ((state->src_w & 0xffff) * 15625) >> 10,
state->src_h >> 16, ((state->src_h & 0xffff) * 15625) >> 10,
state->src_x >> 16, ((state->src_x & 0xffff) * 15625) >> 10,
@ -1120,6 +1235,7 @@ drm_atomic_get_private_obj_state(struct drm_atomic_state *state,
state->private_objs[index].old_state = obj->state;
state->private_objs[index].new_state = obj_state;
state->private_objs[index].ptr = obj;
obj_state->state = state;
state->num_private_objs = num_objs;
@ -1278,6 +1394,8 @@ static int drm_atomic_connector_set_property(struct drm_connector *connector,
state->link_status = val;
} else if (property == config->aspect_ratio_property) {
state->picture_aspect_ratio = val;
} else if (property == config->content_type_property) {
state->content_type = val;
} else if (property == connector->scaling_mode_property) {
state->scaling_mode = val;
} else if (property == connector->content_protection_property) {
@ -1286,10 +1404,24 @@ static int drm_atomic_connector_set_property(struct drm_connector *connector,
return -EINVAL;
}
state->content_protection = val;
} else if (property == config->writeback_fb_id_property) {
struct drm_framebuffer *fb = drm_framebuffer_lookup(dev, NULL, val);
int ret = drm_atomic_set_writeback_fb_for_connector(state, fb);
if (fb)
drm_framebuffer_put(fb);
return ret;
} else if (property == config->writeback_out_fence_ptr_property) {
s32 __user *fence_ptr = u64_to_user_ptr(val);
return set_out_fence_for_connector(state->state, connector,
fence_ptr);
} else if (connector->funcs->atomic_set_property) {
return connector->funcs->atomic_set_property(connector,
state, property, val);
} else {
DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] unknown property [PROP:%d:%s]]\n",
connector->base.id, connector->name,
property->base.id, property->name);
return -EINVAL;
}
@ -1363,10 +1495,17 @@ drm_atomic_connector_get_property(struct drm_connector *connector,
*val = state->link_status;
} else if (property == config->aspect_ratio_property) {
*val = state->picture_aspect_ratio;
} else if (property == config->content_type_property) {
*val = state->content_type;
} else if (property == connector->scaling_mode_property) {
*val = state->scaling_mode;
} else if (property == connector->content_protection_property) {
*val = state->content_protection;
} else if (property == config->writeback_fb_id_property) {
/* Writeback framebuffer is one-shot, write and forget */
*val = 0;
} else if (property == config->writeback_out_fence_ptr_property) {
*val = 0;
} else if (connector->funcs->atomic_get_property) {
return connector->funcs->atomic_get_property(connector,
state, property, val);
@ -1456,11 +1595,12 @@ drm_atomic_set_crtc_for_plane(struct drm_plane_state *plane_state,
}
if (crtc)
DRM_DEBUG_ATOMIC("Link plane state %p to [CRTC:%d:%s]\n",
plane_state, crtc->base.id, crtc->name);
DRM_DEBUG_ATOMIC("Link [PLANE:%d:%s] state %p to [CRTC:%d:%s]\n",
plane->base.id, plane->name, plane_state,
crtc->base.id, crtc->name);
else
DRM_DEBUG_ATOMIC("Link plane state %p to [NOCRTC]\n",
plane_state);
DRM_DEBUG_ATOMIC("Link [PLANE:%d:%s] state %p to [NOCRTC]\n",
plane->base.id, plane->name, plane_state);
return 0;
}
@ -1480,12 +1620,15 @@ void
drm_atomic_set_fb_for_plane(struct drm_plane_state *plane_state,
struct drm_framebuffer *fb)
{
struct drm_plane *plane = plane_state->plane;
if (fb)
DRM_DEBUG_ATOMIC("Set [FB:%d] for plane state %p\n",
fb->base.id, plane_state);
else
DRM_DEBUG_ATOMIC("Set [NOFB] for plane state %p\n",
DRM_DEBUG_ATOMIC("Set [FB:%d] for [PLANE:%d:%s] state %p\n",
fb->base.id, plane->base.id, plane->name,
plane_state);
else
DRM_DEBUG_ATOMIC("Set [NOFB] for [PLANE:%d:%s] state %p\n",
plane->base.id, plane->name, plane_state);
drm_framebuffer_assign(&plane_state->fb, fb);
}
@ -1546,6 +1689,7 @@ int
drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state,
struct drm_crtc *crtc)
{
struct drm_connector *connector = conn_state->connector;
struct drm_crtc_state *crtc_state;
if (conn_state->crtc == crtc)
@ -1573,10 +1717,12 @@ drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state,
drm_connector_get(conn_state->connector);
conn_state->crtc = crtc;
DRM_DEBUG_ATOMIC("Link connector state %p to [CRTC:%d:%s]\n",
DRM_DEBUG_ATOMIC("Link [CONNECTOR:%d:%s] state %p to [CRTC:%d:%s]\n",
connector->base.id, connector->name,
conn_state, crtc->base.id, crtc->name);
} else {
DRM_DEBUG_ATOMIC("Link connector state %p to [NOCRTC]\n",
DRM_DEBUG_ATOMIC("Link [CONNECTOR:%d:%s] state %p to [NOCRTC]\n",
connector->base.id, connector->name,
conn_state);
}
@ -1584,6 +1730,70 @@ drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state,
}
EXPORT_SYMBOL(drm_atomic_set_crtc_for_connector);
/*
* drm_atomic_get_writeback_job - return or allocate a writeback job
* @conn_state: Connector state to get the job for
*
* Writeback jobs have a different lifetime to the atomic state they are
* associated with. This convenience function takes care of allocating a job
* if there isn't yet one associated with the connector state, otherwise
* it just returns the existing job.
*
* Returns: The writeback job for the given connector state
*/
static struct drm_writeback_job *
drm_atomic_get_writeback_job(struct drm_connector_state *conn_state)
{
WARN_ON(conn_state->connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK);
if (!conn_state->writeback_job)
conn_state->writeback_job =
kzalloc(sizeof(*conn_state->writeback_job), GFP_KERNEL);
return conn_state->writeback_job;
}
/**
* drm_atomic_set_writeback_fb_for_connector - set writeback framebuffer
* @conn_state: atomic state object for the connector
* @fb: fb to use for the connector
*
* This is used to set the framebuffer for a writeback connector, which outputs
* to a buffer instead of an actual physical connector.
* Changing the assigned framebuffer requires us to grab a reference to the new
* fb and drop the reference to the old fb, if there is one. This function
* takes care of all these details besides updating the pointer in the
* state object itself.
*
* Note: The only way conn_state can already have an fb set is if the commit
* sets the property more than once.
*
* See also: drm_writeback_connector_init()
*
* Returns: 0 on success
*/
int drm_atomic_set_writeback_fb_for_connector(
struct drm_connector_state *conn_state,
struct drm_framebuffer *fb)
{
struct drm_writeback_job *job =
drm_atomic_get_writeback_job(conn_state);
if (!job)
return -ENOMEM;
drm_framebuffer_assign(&job->fb, fb);
if (fb)
DRM_DEBUG_ATOMIC("Set [FB:%d] for connector state %p\n",
fb->base.id, conn_state);
else
DRM_DEBUG_ATOMIC("Set [NOFB] for connector state %p\n",
conn_state);
return 0;
}
EXPORT_SYMBOL(drm_atomic_set_writeback_fb_for_connector);
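From userspace, the writeback framebuffer and its optional out-fence are ordinary connector properties set in an atomic commit. A hedged sketch using libdrm; the property IDs are assumed to have been looked up by name (expected to be "CRTC_ID", "WRITEBACK_FB_ID" and "WRITEBACK_OUT_FENCE_PTR"), and the helper name is hypothetical:

#include <stdint.h>
#include <xf86drmMode.h>

/* Queue a one-shot writeback of a CRTC into fb_id; on success the kernel
 * writes a sync_file fd into *out_fence_fd. */
static int queue_writeback(int fd, uint32_t wb_connector_id, uint32_t crtc_id,
			   uint32_t crtc_id_prop, uint32_t wb_fb_id_prop,
			   uint32_t wb_out_fence_prop, uint32_t fb_id,
			   int32_t *out_fence_fd)
{
	drmModeAtomicReq *req = drmModeAtomicAlloc();
	int ret;

	if (!req)
		return -1;

	drmModeAtomicAddProperty(req, wb_connector_id, crtc_id_prop, crtc_id);
	drmModeAtomicAddProperty(req, wb_connector_id, wb_fb_id_prop, fb_id);
	drmModeAtomicAddProperty(req, wb_connector_id, wb_out_fence_prop,
				 (uint64_t)(uintptr_t)out_fence_fd);

	ret = drmModeAtomicCommit(fd, req, DRM_MODE_ATOMIC_ALLOW_MODESET, NULL);
	drmModeAtomicFree(req);
	return ret;
}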
/**
* drm_atomic_add_affected_connectors - add connectors for crtc
* @state: atomic state
@ -1672,6 +1882,9 @@ drm_atomic_add_affected_planes(struct drm_atomic_state *state,
WARN_ON(!drm_atomic_get_new_crtc_state(state, crtc));
DRM_DEBUG_ATOMIC("Adding all current planes for [CRTC:%d:%s] to %p\n",
crtc->base.id, crtc->name, state);
drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
struct drm_plane_state *plane_state =
drm_atomic_get_plane_state(state, plane);
@ -1702,6 +1915,8 @@ int drm_atomic_check_only(struct drm_atomic_state *state)
struct drm_plane_state *plane_state;
struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state;
struct drm_connector *conn;
struct drm_connector_state *conn_state;
int i, ret = 0;
DRM_DEBUG_ATOMIC("checking %p\n", state);
@ -1724,6 +1939,15 @@ int drm_atomic_check_only(struct drm_atomic_state *state)
}
}
for_each_new_connector_in_state(state, conn, conn_state, i) {
ret = drm_atomic_connector_check(conn, conn_state);
if (ret) {
DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] atomic core check failed\n",
conn->base.id, conn->name);
return ret;
}
}
if (config->funcs->atomic_check) {
ret = config->funcs->atomic_check(state->dev, state);
@ -2047,45 +2271,6 @@ int drm_atomic_set_property(struct drm_atomic_state *state,
return ret;
}
/**
* drm_atomic_clean_old_fb -- Unset old_fb pointers and set plane->fb pointers.
*
* @dev: drm device to check.
* @plane_mask: plane mask for planes that were updated.
* @ret: return value, can be -EDEADLK for a retry.
*
* Before doing an update &drm_plane.old_fb is set to &drm_plane.fb, but before
* dropping the locks old_fb needs to be set to NULL and plane->fb updated. This
* is a common operation for each atomic update, so this call is split off as a
* helper.
*/
void drm_atomic_clean_old_fb(struct drm_device *dev,
unsigned plane_mask,
int ret)
{
struct drm_plane *plane;
/* if succeeded, fixup legacy plane crtc/fb ptrs before dropping
* locks (ie. while it is still safe to deref plane->state). We
* need to do this here because the driver entry points cannot
* distinguish between legacy and atomic ioctls.
*/
drm_for_each_plane_mask(plane, dev, plane_mask) {
if (ret == 0) {
struct drm_framebuffer *new_fb = plane->state->fb;
if (new_fb)
drm_framebuffer_get(new_fb);
plane->fb = new_fb;
plane->crtc = plane->state->crtc;
if (plane->old_fb)
drm_framebuffer_put(plane->old_fb);
}
plane->old_fb = NULL;
}
}
EXPORT_SYMBOL(drm_atomic_clean_old_fb);
/**
* DOC: explicit fencing properties
*
@ -2161,7 +2346,7 @@ static int setup_out_fence(struct drm_out_fence_state *fence_state,
return 0;
}
static int prepare_crtc_signaling(struct drm_device *dev,
static int prepare_signaling(struct drm_device *dev,
struct drm_atomic_state *state,
struct drm_mode_atomic *arg,
struct drm_file *file_priv,
@ -2170,6 +2355,8 @@ static int prepare_crtc_signaling(struct drm_device *dev,
{
struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state;
struct drm_connector *conn;
struct drm_connector_state *conn_state;
int i, c = 0, ret;
if (arg->flags & DRM_MODE_ATOMIC_TEST_ONLY)
@ -2235,6 +2422,43 @@ static int prepare_crtc_signaling(struct drm_device *dev,
c++;
}
for_each_new_connector_in_state(state, conn, conn_state, i) {
struct drm_writeback_job *job;
struct drm_out_fence_state *f;
struct dma_fence *fence;
s32 __user *fence_ptr;
fence_ptr = get_out_fence_for_connector(state, conn);
if (!fence_ptr)
continue;
job = drm_atomic_get_writeback_job(conn_state);
if (!job)
return -ENOMEM;
f = krealloc(*fence_state, sizeof(**fence_state) *
(*num_fences + 1), GFP_KERNEL);
if (!f)
return -ENOMEM;
memset(&f[*num_fences], 0, sizeof(*f));
f[*num_fences].out_fence_ptr = fence_ptr;
*fence_state = f;
fence = drm_writeback_get_out_fence((struct drm_writeback_connector *)conn);
if (!fence)
return -ENOMEM;
ret = setup_out_fence(&f[(*num_fences)++], fence);
if (ret) {
dma_fence_put(fence);
return ret;
}
job->out_fence = fence;
}
/*
* Having this flag means user mode pends on event which will never
* reach due to lack of at least one CRTC for signaling
@ -2245,11 +2469,11 @@ static int prepare_crtc_signaling(struct drm_device *dev,
return 0;
}
static void complete_crtc_signaling(struct drm_device *dev,
struct drm_atomic_state *state,
struct drm_out_fence_state *fence_state,
unsigned int num_fences,
bool install_fds)
static void complete_signaling(struct drm_device *dev,
struct drm_atomic_state *state,
struct drm_out_fence_state *fence_state,
unsigned int num_fences,
bool install_fds)
{
struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state;
@ -2306,9 +2530,7 @@ int drm_mode_atomic_ioctl(struct drm_device *dev,
unsigned int copied_objs, copied_props;
struct drm_atomic_state *state;
struct drm_modeset_acquire_ctx ctx;
struct drm_plane *plane;
struct drm_out_fence_state *fence_state;
unsigned plane_mask;
int ret = 0;
unsigned int i, j, num_fences;
@ -2348,7 +2570,6 @@ int drm_mode_atomic_ioctl(struct drm_device *dev,
state->allow_modeset = !!(arg->flags & DRM_MODE_ATOMIC_ALLOW_MODESET);
retry:
plane_mask = 0;
copied_objs = 0;
copied_props = 0;
fence_state = NULL;
@ -2419,17 +2640,11 @@ retry:
copied_props++;
}
if (obj->type == DRM_MODE_OBJECT_PLANE && count_props &&
!(arg->flags & DRM_MODE_ATOMIC_TEST_ONLY)) {
plane = obj_to_plane(obj);
plane_mask |= (1 << drm_plane_index(plane));
plane->old_fb = plane->fb;
}
drm_mode_object_put(obj);
}
ret = prepare_crtc_signaling(dev, state, arg, file_priv, &fence_state,
&num_fences);
ret = prepare_signaling(dev, state, arg, file_priv, &fence_state,
&num_fences);
if (ret)
goto out;
@ -2445,9 +2660,7 @@ retry:
}
out:
drm_atomic_clean_old_fb(dev, plane_mask, ret);
complete_crtc_signaling(dev, state, fence_state, num_fences, !ret);
complete_signaling(dev, state, fence_state, num_fences, !ret);
if (ret == -EDEADLK) {
drm_atomic_state_clear(state);


@ -30,6 +30,7 @@
#include <drm/drm_plane_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_writeback.h>
#include <linux/dma-fence.h>
#include "drm_crtc_helper_internal.h"
@ -1172,6 +1173,25 @@ void drm_atomic_helper_commit_modeset_disables(struct drm_device *dev,
}
EXPORT_SYMBOL(drm_atomic_helper_commit_modeset_disables);
static void drm_atomic_helper_commit_writebacks(struct drm_device *dev,
struct drm_atomic_state *old_state)
{
struct drm_connector *connector;
struct drm_connector_state *new_conn_state;
int i;
for_each_new_connector_in_state(old_state, connector, new_conn_state, i) {
const struct drm_connector_helper_funcs *funcs;
funcs = connector->helper_private;
if (new_conn_state->writeback_job && new_conn_state->writeback_job->fb) {
WARN_ON(connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK);
funcs->atomic_commit(connector, new_conn_state->writeback_job);
}
}
}
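For context, the helper above lands in the connector helper's atomic_commit hook. A hedged driver-side sketch of such a hook, assuming the drm_writeback API added in this series; the my_wb_* names are hypothetical and the actual hardware programming is elided:

#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_writeback.h>

static void my_wb_atomic_commit(struct drm_connector *connector,
				struct drm_writeback_job *job)
{
	struct drm_writeback_connector *wb_conn =
		container_of(connector, struct drm_writeback_connector, base);

	/* Hand the job (and its out-fence) to the writeback core ... */
	drm_writeback_queue_job(wb_conn, job);

	/* ... then program the hardware to write the CRTC output into job->fb.
	 * Once the write-out finishes (e.g. in the IRQ handler), the driver
	 * calls drm_writeback_signal_completion(wb_conn, 0). */
}

static const struct drm_connector_helper_funcs my_wb_connector_helper_funcs = {
	.atomic_commit = my_wb_atomic_commit,
};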
/**
* drm_atomic_helper_commit_modeset_enables - modeset commit to enable outputs
* @dev: DRM device
@ -1251,6 +1271,8 @@ void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
drm_bridge_enable(encoder->bridge);
}
drm_atomic_helper_commit_writebacks(dev, old_state);
}
EXPORT_SYMBOL(drm_atomic_helper_commit_modeset_enables);
@ -2914,7 +2936,6 @@ static int __drm_atomic_helper_disable_all(struct drm_device *dev,
struct drm_plane *plane;
struct drm_crtc_state *crtc_state;
struct drm_crtc *crtc;
unsigned plane_mask = 0;
int ret, i;
state = drm_atomic_state_alloc(dev);
@ -2957,17 +2978,10 @@ static int __drm_atomic_helper_disable_all(struct drm_device *dev,
goto free;
drm_atomic_set_fb_for_plane(plane_state, NULL);
if (clean_old_fbs) {
plane->old_fb = plane->fb;
plane_mask |= BIT(drm_plane_index(plane));
}
}
ret = drm_atomic_commit(state);
free:
if (plane_mask)
drm_atomic_clean_old_fb(dev, plane_mask, ret);
drm_atomic_state_put(state);
return ret;
}
@ -3129,13 +3143,8 @@ int drm_atomic_helper_commit_duplicated_state(struct drm_atomic_state *state,
state->acquire_ctx = ctx;
for_each_new_plane_in_state(state, plane, new_plane_state, i) {
WARN_ON(plane->crtc != new_plane_state->crtc);
WARN_ON(plane->fb != new_plane_state->fb);
WARN_ON(plane->old_fb);
for_each_new_plane_in_state(state, plane, new_plane_state, i)
state->planes[i].old_state = plane->state;
}
for_each_new_crtc_in_state(state, crtc, new_crtc_state, i)
state->crtcs[i].old_state = crtc->state;
@ -3660,6 +3669,9 @@ __drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector,
if (state->crtc)
drm_connector_get(connector);
state->commit = NULL;
/* Don't copy over a writeback job, they are used only once */
state->writeback_job = NULL;
}
EXPORT_SYMBOL(__drm_atomic_helper_connector_duplicate_state);

View File

@ -87,6 +87,7 @@ static struct drm_conn_prop_enum_list drm_connector_enum_list[] = {
{ DRM_MODE_CONNECTOR_VIRTUAL, "Virtual" },
{ DRM_MODE_CONNECTOR_DSI, "DSI" },
{ DRM_MODE_CONNECTOR_DPI, "DPI" },
{ DRM_MODE_CONNECTOR_WRITEBACK, "Writeback" },
};
void drm_connector_ida_init(void)
@ -195,6 +196,10 @@ int drm_connector_init(struct drm_device *dev,
struct ida *connector_ida =
&drm_connector_enum_list[connector_type].ida;
WARN_ON(drm_drv_uses_atomic_modeset(dev) &&
(!funcs->atomic_destroy_state ||
!funcs->atomic_duplicate_state));
ret = __drm_mode_object_add(dev, &connector->base,
DRM_MODE_OBJECT_CONNECTOR,
false, drm_connector_free);
@ -249,7 +254,8 @@ int drm_connector_init(struct drm_device *dev,
config->num_connector++;
spin_unlock_irq(&config->connector_list_lock);
if (connector_type != DRM_MODE_CONNECTOR_VIRTUAL)
if (connector_type != DRM_MODE_CONNECTOR_VIRTUAL &&
connector_type != DRM_MODE_CONNECTOR_WRITEBACK)
drm_object_attach_property(&connector->base,
config->edid_property,
0);
@ -720,6 +726,14 @@ static const struct drm_prop_enum_list drm_aspect_ratio_enum_list[] = {
{ DRM_MODE_PICTURE_ASPECT_16_9, "16:9" },
};
static const struct drm_prop_enum_list drm_content_type_enum_list[] = {
{ DRM_MODE_CONTENT_TYPE_NO_DATA, "No Data" },
{ DRM_MODE_CONTENT_TYPE_GRAPHICS, "Graphics" },
{ DRM_MODE_CONTENT_TYPE_PHOTO, "Photo" },
{ DRM_MODE_CONTENT_TYPE_CINEMA, "Cinema" },
{ DRM_MODE_CONTENT_TYPE_GAME, "Game" },
};
static const struct drm_prop_enum_list drm_panel_orientation_enum_list[] = {
{ DRM_MODE_PANEL_ORIENTATION_NORMAL, "Normal" },
{ DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP, "Upside Down" },
@ -996,6 +1010,84 @@ int drm_mode_create_dvi_i_properties(struct drm_device *dev)
}
EXPORT_SYMBOL(drm_mode_create_dvi_i_properties);
/**
* DOC: HDMI connector properties
*
* content type (HDMI specific):
 * Indicates the content type setting to be used in HDMI infoframes to signal
 * the content type to the external device, so that it adjusts its display
 * settings accordingly.
*
* The value of this property can be one of the following:
*
* No Data:
* Content type is unknown
* Graphics:
* Content type is graphics
* Photo:
* Content type is photo
* Cinema:
* Content type is cinema
* Game:
* Content type is game
*
* Drivers can set up this property by calling
* drm_connector_attach_content_type_property(). Decoding to
* infoframe values is done through
* drm_hdmi_get_content_type_from_property() and
* drm_hdmi_get_itc_bit_from_property().
*/
/**
* drm_connector_attach_content_type_property - attach content-type property
* @connector: connector to attach content type property on.
*
 * Called by a driver the first time an HDMI connector is created.
*/
int drm_connector_attach_content_type_property(struct drm_connector *connector)
{
if (!drm_mode_create_content_type_property(connector->dev))
drm_object_attach_property(&connector->base,
connector->dev->mode_config.content_type_property,
DRM_MODE_CONTENT_TYPE_NO_DATA);
return 0;
}
EXPORT_SYMBOL(drm_connector_attach_content_type_property);
/**
 * drm_hdmi_avi_infoframe_content_type() - fill the HDMI AVI infoframe
 * content type information, based
 * on the corresponding DRM property.
* @frame: HDMI AVI infoframe
* @conn_state: DRM display connector state
*
*/
void drm_hdmi_avi_infoframe_content_type(struct hdmi_avi_infoframe *frame,
const struct drm_connector_state *conn_state)
{
switch (conn_state->content_type) {
case DRM_MODE_CONTENT_TYPE_GRAPHICS:
frame->content_type = HDMI_CONTENT_TYPE_GRAPHICS;
break;
case DRM_MODE_CONTENT_TYPE_CINEMA:
frame->content_type = HDMI_CONTENT_TYPE_CINEMA;
break;
case DRM_MODE_CONTENT_TYPE_GAME:
frame->content_type = HDMI_CONTENT_TYPE_GAME;
break;
case DRM_MODE_CONTENT_TYPE_PHOTO:
frame->content_type = HDMI_CONTENT_TYPE_PHOTO;
break;
default:
/* Graphics is the default(0) */
frame->content_type = HDMI_CONTENT_TYPE_GRAPHICS;
}
frame->itc = conn_state->content_type != DRM_MODE_CONTENT_TYPE_NO_DATA;
}
EXPORT_SYMBOL(drm_hdmi_avi_infoframe_content_type);
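Driver-side usage is intended to be two small steps: attach the property once when the HDMI connector is created (drm_connector_attach_content_type_property()), then fold the connector state into the AVI infoframe when it is built. A hedged sketch, assuming the infoframe helper signature of this kernel series (the third argument being the is_hdmi2_sink flag); the my_* name is hypothetical:

#include <linux/hdmi.h>
#include <drm/drm_connector.h>
#include <drm/drm_edid.h>

static int my_hdmi_fill_avi_infoframe(struct drm_connector *connector,
				      const struct drm_display_mode *mode,
				      struct hdmi_avi_infoframe *frame)
{
	int ret;

	ret = drm_hdmi_avi_infoframe_from_display_mode(frame, mode, false);
	if (ret)
		return ret;

	/* Translate the "content type" property into the CN0/CN1 and ITC bits. */
	drm_hdmi_avi_infoframe_content_type(frame, connector->state);
	return 0;
}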
/**
* drm_create_tv_properties - create TV specific connector properties
* @dev: DRM device
@ -1260,6 +1352,33 @@ int drm_mode_create_aspect_ratio_property(struct drm_device *dev)
}
EXPORT_SYMBOL(drm_mode_create_aspect_ratio_property);
/**
* drm_mode_create_content_type_property - create content type property
* @dev: DRM device
*
 * Called by a driver the first time it is needed. The property must then be
 * attached to the desired connectors.
*
* Returns:
* Zero on success, negative errno on failure.
*/
int drm_mode_create_content_type_property(struct drm_device *dev)
{
if (dev->mode_config.content_type_property)
return 0;
dev->mode_config.content_type_property =
drm_property_create_enum(dev, 0, "content type",
drm_content_type_enum_list,
ARRAY_SIZE(drm_content_type_enum_list));
if (dev->mode_config.content_type_property == NULL)
return -ENOMEM;
return 0;
}
EXPORT_SYMBOL(drm_mode_create_content_type_property);
/**
* drm_mode_create_suggested_offset_properties - create suggests offset properties
* @dev: DRM device


@ -286,6 +286,10 @@ int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
if (WARN_ON(config->num_crtc >= 32))
return -EINVAL;
WARN_ON(drm_drv_uses_atomic_modeset(dev) &&
(!funcs->atomic_destroy_state ||
!funcs->atomic_duplicate_state));
crtc->dev = dev;
crtc->funcs = funcs;
@ -469,23 +473,32 @@ static int __drm_mode_set_config_internal(struct drm_mode_set *set,
* connectors from it), hence we need to refcount the fbs across all
* crtcs. Atomic modeset will have saner semantics ...
*/
drm_for_each_crtc(tmp, crtc->dev)
tmp->primary->old_fb = tmp->primary->fb;
drm_for_each_crtc(tmp, crtc->dev) {
struct drm_plane *plane = tmp->primary;
plane->old_fb = plane->fb;
}
fb = set->fb;
ret = crtc->funcs->set_config(set, ctx);
if (ret == 0) {
crtc->primary->crtc = fb ? crtc : NULL;
crtc->primary->fb = fb;
struct drm_plane *plane = crtc->primary;
if (!plane->state) {
plane->crtc = fb ? crtc : NULL;
plane->fb = fb;
}
}
drm_for_each_crtc(tmp, crtc->dev) {
if (tmp->primary->fb)
drm_framebuffer_get(tmp->primary->fb);
if (tmp->primary->old_fb)
drm_framebuffer_put(tmp->primary->old_fb);
tmp->primary->old_fb = NULL;
struct drm_plane *plane = tmp->primary;
if (plane->fb)
drm_framebuffer_get(plane->fb);
if (plane->old_fb)
drm_framebuffer_put(plane->old_fb);
plane->old_fb = NULL;
}
return ret;
@ -640,7 +653,9 @@ retry:
ret = drm_mode_convert_umode(dev, mode, &crtc_req->mode);
if (ret) {
DRM_DEBUG_KMS("Invalid mode\n");
DRM_DEBUG_KMS("Invalid mode (ret=%d, status=%s)\n",
ret, drm_get_mode_status_name(mode->status));
drm_mode_debug_printmodeline(mode);
goto out;
}

View File

@ -56,6 +56,9 @@ int drm_mode_setcrtc(struct drm_device *dev,
int drm_modeset_register_all(struct drm_device *dev);
void drm_modeset_unregister_all(struct drm_device *dev);
/* drm_modes.c */
const char *drm_get_mode_status_name(enum drm_mode_status status);
/* IOCTLs */
int drm_mode_getresources(struct drm_device *dev,
void *data, struct drm_file *file_priv);


@ -163,8 +163,9 @@ static const struct edid_quirk {
/* Rotel RSX-1058 forwards sink's EDID but only does HDMI 1.1*/
{ "ETR", 13896, EDID_QUIRK_FORCE_8BPC },
/* HTC Vive VR Headset */
/* HTC Vive and Vive Pro VR Headsets */
{ "HVR", 0xaa01, EDID_QUIRK_NON_DESKTOP },
{ "HVR", 0xaa02, EDID_QUIRK_NON_DESKTOP },
/* Oculus Rift DK1, DK2, and CV1 VR Headsets */
{ "OVR", 0x0001, EDID_QUIRK_NON_DESKTOP },
@ -687,562 +688,562 @@ static const struct minimode extra_modes[] = {
static const struct drm_display_mode edid_cea_modes[] = {
/* 0 - dummy, VICs start at 1 */
{ },
/* 1 - 640x480@60Hz */
/* 1 - 640x480@60Hz 4:3 */
{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
752, 800, 0, 480, 490, 492, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 2 - 720x480@60Hz */
/* 2 - 720x480@60Hz 4:3 */
{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 736,
798, 858, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 3 - 720x480@60Hz */
/* 3 - 720x480@60Hz 16:9 */
{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 736,
798, 858, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 4 - 1280x720@60Hz */
/* 4 - 1280x720@60Hz 16:9 */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1390,
1430, 1650, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 5 - 1920x1080i@60Hz */
/* 5 - 1920x1080i@60Hz 16:9 */
{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
2052, 2200, 0, 1080, 1084, 1094, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
DRM_MODE_FLAG_INTERLACE),
DRM_MODE_FLAG_INTERLACE),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 6 - 720(1440)x480i@60Hz */
/* 6 - 720(1440)x480i@60Hz 4:3 */
{ DRM_MODE("720x480i", DRM_MODE_TYPE_DRIVER, 13500, 720, 739,
801, 858, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 7 - 720(1440)x480i@60Hz */
/* 7 - 720(1440)x480i@60Hz 16:9 */
{ DRM_MODE("720x480i", DRM_MODE_TYPE_DRIVER, 13500, 720, 739,
801, 858, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 8 - 720(1440)x240@60Hz */
/* 8 - 720(1440)x240@60Hz 4:3 */
{ DRM_MODE("720x240", DRM_MODE_TYPE_DRIVER, 13500, 720, 739,
801, 858, 0, 240, 244, 247, 262, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_DBLCLK),
DRM_MODE_FLAG_DBLCLK),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 9 - 720(1440)x240@60Hz */
/* 9 - 720(1440)x240@60Hz 16:9 */
{ DRM_MODE("720x240", DRM_MODE_TYPE_DRIVER, 13500, 720, 739,
801, 858, 0, 240, 244, 247, 262, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_DBLCLK),
DRM_MODE_FLAG_DBLCLK),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 10 - 2880x480i@60Hz */
/* 10 - 2880x480i@60Hz 4:3 */
{ DRM_MODE("2880x480i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
3204, 3432, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE),
DRM_MODE_FLAG_INTERLACE),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 11 - 2880x480i@60Hz */
/* 11 - 2880x480i@60Hz 16:9 */
{ DRM_MODE("2880x480i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
3204, 3432, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE),
DRM_MODE_FLAG_INTERLACE),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 12 - 2880x240@60Hz */
/* 12 - 2880x240@60Hz 4:3 */
{ DRM_MODE("2880x240", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
3204, 3432, 0, 240, 244, 247, 262, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 13 - 2880x240@60Hz */
/* 13 - 2880x240@60Hz 16:9 */
{ DRM_MODE("2880x240", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
3204, 3432, 0, 240, 244, 247, 262, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 14 - 1440x480@60Hz */
/* 14 - 1440x480@60Hz 4:3 */
{ DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1472,
1596, 1716, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 15 - 1440x480@60Hz */
/* 15 - 1440x480@60Hz 16:9 */
{ DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1472,
1596, 1716, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 16 - 1920x1080@60Hz */
/* 16 - 1920x1080@60Hz 16:9 */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 17 - 720x576@50Hz */
/* 17 - 720x576@50Hz 4:3 */
{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
796, 864, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 18 - 720x576@50Hz */
/* 18 - 720x576@50Hz 16:9 */
{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
796, 864, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 19 - 1280x720@50Hz */
/* 19 - 1280x720@50Hz 16:9 */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1720,
1760, 1980, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 20 - 1920x1080i@50Hz */
/* 20 - 1920x1080i@50Hz 16:9 */
{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
DRM_MODE_FLAG_INTERLACE),
DRM_MODE_FLAG_INTERLACE),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 21 - 720(1440)x576i@50Hz */
/* 21 - 720(1440)x576i@50Hz 4:3 */
{ DRM_MODE("720x576i", DRM_MODE_TYPE_DRIVER, 13500, 720, 732,
795, 864, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 22 - 720(1440)x576i@50Hz */
/* 22 - 720(1440)x576i@50Hz 16:9 */
{ DRM_MODE("720x576i", DRM_MODE_TYPE_DRIVER, 13500, 720, 732,
795, 864, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 23 - 720(1440)x288@50Hz */
/* 23 - 720(1440)x288@50Hz 4:3 */
{ DRM_MODE("720x288", DRM_MODE_TYPE_DRIVER, 13500, 720, 732,
795, 864, 0, 288, 290, 293, 312, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_DBLCLK),
DRM_MODE_FLAG_DBLCLK),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 24 - 720(1440)x288@50Hz */
/* 24 - 720(1440)x288@50Hz 16:9 */
{ DRM_MODE("720x288", DRM_MODE_TYPE_DRIVER, 13500, 720, 732,
795, 864, 0, 288, 290, 293, 312, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_DBLCLK),
DRM_MODE_FLAG_DBLCLK),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 25 - 2880x576i@50Hz */
/* 25 - 2880x576i@50Hz 4:3 */
{ DRM_MODE("2880x576i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
3180, 3456, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE),
DRM_MODE_FLAG_INTERLACE),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 26 - 2880x576i@50Hz */
/* 26 - 2880x576i@50Hz 16:9 */
{ DRM_MODE("2880x576i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
3180, 3456, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE),
DRM_MODE_FLAG_INTERLACE),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 27 - 2880x288@50Hz */
/* 27 - 2880x288@50Hz 4:3 */
{ DRM_MODE("2880x288", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
3180, 3456, 0, 288, 290, 293, 312, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 28 - 2880x288@50Hz */
/* 28 - 2880x288@50Hz 16:9 */
{ DRM_MODE("2880x288", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
3180, 3456, 0, 288, 290, 293, 312, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 29 - 1440x576@50Hz */
/* 29 - 1440x576@50Hz 4:3 */
{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
1592, 1728, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 30 - 1440x576@50Hz */
/* 30 - 1440x576@50Hz 16:9 */
{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
1592, 1728, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 31 - 1920x1080@50Hz */
/* 31 - 1920x1080@50Hz 16:9 */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 32 - 1920x1080@24Hz */
/* 32 - 1920x1080@24Hz 16:9 */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2558,
2602, 2750, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 33 - 1920x1080@25Hz */
/* 33 - 1920x1080@25Hz 16:9 */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 34 - 1920x1080@30Hz */
/* 34 - 1920x1080@30Hz 16:9 */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 35 - 2880x480@60Hz */
/* 35 - 2880x480@60Hz 4:3 */
{ DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2944,
3192, 3432, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 36 - 2880x480@60Hz */
/* 36 - 2880x480@60Hz 16:9 */
{ DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2944,
3192, 3432, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 37 - 2880x576@50Hz */
/* 37 - 2880x576@50Hz 4:3 */
{ DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2928,
3184, 3456, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 38 - 2880x576@50Hz */
/* 38 - 2880x576@50Hz 16:9 */
{ DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2928,
3184, 3456, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 39 - 1920x1080i@50Hz */
/* 39 - 1920x1080i@50Hz 16:9 */
{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 72000, 1920, 1952,
2120, 2304, 0, 1080, 1126, 1136, 1250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE),
DRM_MODE_FLAG_INTERLACE),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 40 - 1920x1080i@100Hz */
/* 40 - 1920x1080i@100Hz 16:9 */
{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
DRM_MODE_FLAG_INTERLACE),
DRM_MODE_FLAG_INTERLACE),
.vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 41 - 1280x720@100Hz */
/* 41 - 1280x720@100Hz 16:9 */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1720,
1760, 1980, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 42 - 720x576@100Hz */
/* 42 - 720x576@100Hz 4:3 */
{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 54000, 720, 732,
796, 864, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 43 - 720x576@100Hz */
/* 43 - 720x576@100Hz 16:9 */
{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 54000, 720, 732,
796, 864, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 44 - 720(1440)x576i@100Hz */
/* 44 - 720(1440)x576i@100Hz 4:3 */
{ DRM_MODE("720x576i", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
795, 864, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
.vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 45 - 720(1440)x576i@100Hz */
/* 45 - 720(1440)x576i@100Hz 16:9 */
{ DRM_MODE("720x576i", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
795, 864, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
.vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 46 - 1920x1080i@120Hz */
/* 46 - 1920x1080i@120Hz 16:9 */
{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
2052, 2200, 0, 1080, 1084, 1094, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
DRM_MODE_FLAG_INTERLACE),
DRM_MODE_FLAG_INTERLACE),
.vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 47 - 1280x720@120Hz */
/* 47 - 1280x720@120Hz 16:9 */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1390,
1430, 1650, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 48 - 720x480@120Hz */
/* 48 - 720x480@120Hz 4:3 */
{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 54000, 720, 736,
798, 858, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 49 - 720x480@120Hz */
/* 49 - 720x480@120Hz 16:9 */
{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 54000, 720, 736,
798, 858, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 50 - 720(1440)x480i@120Hz */
/* 50 - 720(1440)x480i@120Hz 4:3 */
{ DRM_MODE("720x480i", DRM_MODE_TYPE_DRIVER, 27000, 720, 739,
801, 858, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
.vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 51 - 720(1440)x480i@120Hz */
/* 51 - 720(1440)x480i@120Hz 16:9 */
{ DRM_MODE("720x480i", DRM_MODE_TYPE_DRIVER, 27000, 720, 739,
801, 858, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
.vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 52 - 720x576@200Hz */
/* 52 - 720x576@200Hz 4:3 */
{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 108000, 720, 732,
796, 864, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 200, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 53 - 720x576@200Hz */
/* 53 - 720x576@200Hz 16:9 */
{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 108000, 720, 732,
796, 864, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 200, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 54 - 720(1440)x576i@200Hz */
/* 54 - 720(1440)x576i@200Hz 4:3 */
{ DRM_MODE("720x576i", DRM_MODE_TYPE_DRIVER, 54000, 720, 732,
795, 864, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
.vrefresh = 200, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 55 - 720(1440)x576i@200Hz */
/* 55 - 720(1440)x576i@200Hz 16:9 */
{ DRM_MODE("720x576i", DRM_MODE_TYPE_DRIVER, 54000, 720, 732,
795, 864, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
.vrefresh = 200, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 56 - 720x480@240Hz */
/* 56 - 720x480@240Hz 4:3 */
{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 108000, 720, 736,
798, 858, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 240, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 57 - 720x480@240Hz */
/* 57 - 720x480@240Hz 16:9 */
{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 108000, 720, 736,
798, 858, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 240, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 58 - 720(1440)x480i@240Hz */
/* 58 - 720(1440)x480i@240Hz 4:3 */
{ DRM_MODE("720x480i", DRM_MODE_TYPE_DRIVER, 54000, 720, 739,
801, 858, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
.vrefresh = 240, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 59 - 720(1440)x480i@240Hz */
/* 59 - 720(1440)x480i@240Hz 16:9 */
{ DRM_MODE("720x480i", DRM_MODE_TYPE_DRIVER, 54000, 720, 739,
801, 858, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
.vrefresh = 240, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 60 - 1280x720@24Hz */
/* 60 - 1280x720@24Hz 16:9 */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 59400, 1280, 3040,
3080, 3300, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 61 - 1280x720@25Hz */
/* 61 - 1280x720@25Hz 16:9 */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3700,
3740, 3960, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 62 - 1280x720@30Hz */
/* 62 - 1280x720@30Hz 16:9 */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3040,
3080, 3300, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 63 - 1920x1080@120Hz */
/* 63 - 1920x1080@120Hz 16:9 */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2008,
2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 64 - 1920x1080@100Hz */
.vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 64 - 1920x1080@100Hz 16:9 */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2448,
2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 65 - 1280x720@24Hz */
.vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 65 - 1280x720@24Hz 64:27 */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 59400, 1280, 3040,
3080, 3300, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 66 - 1280x720@25Hz */
/* 66 - 1280x720@25Hz 64:27 */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3700,
3740, 3960, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 67 - 1280x720@30Hz */
/* 67 - 1280x720@30Hz 64:27 */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3040,
3080, 3300, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 68 - 1280x720@50Hz */
/* 68 - 1280x720@50Hz 64:27 */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1720,
1760, 1980, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 69 - 1280x720@60Hz */
/* 69 - 1280x720@60Hz 64:27 */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1390,
1430, 1650, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 70 - 1280x720@100Hz */
/* 70 - 1280x720@100Hz 64:27 */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1720,
1760, 1980, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 71 - 1280x720@120Hz */
/* 71 - 1280x720@120Hz 64:27 */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1390,
1430, 1650, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 72 - 1920x1080@24Hz */
/* 72 - 1920x1080@24Hz 64:27 */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2558,
2602, 2750, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 73 - 1920x1080@25Hz */
/* 73 - 1920x1080@25Hz 64:27 */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 74 - 1920x1080@30Hz */
/* 74 - 1920x1080@30Hz 64:27 */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 75 - 1920x1080@50Hz */
/* 75 - 1920x1080@50Hz 64:27 */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 76 - 1920x1080@60Hz */
/* 76 - 1920x1080@60Hz 64:27 */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 77 - 1920x1080@100Hz */
/* 77 - 1920x1080@100Hz 64:27 */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2448,
2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 78 - 1920x1080@120Hz */
/* 78 - 1920x1080@120Hz 64:27 */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2008,
2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 79 - 1680x720@24Hz */
/* 79 - 1680x720@24Hz 64:27 */
{ DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 59400, 1680, 3040,
3080, 3300, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 80 - 1680x720@25Hz */
/* 80 - 1680x720@25Hz 64:27 */
{ DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 59400, 1680, 2908,
2948, 3168, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 81 - 1680x720@30Hz */
/* 81 - 1680x720@30Hz 64:27 */
{ DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 59400, 1680, 2380,
2420, 2640, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 82 - 1680x720@50Hz */
/* 82 - 1680x720@50Hz 64:27 */
{ DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 82500, 1680, 1940,
1980, 2200, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 83 - 1680x720@60Hz */
/* 83 - 1680x720@60Hz 64:27 */
{ DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 99000, 1680, 1940,
1980, 2200, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 84 - 1680x720@100Hz */
/* 84 - 1680x720@100Hz 64:27 */
{ DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 165000, 1680, 1740,
1780, 2000, 0, 720, 725, 730, 825, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 85 - 1680x720@120Hz */
/* 85 - 1680x720@120Hz 64:27 */
{ DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 198000, 1680, 1740,
1780, 2000, 0, 720, 725, 730, 825, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 86 - 2560x1080@24Hz */
/* 86 - 2560x1080@24Hz 64:27 */
{ DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 99000, 2560, 3558,
3602, 3750, 0, 1080, 1084, 1089, 1100, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 87 - 2560x1080@25Hz */
/* 87 - 2560x1080@25Hz 64:27 */
{ DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 90000, 2560, 3008,
3052, 3200, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 88 - 2560x1080@30Hz */
/* 88 - 2560x1080@30Hz 64:27 */
{ DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 118800, 2560, 3328,
3372, 3520, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 89 - 2560x1080@50Hz */
/* 89 - 2560x1080@50Hz 64:27 */
{ DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 185625, 2560, 3108,
3152, 3300, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 90 - 2560x1080@60Hz */
/* 90 - 2560x1080@60Hz 64:27 */
{ DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 198000, 2560, 2808,
2852, 3000, 0, 1080, 1084, 1089, 1100, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 91 - 2560x1080@100Hz */
/* 91 - 2560x1080@100Hz 64:27 */
{ DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 371250, 2560, 2778,
2822, 2970, 0, 1080, 1084, 1089, 1250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 92 - 2560x1080@120Hz */
/* 92 - 2560x1080@120Hz 64:27 */
{ DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 495000, 2560, 3108,
3152, 3300, 0, 1080, 1084, 1089, 1250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 93 - 3840x2160p@24Hz 16:9 */
/* 93 - 3840x2160@24Hz 16:9 */
{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 5116,
5204, 5500, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 94 - 3840x2160p@25Hz 16:9 */
/* 94 - 3840x2160@25Hz 16:9 */
{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 4896,
4984, 5280, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 95 - 3840x2160p@30Hz 16:9 */
/* 95 - 3840x2160@30Hz 16:9 */
{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 4016,
4104, 4400, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 96 - 3840x2160p@50Hz 16:9 */
/* 96 - 3840x2160@50Hz 16:9 */
{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 594000, 3840, 4896,
4984, 5280, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 97 - 3840x2160p@60Hz 16:9 */
/* 97 - 3840x2160@60Hz 16:9 */
{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 594000, 3840, 4016,
4104, 4400, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 98 - 4096x2160p@24Hz 256:135 */
/* 98 - 4096x2160@24Hz 256:135 */
{ DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 297000, 4096, 5116,
5204, 5500, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135, },
/* 99 - 4096x2160p@25Hz 256:135 */
/* 99 - 4096x2160@25Hz 256:135 */
{ DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 297000, 4096, 5064,
5152, 5280, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135, },
/* 100 - 4096x2160p@30Hz 256:135 */
/* 100 - 4096x2160@30Hz 256:135 */
{ DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 297000, 4096, 4184,
4272, 4400, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135, },
/* 101 - 4096x2160p@50Hz 256:135 */
/* 101 - 4096x2160@50Hz 256:135 */
{ DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 594000, 4096, 5064,
5152, 5280, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135, },
/* 102 - 4096x2160p@60Hz 256:135 */
/* 102 - 4096x2160@60Hz 256:135 */
{ DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 594000, 4096, 4184,
4272, 4400, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135, },
/* 103 - 3840x2160p@24Hz 64:27 */
/* 103 - 3840x2160@24Hz 64:27 */
{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 5116,
5204, 5500, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 104 - 3840x2160p@25Hz 64:27 */
/* 104 - 3840x2160@25Hz 64:27 */
{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 4896,
4984, 5280, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 105 - 3840x2160p@30Hz 64:27 */
/* 105 - 3840x2160@30Hz 64:27 */
{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 4016,
4104, 4400, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 106 - 3840x2160p@50Hz 64:27 */
/* 106 - 3840x2160@50Hz 64:27 */
{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 594000, 3840, 4896,
4984, 5280, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 107 - 3840x2160p@60Hz 64:27 */
/* 107 - 3840x2160@60Hz 64:27 */
{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 594000, 3840, 4016,
4104, 4400, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
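For reference when reading the mode table above, a hedged annotation of one entry's initializer; the positional arguments follow struct drm_display_mode, and the entry copied here is VIC 16 from the table itself, not new code.
	/* DRM_MODE(name, type, clock_kHz,
	 *	    hdisplay, hsync_start, hsync_end, htotal, hskew,
	 *	    vdisplay, vsync_start, vsync_end, vtotal, vscan, flags) */
	/* 16 - 1920x1080@60Hz 16:9 */
	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
		   2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
	  .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },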
@ -4873,6 +4874,14 @@ drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,
frame->picture_aspect = HDMI_PICTURE_ASPECT_NONE;
/*
* As some drivers don't support atomic, we can't use connector state.
* So just initialize the frame with default values, just the same way
* as it's done with other properties here.
*/
frame->content_type = HDMI_CONTENT_TYPE_GRAPHICS;
frame->itc = 0;
/*
* Populate picture aspect ratio from either
* user input (if specified) or from the CEA mode list.

View File

@ -368,7 +368,6 @@ static int restore_fbdev_mode_atomic(struct drm_fb_helper *fb_helper, bool activ
struct drm_plane *plane;
struct drm_atomic_state *state;
int i, ret;
unsigned int plane_mask;
struct drm_modeset_acquire_ctx ctx;
drm_modeset_acquire_init(&ctx, 0);
@ -381,7 +380,6 @@ static int restore_fbdev_mode_atomic(struct drm_fb_helper *fb_helper, bool activ
state->acquire_ctx = &ctx;
retry:
plane_mask = 0;
drm_for_each_plane(plane, dev) {
plane_state = drm_atomic_get_plane_state(state, plane);
if (IS_ERR(plane_state)) {
@ -391,9 +389,6 @@ retry:
plane_state->rotation = DRM_MODE_ROTATE_0;
plane->old_fb = plane->fb;
plane_mask |= 1 << drm_plane_index(plane);
/* disable non-primary: */
if (plane->type == DRM_PLANE_TYPE_PRIMARY)
continue;
@ -430,8 +425,6 @@ retry:
ret = drm_atomic_commit(state);
out_state:
drm_atomic_clean_old_fb(dev, plane_mask, ret);
if (ret == -EDEADLK)
goto backoff;
@ -1164,7 +1157,7 @@ EXPORT_SYMBOL(drm_fb_helper_sys_imageblit);
* @info: fbdev registered by the helper
* @rect: info about rectangle to fill
*
* A wrapper around cfb_imageblit implemented by fbdev core
* A wrapper around cfb_fillrect implemented by fbdev core
*/
void drm_fb_helper_cfb_fillrect(struct fb_info *info,
const struct fb_fillrect *rect)

View File

@ -836,8 +836,6 @@ retry:
goto unlock;
plane_mask |= BIT(drm_plane_index(plane));
plane->old_fb = plane->fb;
}
/* This list is only filled when disable_crtcs is set. */
@ -852,9 +850,6 @@ retry:
ret = drm_atomic_commit(state);
unlock:
if (plane_mask)
drm_atomic_clean_old_fb(dev, plane_mask, ret);
if (ret == -EDEADLK) {
drm_atomic_state_clear(state);
drm_modeset_backoff(&ctx);

View File

@ -253,7 +253,7 @@ int drm_gem_fb_prepare_fb(struct drm_plane *plane,
struct dma_buf *dma_buf;
struct dma_fence *fence;
if (plane->state->fb == state->fb || !state->fb)
if (!state->fb)
return 0;
dma_buf = drm_gem_fb_get_obj(state->fb, 0)->dma_buf;
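For illustration, a hedged sketch of how a driver typically wires this helper into its plane so that every commit now syncs implicitly against the framebuffer's exclusive fence; the my_plane_* callbacks are hypothetical and not part of this series.
	#include <drm/drm_modeset_helper_vtables.h>
	#include <drm/drm_gem_framebuffer_helper.h>

	/* Illustrative only: hypothetical plane helper vtable using the
	 * drm_gem_fb_prepare_fb() helper changed above. */
	static const struct drm_plane_helper_funcs my_plane_helper_funcs = {
		.prepare_fb	= drm_gem_fb_prepare_fb,
		.atomic_check	= my_plane_atomic_check,	/* hypothetical */
		.atomic_update	= my_plane_atomic_update,	/* hypothetical */
	};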

View File

@ -334,6 +334,13 @@ drm_setclientcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
return -EINVAL;
file_priv->aspect_ratio_allowed = req->value;
break;
case DRM_CLIENT_CAP_WRITEBACK_CONNECTORS:
if (!file_priv->atomic)
return -EINVAL;
if (req->value > 1)
return -EINVAL;
file_priv->writeback_connectors = req->value;
break;
default:
return -EINVAL;
}
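For illustration, a minimal userspace sketch (not part of this series) of opting in to the new capability with libdrm; per the check above, the atomic cap must be enabled first or the ioctl returns -EINVAL.
	#include <xf86drm.h>

	static int enable_writeback_connectors(int fd)
	{
		int ret;

		/* Writeback connectors are only offered to atomic clients. */
		ret = drmSetClientCap(fd, DRM_CLIENT_CAP_ATOMIC, 1);
		if (ret)
			return ret;

		/* Without this, drm_mode_getresources() keeps hiding them. */
		return drmSetClientCap(fd, DRM_CLIENT_CAP_WRITEBACK_CONNECTORS, 1);
	}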

View File

@ -239,6 +239,32 @@ static void drm_mm_interval_tree_add_node(struct drm_mm_node *hole_node,
#define HOLE_SIZE(NODE) ((NODE)->hole_size)
#define HOLE_ADDR(NODE) (__drm_mm_hole_node_start(NODE))
static u64 rb_to_hole_size(struct rb_node *rb)
{
return rb_entry(rb, struct drm_mm_node, rb_hole_size)->hole_size;
}
static void insert_hole_size(struct rb_root_cached *root,
struct drm_mm_node *node)
{
struct rb_node **link = &root->rb_root.rb_node, *rb = NULL;
u64 x = node->hole_size;
bool first = true;
while (*link) {
rb = *link;
if (x > rb_to_hole_size(rb)) {
link = &rb->rb_left;
} else {
link = &rb->rb_right;
first = false;
}
}
rb_link_node(&node->rb_hole_size, rb, link);
rb_insert_color_cached(&node->rb_hole_size, root, first);
}
static void add_hole(struct drm_mm_node *node)
{
struct drm_mm *mm = node->mm;
@ -247,7 +273,7 @@ static void add_hole(struct drm_mm_node *node)
__drm_mm_hole_node_end(node) - __drm_mm_hole_node_start(node);
DRM_MM_BUG_ON(!drm_mm_hole_follows(node));
RB_INSERT(mm->holes_size, rb_hole_size, HOLE_SIZE);
insert_hole_size(&mm->holes_size, node);
RB_INSERT(mm->holes_addr, rb_hole_addr, HOLE_ADDR);
list_add(&node->hole_stack, &mm->hole_stack);
@ -258,7 +284,7 @@ static void rm_hole(struct drm_mm_node *node)
DRM_MM_BUG_ON(!drm_mm_hole_follows(node));
list_del(&node->hole_stack);
rb_erase(&node->rb_hole_size, &node->mm->holes_size);
rb_erase_cached(&node->rb_hole_size, &node->mm->holes_size);
rb_erase(&node->rb_hole_addr, &node->mm->holes_addr);
node->hole_size = 0;
@ -282,38 +308,39 @@ static inline u64 rb_hole_size(struct rb_node *rb)
static struct drm_mm_node *best_hole(struct drm_mm *mm, u64 size)
{
struct rb_node *best = NULL;
struct rb_node **link = &mm->holes_size.rb_node;
struct rb_node *rb = mm->holes_size.rb_root.rb_node;
struct drm_mm_node *best = NULL;
while (*link) {
struct rb_node *rb = *link;
do {
struct drm_mm_node *node =
rb_entry(rb, struct drm_mm_node, rb_hole_size);
if (size <= rb_hole_size(rb)) {
link = &rb->rb_left;
best = rb;
if (size <= node->hole_size) {
best = node;
rb = rb->rb_right;
} else {
link = &rb->rb_right;
rb = rb->rb_left;
}
}
} while (rb);
return rb_hole_size_to_node(best);
return best;
}
static struct drm_mm_node *find_hole(struct drm_mm *mm, u64 addr)
{
struct rb_node *rb = mm->holes_addr.rb_node;
struct drm_mm_node *node = NULL;
struct rb_node **link = &mm->holes_addr.rb_node;
while (*link) {
while (rb) {
u64 hole_start;
node = rb_hole_addr_to_node(*link);
node = rb_hole_addr_to_node(rb);
hole_start = __drm_mm_hole_node_start(node);
if (addr < hole_start)
link = &node->rb_hole_addr.rb_left;
rb = node->rb_hole_addr.rb_left;
else if (addr > hole_start + node->hole_size)
link = &node->rb_hole_addr.rb_right;
rb = node->rb_hole_addr.rb_right;
else
break;
}
@ -326,9 +353,6 @@ first_hole(struct drm_mm *mm,
u64 start, u64 end, u64 size,
enum drm_mm_insert_mode mode)
{
if (RB_EMPTY_ROOT(&mm->holes_size))
return NULL;
switch (mode) {
default:
case DRM_MM_INSERT_BEST:
@ -355,7 +379,7 @@ next_hole(struct drm_mm *mm,
switch (mode) {
default:
case DRM_MM_INSERT_BEST:
return rb_hole_size_to_node(rb_next(&node->rb_hole_size));
return rb_hole_size_to_node(rb_prev(&node->rb_hole_size));
case DRM_MM_INSERT_LOW:
return rb_hole_addr_to_node(rb_next(&node->rb_hole_addr));
@ -426,6 +450,11 @@ int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
}
EXPORT_SYMBOL(drm_mm_reserve_node);
static u64 rb_to_hole_size_or_zero(struct rb_node *rb)
{
return rb ? rb_to_hole_size(rb) : 0;
}
/**
* drm_mm_insert_node_in_range - ranged search for space and insert @node
* @mm: drm_mm to allocate from
@ -451,18 +480,26 @@ int drm_mm_insert_node_in_range(struct drm_mm * const mm,
{
struct drm_mm_node *hole;
u64 remainder_mask;
bool once;
DRM_MM_BUG_ON(range_start >= range_end);
if (unlikely(size == 0 || range_end - range_start < size))
return -ENOSPC;
if (rb_to_hole_size_or_zero(rb_first_cached(&mm->holes_size)) < size)
return -ENOSPC;
if (alignment <= 1)
alignment = 0;
once = mode & DRM_MM_INSERT_ONCE;
mode &= ~DRM_MM_INSERT_ONCE;
remainder_mask = is_power_of_2(alignment) ? alignment - 1 : 0;
for (hole = first_hole(mm, range_start, range_end, size, mode); hole;
hole = next_hole(mm, hole, mode)) {
for (hole = first_hole(mm, range_start, range_end, size, mode);
hole;
hole = once ? NULL : next_hole(mm, hole, mode)) {
u64 hole_start = __drm_mm_hole_node_start(hole);
u64 hole_end = hole_start + hole->hole_size;
u64 adj_start, adj_end;
@ -587,9 +624,9 @@ void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
if (drm_mm_hole_follows(old)) {
list_replace(&old->hole_stack, &new->hole_stack);
rb_replace_node(&old->rb_hole_size,
&new->rb_hole_size,
&mm->holes_size);
rb_replace_node_cached(&old->rb_hole_size,
&new->rb_hole_size,
&mm->holes_size);
rb_replace_node(&old->rb_hole_addr,
&new->rb_hole_addr,
&mm->holes_addr);
@ -885,7 +922,7 @@ void drm_mm_init(struct drm_mm *mm, u64 start, u64 size)
INIT_LIST_HEAD(&mm->hole_stack);
mm->interval_tree = RB_ROOT_CACHED;
mm->holes_size = RB_ROOT;
mm->holes_size = RB_ROOT_CACHED;
mm->holes_addr = RB_ROOT;
/* Clever trick to avoid a special case in the free hole tracking. */
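For context, a hedged usage sketch of the allocator path touched above; the vram_mm manager, range and sizes are invented, but drm_mm_init() and drm_mm_insert_node_in_range() are the existing entry points whose hole search now uses the cached size tree.
	#include <drm/drm_mm.h>

	static struct drm_mm vram_mm;	/* assumed: drm_mm_init(&vram_mm, 0, 256 << 20) at probe */

	static int alloc_vram_block(struct drm_mm_node *node, u64 size)
	{
		/* DRM_MM_INSERT_BEST picks the smallest hole that fits; the new
		 * early check rejects the request outright if even the largest
		 * hole (kept at rb_first_cached()) is too small. */
		return drm_mm_insert_node_in_range(&vram_mm, node, size,
						   PAGE_SIZE, 0,
						   0, 256 << 20,
						   DRM_MM_INSERT_BEST);
	}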

View File

@ -145,6 +145,11 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
count = 0;
connector_id = u64_to_user_ptr(card_res->connector_id_ptr);
drm_for_each_connector_iter(connector, &conn_iter) {
/* only expose writeback connectors if userspace understands them */
if (!file_priv->writeback_connectors &&
(connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK))
continue;
if (drm_lease_held(file_priv, connector->base.id)) {
if (count < card_res->count_connectors &&
put_user(connector->base.id, connector_id + count)) {

View File

@ -1257,7 +1257,7 @@ static const char * const drm_mode_status_names[] = {
#undef MODE_STATUS
static const char *drm_get_mode_status_name(enum drm_mode_status status)
const char *drm_get_mode_status_name(enum drm_mode_status status)
{
int index = status + 3;

View File

@ -24,6 +24,7 @@
#include <linux/err.h>
#include <linux/module.h>
#include <drm/drm_device.h>
#include <drm/drm_crtc.h>
#include <drm/drm_panel.h>
@ -94,6 +95,9 @@ EXPORT_SYMBOL(drm_panel_remove);
*
* An error is returned if the panel is already attached to another connector.
*
* When unloading, the driver should detach from the panel by calling
* drm_panel_detach().
*
* Return: 0 on success or a negative error code on failure.
*/
int drm_panel_attach(struct drm_panel *panel, struct drm_connector *connector)
@ -101,6 +105,13 @@ int drm_panel_attach(struct drm_panel *panel, struct drm_connector *connector)
if (panel->connector)
return -EBUSY;
panel->link = device_link_add(connector->dev->dev, panel->dev, 0);
if (!panel->link) {
dev_err(panel->dev, "failed to link panel to %s\n",
dev_name(connector->dev->dev));
return -EINVAL;
}
panel->connector = connector;
panel->drm = connector->dev;
@ -115,10 +126,15 @@ EXPORT_SYMBOL(drm_panel_attach);
* Detaches a panel from the connector it is attached to. If a panel is not
* attached to any connector this is effectively a no-op.
*
* This function should not be called by the panel device itself. It
* is only for the drm device that called drm_panel_attach().
*
* Return: 0 on success or a negative error code on failure.
*/
int drm_panel_detach(struct drm_panel *panel)
{
device_link_del(panel->link);
panel->connector = NULL;
panel->drm = NULL;
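A hedged sketch of the attach/detach pairing the updated kernel-doc asks for; my_encoder and its bind/unbind hooks are hypothetical, and the error path now also covers a failed device link.
	/* Illustrative only: a hypothetical encoder driver pairing
	 * drm_panel_attach() at bind time with drm_panel_detach() at unbind. */
	static int my_encoder_bind(struct my_encoder *enc)
	{
		/* Fails with -EBUSY if already attached, or -EINVAL if the
		 * device link to the panel could not be created. */
		return drm_panel_attach(enc->panel, &enc->connector);
	}

	static void my_encoder_unbind(struct my_encoder *enc)
	{
		/* Must be called by the drm device that attached the panel,
		 * never by the panel driver itself. */
		drm_panel_detach(enc->panel);
	}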

View File

@ -177,6 +177,10 @@ int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane,
if (WARN_ON(config->num_total_plane >= 32))
return -EINVAL;
WARN_ON(drm_drv_uses_atomic_modeset(dev) &&
(!funcs->atomic_destroy_state ||
!funcs->atomic_duplicate_state));
ret = drm_mode_object_add(dev, &plane->base, DRM_MODE_OBJECT_PLANE);
if (ret)
return ret;
@ -561,19 +565,20 @@ int drm_plane_check_pixel_format(struct drm_plane *plane,
if (i == plane->format_count)
return -EINVAL;
if (!plane->modifier_count)
return 0;
if (plane->funcs->format_mod_supported) {
if (!plane->funcs->format_mod_supported(plane, format, modifier))
return -EINVAL;
} else {
if (!plane->modifier_count)
return 0;
for (i = 0; i < plane->modifier_count; i++) {
if (modifier == plane->modifiers[i])
break;
for (i = 0; i < plane->modifier_count; i++) {
if (modifier == plane->modifiers[i])
break;
}
if (i == plane->modifier_count)
return -EINVAL;
}
if (i == plane->modifier_count)
return -EINVAL;
if (plane->funcs->format_mod_supported &&
!plane->funcs->format_mod_supported(plane, format, modifier))
return -EINVAL;
return 0;
}
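For illustration, a hedged sketch of the ->format_mod_supported() hook that the reworked check above consults whenever a driver provides one; the policy shown (linear everywhere, X-tiling only for XRGB8888) is invented for the example.
	#include <drm/drm_fourcc.h>

	static bool my_plane_format_mod_supported(struct drm_plane *plane,
						  u32 format, u64 modifier)
	{
		/* Hypothetical policy, purely illustrative. */
		if (modifier == DRM_FORMAT_MOD_LINEAR)
			return true;

		return format == DRM_FORMAT_XRGB8888 &&
		       modifier == I915_FORMAT_MOD_X_TILED;
	}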
@ -650,9 +655,11 @@ static int __setplane_internal(struct drm_plane *plane,
crtc_x, crtc_y, crtc_w, crtc_h,
src_x, src_y, src_w, src_h, ctx);
if (!ret) {
plane->crtc = crtc;
plane->fb = fb;
drm_framebuffer_get(plane->fb);
if (!plane->state) {
plane->crtc = crtc;
plane->fb = fb;
drm_framebuffer_get(plane->fb);
}
} else {
plane->old_fb = NULL;
}
@ -1092,8 +1099,10 @@ retry:
/* Keep the old fb, don't unref it. */
plane->old_fb = NULL;
} else {
plane->fb = fb;
drm_framebuffer_get(fb);
if (!plane->state) {
plane->fb = fb;
drm_framebuffer_get(fb);
}
}
out:

View File

@ -502,6 +502,7 @@ EXPORT_SYMBOL(drm_plane_helper_update);
int drm_plane_helper_disable(struct drm_plane *plane)
{
struct drm_plane_state *plane_state;
struct drm_framebuffer *old_fb;
/* crtc helpers love to call disable functions for already disabled hw
* functions. So cope with that. */
@ -521,8 +522,9 @@ int drm_plane_helper_disable(struct drm_plane *plane)
plane_state->plane = plane;
plane_state->crtc = NULL;
old_fb = plane_state->fb;
drm_atomic_set_fb_for_plane(plane_state, NULL);
return drm_plane_helper_commit(plane, plane_state, plane->fb);
return drm_plane_helper_commit(plane, plane_state, old_fb);
}
EXPORT_SYMBOL(drm_plane_helper_disable);

View File

@ -186,7 +186,6 @@ static int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpri
/**
* drm_gem_map_attach - dma_buf attach implementation for GEM
* @dma_buf: buffer to attach device to
* @target_dev: not used
* @attach: buffer attachment data
*
* Allocates &drm_prime_attachment and calls &drm_driver.gem_prime_pin for
@ -195,7 +194,7 @@ static int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpri
*
* Returns 0 on success, negative error code on failure.
*/
int drm_gem_map_attach(struct dma_buf *dma_buf, struct device *target_dev,
int drm_gem_map_attach(struct dma_buf *dma_buf,
struct dma_buf_attachment *attach)
{
struct drm_prime_attachment *prime_attach;
@ -434,35 +433,6 @@ void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
}
EXPORT_SYMBOL(drm_gem_dmabuf_vunmap);
/**
* drm_gem_dmabuf_kmap_atomic - map_atomic implementation for GEM
* @dma_buf: buffer to be mapped
* @page_num: page number within the buffer
*
* Not implemented. This can be used as the &dma_buf_ops.map_atomic callback.
*/
void *drm_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
unsigned long page_num)
{
return NULL;
}
EXPORT_SYMBOL(drm_gem_dmabuf_kmap_atomic);
/**
* drm_gem_dmabuf_kunmap_atomic - unmap_atomic implementation for GEM
* @dma_buf: buffer to be unmapped
* @page_num: page number within the buffer
* @addr: virtual address of the buffer
*
* Not implemented. This can be used as the &dma_buf_ops.unmap_atomic callback.
*/
void drm_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
unsigned long page_num, void *addr)
{
}
EXPORT_SYMBOL(drm_gem_dmabuf_kunmap_atomic);
/**
* drm_gem_dmabuf_kmap - map implementation for GEM
* @dma_buf: buffer to be mapped
@ -520,9 +490,7 @@ static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = {
.unmap_dma_buf = drm_gem_unmap_dma_buf,
.release = drm_gem_dmabuf_release,
.map = drm_gem_dmabuf_kmap,
.map_atomic = drm_gem_dmabuf_kmap_atomic,
.unmap = drm_gem_dmabuf_kunmap,
.unmap_atomic = drm_gem_dmabuf_kunmap_atomic,
.mmap = drm_gem_dmabuf_mmap,
.vmap = drm_gem_dmabuf_vmap,
.vunmap = drm_gem_dmabuf_vunmap,

View File

@ -100,7 +100,7 @@ static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma)
* map, get the page, increment the use count and return it.
*/
#if IS_ENABLED(CONFIG_AGP)
static int drm_vm_fault(struct vm_fault *vmf)
static vm_fault_t drm_vm_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct drm_file *priv = vma->vm_file->private_data;
@ -173,7 +173,7 @@ vm_fault_error:
return VM_FAULT_SIGBUS; /* Disallow mremap */
}
#else
static int drm_vm_fault(struct vm_fault *vmf)
static vm_fault_t drm_vm_fault(struct vm_fault *vmf)
{
return VM_FAULT_SIGBUS;
}
@ -189,7 +189,7 @@ static int drm_vm_fault(struct vm_fault *vmf)
* Get the mapping, find the real physical page to map, get the page, and
* return it.
*/
static int drm_vm_shm_fault(struct vm_fault *vmf)
static vm_fault_t drm_vm_shm_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct drm_local_map *map = vma->vm_private_data;
@ -291,7 +291,7 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
*
* Determine the page number from the page offset and get it from drm_device_dma::pagelist.
*/
static int drm_vm_dma_fault(struct vm_fault *vmf)
static vm_fault_t drm_vm_dma_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct drm_file *priv = vma->vm_file->private_data;
@ -326,7 +326,7 @@ static int drm_vm_dma_fault(struct vm_fault *vmf)
*
* Determine the map offset from the page offset and get it from drm_sg_mem::pagelist.
*/
static int drm_vm_sg_fault(struct vm_fault *vmf)
static vm_fault_t drm_vm_sg_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct drm_local_map *map = vma->vm_private_data;

View File

@ -0,0 +1,350 @@
// SPDX-License-Identifier: GPL-2.0
/*
* (C) COPYRIGHT 2016 ARM Limited. All rights reserved.
* Author: Brian Starkey <brian.starkey@arm.com>
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
* Foundation, and any use by you of this program is subject to the terms
* of such GNU licence.
*/
#include <drm/drm_crtc.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_property.h>
#include <drm/drm_writeback.h>
#include <drm/drmP.h>
#include <linux/dma-fence.h>
/**
* DOC: overview
*
* Writeback connectors are used to expose hardware which can write the output
* from a CRTC to a memory buffer. They are used and act similarly to other
* types of connectors, with some important differences:
* - Writeback connectors don't provide a way to output visually to the user.
* - Writeback connectors should always report as "disconnected" (so that
* clients which don't understand them will ignore them).
* - Writeback connectors don't have EDID.
*
* A framebuffer may only be attached to a writeback connector when the
* connector is attached to a CRTC. The WRITEBACK_FB_ID property which sets the
* framebuffer applies only to a single commit (see below). A framebuffer may
* not be attached while the CRTC is off.
*
* Unlike with planes, when a writeback framebuffer is removed by userspace DRM
* makes no attempt to remove it from active use by the connector. This is
* because no method is provided to abort a writeback operation, and in any
* case making a new commit whilst a writeback is ongoing is undefined (see
* WRITEBACK_OUT_FENCE_PTR below). As soon as the current writeback is finished,
* the framebuffer will automatically no longer be in active use. As it will
* also have already been removed from the framebuffer list, there will be no
* way for any userspace application to retrieve a reference to it in the
* intervening period.
*
* Writeback connectors have some additional properties, which userspace
* can use to query and control them:
*
* "WRITEBACK_FB_ID":
* Write-only object property storing a DRM_MODE_OBJECT_FB: it stores the
* framebuffer to be written by the writeback connector. This property is
* similar to the FB_ID property on planes, but will always read as zero
* and is not preserved across commits.
* Userspace must set this property to an output buffer every time it
* wishes the buffer to get filled.
*
* "WRITEBACK_PIXEL_FORMATS":
* Immutable blob property to store the supported pixel formats table. The
* data is an array of u32 DRM_FORMAT_* fourcc values.
* Userspace can use this blob to find out what pixel formats are supported
* by the connector's writeback engine.
*
* "WRITEBACK_OUT_FENCE_PTR":
* Userspace can use this property to provide a pointer for the kernel to
* fill with a sync_file file descriptor, which will signal once the
* writeback is finished. The value should be the address of a 32-bit
* signed integer, cast to a u64.
* Userspace should wait for this fence to signal before making another
* commit affecting any of the same CRTCs, Planes or Connectors.
* **Failure to do so will result in undefined behaviour.**
* For this reason it is strongly recommended that all userspace
* applications making use of writeback connectors *always* retrieve an
* out-fence for the commit and use it appropriately.
* From userspace, this property will always read as zero.
*/
#define fence_to_wb_connector(x) container_of(x->lock, \
struct drm_writeback_connector, \
fence_lock)
static const char *drm_writeback_fence_get_driver_name(struct dma_fence *fence)
{
struct drm_writeback_connector *wb_connector =
fence_to_wb_connector(fence);
return wb_connector->base.dev->driver->name;
}
static const char *
drm_writeback_fence_get_timeline_name(struct dma_fence *fence)
{
struct drm_writeback_connector *wb_connector =
fence_to_wb_connector(fence);
return wb_connector->timeline_name;
}
static bool drm_writeback_fence_enable_signaling(struct dma_fence *fence)
{
return true;
}
static const struct dma_fence_ops drm_writeback_fence_ops = {
.get_driver_name = drm_writeback_fence_get_driver_name,
.get_timeline_name = drm_writeback_fence_get_timeline_name,
.enable_signaling = drm_writeback_fence_enable_signaling,
.wait = dma_fence_default_wait,
};
static int create_writeback_properties(struct drm_device *dev)
{
struct drm_property *prop;
if (!dev->mode_config.writeback_fb_id_property) {
prop = drm_property_create_object(dev, DRM_MODE_PROP_ATOMIC,
"WRITEBACK_FB_ID",
DRM_MODE_OBJECT_FB);
if (!prop)
return -ENOMEM;
dev->mode_config.writeback_fb_id_property = prop;
}
if (!dev->mode_config.writeback_pixel_formats_property) {
prop = drm_property_create(dev, DRM_MODE_PROP_BLOB |
DRM_MODE_PROP_ATOMIC |
DRM_MODE_PROP_IMMUTABLE,
"WRITEBACK_PIXEL_FORMATS", 0);
if (!prop)
return -ENOMEM;
dev->mode_config.writeback_pixel_formats_property = prop;
}
if (!dev->mode_config.writeback_out_fence_ptr_property) {
prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
"WRITEBACK_OUT_FENCE_PTR", 0,
U64_MAX);
if (!prop)
return -ENOMEM;
dev->mode_config.writeback_out_fence_ptr_property = prop;
}
return 0;
}
static const struct drm_encoder_funcs drm_writeback_encoder_funcs = {
.destroy = drm_encoder_cleanup,
};
/**
* drm_writeback_connector_init - Initialize a writeback connector and its properties
* @dev: DRM device
* @wb_connector: Writeback connector to initialize
* @con_funcs: Connector funcs vtable
* @enc_helper_funcs: Encoder helper funcs vtable to be used by the internal encoder
* @formats: Array of supported pixel formats for the writeback engine
* @n_formats: Length of the formats array
*
* This function creates the writeback-connector-specific properties if they
* have not been already created, initializes the connector as
* type DRM_MODE_CONNECTOR_WRITEBACK, and correctly initializes the property
* values. It will also create an internal encoder associated with the
* drm_writeback_connector and set it to use the @enc_helper_funcs vtable for
* the encoder helper.
*
* Drivers should always use this function instead of drm_connector_init() to
* set up writeback connectors.
*
* Returns: 0 on success, or a negative error code
*/
int drm_writeback_connector_init(struct drm_device *dev,
struct drm_writeback_connector *wb_connector,
const struct drm_connector_funcs *con_funcs,
const struct drm_encoder_helper_funcs *enc_helper_funcs,
const u32 *formats, int n_formats)
{
struct drm_property_blob *blob;
struct drm_connector *connector = &wb_connector->base;
struct drm_mode_config *config = &dev->mode_config;
int ret = create_writeback_properties(dev);
if (ret != 0)
return ret;
blob = drm_property_create_blob(dev, n_formats * sizeof(*formats),
formats);
if (IS_ERR(blob))
return PTR_ERR(blob);
drm_encoder_helper_add(&wb_connector->encoder, enc_helper_funcs);
ret = drm_encoder_init(dev, &wb_connector->encoder,
&drm_writeback_encoder_funcs,
DRM_MODE_ENCODER_VIRTUAL, NULL);
if (ret)
goto fail;
connector->interlace_allowed = 0;
ret = drm_connector_init(dev, connector, con_funcs,
DRM_MODE_CONNECTOR_WRITEBACK);
if (ret)
goto connector_fail;
ret = drm_mode_connector_attach_encoder(connector,
&wb_connector->encoder);
if (ret)
goto attach_fail;
INIT_LIST_HEAD(&wb_connector->job_queue);
spin_lock_init(&wb_connector->job_lock);
wb_connector->fence_context = dma_fence_context_alloc(1);
spin_lock_init(&wb_connector->fence_lock);
snprintf(wb_connector->timeline_name,
sizeof(wb_connector->timeline_name),
"CONNECTOR:%d-%s", connector->base.id, connector->name);
drm_object_attach_property(&connector->base,
config->writeback_out_fence_ptr_property, 0);
drm_object_attach_property(&connector->base,
config->writeback_fb_id_property, 0);
drm_object_attach_property(&connector->base,
config->writeback_pixel_formats_property,
blob->base.id);
wb_connector->pixel_formats_blob_ptr = blob;
return 0;
attach_fail:
drm_connector_cleanup(connector);
connector_fail:
drm_encoder_cleanup(&wb_connector->encoder);
fail:
drm_property_blob_put(blob);
return ret;
}
EXPORT_SYMBOL(drm_writeback_connector_init);
/**
* drm_writeback_queue_job - Queue a writeback job for later signalling
* @wb_connector: The writeback connector to queue a job on
* @job: The job to queue
*
* This function adds a job to the job_queue for a writeback connector. It
* should be considered to take ownership of the writeback job, and so any other
* references to the job must be cleared after calling this function.
*
* Drivers must ensure that for a given writeback connector, jobs are queued in
* exactly the same order as they will be completed by the hardware (and
* signaled via drm_writeback_signal_completion).
*
* For every call to drm_writeback_queue_job() there must be exactly one call to
* drm_writeback_signal_completion()
*
* See also: drm_writeback_signal_completion()
*/
void drm_writeback_queue_job(struct drm_writeback_connector *wb_connector,
struct drm_writeback_job *job)
{
unsigned long flags;
spin_lock_irqsave(&wb_connector->job_lock, flags);
list_add_tail(&job->list_entry, &wb_connector->job_queue);
spin_unlock_irqrestore(&wb_connector->job_lock, flags);
}
EXPORT_SYMBOL(drm_writeback_queue_job);
/*
* @cleanup_work: deferred cleanup of a writeback job
*
* The job cannot be cleaned up directly in drm_writeback_signal_completion,
* because it may be called in interrupt context. Dropping the framebuffer
* reference can sleep, and so the cleanup is deferred to a workqueue.
*/
static void cleanup_work(struct work_struct *work)
{
struct drm_writeback_job *job = container_of(work,
struct drm_writeback_job,
cleanup_work);
drm_framebuffer_put(job->fb);
kfree(job);
}
/**
* drm_writeback_signal_completion - Signal the completion of a writeback job
* @wb_connector: The writeback connector whose job is complete
* @status: Status code to set in the writeback out_fence (0 for success)
*
* Drivers should call this to signal the completion of a previously queued
* writeback job. It should be called as soon as possible after the hardware
* has finished writing, and may be called from interrupt context.
* It is the driver's responsibility to ensure that for a given connector, the
* hardware completes writeback jobs in the same order as they are queued.
*
* Unless the driver is holding its own reference to the framebuffer, it must
* not be accessed after calling this function.
*
* See also: drm_writeback_queue_job()
*/
void
drm_writeback_signal_completion(struct drm_writeback_connector *wb_connector,
int status)
{
unsigned long flags;
struct drm_writeback_job *job;
spin_lock_irqsave(&wb_connector->job_lock, flags);
job = list_first_entry_or_null(&wb_connector->job_queue,
struct drm_writeback_job,
list_entry);
if (job) {
list_del(&job->list_entry);
if (job->out_fence) {
if (status)
dma_fence_set_error(job->out_fence, status);
dma_fence_signal(job->out_fence);
dma_fence_put(job->out_fence);
}
}
spin_unlock_irqrestore(&wb_connector->job_lock, flags);
if (WARN_ON(!job))
return;
INIT_WORK(&job->cleanup_work, cleanup_work);
queue_work(system_long_wq, &job->cleanup_work);
}
EXPORT_SYMBOL(drm_writeback_signal_completion);
struct dma_fence *
drm_writeback_get_out_fence(struct drm_writeback_connector *wb_connector)
{
struct dma_fence *fence;
if (WARN_ON(wb_connector->base.connector_type !=
DRM_MODE_CONNECTOR_WRITEBACK))
return NULL;
fence = kzalloc(sizeof(*fence), GFP_KERNEL);
if (!fence)
return NULL;
dma_fence_init(fence, &drm_writeback_fence_ops,
&wb_connector->fence_lock, wb_connector->fence_context,
++wb_connector->fence_seqno);
return fence;
}
EXPORT_SYMBOL(drm_writeback_get_out_fence);
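On the driver side, a hedged sketch of the queue/signal pairing the two kernel-docs above require; my_crtc, its hardware hook and the interrupt handler are hypothetical, and the job is assumed to have been prepared by the driver's atomic commit path.
	#include <linux/interrupt.h>
	#include <drm/drm_writeback.h>

	static void my_crtc_kick_writeback(struct my_crtc *crtc,
					   struct drm_writeback_job *job)
	{
		/* Queueing hands ownership of @job to the connector; the
		 * caller must not touch it again. */
		drm_writeback_queue_job(&crtc->wb_connector, job);
		my_hw_start_writeback(crtc);	/* hypothetical register writes */
	}

	static irqreturn_t my_crtc_irq(int irq, void *data)
	{
		struct my_crtc *crtc = data;

		/* Jobs complete in queue order; status 0 means success. */
		drm_writeback_signal_completion(&crtc->wb_connector, 0);
		return IRQ_HANDLED;
	}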

View File

@ -263,8 +263,6 @@ static void exynos_plane_atomic_update(struct drm_plane *plane,
if (!state->crtc)
return;
plane->crtc = state->crtc;
if (exynos_crtc->ops->update_plane)
exynos_crtc->ops->update_plane(exynos_crtc, exynos_plane);
}

View File

@ -251,7 +251,7 @@ static void psbfb_copyarea_accel(struct fb_info *info,
if (!fb)
return;
offset = psbfb->gtt->offset;
offset = to_gtt_range(fb->obj[0])->offset;
stride = fb->pitches[0];
switch (fb->format->depth) {

View File

@ -33,6 +33,7 @@
#include <drm/drm.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include "psb_drv.h"
#include "psb_intel_reg.h"
@ -40,14 +41,9 @@
#include "framebuffer.h"
#include "gtt.h"
static void psb_user_framebuffer_destroy(struct drm_framebuffer *fb);
static int psb_user_framebuffer_create_handle(struct drm_framebuffer *fb,
struct drm_file *file_priv,
unsigned int *handle);
static const struct drm_framebuffer_funcs psb_fb_funcs = {
.destroy = psb_user_framebuffer_destroy,
.create_handle = psb_user_framebuffer_create_handle,
.destroy = drm_gem_fb_destroy,
.create_handle = drm_gem_fb_create_handle,
};
#define CMAP_TOHW(_val, _width) ((((_val) << (_width)) + 0x7FFF - (_val)) >> 16)
@ -96,17 +92,18 @@ static int psbfb_pan(struct fb_var_screeninfo *var, struct fb_info *info)
struct psb_fbdev *fbdev = info->par;
struct psb_framebuffer *psbfb = &fbdev->pfb;
struct drm_device *dev = psbfb->base.dev;
struct gtt_range *gtt = to_gtt_range(psbfb->base.obj[0]);
/*
* We have to poke our nose in here. The core fb code assumes
* panning is part of the hardware that can be invoked before
* the actual fb is mapped. In our case that isn't quite true.
*/
if (psbfb->gtt->npage) {
if (gtt->npage) {
/* GTT roll shifts in 4K pages, we need to shift the right
number of pages */
int pages = info->fix.line_length >> 12;
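/* Example (hypothetical figures): a 4096-byte line_length gives pages = 1,
 * so panning by var->yoffset = 100 lines rolls the GTT by 100 pages. */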
psb_gtt_roll(dev, psbfb->gtt, var->yoffset * pages);
psb_gtt_roll(dev, gtt, var->yoffset * pages);
}
return 0;
}
@ -117,13 +114,14 @@ static int psbfb_vm_fault(struct vm_fault *vmf)
struct psb_framebuffer *psbfb = vma->vm_private_data;
struct drm_device *dev = psbfb->base.dev;
struct drm_psb_private *dev_priv = dev->dev_private;
struct gtt_range *gtt = to_gtt_range(psbfb->base.obj[0]);
int page_num;
int i;
unsigned long address;
int ret;
unsigned long pfn;
unsigned long phys_addr = (unsigned long)dev_priv->stolen_base +
psbfb->gtt->offset;
gtt->offset;
page_num = vma_pages(vma);
address = vmf->address - (vmf->pgoff << PAGE_SHIFT);
@ -246,7 +244,7 @@ static int psb_framebuffer_init(struct drm_device *dev,
return -EINVAL;
drm_helper_mode_fill_fb_struct(dev, &fb->base, mode_cmd);
fb->gtt = gt;
fb->base.obj[0] = &gt->gem;
ret = drm_framebuffer_init(dev, &fb->base, &psb_fb_funcs);
if (ret) {
dev_err(dev->dev, "framebuffer init failed: %d\n", ret);
@ -518,8 +516,8 @@ static int psb_fbdev_destroy(struct drm_device *dev, struct psb_fbdev *fbdev)
drm_framebuffer_unregister_private(&psbfb->base);
drm_framebuffer_cleanup(&psbfb->base);
if (psbfb->gtt)
drm_gem_object_unreference_unlocked(&psbfb->gtt->gem);
if (psbfb->base.obj[0])
drm_gem_object_unreference_unlocked(psbfb->base.obj[0]);
return 0;
}
@ -576,44 +574,6 @@ static void psb_fbdev_fini(struct drm_device *dev)
dev_priv->fbdev = NULL;
}
/**
* psb_user_framebuffer_create_handle - add handle to a framebuffer
* @fb: framebuffer
* @file_priv: our DRM file
* @handle: returned handle
*
* Our framebuffer object is a GTT range which also contains a GEM
* object. We need to turn it into a handle for userspace. GEM will do
* the work for us
*/
static int psb_user_framebuffer_create_handle(struct drm_framebuffer *fb,
struct drm_file *file_priv,
unsigned int *handle)
{
struct psb_framebuffer *psbfb = to_psb_fb(fb);
struct gtt_range *r = psbfb->gtt;
return drm_gem_handle_create(file_priv, &r->gem, handle);
}
/**
* psb_user_framebuffer_destroy - destruct user created fb
* @fb: framebuffer
*
* User framebuffers are backed by GEM objects so all we have to do is
* clean up a bit and drop the reference, GEM will handle the fallout
*/
static void psb_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
struct psb_framebuffer *psbfb = to_psb_fb(fb);
struct gtt_range *r = psbfb->gtt;
/* Let DRM do its clean up */
drm_framebuffer_cleanup(fb);
/* We are no longer using the resource in GEM */
drm_gem_object_unreference_unlocked(&r->gem);
kfree(fb);
}
static const struct drm_mode_config_funcs psb_mode_funcs = {
.fb_create = psb_user_framebuffer_create,
.output_poll_changed = drm_fb_helper_output_poll_changed,

View File

@ -31,7 +31,6 @@ struct psb_framebuffer {
struct drm_framebuffer base;
struct address_space *addr_space;
struct fb_info *fbdev;
struct gtt_range *gtt;
};
struct psb_fbdev {

View File

@ -60,7 +60,7 @@ int gma_pipe_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_psb_private *dev_priv = dev->dev_private;
struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
struct drm_framebuffer *fb = crtc->primary->fb;
struct psb_framebuffer *psbfb = to_psb_fb(fb);
struct gtt_range *gtt = to_gtt_range(fb->obj[0]);
int pipe = gma_crtc->pipe;
const struct psb_offset *map = &dev_priv->regmap[pipe];
unsigned long start, offset;
@ -78,10 +78,10 @@ int gma_pipe_set_base(struct drm_crtc *crtc, int x, int y,
/* We are displaying this buffer, make sure it is actually loaded
into the GTT */
ret = psb_gtt_pin(psbfb->gtt);
ret = psb_gtt_pin(gtt);
if (ret < 0)
goto gma_pipe_set_base_exit;
start = psbfb->gtt->offset;
start = gtt->offset;
offset = y * fb->pitches[0] + x * fb->format->cpp[0];
REG_WRITE(map->stride, fb->pitches[0]);
@ -129,7 +129,7 @@ int gma_pipe_set_base(struct drm_crtc *crtc, int x, int y,
gma_pipe_cleaner:
/* If there was a previous display we can now unpin it */
if (old_fb)
psb_gtt_unpin(to_psb_fb(old_fb)->gtt);
psb_gtt_unpin(to_gtt_range(old_fb->obj[0]));
gma_pipe_set_base_exit:
gma_power_end(dev);
@ -491,7 +491,7 @@ void gma_crtc_disable(struct drm_crtc *crtc)
crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
if (crtc->primary->fb) {
gt = to_psb_fb(crtc->primary->fb)->gtt;
gt = to_gtt_range(crtc->primary->fb->obj[0]);
psb_gtt_unpin(gt);
}
}

View File

@ -53,6 +53,8 @@ struct gtt_range {
int roll; /* Roll applied to the GTT entries */
};
#define to_gtt_range(x) container_of(x, struct gtt_range, gem)
extern struct gtt_range *psb_gtt_alloc_range(struct drm_device *dev, int len,
const char *name, int backed,
u32 align);

View File

@ -196,7 +196,7 @@ static int mdfld__intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
if (!gma_power_begin(dev, true))
return 0;
start = psbfb->gtt->offset;
start = to_gtt_range(fb->obj[0])->offset;
offset = y * fb->pitches[0] + x * fb->format->cpp[0];
REG_WRITE(map->stride, fb->pitches[0]);

View File

@ -600,7 +600,6 @@ static int oaktrail_pipe_set_base(struct drm_crtc *crtc,
struct drm_psb_private *dev_priv = dev->dev_private;
struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
struct drm_framebuffer *fb = crtc->primary->fb;
struct psb_framebuffer *psbfb = to_psb_fb(fb);
int pipe = gma_crtc->pipe;
const struct psb_offset *map = &dev_priv->regmap[pipe];
unsigned long start, offset;
@ -617,7 +616,7 @@ static int oaktrail_pipe_set_base(struct drm_crtc *crtc,
if (!gma_power_begin(dev, true))
return 0;
start = psbfb->gtt->offset;
start = to_gtt_range(fb->obj[0])->offset;
offset = y * fb->pitches[0] + x * fb->format->cpp[0];
REG_WRITE(map->stride, fb->pitches[0]);

View File

@ -429,13 +429,20 @@ static const char *cmd_status_names[] = {
"Scaling not supported"
};
#define MAX_ARG_LEN 32
static bool psb_intel_sdvo_write_cmd(struct psb_intel_sdvo *psb_intel_sdvo, u8 cmd,
const void *args, int args_len)
{
u8 buf[args_len*2 + 2], status;
struct i2c_msg msgs[args_len + 3];
u8 buf[MAX_ARG_LEN*2 + 2], status;
struct i2c_msg msgs[MAX_ARG_LEN + 3];
int i, ret;
if (args_len > MAX_ARG_LEN) {
DRM_ERROR("Need to increase arg length\n");
return false;
}
psb_intel_sdvo_debug_write(psb_intel_sdvo, cmd, args, args_len);
for (i = 0; i < args_len; i++) {

View File

@ -589,13 +589,22 @@ out:
return ret;
}
#define MAX_WRITE_RANGE_BUF 32
static void
reg_write_range(struct tda998x_priv *priv, u16 reg, u8 *p, int cnt)
{
struct i2c_client *client = priv->hdmi;
u8 buf[cnt+1];
/* This is the maximum size of the buffer passed in */
u8 buf[MAX_WRITE_RANGE_BUF + 1];
int ret;
if (cnt > MAX_WRITE_RANGE_BUF) {
dev_err(&client->dev, "Fixed write buffer too small (%d)\n",
MAX_WRITE_RANGE_BUF);
return;
}
buf[0] = REG2ADDR(reg);
memcpy(&buf[1], p, cnt);
@ -805,7 +814,7 @@ static void
tda998x_write_if(struct tda998x_priv *priv, u8 bit, u16 addr,
union hdmi_infoframe *frame)
{
u8 buf[32];
u8 buf[MAX_WRITE_RANGE_BUF];
ssize_t len;
len = hdmi_infoframe_pack(frame, buf, sizeof(buf));

View File

@ -111,15 +111,6 @@ static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
i915_gem_object_unpin_map(obj);
}
static void *i915_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
{
return NULL;
}
static void i915_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{
}
static void *i915_gem_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
{
struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
@ -225,9 +216,7 @@ static const struct dma_buf_ops i915_dmabuf_ops = {
.unmap_dma_buf = i915_gem_unmap_dma_buf,
.release = drm_gem_dmabuf_release,
.map = i915_gem_dmabuf_kmap,
.map_atomic = i915_gem_dmabuf_kmap_atomic,
.unmap = i915_gem_dmabuf_kunmap,
.unmap_atomic = i915_gem_dmabuf_kunmap_atomic,
.mmap = i915_gem_dmabuf_mmap,
.vmap = i915_gem_dmabuf_vmap,
.vunmap = i915_gem_dmabuf_vunmap,

View File

@ -3945,7 +3945,7 @@ int i915_gem_gtt_insert(struct i915_address_space *vm,
mode = DRM_MM_INSERT_BEST;
if (flags & PIN_HIGH)
mode = DRM_MM_INSERT_HIGH;
mode = DRM_MM_INSERT_HIGHEST;
if (flags & PIN_MAPPABLE)
mode = DRM_MM_INSERT_LOW;
@ -3965,6 +3965,15 @@ int i915_gem_gtt_insert(struct i915_address_space *vm,
if (err != -ENOSPC)
return err;
if (mode & DRM_MM_INSERT_ONCE) {
err = drm_mm_insert_node_in_range(&vm->mm, node,
size, alignment, color,
start, end,
DRM_MM_INSERT_BEST);
if (err != -ENOSPC)
return err;
}
if (flags & PIN_NOEVICT)
return -ENOSPC;

View File

@ -124,6 +124,7 @@ int intel_digital_connector_atomic_check(struct drm_connector *conn,
if (new_conn_state->force_audio != old_conn_state->force_audio ||
new_conn_state->broadcast_rgb != old_conn_state->broadcast_rgb ||
new_conn_state->base.picture_aspect_ratio != old_conn_state->base.picture_aspect_ratio ||
new_conn_state->base.content_type != old_conn_state->base.content_type ||
new_conn_state->base.scaling_mode != old_conn_state->base.scaling_mode)
crtc_state->mode_changed = true;

View File

@ -120,12 +120,6 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_
&crtc_state->base.adjusted_mode;
int ret;
/*
* Both crtc and plane->crtc could be NULL if we're updating a
* property while the plane is disabled. We don't actually have
* anything driver-specific we need to test in that case, so
* just return success.
*/
if (!intel_state->base.crtc && !old_plane_state->base.crtc)
return 0;
@ -209,12 +203,6 @@ static int intel_plane_atomic_check(struct drm_plane *plane,
const struct drm_crtc_state *old_crtc_state;
struct drm_crtc_state *new_crtc_state;
/*
* Both crtc and plane->crtc could be NULL if we're updating a
* property while the plane is disabled. We don't actually have
* anything driver-specific we need to test in that case, so
* just return success.
*/
if (!crtc)
return 0;

View File

@ -1022,7 +1022,7 @@ bool intel_crtc_active(struct intel_crtc *crtc)
* We can ditch the adjusted_mode.crtc_clock check as soon
* as Haswell has gained clock readout/fastboot support.
*
* We can ditch the crtc->primary->fb check as soon as we can
* We can ditch the crtc->primary->state->fb check as soon as we can
* properly reconstruct framebuffers.
*
* FIXME: The intel_crtc->active here should be switched to
@ -2882,9 +2882,8 @@ valid_fb:
if (i915_gem_object_is_tiled(obj))
dev_priv->preserve_bios_swizzle = true;
drm_framebuffer_get(fb);
primary->fb = primary->state->fb = fb;
primary->crtc = primary->state->crtc = &intel_crtc->base;
plane_state->fb = fb;
plane_state->crtc = &intel_crtc->base;
intel_set_plane_visible(to_intel_crtc_state(crtc_state),
to_intel_plane_state(plane_state),
@ -13241,8 +13240,17 @@ void intel_plane_destroy(struct drm_plane *plane)
kfree(to_intel_plane(plane));
}
static bool i8xx_mod_supported(uint32_t format, uint64_t modifier)
static bool i8xx_plane_format_mod_supported(struct drm_plane *_plane,
u32 format, u64 modifier)
{
switch (modifier) {
case DRM_FORMAT_MOD_LINEAR:
case I915_FORMAT_MOD_X_TILED:
break;
default:
return false;
}
switch (format) {
case DRM_FORMAT_C8:
case DRM_FORMAT_RGB565:
@ -13255,8 +13263,17 @@ static bool i8xx_mod_supported(uint32_t format, uint64_t modifier)
}
}
static bool i965_mod_supported(uint32_t format, uint64_t modifier)
static bool i965_plane_format_mod_supported(struct drm_plane *_plane,
u32 format, u64 modifier)
{
switch (modifier) {
case DRM_FORMAT_MOD_LINEAR:
case I915_FORMAT_MOD_X_TILED:
break;
default:
return false;
}
switch (format) {
case DRM_FORMAT_C8:
case DRM_FORMAT_RGB565:
@ -13271,8 +13288,26 @@ static bool i965_mod_supported(uint32_t format, uint64_t modifier)
}
}
static bool skl_mod_supported(uint32_t format, uint64_t modifier)
static bool skl_plane_format_mod_supported(struct drm_plane *_plane,
u32 format, u64 modifier)
{
struct intel_plane *plane = to_intel_plane(_plane);
switch (modifier) {
case DRM_FORMAT_MOD_LINEAR:
case I915_FORMAT_MOD_X_TILED:
case I915_FORMAT_MOD_Y_TILED:
case I915_FORMAT_MOD_Yf_TILED:
break;
case I915_FORMAT_MOD_Y_TILED_CCS:
case I915_FORMAT_MOD_Yf_TILED_CCS:
if (!plane->has_ccs)
return false;
break;
default:
return false;
}
switch (format) {
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_XBGR8888:
@ -13304,38 +13339,14 @@ static bool skl_mod_supported(uint32_t format, uint64_t modifier)
}
}
static bool intel_primary_plane_format_mod_supported(struct drm_plane *plane,
uint32_t format,
uint64_t modifier)
static bool intel_cursor_format_mod_supported(struct drm_plane *_plane,
u32 format, u64 modifier)
{
struct drm_i915_private *dev_priv = to_i915(plane->dev);
if (WARN_ON(modifier == DRM_FORMAT_MOD_INVALID))
return false;
if ((modifier >> 56) != DRM_FORMAT_MOD_VENDOR_INTEL &&
modifier != DRM_FORMAT_MOD_LINEAR)
return false;
if (INTEL_GEN(dev_priv) >= 9)
return skl_mod_supported(format, modifier);
else if (INTEL_GEN(dev_priv) >= 4)
return i965_mod_supported(format, modifier);
else
return i8xx_mod_supported(format, modifier);
return modifier == DRM_FORMAT_MOD_LINEAR &&
format == DRM_FORMAT_ARGB8888;
}
static bool intel_cursor_plane_format_mod_supported(struct drm_plane *plane,
uint32_t format,
uint64_t modifier)
{
if (WARN_ON(modifier == DRM_FORMAT_MOD_INVALID))
return false;
return modifier == DRM_FORMAT_MOD_LINEAR && format == DRM_FORMAT_ARGB8888;
}
static struct drm_plane_funcs intel_plane_funcs = {
static struct drm_plane_funcs skl_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = intel_plane_destroy,
@ -13343,7 +13354,29 @@ static struct drm_plane_funcs intel_plane_funcs = {
.atomic_set_property = intel_plane_atomic_set_property,
.atomic_duplicate_state = intel_plane_duplicate_state,
.atomic_destroy_state = intel_plane_destroy_state,
.format_mod_supported = intel_primary_plane_format_mod_supported,
.format_mod_supported = skl_plane_format_mod_supported,
};
static struct drm_plane_funcs i965_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = intel_plane_destroy,
.atomic_get_property = intel_plane_atomic_get_property,
.atomic_set_property = intel_plane_atomic_set_property,
.atomic_duplicate_state = intel_plane_duplicate_state,
.atomic_destroy_state = intel_plane_destroy_state,
.format_mod_supported = i965_plane_format_mod_supported,
};
static struct drm_plane_funcs i8xx_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = intel_plane_destroy,
.atomic_get_property = intel_plane_atomic_get_property,
.atomic_set_property = intel_plane_atomic_set_property,
.atomic_duplicate_state = intel_plane_duplicate_state,
.atomic_destroy_state = intel_plane_destroy_state,
.format_mod_supported = i8xx_plane_format_mod_supported,
};
static int
@ -13468,7 +13501,7 @@ static const struct drm_plane_funcs intel_cursor_plane_funcs = {
.atomic_set_property = intel_plane_atomic_set_property,
.atomic_duplicate_state = intel_plane_duplicate_state,
.atomic_destroy_state = intel_plane_destroy_state,
.format_mod_supported = intel_cursor_plane_format_mod_supported,
.format_mod_supported = intel_cursor_format_mod_supported,
};
static bool i9xx_plane_has_fbc(struct drm_i915_private *dev_priv,
@ -13526,6 +13559,7 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
{
struct intel_plane *primary = NULL;
struct intel_plane_state *state = NULL;
const struct drm_plane_funcs *plane_funcs;
const uint32_t *intel_primary_formats;
unsigned int supported_rotations;
unsigned int num_formats;
@ -13581,6 +13615,9 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
primary->check_plane = intel_check_primary_plane;
if (INTEL_GEN(dev_priv) >= 9) {
primary->has_ccs = skl_plane_has_ccs(dev_priv, pipe,
PLANE_PRIMARY);
if (skl_plane_has_planar(dev_priv, pipe, PLANE_PRIMARY)) {
intel_primary_formats = skl_pri_planar_formats;
num_formats = ARRAY_SIZE(skl_pri_planar_formats);
@ -13589,7 +13626,7 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
num_formats = ARRAY_SIZE(skl_primary_formats);
}
if (skl_plane_has_ccs(dev_priv, pipe, PLANE_PRIMARY))
if (primary->has_ccs)
modifiers = skl_format_modifiers_ccs;
else
modifiers = skl_format_modifiers_noccs;
@ -13597,6 +13634,8 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
primary->update_plane = skl_update_plane;
primary->disable_plane = skl_disable_plane;
primary->get_hw_state = skl_plane_get_hw_state;
plane_funcs = &skl_plane_funcs;
} else if (INTEL_GEN(dev_priv) >= 4) {
intel_primary_formats = i965_primary_formats;
num_formats = ARRAY_SIZE(i965_primary_formats);
@ -13605,6 +13644,8 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
primary->update_plane = i9xx_update_plane;
primary->disable_plane = i9xx_disable_plane;
primary->get_hw_state = i9xx_plane_get_hw_state;
plane_funcs = &i965_plane_funcs;
} else {
intel_primary_formats = i8xx_primary_formats;
num_formats = ARRAY_SIZE(i8xx_primary_formats);
@ -13613,25 +13654,27 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
primary->update_plane = i9xx_update_plane;
primary->disable_plane = i9xx_disable_plane;
primary->get_hw_state = i9xx_plane_get_hw_state;
plane_funcs = &i8xx_plane_funcs;
}
if (INTEL_GEN(dev_priv) >= 9)
ret = drm_universal_plane_init(&dev_priv->drm, &primary->base,
0, &intel_plane_funcs,
0, plane_funcs,
intel_primary_formats, num_formats,
modifiers,
DRM_PLANE_TYPE_PRIMARY,
"plane 1%c", pipe_name(pipe));
else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
ret = drm_universal_plane_init(&dev_priv->drm, &primary->base,
0, &intel_plane_funcs,
0, plane_funcs,
intel_primary_formats, num_formats,
modifiers,
DRM_PLANE_TYPE_PRIMARY,
"primary %c", pipe_name(pipe));
else
ret = drm_universal_plane_init(&dev_priv->drm, &primary->base,
0, &intel_plane_funcs,
0, plane_funcs,
intel_primary_formats, num_formats,
modifiers,
DRM_PLANE_TYPE_PRIMARY,

View File

@ -952,6 +952,7 @@ struct intel_plane {
enum pipe pipe;
bool can_scale;
bool has_fbc;
bool has_ccs;
int max_downscale;
uint32_t frontbuffer_bit;

View File

@ -461,7 +461,8 @@ static void intel_write_infoframe(struct drm_encoder *encoder,
}
static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
const struct intel_crtc_state *crtc_state)
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
const struct drm_display_mode *adjusted_mode =
@ -491,6 +492,9 @@ static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
intel_hdmi->rgb_quant_range_selectable,
is_hdmi2_sink);
drm_hdmi_avi_infoframe_content_type(&frame.avi,
conn_state);
/* TODO: handle pixel repetition for YCBCR420 outputs */
intel_write_infoframe(encoder, crtc_state, &frame);
}
@ -586,7 +590,7 @@ static void g4x_set_infoframes(struct drm_encoder *encoder,
I915_WRITE(reg, val);
POSTING_READ(reg);
intel_hdmi_set_avi_infoframe(encoder, crtc_state);
intel_hdmi_set_avi_infoframe(encoder, crtc_state, conn_state);
intel_hdmi_set_spd_infoframe(encoder, crtc_state);
intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state);
}
@ -727,7 +731,7 @@ static void ibx_set_infoframes(struct drm_encoder *encoder,
I915_WRITE(reg, val);
POSTING_READ(reg);
intel_hdmi_set_avi_infoframe(encoder, crtc_state);
intel_hdmi_set_avi_infoframe(encoder, crtc_state, conn_state);
intel_hdmi_set_spd_infoframe(encoder, crtc_state);
intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state);
}
@ -770,7 +774,7 @@ static void cpt_set_infoframes(struct drm_encoder *encoder,
I915_WRITE(reg, val);
POSTING_READ(reg);
intel_hdmi_set_avi_infoframe(encoder, crtc_state);
intel_hdmi_set_avi_infoframe(encoder, crtc_state, conn_state);
intel_hdmi_set_spd_infoframe(encoder, crtc_state);
intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state);
}
@ -823,7 +827,7 @@ static void vlv_set_infoframes(struct drm_encoder *encoder,
I915_WRITE(reg, val);
POSTING_READ(reg);
intel_hdmi_set_avi_infoframe(encoder, crtc_state);
intel_hdmi_set_avi_infoframe(encoder, crtc_state, conn_state);
intel_hdmi_set_spd_infoframe(encoder, crtc_state);
intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state);
}
@ -856,7 +860,7 @@ static void hsw_set_infoframes(struct drm_encoder *encoder,
I915_WRITE(reg, val);
POSTING_READ(reg);
intel_hdmi_set_avi_infoframe(encoder, crtc_state);
intel_hdmi_set_avi_infoframe(encoder, crtc_state, conn_state);
intel_hdmi_set_spd_infoframe(encoder, crtc_state);
intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state);
}
@ -2048,6 +2052,7 @@ intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *c
intel_attach_force_audio_property(connector);
intel_attach_broadcast_rgb_property(connector);
intel_attach_aspect_ratio_property(connector);
drm_connector_attach_content_type_property(connector);
connector->state->picture_aspect_ratio = HDMI_PICTURE_ASPECT_NONE;
}

View File

@ -1049,6 +1049,8 @@ int intel_ring_pin(struct intel_ring *ring,
flags |= PIN_OFFSET_BIAS | offset_bias;
if (vma->obj->stolen)
flags |= PIN_MAPPABLE;
else
flags |= PIN_HIGH;
if (!(vma->flags & I915_VMA_GLOBAL_BIND)) {
if (flags & PIN_MAPPABLE || map == I915_MAP_WC)

View File

@ -1241,8 +1241,17 @@ static const uint64_t skl_plane_format_modifiers_ccs[] = {
DRM_FORMAT_MOD_INVALID
};
static bool g4x_mod_supported(uint32_t format, uint64_t modifier)
static bool g4x_sprite_format_mod_supported(struct drm_plane *_plane,
u32 format, u64 modifier)
{
switch (modifier) {
case DRM_FORMAT_MOD_LINEAR:
case I915_FORMAT_MOD_X_TILED:
break;
default:
return false;
}
switch (format) {
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_YUYV:
@ -1258,8 +1267,17 @@ static bool g4x_mod_supported(uint32_t format, uint64_t modifier)
}
}
static bool snb_mod_supported(uint32_t format, uint64_t modifier)
static bool snb_sprite_format_mod_supported(struct drm_plane *_plane,
u32 format, u64 modifier)
{
switch (modifier) {
case DRM_FORMAT_MOD_LINEAR:
case I915_FORMAT_MOD_X_TILED:
break;
default:
return false;
}
switch (format) {
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_XBGR8888:
@ -1276,8 +1294,17 @@ static bool snb_mod_supported(uint32_t format, uint64_t modifier)
}
}
static bool vlv_mod_supported(uint32_t format, uint64_t modifier)
static bool vlv_sprite_format_mod_supported(struct drm_plane *_plane,
u32 format, u64 modifier)
{
switch (modifier) {
case DRM_FORMAT_MOD_LINEAR:
case I915_FORMAT_MOD_X_TILED:
break;
default:
return false;
}
switch (format) {
case DRM_FORMAT_RGB565:
case DRM_FORMAT_ABGR8888:
@ -1299,8 +1326,26 @@ static bool vlv_mod_supported(uint32_t format, uint64_t modifier)
}
}
static bool skl_mod_supported(uint32_t format, uint64_t modifier)
static bool skl_plane_format_mod_supported(struct drm_plane *_plane,
u32 format, u64 modifier)
{
struct intel_plane *plane = to_intel_plane(_plane);
switch (modifier) {
case DRM_FORMAT_MOD_LINEAR:
case I915_FORMAT_MOD_X_TILED:
case I915_FORMAT_MOD_Y_TILED:
case I915_FORMAT_MOD_Yf_TILED:
break;
case I915_FORMAT_MOD_Y_TILED_CCS:
case I915_FORMAT_MOD_Yf_TILED_CCS:
if (!plane->has_ccs)
return false;
break;
default:
return false;
}
switch (format) {
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_XBGR8888:
@ -1332,30 +1377,7 @@ static bool skl_mod_supported(uint32_t format, uint64_t modifier)
}
}
static bool intel_sprite_plane_format_mod_supported(struct drm_plane *plane,
uint32_t format,
uint64_t modifier)
{
struct drm_i915_private *dev_priv = to_i915(plane->dev);
if (WARN_ON(modifier == DRM_FORMAT_MOD_INVALID))
return false;
if ((modifier >> 56) != DRM_FORMAT_MOD_VENDOR_INTEL &&
modifier != DRM_FORMAT_MOD_LINEAR)
return false;
if (INTEL_GEN(dev_priv) >= 9)
return skl_mod_supported(format, modifier);
else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
return vlv_mod_supported(format, modifier);
else if (INTEL_GEN(dev_priv) >= 6)
return snb_mod_supported(format, modifier);
else
return g4x_mod_supported(format, modifier);
}
static const struct drm_plane_funcs intel_sprite_plane_funcs = {
static const struct drm_plane_funcs g4x_sprite_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = intel_plane_destroy,
@ -1363,7 +1385,40 @@ static const struct drm_plane_funcs intel_sprite_plane_funcs = {
.atomic_set_property = intel_plane_atomic_set_property,
.atomic_duplicate_state = intel_plane_duplicate_state,
.atomic_destroy_state = intel_plane_destroy_state,
.format_mod_supported = intel_sprite_plane_format_mod_supported,
.format_mod_supported = g4x_sprite_format_mod_supported,
};
static const struct drm_plane_funcs snb_sprite_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = intel_plane_destroy,
.atomic_get_property = intel_plane_atomic_get_property,
.atomic_set_property = intel_plane_atomic_set_property,
.atomic_duplicate_state = intel_plane_duplicate_state,
.atomic_destroy_state = intel_plane_destroy_state,
.format_mod_supported = snb_sprite_format_mod_supported,
};
static const struct drm_plane_funcs vlv_sprite_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = intel_plane_destroy,
.atomic_get_property = intel_plane_atomic_get_property,
.atomic_set_property = intel_plane_atomic_set_property,
.atomic_duplicate_state = intel_plane_duplicate_state,
.atomic_destroy_state = intel_plane_destroy_state,
.format_mod_supported = vlv_sprite_format_mod_supported,
};
static const struct drm_plane_funcs skl_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = intel_plane_destroy,
.atomic_get_property = intel_plane_atomic_get_property,
.atomic_set_property = intel_plane_atomic_set_property,
.atomic_duplicate_state = intel_plane_duplicate_state,
.atomic_destroy_state = intel_plane_destroy_state,
.format_mod_supported = skl_plane_format_mod_supported,
};
bool skl_plane_has_ccs(struct drm_i915_private *dev_priv,
@ -1389,6 +1444,7 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
{
struct intel_plane *intel_plane = NULL;
struct intel_plane_state *state = NULL;
const struct drm_plane_funcs *plane_funcs;
unsigned long possible_crtcs;
const uint32_t *plane_formats;
const uint64_t *modifiers;
@ -1413,6 +1469,9 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
intel_plane->can_scale = true;
state->scaler_id = -1;
intel_plane->has_ccs = skl_plane_has_ccs(dev_priv, pipe,
PLANE_SPRITE0 + plane);
intel_plane->update_plane = skl_update_plane;
intel_plane->disable_plane = skl_disable_plane;
intel_plane->get_hw_state = skl_plane_get_hw_state;
@ -1426,10 +1485,12 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
num_plane_formats = ARRAY_SIZE(skl_plane_formats);
}
if (skl_plane_has_ccs(dev_priv, pipe, PLANE_SPRITE0 + plane))
if (intel_plane->has_ccs)
modifiers = skl_plane_format_modifiers_ccs;
else
modifiers = skl_plane_format_modifiers_noccs;
plane_funcs = &skl_plane_funcs;
} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
intel_plane->can_scale = false;
intel_plane->max_downscale = 1;
@ -1441,6 +1502,8 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
plane_formats = vlv_plane_formats;
num_plane_formats = ARRAY_SIZE(vlv_plane_formats);
modifiers = i9xx_plane_format_modifiers;
plane_funcs = &vlv_sprite_funcs;
} else if (INTEL_GEN(dev_priv) >= 7) {
if (IS_IVYBRIDGE(dev_priv)) {
intel_plane->can_scale = true;
@ -1457,6 +1520,8 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
plane_formats = snb_plane_formats;
num_plane_formats = ARRAY_SIZE(snb_plane_formats);
modifiers = i9xx_plane_format_modifiers;
plane_funcs = &snb_sprite_funcs;
} else {
intel_plane->can_scale = true;
intel_plane->max_downscale = 16;
@ -1469,9 +1534,13 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
if (IS_GEN6(dev_priv)) {
plane_formats = snb_plane_formats;
num_plane_formats = ARRAY_SIZE(snb_plane_formats);
plane_funcs = &snb_sprite_funcs;
} else {
plane_formats = g4x_plane_formats;
num_plane_formats = ARRAY_SIZE(g4x_plane_formats);
plane_funcs = &g4x_sprite_funcs;
}
}
@ -1498,14 +1567,14 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
if (INTEL_GEN(dev_priv) >= 9)
ret = drm_universal_plane_init(&dev_priv->drm, &intel_plane->base,
possible_crtcs, &intel_sprite_plane_funcs,
possible_crtcs, plane_funcs,
plane_formats, num_plane_formats,
modifiers,
DRM_PLANE_TYPE_OVERLAY,
"plane %d%c", plane + 2, pipe_name(pipe));
else
ret = drm_universal_plane_init(&dev_priv->drm, &intel_plane->base,
possible_crtcs, &intel_sprite_plane_funcs,
possible_crtcs, plane_funcs,
plane_formats, num_plane_formats,
modifiers,
DRM_PLANE_TYPE_OVERLAY,

View File

@ -94,18 +94,6 @@ static void mock_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
vm_unmap_ram(vaddr, mock->npages);
}
static void *mock_dmabuf_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
{
struct mock_dmabuf *mock = to_mock(dma_buf);
return kmap_atomic(mock->pages[page_num]);
}
static void mock_dmabuf_kunmap_atomic(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{
kunmap_atomic(addr);
}
static void *mock_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
{
struct mock_dmabuf *mock = to_mock(dma_buf);
@ -130,9 +118,7 @@ static const struct dma_buf_ops mock_dmabuf_ops = {
.unmap_dma_buf = mock_unmap_dma_buf,
.release = mock_dmabuf_release,
.map = mock_dmabuf_kmap,
.map_atomic = mock_dmabuf_kmap_atomic,
.unmap = mock_dmabuf_kunmap,
.unmap_atomic = mock_dmabuf_kunmap_atomic,
.mmap = mock_dmabuf_mmap,
.vmap = mock_dmabuf_vmap,
.vunmap = mock_dmabuf_vunmap,

View File

@ -15,6 +15,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <linux/dma-buf.h>
#include <linux/reservation.h>
@ -22,78 +23,37 @@
#include "mtk_drm_fb.h"
#include "mtk_drm_gem.h"
/*
* mtk specific framebuffer structure.
*
* @fb: drm framebuffer object.
* @gem_obj: array of gem objects.
*/
struct mtk_drm_fb {
struct drm_framebuffer base;
/* For now we only support a single plane */
struct drm_gem_object *gem_obj;
};
#define to_mtk_fb(x) container_of(x, struct mtk_drm_fb, base)
struct drm_gem_object *mtk_fb_get_gem_obj(struct drm_framebuffer *fb)
{
struct mtk_drm_fb *mtk_fb = to_mtk_fb(fb);
return mtk_fb->gem_obj;
}
static int mtk_drm_fb_create_handle(struct drm_framebuffer *fb,
struct drm_file *file_priv,
unsigned int *handle)
{
struct mtk_drm_fb *mtk_fb = to_mtk_fb(fb);
return drm_gem_handle_create(file_priv, mtk_fb->gem_obj, handle);
}
static void mtk_drm_fb_destroy(struct drm_framebuffer *fb)
{
struct mtk_drm_fb *mtk_fb = to_mtk_fb(fb);
drm_framebuffer_cleanup(fb);
drm_gem_object_put_unlocked(mtk_fb->gem_obj);
kfree(mtk_fb);
}
static const struct drm_framebuffer_funcs mtk_drm_fb_funcs = {
.create_handle = mtk_drm_fb_create_handle,
.destroy = mtk_drm_fb_destroy,
.create_handle = drm_gem_fb_create_handle,
.destroy = drm_gem_fb_destroy,
};
static struct mtk_drm_fb *mtk_drm_framebuffer_init(struct drm_device *dev,
static struct drm_framebuffer *mtk_drm_framebuffer_init(struct drm_device *dev,
const struct drm_mode_fb_cmd2 *mode,
struct drm_gem_object *obj)
{
struct mtk_drm_fb *mtk_fb;
struct drm_framebuffer *fb;
int ret;
if (drm_format_num_planes(mode->pixel_format) != 1)
return ERR_PTR(-EINVAL);
mtk_fb = kzalloc(sizeof(*mtk_fb), GFP_KERNEL);
if (!mtk_fb)
fb = kzalloc(sizeof(*fb), GFP_KERNEL);
if (!fb)
return ERR_PTR(-ENOMEM);
drm_helper_mode_fill_fb_struct(dev, &mtk_fb->base, mode);
drm_helper_mode_fill_fb_struct(dev, fb, mode);
mtk_fb->gem_obj = obj;
fb->obj[0] = obj;
ret = drm_framebuffer_init(dev, &mtk_fb->base, &mtk_drm_fb_funcs);
ret = drm_framebuffer_init(dev, fb, &mtk_drm_fb_funcs);
if (ret) {
DRM_ERROR("failed to initialize framebuffer\n");
kfree(mtk_fb);
kfree(fb);
return ERR_PTR(ret);
}
return mtk_fb;
return fb;
}
/*
@ -110,7 +70,7 @@ int mtk_fb_wait(struct drm_framebuffer *fb)
if (!fb)
return 0;
gem = mtk_fb_get_gem_obj(fb);
gem = fb->obj[0];
if (!gem || !gem->dma_buf || !gem->dma_buf->resv)
return 0;
@ -128,7 +88,7 @@ struct drm_framebuffer *mtk_drm_mode_fb_create(struct drm_device *dev,
struct drm_file *file,
const struct drm_mode_fb_cmd2 *cmd)
{
struct mtk_drm_fb *mtk_fb;
struct drm_framebuffer *fb;
struct drm_gem_object *gem;
unsigned int width = cmd->width;
unsigned int height = cmd->height;
@ -151,13 +111,13 @@ struct drm_framebuffer *mtk_drm_mode_fb_create(struct drm_device *dev,
goto unreference;
}
mtk_fb = mtk_drm_framebuffer_init(dev, cmd, gem);
if (IS_ERR(mtk_fb)) {
ret = PTR_ERR(mtk_fb);
fb = mtk_drm_framebuffer_init(dev, cmd, gem);
if (IS_ERR(fb)) {
ret = PTR_ERR(fb);
goto unreference;
}
return &mtk_fb->base;
return fb;
unreference:
drm_gem_object_put_unlocked(gem);

View File

@ -14,7 +14,6 @@
#ifndef MTK_DRM_FB_H
#define MTK_DRM_FB_H
struct drm_gem_object *mtk_fb_get_gem_obj(struct drm_framebuffer *fb);
int mtk_fb_wait(struct drm_framebuffer *fb);
struct drm_framebuffer *mtk_drm_mode_fb_create(struct drm_device *dev,
struct drm_file *file,

View File

@ -95,11 +95,6 @@ static int mtk_plane_atomic_check(struct drm_plane *plane,
if (!fb)
return 0;
if (!mtk_fb_get_gem_obj(fb)) {
DRM_DEBUG_KMS("buffer is null\n");
return -EFAULT;
}
if (!state->crtc)
return 0;
@ -127,7 +122,7 @@ static void mtk_plane_atomic_update(struct drm_plane *plane,
if (!crtc || WARN_ON(!fb))
return;
gem = mtk_fb_get_gem_obj(fb);
gem = fb->obj[0];
mtk_gem = to_mtk_gem_obj(gem);
addr = mtk_gem->dma_addr;
pitch = fb->pitches[0];

View File

@ -201,7 +201,7 @@ static void blend_setup(struct drm_crtc *crtc)
int idx = idxs[pipe_id];
if (idx > 0) {
const struct mdp_format *format =
to_mdp_format(msm_framebuffer_format(plane->fb));
to_mdp_format(msm_framebuffer_format(plane->state->fb));
alpha[idx-1] = format->alpha_enable;
}
}
@ -665,7 +665,6 @@ struct drm_crtc *mdp4_crtc_init(struct drm_device *dev,
drm_crtc_init_with_planes(dev, crtc, plane, NULL, &mdp4_crtc_funcs,
NULL);
drm_crtc_helper_add(crtc, &mdp4_crtc_helper_funcs);
plane->crtc = crtc;
return crtc;
}

View File

@ -167,8 +167,6 @@ static void mdp4_plane_set_scanout(struct drm_plane *plane,
msm_framebuffer_iova(fb, kms->aspace, 2));
mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP3_BASE(pipe),
msm_framebuffer_iova(fb, kms->aspace, 3));
plane->fb = fb;
}
static void mdp4_write_csc_config(struct mdp4_kms *mdp4_kms,

View File

@ -1207,7 +1207,6 @@ struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
"unref cursor", unref_cursor_worker);
drm_crtc_helper_add(crtc, &mdp5_crtc_helper_funcs);
plane->crtc = crtc;
return crtc;
}

View File

@ -512,7 +512,7 @@ static void mdp5_plane_atomic_async_update(struct drm_plane *plane,
if (plane_enabled(new_state)) {
struct mdp5_ctl *ctl;
struct mdp5_pipeline *pipeline =
mdp5_crtc_get_pipeline(plane->crtc);
mdp5_crtc_get_pipeline(new_state->crtc);
int ret;
ret = mdp5_plane_mode_set(plane, new_state->crtc, new_state->fb,
@ -1029,8 +1029,6 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
src_img_w, src_img_h,
src_x + src_w, src_y, src_w, src_h);
plane->fb = fb;
return ret;
}

View File

@ -17,6 +17,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include "msm_drv.h"
#include "msm_kms.h"
@ -25,49 +26,20 @@
struct msm_framebuffer {
struct drm_framebuffer base;
const struct msm_format *format;
struct drm_gem_object *planes[MAX_PLANE];
};
#define to_msm_framebuffer(x) container_of(x, struct msm_framebuffer, base)
static struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos);
static int msm_framebuffer_create_handle(struct drm_framebuffer *fb,
struct drm_file *file_priv,
unsigned int *handle)
{
struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
return drm_gem_handle_create(file_priv,
msm_fb->planes[0], handle);
}
static void msm_framebuffer_destroy(struct drm_framebuffer *fb)
{
struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
int i, n = fb->format->num_planes;
DBG("destroy: FB ID: %d (%p)", fb->base.id, fb);
drm_framebuffer_cleanup(fb);
for (i = 0; i < n; i++) {
struct drm_gem_object *bo = msm_fb->planes[i];
drm_gem_object_put_unlocked(bo);
}
kfree(msm_fb);
}
static const struct drm_framebuffer_funcs msm_framebuffer_funcs = {
.create_handle = msm_framebuffer_create_handle,
.destroy = msm_framebuffer_destroy,
.create_handle = drm_gem_fb_create_handle,
.destroy = drm_gem_fb_destroy,
};
#ifdef CONFIG_DEBUG_FS
void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m)
{
struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
int i, n = fb->format->num_planes;
seq_printf(m, "fb: %dx%d@%4.4s (%2d, ID:%d)\n",
@ -77,7 +49,7 @@ void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m)
for (i = 0; i < n; i++) {
seq_printf(m, " %d: offset=%d pitch=%d, obj: ",
i, fb->offsets[i], fb->pitches[i]);
msm_gem_describe(msm_fb->planes[i], m);
msm_gem_describe(fb->obj[i], m);
}
}
#endif
@ -90,12 +62,11 @@ void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m)
int msm_framebuffer_prepare(struct drm_framebuffer *fb,
struct msm_gem_address_space *aspace)
{
struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
int ret, i, n = fb->format->num_planes;
uint64_t iova;
for (i = 0; i < n; i++) {
ret = msm_gem_get_iova(msm_fb->planes[i], aspace, &iova);
ret = msm_gem_get_iova(fb->obj[i], aspace, &iova);
DBG("FB[%u]: iova[%d]: %08llx (%d)", fb->base.id, i, iova, ret);
if (ret)
return ret;
@ -107,26 +78,23 @@ int msm_framebuffer_prepare(struct drm_framebuffer *fb,
void msm_framebuffer_cleanup(struct drm_framebuffer *fb,
struct msm_gem_address_space *aspace)
{
struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
int i, n = fb->format->num_planes;
for (i = 0; i < n; i++)
msm_gem_put_iova(msm_fb->planes[i], aspace);
msm_gem_put_iova(fb->obj[i], aspace);
}
uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb,
struct msm_gem_address_space *aspace, int plane)
{
struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
if (!msm_fb->planes[plane])
if (!fb->obj[plane])
return 0;
return msm_gem_iova(msm_fb->planes[plane], aspace) + fb->offsets[plane];
return msm_gem_iova(fb->obj[plane], aspace) + fb->offsets[plane];
}
struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane)
{
struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
return msm_fb->planes[plane];
return drm_gem_fb_get_obj(fb, plane);
}
const struct msm_format *msm_framebuffer_format(struct drm_framebuffer *fb)
@ -202,7 +170,7 @@ static struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
msm_fb->format = format;
if (n > ARRAY_SIZE(msm_fb->planes)) {
if (n > ARRAY_SIZE(fb->obj)) {
ret = -EINVAL;
goto fail;
}
@ -221,7 +189,7 @@ static struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
goto fail;
}
msm_fb->planes[i] = bos[i];
msm_fb->base.obj[i] = bos[i];
}
drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd);

View File

@ -19,6 +19,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include "omap_dmm_tiler.h"
#include "omap_drv.h"
@ -51,9 +52,6 @@ static const u32 formats[] = {
/* per-plane info for the fb: */
struct plane {
struct drm_gem_object *bo;
u32 pitch;
u32 offset;
dma_addr_t dma_addr;
};
@ -68,56 +66,28 @@ struct omap_framebuffer {
struct mutex lock;
};
static int omap_framebuffer_create_handle(struct drm_framebuffer *fb,
struct drm_file *file_priv,
unsigned int *handle)
{
struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
return drm_gem_handle_create(file_priv,
omap_fb->planes[0].bo, handle);
}
static void omap_framebuffer_destroy(struct drm_framebuffer *fb)
{
struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
int i, n = fb->format->num_planes;
DBG("destroy: FB ID: %d (%p)", fb->base.id, fb);
drm_framebuffer_cleanup(fb);
for (i = 0; i < n; i++) {
struct plane *plane = &omap_fb->planes[i];
drm_gem_object_unreference_unlocked(plane->bo);
}
kfree(omap_fb);
}
static const struct drm_framebuffer_funcs omap_framebuffer_funcs = {
.create_handle = omap_framebuffer_create_handle,
.destroy = omap_framebuffer_destroy,
.create_handle = drm_gem_fb_create_handle,
.destroy = drm_gem_fb_destroy,
};
static u32 get_linear_addr(struct plane *plane,
static u32 get_linear_addr(struct drm_framebuffer *fb,
const struct drm_format_info *format, int n, int x, int y)
{
struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
struct plane *plane = &omap_fb->planes[n];
u32 offset;
offset = plane->offset
offset = fb->offsets[n]
+ (x * format->cpp[n] / (n == 0 ? 1 : format->hsub))
+ (y * plane->pitch / (n == 0 ? 1 : format->vsub));
+ (y * fb->pitches[n] / (n == 0 ? 1 : format->vsub));
return plane->dma_addr + offset;
}
bool omap_framebuffer_supports_rotation(struct drm_framebuffer *fb)
{
struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
struct plane *plane = &omap_fb->planes[0];
return omap_gem_flags(plane->bo) & OMAP_BO_TILED;
return omap_gem_flags(fb->obj[0]) & OMAP_BO_TILED;
}
/* Note: DRM rotates counter-clockwise, TILER & DSS rotates clockwise */
@ -176,7 +146,7 @@ void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
x = state->src_x >> 16;
y = state->src_y >> 16;
if (omap_gem_flags(plane->bo) & OMAP_BO_TILED) {
if (omap_gem_flags(fb->obj[0]) & OMAP_BO_TILED) {
u32 w = state->src_w >> 16;
u32 h = state->src_h >> 16;
@ -201,12 +171,12 @@ void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
x += w - 1;
/* Note: x and y are in TILER units, not pixels */
omap_gem_rotated_dma_addr(plane->bo, orient, x, y,
omap_gem_rotated_dma_addr(fb->obj[0], orient, x, y,
&info->paddr);
info->rotation_type = OMAP_DSS_ROT_TILER;
info->rotation = state->rotation ?: DRM_MODE_ROTATE_0;
/* Note: stride in TILER units, not pixels */
info->screen_width = omap_gem_tiled_stride(plane->bo, orient);
info->screen_width = omap_gem_tiled_stride(fb->obj[0], orient);
} else {
switch (state->rotation & DRM_MODE_ROTATE_MASK) {
case 0:
@ -221,10 +191,10 @@ void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
break;
}
info->paddr = get_linear_addr(plane, format, 0, x, y);
info->paddr = get_linear_addr(fb, format, 0, x, y);
info->rotation_type = OMAP_DSS_ROT_NONE;
info->rotation = DRM_MODE_ROTATE_0;
info->screen_width = plane->pitch;
info->screen_width = fb->pitches[0];
}
/* convert to pixels: */
@ -234,11 +204,11 @@ void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
plane = &omap_fb->planes[1];
if (info->rotation_type == OMAP_DSS_ROT_TILER) {
WARN_ON(!(omap_gem_flags(plane->bo) & OMAP_BO_TILED));
omap_gem_rotated_dma_addr(plane->bo, orient, x/2, y/2,
WARN_ON(!(omap_gem_flags(fb->obj[1]) & OMAP_BO_TILED));
omap_gem_rotated_dma_addr(fb->obj[1], orient, x/2, y/2,
&info->p_uv_addr);
} else {
info->p_uv_addr = get_linear_addr(plane, format, 1, x, y);
info->p_uv_addr = get_linear_addr(fb, format, 1, x, y);
}
} else {
info->p_uv_addr = 0;
@ -261,10 +231,10 @@ int omap_framebuffer_pin(struct drm_framebuffer *fb)
for (i = 0; i < n; i++) {
struct plane *plane = &omap_fb->planes[i];
ret = omap_gem_pin(plane->bo, &plane->dma_addr);
ret = omap_gem_pin(fb->obj[i], &plane->dma_addr);
if (ret)
goto fail;
omap_gem_dma_sync_buffer(plane->bo, DMA_TO_DEVICE);
omap_gem_dma_sync_buffer(fb->obj[i], DMA_TO_DEVICE);
}
omap_fb->pin_count++;
@ -276,7 +246,7 @@ int omap_framebuffer_pin(struct drm_framebuffer *fb)
fail:
for (i--; i >= 0; i--) {
struct plane *plane = &omap_fb->planes[i];
omap_gem_unpin(plane->bo);
omap_gem_unpin(fb->obj[i]);
plane->dma_addr = 0;
}
@ -302,54 +272,25 @@ void omap_framebuffer_unpin(struct drm_framebuffer *fb)
for (i = 0; i < n; i++) {
struct plane *plane = &omap_fb->planes[i];
omap_gem_unpin(plane->bo);
omap_gem_unpin(fb->obj[i]);
plane->dma_addr = 0;
}
mutex_unlock(&omap_fb->lock);
}
/* iterate thru all the connectors, returning ones that are attached
* to the same fb..
*/
struct drm_connector *omap_framebuffer_get_next_connector(
struct drm_framebuffer *fb, struct drm_connector *from)
{
struct drm_device *dev = fb->dev;
struct list_head *connector_list = &dev->mode_config.connector_list;
struct drm_connector *connector = from;
if (!from)
return list_first_entry_or_null(connector_list, typeof(*from),
head);
list_for_each_entry_from(connector, connector_list, head) {
if (connector != from) {
struct drm_encoder *encoder = connector->encoder;
struct drm_crtc *crtc = encoder ? encoder->crtc : NULL;
if (crtc && crtc->primary->fb == fb)
return connector;
}
}
return NULL;
}
#ifdef CONFIG_DEBUG_FS
void omap_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m)
{
struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
int i, n = fb->format->num_planes;
seq_printf(m, "fb: %dx%d@%4.4s\n", fb->width, fb->height,
(char *)&fb->format->format);
for (i = 0; i < n; i++) {
struct plane *plane = &omap_fb->planes[i];
seq_printf(m, " %d: offset=%d pitch=%d, obj: ",
i, plane->offset, plane->pitch);
omap_gem_describe(plane->bo, m);
i, fb->offsets[i], fb->pitches[i]);
omap_gem_describe(fb->obj[i], m);
}
}
#endif
@ -454,9 +395,7 @@ struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
goto fail;
}
plane->bo = bos[i];
plane->offset = mode_cmd->offsets[i];
plane->pitch = pitch;
fb->obj[i] = bos[i];
plane->dma_addr = 0;
}

View File

@ -38,8 +38,6 @@ int omap_framebuffer_pin(struct drm_framebuffer *fb);
void omap_framebuffer_unpin(struct drm_framebuffer *fb);
void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
struct drm_plane_state *state, struct omap_overlay_info *info);
struct drm_connector *omap_framebuffer_get_next_connector(
struct drm_framebuffer *fb, struct drm_connector *from);
bool omap_framebuffer_supports_rotation(struct drm_framebuffer *fb);
void omap_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m);

View File

@ -148,8 +148,6 @@ static const struct dma_buf_ops omap_dmabuf_ops = {
.release = drm_gem_dmabuf_release,
.begin_cpu_access = omap_gem_dmabuf_begin_cpu_access,
.end_cpu_access = omap_gem_dmabuf_end_cpu_access,
.map_atomic = omap_gem_dmabuf_kmap_atomic,
.unmap_atomic = omap_gem_dmabuf_kunmap_atomic,
.map = omap_gem_dmabuf_kmap,
.unmap = omap_gem_dmabuf_kunmap,
.mmap = omap_gem_dmabuf_mmap,

View File

@ -292,7 +292,6 @@ static int innolux_panel_remove(struct mipi_dsi_device *dsi)
DRM_DEV_ERROR(&dsi->dev, "failed to detach from DSI host: %d\n",
err);
drm_panel_detach(&innolux->base);
innolux_panel_del(innolux);
return 0;

View File

@ -500,7 +500,6 @@ static int jdi_panel_remove(struct mipi_dsi_device *dsi)
dev_err(&dsi->dev, "failed to detach from DSI host: %d\n",
ret);
drm_panel_detach(&jdi->base);
jdi_panel_del(jdi);
return 0;

View File

@ -282,7 +282,6 @@ static int panel_lvds_remove(struct platform_device *pdev)
{
struct panel_lvds *lvds = dev_get_drvdata(&pdev->dev);
drm_panel_detach(&lvds->panel);
drm_panel_remove(&lvds->panel);
panel_lvds_disable(&lvds->panel);

View File

@ -14,8 +14,6 @@
#include <linux/regulator/consumer.h>
#include <video/mipi_display.h>
#define DRV_NAME "orisetech_otm8009a"
#define OTM8009A_BACKLIGHT_DEFAULT 240
#define OTM8009A_BACKLIGHT_MAX 255
@ -98,6 +96,20 @@ static void otm8009a_dcs_write_buf(struct otm8009a *ctx, const void *data,
DRM_WARN("mipi dsi dcs write buffer failed\n");
}
static void otm8009a_dcs_write_buf_hs(struct otm8009a *ctx, const void *data,
size_t len)
{
struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
/* data will be sent in dsi hs mode (ie. no lpm) */
dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
otm8009a_dcs_write_buf(ctx, data, len);
/* restore back the dsi lpm mode */
dsi->mode_flags |= MIPI_DSI_MODE_LPM;
}
#define dcs_write_seq(ctx, seq...) \
({ \
static const u8 d[] = { seq }; \
@ -248,11 +260,7 @@ static int otm8009a_disable(struct drm_panel *panel)
if (!ctx->enabled)
return 0; /* This is not an issue so we return 0 here */
/* Power off the backlight. Note: end-user still controls brightness */
ctx->bl_dev->props.power = FB_BLANK_POWERDOWN;
ret = backlight_update_status(ctx->bl_dev);
if (ret)
return ret;
backlight_disable(ctx->bl_dev);
ret = mipi_dsi_dcs_set_display_off(dsi);
if (ret)
@ -316,13 +324,6 @@ static int otm8009a_prepare(struct drm_panel *panel)
ctx->prepared = true;
/*
* Power on the backlight. Note: end-user still controls brightness
* Note: ctx->prepared must be true before updating the backlight.
*/
ctx->bl_dev->props.power = FB_BLANK_UNBLANK;
backlight_update_status(ctx->bl_dev);
return 0;
}
@ -330,6 +331,11 @@ static int otm8009a_enable(struct drm_panel *panel)
{
struct otm8009a *ctx = panel_to_otm8009a(panel);
if (ctx->enabled)
return 0;
backlight_enable(ctx->bl_dev);
ctx->enabled = true;
return 0;
@ -387,7 +393,7 @@ static int otm8009a_backlight_update_status(struct backlight_device *bd)
*/
data[0] = MIPI_DCS_SET_DISPLAY_BRIGHTNESS;
data[1] = bd->props.brightness;
otm8009a_dcs_write_buf(ctx, data, ARRAY_SIZE(data));
otm8009a_dcs_write_buf_hs(ctx, data, ARRAY_SIZE(data));
/* set Brightness Control & Backlight on */
data[1] = 0x24;
@ -399,7 +405,7 @@ static int otm8009a_backlight_update_status(struct backlight_device *bd)
/* Update Brightness Control & Backlight */
data[0] = MIPI_DCS_WRITE_CONTROL_DISPLAY;
otm8009a_dcs_write_buf(ctx, data, ARRAY_SIZE(data));
otm8009a_dcs_write_buf_hs(ctx, data, ARRAY_SIZE(data));
return 0;
}
@ -444,11 +450,14 @@ static int otm8009a_probe(struct mipi_dsi_device *dsi)
ctx->panel.dev = dev;
ctx->panel.funcs = &otm8009a_drm_funcs;
ctx->bl_dev = backlight_device_register(DRV_NAME "_backlight", dev, ctx,
&otm8009a_backlight_ops, NULL);
ctx->bl_dev = devm_backlight_device_register(dev, dev_name(dev),
dsi->host->dev, ctx,
&otm8009a_backlight_ops,
NULL);
if (IS_ERR(ctx->bl_dev)) {
dev_err(dev, "failed to register backlight device\n");
return PTR_ERR(ctx->bl_dev);
ret = PTR_ERR(ctx->bl_dev);
dev_err(dev, "failed to register backlight: %d\n", ret);
return ret;
}
ctx->bl_dev->props.max_brightness = OTM8009A_BACKLIGHT_MAX;
@ -466,11 +475,6 @@ static int otm8009a_probe(struct mipi_dsi_device *dsi)
return ret;
}
DRM_INFO(DRV_NAME "_panel %ux%u@%u %ubpp dsi %udl - ready\n",
default_mode.hdisplay, default_mode.vdisplay,
default_mode.vrefresh,
mipi_dsi_pixel_format_to_bpp(dsi->format), dsi->lanes);
return 0;
}
@ -481,8 +485,6 @@ static int otm8009a_remove(struct mipi_dsi_device *dsi)
mipi_dsi_detach(dsi);
drm_panel_remove(&ctx->panel);
backlight_device_unregister(ctx->bl_dev);
return 0;
}
@ -496,7 +498,7 @@ static struct mipi_dsi_driver orisetech_otm8009a_driver = {
.probe = otm8009a_probe,
.remove = otm8009a_remove,
.driver = {
.name = DRV_NAME "_panel",
.name = "panel-orisetech-otm8009a",
.of_match_table = orisetech_otm8009a_of_match,
},
};

View File

@ -299,7 +299,6 @@ static int wuxga_nt_panel_remove(struct mipi_dsi_device *dsi)
if (ret < 0)
dev_err(&dsi->dev, "failed to detach from DSI host: %d\n", ret);
drm_panel_detach(&wuxga_nt->base);
wuxga_nt_panel_del(wuxga_nt);
return 0;

View File

@ -292,7 +292,6 @@ static int seiko_panel_remove(struct platform_device *pdev)
{
struct seiko_panel *panel = dev_get_drvdata(&pdev->dev);
drm_panel_detach(&panel->base);
drm_panel_remove(&panel->base);
seiko_panel_disable(&panel->base);

View File

@ -418,7 +418,6 @@ static int sharp_panel_remove(struct mipi_dsi_device *dsi)
if (err < 0)
dev_err(&dsi->dev, "failed to detach from DSI host: %d\n", err);
drm_panel_detach(&sharp->base);
sharp_panel_del(sharp);
return 0;

View File

@ -327,7 +327,6 @@ static int sharp_nt_panel_remove(struct mipi_dsi_device *dsi)
if (ret < 0)
dev_err(&dsi->dev, "failed to detach from DSI host: %d\n", ret);
drm_panel_detach(&sharp_nt->base);
sharp_nt_panel_del(sharp_nt);
return 0;

View File

@ -364,7 +364,6 @@ static int panel_simple_remove(struct device *dev)
{
struct panel_simple *panel = dev_get_drvdata(dev);
drm_panel_detach(&panel->base);
drm_panel_remove(&panel->base);
panel_simple_disable(&panel->base);
@ -581,6 +580,34 @@ static const struct panel_desc auo_b133htn01 = {
},
};
static const struct display_timing auo_g070vvn01_timings = {
.pixelclock = { 33300000, 34209000, 45000000 },
.hactive = { 800, 800, 800 },
.hfront_porch = { 20, 40, 200 },
.hback_porch = { 87, 40, 1 },
.hsync_len = { 1, 48, 87 },
.vactive = { 480, 480, 480 },
.vfront_porch = { 5, 13, 200 },
.vback_porch = { 31, 31, 29 },
.vsync_len = { 1, 1, 3 },
};
static const struct panel_desc auo_g070vvn01 = {
.timings = &auo_g070vvn01_timings,
.num_timings = 1,
.bpc = 8,
.size = {
.width = 152,
.height = 91,
},
.delay = {
.prepare = 200,
.enable = 50,
.disable = 50,
.unprepare = 1000,
},
};
static const struct drm_display_mode auo_g104sn02_mode = {
.clock = 40000,
.hdisplay = 800,
@ -687,7 +714,7 @@ static const struct panel_desc auo_p320hvn03 = {
.enable = 450,
.unprepare = 500,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA,
.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
};
static const struct drm_display_mode auo_t215hvn01_mode = {
@ -1217,6 +1244,30 @@ static const struct panel_desc innolux_n156bge_l21 = {
},
};
static const struct drm_display_mode innolux_tv123wam_mode = {
.clock = 206016,
.hdisplay = 2160,
.hsync_start = 2160 + 48,
.hsync_end = 2160 + 48 + 32,
.htotal = 2160 + 48 + 32 + 80,
.vdisplay = 1440,
.vsync_start = 1440 + 3,
.vsync_end = 1440 + 3 + 10,
.vtotal = 1440 + 3 + 10 + 27,
.vrefresh = 60,
.flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
};
static const struct panel_desc innolux_tv123wam = {
.modes = &innolux_tv123wam_mode,
.num_modes = 1,
.bpc = 8,
.size = {
.width = 259,
.height = 173,
},
};
static const struct drm_display_mode innolux_zj070na_01p_mode = {
.clock = 51501,
.hdisplay = 1024,
@ -1247,8 +1298,8 @@ static const struct display_timing koe_tx31d200vm0baa_timing = {
.hback_porch = { 16, 36, 56 },
.hsync_len = { 8, 8, 8 },
.vactive = { 480, 480, 480 },
.vfront_porch = { 6, 21, 33.5 },
.vback_porch = { 6, 21, 33.5 },
.vfront_porch = { 6, 21, 33 },
.vback_porch = { 6, 21, 33 },
.vsync_len = { 8, 8, 8 },
.flags = DISPLAY_FLAGS_DE_HIGH,
};
@ -2094,6 +2145,9 @@ static const struct of_device_id platform_of_match[] = {
}, {
.compatible = "auo,b133xtn01",
.data = &auo_b133xtn01,
}, {
.compatible = "auo,g070vvn01",
.data = &auo_g070vvn01,
}, {
.compatible = "auo,g104sn02",
.data = &auo_g104sn02,
@ -2169,6 +2223,9 @@ static const struct of_device_id platform_of_match[] = {
}, {
.compatible = "innolux,n156bge-l21",
.data = &innolux_n156bge_l21,
}, {
.compatible = "innolux,tv123wam",
.data = &innolux_tv123wam,
}, {
.compatible = "innolux,zj070na-01p",
.data = &innolux_zj070na_01p,

View File

@ -419,7 +419,6 @@ static int st7789v_remove(struct spi_device *spi)
{
struct st7789v *ctx = spi_get_drvdata(spi);
drm_panel_detach(&ctx->panel);
drm_panel_remove(&ctx->panel);
if (ctx->backlight)

View File

@ -792,7 +792,6 @@ err_config_video:
int cdn_dp_audio_stop(struct cdn_dp_device *dp, struct audio_info *audio)
{
u32 val;
int ret;
ret = cdn_dp_reg_write(dp, AUDIO_PACK_CONTROL, 0);
@ -801,11 +800,7 @@ int cdn_dp_audio_stop(struct cdn_dp_device *dp, struct audio_info *audio)
return ret;
}
val = SPDIF_AVG_SEL | SPDIF_JITTER_BYPASS;
val |= SPDIF_FIFO_MID_RANGE(0xe0);
val |= SPDIF_JITTER_THRSH(0xe0);
val |= SPDIF_JITTER_AVG_WIN(7);
writel(val, dp->regs + SPDIF_CTRL_ADDR);
writel(0, dp->regs + SPDIF_CTRL_ADDR);
/* clear the audio config and reset */
writel(0, dp->regs + AUDIO_SRC_CNTL);
@ -929,12 +924,6 @@ static void cdn_dp_audio_config_spdif(struct cdn_dp_device *dp)
{
u32 val;
val = SPDIF_AVG_SEL | SPDIF_JITTER_BYPASS;
val |= SPDIF_FIFO_MID_RANGE(0xe0);
val |= SPDIF_JITTER_THRSH(0xe0);
val |= SPDIF_JITTER_AVG_WIN(7);
writel(val, dp->regs + SPDIF_CTRL_ADDR);
writel(SYNC_WR_TO_CH_ZERO, dp->regs + FIFO_CNTL);
val = MAX_NUM_CH(2) | AUDIO_TYPE_LPCM | CFG_SUB_PCKT_NUM(4);
@ -942,9 +931,6 @@ static void cdn_dp_audio_config_spdif(struct cdn_dp_device *dp)
writel(SMPL2PKT_EN, dp->regs + SMPL2PKT_CNTL);
val = SPDIF_ENABLE | SPDIF_AVG_SEL | SPDIF_JITTER_BYPASS;
val |= SPDIF_FIFO_MID_RANGE(0xe0);
val |= SPDIF_JITTER_THRSH(0xe0);
val |= SPDIF_JITTER_AVG_WIN(7);
writel(val, dp->regs + SPDIF_CTRL_ADDR);
clk_prepare_enable(dp->spdif_clk);


@ -18,52 +18,13 @@
#include <drm/drm_atomic.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include "rockchip_drm_drv.h"
#include "rockchip_drm_fb.h"
#include "rockchip_drm_gem.h"
#include "rockchip_drm_psr.h"
#define to_rockchip_fb(x) container_of(x, struct rockchip_drm_fb, fb)
struct rockchip_drm_fb {
struct drm_framebuffer fb;
struct drm_gem_object *obj[ROCKCHIP_MAX_FB_BUFFER];
};
struct drm_gem_object *rockchip_fb_get_gem_obj(struct drm_framebuffer *fb,
unsigned int plane)
{
struct rockchip_drm_fb *rk_fb = to_rockchip_fb(fb);
if (plane >= ROCKCHIP_MAX_FB_BUFFER)
return NULL;
return rk_fb->obj[plane];
}
static void rockchip_drm_fb_destroy(struct drm_framebuffer *fb)
{
struct rockchip_drm_fb *rockchip_fb = to_rockchip_fb(fb);
int i;
for (i = 0; i < ROCKCHIP_MAX_FB_BUFFER; i++)
drm_gem_object_put_unlocked(rockchip_fb->obj[i]);
drm_framebuffer_cleanup(fb);
kfree(rockchip_fb);
}
static int rockchip_drm_fb_create_handle(struct drm_framebuffer *fb,
struct drm_file *file_priv,
unsigned int *handle)
{
struct rockchip_drm_fb *rockchip_fb = to_rockchip_fb(fb);
return drm_gem_handle_create(file_priv,
rockchip_fb->obj[0], handle);
}
static int rockchip_drm_fb_dirty(struct drm_framebuffer *fb,
struct drm_file *file,
unsigned int flags, unsigned int color,
@ -75,46 +36,45 @@ static int rockchip_drm_fb_dirty(struct drm_framebuffer *fb,
}
static const struct drm_framebuffer_funcs rockchip_drm_fb_funcs = {
.destroy = rockchip_drm_fb_destroy,
.create_handle = rockchip_drm_fb_create_handle,
.dirty = rockchip_drm_fb_dirty,
.destroy = drm_gem_fb_destroy,
.create_handle = drm_gem_fb_create_handle,
.dirty = rockchip_drm_fb_dirty,
};
static struct rockchip_drm_fb *
static struct drm_framebuffer *
rockchip_fb_alloc(struct drm_device *dev, const struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object **obj, unsigned int num_planes)
{
struct rockchip_drm_fb *rockchip_fb;
struct drm_framebuffer *fb;
int ret;
int i;
rockchip_fb = kzalloc(sizeof(*rockchip_fb), GFP_KERNEL);
if (!rockchip_fb)
fb = kzalloc(sizeof(*fb), GFP_KERNEL);
if (!fb)
return ERR_PTR(-ENOMEM);
drm_helper_mode_fill_fb_struct(dev, &rockchip_fb->fb, mode_cmd);
drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd);
for (i = 0; i < num_planes; i++)
rockchip_fb->obj[i] = obj[i];
fb->obj[i] = obj[i];
ret = drm_framebuffer_init(dev, &rockchip_fb->fb,
&rockchip_drm_fb_funcs);
ret = drm_framebuffer_init(dev, fb, &rockchip_drm_fb_funcs);
if (ret) {
DRM_DEV_ERROR(dev->dev,
"Failed to initialize framebuffer: %d\n",
ret);
kfree(rockchip_fb);
kfree(fb);
return ERR_PTR(ret);
}
return rockchip_fb;
return fb;
}
static struct drm_framebuffer *
rockchip_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
struct rockchip_drm_fb *rockchip_fb;
struct drm_framebuffer *fb;
struct drm_gem_object *objs[ROCKCHIP_MAX_FB_BUFFER];
struct drm_gem_object *obj;
unsigned int hsub;
@ -153,13 +113,13 @@ rockchip_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
objs[i] = obj;
}
rockchip_fb = rockchip_fb_alloc(dev, mode_cmd, objs, i);
if (IS_ERR(rockchip_fb)) {
ret = PTR_ERR(rockchip_fb);
fb = rockchip_fb_alloc(dev, mode_cmd, objs, i);
if (IS_ERR(fb)) {
ret = PTR_ERR(fb);
goto err_gem_object_unreference;
}
return &rockchip_fb->fb;
return fb;
err_gem_object_unreference:
for (i--; i >= 0; i--)
@ -242,13 +202,13 @@ rockchip_drm_framebuffer_init(struct drm_device *dev,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object *obj)
{
struct rockchip_drm_fb *rockchip_fb;
struct drm_framebuffer *fb;
rockchip_fb = rockchip_fb_alloc(dev, mode_cmd, &obj, 1);
if (IS_ERR(rockchip_fb))
return ERR_CAST(rockchip_fb);
fb = rockchip_fb_alloc(dev, mode_cmd, &obj, 1);
if (IS_ERR(fb))
return ERR_CAST(fb);
return &rockchip_fb->fb;
return fb;
}
void rockchip_drm_mode_config_init(struct drm_device *dev)
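
Both this rockchip change and the virtio-gpu one further down drop their private framebuffer wrappers in favour of the BOs stored in struct drm_framebuffer itself. A minimal sketch of the resulting pattern (the foo_ names are illustrative, not from this series):

#include <drm/drm_framebuffer.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_framebuffer_helper.h>

/* Sketch only: foo_ names are illustrative.  With the GEM BOs stored in
 * drm_framebuffer::obj[], the generic helpers can implement create_handle
 * and destroy, and per-plane BOs come straight from the core struct. */
static const struct drm_framebuffer_funcs foo_fb_funcs = {
	.destroy	= drm_gem_fb_destroy,		/* puts each fb->obj[i], frees fb */
	.create_handle	= drm_gem_fb_create_handle,	/* handle for fb->obj[0] */
};

static struct drm_gem_object *foo_fb_bo(struct drm_framebuffer *fb,
					unsigned int plane)
{
	/* Bounds-checked accessor for fb->obj[plane]. */
	return drm_gem_fb_get_obj(fb, plane);
}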


@ -22,7 +22,4 @@ rockchip_drm_framebuffer_init(struct drm_device *dev,
void rockchip_drm_framebuffer_fini(struct drm_framebuffer *fb);
void rockchip_drm_mode_config_init(struct drm_device *dev);
struct drm_gem_object *rockchip_fb_get_gem_obj(struct drm_framebuffer *fb,
unsigned int plane);
#endif /* _ROCKCHIP_DRM_FB_H */


@ -486,6 +486,31 @@ static void vop_line_flag_irq_disable(struct vop *vop)
spin_unlock_irqrestore(&vop->irq_lock, flags);
}
static int vop_core_clks_enable(struct vop *vop)
{
int ret;
ret = clk_enable(vop->hclk);
if (ret < 0)
return ret;
ret = clk_enable(vop->aclk);
if (ret < 0)
goto err_disable_hclk;
return 0;
err_disable_hclk:
clk_disable(vop->hclk);
return ret;
}
static void vop_core_clks_disable(struct vop *vop)
{
clk_disable(vop->aclk);
clk_disable(vop->hclk);
}
static int vop_enable(struct drm_crtc *crtc)
{
struct vop *vop = to_vop(crtc);
@ -497,17 +522,13 @@ static int vop_enable(struct drm_crtc *crtc)
return ret;
}
ret = clk_enable(vop->hclk);
ret = vop_core_clks_enable(vop);
if (WARN_ON(ret < 0))
goto err_put_pm_runtime;
ret = clk_enable(vop->dclk);
if (WARN_ON(ret < 0))
goto err_disable_hclk;
ret = clk_enable(vop->aclk);
if (WARN_ON(ret < 0))
goto err_disable_dclk;
goto err_disable_core;
/*
* Slave iommu shares power, irq and clock with vop. It was associated
@ -519,7 +540,7 @@ static int vop_enable(struct drm_crtc *crtc)
if (ret) {
DRM_DEV_ERROR(vop->dev,
"failed to attach dma mapping, %d\n", ret);
goto err_disable_aclk;
goto err_disable_dclk;
}
spin_lock(&vop->reg_lock);
@ -552,18 +573,14 @@ static int vop_enable(struct drm_crtc *crtc)
spin_unlock(&vop->reg_lock);
enable_irq(vop->irq);
drm_crtc_vblank_on(crtc);
return 0;
err_disable_aclk:
clk_disable(vop->aclk);
err_disable_dclk:
clk_disable(vop->dclk);
err_disable_hclk:
clk_disable(vop->hclk);
err_disable_core:
vop_core_clks_disable(vop);
err_put_pm_runtime:
pm_runtime_put_sync(vop->dev);
return ret;
@ -599,8 +616,6 @@ static void vop_crtc_atomic_disable(struct drm_crtc *crtc,
vop_dsp_hold_valid_irq_disable(vop);
disable_irq(vop->irq);
vop->is_enabled = false;
/*
@ -609,8 +624,7 @@ static void vop_crtc_atomic_disable(struct drm_crtc *crtc,
rockchip_drm_dma_detach_device(vop->drm_dev, vop->dev);
clk_disable(vop->dclk);
clk_disable(vop->aclk);
clk_disable(vop->hclk);
vop_core_clks_disable(vop);
pm_runtime_put(vop->dev);
mutex_unlock(&vop->vop_lock);
@ -728,7 +742,7 @@ static void vop_plane_atomic_update(struct drm_plane *plane,
return;
}
obj = rockchip_fb_get_gem_obj(fb, 0);
obj = fb->obj[0];
rk_obj = to_rockchip_obj(obj);
actual_w = drm_rect_width(src) >> 16;
@ -758,7 +772,7 @@ static void vop_plane_atomic_update(struct drm_plane *plane,
int vsub = drm_format_vert_chroma_subsampling(fb->format->format);
int bpp = fb->format->cpp[1];
uv_obj = rockchip_fb_get_gem_obj(fb, 1);
uv_obj = fb->obj[1];
rk_uv_obj = to_rockchip_obj(uv_obj);
offset = (src->x1 >> 16) * bpp / hsub;
@ -1177,6 +1191,18 @@ static irqreturn_t vop_isr(int irq, void *data)
uint32_t active_irqs;
int ret = IRQ_NONE;
/*
* The irq is shared with the iommu. If the runtime-pm state of the
* vop-device is disabled the irq has to be targeted at the iommu.
*/
if (!pm_runtime_get_if_in_use(vop->dev))
return IRQ_NONE;
if (vop_core_clks_enable(vop)) {
DRM_DEV_ERROR_RATELIMITED(vop->dev, "couldn't enable clocks\n");
goto out;
}
/*
* interrupt register has interrupt status, enable and clear bits, we
* must hold irq_lock to avoid a race with enable/disable_vblank().
@ -1192,7 +1218,7 @@ static irqreturn_t vop_isr(int irq, void *data)
/* This is expected for vop iommu irqs, since the irq is shared */
if (!active_irqs)
return IRQ_NONE;
goto out_disable;
if (active_irqs & DSP_HOLD_VALID_INTR) {
complete(&vop->dsp_hold_completion);
@ -1218,6 +1244,10 @@ static irqreturn_t vop_isr(int irq, void *data)
DRM_DEV_ERROR(vop->dev, "Unknown VOP IRQs: %#02x\n",
active_irqs);
out_disable:
vop_core_clks_disable(vop);
out:
pm_runtime_put(vop->dev);
return ret;
}
@ -1596,9 +1626,6 @@ static int vop_bind(struct device *dev, struct device *master, void *data)
if (ret)
goto err_disable_pm_runtime;
/* IRQ is initially disabled; it gets enabled in power_on */
disable_irq(vop->irq);
return 0;
err_disable_pm_runtime:


@ -363,8 +363,10 @@ static int rockchip_lvds_bind(struct device *dev, struct device *master,
of_property_read_u32(endpoint, "reg", &endpoint_id);
ret = drm_of_find_panel_or_bridge(dev->of_node, 1, endpoint_id,
&lvds->panel, &lvds->bridge);
if (!ret)
if (!ret) {
of_node_put(endpoint);
break;
}
}
if (!child_count) {
DRM_DEV_ERROR(dev, "lvds port does not have any children\n");
@ -446,14 +448,12 @@ static int rockchip_lvds_bind(struct device *dev, struct device *master,
goto err_free_connector;
}
} else {
lvds->bridge->encoder = encoder;
ret = drm_bridge_attach(encoder, lvds->bridge, NULL);
if (ret) {
DRM_DEV_ERROR(drm_dev->dev,
"failed to attach bridge: %d\n", ret);
goto err_free_encoder;
}
encoder->bridge = lvds->bridge;
}
pm_runtime_enable(dev);
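
For reference, the pattern being fixed here: for_each_child_of_node() takes a reference on each child and only drops it when the loop advances, so bailing out early has to drop it explicitly. A minimal sketch (the compatible string is illustrative):

#include <linux/of.h>

/* Sketch only: "example,panel" is an illustrative compatible string. */
static void foo_scan_port(struct device_node *port)
{
	struct device_node *child;

	for_each_child_of_node(port, child) {
		if (of_device_is_compatible(child, "example,panel")) {
			of_node_put(child);	/* balance the loop's reference */
			break;
		}
	}
}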


@ -19,7 +19,9 @@ selftest(align64, igt_align64)
selftest(evict, igt_evict)
selftest(evict_range, igt_evict_range)
selftest(bottomup, igt_bottomup)
selftest(lowest, igt_lowest)
selftest(topdown, igt_topdown)
selftest(highest, igt_highest)
selftest(color, igt_color)
selftest(color_evict, igt_color_evict)
selftest(color_evict_range, igt_color_evict_range)


@ -1825,6 +1825,77 @@ err:
return ret;
}
static int __igt_once(unsigned int mode)
{
struct drm_mm mm;
struct drm_mm_node rsvd_lo, rsvd_hi, node;
int err;
drm_mm_init(&mm, 0, 7);
memset(&rsvd_lo, 0, sizeof(rsvd_lo));
rsvd_lo.start = 1;
rsvd_lo.size = 1;
err = drm_mm_reserve_node(&mm, &rsvd_lo);
if (err) {
pr_err("Could not reserve low node\n");
goto err;
}
memset(&rsvd_hi, 0, sizeof(rsvd_hi));
rsvd_hi.start = 5;
rsvd_hi.size = 1;
err = drm_mm_reserve_node(&mm, &rsvd_hi);
if (err) {
pr_err("Could not reserve low node\n");
goto err_lo;
}
if (!drm_mm_hole_follows(&rsvd_lo) || !drm_mm_hole_follows(&rsvd_hi)) {
pr_err("Expected a hole after lo and high nodes!\n");
err = -EINVAL;
goto err_hi;
}
memset(&node, 0, sizeof(node));
err = drm_mm_insert_node_generic(&mm, &node,
2, 0, 0,
mode | DRM_MM_INSERT_ONCE);
if (!err) {
pr_err("Unexpectedly inserted the node into the wrong hole: node.start=%llx\n",
node.start);
err = -EINVAL;
goto err_node;
}
err = drm_mm_insert_node_generic(&mm, &node, 2, 0, 0, mode);
if (err) {
pr_err("Could not insert the node into the available hole!\n");
err = -EINVAL;
goto err_hi;
}
err_node:
drm_mm_remove_node(&node);
err_hi:
drm_mm_remove_node(&rsvd_hi);
err_lo:
drm_mm_remove_node(&rsvd_lo);
err:
drm_mm_takedown(&mm);
return err;
}
static int igt_lowest(void *ignored)
{
return __igt_once(DRM_MM_INSERT_LOW);
}
static int igt_highest(void *ignored)
{
return __igt_once(DRM_MM_INSERT_HIGH);
}
static void separate_adjacent_colors(const struct drm_mm_node *node,
unsigned long color,
u64 *start,


@ -211,7 +211,11 @@ static int gdp_dbg_show(struct seq_file *s, void *data)
struct drm_info_node *node = s->private;
struct sti_gdp *gdp = (struct sti_gdp *)node->info_ent->data;
struct drm_plane *drm_plane = &gdp->plane.drm_plane;
struct drm_crtc *crtc = drm_plane->crtc;
struct drm_crtc *crtc;
drm_modeset_lock(&drm_plane->mutex, NULL);
crtc = drm_plane->state->crtc;
drm_modeset_unlock(&drm_plane->mutex);
seq_printf(s, "%s: (vaddr = 0x%p)",
sti_plane_to_str(&gdp->plane), gdp->regs);


@ -1040,7 +1040,7 @@ static int sun6i_dsi_remove(struct platform_device *pdev)
return 0;
}
static int sun6i_dsi_runtime_resume(struct device *dev)
static int __maybe_unused sun6i_dsi_runtime_resume(struct device *dev)
{
struct sun6i_dsi *dsi = dev_get_drvdata(dev);
@ -1069,7 +1069,7 @@ static int sun6i_dsi_runtime_resume(struct device *dev)
return 0;
}
static int sun6i_dsi_runtime_suspend(struct device *dev)
static int __maybe_unused sun6i_dsi_runtime_suspend(struct device *dev)
{
struct sun6i_dsi *dsi = dev_get_drvdata(dev);
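
The __maybe_unused annotation matters because the callbacks are only referenced through SET_RUNTIME_PM_OPS(), which compiles away when CONFIG_PM is disabled. A minimal sketch of the usual arrangement (foo_ names are illustrative):

#include <linux/device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>

/* Sketch only: with CONFIG_PM=n, SET_RUNTIME_PM_OPS() expands to nothing,
 * so the callbacks would trigger -Wunused-function without __maybe_unused
 * (or an #ifdef CONFIG_PM wrapper). */
static int __maybe_unused foo_runtime_suspend(struct device *dev)
{
	/* disable clocks, regulators, ... */
	return 0;
}

static int __maybe_unused foo_runtime_resume(struct device *dev)
{
	/* re-enable clocks, regulators, ... */
	return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
	SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
};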


@ -582,18 +582,6 @@ static int tegra_gem_prime_end_cpu_access(struct dma_buf *buf,
return 0;
}
static void *tegra_gem_prime_kmap_atomic(struct dma_buf *buf,
unsigned long page)
{
return NULL;
}
static void tegra_gem_prime_kunmap_atomic(struct dma_buf *buf,
unsigned long page,
void *addr)
{
}
static void *tegra_gem_prime_kmap(struct dma_buf *buf, unsigned long page)
{
return NULL;
@ -634,8 +622,6 @@ static const struct dma_buf_ops tegra_gem_prime_dmabuf_ops = {
.release = tegra_gem_prime_release,
.begin_cpu_access = tegra_gem_prime_begin_cpu_access,
.end_cpu_access = tegra_gem_prime_end_cpu_access,
.map_atomic = tegra_gem_prime_kmap_atomic,
.unmap_atomic = tegra_gem_prime_kunmap_atomic,
.map = tegra_gem_prime_kmap,
.unmap = tegra_gem_prime_kunmap,
.mmap = tegra_gem_prime_mmap,


@ -29,7 +29,6 @@ struct udl_drm_dmabuf_attachment {
};
static int udl_attach_dma_buf(struct dma_buf *dmabuf,
struct device *dev,
struct dma_buf_attachment *attach)
{
struct udl_drm_dmabuf_attachment *udl_attach;
@ -158,27 +157,12 @@ static void *udl_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
return NULL;
}
static void *udl_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
unsigned long page_num)
{
/* TODO */
return NULL;
}
static void udl_dmabuf_kunmap(struct dma_buf *dma_buf,
unsigned long page_num, void *addr)
{
/* TODO */
}
static void udl_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
unsigned long page_num,
void *addr)
{
/* TODO */
}
static int udl_dmabuf_mmap(struct dma_buf *dma_buf,
struct vm_area_struct *vma)
{
@ -193,9 +177,7 @@ static const struct dma_buf_ops udl_dmabuf_ops = {
.map_dma_buf = udl_map_dma_buf,
.unmap_dma_buf = udl_unmap_dma_buf,
.map = udl_dmabuf_kmap,
.map_atomic = udl_dmabuf_kmap_atomic,
.unmap = udl_dmabuf_kunmap,
.unmap_atomic = udl_dmabuf_kunmap_atomic,
.mmap = udl_dmabuf_mmap,
.release = drm_gem_dmabuf_release,
};


@ -16,6 +16,7 @@
#include <linux/usb.h>
#include <drm/drm_gem.h>
#include <linux/mm_types.h>
#define DRIVER_NAME "udl"
#define DRIVER_DESC "DisplayLink"
@ -136,7 +137,7 @@ void udl_gem_put_pages(struct udl_gem_object *obj);
int udl_gem_vmap(struct udl_gem_object *obj);
void udl_gem_vunmap(struct udl_gem_object *obj);
int udl_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
int udl_gem_fault(struct vm_fault *vmf);
vm_fault_t udl_gem_fault(struct vm_fault *vmf);
int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
int width, int height);


@ -100,13 +100,12 @@ int udl_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
return ret;
}
int udl_gem_fault(struct vm_fault *vmf)
vm_fault_t udl_gem_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct udl_gem_object *obj = to_udl_bo(vma->vm_private_data);
struct page *page;
unsigned int page_offset;
int ret = 0;
page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
@ -114,17 +113,7 @@ int udl_gem_fault(struct vm_fault *vmf)
return VM_FAULT_SIGBUS;
page = obj->pages[page_offset];
ret = vm_insert_page(vma, vmf->address, page);
switch (ret) {
case -EAGAIN:
case 0:
case -ERESTARTSYS:
return VM_FAULT_NOPAGE;
case -ENOMEM:
return VM_FAULT_OOM;
default:
return VM_FAULT_SIGBUS;
}
return vmf_insert_page(vma, vmf->address, page);
}
int udl_gem_get_pages(struct udl_gem_object *obj)


@ -114,8 +114,8 @@ static struct dma_fence *v3d_job_run(struct drm_sched_job *sched_job)
v3d_invalidate_caches(v3d);
fence = v3d_fence_create(v3d, q);
if (!fence)
return fence;
if (IS_ERR(fence))
return NULL;
if (job->done_fence)
dma_fence_put(job->done_fence);


@ -862,7 +862,6 @@ static int vc4_async_page_flip(struct drm_crtc *crtc,
* is released.
*/
drm_atomic_set_fb_for_plane(plane->state, fb);
plane->fb = fb;
vc4_queue_seqno_cb(dev, &flip_state->cb, bo->seqno,
vc4_async_page_flip_complete);
@ -1057,7 +1056,6 @@ static int vc4_crtc_bind(struct device *dev, struct device *master, void *data)
drm_crtc_init_with_planes(drm, crtc, primary_plane, NULL,
&vc4_crtc_funcs, NULL);
drm_crtc_helper_add(crtc, &vc4_crtc_helper_funcs);
primary_plane->crtc = crtc;
vc4_crtc->channel = vc4_crtc->data->hvs_channel;
drm_mode_crtc_set_gamma_size(crtc, ARRAY_SIZE(vc4_crtc->lut_r));
drm_crtc_enable_color_mgmt(crtc, 0, false, crtc->gamma_size);
@ -1093,7 +1091,6 @@ static int vc4_crtc_bind(struct device *dev, struct device *master, void *data)
cursor_plane = vc4_plane_init(drm, DRM_PLANE_TYPE_CURSOR);
if (!IS_ERR(cursor_plane)) {
cursor_plane->possible_crtcs = 1 << drm_crtc_index(crtc);
cursor_plane->crtc = crtc;
crtc->cursor = cursor_plane;
}


@ -467,12 +467,14 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
struct drm_framebuffer *fb = state->fb;
u32 ctl0_offset = vc4_state->dlist_count;
const struct hvs_format *format = vc4_get_hvs_format(fb->format->format);
u64 base_format_mod = fourcc_mod_broadcom_mod(fb->modifier);
int num_planes = drm_format_num_planes(format->drm);
bool mix_plane_alpha;
bool covers_screen;
u32 scl0, scl1, pitch0;
u32 lbm_size, tiling;
unsigned long irqflags;
u32 hvs_format = format->hvs;
int ret, i;
ret = vc4_plane_setup_clipping_and_scaling(state);
@ -512,7 +514,7 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
scl1 = vc4_get_scl_field(state, 0);
}
switch (fb->modifier) {
switch (base_format_mod) {
case DRM_FORMAT_MOD_LINEAR:
tiling = SCALER_CTL0_TILING_LINEAR;
pitch0 = VC4_SET_FIELD(fb->pitches[0], SCALER_SRC_PITCH);
@ -535,6 +537,49 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
break;
}
case DRM_FORMAT_MOD_BROADCOM_SAND64:
case DRM_FORMAT_MOD_BROADCOM_SAND128:
case DRM_FORMAT_MOD_BROADCOM_SAND256: {
uint32_t param = fourcc_mod_broadcom_param(fb->modifier);
/* Column-based NV12 or RGBA.
*/
if (fb->format->num_planes > 1) {
if (hvs_format != HVS_PIXEL_FORMAT_YCBCR_YUV420_2PLANE) {
DRM_DEBUG_KMS("SAND format only valid for NV12/21");
return -EINVAL;
}
hvs_format = HVS_PIXEL_FORMAT_H264;
} else {
if (base_format_mod == DRM_FORMAT_MOD_BROADCOM_SAND256) {
DRM_DEBUG_KMS("SAND256 format only valid for H.264");
return -EINVAL;
}
}
switch (base_format_mod) {
case DRM_FORMAT_MOD_BROADCOM_SAND64:
tiling = SCALER_CTL0_TILING_64B;
break;
case DRM_FORMAT_MOD_BROADCOM_SAND128:
tiling = SCALER_CTL0_TILING_128B;
break;
case DRM_FORMAT_MOD_BROADCOM_SAND256:
tiling = SCALER_CTL0_TILING_256B_OR_T;
break;
default:
break;
}
if (param > SCALER_TILE_HEIGHT_MASK) {
DRM_DEBUG_KMS("SAND height too large (%d)\n", param);
return -EINVAL;
}
pitch0 = VC4_SET_FIELD(param, SCALER_TILE_HEIGHT);
break;
}
default:
DRM_DEBUG_KMS("Unsupported FB tiling flag 0x%16llx",
(long long)fb->modifier);
@ -544,8 +589,9 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
/* Control word */
vc4_dlist_write(vc4_state,
SCALER_CTL0_VALID |
VC4_SET_FIELD(SCALER_CTL0_RGBA_EXPAND_ROUND, SCALER_CTL0_RGBA_EXPAND) |
(format->pixel_order << SCALER_CTL0_ORDER_SHIFT) |
(format->hvs << SCALER_CTL0_PIXEL_FORMAT_SHIFT) |
(hvs_format << SCALER_CTL0_PIXEL_FORMAT_SHIFT) |
VC4_SET_FIELD(tiling, SCALER_CTL0_TILING) |
(vc4_state->is_unity ? SCALER_CTL0_UNITY : 0) |
VC4_SET_FIELD(scl0, SCALER_CTL0_SCL0) |
@ -607,8 +653,13 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
/* Pitch word 1/2 */
for (i = 1; i < num_planes; i++) {
vc4_dlist_write(vc4_state,
VC4_SET_FIELD(fb->pitches[i], SCALER_SRC_PITCH));
if (hvs_format != HVS_PIXEL_FORMAT_H264) {
vc4_dlist_write(vc4_state,
VC4_SET_FIELD(fb->pitches[i],
SCALER_SRC_PITCH));
} else {
vc4_dlist_write(vc4_state, pitch0);
}
}
/* Colorspace conversion words */
@ -810,18 +861,21 @@ static int vc4_prepare_fb(struct drm_plane *plane,
struct dma_fence *fence;
int ret;
if ((plane->state->fb == state->fb) || !state->fb)
if (!state->fb)
return 0;
bo = to_vc4_bo(&drm_fb_cma_get_gem_obj(state->fb, 0)->base);
fence = reservation_object_get_excl_rcu(bo->resv);
drm_atomic_set_fence_for_plane(state, fence);
if (plane->state->fb == state->fb)
return 0;
ret = vc4_bo_inc_usecnt(bo);
if (ret)
return ret;
fence = reservation_object_get_excl_rcu(bo->resv);
drm_atomic_set_fence_for_plane(state, fence);
return 0;
}
@ -866,13 +920,32 @@ static bool vc4_format_mod_supported(struct drm_plane *plane,
case DRM_FORMAT_BGR565:
case DRM_FORMAT_ARGB1555:
case DRM_FORMAT_XRGB1555:
return true;
switch (fourcc_mod_broadcom_mod(modifier)) {
case DRM_FORMAT_MOD_LINEAR:
case DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED:
case DRM_FORMAT_MOD_BROADCOM_SAND64:
case DRM_FORMAT_MOD_BROADCOM_SAND128:
return true;
default:
return false;
}
case DRM_FORMAT_NV12:
case DRM_FORMAT_NV21:
switch (fourcc_mod_broadcom_mod(modifier)) {
case DRM_FORMAT_MOD_LINEAR:
case DRM_FORMAT_MOD_BROADCOM_SAND64:
case DRM_FORMAT_MOD_BROADCOM_SAND128:
case DRM_FORMAT_MOD_BROADCOM_SAND256:
return true;
default:
return false;
}
case DRM_FORMAT_YUV422:
case DRM_FORMAT_YVU422:
case DRM_FORMAT_YUV420:
case DRM_FORMAT_YVU420:
case DRM_FORMAT_NV12:
case DRM_FORMAT_NV16:
case DRM_FORMAT_NV61:
default:
return (modifier == DRM_FORMAT_MOD_LINEAR);
}
@ -900,6 +973,9 @@ struct drm_plane *vc4_plane_init(struct drm_device *dev,
unsigned i;
static const uint64_t modifiers[] = {
DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED,
DRM_FORMAT_MOD_BROADCOM_SAND128,
DRM_FORMAT_MOD_BROADCOM_SAND64,
DRM_FORMAT_MOD_BROADCOM_SAND256,
DRM_FORMAT_MOD_LINEAR,
DRM_FORMAT_MOD_INVALID
};
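
The SAND modifiers encode the per-column height as a modifier parameter; the plane code above therefore switches on the base modifier and feeds the parameter into SCALER_TILE_HEIGHT. A small sketch of how the two halves are extracted (the 96-line column height is an arbitrary example):

#include <linux/bug.h>
#include <linux/types.h>
#include <drm/drm_fourcc.h>

/* Sketch only: demonstrates splitting a parameterised SAND modifier into the
 * base modifier (what vc4_plane_mode_set() switches on) and the column height
 * (what it programs into SCALER_TILE_HEIGHT). */
static void sand_modifier_sketch(void)
{
	u64 mod = DRM_FORMAT_MOD_BROADCOM_SAND128_COL_HEIGHT(96);

	WARN_ON(fourcc_mod_broadcom_mod(mod) != DRM_FORMAT_MOD_BROADCOM_SAND128);
	WARN_ON(fourcc_mod_broadcom_param(mod) != 96);
}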


@ -1031,6 +1031,12 @@ enum hvs_pixel_format {
#define SCALER_SRC_PITCH_MASK VC4_MASK(15, 0)
#define SCALER_SRC_PITCH_SHIFT 0
/* PITCH0/1/2 fields for tiled (SAND). */
#define SCALER_TILE_SKIP_0_MASK VC4_MASK(18, 16)
#define SCALER_TILE_SKIP_0_SHIFT 16
#define SCALER_TILE_HEIGHT_MASK VC4_MASK(15, 0)
#define SCALER_TILE_HEIGHT_SHIFT 0
/* PITCH0 fields for T-tiled. */
#define SCALER_PITCH0_TILE_WIDTH_L_MASK VC4_MASK(22, 16)
#define SCALER_PITCH0_TILE_WIDTH_L_SHIFT 16


@ -61,13 +61,13 @@ static void vgem_gem_free_object(struct drm_gem_object *obj)
kfree(vgem_obj);
}
static int vgem_gem_fault(struct vm_fault *vmf)
static vm_fault_t vgem_gem_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct drm_vgem_gem_object *obj = vma->vm_private_data;
/* We don't use vmf->pgoff since that has the fake offset */
unsigned long vaddr = vmf->address;
int ret;
vm_fault_t ret = VM_FAULT_SIGBUS;
loff_t num_pages;
pgoff_t page_offset;
page_offset = (vaddr - vma->vm_start) >> PAGE_SHIFT;
@ -77,7 +77,6 @@ static int vgem_gem_fault(struct vm_fault *vmf)
if (page_offset > num_pages)
return VM_FAULT_SIGBUS;
ret = -ENOENT;
mutex_lock(&obj->pages_lock);
if (obj->pages) {
get_page(obj->pages[page_offset]);


@ -28,6 +28,7 @@
#include "virtgpu_drv.h"
#include <drm/drm_crtc_helper.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#define XRES_MIN 32
#define YRES_MIN 32
@ -48,16 +49,6 @@ static const struct drm_crtc_funcs virtio_gpu_crtc_funcs = {
.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
};
static void virtio_gpu_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
struct virtio_gpu_framebuffer *virtio_gpu_fb
= to_virtio_gpu_framebuffer(fb);
drm_gem_object_put_unlocked(virtio_gpu_fb->obj);
drm_framebuffer_cleanup(fb);
kfree(virtio_gpu_fb);
}
static int
virtio_gpu_framebuffer_surface_dirty(struct drm_framebuffer *fb,
struct drm_file *file_priv,
@ -71,20 +62,9 @@ virtio_gpu_framebuffer_surface_dirty(struct drm_framebuffer *fb,
return virtio_gpu_surface_dirty(virtio_gpu_fb, clips, num_clips);
}
static int
virtio_gpu_framebuffer_create_handle(struct drm_framebuffer *fb,
struct drm_file *file_priv,
unsigned int *handle)
{
struct virtio_gpu_framebuffer *virtio_gpu_fb =
to_virtio_gpu_framebuffer(fb);
return drm_gem_handle_create(file_priv, virtio_gpu_fb->obj, handle);
}
static const struct drm_framebuffer_funcs virtio_gpu_fb_funcs = {
.create_handle = virtio_gpu_framebuffer_create_handle,
.destroy = virtio_gpu_user_framebuffer_destroy,
.create_handle = drm_gem_fb_create_handle,
.destroy = drm_gem_fb_destroy,
.dirty = virtio_gpu_framebuffer_surface_dirty,
};
@ -97,7 +77,7 @@ virtio_gpu_framebuffer_init(struct drm_device *dev,
int ret;
struct virtio_gpu_object *bo;
vgfb->obj = obj;
vgfb->base.obj[0] = obj;
bo = gem_to_virtio_gpu_obj(obj);
@ -105,7 +85,7 @@ virtio_gpu_framebuffer_init(struct drm_device *dev,
ret = drm_framebuffer_init(dev, &vgfb->base, &virtio_gpu_fb_funcs);
if (ret) {
vgfb->obj = NULL;
vgfb->base.obj[0] = NULL;
return ret;
}
@ -302,8 +282,6 @@ static int vgdev_output_init(struct virtio_gpu_device *vgdev, int index)
drm_crtc_init_with_planes(dev, crtc, primary, cursor,
&virtio_gpu_crtc_funcs, NULL);
drm_crtc_helper_add(crtc, &virtio_gpu_crtc_helper_funcs);
primary->crtc = crtc;
cursor->crtc = crtc;
drm_connector_init(dev, connector, &virtio_gpu_connector_funcs,
DRM_MODE_CONNECTOR_VIRTUAL);

View File

@ -124,7 +124,6 @@ struct virtio_gpu_output {
struct virtio_gpu_framebuffer {
struct drm_framebuffer base;
struct drm_gem_object *obj;
int x1, y1, x2, y2; /* dirty rect */
spinlock_t dirty_lock;
uint32_t hw_res_handle;
