Merge tag 'drm-misc-next-2019-02-11' of git://anongit.freedesktop.org/drm/drm-misc into drm-next

drm-misc-next for 5.1:

UAPI Changes:
  - New fourccs for the P010, P012 and P016 formats

Cross-subsystem Changes:

Core Changes:
  - Removal of drm_calc_{h,v}scale_relaxed
  - A few fixes for DP-MST

Driver Changes:
  - More drmP.h cleanups
  - A bunch of vkms fixes
  - Conversion of the Cadence DSI bridge and Allwinner DSI driver to the
    generic phy MIPI-DPHY API
  - New panel: Innolux EE101IA-01D

Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>

From: Maxime Ripard <maxime.ripard@bootlin.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190211095220.3oeodszr2dgxrwqq@flea
Daniel Vetter 2019-02-14 14:07:17 +01:00
commit 8d451a4b6e
76 changed files with 867 additions and 879 deletions

View File

@ -0,0 +1,7 @@
Innolux Corporation 10.1" EE101IA-01D WXGA (1280x800) LVDS panel
Required properties:
- compatible: should be "innolux,ee101ia-01d"
This binding is compatible with the lvds-panel binding, which is specified
in panel-lvds.txt in this directory.

View File

@ -238,6 +238,14 @@ DRM specific patterns. Note that ENOTTY has the slightly unintuitive meaning of
Testing and validation
======================
Testing Requirements for userspace API
--------------------------------------
New cross-driver userspace interface extensions, like new IOCTL, new KMS
properties, new files in sysfs or anything else that constitutes an API change
should have driver-agnostic testcases in IGT for that feature, if such a test
can be reasonably made using IGT for the target hardware.
Validating changes with IGT
---------------------------

View File

@ -398,10 +398,6 @@ KMS cleanups
Some of these date from the very introduction of KMS in 2008 ...
- drm_display_mode doesn't need to be derived from drm_mode_object. That's
leftovers from older (never merged into upstream) KMS designs where modes
where set using their ID, including support to add/remove modes.
- Make ->funcs and ->helper_private vtables optional. There's a bunch of empty
function tables in drivers, but before we can remove them we need to make sure
that all the users in helpers and drivers do correctly check for a NULL

View File

@ -23,17 +23,6 @@ CRC API Improvements
- Add igt test to check extreme alpha values i.e. fully opaque and fully
transparent (intermediate values are affected by hw-specific rounding modes).
Vblank issues
-------------
Some IGT test cases are failing. Need to analyze why and fix the issues:
- plain-flip-fb-recreate
- plain-flip-ts-check
- flip-vs-blocking-wf-vblank
- plain-flip-fb-recreate-interruptible
- flip-vs-wf_vblank-interruptible
Runtime Configuration
---------------------

View File

@ -5,12 +5,16 @@
*
*/
#include <linux/clk.h>
#include <linux/pm_runtime.h>
#include <linux/spinlock.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_crtc_helper.h>
#include <linux/pm_runtime.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
#include "komeda_dev.h"
#include "komeda_kms.h"

View File

@ -4,9 +4,13 @@
* Author: James.Qian.Wang <james.qian.wang@arm.com>
*
*/
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/of_device.h>
#include <linux/of_graph.h>
#include <linux/platform_device.h>
#include <drm/drm_print.h>
#include "komeda_dev.h"
static int komeda_parse_pipe_dt(struct komeda_dev *mdev, struct device_node *np)

View File

@ -4,10 +4,12 @@
* Author: James.Qian.Wang <james.qian.wang@arm.com>
*
*/
#include <drm/drm_gem.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_device.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include "komeda_framebuffer.h"
#include "komeda_dev.h"

View File

@ -5,15 +5,19 @@
*
*/
#include <linux/component.h>
#include <linux/interrupt.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
#include <linux/interrupt.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_vblank.h>
#include "komeda_dev.h"
#include "komeda_kms.h"
#include "komeda_framebuffer.h"
#include "komeda_kms.h"
DEFINE_DRM_GEM_CMA_FOPS(komeda_cma_fops);

View File

@ -10,6 +10,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_device.h>
#include <drm/drm_writeback.h>
/** struct komeda_plane - komeda instance of drm_plane */

View File

@ -4,6 +4,8 @@
* Author: James.Qian.Wang <james.qian.wang@arm.com>
*
*/
#include <drm/drm_print.h>
#include "komeda_dev.h"
#include "komeda_pipeline.h"

View File

@ -442,5 +442,6 @@ int bochs_gem_prime_mmap(struct drm_gem_object *obj,
{
struct bochs_bo *bo = gem_to_bochs_bo(obj);
return ttm_fbdev_mmap(vma, &bo->bo);
bo->gem.vma_node.vm_node.start = bo->bo.vma_node.vm_node.start;
return drm_gem_prime_mmap(obj, vma);
}

View File

@ -30,6 +30,7 @@ config DRM_CDNS_DSI
select DRM_KMS_HELPER
select DRM_MIPI_DSI
select DRM_PANEL_BRIDGE
select GENERIC_PHY_MIPI_DPHY
depends on OF
help
Support Cadence DPI to DSI bridge. This is an internal

View File

@ -23,6 +23,9 @@
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/phy/phy.h>
#include <linux/phy/phy-mipi-dphy.h>
#define IP_CONF 0x0
#define SP_HS_FIFO_DEPTH(x) (((x) & GENMASK(30, 26)) >> 26)
#define SP_LP_FIFO_DEPTH(x) (((x) & GENMASK(25, 21)) >> 21)
@ -421,44 +424,11 @@
#define DSI_NULL_FRAME_OVERHEAD 6
#define DSI_EOT_PKT_SIZE 4
#define REG_WAKEUP_TIME_NS 800
#define DPHY_PLL_RATE_HZ 108000000
/* DPHY registers */
#define DPHY_PMA_CMN(reg) (reg)
#define DPHY_PMA_LCLK(reg) (0x100 + (reg))
#define DPHY_PMA_LDATA(lane, reg) (0x200 + ((lane) * 0x100) + (reg))
#define DPHY_PMA_RCLK(reg) (0x600 + (reg))
#define DPHY_PMA_RDATA(lane, reg) (0x700 + ((lane) * 0x100) + (reg))
#define DPHY_PCS(reg) (0xb00 + (reg))
#define DPHY_CMN_SSM DPHY_PMA_CMN(0x20)
#define DPHY_CMN_SSM_EN BIT(0)
#define DPHY_CMN_TX_MODE_EN BIT(9)
#define DPHY_CMN_PWM DPHY_PMA_CMN(0x40)
#define DPHY_CMN_PWM_DIV(x) ((x) << 20)
#define DPHY_CMN_PWM_LOW(x) ((x) << 10)
#define DPHY_CMN_PWM_HIGH(x) (x)
#define DPHY_CMN_FBDIV DPHY_PMA_CMN(0x4c)
#define DPHY_CMN_FBDIV_VAL(low, high) (((high) << 11) | ((low) << 22))
#define DPHY_CMN_FBDIV_FROM_REG (BIT(10) | BIT(21))
#define DPHY_CMN_OPIPDIV DPHY_PMA_CMN(0x50)
#define DPHY_CMN_IPDIV_FROM_REG BIT(0)
#define DPHY_CMN_IPDIV(x) ((x) << 1)
#define DPHY_CMN_OPDIV_FROM_REG BIT(6)
#define DPHY_CMN_OPDIV(x) ((x) << 7)
#define DPHY_PSM_CFG DPHY_PCS(0x4)
#define DPHY_PSM_CFG_FROM_REG BIT(0)
#define DPHY_PSM_CLK_DIV(x) ((x) << 1)
struct cdns_dsi_output {
struct mipi_dsi_device *dev;
struct drm_panel *panel;
struct drm_bridge *bridge;
union phy_configure_opts phy_opts;
};
enum cdns_dsi_input_id {
@ -467,14 +437,6 @@ enum cdns_dsi_input_id {
CDNS_DSC_INPUT,
};
struct cdns_dphy_cfg {
u8 pll_ipdiv;
u8 pll_opdiv;
u16 pll_fbdiv;
unsigned long lane_bps;
unsigned int nlanes;
};
struct cdns_dsi_cfg {
unsigned int hfp;
unsigned int hsa;
@ -483,34 +445,6 @@ struct cdns_dsi_cfg {
unsigned int htotal;
};
struct cdns_dphy;
enum cdns_dphy_clk_lane_cfg {
DPHY_CLK_CFG_LEFT_DRIVES_ALL = 0,
DPHY_CLK_CFG_LEFT_DRIVES_RIGHT = 1,
DPHY_CLK_CFG_LEFT_DRIVES_LEFT = 2,
DPHY_CLK_CFG_RIGHT_DRIVES_ALL = 3,
};
struct cdns_dphy_ops {
int (*probe)(struct cdns_dphy *dphy);
void (*remove)(struct cdns_dphy *dphy);
void (*set_psm_div)(struct cdns_dphy *dphy, u8 div);
void (*set_clk_lane_cfg)(struct cdns_dphy *dphy,
enum cdns_dphy_clk_lane_cfg cfg);
void (*set_pll_cfg)(struct cdns_dphy *dphy,
const struct cdns_dphy_cfg *cfg);
unsigned long (*get_wakeup_time_ns)(struct cdns_dphy *dphy);
};
struct cdns_dphy {
struct cdns_dphy_cfg cfg;
void __iomem *regs;
struct clk *psm_clk;
struct clk *pll_ref_clk;
const struct cdns_dphy_ops *ops;
};
struct cdns_dsi_input {
enum cdns_dsi_input_id id;
struct drm_bridge bridge;
@ -528,7 +462,7 @@ struct cdns_dsi {
struct reset_control *dsi_p_rst;
struct clk *dsi_sys_clk;
bool link_initialized;
struct cdns_dphy *dphy;
struct phy *dphy;
};
static inline struct cdns_dsi *input_to_dsi(struct cdns_dsi_input *input)
@ -547,173 +481,13 @@ bridge_to_cdns_dsi_input(struct drm_bridge *bridge)
return container_of(bridge, struct cdns_dsi_input, bridge);
}
static int cdns_dsi_get_dphy_pll_cfg(struct cdns_dphy *dphy,
struct cdns_dphy_cfg *cfg,
unsigned int dpi_htotal,
unsigned int dpi_bpp,
unsigned int dpi_hz,
unsigned int dsi_htotal,
unsigned int dsi_nlanes,
unsigned int *dsi_hfp_ext)
static unsigned int mode_to_dpi_hfp(const struct drm_display_mode *mode,
bool mode_valid_check)
{
u64 dlane_bps, dlane_bps_max, fbdiv, fbdiv_max, adj_dsi_htotal;
unsigned long pll_ref_hz = clk_get_rate(dphy->pll_ref_clk);
if (mode_valid_check)
return mode->hsync_start - mode->hdisplay;
memset(cfg, 0, sizeof(*cfg));
cfg->nlanes = dsi_nlanes;
if (pll_ref_hz < 9600000 || pll_ref_hz >= 150000000)
return -EINVAL;
else if (pll_ref_hz < 19200000)
cfg->pll_ipdiv = 1;
else if (pll_ref_hz < 38400000)
cfg->pll_ipdiv = 2;
else if (pll_ref_hz < 76800000)
cfg->pll_ipdiv = 4;
else
cfg->pll_ipdiv = 8;
/*
* Make sure DSI htotal is aligned on a lane boundary when calculating
* the expected data rate. This is done by extending HFP in case of
* misalignment.
*/
adj_dsi_htotal = dsi_htotal;
if (dsi_htotal % dsi_nlanes)
adj_dsi_htotal += dsi_nlanes - (dsi_htotal % dsi_nlanes);
dlane_bps = (u64)dpi_hz * adj_dsi_htotal;
/* data rate in bytes/sec is not an integer, refuse the mode. */
if (do_div(dlane_bps, dsi_nlanes * dpi_htotal))
return -EINVAL;
/* data rate was in bytes/sec, convert to bits/sec. */
dlane_bps *= 8;
if (dlane_bps > 2500000000UL || dlane_bps < 160000000UL)
return -EINVAL;
else if (dlane_bps >= 1250000000)
cfg->pll_opdiv = 1;
else if (dlane_bps >= 630000000)
cfg->pll_opdiv = 2;
else if (dlane_bps >= 320000000)
cfg->pll_opdiv = 4;
else if (dlane_bps >= 160000000)
cfg->pll_opdiv = 8;
/*
* Allow a deviation of 0.2% on the per-lane data rate to try to
* recover a potential mismatch between DPI and PPI clks.
*/
dlane_bps_max = dlane_bps + DIV_ROUND_DOWN_ULL(dlane_bps, 500);
fbdiv_max = DIV_ROUND_DOWN_ULL(dlane_bps_max * 2 *
cfg->pll_opdiv * cfg->pll_ipdiv,
pll_ref_hz);
fbdiv = DIV_ROUND_UP_ULL(dlane_bps * 2 * cfg->pll_opdiv *
cfg->pll_ipdiv,
pll_ref_hz);
/*
* Iterate over all acceptable fbdiv and try to find an adjusted DSI
* htotal length providing an exact match.
*
* Note that we could do something even trickier by relying on the fact
* that a new line is not necessarily aligned on a lane boundary, so,
* by making adj_dsi_htotal non aligned on a dsi_lanes we can improve a
* bit the precision. With this, the step would be
*
* pll_ref_hz / (2 * opdiv * ipdiv * nlanes)
*
* instead of
*
* pll_ref_hz / (2 * opdiv * ipdiv)
*
* The drawback of this approach is that we would need to make sure the
* number or lines is a multiple of the realignment periodicity which is
* a function of the number of lanes and the original misalignment. For
* example, for NLANES = 4 and HTOTAL % NLANES = 3, it takes 4 lines
* to realign on a lane:
* LINE 0: expected number of bytes, starts emitting first byte of
* LINE 1 on LANE 3
* LINE 1: expected number of bytes, starts emitting first 2 bytes of
* LINE 2 on LANES 2 and 3
* LINE 2: expected number of bytes, starts emitting first 3 bytes of
* of LINE 3 on LANES 1, 2 and 3
* LINE 3: one byte less, now things are realigned on LANE 0 for LINE 4
*
* I figured this extra complexity was not worth the benefit, but if
* someone really has unfixable mismatch, that would be something to
* investigate.
*/
for (; fbdiv <= fbdiv_max; fbdiv++) {
u32 rem;
adj_dsi_htotal = (u64)fbdiv * pll_ref_hz * dsi_nlanes *
dpi_htotal;
/*
* Do the division in 2 steps to avoid an overflow on the
* divider.
*/
rem = do_div(adj_dsi_htotal, dpi_hz);
if (rem)
continue;
rem = do_div(adj_dsi_htotal,
cfg->pll_opdiv * cfg->pll_ipdiv * 2 * 8);
if (rem)
continue;
cfg->pll_fbdiv = fbdiv;
*dsi_hfp_ext = adj_dsi_htotal - dsi_htotal;
break;
}
/* No match, let's just reject the display mode. */
if (!cfg->pll_fbdiv)
return -EINVAL;
dlane_bps = DIV_ROUND_DOWN_ULL((u64)dpi_hz * adj_dsi_htotal * 8,
dsi_nlanes * dpi_htotal);
cfg->lane_bps = dlane_bps;
return 0;
}
static int cdns_dphy_setup_psm(struct cdns_dphy *dphy)
{
unsigned long psm_clk_hz = clk_get_rate(dphy->psm_clk);
unsigned long psm_div;
if (!psm_clk_hz || psm_clk_hz > 100000000)
return -EINVAL;
psm_div = DIV_ROUND_CLOSEST(psm_clk_hz, 1000000);
if (dphy->ops->set_psm_div)
dphy->ops->set_psm_div(dphy, psm_div);
return 0;
}
static void cdns_dphy_set_clk_lane_cfg(struct cdns_dphy *dphy,
enum cdns_dphy_clk_lane_cfg cfg)
{
if (dphy->ops->set_clk_lane_cfg)
dphy->ops->set_clk_lane_cfg(dphy, cfg);
}
static void cdns_dphy_set_pll_cfg(struct cdns_dphy *dphy,
const struct cdns_dphy_cfg *cfg)
{
if (dphy->ops->set_pll_cfg)
dphy->ops->set_pll_cfg(dphy, cfg);
}
static unsigned long cdns_dphy_get_wakeup_time_ns(struct cdns_dphy *dphy)
{
return dphy->ops->get_wakeup_time_ns(dphy);
return mode->crtc_hsync_start - mode->crtc_hdisplay;
}
static unsigned int dpi_to_dsi_timing(unsigned int dpi_timing,
@ -733,14 +507,12 @@ static unsigned int dpi_to_dsi_timing(unsigned int dpi_timing,
static int cdns_dsi_mode2cfg(struct cdns_dsi *dsi,
const struct drm_display_mode *mode,
struct cdns_dsi_cfg *dsi_cfg,
struct cdns_dphy_cfg *dphy_cfg,
bool mode_valid_check)
{
unsigned long dsi_htotal = 0, dsi_hss_hsa_hse_hbp = 0;
struct cdns_dsi_output *output = &dsi->output;
unsigned int dsi_hfp_ext = 0, dpi_hfp, tmp;
unsigned int tmp;
bool sync_pulse = false;
int bpp, nlanes, ret;
int bpp, nlanes;
memset(dsi_cfg, 0, sizeof(*dsi_cfg));
@ -759,8 +531,6 @@ static int cdns_dsi_mode2cfg(struct cdns_dsi *dsi,
mode->crtc_hsync_end : mode->crtc_hsync_start);
dsi_cfg->hbp = dpi_to_dsi_timing(tmp, bpp, DSI_HBP_FRAME_OVERHEAD);
dsi_htotal += dsi_cfg->hbp + DSI_HBP_FRAME_OVERHEAD;
dsi_hss_hsa_hse_hbp += dsi_cfg->hbp + DSI_HBP_FRAME_OVERHEAD;
if (sync_pulse) {
if (mode_valid_check)
@ -770,49 +540,104 @@ static int cdns_dsi_mode2cfg(struct cdns_dsi *dsi,
dsi_cfg->hsa = dpi_to_dsi_timing(tmp, bpp,
DSI_HSA_FRAME_OVERHEAD);
dsi_htotal += dsi_cfg->hsa + DSI_HSA_FRAME_OVERHEAD;
dsi_hss_hsa_hse_hbp += dsi_cfg->hsa + DSI_HSA_FRAME_OVERHEAD;
}
dsi_cfg->hact = dpi_to_dsi_timing(mode_valid_check ?
mode->hdisplay : mode->crtc_hdisplay,
bpp, 0);
dsi_cfg->hfp = dpi_to_dsi_timing(mode_to_dpi_hfp(mode, mode_valid_check),
bpp, DSI_HFP_FRAME_OVERHEAD);
return 0;
}
static int cdns_dsi_adjust_phy_config(struct cdns_dsi *dsi,
struct cdns_dsi_cfg *dsi_cfg,
struct phy_configure_opts_mipi_dphy *phy_cfg,
const struct drm_display_mode *mode,
bool mode_valid_check)
{
struct cdns_dsi_output *output = &dsi->output;
unsigned long long dlane_bps;
unsigned long adj_dsi_htotal;
unsigned long dsi_htotal;
unsigned long dpi_htotal;
unsigned long dpi_hz;
unsigned int dsi_hfp_ext;
unsigned int lanes = output->dev->lanes;
dsi_htotal = dsi_cfg->hbp + DSI_HBP_FRAME_OVERHEAD;
if (output->dev->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
dsi_htotal += dsi_cfg->hsa + DSI_HSA_FRAME_OVERHEAD;
dsi_htotal += dsi_cfg->hact;
if (mode_valid_check)
dpi_hfp = mode->hsync_start - mode->hdisplay;
else
dpi_hfp = mode->crtc_hsync_start - mode->crtc_hdisplay;
dsi_cfg->hfp = dpi_to_dsi_timing(dpi_hfp, bpp, DSI_HFP_FRAME_OVERHEAD);
dsi_htotal += dsi_cfg->hfp + DSI_HFP_FRAME_OVERHEAD;
if (mode_valid_check)
ret = cdns_dsi_get_dphy_pll_cfg(dsi->dphy, dphy_cfg,
mode->htotal, bpp,
mode->clock * 1000,
dsi_htotal, nlanes,
&dsi_hfp_ext);
else
ret = cdns_dsi_get_dphy_pll_cfg(dsi->dphy, dphy_cfg,
mode->crtc_htotal, bpp,
mode->crtc_clock * 1000,
dsi_htotal, nlanes,
&dsi_hfp_ext);
/*
* Make sure DSI htotal is aligned on a lane boundary when calculating
* the expected data rate. This is done by extending HFP in case of
* misalignment.
*/
adj_dsi_htotal = dsi_htotal;
if (dsi_htotal % lanes)
adj_dsi_htotal += lanes - (dsi_htotal % lanes);
dpi_hz = (mode_valid_check ? mode->clock : mode->crtc_clock) * 1000;
dlane_bps = (unsigned long long)dpi_hz * adj_dsi_htotal;
/* data rate in bytes/sec is not an integer, refuse the mode. */
dpi_htotal = mode_valid_check ? mode->htotal : mode->crtc_htotal;
if (do_div(dlane_bps, lanes * dpi_htotal))
return -EINVAL;
/* data rate was in bytes/sec, convert to bits/sec. */
phy_cfg->hs_clk_rate = dlane_bps * 8;
dsi_hfp_ext = adj_dsi_htotal - dsi_htotal;
dsi_cfg->hfp += dsi_hfp_ext;
dsi_cfg->htotal = dsi_htotal + dsi_hfp_ext;
return 0;
}
static int cdns_dsi_check_conf(struct cdns_dsi *dsi,
const struct drm_display_mode *mode,
struct cdns_dsi_cfg *dsi_cfg,
bool mode_valid_check)
{
struct cdns_dsi_output *output = &dsi->output;
struct phy_configure_opts_mipi_dphy *phy_cfg = &output->phy_opts.mipi_dphy;
unsigned long dsi_hss_hsa_hse_hbp;
unsigned int nlanes = output->dev->lanes;
int ret;
ret = cdns_dsi_mode2cfg(dsi, mode, dsi_cfg, mode_valid_check);
if (ret)
return ret;
dsi_cfg->hfp += dsi_hfp_ext;
dsi_htotal += dsi_hfp_ext;
dsi_cfg->htotal = dsi_htotal;
phy_mipi_dphy_get_default_config(mode->crtc_clock * 1000,
mipi_dsi_pixel_format_to_bpp(output->dev->format),
nlanes, phy_cfg);
ret = cdns_dsi_adjust_phy_config(dsi, dsi_cfg, phy_cfg, mode, mode_valid_check);
if (ret)
return ret;
ret = phy_validate(dsi->dphy, PHY_MODE_MIPI_DPHY, 0, &output->phy_opts);
if (ret)
return ret;
dsi_hss_hsa_hse_hbp = dsi_cfg->hbp + DSI_HBP_FRAME_OVERHEAD;
if (output->dev->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
dsi_hss_hsa_hse_hbp += dsi_cfg->hsa + DSI_HSA_FRAME_OVERHEAD;
/*
* Make sure DPI(HFP) > DSI(HSS+HSA+HSE+HBP) to guarantee that the FIFO
* is empty before we start a receiving a new line on the DPI
* interface.
*/
if ((u64)dphy_cfg->lane_bps * dpi_hfp * nlanes <
if ((u64)phy_cfg->hs_clk_rate *
mode_to_dpi_hfp(mode, mode_valid_check) * nlanes <
(u64)dsi_hss_hsa_hse_hbp *
(mode_valid_check ? mode->clock : mode->crtc_clock) * 1000)
return -EINVAL;
@ -842,9 +667,8 @@ cdns_dsi_bridge_mode_valid(struct drm_bridge *bridge,
struct cdns_dsi_input *input = bridge_to_cdns_dsi_input(bridge);
struct cdns_dsi *dsi = input_to_dsi(input);
struct cdns_dsi_output *output = &dsi->output;
struct cdns_dphy_cfg dphy_cfg;
struct cdns_dsi_cfg dsi_cfg;
int bpp, nlanes, ret;
int bpp, ret;
/*
* VFP_DSI should be less than VFP_DPI and VFP_DSI should be at
@ -862,11 +686,9 @@ cdns_dsi_bridge_mode_valid(struct drm_bridge *bridge,
if ((mode->hdisplay * bpp) % 32)
return MODE_H_ILLEGAL;
nlanes = output->dev->lanes;
ret = cdns_dsi_mode2cfg(dsi, mode, &dsi_cfg, &dphy_cfg, true);
ret = cdns_dsi_check_conf(dsi, mode, &dsi_cfg, true);
if (ret)
return MODE_CLOCK_RANGE;
return MODE_BAD;
return MODE_OK;
}
@ -887,9 +709,9 @@ static void cdns_dsi_bridge_disable(struct drm_bridge *bridge)
pm_runtime_put(dsi->base.dev);
}
static void cdns_dsi_hs_init(struct cdns_dsi *dsi,
const struct cdns_dphy_cfg *dphy_cfg)
static void cdns_dsi_hs_init(struct cdns_dsi *dsi)
{
struct cdns_dsi_output *output = &dsi->output;
u32 status;
/*
@ -900,30 +722,10 @@ static void cdns_dsi_hs_init(struct cdns_dsi *dsi,
DPHY_CMN_PDN | DPHY_PLL_PDN,
dsi->regs + MCTL_DPHY_CFG0);
/*
* Configure the internal PSM clk divider so that the DPHY has a
* 1MHz clk (or something close).
*/
WARN_ON_ONCE(cdns_dphy_setup_psm(dsi->dphy));
/*
* Configure attach clk lanes to data lanes: the DPHY has 2 clk lanes
* and 8 data lanes, each clk lane can be attache different set of
* data lanes. The 2 groups are named 'left' and 'right', so here we
* just say that we want the 'left' clk lane to drive the 'left' data
* lanes.
*/
cdns_dphy_set_clk_lane_cfg(dsi->dphy, DPHY_CLK_CFG_LEFT_DRIVES_LEFT);
/*
* Configure the DPHY PLL that will be used to generate the TX byte
* clk.
*/
cdns_dphy_set_pll_cfg(dsi->dphy, dphy_cfg);
/* Start TX state machine. */
writel(DPHY_CMN_SSM_EN | DPHY_CMN_TX_MODE_EN,
dsi->dphy->regs + DPHY_CMN_SSM);
phy_init(dsi->dphy);
phy_set_mode(dsi->dphy, PHY_MODE_MIPI_DPHY);
phy_configure(dsi->dphy, &output->phy_opts);
phy_power_on(dsi->dphy);
/* Activate the PLL and wait until it's locked. */
writel(PLL_LOCKED, dsi->regs + MCTL_MAIN_STS_CLR);
@ -933,7 +735,7 @@ static void cdns_dsi_hs_init(struct cdns_dsi *dsi,
status & PLL_LOCKED, 100, 100));
/* De-assert data and clock reset lines. */
writel(DPHY_CMN_PSO | DPHY_ALL_D_PDN | DPHY_C_PDN | DPHY_CMN_PDN |
DPHY_D_RSTB(dphy_cfg->nlanes) | DPHY_C_RSTB,
DPHY_D_RSTB(output->dev->lanes) | DPHY_C_RSTB,
dsi->regs + MCTL_DPHY_CFG0);
}
@ -979,7 +781,7 @@ static void cdns_dsi_bridge_enable(struct drm_bridge *bridge)
struct cdns_dsi *dsi = input_to_dsi(input);
struct cdns_dsi_output *output = &dsi->output;
struct drm_display_mode *mode;
struct cdns_dphy_cfg dphy_cfg;
struct phy_configure_opts_mipi_dphy *phy_cfg = &output->phy_opts.mipi_dphy;
unsigned long tx_byte_period;
struct cdns_dsi_cfg dsi_cfg;
u32 tmp, reg_wakeup, div;
@ -992,9 +794,9 @@ static void cdns_dsi_bridge_enable(struct drm_bridge *bridge)
bpp = mipi_dsi_pixel_format_to_bpp(output->dev->format);
nlanes = output->dev->lanes;
WARN_ON_ONCE(cdns_dsi_mode2cfg(dsi, mode, &dsi_cfg, &dphy_cfg, false));
WARN_ON_ONCE(cdns_dsi_check_conf(dsi, mode, &dsi_cfg, false));
cdns_dsi_hs_init(dsi, &dphy_cfg);
cdns_dsi_hs_init(dsi);
cdns_dsi_init_link(dsi);
writel(HBP_LEN(dsi_cfg.hbp) | HSA_LEN(dsi_cfg.hsa),
@ -1030,9 +832,8 @@ static void cdns_dsi_bridge_enable(struct drm_bridge *bridge)
tmp -= DIV_ROUND_UP(DSI_EOT_PKT_SIZE, nlanes);
tx_byte_period = DIV_ROUND_DOWN_ULL((u64)NSEC_PER_SEC * 8,
dphy_cfg.lane_bps);
reg_wakeup = cdns_dphy_get_wakeup_time_ns(dsi->dphy) /
tx_byte_period;
phy_cfg->hs_clk_rate);
reg_wakeup = (phy_cfg->hs_prepare + phy_cfg->hs_zero) / tx_byte_period;
writel(REG_WAKEUP_TIME(reg_wakeup) | REG_LINE_DURATION(tmp),
dsi->regs + VID_DPHY_TIME);
@ -1346,8 +1147,6 @@ static int __maybe_unused cdns_dsi_resume(struct device *dev)
reset_control_deassert(dsi->dsi_p_rst);
clk_prepare_enable(dsi->dsi_p_clk);
clk_prepare_enable(dsi->dsi_sys_clk);
clk_prepare_enable(dsi->dphy->psm_clk);
clk_prepare_enable(dsi->dphy->pll_ref_clk);
return 0;
}
@ -1356,8 +1155,6 @@ static int __maybe_unused cdns_dsi_suspend(struct device *dev)
{
struct cdns_dsi *dsi = dev_get_drvdata(dev);
clk_disable_unprepare(dsi->dphy->pll_ref_clk);
clk_disable_unprepare(dsi->dphy->psm_clk);
clk_disable_unprepare(dsi->dsi_sys_clk);
clk_disable_unprepare(dsi->dsi_p_clk);
reset_control_assert(dsi->dsi_p_rst);
@ -1368,121 +1165,6 @@ static int __maybe_unused cdns_dsi_suspend(struct device *dev)
static UNIVERSAL_DEV_PM_OPS(cdns_dsi_pm_ops, cdns_dsi_suspend, cdns_dsi_resume,
NULL);
static unsigned long cdns_dphy_ref_get_wakeup_time_ns(struct cdns_dphy *dphy)
{
/* Default wakeup time is 800 ns (in a simulated environment). */
return 800;
}
static void cdns_dphy_ref_set_pll_cfg(struct cdns_dphy *dphy,
const struct cdns_dphy_cfg *cfg)
{
u32 fbdiv_low, fbdiv_high;
fbdiv_low = (cfg->pll_fbdiv / 4) - 2;
fbdiv_high = cfg->pll_fbdiv - fbdiv_low - 2;
writel(DPHY_CMN_IPDIV_FROM_REG | DPHY_CMN_OPDIV_FROM_REG |
DPHY_CMN_IPDIV(cfg->pll_ipdiv) |
DPHY_CMN_OPDIV(cfg->pll_opdiv),
dphy->regs + DPHY_CMN_OPIPDIV);
writel(DPHY_CMN_FBDIV_FROM_REG |
DPHY_CMN_FBDIV_VAL(fbdiv_low, fbdiv_high),
dphy->regs + DPHY_CMN_FBDIV);
writel(DPHY_CMN_PWM_HIGH(6) | DPHY_CMN_PWM_LOW(0x101) |
DPHY_CMN_PWM_DIV(0x8),
dphy->regs + DPHY_CMN_PWM);
}
static void cdns_dphy_ref_set_psm_div(struct cdns_dphy *dphy, u8 div)
{
writel(DPHY_PSM_CFG_FROM_REG | DPHY_PSM_CLK_DIV(div),
dphy->regs + DPHY_PSM_CFG);
}
/*
* This is the reference implementation of DPHY hooks. Specific integration of
* this IP may have to re-implement some of them depending on how they decided
* to wire things in the SoC.
*/
static const struct cdns_dphy_ops ref_dphy_ops = {
.get_wakeup_time_ns = cdns_dphy_ref_get_wakeup_time_ns,
.set_pll_cfg = cdns_dphy_ref_set_pll_cfg,
.set_psm_div = cdns_dphy_ref_set_psm_div,
};
static const struct of_device_id cdns_dphy_of_match[] = {
{ .compatible = "cdns,dphy", .data = &ref_dphy_ops },
{ /* sentinel */ },
};
static struct cdns_dphy *cdns_dphy_probe(struct platform_device *pdev)
{
const struct of_device_id *match;
struct cdns_dphy *dphy;
struct of_phandle_args args;
struct resource res;
int ret;
ret = of_parse_phandle_with_args(pdev->dev.of_node, "phys",
"#phy-cells", 0, &args);
if (ret)
return ERR_PTR(-ENOENT);
match = of_match_node(cdns_dphy_of_match, args.np);
if (!match || !match->data)
return ERR_PTR(-EINVAL);
dphy = devm_kzalloc(&pdev->dev, sizeof(*dphy), GFP_KERNEL);
if (!dphy)
return ERR_PTR(-ENOMEM);
dphy->ops = match->data;
ret = of_address_to_resource(args.np, 0, &res);
if (ret)
return ERR_PTR(ret);
dphy->regs = devm_ioremap_resource(&pdev->dev, &res);
if (IS_ERR(dphy->regs))
return ERR_CAST(dphy->regs);
dphy->psm_clk = of_clk_get_by_name(args.np, "psm");
if (IS_ERR(dphy->psm_clk))
return ERR_CAST(dphy->psm_clk);
dphy->pll_ref_clk = of_clk_get_by_name(args.np, "pll_ref");
if (IS_ERR(dphy->pll_ref_clk)) {
ret = PTR_ERR(dphy->pll_ref_clk);
goto err_put_psm_clk;
}
if (dphy->ops->probe) {
ret = dphy->ops->probe(dphy);
if (ret)
goto err_put_pll_ref_clk;
}
return dphy;
err_put_pll_ref_clk:
clk_put(dphy->pll_ref_clk);
err_put_psm_clk:
clk_put(dphy->psm_clk);
return ERR_PTR(ret);
}
static void cdns_dphy_remove(struct cdns_dphy *dphy)
{
if (dphy->ops->remove)
dphy->ops->remove(dphy);
clk_put(dphy->pll_ref_clk);
clk_put(dphy->psm_clk);
}
static int cdns_dsi_drm_probe(struct platform_device *pdev)
{
struct cdns_dsi *dsi;
@ -1521,13 +1203,13 @@ static int cdns_dsi_drm_probe(struct platform_device *pdev)
if (irq < 0)
return irq;
dsi->dphy = cdns_dphy_probe(pdev);
dsi->dphy = devm_phy_get(&pdev->dev, "dphy");
if (IS_ERR(dsi->dphy))
return PTR_ERR(dsi->dphy);
ret = clk_prepare_enable(dsi->dsi_p_clk);
if (ret)
goto err_remove_dphy;
return ret;
val = readl(dsi->regs + ID_REG);
if (REV_VENDOR_ID(val) != 0xcad) {
@ -1585,9 +1267,6 @@ err_disable_runtime_pm:
err_disable_pclk:
clk_disable_unprepare(dsi->dsi_p_clk);
err_remove_dphy:
cdns_dphy_remove(dsi->dphy);
return ret;
}
@ -1597,7 +1276,6 @@ static int cdns_dsi_drm_remove(struct platform_device *pdev)
mipi_dsi_host_unregister(&dsi->base);
pm_runtime_disable(&pdev->dev);
cdns_dphy_remove(dsi->dphy);
return 0;
}
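
Taken together, the cdns-dsi hunks above replace the driver's private D-PHY register programming with the generic PHY framework's MIPI D-PHY API, with the PHY itself now looked up via devm_phy_get(&pdev->dev, "dphy"). The following is a minimal sketch of that consumer-side call sequence; the function name and its parameters are invented for illustration and error handling is trimmed, only the phy_* calls mirror what the diff does in cdns_dsi_check_conf() and cdns_dsi_hs_init().

#include <linux/phy/phy.h>
#include <linux/phy/phy-mipi-dphy.h>

static int example_dsi_bring_up_dphy(struct phy *dphy,
                                     unsigned long pixel_clk_hz,
                                     unsigned int bpp, unsigned int lanes,
                                     union phy_configure_opts *opts)
{
        int ret;

        /* Fill opts->mipi_dphy with defaults derived from the video mode. */
        ret = phy_mipi_dphy_get_default_config(pixel_clk_hz, bpp, lanes,
                                               &opts->mipi_dphy);
        if (ret)
                return ret;

        /* Let the PHY driver adjust or reject the requested configuration. */
        ret = phy_validate(dphy, PHY_MODE_MIPI_DPHY, 0, opts);
        if (ret)
                return ret;

        /* Same sequence as cdns_dsi_hs_init() after this patch. */
        phy_init(dphy);
        phy_set_mode(dphy, PHY_MODE_MIPI_DPHY);
        phy_configure(dphy, opts);
        phy_power_on(dphy);

        return 0;
}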

View File

@ -360,10 +360,70 @@ static const struct drm_crtc_helper_funcs cirrus_helper_funcs = {
};
/* CRTC setup */
static const uint32_t cirrus_formats_16[] = {
DRM_FORMAT_RGB565,
};
static const uint32_t cirrus_formats_24[] = {
DRM_FORMAT_RGB888,
DRM_FORMAT_RGB565,
};
static const uint32_t cirrus_formats_32[] = {
DRM_FORMAT_XRGB8888,
DRM_FORMAT_ARGB8888,
DRM_FORMAT_RGB888,
DRM_FORMAT_RGB565,
};
static struct drm_plane *cirrus_primary_plane(struct drm_device *dev)
{
const uint32_t *formats;
uint32_t nformats;
struct drm_plane *primary;
int ret;
switch (cirrus_bpp) {
case 16:
formats = cirrus_formats_16;
nformats = ARRAY_SIZE(cirrus_formats_16);
break;
case 24:
formats = cirrus_formats_24;
nformats = ARRAY_SIZE(cirrus_formats_24);
break;
case 32:
formats = cirrus_formats_32;
nformats = ARRAY_SIZE(cirrus_formats_32);
break;
default:
return NULL;
}
primary = kzalloc(sizeof(*primary), GFP_KERNEL);
if (primary == NULL) {
DRM_DEBUG_KMS("Failed to allocate primary plane\n");
return NULL;
}
ret = drm_universal_plane_init(dev, primary, 0,
&drm_primary_helper_funcs,
formats, nformats,
NULL,
DRM_PLANE_TYPE_PRIMARY, NULL);
if (ret) {
kfree(primary);
primary = NULL;
}
return primary;
}
static void cirrus_crtc_init(struct drm_device *dev)
{
struct cirrus_device *cdev = dev->dev_private;
struct cirrus_crtc *cirrus_crtc;
struct drm_plane *primary;
cirrus_crtc = kzalloc(sizeof(struct cirrus_crtc) +
(CIRRUSFB_CONN_LIMIT * sizeof(struct drm_connector *)),
@ -372,7 +432,15 @@ static void cirrus_crtc_init(struct drm_device *dev)
if (cirrus_crtc == NULL)
return;
drm_crtc_init(dev, &cirrus_crtc->base, &cirrus_crtc_funcs);
primary = cirrus_primary_plane(dev);
if (primary == NULL) {
kfree(cirrus_crtc);
return;
}
drm_crtc_init_with_planes(dev, &cirrus_crtc->base,
primary, NULL,
&cirrus_crtc_funcs, NULL);
drm_mode_crtc_set_gamma_size(&cirrus_crtc->base, CIRRUS_LUT_SIZE);
cdev->mode_info.crtc = cirrus_crtc;

View File

@ -348,7 +348,7 @@ int drm_agp_bind_ioctl(struct drm_device *dev, void *data,
* \return zero on success or a negative number on failure.
*
* Verifies the AGP device is present and has been acquired and looks up the
* AGP memory entry. If the memory it's currently bound, unbind it via
* AGP memory entry. If the memory is currently bound, unbind it via
* unbind_agp(). Frees it via free_agp() as well as the entry itself
* and unlinks from the doubly linked list it's inserted in.
*/

View File

@ -330,10 +330,17 @@ update_connector_routing(struct drm_atomic_state *state,
* Since the connector can be unregistered at any point during an
* atomic check or commit, this is racy. But that's OK: all we care
* about is ensuring that userspace can't do anything but shut off the
* display on a connector that was destroyed after its been notified,
* display on a connector that was destroyed after it's been notified,
* not before.
*
* Additionally, we also want to ignore connector registration when
* we're trying to restore an atomic state during system resume since
* there's a chance the connector may have been destroyed during the
* process, but it's better to ignore that than cause
* drm_atomic_helper_resume() to fail.
*/
if (drm_connector_is_unregistered(connector) && crtc_state->active) {
if (!state->duplicated && drm_connector_is_unregistered(connector) &&
crtc_state->active) {
DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] is not registered\n",
connector->base.id, connector->name);
return -EINVAL;
@ -685,7 +692,7 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
/*
* After all the routing has been prepared we need to add in any
* connector which is itself unchanged, but who's crtc changes it's
* connector which is itself unchanged, but whose crtc changes its
* configuration. This must be done before calling mode_fixup in case a
* crtc only changed its mode but has the same set of connectors.
*/
@ -1670,7 +1677,7 @@ EXPORT_SYMBOL(drm_atomic_helper_async_commit);
* drm_atomic_helper_setup_commit() and related functions.
*
* Committing the actual hardware state is done through the
* &drm_mode_config_helper_funcs.atomic_commit_tail callback, or it's default
* &drm_mode_config_helper_funcs.atomic_commit_tail callback, or its default
* implementation drm_atomic_helper_commit_tail().
*
* RETURNS:
@ -1893,7 +1900,7 @@ crtc_or_fake_commit(struct drm_atomic_state *state, struct drm_crtc *crtc)
* functions. drm_atomic_helper_wait_for_dependencies() must be called before
* actually committing the hardware state, and for nonblocking commits this call
* must be placed in the async worker. See also drm_atomic_helper_swap_state()
* and it's stall parameter, for when a driver's commit hooks look at the
* and its stall parameter, for when a driver's commit hooks look at the
* &drm_crtc.state, &drm_plane.state or &drm_connector.state pointer directly.
*
* Completion of the hardware commit step must be signalled using
@ -3180,6 +3187,7 @@ drm_atomic_helper_duplicate_state(struct drm_device *dev,
return ERR_PTR(-ENOMEM);
state->acquire_ctx = ctx;
state->duplicated = true;
drm_for_each_crtc(crtc, dev) {
struct drm_crtc_state *crtc_state;
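
The new state->duplicated flag above pairs with the update_connector_routing() change earlier in this file: a state duplicated for suspend/resume no longer fails the atomic check just because a connector was unregistered in between. A rough sketch of the driver-side pattern this protects, with made-up example_* names around the real helpers:

#include <linux/err.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_device.h>

struct example_driver {
        struct drm_device *drm;
        struct drm_atomic_state *suspend_state;
};

static int example_suspend(struct example_driver *priv)
{
        /* Duplicates the current state; the copy gets state->duplicated set. */
        priv->suspend_state = drm_atomic_helper_suspend(priv->drm);
        return PTR_ERR_OR_ZERO(priv->suspend_state);
}

static int example_resume(struct example_driver *priv)
{
        /*
         * Restoring the duplicated state now succeeds even if a connector
         * (e.g. an MST port) went away while the device was suspended.
         */
        return drm_atomic_helper_resume(priv->drm, priv->suspend_state);
}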

View File

@ -44,8 +44,8 @@
* DOC: overview
*
* This file contains the marshalling and demarshalling glue for the atomic UAPI
* in all it's form: The monster ATOMIC IOCTL itself, code for GET_PROPERTY and
* SET_PROPERTY IOCTls. Plus interface functions for compatibility helpers and
* in all its forms: The monster ATOMIC IOCTL itself, code for GET_PROPERTY and
* SET_PROPERTY IOCTLs. Plus interface functions for compatibility helpers and
* drivers which have special needs to construct their own atomic updates, e.g.
* for load detect or similiar.
*/

View File

@ -494,7 +494,7 @@ int drm_legacy_getmap_ioctl(struct drm_device *dev, void *data,
* isn't in use.
*
* Searches the map on drm_device::maplist, removes it from the list, see if
* its being used, and free any associate resource (such as MTRR's) if it's not
* it's being used, and free any associated resource (such as MTRR's) if it's not
* being on use.
*
* \sa drm_legacy_addmap
@ -621,7 +621,7 @@ int drm_legacy_rmmap_ioctl(struct drm_device *dev, void *data,
}
}
/* List has wrapped around to the head pointer, or its empty we didn't
/* List has wrapped around to the head pointer, or it's empty we didn't
* find anything.
*/
if (list_empty(&dev->maplist) || !map) {

View File

@ -1066,7 +1066,7 @@ EXPORT_SYMBOL(drm_mode_create_dvi_i_properties);
*
* content type (HDMI specific):
* Indicates content type setting to be used in HDMI infoframes to indicate
* content type for the external device, so that it adjusts it's display
* content type for the external device, so that it adjusts its display
* settings accordingly.
*
* The value of this property can be one of the following:

View File

@ -32,6 +32,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_device.h>
/**
* DOC: overview

View File

@ -1360,7 +1360,20 @@ int drm_dp_read_desc(struct drm_dp_aux *aux, struct drm_dp_desc *desc,
EXPORT_SYMBOL(drm_dp_read_desc);
/**
* DRM DP Helpers for DSC
* drm_dp_dsc_sink_max_slice_count() - Get the max slice count
* supported by the DSC sink.
* @dsc_dpcd: DSC capabilities from DPCD
* @is_edp: true if it's eDP, false for DP
*
* Read the slice capabilities DPCD register from DSC sink to get
* the maximum slice count supported. This is used to populate
* the DSC parameters in the &struct drm_dsc_config by the driver.
* The driver creates an infoframe from these parameters to populate
* &struct drm_dsc_pps_infoframe, which is then packed and sent to the sink
* as a DSC infoframe using the helper function drm_dsc_pps_infoframe_pack().
*
* Returns:
* Maximum slice count supported by the DSC sink, or 0 if the DPCD value is invalid
*/
u8 drm_dp_dsc_sink_max_slice_count(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE],
bool is_edp)
@ -1405,6 +1418,21 @@ u8 drm_dp_dsc_sink_max_slice_count(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE],
}
EXPORT_SYMBOL(drm_dp_dsc_sink_max_slice_count);
/**
* drm_dp_dsc_sink_line_buf_depth() - Get the line buffer depth in bits
* @dsc_dpcd: DSC capabilities from DPCD
*
* Read the DSC DPCD register to parse the line buffer depth in bits, which is
* the number of bits of precision within the decoder line buffer supported by
* the DSC sink. This is used to populate the DSC parameters in the
* &struct drm_dsc_config by the driver.
* The driver creates an infoframe from these parameters to populate
* &struct drm_dsc_pps_infoframe, which is then packed and sent to the sink
* as a DSC infoframe using the helper function drm_dsc_pps_infoframe_pack().
*
* Returns:
* Line buffer depth supported by the DSC panel, or 0 if the DPCD value is invalid
*/
u8 drm_dp_dsc_sink_line_buf_depth(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE])
{
u8 line_buf_depth = dsc_dpcd[DP_DSC_LINE_BUF_BIT_DEPTH - DP_DSC_SUPPORT];
@ -1434,6 +1462,23 @@ u8 drm_dp_dsc_sink_line_buf_depth(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE])
}
EXPORT_SYMBOL(drm_dp_dsc_sink_line_buf_depth);
/**
* drm_dp_dsc_sink_supported_input_bpcs() - Get all the input bits per component
* values supported by the DSC sink.
* @dsc_dpcd: DSC capabilities from DPCD
* @dsc_bpc: An array to be filled by this helper with supported
* input bpcs.
*
* Read the DSC DPCD from the sink device to parse the supported bits per
* component values. This is used to populate the DSC parameters
* in the &struct drm_dsc_config by the driver.
* The driver creates an infoframe from these parameters to populate
* &struct drm_dsc_pps_infoframe, which is then packed and sent to the sink
* as a DSC infoframe using the helper function drm_dsc_pps_infoframe_pack().
*
* Returns:
* Number of input BPC values parsed from the DPCD
*/
int drm_dp_dsc_sink_supported_input_bpcs(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE],
u8 dsc_bpc[3])
{
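
A hedged usage sketch for the three DSC DPCD helpers documented above. Only the drm_dp_dsc_sink_* calls come from this patch; the DPCD read and the example_* wrapper are a plausible caller (modelled on how a DP driver caches the DSC capability block), not part of the change.

#include <drm/drm_dp_helper.h>
#include <drm/drm_print.h>

static void example_read_dsc_caps(struct drm_dp_aux *aux, bool is_edp)
{
        u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE] = {};
        u8 dsc_bpc[3];
        u8 max_slices, line_buf_depth;
        int num_bpc;

        /* Cache the DSC receiver capability block from the sink. */
        if (drm_dp_dpcd_read(aux, DP_DSC_SUPPORT, dsc_dpcd,
                             sizeof(dsc_dpcd)) < 0)
                return;

        max_slices = drm_dp_dsc_sink_max_slice_count(dsc_dpcd, is_edp);
        line_buf_depth = drm_dp_dsc_sink_line_buf_depth(dsc_dpcd);
        num_bpc = drm_dp_dsc_sink_supported_input_bpcs(dsc_dpcd, dsc_bpc);

        /* A return value of 0 from any helper means the DPCD value is invalid. */
        DRM_DEBUG_KMS("DSC caps: max slices %d, line buf depth %d, %d bpc values\n",
                      max_slices, line_buf_depth, num_bpc);
}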

View File

@ -941,7 +941,7 @@ static void drm_dp_free_mst_branch_device(struct kref *kref)
* in-memory topology state from being changed in the middle of critical
* operations like changing the internal state of payload allocations. This
* means each branch and port will be considered to be connected to the rest
* of the topology until it's topology refcount reaches zero. Additionally,
* of the topology until its topology refcount reaches zero. Additionally,
* for ports this means that their associated &struct drm_connector will stay
* registered with userspace until the port's refcount reaches 0.
*
@ -979,8 +979,8 @@ static void drm_dp_free_mst_branch_device(struct kref *kref)
* same way as the C pointers used to reference a structure.
*
* As you can see in the above figure, every branch increments the topology
* refcount of it's children, and increments the malloc refcount of it's
* parent. Additionally, every payload increments the malloc refcount of it's
* refcount of its children, and increments the malloc refcount of its
* parent. Additionally, every payload increments the malloc refcount of its
* assigned port by 1.
*
* So, what would happen if MSTB #3 from the above figure was unplugged from
@ -997,9 +997,9 @@ static void drm_dp_free_mst_branch_device(struct kref *kref)
* of its parent, and finally its own malloc refcount. For MSTB #4 and port
* #4, this means they both have been disconnected from the topology and freed
* from memory. But, because payload #2 is still holding a reference to port
* #3, port #3 is removed from the topology but it's &struct drm_dp_mst_port
* #3, port #3 is removed from the topology but its &struct drm_dp_mst_port
* is still accessible from memory. This also means port #3 has not yet
* decremented the malloc refcount of MSTB #3, so it's &struct
* decremented the malloc refcount of MSTB #3, so its &struct
* drm_dp_mst_branch will also stay allocated in memory until port #3's
* malloc refcount reaches 0.
*
@ -1139,7 +1139,7 @@ static void drm_dp_destroy_mst_branch_device(struct kref *kref)
/**
* drm_dp_mst_topology_try_get_mstb() - Increment the topology refcount of a
* branch device unless its zero
* branch device unless it's zero
* @mstb: &struct drm_dp_mst_branch to increment the topology refcount of
*
* Attempts to grab a topology reference to @mstb, if it hasn't yet been
@ -1265,7 +1265,7 @@ static void drm_dp_destroy_port(struct kref *kref)
/**
* drm_dp_mst_topology_try_get_port() - Increment the topology refcount of a
* port unless its zero
* port unless it's zero
* @port: &struct drm_dp_mst_port to increment the topology refcount of
*
* Attempts to grab a topology reference to @port, if it hasn't yet been
@ -1471,7 +1471,7 @@ static bool drm_dp_port_setup_pdt(struct drm_dp_mst_port *port)
port->mstb->port_parent = port;
/*
* Make sure this port's memory allocation stays
* around until it's child MSTB releases it
* around until its child MSTB releases it
*/
drm_dp_mst_get_port_malloc(port);
@ -2271,7 +2271,7 @@ static int drm_dp_destroy_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_payload *payload)
{
DRM_DEBUG_KMS("\n");
/* its okay for these to fail */
/* it's okay for these to fail */
if (port) {
drm_dp_payload_send_msg(mgr, port, id, 0);
}
@ -2947,7 +2947,7 @@ enum drm_connector_status drm_dp_mst_detect_port(struct drm_connector *connector
{
enum drm_connector_status status = connector_status_disconnected;
/* we need to search for the port in the mgr in case its gone */
/* we need to search for the port in the mgr in case it's gone */
port = drm_dp_mst_topology_get_port_validated(mgr, port);
if (!port)
return connector_status_disconnected;
@ -3013,7 +3013,7 @@ struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_
{
struct edid *edid = NULL;
/* we need to search for the port in the mgr in case its gone */
/* we need to search for the port in the mgr in case it's gone */
port = drm_dp_mst_topology_get_port_validated(mgr, port);
if (!port)
return NULL;
@ -3117,10 +3117,6 @@ int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state,
if (IS_ERR(topology_state))
return PTR_ERR(topology_state);
port = drm_dp_mst_topology_get_port_validated(mgr, port);
if (port == NULL)
return -EINVAL;
/* Find the current allocation for this port, if any */
list_for_each_entry(pos, &topology_state->vcpis, next) {
if (pos->port == port) {
@ -3153,10 +3149,8 @@ int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state,
/* Add the new allocation to the state */
if (!vcpi) {
vcpi = kzalloc(sizeof(*vcpi), GFP_KERNEL);
if (!vcpi) {
ret = -ENOMEM;
goto out;
}
if (!vcpi)
return -ENOMEM;
drm_dp_mst_get_port_malloc(port);
vcpi->port = port;
@ -3165,8 +3159,6 @@ int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state,
vcpi->vcpi = req_slots;
ret = req_slots;
out:
drm_dp_mst_topology_put_port(port);
return ret;
}
EXPORT_SYMBOL(drm_dp_atomic_find_vcpi_slots);
@ -3180,7 +3172,7 @@ EXPORT_SYMBOL(drm_dp_atomic_find_vcpi_slots);
* Releases any VCPI slots that have been allocated to a port in the atomic
* state. Any atomic drivers which support MST must call this function in
* their &drm_connector_helper_funcs.atomic_check() callback when the
* connector will no longer have VCPI allocated (e.g. because it's CRTC was
* connector will no longer have VCPI allocated (e.g. because its CRTC was
* removed) when it had VCPI allocated in the previous atomic state.
*
* It is OK to call this even if @port has been removed from the system.
@ -3268,7 +3260,7 @@ bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
DRM_DEBUG_KMS("initing vcpi for pbn=%d slots=%d\n",
pbn, port->vcpi.num_slots);
/* Keep port allocated until it's payload has been removed */
/* Keep port allocated until its payload has been removed */
drm_dp_mst_get_port_malloc(port);
drm_dp_mst_topology_put_port(port);
return true;
@ -3300,7 +3292,7 @@ EXPORT_SYMBOL(drm_dp_mst_get_vcpi_slots);
void drm_dp_mst_reset_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
{
/*
* A port with VCPI will remain allocated until it's VCPI is
* A port with VCPI will remain allocated until its VCPI is
* released, no verified ref needed
*/
@ -3311,15 +3303,16 @@ EXPORT_SYMBOL(drm_dp_mst_reset_vcpi_slots);
/**
* drm_dp_mst_deallocate_vcpi() - deallocate a VCPI
* @mgr: manager for this port
* @port: unverified port to deallocate vcpi for
* @port: port to deallocate vcpi for
*
* This can be called unconditionally, regardless of whether
* drm_dp_mst_allocate_vcpi() succeeded or not.
*/
void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port)
{
/*
* A port with VCPI will remain allocated until it's VCPI is
* released, no verified ref needed
*/
if (!port->vcpi.vcpi)
return;
drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
port->vcpi.num_slots = 0;
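
For context, a sketch of the atomic VCPI accounting pattern these helpers implement; the example_* wrapper and its parameters are invented, the drm_dp_* calls are real. Note that the hunk above drops the port re-validation from drm_dp_atomic_find_vcpi_slots(), which is why the i915 and nouveau hunks below can drop their zombie-connector guards around it.

#include <drm/drm_atomic.h>
#include <drm/drm_dp_mst_helper.h>

static int example_mst_encoder_atomic_check(struct drm_atomic_state *state,
                                            struct drm_dp_mst_topology_mgr *mgr,
                                            struct drm_dp_mst_port *port,
                                            int clock_khz, int bpp)
{
        int pbn, slots;

        /* Bandwidth needed by this stream, in PBN units. */
        pbn = drm_dp_calc_pbn_mode(clock_khz, bpp);

        slots = drm_dp_atomic_find_vcpi_slots(state, mgr, port, pbn);
        if (slots < 0)
                return slots;

        /*
         * On the disable path the allocation is returned with
         * drm_dp_atomic_release_vcpi_slots(state, mgr, port).
         */
        return 0;
}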

View File

@ -17,6 +17,12 @@
/**
* DOC: dsc helpers
*
* The VESA specification for DP 1.4 adds a new feature called Display Stream
* Compression (DSC), used to compress the pixel bits before sending them over
* the DP/eDP/MIPI DSI interface. DSC must be enabled so that the existing
* display interfaces can support high resolutions at higher frame rates using
* the maximum available link capacity of these interfaces.
*
* These functions contain some common logic and helpers to deal with VESA
* Display Stream Compression standard required for DSC on Display Port/eDP or
* MIPI display interfaces.
@ -26,6 +32,13 @@
* drm_dsc_dp_pps_header_init() - Initializes the PPS Header
* for DisplayPort as per the DP 1.4 spec.
* @pps_sdp: Secondary data packet for DSC Picture Parameter Set
* as defined in &struct drm_dsc_pps_infoframe
*
* DP 1.4 spec defines the secondary data packet for sending the
* picture parameter infoframes from the source to the sink.
* This function populates the pps header defined in
* &struct drm_dsc_pps_infoframe as per the header bytes defined
* in &struct dp_sdp_header.
*/
void drm_dsc_dp_pps_header_init(struct drm_dsc_pps_infoframe *pps_sdp)
{
@ -38,15 +51,20 @@ EXPORT_SYMBOL(drm_dsc_dp_pps_header_init);
/**
* drm_dsc_pps_infoframe_pack() - Populates the DSC PPS infoframe
* using the DSC configuration parameters in the order expected
* by the DSC Display Sink device. For the DSC, the sink device
* expects the PPS payload in the big endian format for the fields
* that span more than 1 byte.
*
* @pps_sdp:
* Secondary data packet for DSC Picture Parameter Set
* Secondary data packet for DSC Picture Parameter Set. This is defined
* by &struct drm_dsc_pps_infoframe
* @dsc_cfg:
* DSC Configuration data filled by driver
* DSC Configuration data filled by driver as defined by
* &struct drm_dsc_config
*
* DSC source device sends a secondary data packet filled with all the
* picture parameter set (PPS) information required by the sink to decode
* the compressed frame. The driver populates the DSC PPS infoframe using the DSC
* configuration parameters in the order expected by the DSC Display Sink
* device. For the DSC, the sink device expects the PPS payload in the big
* endian format for the fields that span more than 1 byte.
*/
void drm_dsc_pps_infoframe_pack(struct drm_dsc_pps_infoframe *pps_sdp,
const struct drm_dsc_config *dsc_cfg)
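
A minimal sketch of the packing flow the two kernel-docs above describe. How the struct drm_dsc_config is computed is driver-specific and omitted; only the two drm_dsc_* calls are from this file, and actually sending the packed SDP over the link is left to the driver.

#include <drm/drm_dsc.h>

static void example_build_pps_sdp(const struct drm_dsc_config *dsc_cfg)
{
        struct drm_dsc_pps_infoframe pps_sdp;

        /* SDP header bytes as defined for DP 1.4. */
        drm_dsc_dp_pps_header_init(&pps_sdp);

        /* PPS payload, big-endian for fields wider than one byte. */
        drm_dsc_pps_infoframe_pack(&pps_sdp, dsc_cfg);

        /* pps_sdp is now ready to be written out to the sink by the driver. */
}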

View File

@ -2483,7 +2483,7 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
/*
* This function checks if rotation is necessary because of panel orientation
* and if it is, if it is supported.
* If rotation is necessary and supported, its gets set in fb_crtc.rotation.
* If rotation is necessary and supported, it gets set in fb_crtc.rotation.
* If rotation is necessary but not supported, a DRM_MODE_ROTATE_* flag gets
* or-ed into fb_helper->sw_rotations. In drm_setup_crtcs_fb() we check if only
* one bit is set and then we set fb_info.fbcon_rotate_hint to make fbcon do

View File

@ -701,7 +701,7 @@ int drm_event_reserve_init(struct drm_device *dev,
EXPORT_SYMBOL(drm_event_reserve_init);
/**
* drm_event_cancel_free - free a DRM event and release it's space
* drm_event_cancel_free - free a DRM event and release its space
* @dev: DRM device
* @p: tracking structure for the pending event
*

View File

@ -238,6 +238,15 @@ const struct drm_format_info *__drm_format_info(u32 format)
{ .format = DRM_FORMAT_X0L2, .depth = 0, .num_planes = 1,
.char_per_block = { 8, 0, 0 }, .block_w = { 2, 0, 0 }, .block_h = { 2, 0, 0 },
.hsub = 2, .vsub = 2, .is_yuv = true },
{ .format = DRM_FORMAT_P010, .depth = 0, .num_planes = 2,
.char_per_block = { 2, 4, 0 }, .block_w = { 1, 0, 0 }, .block_h = { 1, 0, 0 },
.hsub = 2, .vsub = 2, .is_yuv = true},
{ .format = DRM_FORMAT_P012, .depth = 0, .num_planes = 2,
.char_per_block = { 2, 4, 0 }, .block_w = { 1, 0, 0 }, .block_h = { 1, 0, 0 },
.hsub = 2, .vsub = 2, .is_yuv = true},
{ .format = DRM_FORMAT_P016, .depth = 0, .num_planes = 2,
.char_per_block = { 2, 4, 0 }, .block_w = { 1, 0, 0 }, .block_h = { 1, 0, 0 },
.hsub = 2, .vsub = 2, .is_yuv = true},
};
unsigned int i;
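
A small, assumption-laden example of how the new P010/P012/P016 entries are consumed through the fourcc API: plane 0 carries one padded 16-bit sample per pixel, plane 1 carries interleaved CbCr at 4 bytes per 2x2-subsampled block, exactly as encoded in the char_per_block/hsub/vsub fields above. The helper name and the manual size calculation are illustrative only.

#include <linux/kernel.h>
#include <drm/drm_fourcc.h>

static size_t example_p010_buffer_size(unsigned int width, unsigned int height)
{
        const struct drm_format_info *info = drm_format_info(DRM_FORMAT_P010);
        size_t y_size, uv_size;

        /* Plane 0: 2 bytes per luma sample. */
        y_size = (size_t)width * info->char_per_block[0] * height;

        /* Plane 1: 4 bytes per CbCr pair, 2x2 chroma subsampling. */
        uv_size = (size_t)DIV_ROUND_UP(width, info->hsub) *
                  info->char_per_block[1] *
                  DIV_ROUND_UP(height, info->vsub);

        /* num_planes == 2 and is_yuv is set for all three new formats. */
        return y_size + uv_size;
}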

View File

@ -773,7 +773,7 @@ EXPORT_SYMBOL(drm_framebuffer_lookup);
* @fb: fb to unregister
*
* Drivers need to call this when cleaning up driver-private framebuffers, e.g.
* those used for fbdev. Note that the caller must hold a reference of it's own,
* those used for fbdev. Note that the caller must hold a reference of its own,
* i.e. the object may not be destroyed through this call (since it'll lead to a
* locking inversion).
*

View File

@ -660,7 +660,7 @@ void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
EXPORT_SYMBOL(drm_gem_put_pages);
/**
* drm_gem_object_lookup - look up a GEM object from it's handle
* drm_gem_object_lookup - look up a GEM object from its handle
* @filp: DRM file private date
* @handle: userspace handle
*

View File

@ -816,7 +816,7 @@ EXPORT_SYMBOL(drm_mm_scan_add_block);
* When the scan list is empty, the selected memory nodes can be freed. An
* immediately following drm_mm_insert_node_in_range_generic() or one of the
* simpler versions of that function with !DRM_MM_SEARCH_BEST will then return
* the just freed block (because its at the top of the free_stack list).
* the just freed block (because it's at the top of the free_stack list).
*
* Returns:
* True if this block should be evicted, false otherwise. Will always

View File

@ -1272,7 +1272,7 @@ const char *drm_get_mode_status_name(enum drm_mode_status status)
* @verbose: be verbose about it
*
* This helper function can be used to prune a display mode list after
* validation has been completed. All modes who's status is not MODE_OK will be
* validation has been completed. All modes whose status is not MODE_OK will be
* removed from the list, and if @verbose the status code and mode name is also
* printed to dmesg.
*/

View File

@ -22,8 +22,10 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_modeset_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
/**

View File

@ -866,7 +866,7 @@ err:
* value doesn't become invalid part way through the property update due to
* race). The value returned by reference via 'obj' should be passed back
* to drm_property_change_valid_put() after the property is set (and the
* object to which the property is attached has a chance to take it's own
* object to which the property is attached has a chance to take its own
* reference).
*/
bool drm_property_change_valid_get(struct drm_property *property,

View File

@ -207,114 +207,6 @@ int drm_rect_calc_vscale(const struct drm_rect *src,
}
EXPORT_SYMBOL(drm_rect_calc_vscale);
/**
* drm_calc_hscale_relaxed - calculate the horizontal scaling factor
* @src: source window rectangle
* @dst: destination window rectangle
* @min_hscale: minimum allowed horizontal scaling factor
* @max_hscale: maximum allowed horizontal scaling factor
*
* Calculate the horizontal scaling factor as
* (@src width) / (@dst width).
*
* If the calculated scaling factor is below @min_vscale,
* decrease the height of rectangle @dst to compensate.
*
* If the calculated scaling factor is above @max_vscale,
* decrease the height of rectangle @src to compensate.
*
* If the scale is below 1 << 16, round down. If the scale is above
* 1 << 16, round up. This will calculate the scale with the most
* pessimistic limit calculation.
*
* RETURNS:
* The horizontal scaling factor.
*/
int drm_rect_calc_hscale_relaxed(struct drm_rect *src,
struct drm_rect *dst,
int min_hscale, int max_hscale)
{
int src_w = drm_rect_width(src);
int dst_w = drm_rect_width(dst);
int hscale = drm_calc_scale(src_w, dst_w);
if (hscale < 0 || dst_w == 0)
return hscale;
if (hscale < min_hscale) {
int max_dst_w = src_w / min_hscale;
drm_rect_adjust_size(dst, max_dst_w - dst_w, 0);
return min_hscale;
}
if (hscale > max_hscale) {
int max_src_w = dst_w * max_hscale;
drm_rect_adjust_size(src, max_src_w - src_w, 0);
return max_hscale;
}
return hscale;
}
EXPORT_SYMBOL(drm_rect_calc_hscale_relaxed);
/**
* drm_rect_calc_vscale_relaxed - calculate the vertical scaling factor
* @src: source window rectangle
* @dst: destination window rectangle
* @min_vscale: minimum allowed vertical scaling factor
* @max_vscale: maximum allowed vertical scaling factor
*
* Calculate the vertical scaling factor as
* (@src height) / (@dst height).
*
* If the calculated scaling factor is below @min_vscale,
* decrease the height of rectangle @dst to compensate.
*
* If the calculated scaling factor is above @max_vscale,
* decrease the height of rectangle @src to compensate.
*
* If the scale is below 1 << 16, round down. If the scale is above
* 1 << 16, round up. This will calculate the scale with the most
* pessimistic limit calculation.
*
* RETURNS:
* The vertical scaling factor.
*/
int drm_rect_calc_vscale_relaxed(struct drm_rect *src,
struct drm_rect *dst,
int min_vscale, int max_vscale)
{
int src_h = drm_rect_height(src);
int dst_h = drm_rect_height(dst);
int vscale = drm_calc_scale(src_h, dst_h);
if (vscale < 0 || dst_h == 0)
return vscale;
if (vscale < min_vscale) {
int max_dst_h = src_h / min_vscale;
drm_rect_adjust_size(dst, 0, max_dst_h - dst_h);
return min_vscale;
}
if (vscale > max_vscale) {
int max_src_h = dst_h * max_vscale;
drm_rect_adjust_size(src, 0, max_src_h - src_h);
return max_vscale;
}
return vscale;
}
EXPORT_SYMBOL(drm_rect_calc_vscale_relaxed);
/**
* drm_rect_debug_print - print the rectangle information
* @prefix: prefix string

View File

@ -48,7 +48,7 @@
* Drivers must initialize the vertical blanking handling core with a call to
* drm_vblank_init(). Minimally, a driver needs to implement
* &drm_crtc_funcs.enable_vblank and &drm_crtc_funcs.disable_vblank plus call
* drm_crtc_handle_vblank() in it's vblank interrupt handler for working vblank
* drm_crtc_handle_vblank() in its vblank interrupt handler for working vblank
* support.
*
* Vertical blanking interrupts can be enabled by the DRM core or by drivers
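
A bare-bones sketch of the contract this paragraph describes; all example_* names are made up, only the drm_* entry points are real.

#include <drm/drm_crtc.h>
#include <drm/drm_vblank.h>

static int example_enable_vblank(struct drm_crtc *crtc)
{
        /* Unmask the hardware vblank interrupt for this CRTC. */
        return 0;
}

static void example_disable_vblank(struct drm_crtc *crtc)
{
        /* Mask the hardware vblank interrupt again. */
}

static const struct drm_crtc_funcs example_crtc_funcs = {
        /* ... the usual CRTC funcs ... */
        .enable_vblank = example_enable_vblank,
        .disable_vblank = example_disable_vblank,
};

/* Called from the driver's interrupt handler on every vertical blank. */
static void example_vblank_irq(struct drm_crtc *crtc)
{
        drm_crtc_handle_vblank(crtc);
}

/* At init, once the CRTCs exist: drm_vblank_init(dev, dev->mode_config.num_crtc); */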

View File

@ -41,8 +41,10 @@
#include <linux/vt.h>
#include <acpi/video.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_irq.h>
#include <drm/drm_probe_helper.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"

View File

@ -31,7 +31,9 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>
#include "intel_drv.h"
/**

View File

@ -32,7 +32,9 @@
*/
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>
#include "intel_drv.h"
struct intel_plane *intel_plane_alloc(void)

View File

@ -24,23 +24,32 @@
* Eric Anholt <eric@anholt.net>
*/
#include <linux/module.h>
#include <linux/input.h>
#include <linux/i2c.h>
#include <linux/input.h>
#include <linux/intel-iommu.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/reservation.h>
#include <linux/slab.h>
#include <linux/vgaarb.h>
#include <drm/drm_edid.h>
#include <drm/i915_drm.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_rect.h>
#include <drm/drm_atomic_uapi.h>
#include <linux/intel-iommu.h>
#include <linux/reservation.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_gem_clflush.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include "intel_dsi.h"
#include "intel_frontbuffer.h"
#include "intel_drv.h"
#include "intel_dsi.h"

View File

@ -80,17 +80,12 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
mst_pbn = drm_dp_calc_pbn_mode(adjusted_mode->crtc_clock, bpp);
pipe_config->pbn = mst_pbn;
/* Zombie connectors can't have VCPI slots */
if (!drm_connector_is_unregistered(connector)) {
slots = drm_dp_atomic_find_vcpi_slots(state,
&intel_dp->mst_mgr,
port,
mst_pbn);
if (slots < 0) {
DRM_DEBUG_KMS("failed finding vcpi slots:%d\n",
slots);
return slots;
}
slots = drm_dp_atomic_find_vcpi_slots(state, &intel_dp->mst_mgr, port,
mst_pbn);
if (slots < 0) {
DRM_DEBUG_KMS("failed finding vcpi slots:%d\n",
slots);
return slots;
}
intel_link_compute_m_n(bpp, lane_count,

View File

@ -26,13 +26,16 @@
*/
#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>
#include "i915_drv.h"
#include "intel_drv.h"
#include "../../../platform/x86/intel_ips.h"
#include <linux/module.h>
#include <drm/drm_atomic_helper.h>
/**
* DOC: RC6

View File

@ -116,6 +116,12 @@ struct nv50_head_atom {
u8 depth:4;
} or;
/* Currently only used for MST */
struct {
int pbn;
u8 tu:6;
} dp;
union nv50_head_atom_mask {
struct {
bool olut:1;

View File

@ -659,8 +659,6 @@ struct nv50_mstc {
struct drm_display_mode *native;
struct edid *edid;
int pbn;
};
struct nv50_msto {
@ -765,18 +763,26 @@ nv50_msto_atomic_check(struct drm_encoder *encoder,
struct drm_connector *connector = conn_state->connector;
struct nv50_mstc *mstc = nv50_mstc(connector);
struct nv50_mstm *mstm = mstc->mstm;
int bpp = connector->display_info.bpc * 3;
struct nv50_head_atom *asyh = nv50_head_atom(crtc_state);
int slots;
mstc->pbn = drm_dp_calc_pbn_mode(crtc_state->adjusted_mode.clock,
bpp);
/* When restoring duplicated states, we need to make sure that the
* bw remains the same and avoid recalculating it, as the connector's
* bpc may have changed after the state was duplicated
*/
if (!state->duplicated)
asyh->dp.pbn =
drm_dp_calc_pbn_mode(crtc_state->adjusted_mode.clock,
connector->display_info.bpc * 3);
if (drm_atomic_crtc_needs_modeset(crtc_state) &&
!drm_connector_is_unregistered(connector)) {
if (drm_atomic_crtc_needs_modeset(crtc_state)) {
slots = drm_dp_atomic_find_vcpi_slots(state, &mstm->mgr,
mstc->port, mstc->pbn);
mstc->port,
asyh->dp.pbn);
if (slots < 0)
return slots;
asyh->dp.tu = slots;
}
return nv50_outp_atomic_check_view(encoder, crtc_state, conn_state,
@ -787,13 +793,13 @@ static void
nv50_msto_enable(struct drm_encoder *encoder)
{
struct nv50_head *head = nv50_head(encoder->crtc);
struct nv50_head_atom *armh = nv50_head_atom(head->base.base.state);
struct nv50_msto *msto = nv50_msto(encoder);
struct nv50_mstc *mstc = NULL;
struct nv50_mstm *mstm = NULL;
struct drm_connector *connector;
struct drm_connector_list_iter conn_iter;
u8 proto, depth;
int slots;
bool r;
drm_connector_list_iter_begin(encoder->dev, &conn_iter);
@ -809,8 +815,8 @@ nv50_msto_enable(struct drm_encoder *encoder)
if (WARN_ON(!mstc))
return;
slots = drm_dp_find_vcpi_slots(&mstm->mgr, mstc->pbn);
r = drm_dp_mst_allocate_vcpi(&mstm->mgr, mstc->port, mstc->pbn, slots);
r = drm_dp_mst_allocate_vcpi(&mstm->mgr, mstc->port, armh->dp.pbn,
armh->dp.tu);
WARN_ON(!r);
if (!mstm->links++)
@ -828,8 +834,7 @@ nv50_msto_enable(struct drm_encoder *encoder)
default: depth = 0x6; break;
}
mstm->outp->update(mstm->outp, head->base.index,
nv50_head_atom(head->base.base.state), proto, depth);
mstm->outp->update(mstm->outp, head->base.index, armh, proto, depth);
msto->head = head;
msto->mstc = mstc;

View File

@ -413,6 +413,7 @@ nv50_head_atomic_duplicate_state(struct drm_crtc *crtc)
asyh->ovly = armh->ovly;
asyh->dither = armh->dither;
asyh->procamp = armh->procamp;
asyh->dp = armh->dp;
asyh->clr.mask = 0;
asyh->set.mask = 0;
return &asyh->state;

View File

@ -9,14 +9,17 @@
#include <linux/clk.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/sys_soc.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_device.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_vblank.h>
#include "rcar_du_crtc.h"
#include "rcar_du_drv.h"

View File

@ -20,6 +20,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_probe_helper.h>

View File

@ -10,10 +10,12 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_device.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
#include <linux/of_graph.h>
#include <linux/wait.h>

View File

@ -10,7 +10,9 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_device.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_plane_helper.h>

View File

@ -13,6 +13,7 @@
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_vblank.h>
#include <linux/bitops.h>
#include <linux/dma-mapping.h>

View File

@ -45,10 +45,11 @@ config DRM_SUN6I_DSI
default MACH_SUN8I
select CRC_CCITT
select DRM_MIPI_DSI
select PHY_SUN6I_MIPI_DPHY
help
Choose this option if you have an Allwinner SoC with
MIPI-DSI support. If M is selected, the module will be called
sun6i-dsi
sun6i_mipi_dsi.
config DRM_SUN8I_DW_HDMI
tristate "Support for Allwinner version of DesignWare HDMI"

View File

@ -24,9 +24,6 @@ sun4i-tcon-y += sun4i_lvds.o
sun4i-tcon-y += sun4i_tcon.o
sun4i-tcon-y += sun4i_rgb.o
sun6i-dsi-y += sun6i_mipi_dphy.o
sun6i-dsi-y += sun6i_mipi_dsi.o
obj-$(CONFIG_DRM_SUN4I) += sun4i-drm.o
obj-$(CONFIG_DRM_SUN4I) += sun4i-tcon.o
obj-$(CONFIG_DRM_SUN4I) += sun4i_tv.o
@ -37,7 +34,7 @@ ifdef CONFIG_DRM_SUN4I_BACKEND
obj-$(CONFIG_DRM_SUN4I) += sun4i-frontend.o
endif
obj-$(CONFIG_DRM_SUN4I_HDMI) += sun4i-drm-hdmi.o
obj-$(CONFIG_DRM_SUN6I_DSI) += sun6i-dsi.o
obj-$(CONFIG_DRM_SUN6I_DSI) += sun6i_mipi_dsi.o
obj-$(CONFIG_DRM_SUN8I_DW_HDMI) += sun8i-drm-hdmi.o
obj-$(CONFIG_DRM_SUN8I_MIXER) += sun8i-mixer.o
obj-$(CONFIG_DRM_SUN8I_TCON_TOP) += sun8i_tcon_top.o

View File

@ -16,6 +16,7 @@
#include <linux/slab.h>
#include <linux/phy/phy.h>
#include <linux/phy/phy-mipi-dphy.h>
#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
@ -616,6 +617,8 @@ static void sun6i_dsi_encoder_enable(struct drm_encoder *encoder)
struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
struct sun6i_dsi *dsi = encoder_to_sun6i_dsi(encoder);
struct mipi_dsi_device *device = dsi->device;
union phy_configure_opts opts = { 0 };
struct phy_configure_opts_mipi_dphy *cfg = &opts.mipi_dphy;
u16 delay;
DRM_DEBUG_DRIVER("Enabling DSI output\n");
@ -634,8 +637,15 @@ static void sun6i_dsi_encoder_enable(struct drm_encoder *encoder)
sun6i_dsi_setup_format(dsi, mode);
sun6i_dsi_setup_timings(dsi, mode);
sun6i_dphy_init(dsi->dphy, device->lanes);
sun6i_dphy_power_on(dsi->dphy, device->lanes);
phy_init(dsi->dphy);
phy_mipi_dphy_get_default_config(mode->clock * 1000,
mipi_dsi_pixel_format_to_bpp(device->format),
device->lanes, cfg);
phy_set_mode(dsi->dphy, PHY_MODE_MIPI_DPHY);
phy_configure(dsi->dphy, &opts);
phy_power_on(dsi->dphy);
if (!IS_ERR(dsi->panel))
drm_panel_prepare(dsi->panel);
@ -673,8 +683,8 @@ static void sun6i_dsi_encoder_disable(struct drm_encoder *encoder)
drm_panel_unprepare(dsi->panel);
}
sun6i_dphy_power_off(dsi->dphy);
sun6i_dphy_exit(dsi->dphy);
phy_power_off(dsi->dphy);
phy_exit(dsi->dphy);
pm_runtime_put(dsi->dev);
}
@ -967,7 +977,6 @@ static const struct component_ops sun6i_dsi_ops = {
static int sun6i_dsi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *dphy_node;
struct sun6i_dsi *dsi;
struct resource *res;
void __iomem *base;
@ -1013,11 +1022,10 @@ static int sun6i_dsi_probe(struct platform_device *pdev)
*/
clk_set_rate_exclusive(dsi->mod_clk, 297000000);
dphy_node = of_parse_phandle(dev->of_node, "phys", 0);
ret = sun6i_dphy_probe(dsi, dphy_node);
of_node_put(dphy_node);
if (ret) {
dsi->dphy = devm_phy_get(dev, "dphy");
if (IS_ERR(dsi->dphy)) {
dev_err(dev, "Couldn't get the MIPI D-PHY\n");
ret = PTR_ERR(dsi->dphy);
goto err_unprotect_clk;
}
@ -1026,7 +1034,7 @@ static int sun6i_dsi_probe(struct platform_device *pdev)
ret = mipi_dsi_host_register(&dsi->host);
if (ret) {
dev_err(dev, "Couldn't register MIPI-DSI host\n");
goto err_remove_phy;
goto err_pm_disable;
}
ret = component_add(&pdev->dev, &sun6i_dsi_ops);
@ -1039,9 +1047,8 @@ static int sun6i_dsi_probe(struct platform_device *pdev)
err_remove_dsi_host:
mipi_dsi_host_unregister(&dsi->host);
err_remove_phy:
err_pm_disable:
pm_runtime_disable(dev);
sun6i_dphy_remove(dsi);
err_unprotect_clk:
clk_rate_exclusive_put(dsi->mod_clk);
return ret;
@ -1055,7 +1062,6 @@ static int sun6i_dsi_remove(struct platform_device *pdev)
component_del(&pdev->dev, &sun6i_dsi_ops);
mipi_dsi_host_unregister(&dsi->host);
pm_runtime_disable(dev);
sun6i_dphy_remove(dsi);
clk_rate_exclusive_put(dsi->mod_clk);
return 0;

View File

@ -13,13 +13,6 @@
#include <drm/drm_encoder.h>
#include <drm/drm_mipi_dsi.h>
struct sun6i_dphy {
struct clk *bus_clk;
struct clk *mod_clk;
struct regmap *regs;
struct reset_control *reset;
};
struct sun6i_dsi {
struct drm_connector connector;
struct drm_encoder encoder;
@ -29,7 +22,7 @@ struct sun6i_dsi {
struct clk *mod_clk;
struct regmap *regs;
struct reset_control *reset;
struct sun6i_dphy *dphy;
struct phy *dphy;
struct device *dev;
struct sun4i_drv *drv;
@ -52,12 +45,4 @@ static inline struct sun6i_dsi *encoder_to_sun6i_dsi(const struct drm_encoder *e
return container_of(encoder, struct sun6i_dsi, encoder);
};
int sun6i_dphy_probe(struct sun6i_dsi *dsi, struct device_node *node);
int sun6i_dphy_remove(struct sun6i_dsi *dsi);
int sun6i_dphy_init(struct sun6i_dphy *dphy, unsigned int lanes);
int sun6i_dphy_power_on(struct sun6i_dphy *dphy, unsigned int lanes);
int sun6i_dphy_power_off(struct sun6i_dphy *dphy);
int sun6i_dphy_exit(struct sun6i_dphy *dphy);
#endif /* _SUN6I_MIPI_DSI_H_ */

View File

@ -117,7 +117,7 @@ int vc4_perfmon_create_ioctl(struct drm_device *dev, void *data,
return -EINVAL;
}
perfmon = kzalloc(sizeof(*perfmon) + (req->ncounters * sizeof(u64)),
perfmon = kzalloc(struct_size(perfmon, counters, req->ncounters),
GFP_KERNEL);
if (!perfmon)
return -ENOMEM;
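The struct_size() helper used here comes from include/linux/overflow.h: it evaluates to sizeof(*ptr) plus count times the size of the trailing flexible-array element, and saturates to SIZE_MAX on overflow so the allocation fails instead of silently being undersized. A minimal sketch of the pattern, with the example_perfmon type and example_perfmon_alloc() helper as illustrative stand-ins rather than actual vc4 code:

#include <linux/overflow.h>
#include <linux/slab.h>

struct example_perfmon {
        int users;
        u64 counters[];                 /* trailing flexible array member */
};

static struct example_perfmon *example_perfmon_alloc(unsigned int ncounters)
{
        struct example_perfmon *pm;

        /* Equivalent to sizeof(*pm) + ncounters * sizeof(pm->counters[0]),
         * except that an overflowing ncounters saturates to SIZE_MAX and
         * makes kzalloc() return NULL rather than a too-small buffer. */
        pm = kzalloc(struct_size(pm, counters, ncounters), GFP_KERNEL);
        return pm;
}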

View File

@ -10,13 +10,17 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_probe_helper.h>
static void _vblank_handle(struct vkms_output *output)
static enum hrtimer_restart vkms_vblank_simulate(struct hrtimer *timer)
{
struct vkms_output *output = container_of(timer, struct vkms_output,
vblank_hrtimer);
struct drm_crtc *crtc = &output->crtc;
struct vkms_crtc_state *state = to_vkms_crtc_state(crtc->state);
u64 ret_overrun;
bool ret;
spin_lock(&output->lock);
ret = drm_crtc_handle_vblank(crtc);
if (!ret)
DRM_ERROR("vkms failure on handling vblank");
@ -37,19 +41,11 @@ static void _vblank_handle(struct vkms_output *output)
DRM_WARN("failed to queue vkms_crc_work_handle");
}
spin_unlock(&output->lock);
}
static enum hrtimer_restart vkms_vblank_simulate(struct hrtimer *timer)
{
struct vkms_output *output = container_of(timer, struct vkms_output,
vblank_hrtimer);
int ret_overrun;
_vblank_handle(output);
ret_overrun = hrtimer_forward_now(&output->vblank_hrtimer,
output->period_ns);
WARN_ON(ret_overrun != 1);
spin_unlock(&output->lock);
return HRTIMER_RESTART;
}
@ -87,6 +83,9 @@ bool vkms_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe,
*vblank_time = output->vblank_hrtimer.node.expires;
if (!in_vblank_irq)
*vblank_time -= output->period_ns;
return true;
}

View File

@ -235,8 +235,14 @@ static int gem_mmap_obj(struct xen_gem_object *xen_obj,
vma->vm_flags &= ~VM_PFNMAP;
vma->vm_flags |= VM_MIXEDMAP;
vma->vm_pgoff = 0;
vma->vm_page_prot =
pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
/*
* According to Xen on ARM ABI (xen/include/public/arch-arm.h):
* all memory which is shared with other entities in the system
* (including the hypervisor and other guests) must reside in memory
* which is mapped as Normal Inner Write-Back Outer Write-Back
* Inner-Shareable.
*/
vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
/*
* vm_operations_struct.fault handler will be called if CPU access
@ -282,8 +288,9 @@ void *xen_drm_front_gem_prime_vmap(struct drm_gem_object *gem_obj)
if (!xen_obj->pages)
return NULL;
/* Please see comment in gem_mmap_obj on mapping and attributes. */
return vmap(xen_obj->pages, xen_obj->num_pages,
VM_MAP, pgprot_writecombine(PAGE_KERNEL));
VM_MAP, PAGE_KERNEL);
}
void xen_drm_front_gem_prime_vunmap(struct drm_gem_object *gem_obj,

View File

@ -54,7 +54,7 @@ fb_create(struct drm_device *dev, struct drm_file *filp,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
struct xen_drm_front_drm_info *drm_info = dev->dev_private;
static struct drm_framebuffer *fb;
struct drm_framebuffer *fb;
struct drm_gem_object *gem_obj;
int ret;

View File

@ -17,6 +17,18 @@ config PHY_SUN4I_USB
This driver controls the entire USB PHY block, both the USB OTG
parts, as well as the 2 regular USB 2 host PHYs.
config PHY_SUN6I_MIPI_DPHY
tristate "Allwinner A31 MIPI D-PHY Support"
depends on ARCH_SUNXI && HAS_IOMEM && OF
depends on RESET_CONTROLLER
select GENERIC_PHY
select GENERIC_PHY_MIPI_DPHY
select REGMAP_MMIO
help
Choose this option if you have an Allwinner SoC with
MIPI-DSI support. If M is selected, the module will be
called sun6i_mipi_dphy.
config PHY_SUN9I_USB
tristate "Allwinner sun9i SoC USB PHY driver"
depends on ARCH_SUNXI && HAS_IOMEM && OF

View File

@ -1,2 +1,3 @@
obj-$(CONFIG_PHY_SUN4I_USB) += phy-sun4i-usb.o
obj-$(CONFIG_PHY_SUN6I_MIPI_DPHY) += phy-sun6i-mipi-dphy.o
obj-$(CONFIG_PHY_SUN9I_USB) += phy-sun9i-usb.o

View File

@ -8,11 +8,14 @@
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/reset.h>
#include "sun6i_mipi_dsi.h"
#include <linux/phy/phy.h>
#include <linux/phy/phy-mipi-dphy.h>
#define SUN6I_DPHY_GCTL_REG 0x00
#define SUN6I_DPHY_GCTL_LANE_NUM(n) ((((n) - 1) & 3) << 4)
@ -81,12 +84,46 @@
#define SUN6I_DPHY_DBG5_REG 0xf4
int sun6i_dphy_init(struct sun6i_dphy *dphy, unsigned int lanes)
struct sun6i_dphy {
struct clk *bus_clk;
struct clk *mod_clk;
struct regmap *regs;
struct reset_control *reset;
struct phy *phy;
struct phy_configure_opts_mipi_dphy config;
};
static int sun6i_dphy_init(struct phy *phy)
{
struct sun6i_dphy *dphy = phy_get_drvdata(phy);
reset_control_deassert(dphy->reset);
clk_prepare_enable(dphy->mod_clk);
clk_set_rate_exclusive(dphy->mod_clk, 150000000);
return 0;
}
static int sun6i_dphy_configure(struct phy *phy, union phy_configure_opts *opts)
{
struct sun6i_dphy *dphy = phy_get_drvdata(phy);
int ret;
ret = phy_mipi_dphy_config_validate(&opts->mipi_dphy);
if (ret)
return ret;
memcpy(&dphy->config, opts, sizeof(dphy->config));
return 0;
}
static int sun6i_dphy_power_on(struct phy *phy)
{
struct sun6i_dphy *dphy = phy_get_drvdata(phy);
u8 lanes_mask = GENMASK(dphy->config.lanes - 1, 0);
regmap_write(dphy->regs, SUN6I_DPHY_TX_CTL_REG,
SUN6I_DPHY_TX_CTL_HS_TX_CLK_CONT);
@ -111,16 +148,9 @@ int sun6i_dphy_init(struct sun6i_dphy *dphy, unsigned int lanes)
SUN6I_DPHY_TX_TIME4_HS_TX_ANA1(3));
regmap_write(dphy->regs, SUN6I_DPHY_GCTL_REG,
SUN6I_DPHY_GCTL_LANE_NUM(lanes) |
SUN6I_DPHY_GCTL_LANE_NUM(dphy->config.lanes) |
SUN6I_DPHY_GCTL_EN);
return 0;
}
int sun6i_dphy_power_on(struct sun6i_dphy *dphy, unsigned int lanes)
{
u8 lanes_mask = GENMASK(lanes - 1, 0);
regmap_write(dphy->regs, SUN6I_DPHY_ANA0_REG,
SUN6I_DPHY_ANA0_REG_PWS |
SUN6I_DPHY_ANA0_REG_DMPC |
@ -181,16 +211,20 @@ int sun6i_dphy_power_on(struct sun6i_dphy *dphy, unsigned int lanes)
return 0;
}
int sun6i_dphy_power_off(struct sun6i_dphy *dphy)
static int sun6i_dphy_power_off(struct phy *phy)
{
struct sun6i_dphy *dphy = phy_get_drvdata(phy);
regmap_update_bits(dphy->regs, SUN6I_DPHY_ANA1_REG,
SUN6I_DPHY_ANA1_REG_VTTMODE, 0);
return 0;
}
int sun6i_dphy_exit(struct sun6i_dphy *dphy)
static int sun6i_dphy_exit(struct phy *phy)
{
struct sun6i_dphy *dphy = phy_get_drvdata(phy);
clk_rate_exclusive_put(dphy->mod_clk);
clk_disable_unprepare(dphy->mod_clk);
reset_control_assert(dphy->reset);
@ -198,6 +232,15 @@ int sun6i_dphy_exit(struct sun6i_dphy *dphy)
return 0;
}
static struct phy_ops sun6i_dphy_ops = {
.configure = sun6i_dphy_configure,
.power_on = sun6i_dphy_power_on,
.power_off = sun6i_dphy_power_off,
.init = sun6i_dphy_init,
.exit = sun6i_dphy_exit,
};
static struct regmap_config sun6i_dphy_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
@ -206,87 +249,70 @@ static struct regmap_config sun6i_dphy_regmap_config = {
.name = "mipi-dphy",
};
static int sun6i_dphy_probe(struct platform_device *pdev)
{
struct phy_provider *phy_provider;
struct sun6i_dphy *dphy;
struct resource *res;
void __iomem *regs;
dphy = devm_kzalloc(&pdev->dev, sizeof(*dphy), GFP_KERNEL);
if (!dphy)
return -ENOMEM;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
regs = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(regs)) {
dev_err(&pdev->dev, "Couldn't map the DPHY encoder registers\n");
return PTR_ERR(regs);
}
dphy->regs = devm_regmap_init_mmio_clk(&pdev->dev, "bus",
regs, &sun6i_dphy_regmap_config);
if (IS_ERR(dphy->regs)) {
dev_err(&pdev->dev, "Couldn't create the DPHY encoder regmap\n");
return PTR_ERR(dphy->regs);
}
dphy->reset = devm_reset_control_get_shared(&pdev->dev, NULL);
if (IS_ERR(dphy->reset)) {
dev_err(&pdev->dev, "Couldn't get our reset line\n");
return PTR_ERR(dphy->reset);
}
dphy->mod_clk = devm_clk_get(&pdev->dev, "mod");
if (IS_ERR(dphy->mod_clk)) {
dev_err(&pdev->dev, "Couldn't get the DPHY mod clock\n");
return PTR_ERR(dphy->mod_clk);
}
dphy->phy = devm_phy_create(&pdev->dev, NULL, &sun6i_dphy_ops);
if (IS_ERR(dphy->phy)) {
dev_err(&pdev->dev, "failed to create PHY\n");
return PTR_ERR(dphy->phy);
}
phy_set_drvdata(dphy->phy, dphy);
phy_provider = devm_of_phy_provider_register(&pdev->dev, of_phy_simple_xlate);
return PTR_ERR_OR_ZERO(phy_provider);
}
static const struct of_device_id sun6i_dphy_of_table[] = {
{ .compatible = "allwinner,sun6i-a31-mipi-dphy" },
{ }
};
MODULE_DEVICE_TABLE(of, sun6i_dphy_of_table);
int sun6i_dphy_probe(struct sun6i_dsi *dsi, struct device_node *node)
{
struct sun6i_dphy *dphy;
struct resource res;
void __iomem *regs;
int ret;
static struct platform_driver sun6i_dphy_platform_driver = {
.probe = sun6i_dphy_probe,
.driver = {
.name = "sun6i-mipi-dphy",
.of_match_table = sun6i_dphy_of_table,
},
};
module_platform_driver(sun6i_dphy_platform_driver);
if (!of_match_node(sun6i_dphy_of_table, node)) {
dev_err(dsi->dev, "Incompatible D-PHY\n");
return -EINVAL;
}
dphy = devm_kzalloc(dsi->dev, sizeof(*dphy), GFP_KERNEL);
if (!dphy)
return -ENOMEM;
ret = of_address_to_resource(node, 0, &res);
if (ret) {
dev_err(dsi->dev, "phy: Couldn't get our resources\n");
return ret;
}
regs = devm_ioremap_resource(dsi->dev, &res);
if (IS_ERR(regs)) {
dev_err(dsi->dev, "Couldn't map the DPHY encoder registers\n");
return PTR_ERR(regs);
}
dphy->regs = devm_regmap_init_mmio(dsi->dev, regs,
&sun6i_dphy_regmap_config);
if (IS_ERR(dphy->regs)) {
dev_err(dsi->dev, "Couldn't create the DPHY encoder regmap\n");
return PTR_ERR(dphy->regs);
}
dphy->reset = of_reset_control_get_shared(node, NULL);
if (IS_ERR(dphy->reset)) {
dev_err(dsi->dev, "Couldn't get our reset line\n");
return PTR_ERR(dphy->reset);
}
dphy->bus_clk = of_clk_get_by_name(node, "bus");
if (IS_ERR(dphy->bus_clk)) {
dev_err(dsi->dev, "Couldn't get the DPHY bus clock\n");
ret = PTR_ERR(dphy->bus_clk);
goto err_free_reset;
}
regmap_mmio_attach_clk(dphy->regs, dphy->bus_clk);
dphy->mod_clk = of_clk_get_by_name(node, "mod");
if (IS_ERR(dphy->mod_clk)) {
dev_err(dsi->dev, "Couldn't get the DPHY mod clock\n");
ret = PTR_ERR(dphy->mod_clk);
goto err_free_bus;
}
dsi->dphy = dphy;
return 0;
err_free_bus:
regmap_mmio_detach_clk(dphy->regs);
clk_put(dphy->bus_clk);
err_free_reset:
reset_control_put(dphy->reset);
return ret;
}
int sun6i_dphy_remove(struct sun6i_dsi *dsi)
{
struct sun6i_dphy *dphy = dsi->dphy;
regmap_mmio_detach_clk(dphy->regs);
clk_put(dphy->mod_clk);
clk_put(dphy->bus_clk);
reset_control_put(dphy->reset);
return 0;
}
MODULE_AUTHOR("Maxime Ripard <maxime.ripard@bootlin>");
MODULE_DESCRIPTION("Allwinner A31 MIPI D-PHY Driver");
MODULE_LICENSE("GPL");

View File

@ -1,5 +1,8 @@
TODO:
-Get a full review from the drm-maintainers on dri-devel done on this driver
-Drop all the logic around initial_mode_queried, the master_set and
master_drop callbacks and everything related to this. kms clients can handle
hotplugs.
-Extend this TODO with the results of that review
Please send any patches to Greg Kroah-Hartman <gregkh@linuxfoundation.org>,

View File

@ -7,11 +7,15 @@
* Michael Thayer <michael.thayer@oracle.com,
* Hans de Goede <hdegoede@redhat.com>
*/
#include <linux/module.h>
#include <linux/console.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/vt_kern.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_ioctl.h>
#include "vbox_drv.h"
@ -222,7 +226,6 @@ static void vbox_master_drop(struct drm_device *dev, struct drm_file *file_priv)
static struct drm_driver driver = {
.driver_features =
DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME | DRIVER_ATOMIC,
.dev_priv_size = 0,
.lastclose = drm_fb_helper_lastclose,
.master_set = vbox_master_set,

View File

@ -6,20 +6,22 @@
* Authors: Dave Airlie <airlied@redhat.com>
* Michael Thayer <michael.thayer@oracle.com,
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/tty.h>
#include <linux/sysrq.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/fb.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/string.h>
#include <linux/sysrq.h>
#include <linux/tty.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include "vbox_drv.h"
#include "vboxvideo.h"

View File

@ -10,14 +10,17 @@
* Hans de Goede <hdegoede@redhat.com>
*/
#include <linux/export.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
#include "hgsmi_channels.h"
#include "vbox_drv.h"
#include "vboxvideo.h"
#include "hgsmi_channels.h"
/*
* Set a graphics mode. Poke any required values into registers, do an HGSMI

View File

@ -192,7 +192,7 @@ struct drm_private_state;
* private objects. The structure itself is used as a vtable to identify the
* associated private object type. Each private object type that needs to be
* added to the atomic states is expected to have an implementation of these
* hooks and pass a pointer to it's drm_private_state_funcs struct to
* hooks and pass a pointer to its drm_private_state_funcs struct to
* drm_atomic_get_private_obj_state().
*/
struct drm_private_state_funcs {
@ -329,6 +329,15 @@ struct drm_atomic_state {
bool allow_modeset : 1;
bool legacy_cursor_update : 1;
bool async_update : 1;
/**
* @duplicated:
*
* Indicates whether or not this atomic state was duplicated using
* drm_atomic_helper_duplicate_state(). Drivers and atomic helpers
* should use this to fixup normal inconsistencies in duplicated
* states.
*/
bool duplicated : 1;
struct __drm_planes_state *planes;
struct __drm_crtcs_state *crtcs;
int num_connector;
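As a hedged sketch of how a driver could consume this flag (every example_-prefixed name is illustrative, not from the tree), an MST encoder's atomic check might skip recomputing bandwidth when the state is a duplicate, mirroring the nouveau change earlier in this series; drm_dp_calc_pbn_mode() is the existing DP MST helper:

static int example_msto_atomic_check(struct drm_encoder *encoder,
                                     struct drm_crtc_state *crtc_state,
                                     struct drm_connector_state *conn_state)
{
        struct drm_atomic_state *state = crtc_state->state;
        struct drm_connector *connector = conn_state->connector;
        /* example_head_state/to_example_head_state() are hypothetical
         * driver-private state helpers. */
        struct example_head_state *head = to_example_head_state(crtc_state);

        /* The connector's bpc may have changed after the state was
         * duplicated, so keep the originally computed bandwidth. */
        if (!state->duplicated)
                head->pbn = drm_dp_calc_pbn_mode(crtc_state->adjusted_mode.clock,
                                                 connector->display_info.bpc * 3);

        return 0;
}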

View File

@ -47,6 +47,24 @@ static inline bool drm_arch_can_wc_memory(void)
return false;
#elif defined(CONFIG_MIPS) && defined(CONFIG_CPU_LOONGSON3)
return false;
#elif defined(CONFIG_ARM) || defined(CONFIG_ARM64)
/*
* The DRM driver stack is designed to work with cache coherent devices
* only, but permits an optimization to be enabled in some cases, where
* for some buffers, both the CPU and the GPU use uncached mappings,
* removing the need for DMA snooping and allocation in the CPU caches.
*
* The use of uncached GPU mappings relies on the correct implementation
* of the PCIe NoSnoop TLP attribute by the platform, otherwise the GPU
* will use cached mappings nonetheless. On x86 platforms, this does not
* seem to matter, as uncached CPU mappings will snoop the caches in any
* case. However, on ARM and arm64, enabling this optimization on a
* platform where NoSnoop is ignored results in loss of coherency, which
* breaks correct operation of the device. Since we have no way of
* detecting whether NoSnoop works or not, just disable this
* optimization entirely for ARM and arm64.
*/
return false;
#else
return true;
#endif
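A small usage sketch, assuming a driver that only wants write-combined CPU mappings where they are known to stay coherent with the device; example_gem_pgprot() is illustrative, while drm_arch_can_wc_memory() and pgprot_writecombine() are the existing helpers:

#include <linux/mm.h>
#include <drm/drm_cache.h>

static pgprot_t example_gem_pgprot(pgprot_t prot)
{
        if (drm_arch_can_wc_memory())
                return pgprot_writecombine(prot);

        /* On ARM/arm64 (and the other exceptions above), keep the
         * cached, coherent mapping. */
        return prot;
}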

View File

@ -26,7 +26,7 @@ struct drm_client_funcs {
* @unregister:
*
* Called when &drm_device is unregistered. The client should respond by
* releasing it's resources using drm_client_release().
* releasing its resources using drm_client_release().
*
* This callback is optional.
*/

View File

@ -912,7 +912,7 @@ struct drm_connector {
/**
* @ycbcr_420_allowed : This bool indicates if this connector is
* capable of handling YCBCR 420 output. While parsing the EDID
* blocks, its very helpful to know, if the source is capable of
* blocks it's very helpful to know if the source is capable of
* handling YCBCR 420 outputs.
*/
bool ycbcr_420_allowed;

View File

@ -1052,11 +1052,18 @@ int drm_dp_bw_code_to_link_rate(u8 link_bw);
#define DP_SDP_VSC_EXT_CEA 0x21 /* DP 1.4 */
/* 0x80+ CEA-861 infoframe types */
/**
* struct dp_sdp_header - DP secondary data packet header
* @HB0: Secondary Data Packet ID
* @HB1: Secondary Data Packet Type
* @HB2: Secondary Data Packet Specific header, Byte 0
* @HB3: Secondary Data packet Specific header, Byte 1
*/
struct dp_sdp_header {
u8 HB0; /* Secondary Data Packet ID */
u8 HB1; /* Secondary Data Packet Type */
u8 HB2; /* Secondary Data Packet Specific header, Byte 0 */
u8 HB3; /* Secondary Data packet Specific header, Byte 1 */
u8 HB0;
u8 HB1;
u8 HB2;
u8 HB3;
} __packed;
#define EDP_SDP_HEADER_REVISION_MASK 0x1F

View File

@ -44,111 +44,231 @@
#define DSC_1_2_MAX_LINEBUF_DEPTH_VAL 0
#define DSC_1_1_MAX_LINEBUF_DEPTH_BITS 13
/* Configuration for a single Rate Control model range */
/**
* struct drm_dsc_rc_range_parameters - DSC Rate Control range parameters
*
* This defines different rate control parameters used by the DSC engine
* to compress the frame.
*/
struct drm_dsc_rc_range_parameters {
/* Min Quantization Parameters allowed for this range */
/**
* @range_min_qp: Min Quantization Parameters allowed for this range
*/
u8 range_min_qp;
/* Max Quantization Parameters allowed for this range */
/**
* @range_max_qp: Max Quantization Parameters allowed for this range
*/
u8 range_max_qp;
/* Bits/group offset to apply to target for this group */
/**
* @range_bpg_offset:
* Bits/group offset to apply to target for this group
*/
u8 range_bpg_offset;
};
/**
* struct drm_dsc_config - Parameters required to configure DSC
*
* Driver populates this structure with all the parameters required
* to configure the display stream compression on the source.
*/
struct drm_dsc_config {
/* Bits / component for previous reconstructed line buffer */
/**
* @line_buf_depth:
* Bits per component for previous reconstructed line buffer
*/
u8 line_buf_depth;
/* Bits per component to code (must be 8, 10, or 12) */
/**
* @bits_per_component: Bits per component to code (8/10/12)
*/
u8 bits_per_component;
/*
* Flag indicating to do RGB - YCoCg conversion
* and back (should be 1 for RGB input)
/**
* @convert_rgb:
* Flag to indicate if RGB - YCoCg conversion is needed
* True if RGB input, False if YCoCg input
*/
bool convert_rgb;
/**
* @slice_count: Number of slices per line used by the DSC encoder
*/
u8 slice_count;
/* Slice Width */
/**
* @slice_width: Width of each slice in pixels
*/
u16 slice_width;
/* Slice Height */
/**
* @slice_height: Slice height in pixels
*/
u16 slice_height;
/*
* 4:2:2 enable mode (from PPS, 4:2:2 conversion happens
* outside of DSC encode/decode algorithm)
/**
* @enable422: True for 4_2_2 sampling, false for 4_4_4 sampling
*/
bool enable422;
/* Picture Width */
/**
* @pic_width: Width of the input display frame in pixels
*/
u16 pic_width;
/* Picture Height */
/**
* @pic_height: Vertical height of the input display frame
*/
u16 pic_height;
/* Offset to bits/group used by RC to determine QP adjustment */
/**
* @rc_tgt_offset_high:
* Offset to bits/group used by RC to determine QP adjustment
*/
u8 rc_tgt_offset_high;
/* Offset to bits/group used by RC to determine QP adjustment */
/**
* @rc_tgt_offset_low:
* Offset to bits/group used by RC to determine QP adjustment
*/
u8 rc_tgt_offset_low;
/* Bits/pixel target << 4 (ie., 4 fractional bits) */
/**
* @bits_per_pixel:
* Target bits per pixel with 4 fractional bits, bits_per_pixel << 4
*/
u16 bits_per_pixel;
/*
* Factor to determine if an edge is present based
* on the bits produced
/**
* @rc_edge_factor:
* Factor to determine if an edge is present based on the bits produced
*/
u8 rc_edge_factor;
/* Slow down incrementing once the range reaches this value */
/**
* @rc_quant_incr_limit1:
* Slow down incrementing once the range reaches this value
*/
u8 rc_quant_incr_limit1;
/* Slow down incrementing once the range reaches this value */
/**
* @rc_quant_incr_limit0:
* Slow down incrementing once the range reaches this value
*/
u8 rc_quant_incr_limit0;
/* Number of pixels to delay the initial transmission */
/**
* @initial_xmit_delay:
* Number of pixels to delay the initial transmission
*/
u16 initial_xmit_delay;
/* Number of pixels to delay the VLD on the decoder,not including SSM */
/**
* @initial_dec_delay:
* Initial decoder delay, number of pixel times that the decoder
* accumulates data in its rate buffer before starting to decode
* and output pixels.
*/
u16 initial_dec_delay;
/* Block prediction enable */
/**
* @block_pred_enable:
* True if block prediction is used to code any groups within the
* picture. False if BP not used
*/
bool block_pred_enable;
/* Bits/group offset to use for first line of the slice */
/**
* @first_line_bpg_offset:
* Number of additional bits allocated for each group on the first
* line of slice.
*/
u8 first_line_bpg_offset;
/* Value to use for RC model offset at slice start */
/**
* @initial_offset: Value to use for RC model offset at slice start
*/
u16 initial_offset;
/* Thresholds defining each of the buffer ranges */
/**
* @rc_buf_thresh: Thresholds defining each of the buffer ranges
*/
u16 rc_buf_thresh[DSC_NUM_BUF_RANGES - 1];
/* Parameters for each of the RC ranges */
/**
* @rc_range_params:
* Parameters for each of the RC ranges defined in
* &struct drm_dsc_rc_range_parameters
*/
struct drm_dsc_rc_range_parameters rc_range_params[DSC_NUM_BUF_RANGES];
/* Total size of RC model */
/**
* @rc_model_size: Total size of RC model
*/
u16 rc_model_size;
/* Minimum QP where flatness information is sent */
/**
* @flatness_min_qp: Minimum QP where flatness information is sent
*/
u8 flatness_min_qp;
/* Maximum QP where flatness information is sent */
/**
* @flatness_max_qp: Maximum QP where flatness information is sent
*/
u8 flatness_max_qp;
/* Initial value for scale factor */
/**
* @initial_scale_value: Initial value for the scale factor
*/
u8 initial_scale_value;
/* Decrement scale factor every scale_decrement_interval groups */
/**
* @scale_decrement_interval:
* Specifies number of group times between decrementing the scale factor
* at beginning of a slice.
*/
u16 scale_decrement_interval;
/* Increment scale factor every scale_increment_interval groups */
/**
* @scale_increment_interval:
* Number of group times between incrementing the scale factor value
* used at the beginning of a slice.
*/
u16 scale_increment_interval;
/* Non-first line BPG offset to use */
/**
* @nfl_bpg_offset: Non first line BPG offset to be used
*/
u16 nfl_bpg_offset;
/* BPG offset used to enforce slice bit */
/**
* @slice_bpg_offset: BPG offset used to enforce slice bit
*/
u16 slice_bpg_offset;
/* Final RC linear transformation offset value */
/**
* @final_offset: Final RC linear transformation offset value
*/
u16 final_offset;
/* Enable on-off VBR (ie., disable stuffing bits) */
/**
* @vbr_enable: True if VBR mode is enabled, false if disabled
*/
bool vbr_enable;
/* Mux word size (in bits) for SSM mode */
/**
* @mux_word_size: Mux word size (in bits) for SSM mode
*/
u8 mux_word_size;
/*
* The (max) size in bytes of the "chunks" that are
* used in slice multiplexing
/**
* @slice_chunk_size:
* The (max) size in bytes of the "chunks" that are used in slice
* multiplexing.
*/
u16 slice_chunk_size;
/* Rate Control buffer size in bits */
/**
* @rc_bits: Rate control buffer size in bits
*/
u16 rc_bits;
/* DSC Minor Version */
/**
* @dsc_version_minor: DSC minor version
*/
u8 dsc_version_minor;
/* DSC Major version */
/**
* @dsc_version_major: DSC major version
*/
u8 dsc_version_major;
/* Native 4:2:2 support */
/**
* @native_422: True if Native 4:2:2 supported, else false
*/
bool native_422;
/* Native 4:2:0 support */
/**
* @native_420: True if Native 4:2:0 supported else false.
*/
bool native_420;
/* Additional bits/grp for second line of slice for native 4:2:0 */
/**
* @second_line_bpg_offset:
* Additional bits/grp for second line of slice for native 4:2:0
*/
u8 second_line_bpg_offset;
/* Num of bits deallocated for each grp that is not in second line of slice */
/**
* @nsl_bpg_offset:
* Num of bits deallocated for each grp that is not in second line of
* slice
*/
u16 nsl_bpg_offset;
/* Offset adj fr second line in Native 4:2:0 mode */
/**
* @second_line_offset_adj:
* Offset adjustment for second line in Native 4:2:0 mode
*/
u16 second_line_offset_adj;
};
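As a small worked example for the fixed-point field above (values are illustrative, not from the header): @bits_per_pixel carries 4 fractional bits, so a target of 8.0 bpp is stored as 8 << 4 = 128 and 7.5 bpp as 120.

/* Illustrative only: compressed bpp targets in the U.4 fixed-point format
 * expected by drm_dsc_config.bits_per_pixel. */
u16 bpp_target_8_0 = 8 << 4;    /* 128 == 8.0 bits per pixel */
u16 bpp_target_7_5 = 120;       /* 7.5 * 16, i.e. 7.5 bits per pixel */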
@ -468,10 +588,13 @@ struct drm_dsc_picture_parameter_set {
* This structure represents the DSC PPS infoframe required to send the Picture
* Parameter Set metadata required before enabling VESA Display Stream
* Compression. This is based on the DP Secondary Data Packet structure and
* comprises of SDP Header as defined in drm_dp_helper.h and PPS payload.
* comprises the SDP header, as defined in &struct dp_sdp_header in drm_dp_helper.h,
* and the PPS payload defined in &struct drm_dsc_picture_parameter_set.
*
* @pps_header: Header for PPS as per DP SDP header format
* @pps_header: Header for PPS as per DP SDP header format of type
* &struct dp_sdp_header
* @pps_payload: PPS payload fields as per DSC specification Table 4-1
* as represented in &struct drm_dsc_picture_parameter_set
*/
struct drm_dsc_pps_infoframe {
struct dp_sdp_header pps_header;

View File

@ -361,7 +361,7 @@ struct drm_mode_config {
*
* This is the big scary modeset BKL which protects everything that
* isn't protected otherwise. Scope is unclear and fuzzy, try to remove
* anything from under it's protection and move it into more well-scoped
* anything from under its protection and move it into more well-scoped
* locks.
*
* The one important thing this protects is the use of @acquire_ctx.

View File

@ -23,7 +23,11 @@
#ifndef __DRM_KMS_HELPER_H__
#define __DRM_KMS_HELPER_H__
#include <drm/drmP.h>
struct drm_crtc;
struct drm_crtc_funcs;
struct drm_device;
struct drm_framebuffer;
struct drm_mode_fb_cmd2;
void drm_helper_move_panel_connectors_to_head(struct drm_device *);

View File

@ -1013,7 +1013,7 @@ struct drm_plane_helper_funcs {
* @prepare_fb:
*
* This hook is to prepare a framebuffer for scanout by e.g. pinning
* it's backing storage or relocating it into a contiguous block of
* its backing storage or relocating it into a contiguous block of
* VRAM. Other possible preparatory work includes flushing caches.
*
* This function must not block for outstanding rendering, since it is

View File

@ -68,7 +68,7 @@ struct drm_modeset_acquire_ctx {
/**
* struct drm_modeset_lock - used for locking modeset resources.
* @mutex: resource locking
* @head: used to hold it's place on &drm_atomic_state.locked list when
* @head: used to hold its place on &drm_atomic_state.locked list when
* part of an atomic update
*
* Used for locking CRTCs and other modeset resources.

View File

@ -182,12 +182,6 @@ int drm_rect_calc_hscale(const struct drm_rect *src,
int drm_rect_calc_vscale(const struct drm_rect *src,
const struct drm_rect *dst,
int min_vscale, int max_vscale);
int drm_rect_calc_hscale_relaxed(struct drm_rect *src,
struct drm_rect *dst,
int min_hscale, int max_hscale);
int drm_rect_calc_vscale_relaxed(struct drm_rect *src,
struct drm_rect *dst,
int min_vscale, int max_vscale);
void drm_rect_debug_print(const char *prefix,
const struct drm_rect *r, bool fixed_point);
void drm_rect_rotate(struct drm_rect *r,

View File

@ -876,7 +876,7 @@ int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
*
* @bo: A pointer to a struct ttm_buffer_object.
*
* Pipelined gutting a BO of it's backing store.
* Pipelined gutting a BO of its backing store.
*/
int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo);

View File

@ -195,6 +195,27 @@ extern "C" {
#define DRM_FORMAT_NV24 fourcc_code('N', 'V', '2', '4') /* non-subsampled Cr:Cb plane */
#define DRM_FORMAT_NV42 fourcc_code('N', 'V', '4', '2') /* non-subsampled Cb:Cr plane */
/*
* 2 plane YCbCr MSB aligned
* index 0 = Y plane, [15:0] Y:x [10:6] little endian
* index 1 = Cr:Cb plane, [31:0] Cr:x:Cb:x [10:6:10:6] little endian
*/
#define DRM_FORMAT_P010 fourcc_code('P', '0', '1', '0') /* 2x2 subsampled Cr:Cb plane 10 bits per channel */
/*
* 2 plane YCbCr MSB aligned
* index 0 = Y plane, [15:0] Y:x [12:4] little endian
* index 1 = Cr:Cb plane, [31:0] Cr:x:Cb:x [12:4:12:4] little endian
*/
#define DRM_FORMAT_P012 fourcc_code('P', '0', '1', '2') /* 2x2 subsampled Cr:Cb plane 12 bits per channel */
/*
* 2 plane YCbCr MSB aligned
* index 0 = Y plane, [15:0] Y little endian
* index 1 = Cr:Cb plane, [31:0] Cr:Cb [16:16] little endian
*/
#define DRM_FORMAT_P016 fourcc_code('P', '0', '1', '6') /* 2x2 subsampled Cr:Cb plane 16 bits per channel */
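A minimal sizing sketch implied by these layouts, assuming even dimensions and tightly packed pitches (example_p010_sizes() is illustrative): P010 stores 2 bytes per Y sample and one 4-byte chroma pair per 2x2 block, so a full buffer is 3 bytes per pixel.

/* Illustrative only: plane sizes for a width x height P010 buffer. */
static void example_p010_sizes(unsigned int width, unsigned int height,
                               unsigned int *y_size, unsigned int *uv_size)
{
        unsigned int y_pitch  = width * 2;        /* 16 bits per Y sample */
        unsigned int uv_pitch = (width / 2) * 4;  /* one 4-byte chroma pair per 2 pixels */

        *y_size  = y_pitch * height;
        *uv_size = uv_pitch * (height / 2);       /* chroma plane is half height */
        /* *y_size + *uv_size == 3 * width * height bytes in total */
}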
/*
* 3 plane YCbCr
* index 0: Y plane, [7:0] Y