Merge tag 'drm-fixes-2020-08-21' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Dave Airlie:
 "Regular fixes pull for rc2. Usual rc2 doesn't seem too busy, mainly
  i915 and amdgpu. I'd expect the usual uptick for rc3.

  amdgpu:
   - Fix allocation size
   - SR-IOV fixes
   - Vega20 SMU feature state caching fix
   - Fix custom pptable handling
   - Arcturus golden settings update
   - Several display fixes
   - Fixes for Navy Flounder
   - Misc display fixes
   - RAS fix

  amdkfd:
   - SDMA fix for renoir

  i915:
   - Fix device parameter usage for selftest mock i915 device
   - Fix LPSP capability debugfs NULL dereference
   - Fix buddy register pagemask table
   - Fix intel_atomic_check() non-negative return value
   - Fix selftests passing a random 0 into ilog2()
   - Fix TGL power well enable/disable ordering
   - Switch to PMU module refcounting
   - GVT fixes

  virtio:
   - Add missing dma_fence_put() in virtio_gpu_execbuffer_ioctl()
   - Fix memory leak in virtio_gpu_cleanup_object()"

* tag 'drm-fixes-2020-08-21' of git://anongit.freedesktop.org/drm/drm: (34 commits)
  Revert "drm/amdgpu: disable gfxoff for navy_flounder"
  drm/i915/tgl: Make sure TC-cold is blocked before enabling TC AUX power wells
  drm/i915/selftests: Avoid passing a random 0 into ilog2
  drm/i915: Fix wrong return value in intel_atomic_check()
  drm/i915: Update bw_buddy pagemask table
  drm/i915/display: Check for an LPSP encoder before dereferencing
  drm/i915: Copy default modparams to mock i915_device
  drm/i915: Provide the perf pmu.module
  drm/amd/display: fix pow() crashing when given base 0
  drm/amd/display: Reset scrambling on Test Pattern
  drm/amd/display: fix dcn3 wide timing dsc validation
  drm/amd/display: Fix DFPstate hang due to view port changed
  drm/amd/display: Assign correct left shift
  drm/amd/display: Call DMUB for eDP power control
  drm/amdkfd: fix the wrong sdma instance query for renoir
  drm/amdgpu: parse ta firmware for navy_flounder
  drm/amdgpu: fix NULL pointer access issue when unloading driver
  drm/amdgpu: fix uninit-value in arcturus_log_thermal_throttling_event()
  drm/amdgpu: disable gfxoff for navy_flounder
  drm/amdgpu/display: use GFP_ATOMIC in dcn20_validate_bandwidth_internal
  ...
Committed by Linus Torvalds on 2020-08-21 10:02:44 -07:00 in commit 43d387a4ad.
41 changed files with 319 additions and 96 deletions.


@@ -195,19 +195,32 @@ static uint32_t get_sdma_rlc_reg_offset(struct amdgpu_device *adev,
 unsigned int engine_id,
 unsigned int queue_id)
 {
-uint32_t sdma_engine_reg_base[2] = {
-SOC15_REG_OFFSET(SDMA0, 0,
-mmSDMA0_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL,
-SOC15_REG_OFFSET(SDMA1, 0,
-mmSDMA1_RLC0_RB_CNTL) - mmSDMA1_RLC0_RB_CNTL
-};
-uint32_t retval = sdma_engine_reg_base[engine_id]
+uint32_t sdma_engine_reg_base = 0;
+uint32_t sdma_rlc_reg_offset;
+switch (engine_id) {
+default:
+dev_warn(adev->dev,
+"Invalid sdma engine id (%d), using engine id 0\n",
+engine_id);
+fallthrough;
+case 0:
+sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA0, 0,
+mmSDMA0_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL;
+break;
+case 1:
+sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA1, 0,
+mmSDMA1_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL;
+break;
+}
+sdma_rlc_reg_offset = sdma_engine_reg_base
 + queue_id * (mmSDMA0_RLC1_RB_CNTL - mmSDMA0_RLC0_RB_CNTL);
 pr_debug("RLC register offset for SDMA%d RLC%d: 0x%x\n", engine_id,
-queue_id, retval);
+queue_id, sdma_rlc_reg_offset);
-return retval;
+return sdma_rlc_reg_offset;
 }
static inline struct v9_mqd *get_mqd(void *mqd)
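The fix above replaces an unchecked two-entry array lookup with a switch that validates engine_id and falls back to engine 0 with a warning (per the shortlog, renoir was querying the wrong SDMA instance). A minimal userspace sketch of the warn-and-fall-through pattern; engine names and register values are illustrative, not amdgpu's:

#include <stdio.h>
#include <stdint.h>

static uint32_t reg_base_for_engine(unsigned int engine_id)
{
    uint32_t base;

    switch (engine_id) {
    default:
        /* Unknown index: warn, then reuse the engine-0 mapping. */
        fprintf(stderr, "invalid engine id %u, using 0\n", engine_id);
        /* fall through */
    case 0:
        base = 0x1000;  /* illustrative engine-0 register base */
        break;
    case 1:
        base = 0x2000;  /* illustrative engine-1 register base */
        break;
    }
    return base;
}

int main(void)
{
    printf("0 -> 0x%x\n", reg_base_for_engine(0));
    printf("7 -> 0x%x\n", reg_base_for_engine(7)); /* warns, maps to engine 0 */
    return 0;
}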


@@ -1243,7 +1243,6 @@ void amdgpu_ras_debugfs_remove(struct amdgpu_device *adev,
 if (!obj || !obj->ent)
 return;
-debugfs_remove(obj->ent);
 obj->ent = NULL;
 put_obj(obj);
 }
@@ -1257,7 +1256,6 @@ static void amdgpu_ras_debugfs_remove_all(struct amdgpu_device *adev)
 amdgpu_ras_debugfs_remove(adev, &obj->head);
 }
-debugfs_remove_recursive(con->dir);
 con->dir = NULL;
 }
/* debugfs end */


@@ -462,7 +462,7 @@ int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev,
 unsigned int pages;
 int i, r;
-*sgt = kmalloc(sizeof(*sg), GFP_KERNEL);
+*sgt = kmalloc(sizeof(**sgt), GFP_KERNEL);
if (!*sgt)
return -ENOMEM;
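The one-character change above is the whole fix: sgt is a pointer-to-pointer, so sizeof(**sgt) is the size of the structure actually being allocated, while the old sizeof(*sg) measured a different object. The kernel idiom p = kmalloc(sizeof(*p), ...) ties the size to the destination and survives type changes; a sketch with hypothetical types (not the amdgpu code):

#include <stdio.h>
#include <stdlib.h>

struct sg_table_like { void *pages; unsigned int nents; };

static int alloc_table(struct sg_table_like **sgt)
{
    /* *sgt is a pointer; **sgt is the struct itself. sizeof(*sgt)
     * would allocate only pointer-sized storage and the first write
     * past it would corrupt memory.
     */
    *sgt = malloc(sizeof(**sgt));
    return *sgt ? 0 : -1;
}

int main(void)
{
    struct sg_table_like *t;

    printf("sizeof(t) = %zu, sizeof(*t) = %zu\n", sizeof(t), sizeof(*t));
    if (alloc_table(&t) == 0)
        free(t);
    return 0;
}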


@@ -691,6 +691,7 @@ static const struct soc15_reg_golden golden_settings_gc_9_4_1_arct[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_5_ARCT, 0x3ff, 0x135),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_CONFIG, 0xffffffff, 0x011A0000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_FIFO_SIZES, 0xffffffff, 0x00000f00),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_UTCL1_CNTL1, 0x30000000, 0x30000000)
};
static const struct soc15_reg_rlcg rlcg_access_gc_9_0[] = {


@@ -135,6 +135,12 @@ static void gfxhub_v2_1_init_cache_regs(struct amdgpu_device *adev)
{
uint32_t tmp;
/* These registers are not accessible to VF-SRIOV.
* The PF will program them instead.
*/
if (amdgpu_sriov_vf(adev))
return;
/* Setup L2 cache */
tmp = RREG32_SOC15(GC, 0, mmGCVM_L2_CNTL);
tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL, ENABLE_L2_CACHE, 1);
@@ -190,6 +196,12 @@ static void gfxhub_v2_1_enable_system_domain(struct amdgpu_device *adev)
static void gfxhub_v2_1_disable_identity_aperture(struct amdgpu_device *adev)
{
/* These registers are not accessible to VF-SRIOV.
* The PF will program them instead.
*/
if (amdgpu_sriov_vf(adev))
return;
WREG32_SOC15(GC, 0, mmGCVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32,
0xFFFFFFFF);
WREG32_SOC15(GC, 0, mmGCVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32,
@@ -326,6 +338,13 @@ void gfxhub_v2_1_set_fault_enable_default(struct amdgpu_device *adev,
bool value)
{
u32 tmp;
/* These registers are not accessible to VF-SRIOV.
* The PF will program them instead.
*/
if (amdgpu_sriov_vf(adev))
return;
tmp = RREG32_SOC15(GC, 0, mmGCVM_L2_PROTECTION_FAULT_CNTL);
tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
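All of the hub init paths in this file (and the mmhub ones below) gain the same guard: on an SR-IOV virtual function these registers are owned by the physical function, so the VF simply returns early. A standalone sketch of the pattern, with stub types and is_sriov_vf() standing in for amdgpu_sriov_vf():

#include <stdbool.h>
#include <stdio.h>

struct device { bool is_vf; };

static bool is_sriov_vf(const struct device *dev)
{
    return dev->is_vf;
}

static void init_cache_regs(struct device *dev)
{
    /* PF-owned registers: a VF write would fault or be dropped,
     * so skip the whole programming sequence on a VF.
     */
    if (is_sriov_vf(dev))
        return;

    printf("programming L2 cache registers\n");
}

int main(void)
{
    struct device pf = { .is_vf = false };
    struct device vf = { .is_vf = true };

    init_cache_regs(&pf);   /* programs the registers */
    init_cache_regs(&vf);   /* skipped: the PF programs them instead */
    return 0;
}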


@@ -134,6 +134,12 @@ static void mmhub_v2_0_init_cache_regs(struct amdgpu_device *adev)
{
uint32_t tmp;
/* These registers are not accessible to VF-SRIOV.
* The PF will program them instead.
*/
if (amdgpu_sriov_vf(adev))
return;
/* Setup L2 cache */
tmp = RREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL);
tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL, ENABLE_L2_CACHE, 1);
@@ -189,6 +195,12 @@ static void mmhub_v2_0_enable_system_domain(struct amdgpu_device *adev)
static void mmhub_v2_0_disable_identity_aperture(struct amdgpu_device *adev)
{
/* These registers are not accessible to VF-SRIOV.
* The PF will program them instead.
*/
if (amdgpu_sriov_vf(adev))
return;
WREG32_SOC15(MMHUB, 0,
mmMMVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32,
0xFFFFFFFF);
@@ -318,6 +330,13 @@ void mmhub_v2_0_gart_disable(struct amdgpu_device *adev)
void mmhub_v2_0_set_fault_enable_default(struct amdgpu_device *adev, bool value)
{
u32 tmp;
/* These registers are not accessible to VF-SRIOV.
* The PF will program them instead.
*/
if (amdgpu_sriov_vf(adev))
return;
tmp = RREG32_SOC15(MMHUB, 0, mmMMVM_L2_PROTECTION_FAULT_CNTL);
tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);


@@ -179,12 +179,11 @@ static int psp_v11_0_init_microcode(struct psp_context *psp)
 }
 break;
 case CHIP_SIENNA_CICHLID:
+case CHIP_NAVY_FLOUNDER:
 err = psp_init_ta_microcode(&adev->psp, chip_name);
 if (err)
 return err;
 break;
-case CHIP_NAVY_FLOUNDER:
-break;
default:
BUG();
}


@@ -2196,6 +2196,7 @@ void amdgpu_dm_update_connector_after_detect(
drm_connector_update_edid_property(connector,
aconnector->edid);
drm_add_edid_modes(connector, aconnector->edid);
if (aconnector->dc_link->aux_mode)
drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,


@@ -1108,6 +1108,18 @@ static enum bp_result bios_parser_enable_disp_power_gating(
action);
}
static enum bp_result bios_parser_enable_lvtma_control(
struct dc_bios *dcb,
uint8_t uc_pwr_on)
{
struct bios_parser *bp = BP_FROM_DCB(dcb);
if (!bp->cmd_tbl.enable_lvtma_control)
return BP_RESULT_FAILURE;
return bp->cmd_tbl.enable_lvtma_control(bp, uc_pwr_on);
}
static bool bios_parser_is_accelerated_mode(
struct dc_bios *dcb)
{
@@ -2208,7 +2220,9 @@ static const struct dc_vbios_funcs vbios_funcs = {
 .get_board_layout_info = bios_get_board_layout_info,
 .pack_data_tables = bios_parser_pack_data_tables,
-.get_atom_dc_golden_table = bios_get_atom_dc_golden_table
+.get_atom_dc_golden_table = bios_get_atom_dc_golden_table,
+.enable_lvtma_control = bios_parser_enable_lvtma_control
};
static bool bios_parser2_construct(


@@ -904,6 +904,33 @@ static unsigned int get_smu_clock_info_v3_1(struct bios_parser *bp, uint8_t id)
return 0;
}
/******************************************************************************
******************************************************************************
**
** LVTMA CONTROL
**
******************************************************************************
*****************************************************************************/
static enum bp_result enable_lvtma_control(
struct bios_parser *bp,
uint8_t uc_pwr_on);
static void init_enable_lvtma_control(struct bios_parser *bp)
{
/* TODO add switch for table vrsion */
bp->cmd_tbl.enable_lvtma_control = enable_lvtma_control;
}
static enum bp_result enable_lvtma_control(
struct bios_parser *bp,
uint8_t uc_pwr_on)
{
enum bp_result result = BP_RESULT_FAILURE;
return result;
}
void dal_firmware_parser_init_cmd_tbl(struct bios_parser *bp)
{
init_dig_encoder_control(bp);
@@ -919,4 +946,5 @@ void dal_firmware_parser_init_cmd_tbl(struct bios_parser *bp)
init_set_dce_clock(bp);
init_get_smu_clock_info(bp);
init_enable_lvtma_control(bp);
}


@@ -94,7 +94,8 @@ struct cmd_tbl {
struct bp_set_dce_clock_parameters *bp_params);
unsigned int (*get_smu_clock_info)(
struct bios_parser *bp, uint8_t id);
enum bp_result (*enable_lvtma_control)(struct bios_parser *bp,
uint8_t uc_pwr_on);
};
void dal_firmware_parser_init_cmd_tbl(struct bios_parser *bp);


@@ -3286,12 +3286,11 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx)
 core_link_set_avmute(pipe_ctx, true);
 }
+dc->hwss.blank_stream(pipe_ctx);
 #if defined(CONFIG_DRM_AMD_DC_HDCP)
 update_psp_stream_config(pipe_ctx, true);
 #endif
-dc->hwss.blank_stream(pipe_ctx);
if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
deallocate_mst_payload(pipe_ctx);


@@ -136,6 +136,10 @@ struct dc_vbios_funcs {
enum bp_result (*get_atom_dc_golden_table)(
struct dc_bios *dcb);
enum bp_result (*enable_lvtma_control)(
struct dc_bios *bios,
uint8_t uc_pwr_on);
};
struct bios_registers {


@@ -49,7 +49,7 @@
 #define DCN_PANEL_CNTL_REG_LIST()\
 DCN_PANEL_CNTL_SR(PWRSEQ_CNTL, LVTMA), \
 DCN_PANEL_CNTL_SR(PWRSEQ_STATE, LVTMA), \
-DCE_PANEL_CNTL_SR(PWRSEQ_REF_DIV, LVTMA), \
+DCN_PANEL_CNTL_SR(PWRSEQ_REF_DIV, LVTMA), \
SR(BL_PWM_CNTL), \
SR(BL_PWM_CNTL2), \
SR(BL_PWM_PERIOD_CNTL), \


@@ -842,6 +842,17 @@ void dce110_edp_power_control(
cntl.coherent = false;
cntl.lanes_number = LANE_COUNT_FOUR;
cntl.hpd_sel = link->link_enc->hpd_source;
if (ctx->dc->ctx->dmub_srv &&
ctx->dc->debug.dmub_command_table) {
if (cntl.action == TRANSMITTER_CONTROL_POWER_ON)
bp_result = ctx->dc_bios->funcs->enable_lvtma_control(ctx->dc_bios,
LVTMA_CONTROL_POWER_ON);
else
bp_result = ctx->dc_bios->funcs->enable_lvtma_control(ctx->dc_bios,
LVTMA_CONTROL_POWER_OFF);
}
bp_result = link_transmitter_control(ctx->dc_bios, &cntl);
if (!power_up)
@@ -919,8 +930,21 @@ void dce110_edp_backlight_control(
/*edp 1.2*/
if (cntl.action == TRANSMITTER_CONTROL_BACKLIGHT_ON)
edp_receiver_ready_T7(link);
if (ctx->dc->ctx->dmub_srv &&
ctx->dc->debug.dmub_command_table) {
if (cntl.action == TRANSMITTER_CONTROL_BACKLIGHT_ON)
ctx->dc_bios->funcs->enable_lvtma_control(ctx->dc_bios,
LVTMA_CONTROL_LCD_BLON);
else
ctx->dc_bios->funcs->enable_lvtma_control(ctx->dc_bios,
LVTMA_CONTROL_LCD_BLOFF);
}
link_transmitter_control(ctx->dc_bios, &cntl);
if (enable && link->dpcd_sink_ext_caps.bits.oled)
msleep(OLED_POST_T7_DELAY);


@@ -121,35 +121,35 @@ void enc1_update_generic_info_packet(
 switch (packet_index) {
 case 0:
 REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
-AFMT_GENERIC0_FRAME_UPDATE, 1);
+AFMT_GENERIC0_IMMEDIATE_UPDATE, 1);
 break;
 case 1:
 REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
-AFMT_GENERIC1_FRAME_UPDATE, 1);
+AFMT_GENERIC1_IMMEDIATE_UPDATE, 1);
 break;
 case 2:
 REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
-AFMT_GENERIC2_FRAME_UPDATE, 1);
+AFMT_GENERIC2_IMMEDIATE_UPDATE, 1);
 break;
 case 3:
 REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
-AFMT_GENERIC3_FRAME_UPDATE, 1);
+AFMT_GENERIC3_IMMEDIATE_UPDATE, 1);
 break;
 case 4:
 REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
-AFMT_GENERIC4_FRAME_UPDATE, 1);
+AFMT_GENERIC4_IMMEDIATE_UPDATE, 1);
 break;
 case 5:
 REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
-AFMT_GENERIC5_FRAME_UPDATE, 1);
+AFMT_GENERIC5_IMMEDIATE_UPDATE, 1);
 break;
 case 6:
 REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
-AFMT_GENERIC6_FRAME_UPDATE, 1);
+AFMT_GENERIC6_IMMEDIATE_UPDATE, 1);
 break;
 case 7:
 REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
-AFMT_GENERIC7_FRAME_UPDATE, 1);
+AFMT_GENERIC7_IMMEDIATE_UPDATE, 1);
 break;
 default:
 break;


@@ -281,7 +281,14 @@ struct dcn10_stream_enc_registers {
SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC2_FRAME_UPDATE, mask_sh),\
SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC3_FRAME_UPDATE, mask_sh),\
SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC4_FRAME_UPDATE, mask_sh),\
SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC0_IMMEDIATE_UPDATE, mask_sh),\
SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC1_IMMEDIATE_UPDATE, mask_sh),\
SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC2_IMMEDIATE_UPDATE, mask_sh),\
SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC3_IMMEDIATE_UPDATE, mask_sh),\
SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC4_IMMEDIATE_UPDATE, mask_sh),\
SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC5_IMMEDIATE_UPDATE, mask_sh),\
SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC6_IMMEDIATE_UPDATE, mask_sh),\
SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC7_IMMEDIATE_UPDATE, mask_sh),\
SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC5_FRAME_UPDATE, mask_sh),\
SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC6_FRAME_UPDATE, mask_sh),\
SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC7_FRAME_UPDATE, mask_sh),\
@@ -345,7 +352,14 @@ struct dcn10_stream_enc_registers {
type AFMT_GENERIC2_FRAME_UPDATE;\
type AFMT_GENERIC3_FRAME_UPDATE;\
type AFMT_GENERIC4_FRAME_UPDATE;\
type AFMT_GENERIC0_IMMEDIATE_UPDATE;\
type AFMT_GENERIC1_IMMEDIATE_UPDATE;\
type AFMT_GENERIC2_IMMEDIATE_UPDATE;\
type AFMT_GENERIC3_IMMEDIATE_UPDATE;\
type AFMT_GENERIC4_IMMEDIATE_UPDATE;\
type AFMT_GENERIC5_IMMEDIATE_UPDATE;\
type AFMT_GENERIC6_IMMEDIATE_UPDATE;\
type AFMT_GENERIC7_IMMEDIATE_UPDATE;\
type AFMT_GENERIC5_FRAME_UPDATE;\
type AFMT_GENERIC6_FRAME_UPDATE;\
type AFMT_GENERIC7_FRAME_UPDATE;\


@@ -1457,8 +1457,8 @@ static void dcn20_update_dchubp_dpp(
 /* Any updates are handled in dc interface, just need to apply existing for plane enable */
 if ((pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.opp_changed ||
-pipe_ctx->update_flags.bits.scaler || pipe_ctx->update_flags.bits.viewport)
-&& pipe_ctx->stream->cursor_attributes.address.quad_part != 0) {
+pipe_ctx->update_flags.bits.scaler || viewport_changed == true) &&
+pipe_ctx->stream->cursor_attributes.address.quad_part != 0) {
dc->hwss.set_cursor_position(pipe_ctx);
dc->hwss.set_cursor_attribute(pipe_ctx);


@@ -167,7 +167,9 @@
 LE_SF(DCIO_SOFT_RESET, UNIPHYB_SOFT_RESET, mask_sh),\
 LE_SF(DCIO_SOFT_RESET, UNIPHYC_SOFT_RESET, mask_sh),\
 LE_SF(DCIO_SOFT_RESET, UNIPHYD_SOFT_RESET, mask_sh),\
-LE_SF(DCIO_SOFT_RESET, UNIPHYE_SOFT_RESET, mask_sh)
+LE_SF(DCIO_SOFT_RESET, UNIPHYE_SOFT_RESET, mask_sh),\
+LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DP4, mask_sh),\
+LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, mask_sh)
#define LINK_ENCODER_MASK_SH_LIST_DCN20(mask_sh)\
LINK_ENCODER_MASK_SH_LIST_DCN10(mask_sh),\


@@ -3141,7 +3141,7 @@ static bool dcn20_validate_bandwidth_internal(struct dc *dc, struct dc_state *co
 int vlevel = 0;
 int pipe_split_from[MAX_PIPES];
 int pipe_cnt = 0;
-display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_KERNEL);
+display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_ATOMIC);
DC_LOGGER_INIT(dc->ctx->logger);
BW_VAL_TRACE_COUNT();
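GFP_KERNEL allocations may sleep to reclaim memory, which is illegal wherever the caller cannot sleep (spinlock held, interrupts disabled); GFP_ATOMIC never sleeps, at the cost of drawing on reserves and failing more readily. The change implies this validation path can be reached from such a context. A userspace sketch of the invariant, with illustrative names:

#include <assert.h>
#include <stdbool.h>
#include <stdlib.h>

static bool in_atomic_context; /* stand-in for the kernel's context state */

enum gfp_like { GFP_KERNEL_LIKE, GFP_ATOMIC_LIKE };

static void *alloc(size_t n, enum gfp_like flags)
{
    /* Sleeping allocations must not run in atomic context. */
    assert(flags == GFP_ATOMIC_LIKE || !in_atomic_context);
    return malloc(n);
}

int main(void)
{
    in_atomic_context = true;         /* e.g. validating under a spinlock */
    free(alloc(64, GFP_ATOMIC_LIKE)); /* fine: never sleeps */
    /* alloc(64, GFP_KERNEL_LIKE) here would trip the assertion */
    return 0;
}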


@@ -61,7 +61,10 @@
 DPCS_DCN2_MASK_SH_LIST(mask_sh),\
 LE_SF(DPCSTX0_DPCSTX_TX_CNTL, DPCS_TX_DATA_ORDER_INVERT_18_BIT, mask_sh),\
 LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL0, RDPCS_PHY_TX_VBOOST_LVL, mask_sh),\
-LE_SF(RDPCSTX0_RDPCSTX_CLOCK_CNTL, RDPCS_TX_CLK_EN, mask_sh)
+LE_SF(RDPCSTX0_RDPCSTX_CLOCK_CNTL, RDPCS_TX_CLK_EN, mask_sh),\
+LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DP4, mask_sh),\
+LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, mask_sh)
void dcn30_link_encoder_construct(
struct dcn20_link_encoder *enc20,


@@ -491,6 +491,7 @@ static const struct dcn10_link_enc_hpd_registers link_enc_hpd_regs[] = {
[id] = {\
LE_DCN3_REG_LIST(id), \
UNIPHY_DCN2_REG_LIST(phyid), \
SRI(DP_DPHY_INTERNAL_CTRL, DP, id) \
}
static const struct dce110_aux_registers_shift aux_shift = {


@@ -63,6 +63,7 @@ typedef struct {
#define BPP_INVALID 0
#define BPP_BLENDED_PIPE 0xffffffff
#define DCN30_MAX_DSC_IMAGE_WIDTH 5184
static void DisplayPipeConfiguration(struct display_mode_lib *mode_lib);
static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation(
@@ -3984,6 +3985,9 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
} else if (v->PlaneRequiredDISPCLKWithoutODMCombine > v->MaxDispclkRoundedDownToDFSGranularity) {
v->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_2to1;
v->PlaneRequiredDISPCLK = v->PlaneRequiredDISPCLKWithODMCombine2To1;
} else if (v->DSCEnabled[k] && (v->HActive[k] > DCN30_MAX_DSC_IMAGE_WIDTH)) {
v->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_2to1;
v->PlaneRequiredDISPCLK = v->PlaneRequiredDISPCLKWithODMCombine2To1;
} else {
v->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_disabled;
v->PlaneRequiredDISPCLK = v->PlaneRequiredDISPCLKWithoutODMCombine;


@@ -101,6 +101,13 @@ enum bp_pipe_control_action {
ASIC_PIPE_INIT
};
enum bp_lvtma_control_action {
LVTMA_CONTROL_LCD_BLOFF = 2,
LVTMA_CONTROL_LCD_BLON = 3,
LVTMA_CONTROL_POWER_ON = 12,
LVTMA_CONTROL_POWER_OFF = 13
};
struct bp_encoder_control {
enum bp_encoder_control_action action;
enum engine_id engine_id;


@@ -431,6 +431,9 @@ struct fixed31_32 dc_fixpt_log(struct fixed31_32 arg);
*/
static inline struct fixed31_32 dc_fixpt_pow(struct fixed31_32 arg1, struct fixed31_32 arg2)
{
if (arg1.value == 0)
return arg2.value == 0 ? dc_fixpt_one : dc_fixpt_zero;
return dc_fixpt_exp(
dc_fixpt_mul(
dc_fixpt_log(arg1),
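The guard exists because dc_fixpt_pow() computes x^y as exp(y * log(x)), and log(0) diverges, so a zero base crashed instead of returning the conventional answers 0^0 = 1 and 0^y = 0. The same guard in plain double arithmetic (a sketch, not the fixed-point code; build with cc sketch.c -lm):

#include <math.h>
#include <stdio.h>

static double pow_via_exp_log(double x, double y)
{
    if (x == 0.0)
        return (y == 0.0) ? 1.0 : 0.0; /* handle log(0) up front */
    return exp(y * log(x));
}

int main(void)
{
    printf("%g\n", pow_via_exp_log(2.0, 10.0)); /* 1024 */
    printf("%g\n", pow_via_exp_log(0.0, 2.4));  /* 0 instead of NaN */
    printf("%g\n", pow_via_exp_log(0.0, 0.0));  /* 1 by convention */
    return 0;
}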


@@ -324,22 +324,44 @@ static void apply_below_the_range(struct core_freesync *core_freesync,
 /* Choose number of frames to insert based on how close it
 * can get to the mid point of the variable range.
+* - Delta for CEIL: delta_from_mid_point_in_us_1
+* - Delta for FLOOR: delta_from_mid_point_in_us_2
 */
-if ((frame_time_in_us / mid_point_frames_ceil) > in_out_vrr->min_duration_in_us &&
-(delta_from_mid_point_in_us_1 < delta_from_mid_point_in_us_2 ||
-mid_point_frames_floor < 2)) {
-frames_to_insert = mid_point_frames_ceil;
-delta_from_mid_point_delta_in_us = delta_from_mid_point_in_us_2 -
-delta_from_mid_point_in_us_1;
-} else {
+if ((last_render_time_in_us / mid_point_frames_ceil) < in_out_vrr->min_duration_in_us) {
+/* Check for out of range.
+* If using CEIL produces a value that is out of range,
+* then we are forced to use FLOOR.
+*/
+frames_to_insert = mid_point_frames_floor;
+} else if (mid_point_frames_floor < 2) {
+/* Check if FLOOR would result in non-LFC. In this case
+* choose to use CEIL
+*/
+frames_to_insert = mid_point_frames_ceil;
+} else if (delta_from_mid_point_in_us_1 < delta_from_mid_point_in_us_2) {
+/* If choosing CEIL results in a frame duration that is
+* closer to the mid point of the range.
+* Choose CEIL
+*/
+frames_to_insert = mid_point_frames_ceil;
+} else {
+/* If choosing FLOOR results in a frame duration that is
+* closer to the mid point of the range.
+* Choose FLOOR
+*/
 frames_to_insert = mid_point_frames_floor;
-delta_from_mid_point_delta_in_us = delta_from_mid_point_in_us_1 -
-delta_from_mid_point_in_us_2;
 }
+/* Prefer current frame multiplier when BTR is enabled unless it drifts
+* too far from the midpoint
+*/
+if (delta_from_mid_point_in_us_1 < delta_from_mid_point_in_us_2) {
+delta_from_mid_point_delta_in_us = delta_from_mid_point_in_us_2 -
+delta_from_mid_point_in_us_1;
+} else {
+delta_from_mid_point_delta_in_us = delta_from_mid_point_in_us_1 -
+delta_from_mid_point_in_us_2;
+}
if (in_out_vrr->btr.frames_to_insert != 0 &&
delta_from_mid_point_delta_in_us < BTR_DRIFT_MARGIN) {
if (((last_render_time_in_us / in_out_vrr->btr.frames_to_insert) <
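To make the rewritten selection concrete with illustrative numbers: suppose last_render_time_in_us = 40000 and the panel's variable range is 10000-20000 us, so the midpoint is 15000 us. Then mid_point_frames_ceil = 3 (40000/3 = 13333 us per inserted frame, 1667 us from the midpoint) and mid_point_frames_floor = 2 (20000 us per frame, 5000 us off). The first branch does not fire (13333 us is not below the 10000 us minimum), the floor is not below the 2-frame LFC limit, and CEIL's delta is the smaller one, so 3 frames are inserted. The separated-out drift check then prefers the currently applied multiplier unless, per the comment above, it drifts more than BTR_DRIFT_MARGIN from the midpoint, which keeps the frame count from flapping between neighbouring multipliers.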


@@ -2204,14 +2204,17 @@ static const struct throttling_logging_label {
 };
 static void arcturus_log_thermal_throttling_event(struct smu_context *smu)
 {
+int ret;
 int throttler_idx, throtting_events = 0, buf_idx = 0;
 struct amdgpu_device *adev = smu->adev;
 uint32_t throttler_status;
 char log_buf[256];
-arcturus_get_smu_metrics_data(smu,
-METRICS_THROTTLER_STATUS,
-&throttler_status);
+ret = arcturus_get_smu_metrics_data(smu,
+METRICS_THROTTLER_STATUS,
+&throttler_status);
+if (ret)
+return;
memset(log_buf, 0, sizeof(log_buf));
for (throttler_idx = 0; throttler_idx < ARRAY_SIZE(logging_label);


@@ -979,10 +979,7 @@ static int vega20_disable_all_smu_features(struct pp_hwmgr *hwmgr)
 {
 struct vega20_hwmgr *data =
 (struct vega20_hwmgr *)(hwmgr->backend);
-uint64_t features_enabled;
-int i;
-bool enabled;
-int ret = 0;
+int i, ret = 0;
PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr,
PPSMC_MSG_DisableAllSmuFeatures,
@@ -990,17 +987,8 @@
 "[DisableAllSMUFeatures] Failed to disable all smu features!",
 return ret);
-ret = vega20_get_enabled_smc_features(hwmgr, &features_enabled);
-PP_ASSERT_WITH_CODE(!ret,
-"[DisableAllSMUFeatures] Failed to get enabled smc features!",
-return ret);
-for (i = 0; i < GNLD_FEATURES_MAX; i++) {
-enabled = (features_enabled & data->smu_features[i].smu_feature_bitmap) ?
-true : false;
-data->smu_features[i].enabled = enabled;
-data->smu_features[i].supported = enabled;
-}
+for (i = 0; i < GNLD_FEATURES_MAX; i++)
+data->smu_features[i].enabled = 0;
return 0;
}
@@ -1652,12 +1640,6 @@ static void vega20_init_powergate_state(struct pp_hwmgr *hwmgr)
 data->uvd_power_gated = true;
 data->vce_power_gated = true;
-if (data->smu_features[GNLD_DPM_UVD].enabled)
-data->uvd_power_gated = false;
-if (data->smu_features[GNLD_DPM_VCE].enabled)
-data->vce_power_gated = false;
}
static int vega20_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
@@ -3230,10 +3212,11 @@ static int vega20_get_ppfeature_status(struct pp_hwmgr *hwmgr, char *buf)
 static int vega20_set_ppfeature_status(struct pp_hwmgr *hwmgr, uint64_t new_ppfeature_masks)
 {
-uint64_t features_enabled;
-uint64_t features_to_enable;
-uint64_t features_to_disable;
-int ret = 0;
+struct vega20_hwmgr *data =
+(struct vega20_hwmgr *)(hwmgr->backend);
+uint64_t features_enabled, features_to_enable, features_to_disable;
+int i, ret = 0;
+bool enabled;
if (new_ppfeature_masks >= (1ULL << GNLD_FEATURES_MAX))
return -EINVAL;
@@ -3262,6 +3245,17 @@ static int vega20_set_ppfeature_status(struct pp_hwmgr *hwmgr, uint64_t new_ppfe
return ret;
}
/* Update the cached feature enablement state */
ret = vega20_get_enabled_smc_features(hwmgr, &features_enabled);
if (ret)
return ret;
for (i = 0; i < GNLD_FEATURES_MAX; i++) {
enabled = (features_enabled & data->smu_features[i].smu_feature_bitmap) ?
true : false;
data->smu_features[i].enabled = enabled;
}
return 0;
}


@@ -14930,7 +14930,7 @@ static int intel_atomic_check(struct drm_device *dev,
 if (any_ms && !check_digital_port_conflicts(state)) {
 drm_dbg_kms(&dev_priv->drm,
 "rejecting conflicting digital port configuration\n");
-ret = EINVAL;
+ret = -EINVAL;
goto fail;
}


@@ -2044,9 +2044,12 @@ DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability);
 static int i915_lpsp_capability_show(struct seq_file *m, void *data)
 {
 struct drm_connector *connector = m->private;
-struct intel_encoder *encoder =
-intel_attached_encoder(to_intel_connector(connector));
 struct drm_i915_private *i915 = to_i915(connector->dev);
+struct intel_encoder *encoder;
+encoder = intel_attached_encoder(to_intel_connector(connector));
+if (!encoder)
+return -ENODEV;
if (connector->status != connector_status_connected)
return -ENODEV;


@@ -4146,6 +4146,12 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC6,
 },
 },
+{
+.name = "TC cold off",
+.domains = TGL_TC_COLD_OFF_POWER_DOMAINS,
+.ops = &tgl_tc_cold_off_ops,
+.id = DISP_PW_ID_NONE,
+},
{
.name = "AUX A",
.domains = TGL_AUX_A_IO_POWER_DOMAINS,
@@ -4332,12 +4338,6 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
 .hsw.irq_pipe_mask = BIT(PIPE_D),
 },
 },
-{
-.name = "TC cold off",
-.domains = TGL_TC_COLD_OFF_POWER_DOMAINS,
-.ops = &tgl_tc_cold_off_ops,
-.id = DISP_PW_ID_NONE,
-},
};
static const struct i915_power_well_desc rkl_power_wells[] = {
@@ -5240,10 +5240,10 @@ struct buddy_page_mask {
 };
 static const struct buddy_page_mask tgl_buddy_page_masks[] = {
-{ .num_channels = 1, .type = INTEL_DRAM_LPDDR4, .page_mask = 0xE },
 { .num_channels = 1, .type = INTEL_DRAM_DDR4, .page_mask = 0xF },
 { .num_channels = 2, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x1C },
 { .num_channels = 2, .type = INTEL_DRAM_DDR4, .page_mask = 0x1F },
+{ .num_channels = 4, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x38 },
 {}
};


@@ -70,6 +70,7 @@ static void vgpu_pci_cfg_mem_write(struct intel_vgpu *vgpu, unsigned int off,
{
u8 *cfg_base = vgpu_cfg_space(vgpu);
u8 mask, new, old;
pci_power_t pwr;
int i = 0;
for (; i < bytes && (off + i < sizeof(pci_cfg_space_rw_bmp)); i++) {
@@ -91,6 +92,15 @@
/* For other configuration space directly copy as it is. */
if (i < bytes)
memcpy(cfg_base + off + i, src + i, bytes - i);
if (off == vgpu->cfg_space.pmcsr_off && vgpu->cfg_space.pmcsr_off) {
pwr = (pci_power_t __force)(*(u16*)(&vgpu_cfg_space(vgpu)[off])
& PCI_PM_CTRL_STATE_MASK);
if (pwr == PCI_D3hot)
vgpu->d3_entered = true;
gvt_dbg_core("vgpu-%d power status changed to %d\n",
vgpu->id, pwr);
}
}
/**
@@ -366,6 +376,7 @@ void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu,
struct intel_gvt *gvt = vgpu->gvt;
const struct intel_gvt_device_info *info = &gvt->device_info;
u16 *gmch_ctl;
u8 next;
memcpy(vgpu_cfg_space(vgpu), gvt->firmware.cfg_space,
info->cfg_space_size);
@@ -401,6 +412,19 @@
pci_resource_len(gvt->gt->i915->drm.pdev, 2);
memset(vgpu_cfg_space(vgpu) + PCI_ROM_ADDRESS, 0, 4);
/* PM Support */
vgpu->cfg_space.pmcsr_off = 0;
if (vgpu_cfg_space(vgpu)[PCI_STATUS] & PCI_STATUS_CAP_LIST) {
next = vgpu_cfg_space(vgpu)[PCI_CAPABILITY_LIST];
do {
if (vgpu_cfg_space(vgpu)[next + PCI_CAP_LIST_ID] == PCI_CAP_ID_PM) {
vgpu->cfg_space.pmcsr_off = next + PCI_PM_CTRL;
break;
}
next = vgpu_cfg_space(vgpu)[next + PCI_CAP_LIST_NEXT];
} while (next);
}
}
/**
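The capability scan added above is the standard PCI pattern: config byte 0x34 (PCI_CAPABILITY_LIST) holds the offset of the first capability, each capability stores its ID at +0 and the offset of the next at +1, and 0 terminates the chain; the Power Management capability (ID 0x01) contains the PMCSR at +4, which is what the write handler then watches for D3hot transitions. A standalone sketch over a fake config space:

#include <stdint.h>
#include <stdio.h>

#define PCI_CAPABILITY_LIST 0x34
#define PCI_CAP_LIST_ID     0
#define PCI_CAP_LIST_NEXT   1
#define PCI_CAP_ID_PM       0x01
#define PCI_PM_CTRL         4 /* PMCSR offset within the PM capability */

int main(void)
{
    uint8_t cfg[256] = { 0 };
    uint8_t next;

    /* Fake chain: MSI (ID 0x05) at 0x50 -> PM (ID 0x01) at 0x60. */
    cfg[PCI_CAPABILITY_LIST] = 0x50;
    cfg[0x50 + PCI_CAP_LIST_ID] = 0x05;
    cfg[0x50 + PCI_CAP_LIST_NEXT] = 0x60;
    cfg[0x60 + PCI_CAP_LIST_ID] = PCI_CAP_ID_PM;
    cfg[0x60 + PCI_CAP_LIST_NEXT] = 0x00; /* end of list */

    next = cfg[PCI_CAPABILITY_LIST];
    while (next) {
        if (cfg[next + PCI_CAP_LIST_ID] == PCI_CAP_ID_PM) {
            printf("PMCSR at config offset 0x%02x\n", next + PCI_PM_CTRL);
            break;
        }
        next = cfg[next + PCI_CAP_LIST_NEXT];
    }
    return 0;
}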


@@ -2501,7 +2501,7 @@ int intel_vgpu_init_gtt(struct intel_vgpu *vgpu)
 return create_scratch_page_tree(vgpu);
 }
-static void intel_vgpu_destroy_all_ppgtt_mm(struct intel_vgpu *vgpu)
+void intel_vgpu_destroy_all_ppgtt_mm(struct intel_vgpu *vgpu)
{
struct list_head *pos, *n;
struct intel_vgpu_mm *mm;


@@ -279,4 +279,6 @@ int intel_vgpu_emulate_ggtt_mmio_read(struct intel_vgpu *vgpu,
int intel_vgpu_emulate_ggtt_mmio_write(struct intel_vgpu *vgpu,
unsigned int off, void *p_data, unsigned int bytes);
void intel_vgpu_destroy_all_ppgtt_mm(struct intel_vgpu *vgpu);
#endif /* _GVT_GTT_H_ */


@@ -106,6 +106,7 @@ struct intel_vgpu_pci_bar {
struct intel_vgpu_cfg_space {
unsigned char virtual_cfg_space[PCI_CFG_SPACE_EXP_SIZE];
struct intel_vgpu_pci_bar bar[INTEL_GVT_MAX_BAR_NUM];
u32 pmcsr_off;
};
#define vgpu_cfg_space(vgpu) ((vgpu)->cfg_space.virtual_cfg_space)
@@ -198,6 +199,8 @@ struct intel_vgpu {
struct intel_vgpu_submission submission;
struct radix_tree_root page_track_tree;
u32 hws_pga[I915_NUM_ENGINES];
/* Set on PCI_D3, reset on DMLR, not reflecting the actual PM state */
bool d3_entered;
struct dentry *debugfs;


@@ -257,6 +257,7 @@ void intel_gvt_release_vgpu(struct intel_vgpu *vgpu)
intel_gvt_deactivate_vgpu(vgpu);
mutex_lock(&vgpu->vgpu_lock);
vgpu->d3_entered = false;
intel_vgpu_clean_workloads(vgpu, ALL_ENGINES);
intel_vgpu_dmabuf_cleanup(vgpu);
mutex_unlock(&vgpu->vgpu_lock);
@@ -393,6 +394,7 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
INIT_RADIX_TREE(&vgpu->page_track_tree, GFP_KERNEL);
idr_init(&vgpu->object_idr);
intel_vgpu_init_cfg_space(vgpu, param->primary);
vgpu->d3_entered = false;
ret = intel_vgpu_init_mmio(vgpu);
if (ret)
@@ -557,10 +559,15 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
 /* full GPU reset or device model level reset */
 if (engine_mask == ALL_ENGINES || dmlr) {
 intel_vgpu_select_submission_ops(vgpu, ALL_ENGINES, 0);
-intel_vgpu_invalidate_ppgtt(vgpu);
+if (engine_mask == ALL_ENGINES)
+intel_vgpu_invalidate_ppgtt(vgpu);
 /*fence will not be reset during virtual reset */
 if (dmlr) {
-intel_vgpu_reset_gtt(vgpu);
+if(!vgpu->d3_entered) {
+intel_vgpu_invalidate_ppgtt(vgpu);
+intel_vgpu_destroy_all_ppgtt_mm(vgpu);
+}
 intel_vgpu_reset_ggtt(vgpu, true);
intel_vgpu_reset_resource(vgpu);
}
@@ -572,7 +579,14 @@
 intel_vgpu_reset_cfg_space(vgpu);
 /* only reset the failsafe mode when dmlr reset */
 vgpu->failsafe = false;
-vgpu->pv_notified = false;
+/*
+* PCI_D0 is set before dmlr, so reset d3_entered here
+* after done using.
+*/
+if(vgpu->d3_entered)
+vgpu->d3_entered = false;
+else
+vgpu->pv_notified = false;
}
}


@@ -445,8 +445,6 @@ static void i915_pmu_event_destroy(struct perf_event *event)
 container_of(event->pmu, typeof(*i915), pmu.base);
 drm_WARN_ON(&i915->drm, event->parent);
-module_put(THIS_MODULE);
}
static int
@@ -538,10 +536,8 @@ static int i915_pmu_event_init(struct perf_event *event)
 if (ret)
 return ret;
-if (!event->parent) {
-__module_get(THIS_MODULE);
+if (!event->parent)
 event->destroy = i915_pmu_event_destroy;
-}
return 0;
}
@@ -1130,6 +1126,7 @@ void i915_pmu_register(struct drm_i915_private *i915)
if (!pmu->base.attr_groups)
goto err_attr;
pmu->base.module = THIS_MODULE;
pmu->base.task_ctx_nr = perf_invalid_context;
pmu->base.event_init = i915_pmu_event_init;
pmu->base.add = i915_pmu_event_add;
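The pattern change across these three hunks: rather than each event taking a module reference around its own destroy callback (racy, because the final module_put() can run code inside the module being unloaded), the driver sets pmu->base.module and lets the perf core pin the owner for the lifetime of every event. A toy model of "let the core hold the owner reference" (illustrative, not the perf API):

#include <stdio.h>

struct module_like { int refcnt; const char *name; };

struct pmu_like {
    struct module_like *owner; /* like pmu->base.module = THIS_MODULE */
};

/* The core, not the event code, pins and releases the owner. */
static void core_event_init(struct pmu_like *p) { p->owner->refcnt++; }
static void core_event_free(struct pmu_like *p) { p->owner->refcnt--; }

int main(void)
{
    struct module_like m = { .refcnt = 0, .name = "i915-like" };
    struct pmu_like p = { .owner = &m };

    core_event_init(&p);
    printf("%s refcnt=%d\n", m.name, m.refcnt);
    core_event_free(&p);
    printf("%s refcnt=%d\n", m.name, m.refcnt);
    return 0;
}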


@@ -8,8 +8,6 @@
 #include "../i915_selftest.h"
 #include "i915_random.h"
-#define SZ_8G (1ULL << 33)
static void __igt_dump_block(struct i915_buddy_mm *mm,
struct i915_buddy_block *block,
bool buddy)
@@ -281,18 +279,22 @@ static int igt_check_mm(struct i915_buddy_mm *mm)
 static void igt_mm_config(u64 *size, u64 *chunk_size)
 {
 I915_RND_STATE(prng);
-u64 s, ms;
+u32 s, ms;
 /* Nothing fancy, just try to get an interesting bit pattern */
 prandom_seed_state(&prng, i915_selftest.random_seed);
-s = i915_prandom_u64_state(&prng) & (SZ_8G - 1);
-ms = BIT_ULL(12 + (prandom_u32_state(&prng) % ilog2(s >> 12)));
-s = max(s & -ms, ms);
+/* Let size be a random number of pages up to 8 GB (2M pages) */
+s = 1 + i915_prandom_u32_max_state((BIT(33 - 12)) - 1, &prng);
+/* Let the chunk size be a random power of 2 less than size */
+ms = BIT(i915_prandom_u32_max_state(ilog2(s), &prng));
+/* Round size down to the chunk size */
+s &= -ms;
-*chunk_size = ms;
-*size = s;
+/* Convert from pages to bytes */
+*chunk_size = (u64)ms << 12;
+*size = (u64)s << 12;
}
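The old code fed s >> 12 straight into ilog2(); whenever the random 64-bit draw landed below 4096 that argument was 0, and ilog2(0) is undefined. The rewrite draws a page count of at least 1, so ilog2() always sees a nonzero value. A sketch of the guard:

#include <stdio.h>
#include <stdlib.h>

static unsigned int ilog2_u32(unsigned int v)
{
    /* caller must guarantee v != 0: __builtin_clz(0) is undefined */
    return 31u - (unsigned int)__builtin_clz(v);
}

int main(void)
{
    /* rand() % N may be 0; 1 + rand() % N never is. */
    unsigned int pages = 1u + (unsigned int)rand() % ((1u << 21) - 1);
    unsigned int chunk_log = ilog2_u32(pages);

    printf("pages=%u ilog2=%u chunk=%llu bytes\n",
           pages, chunk_log, (unsigned long long)1 << (chunk_log + 12));
    return 0;
}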
static int igt_buddy_alloc_smoke(void *arg)


@@ -78,6 +78,7 @@ static void mock_device_release(struct drm_device *dev)
drm_mode_config_cleanup(&i915->drm);
out:
i915_params_free(&i915->params);
put_device(&i915->drm.pdev->dev);
i915->drm.pdev = NULL;
}
@@ -165,6 +166,8 @@ struct drm_i915_private *mock_gem_device(void)
i915->drm.pdev = pdev;
drmm_add_final_kfree(&i915->drm, i915);
i915_params_copy(&i915->params, &i915_modparams);
intel_runtime_pm_init_early(&i915->runtime_pm);
/* Using the global GTT may ask questions about KMS users, so prepare */


@@ -179,6 +179,7 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
virtio_gpu_cmd_submit(vgdev, buf, exbuf->size,
vfpriv->ctx_id, buflist, out_fence);
dma_fence_put(&out_fence->f);
virtio_gpu_notify(vgdev);
return 0;
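The leak pattern behind the added dma_fence_put(): out_fence is created with one reference owned by the ioctl, and the submission path takes its own reference, so once the command is queued the ioctl must drop its creation reference or every execbuffer call leaks a fence. A generic refcount sketch of the rule (illustrative, not the dma_fence API):

#include <stdio.h>
#include <stdlib.h>

struct fence_like { int refcnt; };

static struct fence_like *fence_create(void)
{
    struct fence_like *f = malloc(sizeof(*f));

    if (!f)
        abort();
    f->refcnt = 1; /* creation reference, owned by the caller */
    return f;
}

static void fence_get(struct fence_like *f) { f->refcnt++; }

static void fence_put(struct fence_like *f)
{
    if (--f->refcnt == 0) {
        printf("fence freed\n");
        free(f);
    }
}

static void submit(struct fence_like *f)
{
    fence_get(f); /* the queued command holds its own reference */
}

static void complete(struct fence_like *f)
{
    fence_put(f); /* completion drops the command's reference */
}

int main(void)
{
    struct fence_like *f = fence_create();

    submit(f);
    fence_put(f);  /* the fix: drop the creation reference after queuing */
    complete(f);   /* last reference gone -> "fence freed" */
    return 0;
}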


@@ -79,6 +79,7 @@ void virtio_gpu_cleanup_object(struct virtio_gpu_object *bo)
}
sg_free_table(shmem->pages);
kfree(shmem->pages);
shmem->pages = NULL;
drm_gem_shmem_unpin(&bo->base.base);
}