diff --git a/Documentation/DocBook/gpu.tmpl b/Documentation/DocBook/gpu.tmpl index 03f01e76add7..a8669330b456 100644 --- a/Documentation/DocBook/gpu.tmpl +++ b/Documentation/DocBook/gpu.tmpl @@ -124,6 +124,43 @@ [Insert diagram of typical DRM stack here] + + Style Guidelines + + For consistency this documentation uses American English. Abbreviations + are written as all-uppercase, for example: DRM, KMS, IOCTL, CRTC, and so + on. To aid in reading, the documentation makes full use of the markup + characters kerneldoc provides: @parameter for function parameters, @member + for structure members, &structure to reference structures and + function() for functions. These all get automatically hyperlinked if + kerneldoc for the referenced objects exists. When referencing entries in + function vtables please use ->vfunc(). Note that kerneldoc does + not support referencing struct members directly, so please add a reference + to the vtable struct somewhere in the same paragraph or at least section. + + + Except in special situations (to separate locked from unlocked variants) + locking requirements for functions aren't documented in the kerneldoc. + Instead locking should be checked at runtime using e.g. + WARN_ON(!mutex_is_locked(...));. Since it's much easier to + ignore documentation than runtime noise, this provides more value. And on + top of that runtime checks do need to be updated when the locking rules + change, which increases the chances that they're correct. Within the + documentation the locking rules should be explained in the relevant + structures: either in the comment for the lock explaining what it + protects, or as a note on the data fields about which lock protects them, or + both. + + + Functions which have a non-void return value should have a + section called "Returns" explaining the expected return values in + different cases and their meanings. Currently there's no consensus whether + that section name should be all upper-case or not, and whether it should + end in a colon or not. Go with the file-local style. Other common section + names are "Notes" with information for dangerous or tricky corner cases, + and "FIXME" where the interface could be cleaned up. + + @@ -946,12 +983,10 @@ int max_width, max_height; Atomic Mode Setting Function Reference !Edrivers/gpu/drm/drm_atomic.c +!Idrivers/gpu/drm/drm_atomic.c - Frame Buffer Creation - struct drm_framebuffer *(*fb_create)(struct drm_device *dev, - struct drm_file *file_priv, - struct drm_mode_fb_cmd2 *mode_cmd); + Frame Buffer Abstraction Frame buffers are abstract memory objects that provide a source of pixels to scanout to a CRTC. Applications explicitly request the @@ -969,73 +1004,6 @@ int max_width, max_height; handles, e.g. vmwgfx directly exposes special TTM handles to userspace and so expects TTM handles in the create ioctl and not GEM handles. - - Drivers must first validate the requested frame buffer parameters passed - through the mode_cmd argument. In particular this is where invalid - sizes, pixel formats or pitches can be caught. - - - If the parameters are deemed valid, drivers then create, initialize and - return an instance of struct drm_framebuffer. - If desired the instance can be embedded in a larger driver-specific - structure. Drivers must fill its width, - height, pitches, - offsets, depth, - bits_per_pixel and - pixel_format fields from the values passed - through the drm_mode_fb_cmd2 argument. They - should call the drm_helper_mode_fill_fb_struct - helper function to do so.
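The removed paragraphs above (together with the drm_framebuffer_init step described in the next removed paragraph) amount to a fairly mechanical sequence. A minimal sketch of a driver's fb_create implementation, assuming a hypothetical "foo" driver with its own foo_framebuffer wrapper; the pitch check is a stand-in for real per-format validation and is not taken from any real driver:

#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>

struct foo_framebuffer {
	struct drm_framebuffer base;
	/* driver-private state would live here */
};

/* In a real driver this provides .destroy and .create_handle as usual. */
static const struct drm_framebuffer_funcs foo_fb_funcs;

static struct drm_framebuffer *
foo_fb_create(struct drm_device *dev, struct drm_file *file_priv,
	      struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct foo_framebuffer *foo_fb;
	int ret;

	/* Catch invalid sizes, pixel formats or pitches up front
	 * (placeholder check assuming 4 bytes per pixel). */
	if (!mode_cmd->width || !mode_cmd->height ||
	    mode_cmd->pitches[0] < mode_cmd->width * 4)
		return ERR_PTR(-EINVAL);

	foo_fb = kzalloc(sizeof(*foo_fb), GFP_KERNEL);
	if (!foo_fb)
		return ERR_PTR(-ENOMEM);

	/* Copy width, height, pitches, offsets, pixel format, ... */
	drm_helper_mode_fill_fb_struct(&foo_fb->base, mode_cmd);

	/* Publishes the framebuffer, so this must be the last init step. */
	ret = drm_framebuffer_init(dev, &foo_fb->base, &foo_fb_funcs);
	if (ret) {
		kfree(foo_fb);
		return ERR_PTR(ret);
	}

	return &foo_fb->base;
}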
- - - - The initialization of the new framebuffer instance is finalized with a - call to drm_framebuffer_init which takes a pointer - to DRM frame buffer operations (struct - drm_framebuffer_funcs). Note that this function - publishes the framebuffer and so from this point on it can be accessed - concurrently from other threads. Hence it must be the last step in the - driver's framebuffer initialization sequence. Frame buffer operations - are - - - int (*create_handle)(struct drm_framebuffer *fb, - struct drm_file *file_priv, unsigned int *handle); - - Create a handle to the frame buffer underlying memory object. If - the frame buffer uses a multi-plane format, the handle will - reference the memory object associated with the first plane. - - - Drivers call drm_gem_handle_create to create - the handle. - - - - void (*destroy)(struct drm_framebuffer *framebuffer); - - Destroy the frame buffer object and frees all associated - resources. Drivers must call - drm_framebuffer_cleanup to free resources - allocated by the DRM core for the frame buffer object, and must - make sure to unreference all memory objects associated with the - frame buffer. Handles created by the - create_handle operation are released by - the DRM core. - - - - int (*dirty)(struct drm_framebuffer *framebuffer, - struct drm_file *file_priv, unsigned flags, unsigned color, - struct drm_clip_rect *clips, unsigned num_clips); - - This optional operation notifies the driver that a region of the - frame buffer has changed in response to a DRM_IOCTL_MODE_DIRTYFB - ioctl call. - - - - The lifetime of a drm framebuffer is controlled with a reference count, drivers can grab additional references with @@ -1173,137 +1141,6 @@ int max_width, max_height; pointer to CRTC functions. - - CRTC Operations - - Set Configuration - int (*set_config)(struct drm_mode_set *set); - - Apply a new CRTC configuration to the device. The configuration - specifies a CRTC, a frame buffer to scan out from, a (x,y) position in - the frame buffer, a display mode and an array of connectors to drive - with the CRTC if possible. - - - If the frame buffer specified in the configuration is NULL, the driver - must detach all encoders connected to the CRTC and all connectors - attached to those encoders and disable them. - - - This operation is called with the mode config lock held. - - - Note that the drm core has no notion of restoring the mode setting - state after resume, since all resume handling is in the full - responsibility of the driver. The common mode setting helper library - though provides a helper which can be used for this: - drm_helper_resume_force_mode. - - - - Page Flipping - int (*page_flip)(struct drm_crtc *crtc, struct drm_framebuffer *fb, - struct drm_pending_vblank_event *event); - - Schedule a page flip to the given frame buffer for the CRTC. This - operation is called with the mode config mutex held. - - - Page flipping is a synchronization mechanism that replaces the frame - buffer being scanned out by the CRTC with a new frame buffer during - vertical blanking, avoiding tearing. When an application requests a page - flip the DRM core verifies that the new frame buffer is large enough to - be scanned out by the CRTC in the currently configured mode and then - calls the CRTC page_flip operation with a - pointer to the new frame buffer. - - - The page_flip operation schedules a page flip. 
- Once any pending rendering targeting the new frame buffer has - completed, the CRTC will be reprogrammed to display that frame buffer - after the next vertical refresh. The operation must return immediately - without waiting for rendering or page flip to complete and must block - any new rendering to the frame buffer until the page flip completes. - - - If a page flip can be successfully scheduled the driver must set the - drm_crtc->fb field to the new framebuffer pointed to - by fb. This is important so that the reference counting - on framebuffers stays balanced. - - - If a page flip is already pending, the - page_flip operation must return - -EBUSY. - - - To synchronize page flip to vertical blanking the driver will likely - need to enable vertical blanking interrupts. It should call - drm_vblank_get for that purpose, and call - drm_vblank_put after the page flip completes. - - - If the application has requested to be notified when page flip completes - the page_flip operation will be called with a - non-NULL event argument pointing to a - drm_pending_vblank_event instance. Upon page - flip completion the driver must call drm_send_vblank_event - to fill in the event and send to wake up any waiting processes. - This can be performed with - event_lock, flags); - ... - drm_send_vblank_event(dev, pipe, event); - spin_unlock_irqrestore(&dev->event_lock, flags); - ]]> - - - FIXME: Could drivers that don't need to wait for rendering to complete - just add the event to dev->vblank_event_list and - let the DRM core handle everything, as for "normal" vertical blanking - events? - - - While waiting for the page flip to complete, the - event->base.link list head can be used freely by - the driver to store the pending event in a driver-specific list. - - - If the file handle is closed before the event is signaled, drivers must - take care to destroy the event in their - preclose operation (and, if needed, call - drm_vblank_put). - - - - Miscellaneous - - - void (*set_property)(struct drm_crtc *crtc, - struct drm_property *property, uint64_t value); - - Set the value of the given CRTC property to - value. See - for more information about properties. - - - - void (*gamma_set)(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b, - uint32_t start, uint32_t size); - - Apply a gamma table to the device. The operation is optional. - - - - void (*destroy)(struct drm_crtc *crtc); - - Destroy the CRTC when not needed anymore. See - . - - - - - Planes (struct <structname>drm_plane</structname>) @@ -1320,7 +1157,7 @@ int max_width, max_height; DRM_PLANE_TYPE_PRIMARY represents a "main" plane for a CRTC. Primary planes are the planes operated upon by CRTC modesetting and flipping - operations described in . + operations described in the page_flip hook in drm_crtc_funcs. DRM_PLANE_TYPE_CURSOR represents a "cursor" plane for a CRTC. Cursor @@ -1357,52 +1194,6 @@ int max_width, max_height; primary plane with standard capabilities. - - Plane Operations - - - int (*update_plane)(struct drm_plane *plane, struct drm_crtc *crtc, - struct drm_framebuffer *fb, int crtc_x, int crtc_y, - unsigned int crtc_w, unsigned int crtc_h, - uint32_t src_x, uint32_t src_y, - uint32_t src_w, uint32_t src_h); - - Enable and configure the plane to use the given CRTC and frame buffer. - - - The source rectangle in frame buffer memory coordinates is given by - the src_x, src_y, - src_w and src_h - parameters (as 16.16 fixed point values). Devices that don't support - subpixel plane coordinates can ignore the fractional part. 
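As a small illustration of the 16.16 fixed-point convention used for the src_* parameters just described, a device without sub-pixel support can simply truncate the values; the foo_ helper names below are made up for this example:

#include <linux/types.h>

/* Integer pixel part of a 16.16 fixed-point plane coordinate. */
static inline uint32_t foo_fixed16_to_int(uint32_t val)
{
	return val >> 16;
}

/* Fractional part; hardware without sub-pixel support ignores this. */
static inline uint32_t foo_fixed16_frac(uint32_t val)
{
	return val & 0xffff;
}

/* e.g. src_x = 0x00028000 encodes 2.5 pixels: integer 2, fraction 0x8000. */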
- - - The destination rectangle in CRTC coordinates is given by the - crtc_x, crtc_y, - crtc_w and crtc_h - parameters (as integer values). Devices scale the source rectangle to - the destination rectangle. If scaling is not supported, and the source - rectangle size doesn't match the destination rectangle size, the - driver must return a -EINVAL error. - - - - int (*disable_plane)(struct drm_plane *plane); - - Disable the plane. The DRM core calls this method in response to a - DRM_IOCTL_MODE_SETPLANE ioctl call with the frame buffer ID set to 0. - Disabled planes must not be processed by the CRTC. - - - - void (*destroy)(struct drm_plane *plane); - - Destroy the plane when not needed anymore. See - . - - - - Encoders (struct <structname>drm_encoder</structname>) @@ -1459,27 +1250,6 @@ int max_width, max_height; encoders they want to use to a CRTC. - - Encoder Operations - - - void (*destroy)(struct drm_encoder *encoder); - - Called to destroy the encoder when not needed anymore. See - . - - - - void (*set_property)(struct drm_plane *plane, - struct drm_property *property, uint64_t value); - - Set the value of the given plane property to - value. See - for more information about properties. - - - - Connectors (struct <structname>drm_connector</structname>) @@ -1683,27 +1453,6 @@ int max_width, max_height; connector_status_unknown. - - Miscellaneous - - - void (*set_property)(struct drm_connector *connector, - struct drm_property *property, uint64_t value); - - Set the value of the given connector property to - value. See - for more information about properties. - - - - void (*destroy)(struct drm_connector *connector); - - Destroy the connector when not needed anymore. See - . - - - - @@ -1829,462 +1578,6 @@ void intel_crt_init(struct drm_device *dev) To use it, a driver must provide bottom functions for all of the three KMS entities. - - Helper Functions - - - int drm_crtc_helper_set_config(struct drm_mode_set *set); - - The drm_crtc_helper_set_config helper function - is a CRTC set_config implementation. It - first tries to locate the best encoder for each connector by calling - the connector best_encoder helper - operation. - - - After locating the appropriate encoders, the helper function will - call the mode_fixup encoder and CRTC helper - operations to adjust the requested mode, or reject it completely in - which case an error will be returned to the application. If the new - configuration after mode adjustment is identical to the current - configuration the helper function will return without performing any - other operation. - - - If the adjusted mode is identical to the current mode but changes to - the frame buffer need to be applied, the - drm_crtc_helper_set_config function will call - the CRTC mode_set_base helper operation. If - the adjusted mode differs from the current mode, or if the - mode_set_base helper operation is not - provided, the helper function performs a full mode set sequence by - calling the prepare, - mode_set and - commit CRTC and encoder helper operations, - in that order. - - - - void drm_helper_connector_dpms(struct drm_connector *connector, int mode); - - The drm_helper_connector_dpms helper function - is a connector dpms implementation that - tracks power state of connectors. To use the function, drivers must - provide dpms helper operations for CRTCs - and encoders to apply the DPMS state to the device. - - - The mid-layer doesn't track the power state of CRTCs and encoders. 
- The dpms helper operations can thus be - called with a mode identical to the currently active mode. - - - - int drm_helper_probe_single_connector_modes(struct drm_connector *connector, - uint32_t maxX, uint32_t maxY); - - The drm_helper_probe_single_connector_modes helper - function is a connector fill_modes - implementation that updates the connection status for the connector - and then retrieves a list of modes by calling the connector - get_modes helper operation. - - - If the helper operation returns no mode, and if the connector status - is connector_status_connected, standard VESA DMT modes up to - 1024x768 are automatically added to the modes list by a call to - drm_add_modes_noedid. - - - The function then filters out modes larger than - max_width and max_height - if specified. It finally calls the optional connector - mode_valid helper operation for each mode in - the probed list to check whether the mode is valid for the connector. - - - - - - CRTC Helper Operations - - - bool (*mode_fixup)(struct drm_crtc *crtc, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode); - - Let CRTCs adjust the requested mode or reject it completely. This - operation returns true if the mode is accepted (possibly after being - adjusted) or false if it is rejected. - - - The mode_fixup operation should reject the - mode if it can't reasonably use it. The definition of "reasonable" - is currently fuzzy in this context. One possible behaviour would be - to set the adjusted mode to the panel timings when a fixed-mode - panel is used with hardware capable of scaling. Another behaviour - would be to accept any input mode and adjust it to the closest mode - supported by the hardware (FIXME: This needs to be clarified). - - - - int (*mode_set_base)(struct drm_crtc *crtc, int x, int y, - struct drm_framebuffer *old_fb) - - Move the CRTC on the current frame buffer (stored in - crtc->fb) to position (x,y). Any of the frame - buffer, x position or y position may have been modified. - - - This helper operation is optional. If not provided, the - drm_crtc_helper_set_config function will fall - back to the mode_set helper operation. - - - FIXME: Why are x and y passed as arguments, as they can be accessed - through crtc->x and - crtc->y? - - - - void (*prepare)(struct drm_crtc *crtc); - - Prepare the CRTC for mode setting. This operation is called after - validating the requested mode. Drivers use it to perform - device-specific operations required before setting the new mode. - - - - int (*mode_set)(struct drm_crtc *crtc, struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode, int x, int y, - struct drm_framebuffer *old_fb); - - Set a new mode, position and frame buffer. Depending on the device - requirements, the mode can be stored internally by the driver and - applied in the commit operation, or - programmed to the hardware immediately. - - - The mode_set operation returns 0 on success - or a negative error code if an error occurs. - - - - void (*commit)(struct drm_crtc *crtc); - - Commit a mode. This operation is called after setting the new mode. - Upon return the device must use the new mode and be fully - operational. - - - - - - Encoder Helper Operations - - - bool (*mode_fixup)(struct drm_encoder *encoder, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode); - - Let encoders adjust the requested mode or reject it completely. 
This - operation returns true if the mode is accepted (possibly after being - adjusted) or false if it is rejected. See the - mode_fixup CRTC helper - operation for an explanation of the allowed adjustments. - - - - void (*prepare)(struct drm_encoder *encoder); - - Prepare the encoder for mode setting. This operation is called after - validating the requested mode. Drivers use it to perform - device-specific operations required before setting the new mode. - - - - void (*mode_set)(struct drm_encoder *encoder, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode); - - Set a new mode. Depending on the device requirements, the mode can - be stored internally by the driver and applied in the - commit operation, or programmed to the - hardware immediately. - - - - void (*commit)(struct drm_encoder *encoder); - - Commit a mode. This operation is called after setting the new mode. - Upon return the device must use the new mode and be fully - operational. - - - - - - Connector Helper Operations - - - struct drm_encoder *(*best_encoder)(struct drm_connector *connector); - - Return a pointer to the best encoder for the connecter. Device that - map connectors to encoders 1:1 simply return the pointer to the - associated encoder. This operation is mandatory. - - - - int (*get_modes)(struct drm_connector *connector); - - Fill the connector's probed_modes list - by parsing EDID data with drm_add_edid_modes, - adding standard VESA DMT modes with drm_add_modes_noedid, - or calling drm_mode_probed_add directly for every - supported mode and return the number of modes it has detected. This - operation is mandatory. - - - Note that the caller function will automatically add standard VESA - DMT modes up to 1024x768 if the get_modes - helper operation returns no mode and if the connector status is - connector_status_connected. There is no need to call - drm_add_edid_modes manually in that case. - - - When adding modes manually the driver creates each mode with a call to - drm_mode_create and must fill the following fields. - - - __u32 type; - - Mode type bitmask, a combination of - - - DRM_MODE_TYPE_BUILTIN - not used? - - - DRM_MODE_TYPE_CLOCK_C - not used? - - - DRM_MODE_TYPE_CRTC_C - not used? - - - - DRM_MODE_TYPE_PREFERRED - The preferred mode for the connector - - - not used? - - - - DRM_MODE_TYPE_DEFAULT - not used? - - - DRM_MODE_TYPE_USERDEF - not used? - - - DRM_MODE_TYPE_DRIVER - - - The mode has been created by the driver (as opposed to - to user-created modes). - - - - - Drivers must set the DRM_MODE_TYPE_DRIVER bit for all modes they - create, and set the DRM_MODE_TYPE_PREFERRED bit for the preferred - mode. - - - - __u32 clock; - Pixel clock frequency in kHz unit - - - __u16 hdisplay, hsync_start, hsync_end, htotal; - __u16 vdisplay, vsync_start, vsync_end, vtotal; - Horizontal and vertical timing information - <----------------><-------------><--------------> - - //////////////////////| - ////////////////////// | - ////////////////////// |.................. ................ 
- _______________ - - <----- [hv]display -----> - <------------- [hv]sync_start ------------> - <--------------------- [hv]sync_end ---------------------> - <-------------------------------- [hv]total -----------------------------> -]]> - - - __u16 hskew; - __u16 vscan; - Unknown - - - __u32 flags; - - Mode flags, a combination of - - - DRM_MODE_FLAG_PHSYNC - - Horizontal sync is active high - - - - DRM_MODE_FLAG_NHSYNC - - Horizontal sync is active low - - - - DRM_MODE_FLAG_PVSYNC - - Vertical sync is active high - - - - DRM_MODE_FLAG_NVSYNC - - Vertical sync is active low - - - - DRM_MODE_FLAG_INTERLACE - - Mode is interlaced - - - - DRM_MODE_FLAG_DBLSCAN - - Mode uses doublescan - - - - DRM_MODE_FLAG_CSYNC - - Mode uses composite sync - - - - DRM_MODE_FLAG_PCSYNC - - Composite sync is active high - - - - DRM_MODE_FLAG_NCSYNC - - Composite sync is active low - - - - DRM_MODE_FLAG_HSKEW - - hskew provided (not used?) - - - - DRM_MODE_FLAG_BCAST - - not used? - - - - DRM_MODE_FLAG_PIXMUX - - not used? - - - - DRM_MODE_FLAG_DBLCLK - - not used? - - - - DRM_MODE_FLAG_CLKDIV2 - - ? - - - - - - Note that modes marked with the INTERLACE or DBLSCAN flags will be - filtered out by - drm_helper_probe_single_connector_modes if - the connector's interlace_allowed or - doublescan_allowed field is set to 0. - - - - char name[DRM_DISPLAY_MODE_LEN]; - - Mode name. The driver must call - drm_mode_set_name to fill the mode name from - hdisplay, - vdisplay and interlace flag after - filling the corresponding fields. - - - - - - The vrefresh value is computed by - drm_helper_probe_single_connector_modes. - - - When parsing EDID data, drm_add_edid_modes fills the - connector display_info - width_mm and - height_mm fields. When creating modes - manually the get_modes helper operation must - set the display_info - width_mm and - height_mm fields if they haven't been set - already (for instance at initialization time when a fixed-size panel is - attached to the connector). The mode width_mm - and height_mm fields are only used internally - during EDID parsing and should not be set when creating modes manually. - - - - int (*mode_valid)(struct drm_connector *connector, - struct drm_display_mode *mode); - - Verify whether a mode is valid for the connector. Return MODE_OK for - supported modes and one of the enum drm_mode_status values (MODE_*) - for unsupported modes. This operation is optional. - - - As the mode rejection reason is currently not used beside for - immediately removing the unsupported mode, an implementation can - return MODE_BAD regardless of the exact reason why the mode is not - valid. - - - Note that the mode_valid helper operation is - only called for modes detected by the device, and - not for modes set by the user through the CRTC - set_config operation. 
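To tie the mode fields described above together, here is a hedged sketch of a manual get_modes implementation for a fixed 1024x768 panel. The timings are the standard 65 MHz VESA DMT mode; the foo_ name and the physical dimensions are placeholders, not taken from a real driver:

#include <drm/drmP.h>
#include <drm/drm_crtc.h>

static int foo_connector_get_modes(struct drm_connector *connector)
{
	struct drm_display_mode *mode;

	mode = drm_mode_create(connector->dev);
	if (!mode)
		return 0;

	mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
	mode->clock = 65000;			/* kHz */
	mode->hdisplay = 1024;
	mode->hsync_start = 1048;
	mode->hsync_end = 1184;
	mode->htotal = 1344;
	mode->vdisplay = 768;
	mode->vsync_start = 771;
	mode->vsync_end = 777;
	mode->vtotal = 806;
	drm_mode_set_name(mode);		/* fills in "1024x768" */

	/* Fixed panel: physical size in millimetres (placeholder values). */
	connector->display_info.width_mm = 245;
	connector->display_info.height_mm = 184;

	drm_mode_probed_add(connector, mode);

	return 1;
}

Because the function returns a non-zero mode count, drm_helper_probe_single_connector_modes will not fall back to adding the standard VESA DMT modes.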
- - - - Atomic Modeset Helper Functions Reference @@ -2303,8 +1596,12 @@ void intel_crt_init(struct drm_device *dev) !Edrivers/gpu/drm/drm_atomic_helper.c - Modeset Helper Functions Reference -!Iinclude/drm/drm_crtc_helper.h + Modeset Helper Reference for Common Vtables +!Iinclude/drm/drm_modeset_helper_vtables.h +!Pinclude/drm/drm_modeset_helper_vtables.h overview + + + Legacy CRTC/Modeset Helper Functions Reference !Edrivers/gpu/drm/drm_crtc_helper.c !Pdrivers/gpu/drm/drm_crtc_helper.c overview @@ -4015,92 +3312,6 @@ int num_ioctls; DPIO !Pdrivers/gpu/drm/i915/i915_reg.h DPIO - - Dual channel PHY (VLV/CHV/BXT) - - - - - - - - - - - - - - - - - - CH0 - CH1 - - - - - CMN/PLL/REF - CMN/PLL/REF - - - PCS01 - PCS23 - PCS01 - PCS23 - - - TX0 - TX1 - TX2 - TX3 - TX0 - TX1 - TX2 - TX3 - - - DDI0 - DDI1 - - - -
- - Single channel PHY (CHV/BXT) - - - - - - - - - - - CH0 - - - - - CMN/PLL/REF - - - PCS01 - PCS23 - - - TX0 - TX1 - TX2 - TX3 - - - DDI2 - - - -
@@ -4226,41 +3437,63 @@ int num_ioctls; Modes of Use - - Manual switching and manual power control + + Manual switching and manual power control !Pdrivers/gpu/vga/vga_switcheroo.c Manual switching and manual power control - - - Driver power control + + + Driver power control !Pdrivers/gpu/vga/vga_switcheroo.c Driver power control - + - - Public functions + + API + + Public functions !Edrivers/gpu/vga/vga_switcheroo.c - - - - Public structures + + + Public structures !Finclude/linux/vga_switcheroo.h vga_switcheroo_handler !Finclude/linux/vga_switcheroo.h vga_switcheroo_client_ops - - - - Public constants + + + Public constants !Finclude/linux/vga_switcheroo.h vga_switcheroo_client_id !Finclude/linux/vga_switcheroo.h vga_switcheroo_state - - - - Private structures + + + Private structures !Fdrivers/gpu/vga/vga_switcheroo.c vgasr_priv !Fdrivers/gpu/vga/vga_switcheroo.c vga_switcheroo_client + + + + + Handlers + + apple-gmux Handler +!Pdrivers/platform/x86/apple-gmux.c Overview +!Pdrivers/platform/x86/apple-gmux.c Interrupt + + Graphics mux +!Pdrivers/platform/x86/apple-gmux.c Graphics mux + + + Power control +!Pdrivers/platform/x86/apple-gmux.c Power control + + + Backlight control +!Pdrivers/platform/x86/apple-gmux.c Backlight control + + !Cdrivers/gpu/vga/vga_switcheroo.c !Cinclude/linux/vga_switcheroo.h +!Cdrivers/platform/x86/apple-gmux.c diff --git a/Documentation/devicetree/bindings/display/etnaviv/etnaviv-drm.txt b/Documentation/devicetree/bindings/display/etnaviv/etnaviv-drm.txt new file mode 100644 index 000000000000..ed5e0a7894ad --- /dev/null +++ b/Documentation/devicetree/bindings/display/etnaviv/etnaviv-drm.txt @@ -0,0 +1,54 @@ +Etnaviv DRM master device +========================= + +The Etnaviv DRM master device is a virtual device needed to list all +Vivante GPU cores that comprise the GPU subsystem. + +Required properties: +- compatible: Should be one of + "fsl,imx-gpu-subsystem" + "marvell,dove-gpu-subsystem" +- cores: Should contain a list of phandles pointing to Vivante GPU devices + +example: + +gpu-subsystem { + compatible = "fsl,imx-gpu-subsystem"; + cores = <&gpu_2d>, <&gpu_3d>; +}; + + +Vivante GPU core devices +======================== + +Required properties: +- compatible: Should be "vivante,gc" + A more specific compatible is not needed, as the cores contain chip + identification registers at fixed locations, which provide all the + necessary information to the driver. 
+- reg: should be register base and length as documented in the + datasheet +- interrupts: Should contain the cores interrupt line +- clocks: should contain one clock for entry in clock-names + see Documentation/devicetree/bindings/clock/clock-bindings.txt +- clock-names: + - "bus": AXI/register clock + - "core": GPU core clock + - "shader": Shader clock (only required if GPU has feature PIPE_3D) + +Optional properties: +- power-domains: a power domain consumer specifier according to + Documentation/devicetree/bindings/power/power_domain.txt + +example: + +gpu_3d: gpu@00130000 { + compatible = "vivante,gc"; + reg = <0x00130000 0x4000>; + interrupts = <0 9 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&clks IMX6QDL_CLK_GPU3D_AXI>, + <&clks IMX6QDL_CLK_GPU3D_CORE>, + <&clks IMX6QDL_CLK_GPU3D_SHADER>; + clock-names = "bus", "core", "shader"; + power-domains = <&gpc 1>; +}; diff --git a/Documentation/devicetree/bindings/display/exynos/exynos_dp.txt b/Documentation/devicetree/bindings/display/exynos/exynos_dp.txt index 64693f2ebc51..fe4a7a2dea9c 100644 --- a/Documentation/devicetree/bindings/display/exynos/exynos_dp.txt +++ b/Documentation/devicetree/bindings/display/exynos/exynos_dp.txt @@ -1,3 +1,20 @@ +Device-Tree bindings for Samsung Exynos Embedded DisplayPort Transmitter(eDP) + +DisplayPort is industry standard to accommodate the growing board adoption +of digital display technology within the PC and CE industries. +It consolidates the internal and external connection methods to reduce device +complexity and cost. It also supports necessary features for important cross +industry applications and provides performance scalability to enable the next +generation of displays that feature higher color depths, refresh rates, and +display resolutions. + +eDP (embedded display port) device is compliant with Embedded DisplayPort +standard as follows, +- DisplayPort standard 1.1a for Exynos5250 and Exynos5260. +- DisplayPort standard 1.3 for Exynos5422s and Exynos5800. + +eDP resides between FIMD and panel or FIMD and bridge such as LVDS. + The Exynos display port interface should be configured based on the type of panel connected to it. @@ -66,8 +83,15 @@ Optional properties for dp-controller: Hotplug detect GPIO. Indicates which GPIO should be used for hotplug detection - -video interfaces: Device node can contain video interface port - nodes according to [1]. +Video interfaces: + Device node can contain video interface port nodes according to [1]. + The following are properties specific to those nodes: + + endpoint node connected to bridge or panel node: + - remote-endpoint: specifies the endpoint in panel or bridge node. + This node is required in all kinds of exynos dp + to represent the connection between dp and bridge + or dp and panel. [1]: Documentation/devicetree/bindings/media/video-interfaces.txt @@ -111,9 +135,18 @@ Board Specific portion: }; ports { - port@0 { + port { dp_out: endpoint { - remote-endpoint = <&bridge_in>; + remote-endpoint = <&dp_in>; + }; + }; + }; + + panel { + ... + port { + dp_in: endpoint { + remote-endpoint = <&dp_out>; }; }; }; diff --git a/Documentation/devicetree/bindings/display/msm/dsi.txt b/Documentation/devicetree/bindings/display/msm/dsi.txt index f344b9e49198..e7423bea1424 100644 --- a/Documentation/devicetree/bindings/display/msm/dsi.txt +++ b/Documentation/devicetree/bindings/display/msm/dsi.txt @@ -14,17 +14,20 @@ Required properties: - clocks: device clocks See Documentation/devicetree/bindings/clocks/clock-bindings.txt for details. 
- clock-names: the following clocks are required: - * "bus_clk" - * "byte_clk" - * "core_clk" - * "core_mmss_clk" - * "iface_clk" * "mdp_core_clk" + * "iface_clk" + * "bus_clk" + * "core_mmss_clk" + * "byte_clk" * "pixel_clk" + * "core_clk" + For DSIv2, we need an additional clock: + * "src_clk" - vdd-supply: phandle to vdd regulator device node - vddio-supply: phandle to vdd-io regulator device node - vdda-supply: phandle to vdda regulator device node - qcom,dsi-phy: phandle to DSI PHY device node +- syscon-sfpb: A phandle to mmss_sfpb syscon node (only for DSIv2) Optional properties: - panel@0: Node of panel connected to this DSI controller. @@ -51,6 +54,7 @@ Required properties: * "qcom,dsi-phy-28nm-hpm" * "qcom,dsi-phy-28nm-lp" * "qcom,dsi-phy-20nm" + * "qcom,dsi-phy-28nm-8960" - reg: Physical base address and length of the registers of PLL, PHY and PHY regulator - reg-names: The names of register regions. The following regions are required: diff --git a/Documentation/devicetree/bindings/display/msm/mdp.txt b/Documentation/devicetree/bindings/display/msm/mdp.txt index 0833edaba4c3..a214f6cd0363 100644 --- a/Documentation/devicetree/bindings/display/msm/mdp.txt +++ b/Documentation/devicetree/bindings/display/msm/mdp.txt @@ -2,18 +2,28 @@ Qualcomm adreno/snapdragon display controller Required properties: - compatible: - * "qcom,mdp" - mdp4 + * "qcom,mdp4" - mdp4 + * "qcom,mdp5" - mdp5 - reg: Physical base address and length of the controller's registers. - interrupts: The interrupt signal from the display controller. - connectors: array of phandles for output device(s) - clocks: device clocks See ../clocks/clock-bindings.txt for details. -- clock-names: the following clocks are required: - * "core_clk" - * "iface_clk" - * "src_clk" - * "hdmi_clk" - * "mpd_clk" +- clock-names: the following clocks are required. + For MDP4: + * "core_clk" + * "iface_clk" + * "lut_clk" + * "src_clk" + * "hdmi_clk" + * "mdp_clk" + For MDP5: + * "bus_clk" + * "iface_clk" + * "core_clk_src" + * "core_clk" + * "lut_clk" (some MDP5 versions may not need this) + * "vsync_clk" Optional properties: - gpus: phandle for gpu device @@ -26,7 +36,7 @@ Example: ... mdp: qcom,mdp@5100000 { - compatible = "qcom,mdp"; + compatible = "qcom,mdp4"; reg = <0x05100000 0xf0000>; interrupts = ; connectors = <&hdmi>; diff --git a/Documentation/devicetree/bindings/display/panel/boe,tv080wum-nl0.txt b/Documentation/devicetree/bindings/display/panel/boe,tv080wum-nl0.txt new file mode 100644 index 000000000000..50be5e2438b2 --- /dev/null +++ b/Documentation/devicetree/bindings/display/panel/boe,tv080wum-nl0.txt @@ -0,0 +1,7 @@ +Boe Corporation 8.0" WUXGA TFT LCD panel + +Required properties: +- compatible: should be "boe,tv080wum-nl0" + +This binding is compatible with the simple-panel binding, which is specified +in simple-panel.txt in this directory. diff --git a/Documentation/devicetree/bindings/display/panel/innolux,g121x1-l03.txt b/Documentation/devicetree/bindings/display/panel/innolux,g121x1-l03.txt new file mode 100644 index 000000000000..649744620ae1 --- /dev/null +++ b/Documentation/devicetree/bindings/display/panel/innolux,g121x1-l03.txt @@ -0,0 +1,7 @@ +Innolux Corporation 12.1" G121X1-L03 XGA (1024x768) TFT LCD panel + +Required properties: +- compatible: should be "innolux,g121x1-l03" + +This binding is compatible with the simple-panel binding, which is specified +in simple-panel.txt in this directory. 
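As a side note on the clock-names properties above: each listed name is simply the con_id string a driver passes to the clock API. A rough sketch with a made-up foo_ function and only two of the MDP clocks shown; this is an assumption for illustration, not code from the actual msm driver:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int foo_mdp_init_clocks(struct platform_device *pdev)
{
	struct clk *core_clk, *iface_clk;
	int ret;

	/* The string must match an entry in the node's clock-names. */
	core_clk = devm_clk_get(&pdev->dev, "core_clk");
	if (IS_ERR(core_clk))
		return PTR_ERR(core_clk);

	iface_clk = devm_clk_get(&pdev->dev, "iface_clk");
	if (IS_ERR(iface_clk))
		return PTR_ERR(iface_clk);

	ret = clk_prepare_enable(iface_clk);
	if (ret)
		return ret;

	ret = clk_prepare_enable(core_clk);
	if (ret)
		clk_disable_unprepare(iface_clk);

	return ret;
}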
diff --git a/Documentation/devicetree/bindings/display/panel/kyo,tcg121xglp.txt b/Documentation/devicetree/bindings/display/panel/kyo,tcg121xglp.txt new file mode 100644 index 000000000000..a8e940fe731e --- /dev/null +++ b/Documentation/devicetree/bindings/display/panel/kyo,tcg121xglp.txt @@ -0,0 +1,7 @@ +Kyocera Corporation 12.1" XGA (1024x768) TFT LCD panel + +Required properties: +- compatible: should be "kyo,tcg121xglp" + +This binding is compatible with the simple-panel binding, which is specified +in simple-panel.txt in this directory. diff --git a/Documentation/devicetree/bindings/display/panel/panasonic,vvx10f034n00.txt b/Documentation/devicetree/bindings/display/panel/panasonic,vvx10f034n00.txt new file mode 100644 index 000000000000..37dedf6a6702 --- /dev/null +++ b/Documentation/devicetree/bindings/display/panel/panasonic,vvx10f034n00.txt @@ -0,0 +1,20 @@ +Panasonic 10" WUXGA TFT LCD panel + +Required properties: +- compatible: should be "panasonic,vvx10f034n00" +- reg: DSI virtual channel of the peripheral +- power-supply: phandle of the regulator that provides the supply voltage + +Optional properties: +- backlight: phandle of the backlight device attached to the panel + +Example: + + mdss_dsi@fd922800 { + panel@0 { + compatible = "panasonic,vvx10f034n00"; + reg = <0>; + power-supply = <&vreg_vsp>; + backlight = <&lp8566_wled>; + }; + }; diff --git a/Documentation/devicetree/bindings/display/panel/qiaodian,qd43003c0-40.txt b/Documentation/devicetree/bindings/display/panel/qiaodian,qd43003c0-40.txt new file mode 100644 index 000000000000..0fbdab89ac3d --- /dev/null +++ b/Documentation/devicetree/bindings/display/panel/qiaodian,qd43003c0-40.txt @@ -0,0 +1,7 @@ +QiaoDian XianShi Corporation 4"3 TFT LCD panel + +Required properties: +- compatible: should be "qiaodian,qd43003c0-40" + +This binding is compatible with the simple-panel binding, which is specified +in simple-panel.txt in this directory. diff --git a/Documentation/devicetree/bindings/display/panel/sharp,ls043t1le01.txt b/Documentation/devicetree/bindings/display/panel/sharp,ls043t1le01.txt new file mode 100644 index 000000000000..3770a111968b --- /dev/null +++ b/Documentation/devicetree/bindings/display/panel/sharp,ls043t1le01.txt @@ -0,0 +1,22 @@ +Sharp Microelectronics 4.3" qHD TFT LCD panel + +Required properties: +- compatible: should be "sharp,ls043t1le01-qhd" +- reg: DSI virtual channel of the peripheral +- power-supply: phandle of the regulator that provides the supply voltage + +Optional properties: +- backlight: phandle of the backlight device attached to the panel +- reset-gpios: a GPIO spec for the reset pin + +Example: + + mdss_dsi@fd922800 { + panel@0 { + compatible = "sharp,ls043t1le01-qhd"; + reg = <0>; + avdd-supply = <&pm8941_l22>; + backlight = <&pm8941_wled>; + reset-gpios = <&pm8941_gpios 19 GPIO_ACTIVE_HIGH>; + }; + }; diff --git a/Documentation/devicetree/bindings/display/rockchip/dw_mipi_dsi_rockchip.txt b/Documentation/devicetree/bindings/display/rockchip/dw_mipi_dsi_rockchip.txt new file mode 100644 index 000000000000..1753f0cc6fad --- /dev/null +++ b/Documentation/devicetree/bindings/display/rockchip/dw_mipi_dsi_rockchip.txt @@ -0,0 +1,60 @@ +Rockchip specific extensions to the Synopsys Designware MIPI DSI +================================ + +Required properties: +- #address-cells: Should be <1>. +- #size-cells: Should be <0>. +- compatible: "rockchip,rk3288-mipi-dsi", "snps,dw-mipi-dsi". +- reg: Represent the physical address range of the controller. 
+- interrupts: Represent the controller's interrupt to the CPU(s). +- clocks, clock-names: Phandles to the controller's pll reference + clock(ref) and APB clock(pclk), as described in [1]. +- rockchip,grf: this soc should set GRF regs to mux vopl/vopb. +- ports: contain a port node with endpoint definitions as defined in [2]. + For vopb,set the reg = <0> and set the reg = <1> for vopl. + +[1] Documentation/devicetree/bindings/clock/clock-bindings.txt +[2] Documentation/devicetree/bindings/media/video-interfaces.txt + +Example: + mipi_dsi: mipi@ff960000 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "rockchip,rk3288-mipi-dsi", "snps,dw-mipi-dsi"; + reg = <0xff960000 0x4000>; + interrupts = ; + clocks = <&cru SCLK_MIPI_24M>, <&cru PCLK_MIPI_DSI0>; + clock-names = "ref", "pclk"; + rockchip,grf = <&grf>; + status = "okay"; + + ports { + #address-cells = <1>; + #size-cells = <0>; + reg = <1>; + + mipi_in: port { + #address-cells = <1>; + #size-cells = <0>; + mipi_in_vopb: endpoint@0 { + reg = <0>; + remote-endpoint = <&vopb_out_mipi>; + }; + mipi_in_vopl: endpoint@1 { + reg = <1>; + remote-endpoint = <&vopl_out_mipi>; + }; + }; + }; + + panel { + compatible ="boe,tv080wum-nl0"; + reg = <0>; + + enable-gpios = <&gpio7 3 GPIO_ACTIVE_HIGH>; + pinctrl-names = "default"; + pinctrl-0 = <&lcd_en>; + backlight = <&backlight>; + status = "okay"; + }; + }; diff --git a/Documentation/devicetree/bindings/display/rockchip/rockchip-vop.txt b/Documentation/devicetree/bindings/display/rockchip/rockchip-vop.txt index d15351f2313d..5489b59e3d41 100644 --- a/Documentation/devicetree/bindings/display/rockchip/rockchip-vop.txt +++ b/Documentation/devicetree/bindings/display/rockchip/rockchip-vop.txt @@ -7,6 +7,7 @@ buffer to an external LCD interface. Required properties: - compatible: value should be one of the following "rockchip,rk3288-vop"; + "rockchip,rk3036-vop"; - interrupts: should contain a list of all VOP IP block interrupts in the order: VSYNC, LCD_SYSTEM. The interrupt specifier diff --git a/Documentation/devicetree/bindings/media/exynos5-gsc.txt b/Documentation/devicetree/bindings/media/exynos5-gsc.txt index 0604d42f38d1..5fe9372abb37 100644 --- a/Documentation/devicetree/bindings/media/exynos5-gsc.txt +++ b/Documentation/devicetree/bindings/media/exynos5-gsc.txt @@ -7,6 +7,10 @@ Required properties: - reg: should contain G-Scaler physical address location and length. - interrupts: should contain G-Scaler interrupt number +Optional properties: +- samsung,sysreg: handle to syscon used to control the system registers to + set writeback input and destination + Example: gsc_0: gsc@0x13e00000 { diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt index a4f2035569ce..084439d35747 100644 --- a/Documentation/devicetree/bindings/vendor-prefixes.txt +++ b/Documentation/devicetree/bindings/vendor-prefixes.txt @@ -33,6 +33,7 @@ auo AU Optronics Corporation avago Avago Technologies avic Shanghai AVIC Optoelectronics Co., Ltd. axis Axis Communications AB +boe BOE Technology Group Co., Ltd. bosch Bosch Sensortec GmbH boundary Boundary Devices Inc. brcm Broadcom Corporation @@ -123,6 +124,7 @@ jedec JEDEC Solid State Technology Association karo Ka-Ro electronics GmbH keymile Keymile GmbH kinetic Kinetic Technologies +kyo Kyocera Corporation lacie LaCie lantiq Lantiq Semiconductor lenovo Lenovo Group Ltd. @@ -181,6 +183,7 @@ qca Qualcomm Atheros, Inc. 
qcom Qualcomm Technologies, Inc qemu QEMU, a generic and open source machine emulator and virtualizer qi Qi Hardware +qiaodian QiaoDian XianShi Corporation qnap QNAP Systems, Inc. radxa Radxa raidsonic RaidSonic Technology GmbH @@ -239,6 +242,7 @@ v3 V3 Semiconductor variscite Variscite Ltd. via VIA Technologies, Inc. virtio Virtual I/O Device Specification, developed by the OASIS consortium +vivante Vivante Corporation voipac Voipac Technologies s.r.o. wexler Wexler winbond Winbond Electronics corp. diff --git a/MAINTAINERS b/MAINTAINERS index d38c324191de..1d23f701489c 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -3768,6 +3768,15 @@ S: Maintained F: drivers/gpu/drm/sti F: Documentation/devicetree/bindings/display/st,stih4xx.txt +DRM DRIVERS FOR VIVANTE GPU IP +M: Lucas Stach +R: Russell King +R: Christian Gmeiner +L: dri-devel@lists.freedesktop.org +S: Maintained +F: drivers/gpu/drm/etnaviv +F: Documentation/devicetree/bindings/display/etnaviv + DSBR100 USB FM RADIO DRIVER M: Alexey Klimov L: linux-media@vger.kernel.org diff --git a/arch/arm/boot/dts/exynos5800-peach-pi.dts b/arch/arm/boot/dts/exynos5800-peach-pi.dts index 49a4f43e5ac2..1cc2e95ffc66 100644 --- a/arch/arm/boot/dts/exynos5800-peach-pi.dts +++ b/arch/arm/boot/dts/exynos5800-peach-pi.dts @@ -122,6 +122,12 @@ compatible = "auo,b133htn01"; power-supply = <&tps65090_fet6>; backlight = <&backlight>; + + port { + panel_in: endpoint { + remote-endpoint = <&dp_out>; + }; + }; }; mmc1_pwrseq: mmc1_pwrseq { @@ -148,7 +154,14 @@ samsung,link-rate = <0x0a>; samsung,lane-count = <2>; samsung,hpd-gpio = <&gpx2 6 GPIO_ACTIVE_HIGH>; - panel = <&panel>; + + ports { + port { + dp_out: endpoint { + remote-endpoint = <&panel_in>; + }; + }; + }; }; &fimd { diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index c4bf9a1cf4a6..59babd5a5396 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -160,6 +160,7 @@ config DRM_AMDGPU If M is selected, the module will be called amdgpu. source "drivers/gpu/drm/amd/amdgpu/Kconfig" +source "drivers/gpu/drm/amd/powerplay/Kconfig" source "drivers/gpu/drm/nouveau/Kconfig" @@ -266,3 +267,5 @@ source "drivers/gpu/drm/amd/amdkfd/Kconfig" source "drivers/gpu/drm/imx/Kconfig" source "drivers/gpu/drm/vc4/Kconfig" + +source "drivers/gpu/drm/etnaviv/Kconfig" diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile index 1e9ff4c3e3db..f858aa25fbb2 100644 --- a/drivers/gpu/drm/Makefile +++ b/drivers/gpu/drm/Makefile @@ -75,3 +75,4 @@ obj-y += i2c/ obj-y += panel/ obj-y += bridge/ obj-$(CONFIG_DRM_FSL_DCU) += fsl-dcu/ +obj-$(CONFIG_DRM_ETNAVIV) += etnaviv/ diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile index 04c270757030..66f729eaf00b 100644 --- a/drivers/gpu/drm/amd/amdgpu/Makefile +++ b/drivers/gpu/drm/amd/amdgpu/Makefile @@ -2,10 +2,13 @@ # Makefile for the drm device driver. This driver provides support for the # Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher. -ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/amd/include/asic_reg \ - -Idrivers/gpu/drm/amd/include \ - -Idrivers/gpu/drm/amd/amdgpu \ - -Idrivers/gpu/drm/amd/scheduler +FULL_AMD_PATH=$(src)/.. 
+ +ccflags-y := -Iinclude/drm -I$(FULL_AMD_PATH)/include/asic_reg \ + -I$(FULL_AMD_PATH)/include \ + -I$(FULL_AMD_PATH)/amdgpu \ + -I$(FULL_AMD_PATH)/scheduler \ + -I$(FULL_AMD_PATH)/powerplay/inc amdgpu-y := amdgpu_drv.o @@ -44,6 +47,7 @@ amdgpu-y += \ # add SMC block amdgpu-y += \ amdgpu_dpm.o \ + amdgpu_powerplay.o \ cz_smc.o cz_dpm.o \ tonga_smc.o tonga_dpm.o \ fiji_smc.o fiji_dpm.o \ @@ -94,6 +98,14 @@ amdgpu-$(CONFIG_VGA_SWITCHEROO) += amdgpu_atpx_handler.o amdgpu-$(CONFIG_ACPI) += amdgpu_acpi.o amdgpu-$(CONFIG_MMU_NOTIFIER) += amdgpu_mn.o +ifneq ($(CONFIG_DRM_AMD_POWERPLAY),) + +include $(FULL_AMD_PATH)/powerplay/Makefile + +amdgpu-y += $(AMD_POWERPLAY_FILES) + +endif + obj-$(CONFIG_DRM_AMDGPU)+= amdgpu.o CFLAGS_amdgpu_trace_points.o := -I$(src) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 048cfe073dae..313b0cc8d676 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -52,6 +52,7 @@ #include "amdgpu_irq.h" #include "amdgpu_ucode.h" #include "amdgpu_gds.h" +#include "amd_powerplay.h" #include "gpu_scheduler.h" @@ -85,6 +86,7 @@ extern int amdgpu_enable_scheduler; extern int amdgpu_sched_jobs; extern int amdgpu_sched_hw_submission; extern int amdgpu_enable_semaphores; +extern int amdgpu_powerplay; #define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS 3000 #define AMDGPU_MAX_USEC_TIMEOUT 100000 /* 100 ms */ @@ -918,8 +920,8 @@ struct amdgpu_ring { #define AMDGPU_VM_FAULT_STOP_ALWAYS 2 struct amdgpu_vm_pt { - struct amdgpu_bo *bo; - uint64_t addr; + struct amdgpu_bo_list_entry entry; + uint64_t addr; }; struct amdgpu_vm_id { @@ -981,9 +983,12 @@ struct amdgpu_vm_manager { void amdgpu_vm_manager_fini(struct amdgpu_device *adev); int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm); void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm); -struct amdgpu_bo_list_entry *amdgpu_vm_get_bos(struct amdgpu_device *adev, - struct amdgpu_vm *vm, - struct list_head *head); +void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm, + struct list_head *validated, + struct amdgpu_bo_list_entry *entry); +void amdgpu_vm_get_pt_bos(struct amdgpu_vm *vm, struct list_head *duplicates); +void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev, + struct amdgpu_vm *vm); int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, struct amdgpu_sync *sync); void amdgpu_vm_flush(struct amdgpu_ring *ring, @@ -1024,11 +1029,9 @@ int amdgpu_vm_free_job(struct amdgpu_job *job); * context related structures */ -#define AMDGPU_CTX_MAX_CS_PENDING 16 - struct amdgpu_ctx_ring { uint64_t sequence; - struct fence *fences[AMDGPU_CTX_MAX_CS_PENDING]; + struct fence **fences; struct amd_sched_entity entity; }; @@ -1037,6 +1040,7 @@ struct amdgpu_ctx { struct amdgpu_device *adev; unsigned reset_counter; spinlock_t ring_lock; + struct fence **fences; struct amdgpu_ctx_ring rings[AMDGPU_MAX_RINGS]; }; @@ -1047,7 +1051,7 @@ struct amdgpu_ctx_mgr { struct idr ctx_handles; }; -int amdgpu_ctx_init(struct amdgpu_device *adev, bool kernel, +int amdgpu_ctx_init(struct amdgpu_device *adev, enum amd_sched_priority pri, struct amdgpu_ctx *ctx); void amdgpu_ctx_fini(struct amdgpu_ctx *ctx); @@ -1254,7 +1258,7 @@ struct amdgpu_cs_parser { unsigned nchunks; struct amdgpu_cs_chunk *chunks; /* relocations */ - struct amdgpu_bo_list_entry *vm_bos; + struct amdgpu_bo_list_entry vm_pd; struct list_head validated; struct fence *fence; @@ -1301,31 +1305,7 @@ struct amdgpu_wb { int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb); void 
amdgpu_wb_free(struct amdgpu_device *adev, u32 wb); -/** - * struct amdgpu_pm - power management datas - * It keeps track of various data needed to take powermanagement decision. - */ -enum amdgpu_pm_state_type { - /* not used for dpm */ - POWER_STATE_TYPE_DEFAULT, - POWER_STATE_TYPE_POWERSAVE, - /* user selectable states */ - POWER_STATE_TYPE_BATTERY, - POWER_STATE_TYPE_BALANCED, - POWER_STATE_TYPE_PERFORMANCE, - /* internal states */ - POWER_STATE_TYPE_INTERNAL_UVD, - POWER_STATE_TYPE_INTERNAL_UVD_SD, - POWER_STATE_TYPE_INTERNAL_UVD_HD, - POWER_STATE_TYPE_INTERNAL_UVD_HD2, - POWER_STATE_TYPE_INTERNAL_UVD_MVC, - POWER_STATE_TYPE_INTERNAL_BOOT, - POWER_STATE_TYPE_INTERNAL_THERMAL, - POWER_STATE_TYPE_INTERNAL_ACPI, - POWER_STATE_TYPE_INTERNAL_ULV, - POWER_STATE_TYPE_INTERNAL_3DPERF, -}; enum amdgpu_int_thermal_type { THERMAL_TYPE_NONE, @@ -1607,8 +1587,8 @@ struct amdgpu_dpm { /* vce requirements */ struct amdgpu_vce_state vce_states[AMDGPU_MAX_VCE_LEVELS]; enum amdgpu_vce_level vce_level; - enum amdgpu_pm_state_type state; - enum amdgpu_pm_state_type user_state; + enum amd_pm_state_type state; + enum amd_pm_state_type user_state; u32 platform_caps; u32 voltage_response_time; u32 backbias_response_time; @@ -1661,8 +1641,13 @@ struct amdgpu_pm { const struct firmware *fw; /* SMC firmware */ uint32_t fw_version; const struct amdgpu_dpm_funcs *funcs; + uint32_t pcie_gen_mask; + uint32_t pcie_mlw_mask; + struct amd_pp_display_configuration pm_display_cfg;/* set by DAL */ }; +void amdgpu_get_pcie_info(struct amdgpu_device *adev); + /* * UVD */ @@ -1830,6 +1815,8 @@ struct amdgpu_cu_info { */ struct amdgpu_asic_funcs { bool (*read_disabled_bios)(struct amdgpu_device *adev); + bool (*read_bios_from_rom)(struct amdgpu_device *adev, + u8 *bios, u32 length_bytes); int (*read_register)(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 reg_offset, u32 *value); void (*set_vga_state)(struct amdgpu_device *adev, bool state); @@ -2060,6 +2047,10 @@ struct amdgpu_device { /* interrupts */ struct amdgpu_irq irq; + /* powerplay */ + struct amd_powerplay powerplay; + bool pp_enabled; + /* dpm */ struct amdgpu_pm pm; u32 cg_flags; @@ -2236,6 +2227,7 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring) #define amdgpu_asic_set_vce_clocks(adev, ev, ec) (adev)->asic_funcs->set_vce_clocks((adev), (ev), (ec)) #define amdgpu_asic_get_gpu_clock_counter(adev) (adev)->asic_funcs->get_gpu_clock_counter((adev)) #define amdgpu_asic_read_disabled_bios(adev) (adev)->asic_funcs->read_disabled_bios((adev)) +#define amdgpu_asic_read_bios_from_rom(adev, b, l) (adev)->asic_funcs->read_bios_from_rom((adev), (b), (l)) #define amdgpu_asic_read_register(adev, se, sh, offset, v)((adev)->asic_funcs->read_register((adev), (se), (sh), (offset), (v))) #define amdgpu_asic_get_cu_info(adev, info) (adev)->asic_funcs->get_cu_info((adev), (info)) #define amdgpu_gart_flush_gpu_tlb(adev, vmid) (adev)->gart.gart_funcs->flush_gpu_tlb((adev), (vmid)) @@ -2277,24 +2269,78 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring) #define amdgpu_display_resume_mc_access(adev, s) (adev)->mode_info.funcs->resume_mc_access((adev), (s)) #define amdgpu_emit_copy_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_copy_buffer((ib), (s), (d), (b)) #define amdgpu_emit_fill_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_fill_buffer((ib), (s), (d), (b)) -#define amdgpu_dpm_get_temperature(adev) (adev)->pm.funcs->get_temperature((adev)) #define amdgpu_dpm_pre_set_power_state(adev) (adev)->pm.funcs->pre_set_power_state((adev)) #define 
amdgpu_dpm_set_power_state(adev) (adev)->pm.funcs->set_power_state((adev)) #define amdgpu_dpm_post_set_power_state(adev) (adev)->pm.funcs->post_set_power_state((adev)) #define amdgpu_dpm_display_configuration_changed(adev) (adev)->pm.funcs->display_configuration_changed((adev)) -#define amdgpu_dpm_get_sclk(adev, l) (adev)->pm.funcs->get_sclk((adev), (l)) -#define amdgpu_dpm_get_mclk(adev, l) (adev)->pm.funcs->get_mclk((adev), (l)) #define amdgpu_dpm_print_power_state(adev, ps) (adev)->pm.funcs->print_power_state((adev), (ps)) -#define amdgpu_dpm_debugfs_print_current_performance_level(adev, m) (adev)->pm.funcs->debugfs_print_current_performance_level((adev), (m)) -#define amdgpu_dpm_force_performance_level(adev, l) (adev)->pm.funcs->force_performance_level((adev), (l)) #define amdgpu_dpm_vblank_too_short(adev) (adev)->pm.funcs->vblank_too_short((adev)) -#define amdgpu_dpm_powergate_uvd(adev, g) (adev)->pm.funcs->powergate_uvd((adev), (g)) -#define amdgpu_dpm_powergate_vce(adev, g) (adev)->pm.funcs->powergate_vce((adev), (g)) #define amdgpu_dpm_enable_bapm(adev, e) (adev)->pm.funcs->enable_bapm((adev), (e)) -#define amdgpu_dpm_set_fan_control_mode(adev, m) (adev)->pm.funcs->set_fan_control_mode((adev), (m)) -#define amdgpu_dpm_get_fan_control_mode(adev) (adev)->pm.funcs->get_fan_control_mode((adev)) -#define amdgpu_dpm_set_fan_speed_percent(adev, s) (adev)->pm.funcs->set_fan_speed_percent((adev), (s)) -#define amdgpu_dpm_get_fan_speed_percent(adev, s) (adev)->pm.funcs->get_fan_speed_percent((adev), (s)) + +#define amdgpu_dpm_get_temperature(adev) \ + (adev)->pp_enabled ? \ + (adev)->powerplay.pp_funcs->get_temperature((adev)->powerplay.pp_handle) : \ + (adev)->pm.funcs->get_temperature((adev)) + +#define amdgpu_dpm_set_fan_control_mode(adev, m) \ + (adev)->pp_enabled ? \ + (adev)->powerplay.pp_funcs->set_fan_control_mode((adev)->powerplay.pp_handle, (m)) : \ + (adev)->pm.funcs->set_fan_control_mode((adev), (m)) + +#define amdgpu_dpm_get_fan_control_mode(adev) \ + (adev)->pp_enabled ? \ + (adev)->powerplay.pp_funcs->get_fan_control_mode((adev)->powerplay.pp_handle) : \ + (adev)->pm.funcs->get_fan_control_mode((adev)) + +#define amdgpu_dpm_set_fan_speed_percent(adev, s) \ + (adev)->pp_enabled ? \ + (adev)->powerplay.pp_funcs->set_fan_speed_percent((adev)->powerplay.pp_handle, (s)) : \ + (adev)->pm.funcs->set_fan_speed_percent((adev), (s)) + +#define amdgpu_dpm_get_fan_speed_percent(adev, s) \ + (adev)->pp_enabled ? \ + (adev)->powerplay.pp_funcs->get_fan_speed_percent((adev)->powerplay.pp_handle, (s)) : \ + (adev)->pm.funcs->get_fan_speed_percent((adev), (s)) + +#define amdgpu_dpm_get_sclk(adev, l) \ + (adev)->pp_enabled ? \ + (adev)->powerplay.pp_funcs->get_sclk((adev)->powerplay.pp_handle, (l)) : \ + (adev)->pm.funcs->get_sclk((adev), (l)) + +#define amdgpu_dpm_get_mclk(adev, l) \ + (adev)->pp_enabled ? \ + (adev)->powerplay.pp_funcs->get_mclk((adev)->powerplay.pp_handle, (l)) : \ + (adev)->pm.funcs->get_mclk((adev), (l)) + + +#define amdgpu_dpm_force_performance_level(adev, l) \ + (adev)->pp_enabled ? \ + (adev)->powerplay.pp_funcs->force_performance_level((adev)->powerplay.pp_handle, (l)) : \ + (adev)->pm.funcs->force_performance_level((adev), (l)) + +#define amdgpu_dpm_powergate_uvd(adev, g) \ + (adev)->pp_enabled ? \ + (adev)->powerplay.pp_funcs->powergate_uvd((adev)->powerplay.pp_handle, (g)) : \ + (adev)->pm.funcs->powergate_uvd((adev), (g)) + +#define amdgpu_dpm_powergate_vce(adev, g) \ + (adev)->pp_enabled ? 
\ + (adev)->powerplay.pp_funcs->powergate_vce((adev)->powerplay.pp_handle, (g)) : \ + (adev)->pm.funcs->powergate_vce((adev), (g)) + +#define amdgpu_dpm_debugfs_print_current_performance_level(adev, m) \ + (adev)->pp_enabled ? \ + (adev)->powerplay.pp_funcs->print_current_performance_level((adev)->powerplay.pp_handle, (m)) : \ + (adev)->pm.funcs->debugfs_print_current_performance_level((adev), (m)) + +#define amdgpu_dpm_get_current_power_state(adev) \ + (adev)->powerplay.pp_funcs->get_current_power_state((adev)->powerplay.pp_handle) + +#define amdgpu_dpm_get_performance_level(adev) \ + (adev)->powerplay.pp_funcs->get_performance_level((adev)->powerplay.pp_handle) + +#define amdgpu_dpm_dispatch_task(adev, event_id, input, output) \ + (adev)->powerplay.pp_funcs->dispatch_tasks((adev)->powerplay.pp_handle, (event_id), (input), (output)) #define amdgpu_gds_switch(adev, r, v, d, w, a) (adev)->gds.funcs->patch_gds_switch((r), (v), (d), (w), (a)) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c index a142d5ae148d..5cd7b736a9de 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c @@ -29,66 +29,10 @@ #include #include #include "amdgpu.h" -#include "amdgpu_acpi.h" +#include "amd_acpi.h" #include "atom.h" -#define ACPI_AC_CLASS "ac_adapter" - extern void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev); - -struct atif_verify_interface { - u16 size; /* structure size in bytes (includes size field) */ - u16 version; /* version */ - u32 notification_mask; /* supported notifications mask */ - u32 function_bits; /* supported functions bit vector */ -} __packed; - -struct atif_system_params { - u16 size; /* structure size in bytes (includes size field) */ - u32 valid_mask; /* valid flags mask */ - u32 flags; /* flags */ - u8 command_code; /* notify command code */ -} __packed; - -struct atif_sbios_requests { - u16 size; /* structure size in bytes (includes size field) */ - u32 pending; /* pending sbios requests */ - u8 panel_exp_mode; /* panel expansion mode */ - u8 thermal_gfx; /* thermal state: target gfx controller */ - u8 thermal_state; /* thermal state: state id (0: exit state, non-0: state) */ - u8 forced_power_gfx; /* forced power state: target gfx controller */ - u8 forced_power_state; /* forced power state: state id */ - u8 system_power_src; /* system power source */ - u8 backlight_level; /* panel backlight level (0-255) */ -} __packed; - -#define ATIF_NOTIFY_MASK 0x3 -#define ATIF_NOTIFY_NONE 0 -#define ATIF_NOTIFY_81 1 -#define ATIF_NOTIFY_N 2 - -struct atcs_verify_interface { - u16 size; /* structure size in bytes (includes size field) */ - u16 version; /* version */ - u32 function_bits; /* supported functions bit vector */ -} __packed; - -#define ATCS_VALID_FLAGS_MASK 0x3 - -struct atcs_pref_req_input { - u16 size; /* structure size in bytes (includes size field) */ - u16 client_id; /* client id (bit 2-0: func num, 7-3: dev num, 15-8: bus num) */ - u16 valid_flags_mask; /* valid flags mask */ - u16 flags; /* flags */ - u8 req_type; /* request type */ - u8 perf_req; /* performance request */ -} __packed; - -struct atcs_pref_req_output { - u16 size; /* structure size in bytes (includes size field) */ - u8 ret_val; /* return value */ -} __packed; - /* Call the ATIF method */ /** diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c index 5a8fbadbd27b..3c895863fcf5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c +++ 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c @@ -11,7 +11,7 @@ #include #include -#include "amdgpu_acpi.h" +#include "amd_acpi.h" struct amdgpu_atpx_functions { bool px_params; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c index c44c0c6afd1b..80add22375ee 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c @@ -35,6 +35,13 @@ * BIOS. */ +#define AMD_VBIOS_SIGNATURE " 761295520" +#define AMD_VBIOS_SIGNATURE_OFFSET 0x30 +#define AMD_VBIOS_SIGNATURE_SIZE sizeof(AMD_VBIOS_SIGNATURE) +#define AMD_VBIOS_SIGNATURE_END (AMD_VBIOS_SIGNATURE_OFFSET + AMD_VBIOS_SIGNATURE_SIZE) +#define AMD_IS_VALID_VBIOS(p) ((p)[0] == 0x55 && (p)[1] == 0xAA) +#define AMD_VBIOS_LENGTH(p) ((p)[2] << 9) + /* If you boot an IGP board with a discrete card as the primary, * the IGP rom is not accessible via the rom bar as the IGP rom is * part of the system bios. On boot, the system bios puts a @@ -58,7 +65,7 @@ static bool igp_read_bios_from_vram(struct amdgpu_device *adev) return false; } - if (size == 0 || bios[0] != 0x55 || bios[1] != 0xaa) { + if (size == 0 || !AMD_IS_VALID_VBIOS(bios)) { iounmap(bios); return false; } @@ -74,7 +81,7 @@ static bool igp_read_bios_from_vram(struct amdgpu_device *adev) bool amdgpu_read_bios(struct amdgpu_device *adev) { - uint8_t __iomem *bios, val1, val2; + uint8_t __iomem *bios, val[2]; size_t size; adev->bios = NULL; @@ -84,10 +91,10 @@ bool amdgpu_read_bios(struct amdgpu_device *adev) return false; } - val1 = readb(&bios[0]); - val2 = readb(&bios[1]); + val[0] = readb(&bios[0]); + val[1] = readb(&bios[1]); - if (size == 0 || val1 != 0x55 || val2 != 0xaa) { + if (size == 0 || !AMD_IS_VALID_VBIOS(val)) { pci_unmap_rom(adev->pdev, bios); return false; } @@ -101,6 +108,38 @@ bool amdgpu_read_bios(struct amdgpu_device *adev) return true; } +static bool amdgpu_read_bios_from_rom(struct amdgpu_device *adev) +{ + u8 header[AMD_VBIOS_SIGNATURE_END+1] = {0}; + int len; + + if (!adev->asic_funcs->read_bios_from_rom) + return false; + + /* validate VBIOS signature */ + if (amdgpu_asic_read_bios_from_rom(adev, &header[0], sizeof(header)) == false) + return false; + header[AMD_VBIOS_SIGNATURE_END] = 0; + + if ((!AMD_IS_VALID_VBIOS(header)) || + 0 != memcmp((char *)&header[AMD_VBIOS_SIGNATURE_OFFSET], + AMD_VBIOS_SIGNATURE, + strlen(AMD_VBIOS_SIGNATURE))) + return false; + + /* valid vbios, go on */ + len = AMD_VBIOS_LENGTH(header); + len = ALIGN(len, 4); + adev->bios = kmalloc(len, GFP_KERNEL); + if (!adev->bios) { + DRM_ERROR("no memory to allocate for BIOS\n"); + return false; + } + + /* read complete BIOS */ + return amdgpu_asic_read_bios_from_rom(adev, adev->bios, len); +} + static bool amdgpu_read_platform_bios(struct amdgpu_device *adev) { uint8_t __iomem *bios; @@ -113,7 +152,7 @@ static bool amdgpu_read_platform_bios(struct amdgpu_device *adev) return false; } - if (size == 0 || bios[0] != 0x55 || bios[1] != 0xaa) { + if (size == 0 || !AMD_IS_VALID_VBIOS(bios)) { return false; } adev->bios = kmemdup(bios, size, GFP_KERNEL); @@ -230,7 +269,7 @@ static bool amdgpu_atrm_get_bios(struct amdgpu_device *adev) break; } - if (i == 0 || adev->bios[0] != 0x55 || adev->bios[1] != 0xaa) { + if (i == 0 || !AMD_IS_VALID_VBIOS(adev->bios)) { kfree(adev->bios); return false; } @@ -319,6 +358,9 @@ bool amdgpu_get_bios(struct amdgpu_device *adev) r = igp_read_bios_from_vram(adev); if (r == false) r = amdgpu_read_bios(adev); + if (r == false) { + r = amdgpu_read_bios_from_rom(adev); + } if (r == false) { 
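The new AMD_VBIOS_* macros centralize the 0x55AA signature check, the " 761295520" signature string at offset 0x30, and the length byte (in 512-byte units) that were previously open-coded at each call site. A sketch, for illustration only, of validating a complete image with them; the in-tree user is amdgpu_read_bios_from_rom() above:

static bool example_vbios_image_ok(const u8 *image, size_t size)
{
	if (size < AMD_VBIOS_SIGNATURE_END || !AMD_IS_VALID_VBIOS(image))
		return false;

	/* AMD/ATI signature string must sit at offset 0x30 */
	if (memcmp(&image[AMD_VBIOS_SIGNATURE_OFFSET], AMD_VBIOS_SIGNATURE,
		   strlen(AMD_VBIOS_SIGNATURE)) != 0)
		return false;

	/* byte 2 gives the ROM length in 512-byte blocks */
	return (size_t)AMD_VBIOS_LENGTH(image) <= size;
}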
r = amdgpu_read_disabled_bios(adev); } @@ -330,7 +372,7 @@ bool amdgpu_get_bios(struct amdgpu_device *adev) adev->bios = NULL; return false; } - if (adev->bios[0] != 0x55 || adev->bios[1] != 0xaa) { + if (!AMD_IS_VALID_VBIOS(adev->bios)) { printk("BIOS signature incorrect %x %x\n", adev->bios[0], adev->bios[1]); goto free_bios; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c index 8e995148f56e..a081dda9fa2f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c @@ -24,6 +24,7 @@ #include #include #include +#include #include #include #include @@ -32,7 +33,6 @@ #include "atom.h" #include "amdgpu_ucode.h" - struct amdgpu_cgs_device { struct cgs_device base; struct amdgpu_device *adev; @@ -398,6 +398,41 @@ static void amdgpu_cgs_write_pci_config_dword(void *cgs_device, unsigned addr, WARN(ret, "pci_write_config_dword error"); } + +static int amdgpu_cgs_get_pci_resource(void *cgs_device, + enum cgs_resource_type resource_type, + uint64_t size, + uint64_t offset, + uint64_t *resource_base) +{ + CGS_FUNC_ADEV; + + if (resource_base == NULL) + return -EINVAL; + + switch (resource_type) { + case CGS_RESOURCE_TYPE_MMIO: + if (adev->rmmio_size == 0) + return -ENOENT; + if ((offset + size) > adev->rmmio_size) + return -EINVAL; + *resource_base = adev->rmmio_base; + return 0; + case CGS_RESOURCE_TYPE_DOORBELL: + if (adev->doorbell.size == 0) + return -ENOENT; + if ((offset + size) > adev->doorbell.size) + return -EINVAL; + *resource_base = adev->doorbell.base; + return 0; + case CGS_RESOURCE_TYPE_FB: + case CGS_RESOURCE_TYPE_IO: + case CGS_RESOURCE_TYPE_ROM: + default: + return -EINVAL; + } +} + static const void *amdgpu_cgs_atom_get_data_table(void *cgs_device, unsigned table, uint16_t *size, uint8_t *frev, uint8_t *crev) @@ -703,6 +738,9 @@ static int amdgpu_cgs_get_firmware_info(void *cgs_device, case CHIP_TONGA: strcpy(fw_name, "amdgpu/tonga_smc.bin"); break; + case CHIP_FIJI: + strcpy(fw_name, "amdgpu/fiji_smc.bin"); + break; default: DRM_ERROR("SMC firmware not supported\n"); return -EINVAL; @@ -736,6 +774,288 @@ static int amdgpu_cgs_get_firmware_info(void *cgs_device, return 0; } +static int amdgpu_cgs_query_system_info(void *cgs_device, + struct cgs_system_info *sys_info) +{ + CGS_FUNC_ADEV; + + if (NULL == sys_info) + return -ENODEV; + + if (sizeof(struct cgs_system_info) != sys_info->size) + return -ENODEV; + + switch (sys_info->info_id) { + case CGS_SYSTEM_INFO_ADAPTER_BDF_ID: + sys_info->value = adev->pdev->devfn | (adev->pdev->bus->number << 8); + break; + case CGS_SYSTEM_INFO_PCIE_GEN_INFO: + sys_info->value = adev->pm.pcie_gen_mask; + break; + case CGS_SYSTEM_INFO_PCIE_MLW: + sys_info->value = adev->pm.pcie_mlw_mask; + break; + default: + return -ENODEV; + } + + return 0; +} + +static int amdgpu_cgs_get_active_displays_info(void *cgs_device, + struct cgs_display_info *info) +{ + CGS_FUNC_ADEV; + struct amdgpu_crtc *amdgpu_crtc; + struct drm_device *ddev = adev->ddev; + struct drm_crtc *crtc; + uint32_t line_time_us, vblank_lines; + + if (info == NULL) + return -EINVAL; + + if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) { + list_for_each_entry(crtc, + &ddev->mode_config.crtc_list, head) { + amdgpu_crtc = to_amdgpu_crtc(crtc); + if (crtc->enabled) { + info->active_display_mask |= (1 << amdgpu_crtc->crtc_id); + info->display_count++; + } + if (info->mode_info != NULL && + crtc->enabled && amdgpu_crtc->enabled && + amdgpu_crtc->hw_mode.clock) { + line_time_us = 
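amdgpu_cgs_get_pci_resource() gives CGS clients (such as the powerplay component) the base address of the MMIO or doorbell aperture without reaching into amdgpu internals; FB, IO and ROM resource types are deliberately rejected for now. A hedged sketch of a client-side query, assuming the call is reached through the cgs_ops table:

static int example_query_doorbell_base(void *cgs_device, uint64_t *base)
{
	/* ask for the first 8 bytes of the doorbell BAR;
	 * returns -ENOENT when no doorbell aperture is mapped */
	return amdgpu_cgs_get_pci_resource(cgs_device,
					   CGS_RESOURCE_TYPE_DOORBELL,
					   8, 0, base);
}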
(amdgpu_crtc->hw_mode.crtc_htotal * 1000) / + amdgpu_crtc->hw_mode.clock; + vblank_lines = amdgpu_crtc->hw_mode.crtc_vblank_end - + amdgpu_crtc->hw_mode.crtc_vdisplay + + (amdgpu_crtc->v_border * 2); + info->mode_info->vblank_time_us = vblank_lines * line_time_us; + info->mode_info->refresh_rate = drm_mode_vrefresh(&amdgpu_crtc->hw_mode); + info->mode_info->ref_clock = adev->clock.spll.reference_freq; + info->mode_info++; + } + } + } + + return 0; +} + +/** \brief evaluate acpi namespace object, handle or pathname must be valid + * \param cgs_device + * \param info input/output arguments for the control method + * \return status + */ + +#if defined(CONFIG_ACPI) +static int amdgpu_cgs_acpi_eval_object(void *cgs_device, + struct cgs_acpi_method_info *info) +{ + CGS_FUNC_ADEV; + acpi_handle handle; + struct acpi_object_list input; + struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; + union acpi_object *params = NULL; + union acpi_object *obj = NULL; + uint8_t name[5] = {'\0'}; + struct cgs_acpi_method_argument *argument = NULL; + uint32_t i, count; + acpi_status status; + int result; + uint32_t func_no = 0xFFFFFFFF; + + handle = ACPI_HANDLE(&adev->pdev->dev); + if (!handle) + return -ENODEV; + + memset(&input, 0, sizeof(struct acpi_object_list)); + + /* validate input info */ + if (info->size != sizeof(struct cgs_acpi_method_info)) + return -EINVAL; + + input.count = info->input_count; + if (info->input_count > 0) { + if (info->pinput_argument == NULL) + return -EINVAL; + argument = info->pinput_argument; + func_no = argument->value; + for (i = 0; i < info->input_count; i++) { + if (((argument->type == ACPI_TYPE_STRING) || + (argument->type == ACPI_TYPE_BUFFER)) && + (argument->pointer == NULL)) + return -EINVAL; + argument++; + } + } + + if (info->output_count > 0) { + if (info->poutput_argument == NULL) + return -EINVAL; + argument = info->poutput_argument; + for (i = 0; i < info->output_count; i++) { + if (((argument->type == ACPI_TYPE_STRING) || + (argument->type == ACPI_TYPE_BUFFER)) + && (argument->pointer == NULL)) + return -EINVAL; + argument++; + } + } + + /* The path name passed to acpi_evaluate_object should be null terminated */ + if ((info->field & CGS_ACPI_FIELD_METHOD_NAME) != 0) { + strncpy(name, (char *)&(info->name), sizeof(uint32_t)); + name[4] = '\0'; + } + + /* parse input parameters */ + if (input.count > 0) { + input.pointer = params = + kzalloc(sizeof(union acpi_object) * input.count, GFP_KERNEL); + if (params == NULL) + return -EINVAL; + + argument = info->pinput_argument; + + for (i = 0; i < input.count; i++) { + params->type = argument->type; + switch (params->type) { + case ACPI_TYPE_INTEGER: + params->integer.value = argument->value; + break; + case ACPI_TYPE_STRING: + params->string.length = argument->method_length; + params->string.pointer = argument->pointer; + break; + case ACPI_TYPE_BUFFER: + params->buffer.length = argument->method_length; + params->buffer.pointer = argument->pointer; + break; + default: + break; + } + params++; + argument++; + } + } + + /* parse output info */ + count = info->output_count; + argument = info->poutput_argument; + + /* evaluate the acpi method */ + status = acpi_evaluate_object(handle, name, &input, &output); + + if (ACPI_FAILURE(status)) { + result = -EIO; + goto error; + } + + /* return the output info */ + obj = output.pointer; + + if (count > 1) { + if ((obj->type != ACPI_TYPE_PACKAGE) || + (obj->package.count != count)) { + result = -EIO; + goto error; + } + params = obj->package.elements; + } else + params = 
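The vblank estimate above is plain integer arithmetic on the hardware mode: with hw_mode.clock in kHz, crtc_htotal * 1000 / clock yields the line time in microseconds, and the number of blanking lines is crtc_vblank_end - crtc_vdisplay plus any vertical border. For a standard 1080p60 timing (htotal 2200, pixel clock 148500 kHz, vblank_end 1125, vdisplay 1080), for example, that gives 2200 * 1000 / 148500 = 14 us per line and 45 blanking lines, so vblank_time_us is reported as roughly 630 us; the integer division makes the figure slightly conservative compared with the exact 666 us.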
obj; + + if (params == NULL) { + result = -EIO; + goto error; + } + + for (i = 0; i < count; i++) { + if (argument->type != params->type) { + result = -EIO; + goto error; + } + switch (params->type) { + case ACPI_TYPE_INTEGER: + argument->value = params->integer.value; + break; + case ACPI_TYPE_STRING: + if ((params->string.length != argument->data_length) || + (params->string.pointer == NULL)) { + result = -EIO; + goto error; + } + strncpy(argument->pointer, + params->string.pointer, + params->string.length); + break; + case ACPI_TYPE_BUFFER: + if (params->buffer.pointer == NULL) { + result = -EIO; + goto error; + } + memcpy(argument->pointer, + params->buffer.pointer, + argument->data_length); + break; + default: + break; + } + argument++; + params++; + } + +error: + if (obj != NULL) + kfree(obj); + kfree((void *)input.pointer); + return result; +} +#else +static int amdgpu_cgs_acpi_eval_object(void *cgs_device, + struct cgs_acpi_method_info *info) +{ + return -EIO; +} +#endif + +int amdgpu_cgs_call_acpi_method(void *cgs_device, + uint32_t acpi_method, + uint32_t acpi_function, + void *pinput, void *poutput, + uint32_t output_count, + uint32_t input_size, + uint32_t output_size) +{ + struct cgs_acpi_method_argument acpi_input[2] = { {0}, {0} }; + struct cgs_acpi_method_argument acpi_output = {0}; + struct cgs_acpi_method_info info = {0}; + + acpi_input[0].type = CGS_ACPI_TYPE_INTEGER; + acpi_input[0].method_length = sizeof(uint32_t); + acpi_input[0].data_length = sizeof(uint32_t); + acpi_input[0].value = acpi_function; + + acpi_input[1].type = CGS_ACPI_TYPE_BUFFER; + acpi_input[1].method_length = CGS_ACPI_MAX_BUFFER_SIZE; + acpi_input[1].data_length = input_size; + acpi_input[1].pointer = pinput; + + acpi_output.type = CGS_ACPI_TYPE_BUFFER; + acpi_output.method_length = CGS_ACPI_MAX_BUFFER_SIZE; + acpi_output.data_length = output_size; + acpi_output.pointer = poutput; + + info.size = sizeof(struct cgs_acpi_method_info); + info.field = CGS_ACPI_FIELD_METHOD_NAME | CGS_ACPI_FIELD_INPUT_ARGUMENT_COUNT; + info.input_count = 2; + info.name = acpi_method; + info.pinput_argument = acpi_input; + info.output_count = output_count; + info.poutput_argument = &acpi_output; + + return amdgpu_cgs_acpi_eval_object(cgs_device, &info); +} + static const struct cgs_ops amdgpu_cgs_ops = { amdgpu_cgs_gpu_mem_info, amdgpu_cgs_gmap_kmem, @@ -756,6 +1076,7 @@ static const struct cgs_ops amdgpu_cgs_ops = { amdgpu_cgs_write_pci_config_byte, amdgpu_cgs_write_pci_config_word, amdgpu_cgs_write_pci_config_dword, + amdgpu_cgs_get_pci_resource, amdgpu_cgs_atom_get_data_table, amdgpu_cgs_atom_get_cmd_table_revs, amdgpu_cgs_atom_exec_cmd_table, @@ -768,7 +1089,10 @@ static const struct cgs_ops amdgpu_cgs_ops = { amdgpu_cgs_set_camera_voltages, amdgpu_cgs_get_firmware_info, amdgpu_cgs_set_powergating_state, - amdgpu_cgs_set_clockgating_state + amdgpu_cgs_set_clockgating_state, + amdgpu_cgs_get_active_displays_info, + amdgpu_cgs_call_acpi_method, + amdgpu_cgs_query_system_info, }; static const struct cgs_os_ops amdgpu_cgs_os_ops = { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 25a3e2485cc2..6f89f8e034d0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -406,8 +406,8 @@ static int amdgpu_cs_parser_relocs(struct amdgpu_cs_parser *p) amdgpu_cs_buckets_get_list(&buckets, &p->validated); } - p->vm_bos = amdgpu_vm_get_bos(p->adev, &fpriv->vm, - &p->validated); + INIT_LIST_HEAD(&duplicates); + amdgpu_vm_get_pd_bo(&fpriv->vm, 
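amdgpu_cgs_call_acpi_method() packs the common "integer function number plus input buffer" calling convention into the lower-level cgs_acpi_method_info machinery above. A sketch of a call; the method name and function number are chosen purely for illustration:

static int example_eval_acpi_method(void *cgs_device)
{
	u32 input = 0, output = 0;

	/* the method name is the 4-character ACPI name packed into a u32;
	 * 0x46495441 ("ATIF" in memory order on little-endian) is only an
	 * example here, real callers pass whatever method they need */
	return amdgpu_cgs_call_acpi_method(cgs_device,
					   0x46495441,	/* acpi_method */
					   0x0,		/* acpi_function */
					   &input, &output,
					   1,		/* output_count */
					   sizeof(input),
					   sizeof(output));
}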
&p->validated, &p->vm_pd); if (p->uf.bo) list_add(&p->uf_entry.tv.head, &p->validated); @@ -415,20 +415,23 @@ static int amdgpu_cs_parser_relocs(struct amdgpu_cs_parser *p) if (need_mmap_lock) down_read(¤t->mm->mmap_sem); - INIT_LIST_HEAD(&duplicates); r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true, &duplicates); if (unlikely(r != 0)) goto error_reserve; - r = amdgpu_cs_list_validate(p->adev, &fpriv->vm, &p->validated); + amdgpu_vm_get_pt_bos(&fpriv->vm, &duplicates); + + r = amdgpu_cs_list_validate(p->adev, &fpriv->vm, &duplicates); if (r) goto error_validate; - r = amdgpu_cs_list_validate(p->adev, &fpriv->vm, &duplicates); + r = amdgpu_cs_list_validate(p->adev, &fpriv->vm, &p->validated); error_validate: - if (r) + if (r) { + amdgpu_vm_move_pt_bos_in_lru(p->adev, &fpriv->vm); ttm_eu_backoff_reservation(&p->ticket, &p->validated); + } error_reserve: if (need_mmap_lock) @@ -472,8 +475,11 @@ static int cmp_size_smaller_first(void *priv, struct list_head *a, **/ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bool backoff) { + struct amdgpu_fpriv *fpriv = parser->filp->driver_priv; unsigned i; + amdgpu_vm_move_pt_bos_in_lru(parser->adev, &fpriv->vm); + if (!error) { /* Sort the buffer list from the smallest to largest buffer, * which affects the order of buffers in the LRU list. @@ -501,7 +507,6 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bo if (parser->bo_list) amdgpu_bo_list_put(parser->bo_list); - drm_free_large(parser->vm_bos); for (i = 0; i < parser->nchunks; i++) drm_free_large(parser->chunks[i].kdata); kfree(parser->chunks); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c index fec65f01c031..17d1fb12128a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c @@ -25,7 +25,7 @@ #include #include "amdgpu.h" -int amdgpu_ctx_init(struct amdgpu_device *adev, bool kernel, +int amdgpu_ctx_init(struct amdgpu_device *adev, enum amd_sched_priority pri, struct amdgpu_ctx *ctx) { unsigned i, j; @@ -35,17 +35,25 @@ int amdgpu_ctx_init(struct amdgpu_device *adev, bool kernel, ctx->adev = adev; kref_init(&ctx->refcount); spin_lock_init(&ctx->ring_lock); - for (i = 0; i < AMDGPU_MAX_RINGS; ++i) - ctx->rings[i].sequence = 1; + ctx->fences = kzalloc(sizeof(struct fence *) * amdgpu_sched_jobs * + AMDGPU_MAX_RINGS, GFP_KERNEL); + if (!ctx->fences) + return -ENOMEM; + for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { + ctx->rings[i].sequence = 1; + ctx->rings[i].fences = (void *)ctx->fences + sizeof(struct fence *) * + amdgpu_sched_jobs * i; + } if (amdgpu_enable_scheduler) { /* create context entity for each ring */ for (i = 0; i < adev->num_rings; i++) { struct amd_sched_rq *rq; - if (kernel) - rq = &adev->rings[i]->sched.kernel_rq; - else - rq = &adev->rings[i]->sched.sched_rq; + if (pri >= AMD_SCHED_MAX_PRIORITY) { + kfree(ctx->fences); + return -EINVAL; + } + rq = &adev->rings[i]->sched.sched_rq[pri]; r = amd_sched_entity_init(&adev->rings[i]->sched, &ctx->rings[i].entity, rq, amdgpu_sched_jobs); @@ -57,7 +65,7 @@ int amdgpu_ctx_init(struct amdgpu_device *adev, bool kernel, for (j = 0; j < i; j++) amd_sched_entity_fini(&adev->rings[j]->sched, &ctx->rings[j].entity); - kfree(ctx); + kfree(ctx->fences); return r; } } @@ -73,8 +81,9 @@ void amdgpu_ctx_fini(struct amdgpu_ctx *ctx) return; for (i = 0; i < AMDGPU_MAX_RINGS; ++i) - for (j = 0; j < AMDGPU_CTX_MAX_CS_PENDING; ++j) + for (j = 0; j < amdgpu_sched_jobs; ++j) 
fence_put(ctx->rings[i].fences[j]); + kfree(ctx->fences); if (amdgpu_enable_scheduler) { for (i = 0; i < adev->num_rings; i++) @@ -103,9 +112,13 @@ static int amdgpu_ctx_alloc(struct amdgpu_device *adev, return r; } *id = (uint32_t)r; - r = amdgpu_ctx_init(adev, false, ctx); + r = amdgpu_ctx_init(adev, AMD_SCHED_PRIORITY_NORMAL, ctx); + if (r) { + idr_remove(&mgr->ctx_handles, *id); + *id = 0; + kfree(ctx); + } mutex_unlock(&mgr->lock); - return r; } @@ -239,7 +252,7 @@ uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring, unsigned idx = 0; struct fence *other = NULL; - idx = seq % AMDGPU_CTX_MAX_CS_PENDING; + idx = seq & (amdgpu_sched_jobs - 1); other = cring->fences[idx]; if (other) { signed long r; @@ -274,12 +287,12 @@ struct fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx, } - if (seq + AMDGPU_CTX_MAX_CS_PENDING < cring->sequence) { + if (seq + amdgpu_sched_jobs < cring->sequence) { spin_unlock(&ctx->ring_lock); return NULL; } - fence = fence_get(cring->fences[seq % AMDGPU_CTX_MAX_CS_PENDING]); + fence = fence_get(cring->fences[seq & (amdgpu_sched_jobs - 1)]); spin_unlock(&ctx->ring_lock); return fence; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index d5b421330145..65531463f88e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -38,6 +38,7 @@ #include "amdgpu_i2c.h" #include "atom.h" #include "amdgpu_atombios.h" +#include "amd_pcie.h" #ifdef CONFIG_DRM_AMDGPU_CIK #include "cik.h" #endif @@ -949,6 +950,15 @@ static bool amdgpu_check_pot_argument(int arg) */ static void amdgpu_check_arguments(struct amdgpu_device *adev) { + if (amdgpu_sched_jobs < 4) { + dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n", + amdgpu_sched_jobs); + amdgpu_sched_jobs = 4; + } else if (!amdgpu_check_pot_argument(amdgpu_sched_jobs)){ + dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n", + amdgpu_sched_jobs); + amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs); + } /* vramlimit must be a power of two */ if (!amdgpu_check_pot_argument(amdgpu_vram_limit)) { dev_warn(adev->dev, "vram limit (%d) must be a power of 2\n", @@ -1214,12 +1224,14 @@ static int amdgpu_early_init(struct amdgpu_device *adev) } else { if (adev->ip_blocks[i].funcs->early_init) { r = adev->ip_blocks[i].funcs->early_init((void *)adev); - if (r == -ENOENT) + if (r == -ENOENT) { adev->ip_block_status[i].valid = false; - else if (r) + } else if (r) { + DRM_ERROR("early_init %d failed %d\n", i, r); return r; - else + } else { adev->ip_block_status[i].valid = true; + } } else { adev->ip_block_status[i].valid = true; } @@ -1237,20 +1249,28 @@ static int amdgpu_init(struct amdgpu_device *adev) if (!adev->ip_block_status[i].valid) continue; r = adev->ip_blocks[i].funcs->sw_init((void *)adev); - if (r) + if (r) { + DRM_ERROR("sw_init %d failed %d\n", i, r); return r; + } adev->ip_block_status[i].sw = true; /* need to do gmc hw init early so we can allocate gpu mem */ if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) { r = amdgpu_vram_scratch_init(adev); - if (r) + if (r) { + DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r); return r; + } r = adev->ip_blocks[i].funcs->hw_init((void *)adev); - if (r) + if (r) { + DRM_ERROR("hw_init %d failed %d\n", i, r); return r; + } r = amdgpu_wb_init(adev); - if (r) + if (r) { + DRM_ERROR("amdgpu_wb_init failed %d\n", r); return r; + } adev->ip_block_status[i].hw = true; } } @@ -1262,8 +1282,10 @@ static int amdgpu_init(struct amdgpu_device 
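The fence ring in each context is now sized by the amdgpu_sched_jobs module parameter and indexed by masking the sequence number, which is why amdgpu_check_arguments() clamps the parameter to at least 4 and rounds it up with roundup_pow_of_two(). Schematically (illustrative helper, not part of the patch):

static unsigned example_ctx_fence_slot(uint64_t seq, unsigned sched_jobs)
{
	/* seq & (sched_jobs - 1) equals seq % sched_jobs only when
	 * sched_jobs is a power of two, hence the rounding above */
	return seq & (sched_jobs - 1);
}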
*adev) if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) continue; r = adev->ip_blocks[i].funcs->hw_init((void *)adev); - if (r) + if (r) { + DRM_ERROR("hw_init %d failed %d\n", i, r); return r; + } adev->ip_block_status[i].hw = true; } @@ -1280,12 +1302,16 @@ static int amdgpu_late_init(struct amdgpu_device *adev) /* enable clockgating to save power */ r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev, AMD_CG_STATE_GATE); - if (r) + if (r) { + DRM_ERROR("set_clockgating_state(gate) %d failed %d\n", i, r); return r; + } if (adev->ip_blocks[i].funcs->late_init) { r = adev->ip_blocks[i].funcs->late_init((void *)adev); - if (r) + if (r) { + DRM_ERROR("late_init %d failed %d\n", i, r); return r; + } } } @@ -1306,10 +1332,15 @@ static int amdgpu_fini(struct amdgpu_device *adev) /* ungate blocks before hw fini so that we can shutdown the blocks safely */ r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev, AMD_CG_STATE_UNGATE); - if (r) + if (r) { + DRM_ERROR("set_clockgating_state(ungate) %d failed %d\n", i, r); return r; + } r = adev->ip_blocks[i].funcs->hw_fini((void *)adev); /* XXX handle errors */ + if (r) { + DRM_DEBUG("hw_fini %d failed %d\n", i, r); + } adev->ip_block_status[i].hw = false; } @@ -1318,6 +1349,9 @@ static int amdgpu_fini(struct amdgpu_device *adev) continue; r = adev->ip_blocks[i].funcs->sw_fini((void *)adev); /* XXX handle errors */ + if (r) { + DRM_DEBUG("sw_fini %d failed %d\n", i, r); + } adev->ip_block_status[i].sw = false; adev->ip_block_status[i].valid = false; } @@ -1335,9 +1369,15 @@ static int amdgpu_suspend(struct amdgpu_device *adev) /* ungate blocks so that suspend can properly shut them down */ r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev, AMD_CG_STATE_UNGATE); + if (r) { + DRM_ERROR("set_clockgating_state(ungate) %d failed %d\n", i, r); + } /* XXX handle errors */ r = adev->ip_blocks[i].funcs->suspend(adev); /* XXX handle errors */ + if (r) { + DRM_ERROR("suspend %d failed %d\n", i, r); + } } return 0; @@ -1351,8 +1391,10 @@ static int amdgpu_resume(struct amdgpu_device *adev) if (!adev->ip_block_status[i].valid) continue; r = adev->ip_blocks[i].funcs->resume(adev); - if (r) + if (r) { + DRM_ERROR("resume %d failed %d\n", i, r); return r; + } } return 0; @@ -1484,8 +1526,10 @@ int amdgpu_device_init(struct amdgpu_device *adev, return -EINVAL; } r = amdgpu_atombios_init(adev); - if (r) + if (r) { + dev_err(adev->dev, "amdgpu_atombios_init failed\n"); return r; + } /* Post card if necessary */ if (!amdgpu_card_posted(adev)) { @@ -1499,21 +1543,26 @@ int amdgpu_device_init(struct amdgpu_device *adev, /* Initialize clocks */ r = amdgpu_atombios_get_clock_info(adev); - if (r) + if (r) { + dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n"); return r; + } /* init i2c buses */ amdgpu_atombios_i2c_init(adev); /* Fence driver */ r = amdgpu_fence_driver_init(adev); - if (r) + if (r) { + dev_err(adev->dev, "amdgpu_fence_driver_init failed\n"); return r; + } /* init the mode config */ drm_mode_config_init(adev->ddev); r = amdgpu_init(adev); if (r) { + dev_err(adev->dev, "amdgpu_init failed\n"); amdgpu_fini(adev); return r; } @@ -1528,7 +1577,7 @@ int amdgpu_device_init(struct amdgpu_device *adev, return r; } - r = amdgpu_ctx_init(adev, true, &adev->kernel_ctx); + r = amdgpu_ctx_init(adev, AMD_SCHED_PRIORITY_KERNEL, &adev->kernel_ctx); if (r) { dev_err(adev->dev, "failed to create kernel context (%d).\n", r); return r; @@ -1570,8 +1619,10 @@ int amdgpu_device_init(struct amdgpu_device *adev, * explicit gating 
rather than handling it automatically. */ r = amdgpu_late_init(adev); - if (r) + if (r) { + dev_err(adev->dev, "amdgpu_late_init failed\n"); return r; + } return 0; } @@ -1788,6 +1839,7 @@ int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon) } drm_kms_helper_poll_enable(dev); + drm_helper_hpd_irq_event(dev); if (fbcon) { amdgpu_fbdev_set_suspend(adev, 0); @@ -1881,6 +1933,83 @@ retry: return r; } +void amdgpu_get_pcie_info(struct amdgpu_device *adev) +{ + u32 mask; + int ret; + + if (pci_is_root_bus(adev->pdev->bus)) + return; + + if (amdgpu_pcie_gen2 == 0) + return; + + if (adev->flags & AMD_IS_APU) + return; + + ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask); + if (!ret) { + adev->pm.pcie_gen_mask = (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 | + CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 | + CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3); + + if (mask & DRM_PCIE_SPEED_25) + adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1; + if (mask & DRM_PCIE_SPEED_50) + adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2; + if (mask & DRM_PCIE_SPEED_80) + adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3; + } + ret = drm_pcie_get_max_link_width(adev->ddev, &mask); + if (!ret) { + switch (mask) { + case 32: + adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 | + CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 | + CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 | + CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 | + CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 | + CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | + CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); + break; + case 16: + adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 | + CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 | + CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 | + CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 | + CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | + CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); + break; + case 12: + adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 | + CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 | + CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 | + CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | + CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); + break; + case 8: + adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 | + CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 | + CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | + CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); + break; + case 4: + adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 | + CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | + CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); + break; + case 2: + adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | + CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); + break; + case 1: + adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1; + break; + default: + break; + } + } +} /* * Debugfs diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 0508c5cd103a..b5dbbb573491 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -79,9 +79,10 @@ int amdgpu_vm_fault_stop = 0; int amdgpu_vm_debug = 0; int amdgpu_exp_hw_support = 0; int amdgpu_enable_scheduler = 1; -int amdgpu_sched_jobs = 16; +int amdgpu_sched_jobs = 32; int amdgpu_sched_hw_submission = 2; int amdgpu_enable_semaphores = 0; +int amdgpu_powerplay = -1; MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes"); module_param_named(vramlimit, amdgpu_vram_limit, int, 0600); @@ -155,7 +156,7 @@ module_param_named(exp_hw_support, amdgpu_exp_hw_support, int, 0444); MODULE_PARM_DESC(enable_scheduler, "enable SW GPU scheduler (1 = enable (default), 0 = disable)"); module_param_named(enable_scheduler, amdgpu_enable_scheduler, int, 0444); 
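amdgpu_get_pcie_info() translates the link capabilities reported by drm_pcie_get_speed_cap_mask() and drm_pcie_get_max_link_width() into the CAIL gen and lane-width masks that powerplay later reads back through the CGS system-info query. A sketch of that consumer side, assuming the query is reached through the cgs_ops table:

static uint64_t example_read_pcie_gen_mask(void *cgs_device)
{
	struct cgs_system_info info = {0};

	info.size = sizeof(struct cgs_system_info);
	info.info_id = CGS_SYSTEM_INFO_PCIE_GEN_INFO;

	if (amdgpu_cgs_query_system_info(cgs_device, &info))
		return 0;	/* unknown id or size mismatch */

	return info.value;	/* CAIL_*_PCIE_LINK_SPEED_SUPPORT_* bits */
}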
-MODULE_PARM_DESC(sched_jobs, "the max number of jobs supported in the sw queue (default 16)"); +MODULE_PARM_DESC(sched_jobs, "the max number of jobs supported in the sw queue (default 32)"); module_param_named(sched_jobs, amdgpu_sched_jobs, int, 0444); MODULE_PARM_DESC(sched_hw_submission, "the max number of HW submissions (default 2)"); @@ -164,6 +165,11 @@ module_param_named(sched_hw_submission, amdgpu_sched_hw_submission, int, 0444); MODULE_PARM_DESC(enable_semaphores, "Enable semaphores (1 = enable, 0 = disable (default))"); module_param_named(enable_semaphores, amdgpu_enable_semaphores, int, 0644); +#ifdef CONFIG_DRM_AMD_POWERPLAY +MODULE_PARM_DESC(powerplay, "Powerplay component (1 = enable, 0 = disable, -1 = auto (default))"); +module_param_named(powerplay, amdgpu_powerplay, int, 0444); +#endif + static struct pci_device_id pciidlist[] = { #ifdef CONFIG_DRM_AMDGPU_CIK /* Kaveri */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c index 6fcbbcc2e99e..cfb6caad2a73 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c @@ -263,7 +263,7 @@ out_unref: } if (fb && ret) { - drm_gem_object_unreference(gobj); + drm_gem_object_unreference_unlocked(gobj); drm_framebuffer_unregister_private(fb); drm_framebuffer_cleanup(fb); kfree(fb); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index 9c253c535d26..7380f782cd14 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -448,7 +448,7 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va, uint32_t operation) { struct ttm_validate_buffer tv, *entry; - struct amdgpu_bo_list_entry *vm_bos; + struct amdgpu_bo_list_entry vm_pd; struct ww_acquire_ctx ticket; struct list_head list, duplicates; unsigned domain; @@ -461,15 +461,14 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev, tv.shared = true; list_add(&tv.head, &list); - vm_bos = amdgpu_vm_get_bos(adev, bo_va->vm, &list); - if (!vm_bos) - return; + amdgpu_vm_get_pd_bo(bo_va->vm, &list, &vm_pd); /* Provide duplicates to avoid -EALREADY */ r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates); if (r) - goto error_free; + goto error_print; + amdgpu_vm_get_pt_bos(bo_va->vm, &duplicates); list_for_each_entry(entry, &list, head) { domain = amdgpu_mem_type_to_domain(entry->bo->mem.mem_type); /* if anything is swapped out don't swap it in here, @@ -499,9 +498,7 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev, error_unreserve: ttm_eu_backoff_reservation(&ticket, &list); -error_free: - drm_free_large(vm_bos); - +error_print: if (r && r != -ERESTARTSYS) DRM_ERROR("Couldn't update BO_VA (%d)\n", r); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c index 7c42ff670080..f594cfaa97e5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c @@ -25,6 +25,7 @@ * Alex Deucher * Jerome Glisse */ +#include #include #include #include @@ -312,6 +313,7 @@ int amdgpu_irq_add_id(struct amdgpu_device *adev, unsigned src_id, } adev->irq.sources[src_id] = source; + return 0; } @@ -335,15 +337,19 @@ void amdgpu_irq_dispatch(struct amdgpu_device *adev, return; } - src = adev->irq.sources[src_id]; - if (!src) { - DRM_DEBUG("Unhandled interrupt src_id: %d\n", src_id); - return; - } + if (adev->irq.virq[src_id]) { + generic_handle_irq(irq_find_mapping(adev->irq.domain, src_id)); + } else { 
+ src = adev->irq.sources[src_id]; + if (!src) { + DRM_DEBUG("Unhandled interrupt src_id: %d\n", src_id); + return; + } - r = src->funcs->process(adev, src, entry); - if (r) - DRM_ERROR("error processing interrupt (%d)\n", r); + r = src->funcs->process(adev, src, entry); + if (r) + DRM_ERROR("error processing interrupt (%d)\n", r); + } } /** @@ -461,3 +467,90 @@ bool amdgpu_irq_enabled(struct amdgpu_device *adev, struct amdgpu_irq_src *src, return !!atomic_read(&src->enabled_types[type]); } + +/* gen irq */ +static void amdgpu_irq_mask(struct irq_data *irqd) +{ + /* XXX */ +} + +static void amdgpu_irq_unmask(struct irq_data *irqd) +{ + /* XXX */ +} + +static struct irq_chip amdgpu_irq_chip = { + .name = "amdgpu-ih", + .irq_mask = amdgpu_irq_mask, + .irq_unmask = amdgpu_irq_unmask, +}; + +static int amdgpu_irqdomain_map(struct irq_domain *d, + unsigned int irq, irq_hw_number_t hwirq) +{ + if (hwirq >= AMDGPU_MAX_IRQ_SRC_ID) + return -EPERM; + + irq_set_chip_and_handler(irq, + &amdgpu_irq_chip, handle_simple_irq); + return 0; +} + +static struct irq_domain_ops amdgpu_hw_irqdomain_ops = { + .map = amdgpu_irqdomain_map, +}; + +/** + * amdgpu_irq_add_domain - create a linear irq domain + * + * @adev: amdgpu device pointer + * + * Create an irq domain for GPU interrupt sources + * that may be driven by another driver (e.g., ACP). + */ +int amdgpu_irq_add_domain(struct amdgpu_device *adev) +{ + adev->irq.domain = irq_domain_add_linear(NULL, AMDGPU_MAX_IRQ_SRC_ID, + &amdgpu_hw_irqdomain_ops, adev); + if (!adev->irq.domain) { + DRM_ERROR("GPU irq add domain failed\n"); + return -ENODEV; + } + + return 0; +} + +/** + * amdgpu_irq_remove_domain - remove the irq domain + * + * @adev: amdgpu device pointer + * + * Remove the irq domain for GPU interrupt sources + * that may be driven by another driver (e.g., ACP). + */ +void amdgpu_irq_remove_domain(struct amdgpu_device *adev) +{ + if (adev->irq.domain) { + irq_domain_remove(adev->irq.domain); + adev->irq.domain = NULL; + } +} + +/** + * amdgpu_irq_create_mapping - create a mapping between a domain irq and a + * Linux irq + * + * @adev: amdgpu device pointer + * @src_id: IH source id + * + * Create a mapping between a domain irq (GPU IH src id) and a Linux irq + * Use this for components that generate a GPU interrupt, but are driven + * by a different driver (e.g., ACP). + * Returns the Linux irq. 
+ */ +unsigned amdgpu_irq_create_mapping(struct amdgpu_device *adev, unsigned src_id) +{ + adev->irq.virq[src_id] = irq_create_mapping(adev->irq.domain, src_id); + + return adev->irq.virq[src_id]; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h index 17b01aef4278..e124b59f39c1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h @@ -24,6 +24,7 @@ #ifndef __AMDGPU_IRQ_H__ #define __AMDGPU_IRQ_H__ +#include #include "amdgpu_ih.h" #define AMDGPU_MAX_IRQ_SRC_ID 0x100 @@ -65,6 +66,10 @@ struct amdgpu_irq { /* interrupt ring */ struct amdgpu_ih_ring ih; const struct amdgpu_ih_funcs *ih_funcs; + + /* gen irq stuff */ + struct irq_domain *domain; /* GPU irq controller domain */ + unsigned virq[AMDGPU_MAX_IRQ_SRC_ID]; }; void amdgpu_irq_preinstall(struct drm_device *dev); @@ -90,4 +95,8 @@ int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src, bool amdgpu_irq_enabled(struct amdgpu_device *adev, struct amdgpu_irq_src *src, unsigned type); +int amdgpu_irq_add_domain(struct amdgpu_device *adev); +void amdgpu_irq_remove_domain(struct amdgpu_device *adev); +unsigned amdgpu_irq_create_mapping(struct amdgpu_device *adev, unsigned src_id); + #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h index a53d756672fe..fdc1be8550da 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h @@ -35,6 +35,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h index ea756e77b023..5107fb291bdb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h @@ -96,6 +96,7 @@ static inline void amdgpu_bo_unreserve(struct amdgpu_bo *bo) */ static inline u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo) { + WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_SYSTEM); return bo->tbo.offset; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c index 22a8c7d3a3ab..7d8d84eaea4a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c @@ -30,10 +30,16 @@ #include #include +#include "amd_powerplay.h" + static int amdgpu_debugfs_pm_init(struct amdgpu_device *adev); void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev) { + if (adev->pp_enabled) + /* TODO */ + return; + if (adev->pm.dpm_enabled) { mutex_lock(&adev->pm.mutex); if (power_supply_is_system_supplied() > 0) @@ -52,7 +58,12 @@ static ssize_t amdgpu_get_dpm_state(struct device *dev, { struct drm_device *ddev = dev_get_drvdata(dev); struct amdgpu_device *adev = ddev->dev_private; - enum amdgpu_pm_state_type pm = adev->pm.dpm.user_state; + enum amd_pm_state_type pm; + + if (adev->pp_enabled) { + pm = amdgpu_dpm_get_current_power_state(adev); + } else + pm = adev->pm.dpm.user_state; return snprintf(buf, PAGE_SIZE, "%s\n", (pm == POWER_STATE_TYPE_BATTERY) ? 
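The new linear irq domain lets GPU-internal blocks that are driven by separate drivers (the ACP audio block is the motivating example) receive their interrupts through the normal Linux irq machinery: amdgpu_irq_dispatch() forwards any src_id that has a mapping to generic_handle_irq() instead of the amdgpu source table. A hedged sketch of the client side; the source id, handler and names are placeholders, not values from this patch:

#define EXAMPLE_ACP_SRC_ID	0xa2	/* hypothetical IH source id */

static irqreturn_t example_acp_irq(int irq, void *data)
{
	/* a real handler would service the ACP block here */
	return IRQ_HANDLED;
}

static int example_acp_hook_irq(struct amdgpu_device *adev)
{
	unsigned virq = amdgpu_irq_create_mapping(adev, EXAMPLE_ACP_SRC_ID);

	if (!virq)
		return -ENODEV;

	/* from here on this is an ordinary Linux irq */
	return request_irq(virq, example_acp_irq, 0, "example-acp", adev);
}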
"battery" : @@ -66,40 +77,57 @@ static ssize_t amdgpu_set_dpm_state(struct device *dev, { struct drm_device *ddev = dev_get_drvdata(dev); struct amdgpu_device *adev = ddev->dev_private; + enum amd_pm_state_type state; - mutex_lock(&adev->pm.mutex); if (strncmp("battery", buf, strlen("battery")) == 0) - adev->pm.dpm.user_state = POWER_STATE_TYPE_BATTERY; + state = POWER_STATE_TYPE_BATTERY; else if (strncmp("balanced", buf, strlen("balanced")) == 0) - adev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED; + state = POWER_STATE_TYPE_BALANCED; else if (strncmp("performance", buf, strlen("performance")) == 0) - adev->pm.dpm.user_state = POWER_STATE_TYPE_PERFORMANCE; + state = POWER_STATE_TYPE_PERFORMANCE; else { - mutex_unlock(&adev->pm.mutex); count = -EINVAL; goto fail; } - mutex_unlock(&adev->pm.mutex); - /* Can't set dpm state when the card is off */ - if (!(adev->flags & AMD_IS_PX) || - (ddev->switch_power_state == DRM_SWITCH_POWER_ON)) - amdgpu_pm_compute_clocks(adev); + if (adev->pp_enabled) { + amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_ENABLE_USER_STATE, &state, NULL); + } else { + mutex_lock(&adev->pm.mutex); + adev->pm.dpm.user_state = state; + mutex_unlock(&adev->pm.mutex); + + /* Can't set dpm state when the card is off */ + if (!(adev->flags & AMD_IS_PX) || + (ddev->switch_power_state == DRM_SWITCH_POWER_ON)) + amdgpu_pm_compute_clocks(adev); + } fail: return count; } static ssize_t amdgpu_get_dpm_forced_performance_level(struct device *dev, - struct device_attribute *attr, - char *buf) + struct device_attribute *attr, + char *buf) { struct drm_device *ddev = dev_get_drvdata(dev); struct amdgpu_device *adev = ddev->dev_private; - enum amdgpu_dpm_forced_level level = adev->pm.dpm.forced_level; - return snprintf(buf, PAGE_SIZE, "%s\n", - (level == AMDGPU_DPM_FORCED_LEVEL_AUTO) ? "auto" : - (level == AMDGPU_DPM_FORCED_LEVEL_LOW) ? "low" : "high"); + if (adev->pp_enabled) { + enum amd_dpm_forced_level level; + + level = amdgpu_dpm_get_performance_level(adev); + return snprintf(buf, PAGE_SIZE, "%s\n", + (level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" : + (level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" : "high"); + } else { + enum amdgpu_dpm_forced_level level; + + level = adev->pm.dpm.forced_level; + return snprintf(buf, PAGE_SIZE, "%s\n", + (level == AMDGPU_DPM_FORCED_LEVEL_AUTO) ? "auto" : + (level == AMDGPU_DPM_FORCED_LEVEL_LOW) ? 
"low" : "high"); + } } static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev, @@ -112,7 +140,6 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev, enum amdgpu_dpm_forced_level level; int ret = 0; - mutex_lock(&adev->pm.mutex); if (strncmp("low", buf, strlen("low")) == 0) { level = AMDGPU_DPM_FORCED_LEVEL_LOW; } else if (strncmp("high", buf, strlen("high")) == 0) { @@ -123,7 +150,11 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev, count = -EINVAL; goto fail; } - if (adev->pm.funcs->force_performance_level) { + + if (adev->pp_enabled) + amdgpu_dpm_force_performance_level(adev, level); + else { + mutex_lock(&adev->pm.mutex); if (adev->pm.dpm.thermal_active) { count = -EINVAL; goto fail; @@ -131,6 +162,9 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev, ret = amdgpu_dpm_force_performance_level(adev, level); if (ret) count = -EINVAL; + else + adev->pm.dpm.forced_level = level; + mutex_unlock(&adev->pm.mutex); } fail: mutex_unlock(&adev->pm.mutex); @@ -150,10 +184,10 @@ static ssize_t amdgpu_hwmon_show_temp(struct device *dev, struct amdgpu_device *adev = dev_get_drvdata(dev); int temp; - if (adev->pm.funcs->get_temperature) - temp = amdgpu_dpm_get_temperature(adev); - else + if (!adev->pp_enabled && !adev->pm.funcs->get_temperature) temp = 0; + else + temp = amdgpu_dpm_get_temperature(adev); return snprintf(buf, PAGE_SIZE, "%d\n", temp); } @@ -181,8 +215,10 @@ static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev, struct amdgpu_device *adev = dev_get_drvdata(dev); u32 pwm_mode = 0; - if (adev->pm.funcs->get_fan_control_mode) - pwm_mode = amdgpu_dpm_get_fan_control_mode(adev); + if (!adev->pp_enabled && !adev->pm.funcs->get_fan_control_mode) + return -EINVAL; + + pwm_mode = amdgpu_dpm_get_fan_control_mode(adev); /* never 0 (full-speed), fuse or smc-controlled always */ return sprintf(buf, "%i\n", pwm_mode == FDO_PWM_MODE_STATIC ? 
1 : 2); @@ -197,7 +233,7 @@ static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev, int err; int value; - if(!adev->pm.funcs->set_fan_control_mode) + if (!adev->pp_enabled && !adev->pm.funcs->set_fan_control_mode) return -EINVAL; err = kstrtoint(buf, 10, &value); @@ -290,11 +326,11 @@ static struct attribute *hwmon_attributes[] = { static umode_t hwmon_attributes_visible(struct kobject *kobj, struct attribute *attr, int index) { - struct device *dev = container_of(kobj, struct device, kobj); + struct device *dev = kobj_to_dev(kobj); struct amdgpu_device *adev = dev_get_drvdata(dev); umode_t effective_mode = attr->mode; - /* Skip attributes if DPM is not enabled */ + /* Skip limit attributes if DPM is not enabled */ if (!adev->pm.dpm_enabled && (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr || attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr || @@ -304,6 +340,9 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj, attr == &sensor_dev_attr_pwm1_min.dev_attr.attr)) return 0; + if (adev->pp_enabled) + return effective_mode; + /* Skip fan attributes if fan is not present */ if (adev->pm.no_fan && (attr == &sensor_dev_attr_pwm1.dev_attr.attr || @@ -351,7 +390,7 @@ void amdgpu_dpm_thermal_work_handler(struct work_struct *work) container_of(work, struct amdgpu_device, pm.dpm.thermal.work); /* switch to the thermal state */ - enum amdgpu_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL; + enum amd_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL; if (!adev->pm.dpm_enabled) return; @@ -379,7 +418,7 @@ void amdgpu_dpm_thermal_work_handler(struct work_struct *work) } static struct amdgpu_ps *amdgpu_dpm_pick_power_state(struct amdgpu_device *adev, - enum amdgpu_pm_state_type dpm_state) + enum amd_pm_state_type dpm_state) { int i; struct amdgpu_ps *ps; @@ -516,7 +555,7 @@ static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev) { int i; struct amdgpu_ps *ps; - enum amdgpu_pm_state_type dpm_state; + enum amd_pm_state_type dpm_state; int ret; /* if dpm init failed */ @@ -635,49 +674,54 @@ done: void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable) { - if (adev->pm.funcs->powergate_uvd) { - mutex_lock(&adev->pm.mutex); - /* enable/disable UVD */ + if (adev->pp_enabled) amdgpu_dpm_powergate_uvd(adev, !enable); - mutex_unlock(&adev->pm.mutex); - } else { - if (enable) { + else { + if (adev->pm.funcs->powergate_uvd) { mutex_lock(&adev->pm.mutex); - adev->pm.dpm.uvd_active = true; - adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD; + /* enable/disable UVD */ + amdgpu_dpm_powergate_uvd(adev, !enable); mutex_unlock(&adev->pm.mutex); } else { - mutex_lock(&adev->pm.mutex); - adev->pm.dpm.uvd_active = false; - mutex_unlock(&adev->pm.mutex); + if (enable) { + mutex_lock(&adev->pm.mutex); + adev->pm.dpm.uvd_active = true; + adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD; + mutex_unlock(&adev->pm.mutex); + } else { + mutex_lock(&adev->pm.mutex); + adev->pm.dpm.uvd_active = false; + mutex_unlock(&adev->pm.mutex); + } + amdgpu_pm_compute_clocks(adev); } - amdgpu_pm_compute_clocks(adev); } } void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable) { - if (adev->pm.funcs->powergate_vce) { - mutex_lock(&adev->pm.mutex); - /* enable/disable VCE */ + if (adev->pp_enabled) amdgpu_dpm_powergate_vce(adev, !enable); - - mutex_unlock(&adev->pm.mutex); - } else { - if (enable) { + else { + if (adev->pm.funcs->powergate_vce) { mutex_lock(&adev->pm.mutex); - adev->pm.dpm.vce_active = true; - /* XXX select vce level based on 
ring/task */ - adev->pm.dpm.vce_level = AMDGPU_VCE_LEVEL_AC_ALL; + amdgpu_dpm_powergate_vce(adev, !enable); mutex_unlock(&adev->pm.mutex); } else { - mutex_lock(&adev->pm.mutex); - adev->pm.dpm.vce_active = false; - mutex_unlock(&adev->pm.mutex); + if (enable) { + mutex_lock(&adev->pm.mutex); + adev->pm.dpm.vce_active = true; + /* XXX select vce level based on ring/task */ + adev->pm.dpm.vce_level = AMDGPU_VCE_LEVEL_AC_ALL; + mutex_unlock(&adev->pm.mutex); + } else { + mutex_lock(&adev->pm.mutex); + adev->pm.dpm.vce_active = false; + mutex_unlock(&adev->pm.mutex); + } + amdgpu_pm_compute_clocks(adev); } - - amdgpu_pm_compute_clocks(adev); } } @@ -685,10 +729,13 @@ void amdgpu_pm_print_power_states(struct amdgpu_device *adev) { int i; - for (i = 0; i < adev->pm.dpm.num_ps; i++) { - printk("== power state %d ==\n", i); + if (adev->pp_enabled) + /* TO DO */ + return; + + for (i = 0; i < adev->pm.dpm.num_ps; i++) amdgpu_dpm_print_power_state(adev, &adev->pm.dpm.ps[i]); - } + } int amdgpu_pm_sysfs_init(struct amdgpu_device *adev) @@ -698,8 +745,11 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev) if (adev->pm.sysfs_initialized) return 0; - if (adev->pm.funcs->get_temperature == NULL) - return 0; + if (!adev->pp_enabled) { + if (adev->pm.funcs->get_temperature == NULL) + return 0; + } + adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev, DRIVER_NAME, adev, hwmon_groups); @@ -748,32 +798,43 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev) if (!adev->pm.dpm_enabled) return; - mutex_lock(&adev->pm.mutex); + if (adev->pp_enabled) { + int i = 0; - /* update active crtc counts */ - adev->pm.dpm.new_active_crtcs = 0; - adev->pm.dpm.new_active_crtc_count = 0; - if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) { - list_for_each_entry(crtc, - &ddev->mode_config.crtc_list, head) { - amdgpu_crtc = to_amdgpu_crtc(crtc); - if (crtc->enabled) { - adev->pm.dpm.new_active_crtcs |= (1 << amdgpu_crtc->crtc_id); - adev->pm.dpm.new_active_crtc_count++; + amdgpu_display_bandwidth_update(adev); + mutex_lock(&adev->ring_lock); + for (i = 0; i < AMDGPU_MAX_RINGS; i++) { + struct amdgpu_ring *ring = adev->rings[i]; + if (ring && ring->ready) + amdgpu_fence_wait_empty(ring); + } + mutex_unlock(&adev->ring_lock); + + amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_DISPLAY_CONFIG_CHANGE, NULL, NULL); + } else { + mutex_lock(&adev->pm.mutex); + adev->pm.dpm.new_active_crtcs = 0; + adev->pm.dpm.new_active_crtc_count = 0; + if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) { + list_for_each_entry(crtc, + &ddev->mode_config.crtc_list, head) { + amdgpu_crtc = to_amdgpu_crtc(crtc); + if (crtc->enabled) { + adev->pm.dpm.new_active_crtcs |= (1 << amdgpu_crtc->crtc_id); + adev->pm.dpm.new_active_crtc_count++; + } } } + /* update battery/ac status */ + if (power_supply_is_system_supplied() > 0) + adev->pm.dpm.ac_power = true; + else + adev->pm.dpm.ac_power = false; + + amdgpu_dpm_change_power_state_locked(adev); + + mutex_unlock(&adev->pm.mutex); } - - /* update battery/ac status */ - if (power_supply_is_system_supplied() > 0) - adev->pm.dpm.ac_power = true; - else - adev->pm.dpm.ac_power = false; - - amdgpu_dpm_change_power_state_locked(adev); - - mutex_unlock(&adev->pm.mutex); - } /* @@ -787,7 +848,13 @@ static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data) struct drm_device *dev = node->minor->dev; struct amdgpu_device *adev = dev->dev_private; - if (adev->pm.dpm_enabled) { + if (!adev->pm.dpm_enabled) { + seq_printf(m, "dpm not enabled\n"); 
+ return 0; + } + if (adev->pp_enabled) { + amdgpu_dpm_debugfs_print_current_performance_level(adev, m); + } else { mutex_lock(&adev->pm.mutex); if (adev->pm.funcs->debugfs_print_current_performance_level) amdgpu_dpm_debugfs_print_current_performance_level(adev, m); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c new file mode 100644 index 000000000000..5ee9a0690278 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c @@ -0,0 +1,317 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ +#include "atom.h" +#include "amdgpu.h" +#include "amd_shared.h" +#include +#include +#include "amdgpu_pm.h" +#include +#include "amdgpu_powerplay.h" +#include "cik_dpm.h" +#include "vi_dpm.h" + +static int amdgpu_powerplay_init(struct amdgpu_device *adev) +{ + int ret = 0; + struct amd_powerplay *amd_pp; + + amd_pp = &(adev->powerplay); + + if (adev->pp_enabled) { +#ifdef CONFIG_DRM_AMD_POWERPLAY + struct amd_pp_init *pp_init; + + pp_init = kzalloc(sizeof(struct amd_pp_init), GFP_KERNEL); + + if (pp_init == NULL) + return -ENOMEM; + + pp_init->chip_family = adev->family; + pp_init->chip_id = adev->asic_type; + pp_init->device = amdgpu_cgs_create_device(adev); + + ret = amd_powerplay_init(pp_init, amd_pp); + kfree(pp_init); +#endif + } else { + amd_pp->pp_handle = (void *)adev; + + switch (adev->asic_type) { +#ifdef CONFIG_DRM_AMDGPU_CIK + case CHIP_BONAIRE: + case CHIP_HAWAII: + amd_pp->ip_funcs = &ci_dpm_ip_funcs; + break; + case CHIP_KABINI: + case CHIP_MULLINS: + case CHIP_KAVERI: + amd_pp->ip_funcs = &kv_dpm_ip_funcs; + break; +#endif + case CHIP_TOPAZ: + amd_pp->ip_funcs = &iceland_dpm_ip_funcs; + break; + case CHIP_TONGA: + amd_pp->ip_funcs = &tonga_dpm_ip_funcs; + break; + case CHIP_FIJI: + amd_pp->ip_funcs = &fiji_dpm_ip_funcs; + break; + case CHIP_CARRIZO: + case CHIP_STONEY: + amd_pp->ip_funcs = &cz_dpm_ip_funcs; + break; + default: + ret = -EINVAL; + break; + } + } + return ret; +} + +static int amdgpu_pp_early_init(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int ret = 0; + +#ifdef CONFIG_DRM_AMD_POWERPLAY + switch (adev->asic_type) { + case CHIP_TONGA: + case CHIP_FIJI: + adev->pp_enabled = (amdgpu_powerplay > 0) ? true : false; + break; + default: + adev->pp_enabled = (amdgpu_powerplay > 0) ? 
true : false; + break; + } +#else + adev->pp_enabled = false; +#endif + + ret = amdgpu_powerplay_init(adev); + if (ret) + return ret; + + if (adev->powerplay.ip_funcs->early_init) + ret = adev->powerplay.ip_funcs->early_init( + adev->powerplay.pp_handle); + return ret; +} + + +static int amdgpu_pp_late_init(void *handle) +{ + int ret = 0; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + if (adev->powerplay.ip_funcs->late_init) + ret = adev->powerplay.ip_funcs->late_init( + adev->powerplay.pp_handle); + +#ifdef CONFIG_DRM_AMD_POWERPLAY + if (adev->pp_enabled) + amdgpu_pm_sysfs_init(adev); +#endif + return ret; +} + +static int amdgpu_pp_sw_init(void *handle) +{ + int ret = 0; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + if (adev->powerplay.ip_funcs->sw_init) + ret = adev->powerplay.ip_funcs->sw_init( + adev->powerplay.pp_handle); + +#ifdef CONFIG_DRM_AMD_POWERPLAY + if (adev->pp_enabled) { + if (amdgpu_dpm == 0) + adev->pm.dpm_enabled = false; + else + adev->pm.dpm_enabled = true; + } +#endif + + return ret; +} + +static int amdgpu_pp_sw_fini(void *handle) +{ + int ret = 0; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + if (adev->powerplay.ip_funcs->sw_fini) + ret = adev->powerplay.ip_funcs->sw_fini( + adev->powerplay.pp_handle); + if (ret) + return ret; + +#ifdef CONFIG_DRM_AMD_POWERPLAY + if (adev->pp_enabled) { + amdgpu_pm_sysfs_fini(adev); + amd_powerplay_fini(adev->powerplay.pp_handle); + } +#endif + + return ret; +} + +static int amdgpu_pp_hw_init(void *handle) +{ + int ret = 0; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + if (adev->pp_enabled && adev->firmware.smu_load) + amdgpu_ucode_init_bo(adev); + + if (adev->powerplay.ip_funcs->hw_init) + ret = adev->powerplay.ip_funcs->hw_init( + adev->powerplay.pp_handle); + + return ret; +} + +static int amdgpu_pp_hw_fini(void *handle) +{ + int ret = 0; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + if (adev->powerplay.ip_funcs->hw_fini) + ret = adev->powerplay.ip_funcs->hw_fini( + adev->powerplay.pp_handle); + + if (adev->pp_enabled && adev->firmware.smu_load) + amdgpu_ucode_fini_bo(adev); + + return ret; +} + +static int amdgpu_pp_suspend(void *handle) +{ + int ret = 0; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + if (adev->powerplay.ip_funcs->suspend) + ret = adev->powerplay.ip_funcs->suspend( + adev->powerplay.pp_handle); + return ret; +} + +static int amdgpu_pp_resume(void *handle) +{ + int ret = 0; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + if (adev->powerplay.ip_funcs->resume) + ret = adev->powerplay.ip_funcs->resume( + adev->powerplay.pp_handle); + return ret; +} + +static int amdgpu_pp_set_clockgating_state(void *handle, + enum amd_clockgating_state state) +{ + int ret = 0; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + if (adev->powerplay.ip_funcs->set_clockgating_state) + ret = adev->powerplay.ip_funcs->set_clockgating_state( + adev->powerplay.pp_handle, state); + return ret; +} + +static int amdgpu_pp_set_powergating_state(void *handle, + enum amd_powergating_state state) +{ + int ret = 0; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + if (adev->powerplay.ip_funcs->set_powergating_state) + ret = adev->powerplay.ip_funcs->set_powergating_state( + adev->powerplay.pp_handle, state); + return ret; +} + + +static bool amdgpu_pp_is_idle(void *handle) +{ + bool ret = true; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + if 
(adev->powerplay.ip_funcs->is_idle) + ret = adev->powerplay.ip_funcs->is_idle( + adev->powerplay.pp_handle); + return ret; +} + +static int amdgpu_pp_wait_for_idle(void *handle) +{ + int ret = 0; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + if (adev->powerplay.ip_funcs->wait_for_idle) + ret = adev->powerplay.ip_funcs->wait_for_idle( + adev->powerplay.pp_handle); + return ret; +} + +static int amdgpu_pp_soft_reset(void *handle) +{ + int ret = 0; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + if (adev->powerplay.ip_funcs->soft_reset) + ret = adev->powerplay.ip_funcs->soft_reset( + adev->powerplay.pp_handle); + return ret; +} + +static void amdgpu_pp_print_status(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + if (adev->powerplay.ip_funcs->print_status) + adev->powerplay.ip_funcs->print_status( + adev->powerplay.pp_handle); +} + +const struct amd_ip_funcs amdgpu_pp_ip_funcs = { + .early_init = amdgpu_pp_early_init, + .late_init = amdgpu_pp_late_init, + .sw_init = amdgpu_pp_sw_init, + .sw_fini = amdgpu_pp_sw_fini, + .hw_init = amdgpu_pp_hw_init, + .hw_fini = amdgpu_pp_hw_fini, + .suspend = amdgpu_pp_suspend, + .resume = amdgpu_pp_resume, + .is_idle = amdgpu_pp_is_idle, + .wait_for_idle = amdgpu_pp_wait_for_idle, + .soft_reset = amdgpu_pp_soft_reset, + .print_status = amdgpu_pp_print_status, + .set_clockgating_state = amdgpu_pp_set_clockgating_state, + .set_powergating_state = amdgpu_pp_set_powergating_state, +}; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.h new file mode 100644 index 000000000000..da5cf47cfd99 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.h @@ -0,0 +1,33 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
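amdgpu_pp_ip_funcs, defined just above, wraps whichever backend was selected (the new powerplay component or one of the legacy *_dpm_ip_funcs tables) behind a single IP-block interface, so the rest of the driver treats power management like any other IP block. A sketch of how an ASIC's block list would reference it; the version numbers and neighbouring entries are illustrative, not taken from this patch:

static const struct amdgpu_ip_block_version example_ip_blocks[] = {
	/* ... GMC, IH, ... */
	{
		.type	= AMD_IP_BLOCK_TYPE_SMC,
		.major	= 7,
		.minor	= 1,
		.rev	= 0,
		.funcs	= &amdgpu_pp_ip_funcs,
	},
	/* ... DCE, GFX, SDMA, UVD, VCE ... */
};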
+ * + * Authors: AMD + * + */ + +#ifndef __AMDGPU_POWERPLAY_H__ +#define __AMDGPU_POWERPLAY_H__ + +#include "amd_shared.h" + +extern const struct amd_ip_funcs amdgpu_pp_ip_funcs; + +#endif /* __AMDGPU_POWERPLAY_H__ */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c index dd005c336c97..181ce39ef5e5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c @@ -293,7 +293,8 @@ int amdgpu_sync_rings(struct amdgpu_sync *sync, fence = to_amdgpu_fence(sync->sync_to[i]); /* check if we really need to sync */ - if (!amdgpu_fence_need_sync(fence, ring)) + if (!amdgpu_enable_scheduler && + !amdgpu_fence_need_sync(fence, ring)) continue; /* prevent GPU deadlocks */ @@ -303,7 +304,7 @@ int amdgpu_sync_rings(struct amdgpu_sync *sync, } if (amdgpu_enable_scheduler || !amdgpu_enable_semaphores) { - r = fence_wait(&fence->base, true); + r = fence_wait(sync->sync_to[i], true); if (r) return r; continue; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index b53d273eb7a1..aefc668e6b5d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -75,50 +75,77 @@ static unsigned amdgpu_vm_directory_size(struct amdgpu_device *adev) } /** - * amdgpu_vm_get_bos - add the vm BOs to a validation list + * amdgpu_vm_get_pd_bo - add the VM PD to a validation list * * @vm: vm providing the BOs - * @head: head of validation list + * @validated: head of validation list + * @entry: entry to add * * Add the page directory to the list of BOs to - * validate for command submission (cayman+). + * validate for command submission. */ -struct amdgpu_bo_list_entry *amdgpu_vm_get_bos(struct amdgpu_device *adev, - struct amdgpu_vm *vm, - struct list_head *head) +void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm, + struct list_head *validated, + struct amdgpu_bo_list_entry *entry) { - struct amdgpu_bo_list_entry *list; - unsigned i, idx; + entry->robj = vm->page_directory; + entry->prefered_domains = AMDGPU_GEM_DOMAIN_VRAM; + entry->allowed_domains = AMDGPU_GEM_DOMAIN_VRAM; + entry->priority = 0; + entry->tv.bo = &vm->page_directory->tbo; + entry->tv.shared = true; + list_add(&entry->tv.head, validated); +} - list = drm_malloc_ab(vm->max_pde_used + 2, - sizeof(struct amdgpu_bo_list_entry)); - if (!list) { - return NULL; - } +/** + * amdgpu_vm_get_pt_bos - add the VM page table BOs to a duplicates list + * + * @vm: vm providing the BOs + * @duplicates: head of duplicates list + * + * Add the page table BOs to the duplicates list + * for command submission. 
+ */ +void amdgpu_vm_get_pt_bos(struct amdgpu_vm *vm, struct list_head *duplicates) +{ + unsigned i; /* add the vm page table to the list */ - list[0].robj = vm->page_directory; - list[0].prefered_domains = AMDGPU_GEM_DOMAIN_VRAM; - list[0].allowed_domains = AMDGPU_GEM_DOMAIN_VRAM; - list[0].priority = 0; - list[0].tv.bo = &vm->page_directory->tbo; - list[0].tv.shared = true; - list_add(&list[0].tv.head, head); + for (i = 0; i <= vm->max_pde_used; ++i) { + struct amdgpu_bo_list_entry *entry = &vm->page_tables[i].entry; - for (i = 0, idx = 1; i <= vm->max_pde_used; i++) { - if (!vm->page_tables[i].bo) + if (!entry->robj) continue; - list[idx].robj = vm->page_tables[i].bo; - list[idx].prefered_domains = AMDGPU_GEM_DOMAIN_VRAM; - list[idx].allowed_domains = AMDGPU_GEM_DOMAIN_VRAM; - list[idx].priority = 0; - list[idx].tv.bo = &list[idx].robj->tbo; - list[idx].tv.shared = true; - list_add(&list[idx++].tv.head, head); + list_add(&entry->tv.head, duplicates); } - return list; +} + +/** + * amdgpu_vm_move_pt_bos_in_lru - move the PT BOs to the LRU tail + * + * @adev: amdgpu device instance + * @vm: vm providing the BOs + * + * Move the PT BOs to the tail of the LRU. + */ +void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev, + struct amdgpu_vm *vm) +{ + struct ttm_bo_global *glob = adev->mman.bdev.glob; + unsigned i; + + spin_lock(&glob->lru_lock); + for (i = 0; i <= vm->max_pde_used; ++i) { + struct amdgpu_bo_list_entry *entry = &vm->page_tables[i].entry; + + if (!entry->robj) + continue; + + ttm_bo_move_to_lru_tail(&entry->robj->tbo); + } + spin_unlock(&glob->lru_lock); } /** @@ -461,7 +488,7 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev, /* walk over the address space and update the page directory */ for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) { - struct amdgpu_bo *bo = vm->page_tables[pt_idx].bo; + struct amdgpu_bo *bo = vm->page_tables[pt_idx].entry.robj; uint64_t pde, pt; if (bo == NULL) @@ -638,7 +665,7 @@ static int amdgpu_vm_update_ptes(struct amdgpu_device *adev, /* walk over the address space and update the page tables */ for (addr = start; addr < end; ) { uint64_t pt_idx = addr >> amdgpu_vm_block_size; - struct amdgpu_bo *pt = vm->page_tables[pt_idx].bo; + struct amdgpu_bo *pt = vm->page_tables[pt_idx].entry.robj; unsigned nptes; uint64_t pte; int r; @@ -1010,13 +1037,13 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev, return -EINVAL; /* make sure object fit at this offset */ - eaddr = saddr + size; + eaddr = saddr + size - 1; if ((saddr >= eaddr) || (offset + size > amdgpu_bo_size(bo_va->bo))) return -EINVAL; last_pfn = eaddr / AMDGPU_GPU_PAGE_SIZE; - if (last_pfn > adev->vm_manager.max_pfn) { - dev_err(adev->dev, "va above limit (0x%08X > 0x%08X)\n", + if (last_pfn >= adev->vm_manager.max_pfn) { + dev_err(adev->dev, "va above limit (0x%08X >= 0x%08X)\n", last_pfn, adev->vm_manager.max_pfn); return -EINVAL; } @@ -1025,7 +1052,7 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev, eaddr /= AMDGPU_GPU_PAGE_SIZE; spin_lock(&vm->it_lock); - it = interval_tree_iter_first(&vm->va, saddr, eaddr - 1); + it = interval_tree_iter_first(&vm->va, saddr, eaddr); spin_unlock(&vm->it_lock); if (it) { struct amdgpu_bo_va_mapping *tmp; @@ -1046,7 +1073,7 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev, INIT_LIST_HEAD(&mapping->list); mapping->it.start = saddr; - mapping->it.last = eaddr - 1; + mapping->it.last = eaddr; mapping->offset = offset; mapping->flags = flags; @@ -1070,9 +1097,11 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev, /* walk over the 
address space and allocate the page tables */ for (pt_idx = saddr; pt_idx <= eaddr; ++pt_idx) { struct reservation_object *resv = vm->page_directory->tbo.resv; + struct amdgpu_bo_list_entry *entry; struct amdgpu_bo *pt; - if (vm->page_tables[pt_idx].bo) + entry = &vm->page_tables[pt_idx].entry; + if (entry->robj) continue; r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8, @@ -1094,8 +1123,13 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev, goto error_free; } + entry->robj = pt; + entry->prefered_domains = AMDGPU_GEM_DOMAIN_VRAM; + entry->allowed_domains = AMDGPU_GEM_DOMAIN_VRAM; + entry->priority = 0; + entry->tv.bo = &entry->robj->tbo; + entry->tv.shared = true; vm->page_tables[pt_idx].addr = 0; - vm->page_tables[pt_idx].bo = pt; } return 0; @@ -1326,7 +1360,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) } for (i = 0; i < amdgpu_vm_num_pdes(adev); i++) - amdgpu_bo_unref(&vm->page_tables[i].bo); + amdgpu_bo_unref(&vm->page_tables[i].entry.robj); kfree(vm->page_tables); amdgpu_bo_unref(&vm->page_directory); diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c index 92b6acadfc52..21aacc1f45c1 100644 --- a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c +++ b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c @@ -243,7 +243,7 @@ static void amdgpu_atombios_dp_get_adjust_train(const u8 link_status[DP_LINK_STA /* convert bits per color to bits per pixel */ /* get bpc from the EDID */ -static int amdgpu_atombios_dp_convert_bpc_to_bpp(int bpc) +static unsigned amdgpu_atombios_dp_convert_bpc_to_bpp(int bpc) { if (bpc == 0) return 24; @@ -251,64 +251,32 @@ static int amdgpu_atombios_dp_convert_bpc_to_bpp(int bpc) return bpc * 3; } -/* get the max pix clock supported by the link rate and lane num */ -static int amdgpu_atombios_dp_get_max_dp_pix_clock(int link_rate, - int lane_num, - int bpp) -{ - return (link_rate * lane_num * 8) / bpp; -} - /***** amdgpu specific DP functions *****/ -/* First get the min lane# when low rate is used according to pixel clock - * (prefer low rate), second check max lane# supported by DP panel, - * if the max lane# < low rate lane# then use max lane# instead. 
- */ -static int amdgpu_atombios_dp_get_dp_lane_number(struct drm_connector *connector, +static int amdgpu_atombios_dp_get_dp_link_config(struct drm_connector *connector, const u8 dpcd[DP_DPCD_SIZE], - int pix_clock) + unsigned pix_clock, + unsigned *dp_lanes, unsigned *dp_rate) { - int bpp = amdgpu_atombios_dp_convert_bpc_to_bpp(amdgpu_connector_get_monitor_bpc(connector)); - int max_link_rate = drm_dp_max_link_rate(dpcd); - int max_lane_num = drm_dp_max_lane_count(dpcd); - int lane_num; - int max_dp_pix_clock; + unsigned bpp = + amdgpu_atombios_dp_convert_bpc_to_bpp(amdgpu_connector_get_monitor_bpc(connector)); + static const unsigned link_rates[3] = { 162000, 270000, 540000 }; + unsigned max_link_rate = drm_dp_max_link_rate(dpcd); + unsigned max_lane_num = drm_dp_max_lane_count(dpcd); + unsigned lane_num, i, max_pix_clock; - for (lane_num = 1; lane_num < max_lane_num; lane_num <<= 1) { - max_dp_pix_clock = amdgpu_atombios_dp_get_max_dp_pix_clock(max_link_rate, lane_num, bpp); - if (pix_clock <= max_dp_pix_clock) - break; + for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) { + for (i = 0; i < ARRAY_SIZE(link_rates) && link_rates[i] <= max_link_rate; i++) { + max_pix_clock = (lane_num * link_rates[i] * 8) / bpp; + if (max_pix_clock >= pix_clock) { + *dp_lanes = lane_num; + *dp_rate = link_rates[i]; + return 0; + } + } } - return lane_num; -} - -static int amdgpu_atombios_dp_get_dp_link_clock(struct drm_connector *connector, - const u8 dpcd[DP_DPCD_SIZE], - int pix_clock) -{ - int bpp = amdgpu_atombios_dp_convert_bpc_to_bpp(amdgpu_connector_get_monitor_bpc(connector)); - int lane_num, max_pix_clock; - - if (amdgpu_connector_encoder_get_dp_bridge_encoder_id(connector) == - ENCODER_OBJECT_ID_NUTMEG) - return 270000; - - lane_num = amdgpu_atombios_dp_get_dp_lane_number(connector, dpcd, pix_clock); - max_pix_clock = amdgpu_atombios_dp_get_max_dp_pix_clock(162000, lane_num, bpp); - if (pix_clock <= max_pix_clock) - return 162000; - max_pix_clock = amdgpu_atombios_dp_get_max_dp_pix_clock(270000, lane_num, bpp); - if (pix_clock <= max_pix_clock) - return 270000; - if (amdgpu_connector_is_dp12_capable(connector)) { - max_pix_clock = amdgpu_atombios_dp_get_max_dp_pix_clock(540000, lane_num, bpp); - if (pix_clock <= max_pix_clock) - return 540000; - } - - return drm_dp_max_link_rate(dpcd); + return -EINVAL; } static u8 amdgpu_atombios_dp_encoder_service(struct amdgpu_device *adev, @@ -422,6 +390,7 @@ void amdgpu_atombios_dp_set_link_config(struct drm_connector *connector, { struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); struct amdgpu_connector_atom_dig *dig_connector; + int ret; if (!amdgpu_connector->con_priv) return; @@ -429,10 +398,14 @@ void amdgpu_atombios_dp_set_link_config(struct drm_connector *connector, if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) || (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) { - dig_connector->dp_clock = - amdgpu_atombios_dp_get_dp_link_clock(connector, dig_connector->dpcd, mode->clock); - dig_connector->dp_lane_count = - amdgpu_atombios_dp_get_dp_lane_number(connector, dig_connector->dpcd, mode->clock); + ret = amdgpu_atombios_dp_get_dp_link_config(connector, dig_connector->dpcd, + mode->clock, + &dig_connector->dp_lane_count, + &dig_connector->dp_clock); + if (ret) { + dig_connector->dp_clock = 0; + dig_connector->dp_lane_count = 0; + } } } @@ -441,14 +414,17 @@ int amdgpu_atombios_dp_mode_valid_helper(struct drm_connector *connector, { struct amdgpu_connector *amdgpu_connector = 
to_amdgpu_connector(connector); struct amdgpu_connector_atom_dig *dig_connector; - int dp_clock; + unsigned dp_lanes, dp_clock; + int ret; if (!amdgpu_connector->con_priv) return MODE_CLOCK_HIGH; dig_connector = amdgpu_connector->con_priv; - dp_clock = - amdgpu_atombios_dp_get_dp_link_clock(connector, dig_connector->dpcd, mode->clock); + ret = amdgpu_atombios_dp_get_dp_link_config(connector, dig_connector->dpcd, + mode->clock, &dp_lanes, &dp_clock); + if (ret) + return MODE_CLOCK_HIGH; if ((dp_clock == 540000) && (!amdgpu_connector_is_dp12_capable(connector))) diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c index 57a2e347f04d..8b4731d4e10e 100644 --- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c @@ -1395,7 +1395,6 @@ static void ci_thermal_stop_thermal_controller(struct amdgpu_device *adev) ci_fan_ctrl_set_default_mode(adev); } -#if 0 static int ci_read_smc_soft_register(struct amdgpu_device *adev, u16 reg_offset, u32 *value) { @@ -1405,7 +1404,6 @@ static int ci_read_smc_soft_register(struct amdgpu_device *adev, pi->soft_regs_start + reg_offset, value, pi->sram_end); } -#endif static int ci_write_smc_soft_register(struct amdgpu_device *adev, u16 reg_offset, u32 value) @@ -6084,11 +6082,23 @@ ci_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev, struct amdgpu_ps *rps = &pi->current_rps; u32 sclk = ci_get_average_sclk_freq(adev); u32 mclk = ci_get_average_mclk_freq(adev); + u32 activity_percent = 50; + int ret; + + ret = ci_read_smc_soft_register(adev, offsetof(SMU7_SoftRegisters, AverageGraphicsA), + &activity_percent); + + if (ret == 0) { + activity_percent += 0x80; + activity_percent >>= 8; + activity_percent = activity_percent > 100 ? 100 : activity_percent; + } seq_printf(m, "uvd %sabled\n", pi->uvd_enabled ? "en" : "dis"); seq_printf(m, "vce %sabled\n", rps->vce_active ? 
"en" : "dis"); seq_printf(m, "power level avg sclk: %u mclk: %u\n", sclk, mclk); + seq_printf(m, "GPU load: %u %%\n", activity_percent); } static void ci_dpm_print_power_state(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c index 484710cfdf82..fd9c9588ef46 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik.c +++ b/drivers/gpu/drm/amd/amdgpu/cik.c @@ -32,6 +32,7 @@ #include "amdgpu_vce.h" #include "cikd.h" #include "atom.h" +#include "amd_pcie.h" #include "cik.h" #include "gmc_v7_0.h" @@ -65,6 +66,7 @@ #include "oss/oss_2_0_sh_mask.h" #include "amdgpu_amdkfd.h" +#include "amdgpu_powerplay.h" /* * Indirect registers accessor @@ -929,6 +931,37 @@ static bool cik_read_disabled_bios(struct amdgpu_device *adev) return r; } +static bool cik_read_bios_from_rom(struct amdgpu_device *adev, + u8 *bios, u32 length_bytes) +{ + u32 *dw_ptr; + unsigned long flags; + u32 i, length_dw; + + if (bios == NULL) + return false; + if (length_bytes == 0) + return false; + /* APU vbios image is part of sbios image */ + if (adev->flags & AMD_IS_APU) + return false; + + dw_ptr = (u32 *)bios; + length_dw = ALIGN(length_bytes, 4) / 4; + /* take the smc lock since we are using the smc index */ + spin_lock_irqsave(&adev->smc_idx_lock, flags); + /* set rom index to 0 */ + WREG32(mmSMC_IND_INDEX_0, ixROM_INDEX); + WREG32(mmSMC_IND_DATA_0, 0); + /* set index to data for continous read */ + WREG32(mmSMC_IND_INDEX_0, ixROM_DATA); + for (i = 0; i < length_dw; i++) + dw_ptr[i] = RREG32(mmSMC_IND_DATA_0); + spin_unlock_irqrestore(&adev->smc_idx_lock, flags); + + return true; +} + static struct amdgpu_allowed_register_entry cik_allowed_read_registers[] = { {mmGRBM_STATUS, false}, {mmGB_ADDR_CONFIG, false}, @@ -1563,8 +1596,8 @@ static void cik_pcie_gen3_enable(struct amdgpu_device *adev) { struct pci_dev *root = adev->pdev->bus->self; int bridge_pos, gpu_pos; - u32 speed_cntl, mask, current_data_rate; - int ret, i; + u32 speed_cntl, current_data_rate; + int i; u16 tmp16; if (pci_is_root_bus(adev->pdev->bus)) @@ -1576,23 +1609,20 @@ static void cik_pcie_gen3_enable(struct amdgpu_device *adev) if (adev->flags & AMD_IS_APU) return; - ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask); - if (ret != 0) - return; - - if (!(mask & (DRM_PCIE_SPEED_50 | DRM_PCIE_SPEED_80))) + if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 | + CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3))) return; speed_cntl = RREG32_PCIE(ixPCIE_LC_SPEED_CNTL); current_data_rate = (speed_cntl & PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK) >> PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT; - if (mask & DRM_PCIE_SPEED_80) { + if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) { if (current_data_rate == 2) { DRM_INFO("PCIE gen 3 link speeds already enabled\n"); return; } DRM_INFO("enabling PCIE gen 3 link speeds, disable with amdgpu.pcie_gen2=0\n"); - } else if (mask & DRM_PCIE_SPEED_50) { + } else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2) { if (current_data_rate == 1) { DRM_INFO("PCIE gen 2 link speeds already enabled\n"); return; @@ -1608,7 +1638,7 @@ static void cik_pcie_gen3_enable(struct amdgpu_device *adev) if (!gpu_pos) return; - if (mask & DRM_PCIE_SPEED_80) { + if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) { /* re-try equalization if gen3 is not already enabled */ if (current_data_rate != 2) { u16 bridge_cfg, gpu_cfg; @@ -1703,9 +1733,9 @@ static void cik_pcie_gen3_enable(struct amdgpu_device *adev) pci_read_config_word(adev->pdev, gpu_pos + 
PCI_EXP_LNKCTL2, &tmp16); tmp16 &= ~0xf; - if (mask & DRM_PCIE_SPEED_80) + if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) tmp16 |= 3; /* gen3 */ - else if (mask & DRM_PCIE_SPEED_50) + else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2) tmp16 |= 2; /* gen2 */ else tmp16 |= 1; /* gen1 */ @@ -1922,7 +1952,7 @@ static const struct amdgpu_ip_block_version bonaire_ip_blocks[] = .major = 7, .minor = 0, .rev = 0, - .funcs = &ci_dpm_ip_funcs, + .funcs = &amdgpu_pp_ip_funcs, }, { .type = AMD_IP_BLOCK_TYPE_DCE, @@ -1990,7 +2020,7 @@ static const struct amdgpu_ip_block_version hawaii_ip_blocks[] = .major = 7, .minor = 0, .rev = 0, - .funcs = &ci_dpm_ip_funcs, + .funcs = &amdgpu_pp_ip_funcs, }, { .type = AMD_IP_BLOCK_TYPE_DCE, @@ -2058,7 +2088,7 @@ static const struct amdgpu_ip_block_version kabini_ip_blocks[] = .major = 7, .minor = 0, .rev = 0, - .funcs = &kv_dpm_ip_funcs, + .funcs = &amdgpu_pp_ip_funcs, }, { .type = AMD_IP_BLOCK_TYPE_DCE, @@ -2126,7 +2156,7 @@ static const struct amdgpu_ip_block_version mullins_ip_blocks[] = .major = 7, .minor = 0, .rev = 0, - .funcs = &kv_dpm_ip_funcs, + .funcs = &amdgpu_pp_ip_funcs, }, { .type = AMD_IP_BLOCK_TYPE_DCE, @@ -2194,7 +2224,7 @@ static const struct amdgpu_ip_block_version kaveri_ip_blocks[] = .major = 7, .minor = 0, .rev = 0, - .funcs = &kv_dpm_ip_funcs, + .funcs = &amdgpu_pp_ip_funcs, }, { .type = AMD_IP_BLOCK_TYPE_DCE, @@ -2267,6 +2297,7 @@ int cik_set_ip_blocks(struct amdgpu_device *adev) static const struct amdgpu_asic_funcs cik_asic_funcs = { .read_disabled_bios = &cik_read_disabled_bios, + .read_bios_from_rom = &cik_read_bios_from_rom, .read_register = &cik_read_register, .reset = &cik_asic_reset, .set_vga_state = &cik_vga_set_state, @@ -2417,6 +2448,8 @@ static int cik_common_early_init(void *handle) return -EINVAL; } + amdgpu_get_pcie_info(adev); + return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/cik_ih.c b/drivers/gpu/drm/amd/amdgpu/cik_ih.c index 8993c50cb89f..30c9b3beeef9 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/cik_ih.c @@ -274,6 +274,11 @@ static void cik_ih_set_rptr(struct amdgpu_device *adev) static int cik_ih_early_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int ret; + + ret = amdgpu_irq_add_domain(adev); + if (ret) + return ret; cik_ih_set_interrupt_funcs(adev); @@ -300,6 +305,7 @@ static int cik_ih_sw_fini(void *handle) amdgpu_irq_fini(adev); amdgpu_ih_ring_fini(adev); + amdgpu_irq_remove_domain(adev); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c index 8035d4d6a4f5..4dd17f2dd905 100644 --- a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c @@ -1078,6 +1078,37 @@ static uint32_t cz_get_eclk_level(struct amdgpu_device *adev, return i; } +static uint32_t cz_get_uvd_level(struct amdgpu_device *adev, + uint32_t clock, uint16_t msg) +{ + int i = 0; + struct amdgpu_uvd_clock_voltage_dependency_table *table = + &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table; + + switch (msg) { + case PPSMC_MSG_SetUvdSoftMin: + case PPSMC_MSG_SetUvdHardMin: + for (i = 0; i < table->count; i++) + if (clock <= table->entries[i].vclk) + break; + if (i == table->count) + i = table->count - 1; + break; + case PPSMC_MSG_SetUvdSoftMax: + case PPSMC_MSG_SetUvdHardMax: + for (i = table->count - 1; i >= 0; i--) + if (clock >= table->entries[i].vclk) + break; + if (i < 0) + i = 0; + break; + default: + break; + } + + return i; +} + static int 
cz_program_bootup_state(struct amdgpu_device *adev) { struct cz_power_info *pi = cz_get_pi(adev); @@ -1739,6 +1770,200 @@ static int cz_dpm_unforce_dpm_levels(struct amdgpu_device *adev) return 0; } +static int cz_dpm_uvd_force_highest(struct amdgpu_device *adev) +{ + struct cz_power_info *pi = cz_get_pi(adev); + int ret = 0; + + if (pi->uvd_dpm.soft_min_clk != pi->uvd_dpm.soft_max_clk) { + pi->uvd_dpm.soft_min_clk = + pi->uvd_dpm.soft_max_clk; + ret = cz_send_msg_to_smc_with_parameter(adev, + PPSMC_MSG_SetUvdSoftMin, + cz_get_uvd_level(adev, + pi->uvd_dpm.soft_min_clk, + PPSMC_MSG_SetUvdSoftMin)); + if (ret) + return ret; + } + + return ret; +} + +static int cz_dpm_uvd_force_lowest(struct amdgpu_device *adev) +{ + struct cz_power_info *pi = cz_get_pi(adev); + int ret = 0; + + if (pi->uvd_dpm.soft_max_clk != pi->uvd_dpm.soft_min_clk) { + pi->uvd_dpm.soft_max_clk = pi->uvd_dpm.soft_min_clk; + ret = cz_send_msg_to_smc_with_parameter(adev, + PPSMC_MSG_SetUvdSoftMax, + cz_get_uvd_level(adev, + pi->uvd_dpm.soft_max_clk, + PPSMC_MSG_SetUvdSoftMax)); + if (ret) + return ret; + } + + return ret; +} + +static uint32_t cz_dpm_get_max_uvd_level(struct amdgpu_device *adev) +{ + struct cz_power_info *pi = cz_get_pi(adev); + + if (!pi->max_uvd_level) { + cz_send_msg_to_smc(adev, PPSMC_MSG_GetMaxUvdLevel); + pi->max_uvd_level = cz_get_argument(adev) + 1; + } + + if (pi->max_uvd_level > CZ_MAX_HARDWARE_POWERLEVELS) { + DRM_ERROR("Invalid max uvd level!\n"); + return -EINVAL; + } + + return pi->max_uvd_level; +} + +static int cz_dpm_unforce_uvd_dpm_levels(struct amdgpu_device *adev) +{ + struct cz_power_info *pi = cz_get_pi(adev); + struct amdgpu_uvd_clock_voltage_dependency_table *dep_table = + &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table; + uint32_t level = 0; + int ret = 0; + + pi->uvd_dpm.soft_min_clk = dep_table->entries[0].vclk; + level = cz_dpm_get_max_uvd_level(adev) - 1; + if (level < dep_table->count) + pi->uvd_dpm.soft_max_clk = dep_table->entries[level].vclk; + else + pi->uvd_dpm.soft_max_clk = + dep_table->entries[dep_table->count - 1].vclk; + + /* get min/max sclk soft value + * notify SMU to execute */ + ret = cz_send_msg_to_smc_with_parameter(adev, + PPSMC_MSG_SetUvdSoftMin, + cz_get_uvd_level(adev, + pi->uvd_dpm.soft_min_clk, + PPSMC_MSG_SetUvdSoftMin)); + if (ret) + return ret; + + ret = cz_send_msg_to_smc_with_parameter(adev, + PPSMC_MSG_SetUvdSoftMax, + cz_get_uvd_level(adev, + pi->uvd_dpm.soft_max_clk, + PPSMC_MSG_SetUvdSoftMax)); + if (ret) + return ret; + + DRM_DEBUG("DPM uvd unforce state min=%d, max=%d.\n", + pi->uvd_dpm.soft_min_clk, + pi->uvd_dpm.soft_max_clk); + + return 0; +} + +static int cz_dpm_vce_force_highest(struct amdgpu_device *adev) +{ + struct cz_power_info *pi = cz_get_pi(adev); + int ret = 0; + + if (pi->vce_dpm.soft_min_clk != pi->vce_dpm.soft_max_clk) { + pi->vce_dpm.soft_min_clk = + pi->vce_dpm.soft_max_clk; + ret = cz_send_msg_to_smc_with_parameter(adev, + PPSMC_MSG_SetEclkSoftMin, + cz_get_eclk_level(adev, + pi->vce_dpm.soft_min_clk, + PPSMC_MSG_SetEclkSoftMin)); + if (ret) + return ret; + } + + return ret; +} + +static int cz_dpm_vce_force_lowest(struct amdgpu_device *adev) +{ + struct cz_power_info *pi = cz_get_pi(adev); + int ret = 0; + + if (pi->vce_dpm.soft_max_clk != pi->vce_dpm.soft_min_clk) { + pi->vce_dpm.soft_max_clk = pi->vce_dpm.soft_min_clk; + ret = cz_send_msg_to_smc_with_parameter(adev, + PPSMC_MSG_SetEclkSoftMax, + cz_get_uvd_level(adev, + pi->vce_dpm.soft_max_clk, + PPSMC_MSG_SetEclkSoftMax)); + if (ret) + return ret; + } + + 
return ret; +} + +static uint32_t cz_dpm_get_max_vce_level(struct amdgpu_device *adev) +{ + struct cz_power_info *pi = cz_get_pi(adev); + + if (!pi->max_vce_level) { + cz_send_msg_to_smc(adev, PPSMC_MSG_GetMaxEclkLevel); + pi->max_vce_level = cz_get_argument(adev) + 1; + } + + if (pi->max_vce_level > CZ_MAX_HARDWARE_POWERLEVELS) { + DRM_ERROR("Invalid max vce level!\n"); + return -EINVAL; + } + + return pi->max_vce_level; +} + +static int cz_dpm_unforce_vce_dpm_levels(struct amdgpu_device *adev) +{ + struct cz_power_info *pi = cz_get_pi(adev); + struct amdgpu_vce_clock_voltage_dependency_table *dep_table = + &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table; + uint32_t level = 0; + int ret = 0; + + pi->vce_dpm.soft_min_clk = dep_table->entries[0].ecclk; + level = cz_dpm_get_max_vce_level(adev) - 1; + if (level < dep_table->count) + pi->vce_dpm.soft_max_clk = dep_table->entries[level].ecclk; + else + pi->vce_dpm.soft_max_clk = + dep_table->entries[dep_table->count - 1].ecclk; + + /* get min/max sclk soft value + * notify SMU to execute */ + ret = cz_send_msg_to_smc_with_parameter(adev, + PPSMC_MSG_SetEclkSoftMin, + cz_get_eclk_level(adev, + pi->vce_dpm.soft_min_clk, + PPSMC_MSG_SetEclkSoftMin)); + if (ret) + return ret; + + ret = cz_send_msg_to_smc_with_parameter(adev, + PPSMC_MSG_SetEclkSoftMax, + cz_get_eclk_level(adev, + pi->vce_dpm.soft_max_clk, + PPSMC_MSG_SetEclkSoftMax)); + if (ret) + return ret; + + DRM_DEBUG("DPM vce unforce state min=%d, max=%d.\n", + pi->vce_dpm.soft_min_clk, + pi->vce_dpm.soft_max_clk); + + return 0; +} + static int cz_dpm_force_dpm_level(struct amdgpu_device *adev, enum amdgpu_dpm_forced_level level) { @@ -1746,23 +1971,68 @@ static int cz_dpm_force_dpm_level(struct amdgpu_device *adev, switch (level) { case AMDGPU_DPM_FORCED_LEVEL_HIGH: + /* sclk */ ret = cz_dpm_unforce_dpm_levels(adev); if (ret) return ret; ret = cz_dpm_force_highest(adev); + if (ret) + return ret; + + /* uvd */ + ret = cz_dpm_unforce_uvd_dpm_levels(adev); + if (ret) + return ret; + ret = cz_dpm_uvd_force_highest(adev); + if (ret) + return ret; + + /* vce */ + ret = cz_dpm_unforce_vce_dpm_levels(adev); + if (ret) + return ret; + ret = cz_dpm_vce_force_highest(adev); if (ret) return ret; break; case AMDGPU_DPM_FORCED_LEVEL_LOW: + /* sclk */ ret = cz_dpm_unforce_dpm_levels(adev); if (ret) return ret; ret = cz_dpm_force_lowest(adev); + if (ret) + return ret; + + /* uvd */ + ret = cz_dpm_unforce_uvd_dpm_levels(adev); + if (ret) + return ret; + ret = cz_dpm_uvd_force_lowest(adev); + if (ret) + return ret; + + /* vce */ + ret = cz_dpm_unforce_vce_dpm_levels(adev); + if (ret) + return ret; + ret = cz_dpm_vce_force_lowest(adev); if (ret) return ret; break; case AMDGPU_DPM_FORCED_LEVEL_AUTO: + /* sclk */ ret = cz_dpm_unforce_dpm_levels(adev); + if (ret) + return ret; + + /* uvd */ + ret = cz_dpm_unforce_uvd_dpm_levels(adev); + if (ret) + return ret; + + /* vce */ + ret = cz_dpm_unforce_vce_dpm_levels(adev); if (ret) return ret; break; @@ -1905,7 +2175,8 @@ static int cz_update_vce_dpm(struct amdgpu_device *adev) pi->vce_dpm.hard_min_clk = table->entries[table->count-1].ecclk; } else { /* non-stable p-state cases. 
without vce.Arbiter.EcclkHardMin */ - pi->vce_dpm.hard_min_clk = table->entries[0].ecclk; + /* leave it as set by user */ + /*pi->vce_dpm.hard_min_clk = table->entries[0].ecclk;*/ } cz_send_msg_to_smc_with_parameter(adev, diff --git a/drivers/gpu/drm/amd/amdgpu/cz_dpm.h b/drivers/gpu/drm/amd/amdgpu/cz_dpm.h index 99e1afc89629..5df8c1faab51 100644 --- a/drivers/gpu/drm/amd/amdgpu/cz_dpm.h +++ b/drivers/gpu/drm/amd/amdgpu/cz_dpm.h @@ -183,6 +183,8 @@ struct cz_power_info { uint32_t voltage_drop_threshold; uint32_t gfx_pg_threshold; uint32_t max_sclk_level; + uint32_t max_uvd_level; + uint32_t max_vce_level; /* flags */ bool didt_enabled; bool video_start; diff --git a/drivers/gpu/drm/amd/amdgpu/cz_ih.c b/drivers/gpu/drm/amd/amdgpu/cz_ih.c index bc751bfbcae2..c79638f8e732 100644 --- a/drivers/gpu/drm/amd/amdgpu/cz_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/cz_ih.c @@ -253,8 +253,14 @@ static void cz_ih_set_rptr(struct amdgpu_device *adev) static int cz_ih_early_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int ret; + + ret = amdgpu_irq_add_domain(adev); + if (ret) + return ret; cz_ih_set_interrupt_funcs(adev); + return 0; } @@ -278,6 +284,7 @@ static int cz_ih_sw_fini(void *handle) amdgpu_irq_fini(adev); amdgpu_ih_ring_fini(adev); + amdgpu_irq_remove_domain(adev); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c index 4dcc8fba5792..093599aba64b 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c @@ -3729,7 +3729,7 @@ static void dce_v10_0_encoder_add(struct amdgpu_device *adev, case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2: drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs, - DRM_MODE_ENCODER_DAC); + DRM_MODE_ENCODER_DAC, NULL); drm_encoder_helper_add(encoder, &dce_v10_0_dac_helper_funcs); break; case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: @@ -3740,15 +3740,15 @@ static void dce_v10_0_encoder_add(struct amdgpu_device *adev, if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { amdgpu_encoder->rmx_type = RMX_FULL; drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs, - DRM_MODE_ENCODER_LVDS); + DRM_MODE_ENCODER_LVDS, NULL); amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_lcd_info(amdgpu_encoder); } else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) { drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs, - DRM_MODE_ENCODER_DAC); + DRM_MODE_ENCODER_DAC, NULL); amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder); } else { drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs, - DRM_MODE_ENCODER_TMDS); + DRM_MODE_ENCODER_TMDS, NULL); amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder); } drm_encoder_helper_add(encoder, &dce_v10_0_dig_helper_funcs); @@ -3766,13 +3766,13 @@ static void dce_v10_0_encoder_add(struct amdgpu_device *adev, amdgpu_encoder->is_ext_encoder = true; if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs, - DRM_MODE_ENCODER_LVDS); + DRM_MODE_ENCODER_LVDS, NULL); else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs, - DRM_MODE_ENCODER_DAC); + DRM_MODE_ENCODER_DAC, NULL); else drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs, - DRM_MODE_ENCODER_TMDS); + DRM_MODE_ENCODER_TMDS, NULL); drm_encoder_helper_add(encoder, &dce_v10_0_ext_helper_funcs); break; } diff --git 
a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c index 8f1e51128b33..8e67249d4367 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c @@ -211,9 +211,9 @@ static bool dce_v11_0_is_counter_moving(struct amdgpu_device *adev, int crtc) */ static void dce_v11_0_vblank_wait(struct amdgpu_device *adev, int crtc) { - unsigned i = 0; + unsigned i = 100; - if (crtc >= adev->mode_info.num_crtc) + if (crtc < 0 || crtc >= adev->mode_info.num_crtc) return; if (!(RREG32(mmCRTC_CONTROL + crtc_offsets[crtc]) & CRTC_CONTROL__CRTC_MASTER_EN_MASK)) @@ -223,14 +223,16 @@ static void dce_v11_0_vblank_wait(struct amdgpu_device *adev, int crtc) * wait for another frame. */ while (dce_v11_0_is_in_vblank(adev, crtc)) { - if (i++ % 100 == 0) { + if (i++ == 100) { + i = 0; if (!dce_v11_0_is_counter_moving(adev, crtc)) break; } } while (!dce_v11_0_is_in_vblank(adev, crtc)) { - if (i++ % 100 == 0) { + if (i++ == 100) { + i = 0; if (!dce_v11_0_is_counter_moving(adev, crtc)) break; } @@ -239,7 +241,7 @@ static void dce_v11_0_vblank_wait(struct amdgpu_device *adev, int crtc) static u32 dce_v11_0_vblank_get_counter(struct amdgpu_device *adev, int crtc) { - if (crtc >= adev->mode_info.num_crtc) + if (crtc < 0 || crtc >= adev->mode_info.num_crtc) return 0; else return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]); @@ -3384,7 +3386,7 @@ static void dce_v11_0_crtc_vblank_int_ack(struct amdgpu_device *adev, { u32 tmp; - if (crtc >= adev->mode_info.num_crtc) { + if (crtc < 0 || crtc >= adev->mode_info.num_crtc) { DRM_DEBUG("invalid crtc %d\n", crtc); return; } @@ -3399,7 +3401,7 @@ static void dce_v11_0_crtc_vline_int_ack(struct amdgpu_device *adev, { u32 tmp; - if (crtc >= adev->mode_info.num_crtc) { + if (crtc < 0 || crtc >= adev->mode_info.num_crtc) { DRM_DEBUG("invalid crtc %d\n", crtc); return; } @@ -3722,7 +3724,7 @@ static void dce_v11_0_encoder_add(struct amdgpu_device *adev, case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2: drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs, - DRM_MODE_ENCODER_DAC); + DRM_MODE_ENCODER_DAC, NULL); drm_encoder_helper_add(encoder, &dce_v11_0_dac_helper_funcs); break; case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: @@ -3733,15 +3735,15 @@ static void dce_v11_0_encoder_add(struct amdgpu_device *adev, if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { amdgpu_encoder->rmx_type = RMX_FULL; drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs, - DRM_MODE_ENCODER_LVDS); + DRM_MODE_ENCODER_LVDS, NULL); amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_lcd_info(amdgpu_encoder); } else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) { drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs, - DRM_MODE_ENCODER_DAC); + DRM_MODE_ENCODER_DAC, NULL); amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder); } else { drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs, - DRM_MODE_ENCODER_TMDS); + DRM_MODE_ENCODER_TMDS, NULL); amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder); } drm_encoder_helper_add(encoder, &dce_v11_0_dig_helper_funcs); @@ -3759,13 +3761,13 @@ static void dce_v11_0_encoder_add(struct amdgpu_device *adev, amdgpu_encoder->is_ext_encoder = true; if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs, - DRM_MODE_ENCODER_LVDS); + DRM_MODE_ENCODER_LVDS, NULL); else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) 
drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs, - DRM_MODE_ENCODER_DAC); + DRM_MODE_ENCODER_DAC, NULL); else drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs, - DRM_MODE_ENCODER_TMDS); + DRM_MODE_ENCODER_TMDS, NULL); drm_encoder_helper_add(encoder, &dce_v11_0_ext_helper_funcs); break; } diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c index 42d954dc436d..d0e128c24813 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c @@ -3659,7 +3659,7 @@ static void dce_v8_0_encoder_add(struct amdgpu_device *adev, case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2: drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs, - DRM_MODE_ENCODER_DAC); + DRM_MODE_ENCODER_DAC, NULL); drm_encoder_helper_add(encoder, &dce_v8_0_dac_helper_funcs); break; case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: @@ -3670,15 +3670,15 @@ static void dce_v8_0_encoder_add(struct amdgpu_device *adev, if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { amdgpu_encoder->rmx_type = RMX_FULL; drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs, - DRM_MODE_ENCODER_LVDS); + DRM_MODE_ENCODER_LVDS, NULL); amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_lcd_info(amdgpu_encoder); } else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) { drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs, - DRM_MODE_ENCODER_DAC); + DRM_MODE_ENCODER_DAC, NULL); amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder); } else { drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs, - DRM_MODE_ENCODER_TMDS); + DRM_MODE_ENCODER_TMDS, NULL); amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder); } drm_encoder_helper_add(encoder, &dce_v8_0_dig_helper_funcs); @@ -3696,13 +3696,13 @@ static void dce_v8_0_encoder_add(struct amdgpu_device *adev, amdgpu_encoder->is_ext_encoder = true; if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs, - DRM_MODE_ENCODER_LVDS); + DRM_MODE_ENCODER_LVDS, NULL); else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs, - DRM_MODE_ENCODER_DAC); + DRM_MODE_ENCODER_DAC, NULL); else drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs, - DRM_MODE_ENCODER_TMDS); + DRM_MODE_ENCODER_TMDS, NULL); drm_encoder_helper_add(encoder, &dce_v8_0_ext_helper_funcs); break; } diff --git a/drivers/gpu/drm/amd/amdgpu/fiji_dpm.c b/drivers/gpu/drm/amd/amdgpu/fiji_dpm.c index 8f9845d9a986..4b0e45a27129 100644 --- a/drivers/gpu/drm/amd/amdgpu/fiji_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/fiji_dpm.c @@ -24,7 +24,7 @@ #include #include "drmP.h" #include "amdgpu.h" -#include "fiji_smumgr.h" +#include "fiji_smum.h" MODULE_FIRMWARE("amdgpu/fiji_smc.bin"); diff --git a/drivers/gpu/drm/amd/amdgpu/fiji_ppsmc.h b/drivers/gpu/drm/amd/amdgpu/fiji_ppsmc.h deleted file mode 100644 index 3c4824082990..000000000000 --- a/drivers/gpu/drm/amd/amdgpu/fiji_ppsmc.h +++ /dev/null @@ -1,182 +0,0 @@ -/* - * Copyright 2014 Advanced Micro Devices, Inc. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - */ - -#ifndef FIJI_PP_SMC_H -#define FIJI_PP_SMC_H - -#pragma pack(push, 1) - -#define PPSMC_SWSTATE_FLAG_DC 0x01 -#define PPSMC_SWSTATE_FLAG_UVD 0x02 -#define PPSMC_SWSTATE_FLAG_VCE 0x04 - -#define PPSMC_THERMAL_PROTECT_TYPE_INTERNAL 0x00 -#define PPSMC_THERMAL_PROTECT_TYPE_EXTERNAL 0x01 -#define PPSMC_THERMAL_PROTECT_TYPE_NONE 0xff - -#define PPSMC_SYSTEMFLAG_GPIO_DC 0x01 -#define PPSMC_SYSTEMFLAG_STEPVDDC 0x02 -#define PPSMC_SYSTEMFLAG_GDDR5 0x04 - -#define PPSMC_SYSTEMFLAG_DISABLE_BABYSTEP 0x08 - -#define PPSMC_SYSTEMFLAG_REGULATOR_HOT 0x10 -#define PPSMC_SYSTEMFLAG_REGULATOR_HOT_ANALOG 0x20 - -#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_MASK 0x07 -#define PPSMC_EXTRAFLAGS_AC2DC_DONT_WAIT_FOR_VBLANK 0x08 - -#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_GOTODPMLOWSTATE 0x00 -#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_GOTOINITIALSTATE 0x01 - -#define PPSMC_DPM2FLAGS_TDPCLMP 0x01 -#define PPSMC_DPM2FLAGS_PWRSHFT 0x02 -#define PPSMC_DPM2FLAGS_OCP 0x04 - -#define PPSMC_DISPLAY_WATERMARK_LOW 0 -#define PPSMC_DISPLAY_WATERMARK_HIGH 1 - -#define PPSMC_STATEFLAG_AUTO_PULSE_SKIP 0x01 -#define PPSMC_STATEFLAG_POWERBOOST 0x02 -#define PPSMC_STATEFLAG_PSKIP_ON_TDP_FAULT 0x04 -#define PPSMC_STATEFLAG_POWERSHIFT 0x08 -#define PPSMC_STATEFLAG_SLOW_READ_MARGIN 0x10 -#define PPSMC_STATEFLAG_DEEPSLEEP_THROTTLE 0x20 -#define PPSMC_STATEFLAG_DEEPSLEEP_BYPASS 0x40 - -#define FDO_MODE_HARDWARE 0 -#define FDO_MODE_PIECE_WISE_LINEAR 1 - -enum FAN_CONTROL { - FAN_CONTROL_FUZZY, - FAN_CONTROL_TABLE -}; - -//Gemini Modes -#define PPSMC_GeminiModeNone 0 //Single GPU board -#define PPSMC_GeminiModeMaster 1 //Master GPU on a Gemini board -#define PPSMC_GeminiModeSlave 2 //Slave GPU on a Gemini board - -#define PPSMC_Result_OK ((uint16_t)0x01) -#define PPSMC_Result_NoMore ((uint16_t)0x02) -#define PPSMC_Result_NotNow ((uint16_t)0x03) -#define PPSMC_Result_Failed ((uint16_t)0xFF) -#define PPSMC_Result_UnknownCmd ((uint16_t)0xFE) -#define PPSMC_Result_UnknownVT ((uint16_t)0xFD) - -typedef uint16_t PPSMC_Result; - -#define PPSMC_isERROR(x) ((uint16_t)0x80 & (x)) - -#define PPSMC_MSG_Halt ((uint16_t)0x10) -#define PPSMC_MSG_Resume ((uint16_t)0x11) -#define PPSMC_MSG_EnableDPMLevel ((uint16_t)0x12) -#define PPSMC_MSG_ZeroLevelsDisabled ((uint16_t)0x13) -#define PPSMC_MSG_OneLevelsDisabled ((uint16_t)0x14) -#define PPSMC_MSG_TwoLevelsDisabled ((uint16_t)0x15) -#define PPSMC_MSG_EnableThermalInterrupt ((uint16_t)0x16) -#define PPSMC_MSG_RunningOnAC 
((uint16_t)0x17) -#define PPSMC_MSG_LevelUp ((uint16_t)0x18) -#define PPSMC_MSG_LevelDown ((uint16_t)0x19) -#define PPSMC_MSG_ResetDPMCounters ((uint16_t)0x1a) -#define PPSMC_MSG_SwitchToSwState ((uint16_t)0x20) -#define PPSMC_MSG_SwitchToSwStateLast ((uint16_t)0x3f) -#define PPSMC_MSG_SwitchToInitialState ((uint16_t)0x40) -#define PPSMC_MSG_NoForcedLevel ((uint16_t)0x41) -#define PPSMC_MSG_ForceHigh ((uint16_t)0x42) -#define PPSMC_MSG_ForceMediumOrHigh ((uint16_t)0x43) -#define PPSMC_MSG_SwitchToMinimumPower ((uint16_t)0x51) -#define PPSMC_MSG_ResumeFromMinimumPower ((uint16_t)0x52) -#define PPSMC_MSG_EnableCac ((uint16_t)0x53) -#define PPSMC_MSG_DisableCac ((uint16_t)0x54) -#define PPSMC_DPMStateHistoryStart ((uint16_t)0x55) -#define PPSMC_DPMStateHistoryStop ((uint16_t)0x56) -#define PPSMC_CACHistoryStart ((uint16_t)0x57) -#define PPSMC_CACHistoryStop ((uint16_t)0x58) -#define PPSMC_TDPClampingActive ((uint16_t)0x59) -#define PPSMC_TDPClampingInactive ((uint16_t)0x5A) -#define PPSMC_StartFanControl ((uint16_t)0x5B) -#define PPSMC_StopFanControl ((uint16_t)0x5C) -#define PPSMC_NoDisplay ((uint16_t)0x5D) -#define PPSMC_HasDisplay ((uint16_t)0x5E) -#define PPSMC_MSG_UVDPowerOFF ((uint16_t)0x60) -#define PPSMC_MSG_UVDPowerON ((uint16_t)0x61) -#define PPSMC_MSG_EnableULV ((uint16_t)0x62) -#define PPSMC_MSG_DisableULV ((uint16_t)0x63) -#define PPSMC_MSG_EnterULV ((uint16_t)0x64) -#define PPSMC_MSG_ExitULV ((uint16_t)0x65) -#define PPSMC_PowerShiftActive ((uint16_t)0x6A) -#define PPSMC_PowerShiftInactive ((uint16_t)0x6B) -#define PPSMC_OCPActive ((uint16_t)0x6C) -#define PPSMC_OCPInactive ((uint16_t)0x6D) -#define PPSMC_CACLongTermAvgEnable ((uint16_t)0x6E) -#define PPSMC_CACLongTermAvgDisable ((uint16_t)0x6F) -#define PPSMC_MSG_InferredStateSweep_Start ((uint16_t)0x70) -#define PPSMC_MSG_InferredStateSweep_Stop ((uint16_t)0x71) -#define PPSMC_MSG_SwitchToLowestInfState ((uint16_t)0x72) -#define PPSMC_MSG_SwitchToNonInfState ((uint16_t)0x73) -#define PPSMC_MSG_AllStateSweep_Start ((uint16_t)0x74) -#define PPSMC_MSG_AllStateSweep_Stop ((uint16_t)0x75) -#define PPSMC_MSG_SwitchNextLowerInfState ((uint16_t)0x76) -#define PPSMC_MSG_SwitchNextHigherInfState ((uint16_t)0x77) -#define PPSMC_MSG_MclkRetrainingTest ((uint16_t)0x78) -#define PPSMC_MSG_ForceTDPClamping ((uint16_t)0x79) -#define PPSMC_MSG_CollectCAC_PowerCorreln ((uint16_t)0x7A) -#define PPSMC_MSG_CollectCAC_WeightCalib ((uint16_t)0x7B) -#define PPSMC_MSG_CollectCAC_SQonly ((uint16_t)0x7C) -#define PPSMC_MSG_CollectCAC_TemperaturePwr ((uint16_t)0x7D) -#define PPSMC_MSG_ExtremitiesTest_Start ((uint16_t)0x7E) -#define PPSMC_MSG_ExtremitiesTest_Stop ((uint16_t)0x7F) -#define PPSMC_FlushDataCache ((uint16_t)0x80) -#define PPSMC_FlushInstrCache ((uint16_t)0x81) -#define PPSMC_MSG_SetEnabledLevels ((uint16_t)0x82) -#define PPSMC_MSG_SetForcedLevels ((uint16_t)0x83) -#define PPSMC_MSG_ResetToDefaults ((uint16_t)0x84) -#define PPSMC_MSG_SetForcedLevelsAndJump ((uint16_t)0x85) -#define PPSMC_MSG_SetCACHistoryMode ((uint16_t)0x86) -#define PPSMC_MSG_EnableDTE ((uint16_t)0x87) -#define PPSMC_MSG_DisableDTE ((uint16_t)0x88) -#define PPSMC_MSG_SmcSpaceSetAddress ((uint16_t)0x89) -#define PPSMC_MSG_SmcSpaceWriteDWordInc ((uint16_t)0x8A) -#define PPSMC_MSG_SmcSpaceWriteWordInc ((uint16_t)0x8B) -#define PPSMC_MSG_SmcSpaceWriteByteInc ((uint16_t)0x8C) - -#define PPSMC_MSG_BREAK ((uint16_t)0xF8) - -#define PPSMC_MSG_Test ((uint16_t)0x100) -#define PPSMC_MSG_DRV_DRAM_ADDR_HI ((uint16_t)0x250) -#define PPSMC_MSG_DRV_DRAM_ADDR_LO ((uint16_t)0x251) -#define 
PPSMC_MSG_SMU_DRAM_ADDR_HI ((uint16_t)0x252) -#define PPSMC_MSG_SMU_DRAM_ADDR_LO ((uint16_t)0x253) -#define PPSMC_MSG_LoadUcodes ((uint16_t)0x254) - -typedef uint16_t PPSMC_Msg; - -#define PPSMC_EVENT_STATUS_THERMAL 0x00000001 -#define PPSMC_EVENT_STATUS_REGULATORHOT 0x00000002 -#define PPSMC_EVENT_STATUS_DC 0x00000004 -#define PPSMC_EVENT_STATUS_GPIO17 0x00000008 - -#pragma pack(pop) - -#endif diff --git a/drivers/gpu/drm/amd/amdgpu/fiji_smc.c b/drivers/gpu/drm/amd/amdgpu/fiji_smc.c index bda1249eb871..e35340afd3db 100644 --- a/drivers/gpu/drm/amd/amdgpu/fiji_smc.c +++ b/drivers/gpu/drm/amd/amdgpu/fiji_smc.c @@ -25,7 +25,7 @@ #include "drmP.h" #include "amdgpu.h" #include "fiji_ppsmc.h" -#include "fiji_smumgr.h" +#include "fiji_smum.h" #include "smu_ucode_xfer_vi.h" #include "amdgpu_ucode.h" diff --git a/drivers/gpu/drm/amd/amdgpu/fiji_smumgr.h b/drivers/gpu/drm/amd/amdgpu/fiji_smum.h similarity index 100% rename from drivers/gpu/drm/amd/amdgpu/fiji_smumgr.h rename to drivers/gpu/drm/amd/amdgpu/fiji_smum.h diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index e1dcab98e249..13235d84e5a6 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -66,6 +66,27 @@ #define MACRO_TILE_ASPECT(x) ((x) << GB_MACROTILE_MODE0__MACRO_TILE_ASPECT__SHIFT) #define NUM_BANKS(x) ((x) << GB_MACROTILE_MODE0__NUM_BANKS__SHIFT) +#define RLC_CGTT_MGCG_OVERRIDE__CPF_MASK 0x00000001L +#define RLC_CGTT_MGCG_OVERRIDE__RLC_MASK 0x00000002L +#define RLC_CGTT_MGCG_OVERRIDE__MGCG_MASK 0x00000004L +#define RLC_CGTT_MGCG_OVERRIDE__CGCG_MASK 0x00000008L +#define RLC_CGTT_MGCG_OVERRIDE__CGLS_MASK 0x00000010L +#define RLC_CGTT_MGCG_OVERRIDE__GRBM_MASK 0x00000020L + +/* BPM SERDES CMD */ +#define SET_BPM_SERDES_CMD 1 +#define CLE_BPM_SERDES_CMD 0 + +/* BPM Register Address*/ +enum { + BPM_REG_CGLS_EN = 0, /* Enable/Disable CGLS */ + BPM_REG_CGLS_ON, /* ON/OFF CGLS: shall be controlled by RLC FW */ + BPM_REG_CGCG_OVERRIDE, /* Set/Clear CGCG Override */ + BPM_REG_MGCG_OVERRIDE, /* Set/Clear MGCG Override */ + BPM_REG_FGCG_OVERRIDE, /* Set/Clear FGCG Override */ + BPM_REG_FGCG_MAX +}; + MODULE_FIRMWARE("amdgpu/carrizo_ce.bin"); MODULE_FIRMWARE("amdgpu/carrizo_pfp.bin"); MODULE_FIRMWARE("amdgpu/carrizo_me.bin"); @@ -964,6 +985,322 @@ static int gfx_v8_0_mec_init(struct amdgpu_device *adev) return 0; } +static const u32 vgpr_init_compute_shader[] = +{ + 0x7e000209, 0x7e020208, + 0x7e040207, 0x7e060206, + 0x7e080205, 0x7e0a0204, + 0x7e0c0203, 0x7e0e0202, + 0x7e100201, 0x7e120200, + 0x7e140209, 0x7e160208, + 0x7e180207, 0x7e1a0206, + 0x7e1c0205, 0x7e1e0204, + 0x7e200203, 0x7e220202, + 0x7e240201, 0x7e260200, + 0x7e280209, 0x7e2a0208, + 0x7e2c0207, 0x7e2e0206, + 0x7e300205, 0x7e320204, + 0x7e340203, 0x7e360202, + 0x7e380201, 0x7e3a0200, + 0x7e3c0209, 0x7e3e0208, + 0x7e400207, 0x7e420206, + 0x7e440205, 0x7e460204, + 0x7e480203, 0x7e4a0202, + 0x7e4c0201, 0x7e4e0200, + 0x7e500209, 0x7e520208, + 0x7e540207, 0x7e560206, + 0x7e580205, 0x7e5a0204, + 0x7e5c0203, 0x7e5e0202, + 0x7e600201, 0x7e620200, + 0x7e640209, 0x7e660208, + 0x7e680207, 0x7e6a0206, + 0x7e6c0205, 0x7e6e0204, + 0x7e700203, 0x7e720202, + 0x7e740201, 0x7e760200, + 0x7e780209, 0x7e7a0208, + 0x7e7c0207, 0x7e7e0206, + 0xbf8a0000, 0xbf810000, +}; + +static const u32 sgpr_init_compute_shader[] = +{ + 0xbe8a0100, 0xbe8c0102, + 0xbe8e0104, 0xbe900106, + 0xbe920108, 0xbe940100, + 0xbe960102, 0xbe980104, + 0xbe9a0106, 0xbe9c0108, + 0xbe9e0100, 0xbea00102, + 0xbea20104, 0xbea40106, + 0xbea60108, 
0xbea80100, + 0xbeaa0102, 0xbeac0104, + 0xbeae0106, 0xbeb00108, + 0xbeb20100, 0xbeb40102, + 0xbeb60104, 0xbeb80106, + 0xbeba0108, 0xbebc0100, + 0xbebe0102, 0xbec00104, + 0xbec20106, 0xbec40108, + 0xbec60100, 0xbec80102, + 0xbee60004, 0xbee70005, + 0xbeea0006, 0xbeeb0007, + 0xbee80008, 0xbee90009, + 0xbefc0000, 0xbf8a0000, + 0xbf810000, 0x00000000, +}; + +static const u32 vgpr_init_regs[] = +{ + mmCOMPUTE_STATIC_THREAD_MGMT_SE0, 0xffffffff, + mmCOMPUTE_RESOURCE_LIMITS, 0, + mmCOMPUTE_NUM_THREAD_X, 256*4, + mmCOMPUTE_NUM_THREAD_Y, 1, + mmCOMPUTE_NUM_THREAD_Z, 1, + mmCOMPUTE_PGM_RSRC2, 20, + mmCOMPUTE_USER_DATA_0, 0xedcedc00, + mmCOMPUTE_USER_DATA_1, 0xedcedc01, + mmCOMPUTE_USER_DATA_2, 0xedcedc02, + mmCOMPUTE_USER_DATA_3, 0xedcedc03, + mmCOMPUTE_USER_DATA_4, 0xedcedc04, + mmCOMPUTE_USER_DATA_5, 0xedcedc05, + mmCOMPUTE_USER_DATA_6, 0xedcedc06, + mmCOMPUTE_USER_DATA_7, 0xedcedc07, + mmCOMPUTE_USER_DATA_8, 0xedcedc08, + mmCOMPUTE_USER_DATA_9, 0xedcedc09, +}; + +static const u32 sgpr1_init_regs[] = +{ + mmCOMPUTE_STATIC_THREAD_MGMT_SE0, 0x0f, + mmCOMPUTE_RESOURCE_LIMITS, 0x1000000, + mmCOMPUTE_NUM_THREAD_X, 256*5, + mmCOMPUTE_NUM_THREAD_Y, 1, + mmCOMPUTE_NUM_THREAD_Z, 1, + mmCOMPUTE_PGM_RSRC2, 20, + mmCOMPUTE_USER_DATA_0, 0xedcedc00, + mmCOMPUTE_USER_DATA_1, 0xedcedc01, + mmCOMPUTE_USER_DATA_2, 0xedcedc02, + mmCOMPUTE_USER_DATA_3, 0xedcedc03, + mmCOMPUTE_USER_DATA_4, 0xedcedc04, + mmCOMPUTE_USER_DATA_5, 0xedcedc05, + mmCOMPUTE_USER_DATA_6, 0xedcedc06, + mmCOMPUTE_USER_DATA_7, 0xedcedc07, + mmCOMPUTE_USER_DATA_8, 0xedcedc08, + mmCOMPUTE_USER_DATA_9, 0xedcedc09, +}; + +static const u32 sgpr2_init_regs[] = +{ + mmCOMPUTE_STATIC_THREAD_MGMT_SE0, 0xf0, + mmCOMPUTE_RESOURCE_LIMITS, 0x1000000, + mmCOMPUTE_NUM_THREAD_X, 256*5, + mmCOMPUTE_NUM_THREAD_Y, 1, + mmCOMPUTE_NUM_THREAD_Z, 1, + mmCOMPUTE_PGM_RSRC2, 20, + mmCOMPUTE_USER_DATA_0, 0xedcedc00, + mmCOMPUTE_USER_DATA_1, 0xedcedc01, + mmCOMPUTE_USER_DATA_2, 0xedcedc02, + mmCOMPUTE_USER_DATA_3, 0xedcedc03, + mmCOMPUTE_USER_DATA_4, 0xedcedc04, + mmCOMPUTE_USER_DATA_5, 0xedcedc05, + mmCOMPUTE_USER_DATA_6, 0xedcedc06, + mmCOMPUTE_USER_DATA_7, 0xedcedc07, + mmCOMPUTE_USER_DATA_8, 0xedcedc08, + mmCOMPUTE_USER_DATA_9, 0xedcedc09, +}; + +static const u32 sec_ded_counter_registers[] = +{ + mmCPC_EDC_ATC_CNT, + mmCPC_EDC_SCRATCH_CNT, + mmCPC_EDC_UCODE_CNT, + mmCPF_EDC_ATC_CNT, + mmCPF_EDC_ROQ_CNT, + mmCPF_EDC_TAG_CNT, + mmCPG_EDC_ATC_CNT, + mmCPG_EDC_DMA_CNT, + mmCPG_EDC_TAG_CNT, + mmDC_EDC_CSINVOC_CNT, + mmDC_EDC_RESTORE_CNT, + mmDC_EDC_STATE_CNT, + mmGDS_EDC_CNT, + mmGDS_EDC_GRBM_CNT, + mmGDS_EDC_OA_DED, + mmSPI_EDC_CNT, + mmSQC_ATC_EDC_GATCL1_CNT, + mmSQC_EDC_CNT, + mmSQ_EDC_DED_CNT, + mmSQ_EDC_INFO, + mmSQ_EDC_SEC_CNT, + mmTCC_EDC_CNT, + mmTCP_ATC_EDC_GATCL1_CNT, + mmTCP_EDC_CNT, + mmTD_EDC_CNT +}; + +static int gfx_v8_0_do_edc_gpr_workarounds(struct amdgpu_device *adev) +{ + struct amdgpu_ring *ring = &adev->gfx.compute_ring[0]; + struct amdgpu_ib ib; + struct fence *f = NULL; + int r, i; + u32 tmp; + unsigned total_size, vgpr_offset, sgpr_offset; + u64 gpu_addr; + + /* only supported on CZ */ + if (adev->asic_type != CHIP_CARRIZO) + return 0; + + /* bail if the compute ring is not ready */ + if (!ring->ready) + return 0; + + tmp = RREG32(mmGB_EDC_MODE); + WREG32(mmGB_EDC_MODE, 0); + + total_size = + (((ARRAY_SIZE(vgpr_init_regs) / 2) * 3) + 4 + 5 + 2) * 4; + total_size += + (((ARRAY_SIZE(sgpr1_init_regs) / 2) * 3) + 4 + 5 + 2) * 4; + total_size += + (((ARRAY_SIZE(sgpr2_init_regs) / 2) * 3) + 4 + 5 + 2) * 4; + total_size = ALIGN(total_size, 256); + 
vgpr_offset = total_size; + total_size += ALIGN(sizeof(vgpr_init_compute_shader), 256); + sgpr_offset = total_size; + total_size += sizeof(sgpr_init_compute_shader); + + /* allocate an indirect buffer to put the commands in */ + memset(&ib, 0, sizeof(ib)); + r = amdgpu_ib_get(ring, NULL, total_size, &ib); + if (r) { + DRM_ERROR("amdgpu: failed to get ib (%d).\n", r); + return r; + } + + /* load the compute shaders */ + for (i = 0; i < ARRAY_SIZE(vgpr_init_compute_shader); i++) + ib.ptr[i + (vgpr_offset / 4)] = vgpr_init_compute_shader[i]; + + for (i = 0; i < ARRAY_SIZE(sgpr_init_compute_shader); i++) + ib.ptr[i + (sgpr_offset / 4)] = sgpr_init_compute_shader[i]; + + /* init the ib length to 0 */ + ib.length_dw = 0; + + /* VGPR */ + /* write the register state for the compute dispatch */ + for (i = 0; i < ARRAY_SIZE(vgpr_init_regs); i += 2) { + ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1); + ib.ptr[ib.length_dw++] = vgpr_init_regs[i] - PACKET3_SET_SH_REG_START; + ib.ptr[ib.length_dw++] = vgpr_init_regs[i + 1]; + } + /* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */ + gpu_addr = (ib.gpu_addr + (u64)vgpr_offset) >> 8; + ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 2); + ib.ptr[ib.length_dw++] = mmCOMPUTE_PGM_LO - PACKET3_SET_SH_REG_START; + ib.ptr[ib.length_dw++] = lower_32_bits(gpu_addr); + ib.ptr[ib.length_dw++] = upper_32_bits(gpu_addr); + + /* write dispatch packet */ + ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3); + ib.ptr[ib.length_dw++] = 8; /* x */ + ib.ptr[ib.length_dw++] = 1; /* y */ + ib.ptr[ib.length_dw++] = 1; /* z */ + ib.ptr[ib.length_dw++] = + REG_SET_FIELD(0, COMPUTE_DISPATCH_INITIATOR, COMPUTE_SHADER_EN, 1); + + /* write CS partial flush packet */ + ib.ptr[ib.length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0); + ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4); + + /* SGPR1 */ + /* write the register state for the compute dispatch */ + for (i = 0; i < ARRAY_SIZE(sgpr1_init_regs); i += 2) { + ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1); + ib.ptr[ib.length_dw++] = sgpr1_init_regs[i] - PACKET3_SET_SH_REG_START; + ib.ptr[ib.length_dw++] = sgpr1_init_regs[i + 1]; + } + /* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */ + gpu_addr = (ib.gpu_addr + (u64)sgpr_offset) >> 8; + ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 2); + ib.ptr[ib.length_dw++] = mmCOMPUTE_PGM_LO - PACKET3_SET_SH_REG_START; + ib.ptr[ib.length_dw++] = lower_32_bits(gpu_addr); + ib.ptr[ib.length_dw++] = upper_32_bits(gpu_addr); + + /* write dispatch packet */ + ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3); + ib.ptr[ib.length_dw++] = 8; /* x */ + ib.ptr[ib.length_dw++] = 1; /* y */ + ib.ptr[ib.length_dw++] = 1; /* z */ + ib.ptr[ib.length_dw++] = + REG_SET_FIELD(0, COMPUTE_DISPATCH_INITIATOR, COMPUTE_SHADER_EN, 1); + + /* write CS partial flush packet */ + ib.ptr[ib.length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0); + ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4); + + /* SGPR2 */ + /* write the register state for the compute dispatch */ + for (i = 0; i < ARRAY_SIZE(sgpr2_init_regs); i += 2) { + ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1); + ib.ptr[ib.length_dw++] = sgpr2_init_regs[i] - PACKET3_SET_SH_REG_START; + ib.ptr[ib.length_dw++] = sgpr2_init_regs[i + 1]; + } + /* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */ + gpu_addr = (ib.gpu_addr + (u64)sgpr_offset) >> 8; + ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 2); + ib.ptr[ib.length_dw++] = 
mmCOMPUTE_PGM_LO - PACKET3_SET_SH_REG_START; + ib.ptr[ib.length_dw++] = lower_32_bits(gpu_addr); + ib.ptr[ib.length_dw++] = upper_32_bits(gpu_addr); + + /* write dispatch packet */ + ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3); + ib.ptr[ib.length_dw++] = 8; /* x */ + ib.ptr[ib.length_dw++] = 1; /* y */ + ib.ptr[ib.length_dw++] = 1; /* z */ + ib.ptr[ib.length_dw++] = + REG_SET_FIELD(0, COMPUTE_DISPATCH_INITIATOR, COMPUTE_SHADER_EN, 1); + + /* write CS partial flush packet */ + ib.ptr[ib.length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0); + ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4); + + /* shedule the ib on the ring */ + r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, &ib, 1, NULL, + AMDGPU_FENCE_OWNER_UNDEFINED, + &f); + if (r) { + DRM_ERROR("amdgpu: ib submit failed (%d).\n", r); + goto fail; + } + + /* wait for the GPU to finish processing the IB */ + r = fence_wait(f, false); + if (r) { + DRM_ERROR("amdgpu: fence wait failed (%d).\n", r); + goto fail; + } + + tmp = REG_SET_FIELD(tmp, GB_EDC_MODE, DED_MODE, 2); + tmp = REG_SET_FIELD(tmp, GB_EDC_MODE, PROP_FED, 1); + WREG32(mmGB_EDC_MODE, tmp); + + tmp = RREG32(mmCC_GC_EDC_CONFIG); + tmp = REG_SET_FIELD(tmp, CC_GC_EDC_CONFIG, DIS_EDC, 0) | 1; + WREG32(mmCC_GC_EDC_CONFIG, tmp); + + + /* read back registers to clear the counters */ + for (i = 0; i < ARRAY_SIZE(sec_ded_counter_registers); i++) + RREG32(sec_ded_counter_registers[i]); + +fail: + fence_put(f); + amdgpu_ib_free(adev, &ib); + + return r; +} + static void gfx_v8_0_gpu_early_init(struct amdgpu_device *adev) { u32 gb_addr_config; @@ -1323,1418 +1660,923 @@ static int gfx_v8_0_sw_fini(void *handle) static void gfx_v8_0_tiling_mode_table_init(struct amdgpu_device *adev) { - const u32 num_tile_mode_states = 32; - const u32 num_secondary_tile_mode_states = 16; - u32 reg_offset, gb_tile_moden, split_equal_to_row_size; + uint32_t *modearray, *mod2array; + const u32 num_tile_mode_states = ARRAY_SIZE(adev->gfx.config.tile_mode_array); + const u32 num_secondary_tile_mode_states = ARRAY_SIZE(adev->gfx.config.macrotile_mode_array); + u32 reg_offset; - switch (adev->gfx.config.mem_row_size_in_kb) { - case 1: - split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_1KB; - break; - case 2: - default: - split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_2KB; - break; - case 4: - split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_4KB; - break; - } + modearray = adev->gfx.config.tile_mode_array; + mod2array = adev->gfx.config.macrotile_mode_array; + + for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) + modearray[reg_offset] = 0; + + for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) + mod2array[reg_offset] = 0; switch (adev->asic_type) { case CHIP_TOPAZ: - for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) { - switch (reg_offset) { - case 0: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P2) | - TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | - MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); - break; - case 1: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P2) | - TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) | - MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); - break; - case 2: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P2) | - TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | - MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); - break; - case 3: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P2) | - 
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) | - MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); - break; - case 4: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P2) | - TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) | - MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); - break; - case 5: - gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P2) | - TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) | - MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); - break; - case 6: - gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P2) | - TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) | - MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); - break; - case 8: - gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) | - PIPE_CONFIG(ADDR_SURF_P2)); - break; - case 9: - gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P2) | - MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); - break; - case 10: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P2) | - MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); - break; - case 11: - gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P2) | - MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); - break; - case 13: - gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P2) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); - break; - case 14: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P2) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); - break; - case 15: - gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P2) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); - break; - case 16: - gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P2) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); - break; - case 18: - gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) | - PIPE_CONFIG(ADDR_SURF_P2) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); - break; - case 19: - gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) | - PIPE_CONFIG(ADDR_SURF_P2) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); - break; - case 20: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) | - PIPE_CONFIG(ADDR_SURF_P2) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); - break; - case 21: - gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THICK) | - PIPE_CONFIG(ADDR_SURF_P2) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); - break; - case 22: - gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) | - PIPE_CONFIG(ADDR_SURF_P2) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); - break; - case 24: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) | - PIPE_CONFIG(ADDR_SURF_P2) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); - break; - case 25: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) | - PIPE_CONFIG(ADDR_SURF_P2) | - 
MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); - break; - case 26: - gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) | - PIPE_CONFIG(ADDR_SURF_P2) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); - break; - case 27: - gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P2) | - MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); - break; - case 28: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P2) | - MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); - break; - case 29: - gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P2) | - MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); - break; - case 7: - case 12: - case 17: - case 23: - /* unused idx */ - continue; - default: - gb_tile_moden = 0; - break; - }; - adev->gfx.config.tile_mode_array[reg_offset] = gb_tile_moden; - WREG32(mmGB_TILE_MODE0 + reg_offset, gb_tile_moden); - } - for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) { - switch (reg_offset) { - case 0: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | - NUM_BANKS(ADDR_SURF_8_BANK)); - break; - case 1: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | - NUM_BANKS(ADDR_SURF_8_BANK)); - break; - case 2: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | - NUM_BANKS(ADDR_SURF_8_BANK)); - break; - case 3: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | - NUM_BANKS(ADDR_SURF_8_BANK)); - break; - case 4: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | - NUM_BANKS(ADDR_SURF_8_BANK)); - break; - case 5: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | - NUM_BANKS(ADDR_SURF_8_BANK)); - break; - case 6: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | - NUM_BANKS(ADDR_SURF_8_BANK)); - break; - case 8: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | - NUM_BANKS(ADDR_SURF_16_BANK)); - break; - case 9: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | - NUM_BANKS(ADDR_SURF_16_BANK)); - break; - case 10: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | - NUM_BANKS(ADDR_SURF_16_BANK)); - break; - case 11: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | - NUM_BANKS(ADDR_SURF_16_BANK)); - break; - case 12: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | - 
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | - NUM_BANKS(ADDR_SURF_16_BANK)); - break; - case 13: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | - NUM_BANKS(ADDR_SURF_16_BANK)); - break; - case 14: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | - NUM_BANKS(ADDR_SURF_8_BANK)); - break; - case 7: - /* unused idx */ - continue; - default: - gb_tile_moden = 0; - break; - }; - adev->gfx.config.macrotile_mode_array[reg_offset] = gb_tile_moden; - WREG32(mmGB_MACROTILE_MODE0 + reg_offset, gb_tile_moden); - } + modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + modearray[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + modearray[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + modearray[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + modearray[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + modearray[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + modearray[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + modearray[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) | + PIPE_CONFIG(ADDR_SURF_P2)); + modearray[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); + modearray[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); + modearray[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); + modearray[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); + modearray[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); + modearray[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); + modearray[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); + modearray[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + modearray[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) | + 
PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + modearray[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + modearray[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + modearray[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + modearray[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + modearray[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + modearray[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + modearray[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); + modearray[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); + modearray[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); + + mod2array[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_8_BANK)); + mod2array[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_8_BANK)); + mod2array[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_8_BANK)); + mod2array[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | + NUM_BANKS(ADDR_SURF_8_BANK)); + mod2array[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_8_BANK)); + mod2array[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_8_BANK)); + mod2array[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_8_BANK)); + mod2array[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | + NUM_BANKS(ADDR_SURF_16_BANK)); + mod2array[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | + NUM_BANKS(ADDR_SURF_16_BANK)); + mod2array[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | + NUM_BANKS(ADDR_SURF_16_BANK)); + mod2array[11] = 
(BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | + NUM_BANKS(ADDR_SURF_16_BANK)); + mod2array[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | + NUM_BANKS(ADDR_SURF_16_BANK)); + mod2array[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | + NUM_BANKS(ADDR_SURF_16_BANK)); + mod2array[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_8_BANK)); + + for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) + if (reg_offset != 7 && reg_offset != 12 && reg_offset != 17 && + reg_offset != 23) + WREG32(mmGB_TILE_MODE0 + reg_offset, modearray[reg_offset]); + + for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) + if (reg_offset != 7) + WREG32(mmGB_MACROTILE_MODE0 + reg_offset, mod2array[reg_offset]); + + break; case CHIP_FIJI: - for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) { - switch (reg_offset) { - case 0: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | - TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | - MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); - break; - case 1: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | - TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) | - MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); - break; - case 2: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | - TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | - MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); - break; - case 3: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | - TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) | - MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); - break; - case 4: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | - TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) | - MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); - break; - case 5: - gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | - TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) | - MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); - break; - case 6: - gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | - TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) | - MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); - break; - case 7: - gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P4_16x16) | - TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) | - MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); - break; - case 8: - gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) | - PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16)); - break; - case 9: - gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); - break; - case 10: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); - break; - case 11: - gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) 
| - MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); - break; - case 12: - gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P4_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); - break; - case 13: - gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); - break; - case 14: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); - break; - case 15: - gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); - break; - case 16: - gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); - break; - case 17: - gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P4_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); - break; - case 18: - gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) | - PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); - break; - case 19: - gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) | - PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); - break; - case 20: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) | - PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); - break; - case 21: - gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THICK) | - PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); - break; - case 22: - gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) | - PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); - break; - case 23: - gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) | - PIPE_CONFIG(ADDR_SURF_P4_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); - break; - case 24: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) | - PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); - break; - case 25: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) | - PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); - break; - case 26: - gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) | - PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); - break; - case 27: - gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); - break; - case 28: - gb_tile_moden = 
(ARRAY_MODE(ARRAY_2D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); - break; - case 29: - gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); - break; - case 30: - gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P4_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); - break; - default: - gb_tile_moden = 0; - break; - } - adev->gfx.config.tile_mode_array[reg_offset] = gb_tile_moden; - WREG32(mmGB_TILE_MODE0 + reg_offset, gb_tile_moden); - } - for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) { - switch (reg_offset) { - case 0: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | - NUM_BANKS(ADDR_SURF_8_BANK)); - break; - case 1: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | - NUM_BANKS(ADDR_SURF_8_BANK)); - break; - case 2: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | - NUM_BANKS(ADDR_SURF_8_BANK)); - break; - case 3: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | - NUM_BANKS(ADDR_SURF_8_BANK)); - break; - case 4: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | - NUM_BANKS(ADDR_SURF_8_BANK)); - break; - case 5: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | - NUM_BANKS(ADDR_SURF_8_BANK)); - break; - case 6: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | - NUM_BANKS(ADDR_SURF_8_BANK)); - break; - case 8: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | - NUM_BANKS(ADDR_SURF_8_BANK)); - break; - case 9: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | - NUM_BANKS(ADDR_SURF_8_BANK)); - break; - case 10: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | - NUM_BANKS(ADDR_SURF_8_BANK)); - break; - case 11: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | - NUM_BANKS(ADDR_SURF_8_BANK)); - break; - case 12: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | - NUM_BANKS(ADDR_SURF_8_BANK)); - break; - case 13: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | - NUM_BANKS(ADDR_SURF_8_BANK)); - break; - case 14: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | - 
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | - NUM_BANKS(ADDR_SURF_4_BANK)); - break; - case 7: - /* unused idx */ - continue; - default: - gb_tile_moden = 0; - break; - } - adev->gfx.config.macrotile_mode_array[reg_offset] = gb_tile_moden; - WREG32(mmGB_MACROTILE_MODE0 + reg_offset, gb_tile_moden); - } + modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + modearray[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + modearray[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + modearray[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + modearray[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + modearray[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + modearray[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + modearray[7] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + modearray[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16)); + modearray[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); + modearray[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); + modearray[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); + modearray[12] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); + modearray[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); + modearray[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); + modearray[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); + modearray[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); + modearray[17] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | + 
PIPE_CONFIG(ADDR_SURF_P4_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); + modearray[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + modearray[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + modearray[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + modearray[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + modearray[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + modearray[23] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + modearray[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + modearray[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + modearray[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + modearray[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); + modearray[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); + modearray[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); + modearray[30] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); + + mod2array[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_8_BANK)); + mod2array[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_8_BANK)); + mod2array[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_8_BANK)); + mod2array[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_8_BANK)); + mod2array[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | + NUM_BANKS(ADDR_SURF_8_BANK)); + mod2array[5] = 
(BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | + NUM_BANKS(ADDR_SURF_8_BANK)); + mod2array[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | + NUM_BANKS(ADDR_SURF_8_BANK)); + mod2array[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_8_BANK)); + mod2array[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_8_BANK)); + mod2array[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | + NUM_BANKS(ADDR_SURF_8_BANK)); + mod2array[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | + NUM_BANKS(ADDR_SURF_8_BANK)); + mod2array[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_8_BANK)); + mod2array[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_8_BANK)); + mod2array[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | + NUM_BANKS(ADDR_SURF_4_BANK)); + + for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) + WREG32(mmGB_TILE_MODE0 + reg_offset, modearray[reg_offset]); + + for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) + if (reg_offset != 7) + WREG32(mmGB_MACROTILE_MODE0 + reg_offset, mod2array[reg_offset]); + break; case CHIP_TONGA: - for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) { - switch (reg_offset) { - case 0: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | - TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | - MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); - break; - case 1: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | - TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) | - MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); - break; - case 2: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | - TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | - MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); - break; - case 3: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | - TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) | - MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); - break; - case 4: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | - TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) | - MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); - break; - case 5: - gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | - TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) | - MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); - break; - case 6: - gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | - TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) | - MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); - break; - case 7: - gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | - 
PIPE_CONFIG(ADDR_SURF_P4_16x16) | - TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) | - MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); - break; - case 8: - gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) | - PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16)); - break; - case 9: - gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); - break; - case 10: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); - break; - case 11: - gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); - break; - case 12: - gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P4_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); - break; - case 13: - gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); - break; - case 14: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); - break; - case 15: - gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); - break; - case 16: - gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); - break; - case 17: - gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P4_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); - break; - case 18: - gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) | - PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); - break; - case 19: - gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) | - PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); - break; - case 20: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) | - PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); - break; - case 21: - gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THICK) | - PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); - break; - case 22: - gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) | - PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); - break; - case 23: - gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) | - PIPE_CONFIG(ADDR_SURF_P4_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); - break; - case 24: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) | - PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | - 
MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); - break; - case 25: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) | - PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); - break; - case 26: - gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) | - PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); - break; - case 27: - gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); - break; - case 28: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); - break; - case 29: - gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); - break; - case 30: - gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P4_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); - break; - default: - gb_tile_moden = 0; - break; - }; - adev->gfx.config.tile_mode_array[reg_offset] = gb_tile_moden; - WREG32(mmGB_TILE_MODE0 + reg_offset, gb_tile_moden); - } - for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) { - switch (reg_offset) { - case 0: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | - NUM_BANKS(ADDR_SURF_16_BANK)); - break; - case 1: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | - NUM_BANKS(ADDR_SURF_16_BANK)); - break; - case 2: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | - NUM_BANKS(ADDR_SURF_16_BANK)); - break; - case 3: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | - NUM_BANKS(ADDR_SURF_16_BANK)); - break; - case 4: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | - NUM_BANKS(ADDR_SURF_16_BANK)); - break; - case 5: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | - NUM_BANKS(ADDR_SURF_16_BANK)); - break; - case 6: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | - NUM_BANKS(ADDR_SURF_16_BANK)); - break; - case 8: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | - NUM_BANKS(ADDR_SURF_16_BANK)); - break; - case 9: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | - NUM_BANKS(ADDR_SURF_16_BANK)); - break; - case 10: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | - 
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | - NUM_BANKS(ADDR_SURF_16_BANK)); - break; - case 11: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | - NUM_BANKS(ADDR_SURF_16_BANK)); - break; - case 12: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | - NUM_BANKS(ADDR_SURF_8_BANK)); - break; - case 13: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | - NUM_BANKS(ADDR_SURF_4_BANK)); - break; - case 14: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | - NUM_BANKS(ADDR_SURF_4_BANK)); - break; - case 7: - /* unused idx */ - continue; - default: - gb_tile_moden = 0; - break; - }; - adev->gfx.config.macrotile_mode_array[reg_offset] = gb_tile_moden; - WREG32(mmGB_MACROTILE_MODE0 + reg_offset, gb_tile_moden); - } + modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + modearray[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + modearray[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + modearray[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + modearray[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + modearray[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + modearray[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + modearray[7] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + modearray[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16)); + modearray[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); + modearray[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); + modearray[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); + modearray[12] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); + modearray[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + 
PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); + modearray[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); + modearray[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); + modearray[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); + modearray[17] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); + modearray[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + modearray[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + modearray[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + modearray[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + modearray[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + modearray[23] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + modearray[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + modearray[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + modearray[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + modearray[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); + modearray[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); + modearray[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); + modearray[30] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); + + mod2array[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | 
+ NUM_BANKS(ADDR_SURF_16_BANK)); + mod2array[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | + NUM_BANKS(ADDR_SURF_16_BANK)); + mod2array[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | + NUM_BANKS(ADDR_SURF_16_BANK)); + mod2array[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | + NUM_BANKS(ADDR_SURF_16_BANK)); + mod2array[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_16_BANK)); + mod2array[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | + NUM_BANKS(ADDR_SURF_16_BANK)); + mod2array[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | + NUM_BANKS(ADDR_SURF_16_BANK)); + mod2array[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | + NUM_BANKS(ADDR_SURF_16_BANK)); + mod2array[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | + NUM_BANKS(ADDR_SURF_16_BANK)); + mod2array[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_16_BANK)); + mod2array[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_16_BANK)); + mod2array[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | + NUM_BANKS(ADDR_SURF_8_BANK)); + mod2array[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | + NUM_BANKS(ADDR_SURF_4_BANK)); + mod2array[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | + NUM_BANKS(ADDR_SURF_4_BANK)); + + for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) + WREG32(mmGB_TILE_MODE0 + reg_offset, modearray[reg_offset]); + + for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) + if (reg_offset != 7) + WREG32(mmGB_MACROTILE_MODE0 + reg_offset, mod2array[reg_offset]); + break; case CHIP_STONEY: - for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) { - switch (reg_offset) { - case 0: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P2) | - TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | - MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); - break; - case 1: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P2) | - TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) | - MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); - break; - case 2: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P2) | - TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | - MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); - break; - case 3: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P2) | - TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) | - 
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); - break; - case 4: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P2) | - TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) | - MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); - break; - case 5: - gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P2) | - TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) | - MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); - break; - case 6: - gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P2) | - TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) | - MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); - break; - case 8: - gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) | - PIPE_CONFIG(ADDR_SURF_P2)); - break; - case 9: - gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P2) | - MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); - break; - case 10: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P2) | - MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); - break; - case 11: - gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P2) | - MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); - break; - case 13: - gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P2) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); - break; - case 14: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P2) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); - break; - case 15: - gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P2) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); - break; - case 16: - gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P2) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); - break; - case 18: - gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) | - PIPE_CONFIG(ADDR_SURF_P2) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); - break; - case 19: - gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) | - PIPE_CONFIG(ADDR_SURF_P2) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); - break; - case 20: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) | - PIPE_CONFIG(ADDR_SURF_P2) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); - break; - case 21: - gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THICK) | - PIPE_CONFIG(ADDR_SURF_P2) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); - break; - case 22: - gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) | - PIPE_CONFIG(ADDR_SURF_P2) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); - break; - case 24: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) | - PIPE_CONFIG(ADDR_SURF_P2) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); - break; - case 25: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) | - PIPE_CONFIG(ADDR_SURF_P2) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | - 
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); - break; - case 26: - gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) | - PIPE_CONFIG(ADDR_SURF_P2) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); - break; - case 27: - gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P2) | - MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); - break; - case 28: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P2) | - MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); - break; - case 29: - gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P2) | - MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); - break; - case 7: - case 12: - case 17: - case 23: - /* unused idx */ - continue; - default: - gb_tile_moden = 0; - break; - }; - adev->gfx.config.tile_mode_array[reg_offset] = gb_tile_moden; - WREG32(mmGB_TILE_MODE0 + reg_offset, gb_tile_moden); - } - for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) { - switch (reg_offset) { - case 0: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | - NUM_BANKS(ADDR_SURF_8_BANK)); - break; - case 1: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | - NUM_BANKS(ADDR_SURF_8_BANK)); - break; - case 2: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | - NUM_BANKS(ADDR_SURF_8_BANK)); - break; - case 3: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | - NUM_BANKS(ADDR_SURF_8_BANK)); - break; - case 4: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | - NUM_BANKS(ADDR_SURF_8_BANK)); - break; - case 5: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | - NUM_BANKS(ADDR_SURF_8_BANK)); - break; - case 6: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | - NUM_BANKS(ADDR_SURF_8_BANK)); - break; - case 8: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | - NUM_BANKS(ADDR_SURF_16_BANK)); - break; - case 9: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | - NUM_BANKS(ADDR_SURF_16_BANK)); - break; - case 10: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | - NUM_BANKS(ADDR_SURF_16_BANK)); - break; - case 11: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | - NUM_BANKS(ADDR_SURF_16_BANK)); - break; - case 12: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | - 
NUM_BANKS(ADDR_SURF_16_BANK)); - break; - case 13: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | - NUM_BANKS(ADDR_SURF_16_BANK)); - break; - case 14: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | - NUM_BANKS(ADDR_SURF_8_BANK)); - break; - case 7: - /* unused idx */ - continue; - default: - gb_tile_moden = 0; - break; - }; - adev->gfx.config.macrotile_mode_array[reg_offset] = gb_tile_moden; - WREG32(mmGB_MACROTILE_MODE0 + reg_offset, gb_tile_moden); - } + modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + modearray[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + modearray[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + modearray[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + modearray[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + modearray[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + modearray[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + modearray[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) | + PIPE_CONFIG(ADDR_SURF_P2)); + modearray[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); + modearray[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); + modearray[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); + modearray[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); + modearray[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); + modearray[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); + modearray[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); + modearray[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + modearray[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P2) | + 
MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + modearray[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + modearray[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + modearray[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + modearray[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + modearray[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + modearray[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + modearray[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); + modearray[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); + modearray[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); + + mod2array[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | + NUM_BANKS(ADDR_SURF_8_BANK)); + mod2array[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | + NUM_BANKS(ADDR_SURF_8_BANK)); + mod2array[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_8_BANK)); + mod2array[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_8_BANK)); + mod2array[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_8_BANK)); + mod2array[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_8_BANK)); + mod2array[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_8_BANK)); + mod2array[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | + NUM_BANKS(ADDR_SURF_16_BANK)); + mod2array[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | + NUM_BANKS(ADDR_SURF_16_BANK)); + mod2array[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | + NUM_BANKS(ADDR_SURF_16_BANK)); + mod2array[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) | 
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | + NUM_BANKS(ADDR_SURF_16_BANK)); + mod2array[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | + NUM_BANKS(ADDR_SURF_16_BANK)); + mod2array[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | + NUM_BANKS(ADDR_SURF_16_BANK)); + mod2array[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_8_BANK)); + + for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) + if (reg_offset != 7 && reg_offset != 12 && reg_offset != 17 && + reg_offset != 23) + WREG32(mmGB_TILE_MODE0 + reg_offset, modearray[reg_offset]); + + for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) + if (reg_offset != 7) + WREG32(mmGB_MACROTILE_MODE0 + reg_offset, mod2array[reg_offset]); + break; - case CHIP_CARRIZO: default: - for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) { - switch (reg_offset) { - case 0: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P2) | - TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | - MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); - break; - case 1: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P2) | - TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) | - MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); - break; - case 2: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P2) | - TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | - MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); - break; - case 3: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P2) | - TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) | - MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); - break; - case 4: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P2) | - TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) | - MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); - break; - case 5: - gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P2) | - TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) | - MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); - break; - case 6: - gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P2) | - TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) | - MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); - break; - case 8: - gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) | - PIPE_CONFIG(ADDR_SURF_P2)); - break; - case 9: - gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P2) | - MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); - break; - case 10: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P2) | - MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); - break; - case 11: - gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P2) | - MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); - break; - case 13: - gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P2) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); - break; - case 14: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) 
| - PIPE_CONFIG(ADDR_SURF_P2) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); - break; - case 15: - gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P2) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); - break; - case 16: - gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P2) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); - break; - case 18: - gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) | - PIPE_CONFIG(ADDR_SURF_P2) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); - break; - case 19: - gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) | - PIPE_CONFIG(ADDR_SURF_P2) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); - break; - case 20: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) | - PIPE_CONFIG(ADDR_SURF_P2) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); - break; - case 21: - gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THICK) | - PIPE_CONFIG(ADDR_SURF_P2) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); - break; - case 22: - gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) | - PIPE_CONFIG(ADDR_SURF_P2) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); - break; - case 24: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) | - PIPE_CONFIG(ADDR_SURF_P2) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); - break; - case 25: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) | - PIPE_CONFIG(ADDR_SURF_P2) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); - break; - case 26: - gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) | - PIPE_CONFIG(ADDR_SURF_P2) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); - break; - case 27: - gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P2) | - MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); - break; - case 28: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P2) | - MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); - break; - case 29: - gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P2) | - MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); - break; - case 7: - case 12: - case 17: - case 23: - /* unused idx */ - continue; - default: - gb_tile_moden = 0; - break; - }; - adev->gfx.config.tile_mode_array[reg_offset] = gb_tile_moden; - WREG32(mmGB_TILE_MODE0 + reg_offset, gb_tile_moden); - } - for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) { - switch (reg_offset) { - case 0: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | - NUM_BANKS(ADDR_SURF_8_BANK)); - break; - case 1: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | - NUM_BANKS(ADDR_SURF_8_BANK)); - break; - case 2: - gb_tile_moden = 
(BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | - NUM_BANKS(ADDR_SURF_8_BANK)); - break; - case 3: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | - NUM_BANKS(ADDR_SURF_8_BANK)); - break; - case 4: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | - NUM_BANKS(ADDR_SURF_8_BANK)); - break; - case 5: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | - NUM_BANKS(ADDR_SURF_8_BANK)); - break; - case 6: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | - NUM_BANKS(ADDR_SURF_8_BANK)); - break; - case 8: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | - NUM_BANKS(ADDR_SURF_16_BANK)); - break; - case 9: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | - NUM_BANKS(ADDR_SURF_16_BANK)); - break; - case 10: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | - NUM_BANKS(ADDR_SURF_16_BANK)); - break; - case 11: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | - NUM_BANKS(ADDR_SURF_16_BANK)); - break; - case 12: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | - NUM_BANKS(ADDR_SURF_16_BANK)); - break; - case 13: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | - NUM_BANKS(ADDR_SURF_16_BANK)); - break; - case 14: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | - NUM_BANKS(ADDR_SURF_8_BANK)); - break; - case 7: - /* unused idx */ - continue; - default: - gb_tile_moden = 0; - break; - }; - adev->gfx.config.macrotile_mode_array[reg_offset] = gb_tile_moden; - WREG32(mmGB_MACROTILE_MODE0 + reg_offset, gb_tile_moden); - } + dev_warn(adev->dev, + "Unknown chip type (%d) in function gfx_v8_0_tiling_mode_table_init() falling through to CHIP_CARRIZO\n", + adev->asic_type); + + case CHIP_CARRIZO: + modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + modearray[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + modearray[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + modearray[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + modearray[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + 
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + modearray[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + modearray[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + modearray[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) | + PIPE_CONFIG(ADDR_SURF_P2)); + modearray[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); + modearray[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); + modearray[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); + modearray[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); + modearray[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); + modearray[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); + modearray[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); + modearray[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + modearray[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + modearray[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + modearray[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + modearray[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + modearray[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + modearray[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + modearray[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + modearray[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); + modearray[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + 
MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); + modearray[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); + + mod2array[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | + NUM_BANKS(ADDR_SURF_8_BANK)); + mod2array[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | + NUM_BANKS(ADDR_SURF_8_BANK)); + mod2array[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_8_BANK)); + mod2array[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_8_BANK)); + mod2array[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_8_BANK)); + mod2array[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_8_BANK)); + mod2array[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_8_BANK)); + mod2array[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | + NUM_BANKS(ADDR_SURF_16_BANK)); + mod2array[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | + NUM_BANKS(ADDR_SURF_16_BANK)); + mod2array[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | + NUM_BANKS(ADDR_SURF_16_BANK)); + mod2array[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | + NUM_BANKS(ADDR_SURF_16_BANK)); + mod2array[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | + NUM_BANKS(ADDR_SURF_16_BANK)); + mod2array[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | + NUM_BANKS(ADDR_SURF_16_BANK)); + mod2array[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_8_BANK)); + + for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) + if (reg_offset != 7 && reg_offset != 12 && reg_offset != 17 && + reg_offset != 23) + WREG32(mmGB_TILE_MODE0 + reg_offset, modearray[reg_offset]); + + for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) + if (reg_offset != 7) + WREG32(mmGB_MACROTILE_MODE0 + reg_offset, mod2array[reg_offset]); + + break; } } static u32 gfx_v8_0_create_bitmask(u32 bit_width) { - u32 i, mask = 0; - - for (i = 0; i < bit_width; i++) { - mask <<= 1; - mask |= 1; - } - return mask; + return (u32)((1ULL << bit_width) - 1); } void gfx_v8_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num) @@ -2809,7 +2651,7 @@ static void gfx_v8_0_setup_rb(struct amdgpu_device *adev, 
mutex_lock(&adev->grbm_idx_mutex); for (i = 0; i < se_num; i++) { gfx_v8_0_select_se_sh(adev, i, 0xffffffff); - data = 0; + data = RREG32(mmPA_SC_RASTER_CONFIG); for (j = 0; j < sh_per_se; j++) { switch (enabled_rbs & 3) { case 0: @@ -2997,17 +2839,11 @@ static void gfx_v8_0_enable_gui_idle_interrupt(struct amdgpu_device *adev, { u32 tmp = RREG32(mmCP_INT_CNTL_RING0); - if (enable) { - tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE, 1); - tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE, 1); - tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE, 1); - tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE, 1); - } else { - tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE, 0); - tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE, 0); - tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE, 0); - tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE, 0); - } + tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE, enable ? 1 : 0); + tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE, enable ? 1 : 0); + tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE, enable ? 1 : 0); + tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE, enable ? 1 : 0); + WREG32(mmCP_INT_CNTL_RING0, tmp); } @@ -3087,16 +2923,18 @@ static int gfx_v8_0_rlc_resume(struct amdgpu_device *adev) gfx_v8_0_rlc_reset(adev); - if (!adev->firmware.smu_load) { - /* legacy rlc firmware loading */ - r = gfx_v8_0_rlc_load_microcode(adev); - if (r) - return r; - } else { - r = adev->smu.smumgr_funcs->check_fw_load_finish(adev, - AMDGPU_UCODE_ID_RLC_G); - if (r) - return -EINVAL; + if (!adev->pp_enabled) { + if (!adev->firmware.smu_load) { + /* legacy rlc firmware loading */ + r = gfx_v8_0_rlc_load_microcode(adev); + if (r) + return r; + } else { + r = adev->smu.smumgr_funcs->check_fw_load_finish(adev, + AMDGPU_UCODE_ID_RLC_G); + if (r) + return -EINVAL; + } } gfx_v8_0_rlc_start(adev); @@ -3941,6 +3779,11 @@ static int gfx_v8_0_cp_compute_resume(struct amdgpu_device *adev) tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x53); WREG32(mmCP_HQD_PERSISTENT_STATE, tmp); mqd->cp_hqd_persistent_state = tmp; + if (adev->asic_type == CHIP_STONEY) { + tmp = RREG32(mmCP_ME1_PIPE3_INT_CNTL); + tmp = REG_SET_FIELD(tmp, CP_ME1_PIPE3_INT_CNTL, GENERIC2_INT_ENABLE, 1); + WREG32(mmCP_ME1_PIPE3_INT_CNTL, tmp); + } /* activate the queue */ mqd->cp_hqd_active = 1; @@ -3982,35 +3825,37 @@ static int gfx_v8_0_cp_resume(struct amdgpu_device *adev) if (!(adev->flags & AMD_IS_APU)) gfx_v8_0_enable_gui_idle_interrupt(adev, false); - if (!adev->firmware.smu_load) { - /* legacy firmware loading */ - r = gfx_v8_0_cp_gfx_load_microcode(adev); - if (r) - return r; + if (!adev->pp_enabled) { + if (!adev->firmware.smu_load) { + /* legacy firmware loading */ + r = gfx_v8_0_cp_gfx_load_microcode(adev); + if (r) + return r; - r = gfx_v8_0_cp_compute_load_microcode(adev); - if (r) - return r; - } else { - r = adev->smu.smumgr_funcs->check_fw_load_finish(adev, - AMDGPU_UCODE_ID_CP_CE); - if (r) - return -EINVAL; + r = gfx_v8_0_cp_compute_load_microcode(adev); + if (r) + return r; + } else { + r = adev->smu.smumgr_funcs->check_fw_load_finish(adev, + AMDGPU_UCODE_ID_CP_CE); + if (r) + return -EINVAL; - r = adev->smu.smumgr_funcs->check_fw_load_finish(adev, - AMDGPU_UCODE_ID_CP_PFP); - if (r) - return -EINVAL; + r = adev->smu.smumgr_funcs->check_fw_load_finish(adev, + AMDGPU_UCODE_ID_CP_PFP); + if (r) + 
return -EINVAL; - r = adev->smu.smumgr_funcs->check_fw_load_finish(adev, - AMDGPU_UCODE_ID_CP_ME); - if (r) - return -EINVAL; + r = adev->smu.smumgr_funcs->check_fw_load_finish(adev, + AMDGPU_UCODE_ID_CP_ME); + if (r) + return -EINVAL; - r = adev->smu.smumgr_funcs->check_fw_load_finish(adev, - AMDGPU_UCODE_ID_CP_MEC1); - if (r) - return -EINVAL; + r = adev->smu.smumgr_funcs->check_fw_load_finish(adev, + AMDGPU_UCODE_ID_CP_MEC1); + if (r) + return -EINVAL; + } } r = gfx_v8_0_cp_gfx_resume(adev); @@ -4458,15 +4303,261 @@ static int gfx_v8_0_early_init(void *handle) return 0; } +static int gfx_v8_0_late_init(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int r; + + /* requires IBs so do in late init after IB pool is initialized */ + r = gfx_v8_0_do_edc_gpr_workarounds(adev); + if (r) + return r; + + return 0; +} + static int gfx_v8_0_set_powergating_state(void *handle, enum amd_powergating_state state) { return 0; } +static void fiji_send_serdes_cmd(struct amdgpu_device *adev, + uint32_t reg_addr, uint32_t cmd) +{ + uint32_t data; + + gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff); + + WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff); + WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff); + + data = RREG32(mmRLC_SERDES_WR_CTRL); + data &= ~(RLC_SERDES_WR_CTRL__WRITE_COMMAND_MASK | + RLC_SERDES_WR_CTRL__READ_COMMAND_MASK | + RLC_SERDES_WR_CTRL__P1_SELECT_MASK | + RLC_SERDES_WR_CTRL__P2_SELECT_MASK | + RLC_SERDES_WR_CTRL__RDDATA_RESET_MASK | + RLC_SERDES_WR_CTRL__POWER_DOWN_MASK | + RLC_SERDES_WR_CTRL__POWER_UP_MASK | + RLC_SERDES_WR_CTRL__SHORT_FORMAT_MASK | + RLC_SERDES_WR_CTRL__BPM_DATA_MASK | + RLC_SERDES_WR_CTRL__REG_ADDR_MASK | + RLC_SERDES_WR_CTRL__SRBM_OVERRIDE_MASK); + data |= (RLC_SERDES_WR_CTRL__RSVD_BPM_ADDR_MASK | + (cmd << RLC_SERDES_WR_CTRL__BPM_DATA__SHIFT) | + (reg_addr << RLC_SERDES_WR_CTRL__REG_ADDR__SHIFT) | + (0xff << RLC_SERDES_WR_CTRL__BPM_ADDR__SHIFT)); + + WREG32(mmRLC_SERDES_WR_CTRL, data); +} + +static void fiji_update_medium_grain_clock_gating(struct amdgpu_device *adev, + bool enable) +{ + uint32_t temp, data; + + /* It is disabled by HW by default */ + if (enable) { + /* 1 - RLC memory Light sleep */ + temp = data = RREG32(mmRLC_MEM_SLP_CNTL); + data |= RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK; + if (temp != data) + WREG32(mmRLC_MEM_SLP_CNTL, data); + + /* 2 - CP memory Light sleep */ + temp = data = RREG32(mmCP_MEM_SLP_CNTL); + data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK; + if (temp != data) + WREG32(mmCP_MEM_SLP_CNTL, data); + + /* 3 - RLC_CGTT_MGCG_OVERRIDE */ + temp = data = RREG32(mmRLC_CGTT_MGCG_OVERRIDE); + data &= ~(RLC_CGTT_MGCG_OVERRIDE__CPF_MASK | + RLC_CGTT_MGCG_OVERRIDE__RLC_MASK | + RLC_CGTT_MGCG_OVERRIDE__MGCG_MASK | + RLC_CGTT_MGCG_OVERRIDE__GRBM_MASK); + + if (temp != data) + WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data); + + /* 4 - wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */ + gfx_v8_0_wait_for_rlc_serdes(adev); + + /* 5 - clear mgcg override */ + fiji_send_serdes_cmd(adev, BPM_REG_MGCG_OVERRIDE, CLE_BPM_SERDES_CMD); + + /* 6 - Enable CGTS(Tree Shade) MGCG /MGLS */ + temp = data = RREG32(mmCGTS_SM_CTRL_REG); + data &= ~(CGTS_SM_CTRL_REG__SM_MODE_MASK); + data |= (0x2 << CGTS_SM_CTRL_REG__SM_MODE__SHIFT); + data |= CGTS_SM_CTRL_REG__SM_MODE_ENABLE_MASK; + data &= ~CGTS_SM_CTRL_REG__OVERRIDE_MASK; + data &= ~CGTS_SM_CTRL_REG__LS_OVERRIDE_MASK; + data |= CGTS_SM_CTRL_REG__ON_MONITOR_ADD_EN_MASK; + data |= (0x96 << CGTS_SM_CTRL_REG__ON_MONITOR_ADD__SHIFT); + if (temp != data) + 
WREG32(mmCGTS_SM_CTRL_REG, data); + udelay(50); + + /* 7 - wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */ + gfx_v8_0_wait_for_rlc_serdes(adev); + } else { + /* 1 - MGCG_OVERRIDE[0] for CP and MGCG_OVERRIDE[1] for RLC */ + temp = data = RREG32(mmRLC_CGTT_MGCG_OVERRIDE); + data |= (RLC_CGTT_MGCG_OVERRIDE__CPF_MASK | + RLC_CGTT_MGCG_OVERRIDE__RLC_MASK | + RLC_CGTT_MGCG_OVERRIDE__MGCG_MASK | + RLC_CGTT_MGCG_OVERRIDE__GRBM_MASK); + if (temp != data) + WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data); + + /* 2 - disable MGLS in RLC */ + data = RREG32(mmRLC_MEM_SLP_CNTL); + if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) { + data &= ~RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK; + WREG32(mmRLC_MEM_SLP_CNTL, data); + } + + /* 3 - disable MGLS in CP */ + data = RREG32(mmCP_MEM_SLP_CNTL); + if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) { + data &= ~CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK; + WREG32(mmCP_MEM_SLP_CNTL, data); + } + + /* 4 - Disable CGTS(Tree Shade) MGCG and MGLS */ + temp = data = RREG32(mmCGTS_SM_CTRL_REG); + data |= (CGTS_SM_CTRL_REG__OVERRIDE_MASK | + CGTS_SM_CTRL_REG__LS_OVERRIDE_MASK); + if (temp != data) + WREG32(mmCGTS_SM_CTRL_REG, data); + + /* 5 - wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */ + gfx_v8_0_wait_for_rlc_serdes(adev); + + /* 6 - set mgcg override */ + fiji_send_serdes_cmd(adev, BPM_REG_MGCG_OVERRIDE, SET_BPM_SERDES_CMD); + + udelay(50); + + /* 7- wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */ + gfx_v8_0_wait_for_rlc_serdes(adev); + } +} + +static void fiji_update_coarse_grain_clock_gating(struct amdgpu_device *adev, + bool enable) +{ + uint32_t temp, temp1, data, data1; + + temp = data = RREG32(mmRLC_CGCG_CGLS_CTRL); + + if (enable) { + /* 1 enable cntx_empty_int_enable/cntx_busy_int_enable/ + * Cmp_busy/GFX_Idle interrupts + */ + gfx_v8_0_enable_gui_idle_interrupt(adev, true); + + temp1 = data1 = RREG32(mmRLC_CGTT_MGCG_OVERRIDE); + data1 &= ~RLC_CGTT_MGCG_OVERRIDE__CGCG_MASK; + if (temp1 != data1) + WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data1); + + /* 2 wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */ + gfx_v8_0_wait_for_rlc_serdes(adev); + + /* 3 - clear cgcg override */ + fiji_send_serdes_cmd(adev, BPM_REG_CGCG_OVERRIDE, CLE_BPM_SERDES_CMD); + + /* wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */ + gfx_v8_0_wait_for_rlc_serdes(adev); + + /* 4 - write cmd to set CGLS */ + fiji_send_serdes_cmd(adev, BPM_REG_CGLS_EN, SET_BPM_SERDES_CMD); + + /* 5 - enable cgcg */ + data |= RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK; + + /* enable cgls*/ + data |= RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK; + + temp1 = data1 = RREG32(mmRLC_CGTT_MGCG_OVERRIDE); + data1 &= ~RLC_CGTT_MGCG_OVERRIDE__CGLS_MASK; + + if (temp1 != data1) + WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data1); + + if (temp != data) + WREG32(mmRLC_CGCG_CGLS_CTRL, data); + } else { + /* disable cntx_empty_int_enable & GFX Idle interrupt */ + gfx_v8_0_enable_gui_idle_interrupt(adev, false); + + /* TEST CGCG */ + temp1 = data1 = RREG32(mmRLC_CGTT_MGCG_OVERRIDE); + data1 |= (RLC_CGTT_MGCG_OVERRIDE__CGCG_MASK | + RLC_CGTT_MGCG_OVERRIDE__CGLS_MASK); + if (temp1 != data1) + WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data1); + + /* read gfx register to wake up cgcg */ + RREG32(mmCB_CGTT_SCLK_CTRL); + RREG32(mmCB_CGTT_SCLK_CTRL); + RREG32(mmCB_CGTT_SCLK_CTRL); + RREG32(mmCB_CGTT_SCLK_CTRL); + + /* wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */ + gfx_v8_0_wait_for_rlc_serdes(adev); + + /* write cmd to Set CGCG Overrride */ + fiji_send_serdes_cmd(adev, BPM_REG_CGCG_OVERRIDE, 
SET_BPM_SERDES_CMD); + + /* wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */ + gfx_v8_0_wait_for_rlc_serdes(adev); + + /* write cmd to Clear CGLS */ + fiji_send_serdes_cmd(adev, BPM_REG_CGLS_EN, CLE_BPM_SERDES_CMD); + + /* disable cgcg, cgls should be disabled too. */ + data &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK | + RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK); + if (temp != data) + WREG32(mmRLC_CGCG_CGLS_CTRL, data); + } +} +static int fiji_update_gfx_clock_gating(struct amdgpu_device *adev, + bool enable) +{ + if (enable) { + /* CGCG/CGLS should be enabled after MGCG/MGLS/TS(CG/LS) + * === MGCG + MGLS + TS(CG/LS) === + */ + fiji_update_medium_grain_clock_gating(adev, enable); + fiji_update_coarse_grain_clock_gating(adev, enable); + } else { + /* CGCG/CGLS should be disabled before MGCG/MGLS/TS(CG/LS) + * === CGCG + CGLS === + */ + fiji_update_coarse_grain_clock_gating(adev, enable); + fiji_update_medium_grain_clock_gating(adev, enable); + } + return 0; +} + static int gfx_v8_0_set_clockgating_state(void *handle, enum amd_clockgating_state state) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + switch (adev->asic_type) { + case CHIP_FIJI: + fiji_update_gfx_clock_gating(adev, + state == AMD_CG_STATE_GATE ? true : false); + break; + default: + break; + } return 0; } @@ -4627,7 +4718,7 @@ static void gfx_v8_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr, EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) | EVENT_INDEX(5))); amdgpu_ring_write(ring, addr & 0xfffffffc); - amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) | + amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) | DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0)); amdgpu_ring_write(ring, lower_32_bits(seq)); amdgpu_ring_write(ring, upper_32_bits(seq)); @@ -4995,7 +5086,7 @@ static int gfx_v8_0_priv_inst_irq(struct amdgpu_device *adev, const struct amd_ip_funcs gfx_v8_0_ip_funcs = { .early_init = gfx_v8_0_early_init, - .late_init = NULL, + .late_init = gfx_v8_0_late_init, .sw_init = gfx_v8_0_sw_init, .sw_fini = gfx_v8_0_sw_fini, .hw_init = gfx_v8_0_hw_init, diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index ed8abb58a785..3f956065d069 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c @@ -370,6 +370,10 @@ static int gmc_v7_0_mc_init(struct amdgpu_device *adev) adev->mc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL; adev->mc.visible_vram_size = adev->mc.aper_size; + /* In case the PCI BAR is larger than the actual amount of vram */ + if (adev->mc.visible_vram_size > adev->mc.real_vram_size) + adev->mc.visible_vram_size = adev->mc.real_vram_size; + /* unless the user had overridden it, set the gart * size equal to the 1024 or vram, whichever is larger. 
*/ @@ -1012,7 +1016,6 @@ static int gmc_v7_0_suspend(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; if (adev->vm_manager.enabled) { - amdgpu_vm_manager_fini(adev); gmc_v7_0_vm_fini(adev); adev->vm_manager.enabled = false; } diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index d39028440814..c0c9a0101eb4 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -476,6 +476,10 @@ static int gmc_v8_0_mc_init(struct amdgpu_device *adev) adev->mc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL; adev->mc.visible_vram_size = adev->mc.aper_size; + /* In case the PCI BAR is larger than the actual amount of vram */ + if (adev->mc.visible_vram_size > adev->mc.real_vram_size) + adev->mc.visible_vram_size = adev->mc.real_vram_size; + /* unless the user had overridden it, set the gart * size equal to the 1024 or vram, whichever is larger. */ @@ -1033,7 +1037,6 @@ static int gmc_v8_0_suspend(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; if (adev->vm_manager.enabled) { - amdgpu_vm_manager_fini(adev); gmc_v8_0_vm_fini(adev); adev->vm_manager.enabled = false; } @@ -1324,9 +1327,181 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev, return 0; } +static void fiji_update_mc_medium_grain_clock_gating(struct amdgpu_device *adev, + bool enable) +{ + uint32_t data; + + if (enable) { + data = RREG32(mmMC_HUB_MISC_HUB_CG); + data |= MC_HUB_MISC_HUB_CG__ENABLE_MASK; + WREG32(mmMC_HUB_MISC_HUB_CG, data); + + data = RREG32(mmMC_HUB_MISC_SIP_CG); + data |= MC_HUB_MISC_SIP_CG__ENABLE_MASK; + WREG32(mmMC_HUB_MISC_SIP_CG, data); + + data = RREG32(mmMC_HUB_MISC_VM_CG); + data |= MC_HUB_MISC_VM_CG__ENABLE_MASK; + WREG32(mmMC_HUB_MISC_VM_CG, data); + + data = RREG32(mmMC_XPB_CLK_GAT); + data |= MC_XPB_CLK_GAT__ENABLE_MASK; + WREG32(mmMC_XPB_CLK_GAT, data); + + data = RREG32(mmATC_MISC_CG); + data |= ATC_MISC_CG__ENABLE_MASK; + WREG32(mmATC_MISC_CG, data); + + data = RREG32(mmMC_CITF_MISC_WR_CG); + data |= MC_CITF_MISC_WR_CG__ENABLE_MASK; + WREG32(mmMC_CITF_MISC_WR_CG, data); + + data = RREG32(mmMC_CITF_MISC_RD_CG); + data |= MC_CITF_MISC_RD_CG__ENABLE_MASK; + WREG32(mmMC_CITF_MISC_RD_CG, data); + + data = RREG32(mmMC_CITF_MISC_VM_CG); + data |= MC_CITF_MISC_VM_CG__ENABLE_MASK; + WREG32(mmMC_CITF_MISC_VM_CG, data); + + data = RREG32(mmVM_L2_CG); + data |= VM_L2_CG__ENABLE_MASK; + WREG32(mmVM_L2_CG, data); + } else { + data = RREG32(mmMC_HUB_MISC_HUB_CG); + data &= ~MC_HUB_MISC_HUB_CG__ENABLE_MASK; + WREG32(mmMC_HUB_MISC_HUB_CG, data); + + data = RREG32(mmMC_HUB_MISC_SIP_CG); + data &= ~MC_HUB_MISC_SIP_CG__ENABLE_MASK; + WREG32(mmMC_HUB_MISC_SIP_CG, data); + + data = RREG32(mmMC_HUB_MISC_VM_CG); + data &= ~MC_HUB_MISC_VM_CG__ENABLE_MASK; + WREG32(mmMC_HUB_MISC_VM_CG, data); + + data = RREG32(mmMC_XPB_CLK_GAT); + data &= ~MC_XPB_CLK_GAT__ENABLE_MASK; + WREG32(mmMC_XPB_CLK_GAT, data); + + data = RREG32(mmATC_MISC_CG); + data &= ~ATC_MISC_CG__ENABLE_MASK; + WREG32(mmATC_MISC_CG, data); + + data = RREG32(mmMC_CITF_MISC_WR_CG); + data &= ~MC_CITF_MISC_WR_CG__ENABLE_MASK; + WREG32(mmMC_CITF_MISC_WR_CG, data); + + data = RREG32(mmMC_CITF_MISC_RD_CG); + data &= ~MC_CITF_MISC_RD_CG__ENABLE_MASK; + WREG32(mmMC_CITF_MISC_RD_CG, data); + + data = RREG32(mmMC_CITF_MISC_VM_CG); + data &= ~MC_CITF_MISC_VM_CG__ENABLE_MASK; + WREG32(mmMC_CITF_MISC_VM_CG, data); + + data = RREG32(mmVM_L2_CG); + data &= ~VM_L2_CG__ENABLE_MASK; + WREG32(mmVM_L2_CG, data); + } +} + +static void 
fiji_update_mc_light_sleep(struct amdgpu_device *adev, + bool enable) +{ + uint32_t data; + + if (enable) { + data = RREG32(mmMC_HUB_MISC_HUB_CG); + data |= MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK; + WREG32(mmMC_HUB_MISC_HUB_CG, data); + + data = RREG32(mmMC_HUB_MISC_SIP_CG); + data |= MC_HUB_MISC_SIP_CG__MEM_LS_ENABLE_MASK; + WREG32(mmMC_HUB_MISC_SIP_CG, data); + + data = RREG32(mmMC_HUB_MISC_VM_CG); + data |= MC_HUB_MISC_VM_CG__MEM_LS_ENABLE_MASK; + WREG32(mmMC_HUB_MISC_VM_CG, data); + + data = RREG32(mmMC_XPB_CLK_GAT); + data |= MC_XPB_CLK_GAT__MEM_LS_ENABLE_MASK; + WREG32(mmMC_XPB_CLK_GAT, data); + + data = RREG32(mmATC_MISC_CG); + data |= ATC_MISC_CG__MEM_LS_ENABLE_MASK; + WREG32(mmATC_MISC_CG, data); + + data = RREG32(mmMC_CITF_MISC_WR_CG); + data |= MC_CITF_MISC_WR_CG__MEM_LS_ENABLE_MASK; + WREG32(mmMC_CITF_MISC_WR_CG, data); + + data = RREG32(mmMC_CITF_MISC_RD_CG); + data |= MC_CITF_MISC_RD_CG__MEM_LS_ENABLE_MASK; + WREG32(mmMC_CITF_MISC_RD_CG, data); + + data = RREG32(mmMC_CITF_MISC_VM_CG); + data |= MC_CITF_MISC_VM_CG__MEM_LS_ENABLE_MASK; + WREG32(mmMC_CITF_MISC_VM_CG, data); + + data = RREG32(mmVM_L2_CG); + data |= VM_L2_CG__MEM_LS_ENABLE_MASK; + WREG32(mmVM_L2_CG, data); + } else { + data = RREG32(mmMC_HUB_MISC_HUB_CG); + data &= ~MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK; + WREG32(mmMC_HUB_MISC_HUB_CG, data); + + data = RREG32(mmMC_HUB_MISC_SIP_CG); + data &= ~MC_HUB_MISC_SIP_CG__MEM_LS_ENABLE_MASK; + WREG32(mmMC_HUB_MISC_SIP_CG, data); + + data = RREG32(mmMC_HUB_MISC_VM_CG); + data &= ~MC_HUB_MISC_VM_CG__MEM_LS_ENABLE_MASK; + WREG32(mmMC_HUB_MISC_VM_CG, data); + + data = RREG32(mmMC_XPB_CLK_GAT); + data &= ~MC_XPB_CLK_GAT__MEM_LS_ENABLE_MASK; + WREG32(mmMC_XPB_CLK_GAT, data); + + data = RREG32(mmATC_MISC_CG); + data &= ~ATC_MISC_CG__MEM_LS_ENABLE_MASK; + WREG32(mmATC_MISC_CG, data); + + data = RREG32(mmMC_CITF_MISC_WR_CG); + data &= ~MC_CITF_MISC_WR_CG__MEM_LS_ENABLE_MASK; + WREG32(mmMC_CITF_MISC_WR_CG, data); + + data = RREG32(mmMC_CITF_MISC_RD_CG); + data &= ~MC_CITF_MISC_RD_CG__MEM_LS_ENABLE_MASK; + WREG32(mmMC_CITF_MISC_RD_CG, data); + + data = RREG32(mmMC_CITF_MISC_VM_CG); + data &= ~MC_CITF_MISC_VM_CG__MEM_LS_ENABLE_MASK; + WREG32(mmMC_CITF_MISC_VM_CG, data); + + data = RREG32(mmVM_L2_CG); + data &= ~VM_L2_CG__MEM_LS_ENABLE_MASK; + WREG32(mmVM_L2_CG, data); + } +} + static int gmc_v8_0_set_clockgating_state(void *handle, enum amd_clockgating_state state) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + switch (adev->asic_type) { + case CHIP_FIJI: + fiji_update_mc_medium_grain_clock_gating(adev, + state == AMD_CG_STATE_GATE ? true : false); + fiji_update_mc_light_sleep(adev, + state == AMD_CG_STATE_GATE ? 
true : false); + break; + default: + break; + } return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c index 779532d350ff..679e7394a495 100644 --- a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c @@ -253,8 +253,14 @@ static void iceland_ih_set_rptr(struct amdgpu_device *adev) static int iceland_ih_early_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int ret; + + ret = amdgpu_irq_add_domain(adev); + if (ret) + return ret; iceland_ih_set_interrupt_funcs(adev); + return 0; } @@ -278,6 +284,7 @@ static int iceland_ih_sw_fini(void *handle) amdgpu_irq_fini(adev); amdgpu_ih_ring_fini(adev); + amdgpu_irq_remove_domain(adev); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c index 7253132f04b8..ad54c46751b0 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c @@ -727,18 +727,20 @@ static int sdma_v3_0_start(struct amdgpu_device *adev) { int r, i; - if (!adev->firmware.smu_load) { - r = sdma_v3_0_load_microcode(adev); - if (r) - return r; - } else { - for (i = 0; i < adev->sdma.num_instances; i++) { - r = adev->smu.smumgr_funcs->check_fw_load_finish(adev, - (i == 0) ? - AMDGPU_UCODE_ID_SDMA0 : - AMDGPU_UCODE_ID_SDMA1); + if (!adev->pp_enabled) { + if (!adev->firmware.smu_load) { + r = sdma_v3_0_load_microcode(adev); if (r) - return -EINVAL; + return r; + } else { + for (i = 0; i < adev->sdma.num_instances; i++) { + r = adev->smu.smumgr_funcs->check_fw_load_finish(adev, + (i == 0) ? + AMDGPU_UCODE_ID_SDMA0 : + AMDGPU_UCODE_ID_SDMA1); + if (r) + return -EINVAL; + } } } @@ -1427,9 +1429,114 @@ static int sdma_v3_0_process_illegal_inst_irq(struct amdgpu_device *adev, return 0; } +static void fiji_update_sdma_medium_grain_clock_gating( + struct amdgpu_device *adev, + bool enable) +{ + uint32_t temp, data; + + if (enable) { + temp = data = RREG32(mmSDMA0_CLK_CTRL); + data &= ~(SDMA0_CLK_CTRL__SOFT_OVERRIDE7_MASK | + SDMA0_CLK_CTRL__SOFT_OVERRIDE6_MASK | + SDMA0_CLK_CTRL__SOFT_OVERRIDE5_MASK | + SDMA0_CLK_CTRL__SOFT_OVERRIDE4_MASK | + SDMA0_CLK_CTRL__SOFT_OVERRIDE3_MASK | + SDMA0_CLK_CTRL__SOFT_OVERRIDE2_MASK | + SDMA0_CLK_CTRL__SOFT_OVERRIDE1_MASK | + SDMA0_CLK_CTRL__SOFT_OVERRIDE0_MASK); + if (data != temp) + WREG32(mmSDMA0_CLK_CTRL, data); + + temp = data = RREG32(mmSDMA1_CLK_CTRL); + data &= ~(SDMA1_CLK_CTRL__SOFT_OVERRIDE7_MASK | + SDMA1_CLK_CTRL__SOFT_OVERRIDE6_MASK | + SDMA1_CLK_CTRL__SOFT_OVERRIDE5_MASK | + SDMA1_CLK_CTRL__SOFT_OVERRIDE4_MASK | + SDMA1_CLK_CTRL__SOFT_OVERRIDE3_MASK | + SDMA1_CLK_CTRL__SOFT_OVERRIDE2_MASK | + SDMA1_CLK_CTRL__SOFT_OVERRIDE1_MASK | + SDMA1_CLK_CTRL__SOFT_OVERRIDE0_MASK); + + if (data != temp) + WREG32(mmSDMA1_CLK_CTRL, data); + } else { + temp = data = RREG32(mmSDMA0_CLK_CTRL); + data |= SDMA0_CLK_CTRL__SOFT_OVERRIDE7_MASK | + SDMA0_CLK_CTRL__SOFT_OVERRIDE6_MASK | + SDMA0_CLK_CTRL__SOFT_OVERRIDE5_MASK | + SDMA0_CLK_CTRL__SOFT_OVERRIDE4_MASK | + SDMA0_CLK_CTRL__SOFT_OVERRIDE3_MASK | + SDMA0_CLK_CTRL__SOFT_OVERRIDE2_MASK | + SDMA0_CLK_CTRL__SOFT_OVERRIDE1_MASK | + SDMA0_CLK_CTRL__SOFT_OVERRIDE0_MASK; + + if (data != temp) + WREG32(mmSDMA0_CLK_CTRL, data); + + temp = data = RREG32(mmSDMA1_CLK_CTRL); + data |= SDMA1_CLK_CTRL__SOFT_OVERRIDE7_MASK | + SDMA1_CLK_CTRL__SOFT_OVERRIDE6_MASK | + SDMA1_CLK_CTRL__SOFT_OVERRIDE5_MASK | + SDMA1_CLK_CTRL__SOFT_OVERRIDE4_MASK | + SDMA1_CLK_CTRL__SOFT_OVERRIDE3_MASK | + SDMA1_CLK_CTRL__SOFT_OVERRIDE2_MASK | + 
SDMA1_CLK_CTRL__SOFT_OVERRIDE1_MASK | + SDMA1_CLK_CTRL__SOFT_OVERRIDE0_MASK; + + if (data != temp) + WREG32(mmSDMA1_CLK_CTRL, data); + } +} + +static void fiji_update_sdma_medium_grain_light_sleep( + struct amdgpu_device *adev, + bool enable) +{ + uint32_t temp, data; + + if (enable) { + temp = data = RREG32(mmSDMA0_POWER_CNTL); + data |= SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK; + + if (temp != data) + WREG32(mmSDMA0_POWER_CNTL, data); + + temp = data = RREG32(mmSDMA1_POWER_CNTL); + data |= SDMA1_POWER_CNTL__MEM_POWER_OVERRIDE_MASK; + + if (temp != data) + WREG32(mmSDMA1_POWER_CNTL, data); + } else { + temp = data = RREG32(mmSDMA0_POWER_CNTL); + data &= ~SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK; + + if (temp != data) + WREG32(mmSDMA0_POWER_CNTL, data); + + temp = data = RREG32(mmSDMA1_POWER_CNTL); + data &= ~SDMA1_POWER_CNTL__MEM_POWER_OVERRIDE_MASK; + + if (temp != data) + WREG32(mmSDMA1_POWER_CNTL, data); + } +} + static int sdma_v3_0_set_clockgating_state(void *handle, enum amd_clockgating_state state) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + switch (adev->asic_type) { + case CHIP_FIJI: + fiji_update_sdma_medium_grain_clock_gating(adev, + state == AMD_CG_STATE_GATE ? true : false); + fiji_update_sdma_medium_grain_light_sleep(adev, + state == AMD_CG_STATE_GATE ? true : false); + break; + default: + break; + } return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c b/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c index 204903897b4f..f4a1346525fe 100644 --- a/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c @@ -24,7 +24,7 @@ #include #include "drmP.h" #include "amdgpu.h" -#include "tonga_smumgr.h" +#include "tonga_smum.h" MODULE_FIRMWARE("amdgpu/tonga_smc.bin"); diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c index 743c372837aa..b6f7d7bff929 100644 --- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c @@ -273,8 +273,14 @@ static void tonga_ih_set_rptr(struct amdgpu_device *adev) static int tonga_ih_early_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int ret; + + ret = amdgpu_irq_add_domain(adev); + if (ret) + return ret; tonga_ih_set_interrupt_funcs(adev); + return 0; } @@ -301,6 +307,7 @@ static int tonga_ih_sw_fini(void *handle) amdgpu_irq_fini(adev); amdgpu_ih_ring_fini(adev); + amdgpu_irq_add_domain(adev); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ppsmc.h b/drivers/gpu/drm/amd/amdgpu/tonga_ppsmc.h deleted file mode 100644 index 811781f69482..000000000000 --- a/drivers/gpu/drm/amd/amdgpu/tonga_ppsmc.h +++ /dev/null @@ -1,198 +0,0 @@ -/* - * Copyright 2014 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - */ - -#ifndef TONGA_PP_SMC_H -#define TONGA_PP_SMC_H - -#pragma pack(push, 1) - -#define PPSMC_SWSTATE_FLAG_DC 0x01 -#define PPSMC_SWSTATE_FLAG_UVD 0x02 -#define PPSMC_SWSTATE_FLAG_VCE 0x04 -#define PPSMC_SWSTATE_FLAG_PCIE_X1 0x08 - -#define PPSMC_THERMAL_PROTECT_TYPE_INTERNAL 0x00 -#define PPSMC_THERMAL_PROTECT_TYPE_EXTERNAL 0x01 -#define PPSMC_THERMAL_PROTECT_TYPE_NONE 0xff - -#define PPSMC_SYSTEMFLAG_GPIO_DC 0x01 -#define PPSMC_SYSTEMFLAG_STEPVDDC 0x02 -#define PPSMC_SYSTEMFLAG_GDDR5 0x04 - -#define PPSMC_SYSTEMFLAG_DISABLE_BABYSTEP 0x08 - -#define PPSMC_SYSTEMFLAG_REGULATOR_HOT 0x10 -#define PPSMC_SYSTEMFLAG_REGULATOR_HOT_ANALOG 0x20 -#define PPSMC_SYSTEMFLAG_12CHANNEL 0x40 - -#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_MASK 0x07 -#define PPSMC_EXTRAFLAGS_AC2DC_DONT_WAIT_FOR_VBLANK 0x08 - -#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_GOTODPMLOWSTATE 0x00 -#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_GOTOINITIALSTATE 0x01 - -#define PPSMC_EXTRAFLAGS_AC2DC_GPIO5_POLARITY_HIGH 0x10 -#define PPSMC_EXTRAFLAGS_DRIVER_TO_GPIO17 0x20 -#define PPSMC_EXTRAFLAGS_PCC_TO_GPIO17 0x40 - -#define PPSMC_DPM2FLAGS_TDPCLMP 0x01 -#define PPSMC_DPM2FLAGS_PWRSHFT 0x02 -#define PPSMC_DPM2FLAGS_OCP 0x04 - -#define PPSMC_DISPLAY_WATERMARK_LOW 0 -#define PPSMC_DISPLAY_WATERMARK_HIGH 1 - -#define PPSMC_STATEFLAG_AUTO_PULSE_SKIP 0x01 -#define PPSMC_STATEFLAG_POWERBOOST 0x02 -#define PPSMC_STATEFLAG_PSKIP_ON_TDP_FAULT 0x04 -#define PPSMC_STATEFLAG_POWERSHIFT 0x08 -#define PPSMC_STATEFLAG_SLOW_READ_MARGIN 0x10 -#define PPSMC_STATEFLAG_DEEPSLEEP_THROTTLE 0x20 -#define PPSMC_STATEFLAG_DEEPSLEEP_BYPASS 0x40 - -#define FDO_MODE_HARDWARE 0 -#define FDO_MODE_PIECE_WISE_LINEAR 1 - -enum FAN_CONTROL { - FAN_CONTROL_FUZZY, - FAN_CONTROL_TABLE -}; - -#define PPSMC_Result_OK ((uint16_t)0x01) -#define PPSMC_Result_NoMore ((uint16_t)0x02) -#define PPSMC_Result_NotNow ((uint16_t)0x03) -#define PPSMC_Result_Failed ((uint16_t)0xFF) -#define PPSMC_Result_UnknownCmd ((uint16_t)0xFE) -#define PPSMC_Result_UnknownVT ((uint16_t)0xFD) - -typedef uint16_t PPSMC_Result; - -#define PPSMC_isERROR(x) ((uint16_t)0x80 & (x)) - -#define PPSMC_MSG_Halt ((uint16_t)0x10) -#define PPSMC_MSG_Resume ((uint16_t)0x11) -#define PPSMC_MSG_EnableDPMLevel ((uint16_t)0x12) -#define PPSMC_MSG_ZeroLevelsDisabled ((uint16_t)0x13) -#define PPSMC_MSG_OneLevelsDisabled ((uint16_t)0x14) -#define PPSMC_MSG_TwoLevelsDisabled ((uint16_t)0x15) -#define PPSMC_MSG_EnableThermalInterrupt ((uint16_t)0x16) -#define PPSMC_MSG_RunningOnAC ((uint16_t)0x17) -#define PPSMC_MSG_LevelUp ((uint16_t)0x18) -#define PPSMC_MSG_LevelDown ((uint16_t)0x19) -#define PPSMC_MSG_ResetDPMCounters ((uint16_t)0x1a) -#define PPSMC_MSG_SwitchToSwState ((uint16_t)0x20) -#define PPSMC_MSG_SwitchToSwStateLast ((uint16_t)0x3f) -#define PPSMC_MSG_SwitchToInitialState ((uint16_t)0x40) -#define PPSMC_MSG_NoForcedLevel ((uint16_t)0x41) -#define PPSMC_MSG_ForceHigh ((uint16_t)0x42) -#define PPSMC_MSG_ForceMediumOrHigh ((uint16_t)0x43) -#define PPSMC_MSG_SwitchToMinimumPower ((uint16_t)0x51) -#define PPSMC_MSG_ResumeFromMinimumPower ((uint16_t)0x52) -#define PPSMC_MSG_EnableCac ((uint16_t)0x53) -#define PPSMC_MSG_DisableCac ((uint16_t)0x54) -#define PPSMC_DPMStateHistoryStart ((uint16_t)0x55) -#define PPSMC_DPMStateHistoryStop ((uint16_t)0x56) 
-#define PPSMC_CACHistoryStart ((uint16_t)0x57) -#define PPSMC_CACHistoryStop ((uint16_t)0x58) -#define PPSMC_TDPClampingActive ((uint16_t)0x59) -#define PPSMC_TDPClampingInactive ((uint16_t)0x5A) -#define PPSMC_StartFanControl ((uint16_t)0x5B) -#define PPSMC_StopFanControl ((uint16_t)0x5C) -#define PPSMC_NoDisplay ((uint16_t)0x5D) -#define PPSMC_HasDisplay ((uint16_t)0x5E) -#define PPSMC_MSG_UVDPowerOFF ((uint16_t)0x60) -#define PPSMC_MSG_UVDPowerON ((uint16_t)0x61) -#define PPSMC_MSG_EnableULV ((uint16_t)0x62) -#define PPSMC_MSG_DisableULV ((uint16_t)0x63) -#define PPSMC_MSG_EnterULV ((uint16_t)0x64) -#define PPSMC_MSG_ExitULV ((uint16_t)0x65) -#define PPSMC_PowerShiftActive ((uint16_t)0x6A) -#define PPSMC_PowerShiftInactive ((uint16_t)0x6B) -#define PPSMC_OCPActive ((uint16_t)0x6C) -#define PPSMC_OCPInactive ((uint16_t)0x6D) -#define PPSMC_CACLongTermAvgEnable ((uint16_t)0x6E) -#define PPSMC_CACLongTermAvgDisable ((uint16_t)0x6F) -#define PPSMC_MSG_InferredStateSweep_Start ((uint16_t)0x70) -#define PPSMC_MSG_InferredStateSweep_Stop ((uint16_t)0x71) -#define PPSMC_MSG_SwitchToLowestInfState ((uint16_t)0x72) -#define PPSMC_MSG_SwitchToNonInfState ((uint16_t)0x73) -#define PPSMC_MSG_AllStateSweep_Start ((uint16_t)0x74) -#define PPSMC_MSG_AllStateSweep_Stop ((uint16_t)0x75) -#define PPSMC_MSG_SwitchNextLowerInfState ((uint16_t)0x76) -#define PPSMC_MSG_SwitchNextHigherInfState ((uint16_t)0x77) -#define PPSMC_MSG_MclkRetrainingTest ((uint16_t)0x78) -#define PPSMC_MSG_ForceTDPClamping ((uint16_t)0x79) -#define PPSMC_MSG_CollectCAC_PowerCorreln ((uint16_t)0x7A) -#define PPSMC_MSG_CollectCAC_WeightCalib ((uint16_t)0x7B) -#define PPSMC_MSG_CollectCAC_SQonly ((uint16_t)0x7C) -#define PPSMC_MSG_CollectCAC_TemperaturePwr ((uint16_t)0x7D) -#define PPSMC_MSG_ExtremitiesTest_Start ((uint16_t)0x7E) -#define PPSMC_MSG_ExtremitiesTest_Stop ((uint16_t)0x7F) -#define PPSMC_FlushDataCache ((uint16_t)0x80) -#define PPSMC_FlushInstrCache ((uint16_t)0x81) -#define PPSMC_MSG_SetEnabledLevels ((uint16_t)0x82) -#define PPSMC_MSG_SetForcedLevels ((uint16_t)0x83) -#define PPSMC_MSG_ResetToDefaults ((uint16_t)0x84) -#define PPSMC_MSG_SetForcedLevelsAndJump ((uint16_t)0x85) -#define PPSMC_MSG_SetCACHistoryMode ((uint16_t)0x86) -#define PPSMC_MSG_EnableDTE ((uint16_t)0x87) -#define PPSMC_MSG_DisableDTE ((uint16_t)0x88) -#define PPSMC_MSG_SmcSpaceSetAddress ((uint16_t)0x89) -#define PPSMC_MSG_SmcSpaceWriteDWordInc ((uint16_t)0x8A) -#define PPSMC_MSG_SmcSpaceWriteWordInc ((uint16_t)0x8B) -#define PPSMC_MSG_SmcSpaceWriteByteInc ((uint16_t)0x8C) -#define PPSMC_MSG_ChangeNearTDPLimit ((uint16_t)0x90) -#define PPSMC_MSG_ChangeSafePowerLimit ((uint16_t)0x91) -#define PPSMC_MSG_DPMStateSweepStart ((uint16_t)0x92) -#define PPSMC_MSG_DPMStateSweepStop ((uint16_t)0x93) -#define PPSMC_MSG_OVRDDisableSCLKDS ((uint16_t)0x94) -#define PPSMC_MSG_CancelDisableOVRDSCLKDS ((uint16_t)0x95) -#define PPSMC_MSG_ThrottleOVRDSCLKDS ((uint16_t)0x96) -#define PPSMC_MSG_CancelThrottleOVRDSCLKDS ((uint16_t)0x97) -#define PPSMC_MSG_GPIO17 ((uint16_t)0x98) -#define PPSMC_MSG_API_SetSvi2Volt_Vddc ((uint16_t)0x99) -#define PPSMC_MSG_API_SetSvi2Volt_Vddci ((uint16_t)0x9A) -#define PPSMC_MSG_API_SetSvi2Volt_Mvdd ((uint16_t)0x9B) -#define PPSMC_MSG_API_GetSvi2Volt_Vddc ((uint16_t)0x9C) -#define PPSMC_MSG_API_GetSvi2Volt_Vddci ((uint16_t)0x9D) -#define PPSMC_MSG_API_GetSvi2Volt_Mvdd ((uint16_t)0x9E) - -#define PPSMC_MSG_BREAK ((uint16_t)0xF8) - -#define PPSMC_MSG_Test ((uint16_t)0x100) -#define PPSMC_MSG_DRV_DRAM_ADDR_HI ((uint16_t)0x250) -#define 
PPSMC_MSG_DRV_DRAM_ADDR_LO ((uint16_t)0x251) -#define PPSMC_MSG_SMU_DRAM_ADDR_HI ((uint16_t)0x252) -#define PPSMC_MSG_SMU_DRAM_ADDR_LO ((uint16_t)0x253) -#define PPSMC_MSG_LoadUcodes ((uint16_t)0x254) - -typedef uint16_t PPSMC_Msg; - -#define PPSMC_EVENT_STATUS_THERMAL 0x00000001 -#define PPSMC_EVENT_STATUS_REGULATORHOT 0x00000002 -#define PPSMC_EVENT_STATUS_DC 0x00000004 -#define PPSMC_EVENT_STATUS_GPIO17 0x00000008 - -#pragma pack(pop) - -#endif diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_smc.c b/drivers/gpu/drm/amd/amdgpu/tonga_smc.c index 5421309c1862..361c49a82323 100644 --- a/drivers/gpu/drm/amd/amdgpu/tonga_smc.c +++ b/drivers/gpu/drm/amd/amdgpu/tonga_smc.c @@ -25,7 +25,7 @@ #include "drmP.h" #include "amdgpu.h" #include "tonga_ppsmc.h" -#include "tonga_smumgr.h" +#include "tonga_smum.h" #include "smu_ucode_xfer_vi.h" #include "amdgpu_ucode.h" diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_smumgr.h b/drivers/gpu/drm/amd/amdgpu/tonga_smum.h similarity index 100% rename from drivers/gpu/drm/amd/amdgpu/tonga_smumgr.h rename to drivers/gpu/drm/amd/amdgpu/tonga_smum.h diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c index 121915bbc3b6..3d5913926436 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c @@ -279,6 +279,234 @@ static void uvd_v6_0_mc_resume(struct amdgpu_device *adev) WREG32(mmUVD_VCPU_CACHE_SIZE2, size); } +static void cz_set_uvd_clock_gating_branches(struct amdgpu_device *adev, + bool enable) +{ + u32 data, data1; + + data = RREG32(mmUVD_CGC_GATE); + data1 = RREG32(mmUVD_SUVD_CGC_GATE); + if (enable) { + data |= UVD_CGC_GATE__SYS_MASK | + UVD_CGC_GATE__UDEC_MASK | + UVD_CGC_GATE__MPEG2_MASK | + UVD_CGC_GATE__RBC_MASK | + UVD_CGC_GATE__LMI_MC_MASK | + UVD_CGC_GATE__IDCT_MASK | + UVD_CGC_GATE__MPRD_MASK | + UVD_CGC_GATE__MPC_MASK | + UVD_CGC_GATE__LBSI_MASK | + UVD_CGC_GATE__LRBBM_MASK | + UVD_CGC_GATE__UDEC_RE_MASK | + UVD_CGC_GATE__UDEC_CM_MASK | + UVD_CGC_GATE__UDEC_IT_MASK | + UVD_CGC_GATE__UDEC_DB_MASK | + UVD_CGC_GATE__UDEC_MP_MASK | + UVD_CGC_GATE__WCB_MASK | + UVD_CGC_GATE__VCPU_MASK | + UVD_CGC_GATE__SCPU_MASK; + data1 |= UVD_SUVD_CGC_GATE__SRE_MASK | + UVD_SUVD_CGC_GATE__SIT_MASK | + UVD_SUVD_CGC_GATE__SMP_MASK | + UVD_SUVD_CGC_GATE__SCM_MASK | + UVD_SUVD_CGC_GATE__SDB_MASK | + UVD_SUVD_CGC_GATE__SRE_H264_MASK | + UVD_SUVD_CGC_GATE__SRE_HEVC_MASK | + UVD_SUVD_CGC_GATE__SIT_H264_MASK | + UVD_SUVD_CGC_GATE__SIT_HEVC_MASK | + UVD_SUVD_CGC_GATE__SCM_H264_MASK | + UVD_SUVD_CGC_GATE__SCM_HEVC_MASK | + UVD_SUVD_CGC_GATE__SDB_H264_MASK | + UVD_SUVD_CGC_GATE__SDB_HEVC_MASK; + } else { + data &= ~(UVD_CGC_GATE__SYS_MASK | + UVD_CGC_GATE__UDEC_MASK | + UVD_CGC_GATE__MPEG2_MASK | + UVD_CGC_GATE__RBC_MASK | + UVD_CGC_GATE__LMI_MC_MASK | + UVD_CGC_GATE__LMI_UMC_MASK | + UVD_CGC_GATE__IDCT_MASK | + UVD_CGC_GATE__MPRD_MASK | + UVD_CGC_GATE__MPC_MASK | + UVD_CGC_GATE__LBSI_MASK | + UVD_CGC_GATE__LRBBM_MASK | + UVD_CGC_GATE__UDEC_RE_MASK | + UVD_CGC_GATE__UDEC_CM_MASK | + UVD_CGC_GATE__UDEC_IT_MASK | + UVD_CGC_GATE__UDEC_DB_MASK | + UVD_CGC_GATE__UDEC_MP_MASK | + UVD_CGC_GATE__WCB_MASK | + UVD_CGC_GATE__VCPU_MASK | + UVD_CGC_GATE__SCPU_MASK); + data1 &= ~(UVD_SUVD_CGC_GATE__SRE_MASK | + UVD_SUVD_CGC_GATE__SIT_MASK | + UVD_SUVD_CGC_GATE__SMP_MASK | + UVD_SUVD_CGC_GATE__SCM_MASK | + UVD_SUVD_CGC_GATE__SDB_MASK | + UVD_SUVD_CGC_GATE__SRE_H264_MASK | + UVD_SUVD_CGC_GATE__SRE_HEVC_MASK | + UVD_SUVD_CGC_GATE__SIT_H264_MASK | + UVD_SUVD_CGC_GATE__SIT_HEVC_MASK | + UVD_SUVD_CGC_GATE__SCM_H264_MASK | + 
UVD_SUVD_CGC_GATE__SCM_HEVC_MASK | + UVD_SUVD_CGC_GATE__SDB_H264_MASK | + UVD_SUVD_CGC_GATE__SDB_HEVC_MASK); + } + WREG32(mmUVD_CGC_GATE, data); + WREG32(mmUVD_SUVD_CGC_GATE, data1); +} + +static void tonga_set_uvd_clock_gating_branches(struct amdgpu_device *adev, + bool enable) +{ + u32 data, data1; + + data = RREG32(mmUVD_CGC_GATE); + data1 = RREG32(mmUVD_SUVD_CGC_GATE); + if (enable) { + data |= UVD_CGC_GATE__SYS_MASK | + UVD_CGC_GATE__UDEC_MASK | + UVD_CGC_GATE__MPEG2_MASK | + UVD_CGC_GATE__RBC_MASK | + UVD_CGC_GATE__LMI_MC_MASK | + UVD_CGC_GATE__IDCT_MASK | + UVD_CGC_GATE__MPRD_MASK | + UVD_CGC_GATE__MPC_MASK | + UVD_CGC_GATE__LBSI_MASK | + UVD_CGC_GATE__LRBBM_MASK | + UVD_CGC_GATE__UDEC_RE_MASK | + UVD_CGC_GATE__UDEC_CM_MASK | + UVD_CGC_GATE__UDEC_IT_MASK | + UVD_CGC_GATE__UDEC_DB_MASK | + UVD_CGC_GATE__UDEC_MP_MASK | + UVD_CGC_GATE__WCB_MASK | + UVD_CGC_GATE__VCPU_MASK | + UVD_CGC_GATE__SCPU_MASK; + data1 |= UVD_SUVD_CGC_GATE__SRE_MASK | + UVD_SUVD_CGC_GATE__SIT_MASK | + UVD_SUVD_CGC_GATE__SMP_MASK | + UVD_SUVD_CGC_GATE__SCM_MASK | + UVD_SUVD_CGC_GATE__SDB_MASK; + } else { + data &= ~(UVD_CGC_GATE__SYS_MASK | + UVD_CGC_GATE__UDEC_MASK | + UVD_CGC_GATE__MPEG2_MASK | + UVD_CGC_GATE__RBC_MASK | + UVD_CGC_GATE__LMI_MC_MASK | + UVD_CGC_GATE__LMI_UMC_MASK | + UVD_CGC_GATE__IDCT_MASK | + UVD_CGC_GATE__MPRD_MASK | + UVD_CGC_GATE__MPC_MASK | + UVD_CGC_GATE__LBSI_MASK | + UVD_CGC_GATE__LRBBM_MASK | + UVD_CGC_GATE__UDEC_RE_MASK | + UVD_CGC_GATE__UDEC_CM_MASK | + UVD_CGC_GATE__UDEC_IT_MASK | + UVD_CGC_GATE__UDEC_DB_MASK | + UVD_CGC_GATE__UDEC_MP_MASK | + UVD_CGC_GATE__WCB_MASK | + UVD_CGC_GATE__VCPU_MASK | + UVD_CGC_GATE__SCPU_MASK); + data1 &= ~(UVD_SUVD_CGC_GATE__SRE_MASK | + UVD_SUVD_CGC_GATE__SIT_MASK | + UVD_SUVD_CGC_GATE__SMP_MASK | + UVD_SUVD_CGC_GATE__SCM_MASK | + UVD_SUVD_CGC_GATE__SDB_MASK); + } + WREG32(mmUVD_CGC_GATE, data); + WREG32(mmUVD_SUVD_CGC_GATE, data1); +} + +static void uvd_v6_0_set_uvd_dynamic_clock_mode(struct amdgpu_device *adev, + bool swmode) +{ + u32 data, data1 = 0, data2; + + /* Always un-gate UVD REGS bit */ + data = RREG32(mmUVD_CGC_GATE); + data &= ~(UVD_CGC_GATE__REGS_MASK); + WREG32(mmUVD_CGC_GATE, data); + + data = RREG32(mmUVD_CGC_CTRL); + data &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK | + UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK); + data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK | + 1 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_GATE_DLY_TIMER) | + 4 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_OFF_DELAY); + + data2 = RREG32(mmUVD_SUVD_CGC_CTRL); + if (swmode) { + data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK | + UVD_CGC_CTRL__UDEC_CM_MODE_MASK | + UVD_CGC_CTRL__UDEC_IT_MODE_MASK | + UVD_CGC_CTRL__UDEC_DB_MODE_MASK | + UVD_CGC_CTRL__UDEC_MP_MODE_MASK | + UVD_CGC_CTRL__SYS_MODE_MASK | + UVD_CGC_CTRL__UDEC_MODE_MASK | + UVD_CGC_CTRL__MPEG2_MODE_MASK | + UVD_CGC_CTRL__REGS_MODE_MASK | + UVD_CGC_CTRL__RBC_MODE_MASK | + UVD_CGC_CTRL__LMI_MC_MODE_MASK | + UVD_CGC_CTRL__LMI_UMC_MODE_MASK | + UVD_CGC_CTRL__IDCT_MODE_MASK | + UVD_CGC_CTRL__MPRD_MODE_MASK | + UVD_CGC_CTRL__MPC_MODE_MASK | + UVD_CGC_CTRL__LBSI_MODE_MASK | + UVD_CGC_CTRL__LRBBM_MODE_MASK | + UVD_CGC_CTRL__WCB_MODE_MASK | + UVD_CGC_CTRL__VCPU_MODE_MASK | + UVD_CGC_CTRL__JPEG_MODE_MASK | + UVD_CGC_CTRL__SCPU_MODE_MASK); + data1 |= UVD_CGC_CTRL2__DYN_OCLK_RAMP_EN_MASK | + UVD_CGC_CTRL2__DYN_RCLK_RAMP_EN_MASK; + data1 &= ~UVD_CGC_CTRL2__GATER_DIV_ID_MASK; + data1 |= 7 << REG_FIELD_SHIFT(UVD_CGC_CTRL2, GATER_DIV_ID); + data2 &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK | + UVD_SUVD_CGC_CTRL__SIT_MODE_MASK | + UVD_SUVD_CGC_CTRL__SMP_MODE_MASK | + 
UVD_SUVD_CGC_CTRL__SCM_MODE_MASK | + UVD_SUVD_CGC_CTRL__SDB_MODE_MASK); + } else { + data |= UVD_CGC_CTRL__UDEC_RE_MODE_MASK | + UVD_CGC_CTRL__UDEC_CM_MODE_MASK | + UVD_CGC_CTRL__UDEC_IT_MODE_MASK | + UVD_CGC_CTRL__UDEC_DB_MODE_MASK | + UVD_CGC_CTRL__UDEC_MP_MODE_MASK | + UVD_CGC_CTRL__SYS_MODE_MASK | + UVD_CGC_CTRL__UDEC_MODE_MASK | + UVD_CGC_CTRL__MPEG2_MODE_MASK | + UVD_CGC_CTRL__REGS_MODE_MASK | + UVD_CGC_CTRL__RBC_MODE_MASK | + UVD_CGC_CTRL__LMI_MC_MODE_MASK | + UVD_CGC_CTRL__LMI_UMC_MODE_MASK | + UVD_CGC_CTRL__IDCT_MODE_MASK | + UVD_CGC_CTRL__MPRD_MODE_MASK | + UVD_CGC_CTRL__MPC_MODE_MASK | + UVD_CGC_CTRL__LBSI_MODE_MASK | + UVD_CGC_CTRL__LRBBM_MODE_MASK | + UVD_CGC_CTRL__WCB_MODE_MASK | + UVD_CGC_CTRL__VCPU_MODE_MASK | + UVD_CGC_CTRL__SCPU_MODE_MASK; + data2 |= UVD_SUVD_CGC_CTRL__SRE_MODE_MASK | + UVD_SUVD_CGC_CTRL__SIT_MODE_MASK | + UVD_SUVD_CGC_CTRL__SMP_MODE_MASK | + UVD_SUVD_CGC_CTRL__SCM_MODE_MASK | + UVD_SUVD_CGC_CTRL__SDB_MODE_MASK; + } + WREG32(mmUVD_CGC_CTRL, data); + WREG32(mmUVD_SUVD_CGC_CTRL, data2); + + data = RREG32_UVD_CTX(ixUVD_CGC_CTRL2); + data &= ~(REG_FIELD_MASK(UVD_CGC_CTRL2, DYN_OCLK_RAMP_EN) | + REG_FIELD_MASK(UVD_CGC_CTRL2, DYN_RCLK_RAMP_EN) | + REG_FIELD_MASK(UVD_CGC_CTRL2, GATER_DIV_ID)); + data1 &= (REG_FIELD_MASK(UVD_CGC_CTRL2, DYN_OCLK_RAMP_EN) | + REG_FIELD_MASK(UVD_CGC_CTRL2, DYN_RCLK_RAMP_EN) | + REG_FIELD_MASK(UVD_CGC_CTRL2, GATER_DIV_ID)); + data |= data1; + WREG32_UVD_CTX(ixUVD_CGC_CTRL2, data); +} + /** * uvd_v6_0_start - start UVD block * @@ -303,8 +531,19 @@ static int uvd_v6_0_start(struct amdgpu_device *adev) uvd_v6_0_mc_resume(adev); - /* disable clock gating */ - WREG32(mmUVD_CGC_GATE, 0); + /* Set dynamic clock gating in S/W control mode */ + if (adev->cg_flags & AMDGPU_CG_SUPPORT_UVD_MGCG) { + if (adev->flags & AMD_IS_APU) + cz_set_uvd_clock_gating_branches(adev, false); + else + tonga_set_uvd_clock_gating_branches(adev, false); + uvd_v6_0_set_uvd_dynamic_clock_mode(adev, true); + } else { + /* disable clock gating */ + uint32_t data = RREG32(mmUVD_CGC_CTRL); + data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK; + WREG32(mmUVD_CGC_CTRL, data); + } /* disable interupt */ WREG32_P(mmUVD_MASTINT_EN, 0, ~(1 << 1)); @@ -758,6 +997,24 @@ static int uvd_v6_0_process_interrupt(struct amdgpu_device *adev, static int uvd_v6_0_set_clockgating_state(void *handle, enum amd_clockgating_state state) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + bool enable = (state == AMD_CG_STATE_GATE) ? 
true : false; + + if (!(adev->cg_flags & AMDGPU_CG_SUPPORT_UVD_MGCG)) + return 0; + + if (enable) { + if (adev->flags & AMD_IS_APU) + cz_set_uvd_clock_gating_branches(adev, enable); + else + tonga_set_uvd_clock_gating_branches(adev, enable); + uvd_v6_0_set_uvd_dynamic_clock_mode(adev, true); + } else { + uint32_t data = RREG32(mmUVD_CGC_CTRL); + data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK; + WREG32(mmUVD_CGC_CTRL, data); + } + return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c index 370c6c9d81c2..e99af81e4aec 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c @@ -103,6 +103,108 @@ static void vce_v3_0_ring_set_wptr(struct amdgpu_ring *ring) WREG32(mmVCE_RB_WPTR2, ring->wptr); } +static void vce_v3_0_override_vce_clock_gating(struct amdgpu_device *adev, bool override) +{ + u32 tmp, data; + + tmp = data = RREG32(mmVCE_RB_ARB_CTRL); + if (override) + data |= VCE_RB_ARB_CTRL__VCE_CGTT_OVERRIDE_MASK; + else + data &= ~VCE_RB_ARB_CTRL__VCE_CGTT_OVERRIDE_MASK; + + if (tmp != data) + WREG32(mmVCE_RB_ARB_CTRL, data); +} + +static void vce_v3_0_set_vce_sw_clock_gating(struct amdgpu_device *adev, + bool gated) +{ + u32 tmp, data; + /* Set Override to disable Clock Gating */ + vce_v3_0_override_vce_clock_gating(adev, true); + + if (!gated) { + /* Force CLOCK ON for VCE_CLOCK_GATING_B, + * {*_FORCE_ON, *_FORCE_OFF} = {1, 0} + * VREG can be FORCE ON or set to Dynamic, but can't be OFF + */ + tmp = data = RREG32(mmVCE_CLOCK_GATING_B); + data |= 0x1ff; + data &= ~0xef0000; + if (tmp != data) + WREG32(mmVCE_CLOCK_GATING_B, data); + + /* Force CLOCK ON for VCE_UENC_CLOCK_GATING, + * {*_FORCE_ON, *_FORCE_OFF} = {1, 0} + */ + tmp = data = RREG32(mmVCE_UENC_CLOCK_GATING); + data |= 0x3ff000; + data &= ~0xffc00000; + if (tmp != data) + WREG32(mmVCE_UENC_CLOCK_GATING, data); + + /* set VCE_UENC_CLOCK_GATING_2 */ + tmp = data = RREG32(mmVCE_UENC_CLOCK_GATING_2); + data |= 0x2; + data &= ~0x2; + if (tmp != data) + WREG32(mmVCE_UENC_CLOCK_GATING_2, data); + + /* Force CLOCK ON for VCE_UENC_REG_CLOCK_GATING */ + tmp = data = RREG32(mmVCE_UENC_REG_CLOCK_GATING); + data |= 0x37f; + if (tmp != data) + WREG32(mmVCE_UENC_REG_CLOCK_GATING, data); + + /* Force VCE_UENC_DMA_DCLK_CTRL Clock ON */ + tmp = data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL); + data |= VCE_UENC_DMA_DCLK_CTRL__WRDMCLK_FORCEON_MASK | + VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK | + VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK | + 0x8; + if (tmp != data) + WREG32(mmVCE_UENC_DMA_DCLK_CTRL, data); + } else { + /* Force CLOCK OFF for VCE_CLOCK_GATING_B, + * {*, *_FORCE_OFF} = {*, 1} + * set VREG to Dynamic, as it can't be OFF + */ + tmp = data = RREG32(mmVCE_CLOCK_GATING_B); + data &= ~0x80010; + data |= 0xe70008; + if (tmp != data) + WREG32(mmVCE_CLOCK_GATING_B, data); + /* Force CLOCK OFF for VCE_UENC_CLOCK_GATING, + * Force ClOCK OFF takes precedent over Force CLOCK ON setting. 
+ * {*_FORCE_ON, *_FORCE_OFF} = {*, 1} + */ + tmp = data = RREG32(mmVCE_UENC_CLOCK_GATING); + data |= 0xffc00000; + if (tmp != data) + WREG32(mmVCE_UENC_CLOCK_GATING, data); + /* Set VCE_UENC_CLOCK_GATING_2 */ + tmp = data = RREG32(mmVCE_UENC_CLOCK_GATING_2); + data |= 0x10000; + if (tmp != data) + WREG32(mmVCE_UENC_CLOCK_GATING_2, data); + /* Set VCE_UENC_REG_CLOCK_GATING to dynamic */ + tmp = data = RREG32(mmVCE_UENC_REG_CLOCK_GATING); + data &= ~0xffc00000; + if (tmp != data) + WREG32(mmVCE_UENC_REG_CLOCK_GATING, data); + /* Set VCE_UENC_DMA_DCLK_CTRL CG always in dynamic mode */ + tmp = data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL); + data &= ~(VCE_UENC_DMA_DCLK_CTRL__WRDMCLK_FORCEON_MASK | + VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK | + VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK | + 0x8); + if (tmp != data) + WREG32(mmVCE_UENC_DMA_DCLK_CTRL, data); + } + vce_v3_0_override_vce_clock_gating(adev, false); +} + /** * vce_v3_0_start - start VCE block * @@ -121,7 +223,7 @@ static int vce_v3_0_start(struct amdgpu_device *adev) if (adev->vce.harvest_config & (1 << idx)) continue; - if(idx == 0) + if (idx == 0) WREG32_P(mmGRBM_GFX_INDEX, 0, ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK); else @@ -174,6 +276,10 @@ static int vce_v3_0_start(struct amdgpu_device *adev) /* clear BUSY flag */ WREG32_P(mmVCE_STATUS, 0, ~1); + /* Set Clock-Gating off */ + if (adev->cg_flags & AMDGPU_CG_SUPPORT_VCE_MGCG) + vce_v3_0_set_vce_sw_clock_gating(adev, false); + if (r) { DRM_ERROR("VCE not responding, giving up!!!\n"); mutex_unlock(&adev->grbm_idx_mutex); @@ -208,14 +314,11 @@ static int vce_v3_0_start(struct amdgpu_device *adev) static unsigned vce_v3_0_get_harvest_config(struct amdgpu_device *adev) { u32 tmp; - unsigned ret; /* Fiji, Stoney are single pipe */ if ((adev->asic_type == CHIP_FIJI) || - (adev->asic_type == CHIP_STONEY)){ - ret = AMDGPU_VCE_HARVEST_VCE1; - return ret; - } + (adev->asic_type == CHIP_STONEY)) + return AMDGPU_VCE_HARVEST_VCE1; /* Tonga and CZ are dual or single pipe */ if (adev->flags & AMD_IS_APU) @@ -229,19 +332,14 @@ static unsigned vce_v3_0_get_harvest_config(struct amdgpu_device *adev) switch (tmp) { case 1: - ret = AMDGPU_VCE_HARVEST_VCE0; - break; + return AMDGPU_VCE_HARVEST_VCE0; case 2: - ret = AMDGPU_VCE_HARVEST_VCE1; - break; + return AMDGPU_VCE_HARVEST_VCE1; case 3: - ret = AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1; - break; + return AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1; default: - ret = 0; + return 0; } - - return ret; } static int vce_v3_0_early_init(void *handle) @@ -316,28 +414,22 @@ static int vce_v3_0_sw_fini(void *handle) static int vce_v3_0_hw_init(void *handle) { - struct amdgpu_ring *ring; - int r; + int r, i; struct amdgpu_device *adev = (struct amdgpu_device *)handle; r = vce_v3_0_start(adev); if (r) return r; - ring = &adev->vce.ring[0]; - ring->ready = true; - r = amdgpu_ring_test_ring(ring); - if (r) { - ring->ready = false; - return r; - } + adev->vce.ring[0].ready = false; + adev->vce.ring[1].ready = false; - ring = &adev->vce.ring[1]; - ring->ready = true; - r = amdgpu_ring_test_ring(ring); - if (r) { - ring->ready = false; - return r; + for (i = 0; i < 2; i++) { + r = amdgpu_ring_test_ring(&adev->vce.ring[i]); + if (r) + return r; + else + adev->vce.ring[i].ready = true; } DRM_INFO("VCE initialized successfully.\n"); @@ -437,17 +529,9 @@ static bool vce_v3_0_is_idle(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; u32 mask = 0; - int idx; - for (idx = 0; idx < 2; ++idx) { - if (adev->vce.harvest_config & (1 << idx)) 
- continue; - - if (idx == 0) - mask |= SRBM_STATUS2__VCE0_BUSY_MASK; - else - mask |= SRBM_STATUS2__VCE1_BUSY_MASK; - } + mask |= (adev->vce.harvest_config & AMDGPU_VCE_HARVEST_VCE0) ? 0 : SRBM_STATUS2__VCE0_BUSY_MASK; + mask |= (adev->vce.harvest_config & AMDGPU_VCE_HARVEST_VCE1) ? 0 : SRBM_STATUS2__VCE1_BUSY_MASK; return !(RREG32(mmSRBM_STATUS2) & mask); } @@ -456,23 +540,11 @@ static int vce_v3_0_wait_for_idle(void *handle) { unsigned i; struct amdgpu_device *adev = (struct amdgpu_device *)handle; - u32 mask = 0; - int idx; - for (idx = 0; idx < 2; ++idx) { - if (adev->vce.harvest_config & (1 << idx)) - continue; - - if (idx == 0) - mask |= SRBM_STATUS2__VCE0_BUSY_MASK; - else - mask |= SRBM_STATUS2__VCE1_BUSY_MASK; - } - - for (i = 0; i < adev->usec_timeout; i++) { - if (!(RREG32(mmSRBM_STATUS2) & mask)) + for (i = 0; i < adev->usec_timeout; i++) + if (vce_v3_0_is_idle(handle)) return 0; - } + return -ETIMEDOUT; } @@ -480,17 +552,10 @@ static int vce_v3_0_soft_reset(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; u32 mask = 0; - int idx; - for (idx = 0; idx < 2; ++idx) { - if (adev->vce.harvest_config & (1 << idx)) - continue; + mask |= (adev->vce.harvest_config & AMDGPU_VCE_HARVEST_VCE0) ? 0 : SRBM_SOFT_RESET__SOFT_RESET_VCE0_MASK; + mask |= (adev->vce.harvest_config & AMDGPU_VCE_HARVEST_VCE1) ? 0 : SRBM_SOFT_RESET__SOFT_RESET_VCE1_MASK; - if (idx == 0) - mask |= SRBM_SOFT_RESET__SOFT_RESET_VCE0_MASK; - else - mask |= SRBM_SOFT_RESET__SOFT_RESET_VCE1_MASK; - } WREG32_P(mmSRBM_SOFT_RESET, mask, ~(SRBM_SOFT_RESET__SOFT_RESET_VCE0_MASK | SRBM_SOFT_RESET__SOFT_RESET_VCE1_MASK)); @@ -592,10 +657,8 @@ static int vce_v3_0_process_interrupt(struct amdgpu_device *adev, switch (entry->src_data) { case 0: - amdgpu_fence_process(&adev->vce.ring[0]); - break; case 1: - amdgpu_fence_process(&adev->vce.ring[1]); + amdgpu_fence_process(&adev->vce.ring[entry->src_data]); break; default: DRM_ERROR("Unhandled interrupt: %d %d\n", @@ -609,6 +672,47 @@ static int vce_v3_0_process_interrupt(struct amdgpu_device *adev, static int vce_v3_0_set_clockgating_state(void *handle, enum amd_clockgating_state state) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + bool enable = (state == AMD_CG_STATE_GATE) ? 
true : false; + int i; + + if (!(adev->cg_flags & AMDGPU_CG_SUPPORT_VCE_MGCG)) + return 0; + + mutex_lock(&adev->grbm_idx_mutex); + for (i = 0; i < 2; i++) { + /* Program VCE Instance 0 or 1 if not harvested */ + if (adev->vce.harvest_config & (1 << i)) + continue; + + if (i == 0) + WREG32_P(mmGRBM_GFX_INDEX, 0, + ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK); + else + WREG32_P(mmGRBM_GFX_INDEX, + GRBM_GFX_INDEX__VCE_INSTANCE_MASK, + ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK); + + if (enable) { + /* initialize VCE_CLOCK_GATING_A: Clock ON/OFF delay */ + uint32_t data = RREG32(mmVCE_CLOCK_GATING_A); + data &= ~(0xf | 0xff0); + data |= ((0x0 << 0) | (0x04 << 4)); + WREG32(mmVCE_CLOCK_GATING_A, data); + + /* initialize VCE_UENC_CLOCK_GATING: Clock ON/OFF delay */ + data = RREG32(mmVCE_UENC_CLOCK_GATING); + data &= ~(0xf | 0xff0); + data |= ((0x0 << 0) | (0x04 << 4)); + WREG32(mmVCE_UENC_CLOCK_GATING, data); + } + + vce_v3_0_set_vce_sw_clock_gating(adev, enable); + } + + WREG32_P(mmGRBM_GFX_INDEX, 0, ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK); + mutex_unlock(&adev->grbm_idx_mutex); + return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c index 2adc1c855e85..652e76644c31 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi.c +++ b/drivers/gpu/drm/amd/amdgpu/vi.c @@ -31,6 +31,7 @@ #include "amdgpu_vce.h" #include "amdgpu_ucode.h" #include "atom.h" +#include "amd_pcie.h" #include "gmc/gmc_8_1_d.h" #include "gmc/gmc_8_1_sh_mask.h" @@ -71,6 +72,7 @@ #include "uvd_v5_0.h" #include "uvd_v6_0.h" #include "vce_v3_0.h" +#include "amdgpu_powerplay.h" /* * Indirect registers accessor @@ -376,6 +378,38 @@ static bool vi_read_disabled_bios(struct amdgpu_device *adev) WREG32_SMC(ixROM_CNTL, rom_cntl); return r; } + +static bool vi_read_bios_from_rom(struct amdgpu_device *adev, + u8 *bios, u32 length_bytes) +{ + u32 *dw_ptr; + unsigned long flags; + u32 i, length_dw; + + if (bios == NULL) + return false; + if (length_bytes == 0) + return false; + /* APU vbios image is part of sbios image */ + if (adev->flags & AMD_IS_APU) + return false; + + dw_ptr = (u32 *)bios; + length_dw = ALIGN(length_bytes, 4) / 4; + /* take the smc lock since we are using the smc index */ + spin_lock_irqsave(&adev->smc_idx_lock, flags); + /* set rom index to 0 */ + WREG32(mmSMC_IND_INDEX_0, ixROM_INDEX); + WREG32(mmSMC_IND_DATA_0, 0); + /* set index to data for continous read */ + WREG32(mmSMC_IND_INDEX_0, ixROM_DATA); + for (i = 0; i < length_dw; i++) + dw_ptr[i] = RREG32(mmSMC_IND_DATA_0); + spin_unlock_irqrestore(&adev->smc_idx_lock, flags); + + return true; +} + static struct amdgpu_allowed_register_entry tonga_allowed_read_registers[] = { {mmGB_MACROTILE_MODE7, true}, }; @@ -1019,9 +1053,6 @@ static int vi_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk) static void vi_pcie_gen3_enable(struct amdgpu_device *adev) { - u32 mask; - int ret; - if (pci_is_root_bus(adev->pdev->bus)) return; @@ -1031,11 +1062,8 @@ static void vi_pcie_gen3_enable(struct amdgpu_device *adev) if (adev->flags & AMD_IS_APU) return; - ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask); - if (ret != 0) - return; - - if (!(mask & (DRM_PCIE_SPEED_50 | DRM_PCIE_SPEED_80))) + if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 | + CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3))) return; /* todo */ @@ -1098,7 +1126,7 @@ static const struct amdgpu_ip_block_version topaz_ip_blocks[] = .major = 7, .minor = 1, .rev = 0, - .funcs = &iceland_dpm_ip_funcs, + .funcs = &amdgpu_pp_ip_funcs, }, { .type = AMD_IP_BLOCK_TYPE_GFX, @@ -1145,7 +1173,7 @@ static 
const struct amdgpu_ip_block_version tonga_ip_blocks[] = .major = 7, .minor = 1, .rev = 0, - .funcs = &tonga_dpm_ip_funcs, + .funcs = &amdgpu_pp_ip_funcs, }, { .type = AMD_IP_BLOCK_TYPE_DCE, @@ -1213,7 +1241,7 @@ static const struct amdgpu_ip_block_version fiji_ip_blocks[] = .major = 7, .minor = 1, .rev = 0, - .funcs = &fiji_dpm_ip_funcs, + .funcs = &amdgpu_pp_ip_funcs, }, { .type = AMD_IP_BLOCK_TYPE_DCE, @@ -1281,7 +1309,7 @@ static const struct amdgpu_ip_block_version cz_ip_blocks[] = .major = 8, .minor = 0, .rev = 0, - .funcs = &cz_dpm_ip_funcs, + .funcs = &amdgpu_pp_ip_funcs }, { .type = AMD_IP_BLOCK_TYPE_DCE, @@ -1354,20 +1382,18 @@ int vi_set_ip_blocks(struct amdgpu_device *adev) static uint32_t vi_get_rev_id(struct amdgpu_device *adev) { - if (adev->asic_type == CHIP_TOPAZ) - return (RREG32(mmPCIE_EFUSE4) & PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID_MASK) - >> PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID__SHIFT; - else if (adev->flags & AMD_IS_APU) + if (adev->flags & AMD_IS_APU) return (RREG32_SMC(ATI_REV_ID_FUSE_MACRO__ADDRESS) & ATI_REV_ID_FUSE_MACRO__MASK) >> ATI_REV_ID_FUSE_MACRO__SHIFT; else - return (RREG32(mmCC_DRM_ID_STRAPS) & CC_DRM_ID_STRAPS__ATI_REV_ID_MASK) - >> CC_DRM_ID_STRAPS__ATI_REV_ID__SHIFT; + return (RREG32(mmPCIE_EFUSE4) & PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID_MASK) + >> PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID__SHIFT; } static const struct amdgpu_asic_funcs vi_asic_funcs = { .read_disabled_bios = &vi_read_disabled_bios, + .read_bios_from_rom = &vi_read_bios_from_rom, .read_register = &vi_read_register, .reset = &vi_asic_reset, .set_vga_state = &vi_vga_set_state, @@ -1416,7 +1442,8 @@ static int vi_common_early_init(void *handle) break; case CHIP_FIJI: adev->has_uvd = true; - adev->cg_flags = 0; + adev->cg_flags = AMDGPU_CG_SUPPORT_UVD_MGCG | + AMDGPU_CG_SUPPORT_VCE_MGCG; adev->pg_flags = 0; adev->external_rev_id = adev->rev_id + 0x3c; break; @@ -1442,6 +1469,8 @@ static int vi_common_early_init(void *handle) if (amdgpu_smc_load_fw && smc_enabled) adev->firmware.smu_load = true; + amdgpu_get_pcie_info(adev); + return 0; } @@ -1515,9 +1544,95 @@ static int vi_common_soft_reset(void *handle) return 0; } +static void fiji_update_bif_medium_grain_light_sleep(struct amdgpu_device *adev, + bool enable) +{ + uint32_t temp, data; + + temp = data = RREG32_PCIE(ixPCIE_CNTL2); + + if (enable) + data |= PCIE_CNTL2__SLV_MEM_LS_EN_MASK | + PCIE_CNTL2__MST_MEM_LS_EN_MASK | + PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK; + else + data &= ~(PCIE_CNTL2__SLV_MEM_LS_EN_MASK | + PCIE_CNTL2__MST_MEM_LS_EN_MASK | + PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK); + + if (temp != data) + WREG32_PCIE(ixPCIE_CNTL2, data); +} + +static void fiji_update_hdp_medium_grain_clock_gating(struct amdgpu_device *adev, + bool enable) +{ + uint32_t temp, data; + + temp = data = RREG32(mmHDP_HOST_PATH_CNTL); + + if (enable) + data &= ~HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK; + else + data |= HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK; + + if (temp != data) + WREG32(mmHDP_HOST_PATH_CNTL, data); +} + +static void fiji_update_hdp_light_sleep(struct amdgpu_device *adev, + bool enable) +{ + uint32_t temp, data; + + temp = data = RREG32(mmHDP_MEM_POWER_LS); + + if (enable) + data |= HDP_MEM_POWER_LS__LS_ENABLE_MASK; + else + data &= ~HDP_MEM_POWER_LS__LS_ENABLE_MASK; + + if (temp != data) + WREG32(mmHDP_MEM_POWER_LS, data); +} + +static void fiji_update_rom_medium_grain_clock_gating(struct amdgpu_device *adev, + bool enable) +{ + uint32_t temp, data; + + temp = data = RREG32_SMC(ixCGTT_ROM_CLK_CTRL0); + + if (enable) + data &= 
~(CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK | + CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK); + else + data |= CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK | + CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK; + + if (temp != data) + WREG32_SMC(ixCGTT_ROM_CLK_CTRL0, data); +} + static int vi_common_set_clockgating_state(void *handle, enum amd_clockgating_state state) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + switch (adev->asic_type) { + case CHIP_FIJI: + fiji_update_bif_medium_grain_light_sleep(adev, + state == AMD_CG_STATE_GATE ? true : false); + fiji_update_hdp_medium_grain_clock_gating(adev, + state == AMD_CG_STATE_GATE ? true : false); + fiji_update_hdp_light_sleep(adev, + state == AMD_CG_STATE_GATE ? true : false); + fiji_update_rom_medium_grain_clock_gating(adev, + state == AMD_CG_STATE_GATE ? true : false); + break; + default: + break; + } return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.h b/drivers/gpu/drm/amd/include/amd_acpi.h similarity index 91% rename from drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.h rename to drivers/gpu/drm/amd/include/amd_acpi.h index 01a29c3d7011..496360eb3fba 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.h +++ b/drivers/gpu/drm/amd/include/amd_acpi.h @@ -21,14 +21,63 @@ * */ -#ifndef AMDGPU_ACPI_H -#define AMDGPU_ACPI_H +#ifndef AMD_ACPI_H +#define AMD_ACPI_H -struct amdgpu_device; -struct acpi_bus_event; +#define ACPI_AC_CLASS "ac_adapter" -int amdgpu_atif_handler(struct amdgpu_device *adev, - struct acpi_bus_event *event); +struct atif_verify_interface { + u16 size; /* structure size in bytes (includes size field) */ + u16 version; /* version */ + u32 notification_mask; /* supported notifications mask */ + u32 function_bits; /* supported functions bit vector */ +} __packed; + +struct atif_system_params { + u16 size; /* structure size in bytes (includes size field) */ + u32 valid_mask; /* valid flags mask */ + u32 flags; /* flags */ + u8 command_code; /* notify command code */ +} __packed; + +struct atif_sbios_requests { + u16 size; /* structure size in bytes (includes size field) */ + u32 pending; /* pending sbios requests */ + u8 panel_exp_mode; /* panel expansion mode */ + u8 thermal_gfx; /* thermal state: target gfx controller */ + u8 thermal_state; /* thermal state: state id (0: exit state, non-0: state) */ + u8 forced_power_gfx; /* forced power state: target gfx controller */ + u8 forced_power_state; /* forced power state: state id */ + u8 system_power_src; /* system power source */ + u8 backlight_level; /* panel backlight level (0-255) */ +} __packed; + +#define ATIF_NOTIFY_MASK 0x3 +#define ATIF_NOTIFY_NONE 0 +#define ATIF_NOTIFY_81 1 +#define ATIF_NOTIFY_N 2 + +struct atcs_verify_interface { + u16 size; /* structure size in bytes (includes size field) */ + u16 version; /* version */ + u32 function_bits; /* supported functions bit vector */ +} __packed; + +#define ATCS_VALID_FLAGS_MASK 0x3 + +struct atcs_pref_req_input { + u16 size; /* structure size in bytes (includes size field) */ + u16 client_id; /* client id (bit 2-0: func num, 7-3: dev num, 15-8: bus num) */ + u16 valid_flags_mask; /* valid flags mask */ + u16 flags; /* flags */ + u8 req_type; /* request type */ + u8 perf_req; /* performance request */ +} __packed; + +struct atcs_pref_req_output { + u16 size; /* structure size in bytes (includes size field) */ + u8 ret_val; /* return value */ +} __packed; /* AMD hw uses four ACPI control methods: * 1. 
ATIF diff --git a/drivers/gpu/drm/amd/include/amd_pcie.h b/drivers/gpu/drm/amd/include/amd_pcie.h new file mode 100644 index 000000000000..7c2a916c1e63 --- /dev/null +++ b/drivers/gpu/drm/amd/include/amd_pcie.h @@ -0,0 +1,50 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef __AMD_PCIE_H__ +#define __AMD_PCIE_H__ + +/* Following flags shows PCIe link speed supported in driver which are decided by chipset and ASIC */ +#define CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 0x00010000 +#define CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 0x00020000 +#define CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 0x00040000 +#define CAIL_PCIE_LINK_SPEED_SUPPORT_MASK 0xFFFF0000 +#define CAIL_PCIE_LINK_SPEED_SUPPORT_SHIFT 16 + +/* Following flags shows PCIe link speed supported by ASIC H/W.*/ +#define CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 0x00000001 +#define CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 0x00000002 +#define CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 0x00000004 +#define CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_MASK 0x0000FFFF +#define CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_SHIFT 0 + +/* Following flags shows PCIe lane width switch supported in driver which are decided by chipset and ASIC */ +#define CAIL_PCIE_LINK_WIDTH_SUPPORT_X1 0x00010000 +#define CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 0x00020000 +#define CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 0x00040000 +#define CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 0x00080000 +#define CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 0x00100000 +#define CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 0x00200000 +#define CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 0x00400000 +#define CAIL_PCIE_LINK_WIDTH_SUPPORT_SHIFT 16 + +#endif diff --git a/drivers/gpu/drm/amd/include/amd_pcie_helpers.h b/drivers/gpu/drm/amd/include/amd_pcie_helpers.h new file mode 100644 index 000000000000..5725bf85eacc --- /dev/null +++ b/drivers/gpu/drm/amd/include/amd_pcie_helpers.h @@ -0,0 +1,141 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef __AMD_PCIE_HELPERS_H__ +#define __AMD_PCIE_HELPERS_H__ + +#include "amd_pcie.h" + +static inline bool is_pcie_gen3_supported(uint32_t pcie_link_speed_cap) +{ + if (pcie_link_speed_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) + return true; + + return false; +} + +static inline bool is_pcie_gen2_supported(uint32_t pcie_link_speed_cap) +{ + if (pcie_link_speed_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2) + return true; + + return false; +} + +/* Get the new PCIE speed given the ASIC PCIE Cap and the NewState's requested PCIE speed*/ +static inline uint16_t get_pcie_gen_support(uint32_t pcie_link_speed_cap, + uint16_t ns_pcie_gen) +{ + uint32_t asic_pcie_link_speed_cap = (pcie_link_speed_cap & + CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_MASK); + uint32_t sys_pcie_link_speed_cap = (pcie_link_speed_cap & + CAIL_PCIE_LINK_SPEED_SUPPORT_MASK); + + switch (asic_pcie_link_speed_cap) { + case CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1: + return PP_PCIEGen1; + + case CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2: + return PP_PCIEGen2; + + case CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3: + return PP_PCIEGen3; + + default: + if (is_pcie_gen3_supported(sys_pcie_link_speed_cap) && + (ns_pcie_gen == PP_PCIEGen3)) { + return PP_PCIEGen3; + } else if (is_pcie_gen2_supported(sys_pcie_link_speed_cap) && + ((ns_pcie_gen == PP_PCIEGen3) || (ns_pcie_gen == PP_PCIEGen2))) { + return PP_PCIEGen2; + } + } + + return PP_PCIEGen1; +} + +static inline uint16_t get_pcie_lane_support(uint32_t pcie_lane_width_cap, + uint16_t ns_pcie_lanes) +{ + int i, j; + uint16_t new_pcie_lanes = ns_pcie_lanes; + uint16_t pcie_lanes[7] = {1, 2, 4, 8, 12, 16, 32}; + + switch (pcie_lane_width_cap) { + case 0: + printk(KERN_ERR "No valid PCIE lane width reported"); + break; + case CAIL_PCIE_LINK_WIDTH_SUPPORT_X1: + new_pcie_lanes = 1; + break; + case CAIL_PCIE_LINK_WIDTH_SUPPORT_X2: + new_pcie_lanes = 2; + break; + case CAIL_PCIE_LINK_WIDTH_SUPPORT_X4: + new_pcie_lanes = 4; + break; + case CAIL_PCIE_LINK_WIDTH_SUPPORT_X8: + new_pcie_lanes = 8; + break; + case CAIL_PCIE_LINK_WIDTH_SUPPORT_X12: + new_pcie_lanes = 12; + break; + case CAIL_PCIE_LINK_WIDTH_SUPPORT_X16: + new_pcie_lanes = 16; + break; + case CAIL_PCIE_LINK_WIDTH_SUPPORT_X32: + new_pcie_lanes = 32; + break; + default: + for (i = 0; i < 7; i++) { + if (ns_pcie_lanes == pcie_lanes[i]) { + if (pcie_lane_width_cap & (0x10000 << i)) { + break; + } else { + for (j = i - 1; j >= 0; j--) { + if (pcie_lane_width_cap & (0x10000 
<< j)) { + new_pcie_lanes = pcie_lanes[j]; + break; + } + } + + if (j < 0) { + for (j = i + 1; j < 7; j++) { + if (pcie_lane_width_cap & (0x10000 << j)) { + new_pcie_lanes = pcie_lanes[j]; + break; + } + } + if (j > 7) + printk(KERN_ERR "Cannot find a valid PCIE lane width!"); + } + } + break; + } + } + break; + } + + return new_pcie_lanes; +} + +#endif diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h index fe28fb353fab..1195d06f55bc 100644 --- a/drivers/gpu/drm/amd/include/amd_shared.h +++ b/drivers/gpu/drm/amd/include/amd_shared.h @@ -85,6 +85,27 @@ enum amd_powergating_state { AMD_PG_STATE_UNGATE, }; +enum amd_pm_state_type { + /* not used for dpm */ + POWER_STATE_TYPE_DEFAULT, + POWER_STATE_TYPE_POWERSAVE, + /* user selectable states */ + POWER_STATE_TYPE_BATTERY, + POWER_STATE_TYPE_BALANCED, + POWER_STATE_TYPE_PERFORMANCE, + /* internal states */ + POWER_STATE_TYPE_INTERNAL_UVD, + POWER_STATE_TYPE_INTERNAL_UVD_SD, + POWER_STATE_TYPE_INTERNAL_UVD_HD, + POWER_STATE_TYPE_INTERNAL_UVD_HD2, + POWER_STATE_TYPE_INTERNAL_UVD_MVC, + POWER_STATE_TYPE_INTERNAL_BOOT, + POWER_STATE_TYPE_INTERNAL_THERMAL, + POWER_STATE_TYPE_INTERNAL_ACPI, + POWER_STATE_TYPE_INTERNAL_ULV, + POWER_STATE_TYPE_INTERNAL_3DPERF, +}; + struct amd_ip_funcs { /* sets up early driver state (pre sw_init), does not configure hw - Optional */ int (*early_init)(void *handle); diff --git a/drivers/gpu/drm/amd/include/asic_reg/bif/bif_5_0_d.h b/drivers/gpu/drm/amd/include/asic_reg/bif/bif_5_0_d.h index 92b6ba0047af..293329719bba 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/bif/bif_5_0_d.h +++ b/drivers/gpu/drm/amd/include/asic_reg/bif/bif_5_0_d.h @@ -596,6 +596,7 @@ #define mmSWRST_EP_CONTROL_0 0x14ac #define mmCPM_CONTROL 0x14b8 #define mmGSKT_CONTROL 0x14bf +#define ixSWRST_COMMAND_1 0x1400103 #define ixLM_CONTROL 0x1400120 #define ixLM_PCIETXMUX0 0x1400121 #define ixLM_PCIETXMUX1 0x1400122 diff --git a/drivers/gpu/drm/amd/include/asic_reg/gca/gfx_8_0_d.h b/drivers/gpu/drm/amd/include/asic_reg/gca/gfx_8_0_d.h index daf763ba1a8f..a9b6923192ee 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/gca/gfx_8_0_d.h +++ b/drivers/gpu/drm/amd/include/asic_reg/gca/gfx_8_0_d.h @@ -2807,5 +2807,18 @@ #define ixDIDT_DBR_WEIGHT0_3 0x90 #define ixDIDT_DBR_WEIGHT4_7 0x91 #define ixDIDT_DBR_WEIGHT8_11 0x92 +#define mmTD_EDC_CNT 0x252e +#define mmCPF_EDC_TAG_CNT 0x3188 +#define mmCPF_EDC_ROQ_CNT 0x3189 +#define mmCPF_EDC_ATC_CNT 0x318a +#define mmCPG_EDC_TAG_CNT 0x318b +#define mmCPG_EDC_ATC_CNT 0x318c +#define mmCPG_EDC_DMA_CNT 0x318d +#define mmCPC_EDC_SCRATCH_CNT 0x318e +#define mmCPC_EDC_UCODE_CNT 0x318f +#define mmCPC_EDC_ATC_CNT 0x3190 +#define mmDC_EDC_STATE_CNT 0x3191 +#define mmDC_EDC_CSINVOC_CNT 0x3192 +#define mmDC_EDC_RESTORE_CNT 0x3193 #endif /* GFX_8_0_D_H */ diff --git a/drivers/gpu/drm/amd/include/atombios.h b/drivers/gpu/drm/amd/include/atombios.h index 552622675ace..eaf451e26643 100644 --- a/drivers/gpu/drm/amd/include/atombios.h +++ b/drivers/gpu/drm/amd/include/atombios.h @@ -550,6 +550,13 @@ typedef struct _COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_1 //MPLL_CNTL_FLAG_BYPASS_AD_PLL has a wrong name, should be BYPASS_DQ_PLL #define MPLL_CNTL_FLAG_BYPASS_AD_PLL 0x04 +// use for ComputeMemoryClockParamTable +typedef struct _COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_2 +{ + COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4 ulClock; + ULONG ulReserved; +}COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_2; + typedef struct _DYNAMICE_MEMORY_SETTINGS_PARAMETER { ATOM_COMPUTE_CLOCK_FREQ ulClock; @@ -4988,6 
+4995,78 @@ typedef struct _ATOM_ASIC_PROFILING_INFO_V3_3 ULONG ulSDCMargine; }ATOM_ASIC_PROFILING_INFO_V3_3; +// for Fiji speed EVV algorithm +typedef struct _ATOM_ASIC_PROFILING_INFO_V3_4 +{ + ATOM_COMMON_TABLE_HEADER asHeader; + ULONG ulEvvLkgFactor; + ULONG ulBoardCoreTemp; + ULONG ulMaxVddc; + ULONG ulMinVddc; + ULONG ulLoadLineSlop; + ULONG ulLeakageTemp; + ULONG ulLeakageVoltage; + EFUSE_LINEAR_FUNC_PARAM sCACm; + EFUSE_LINEAR_FUNC_PARAM sCACb; + EFUSE_LOGISTIC_FUNC_PARAM sKt_b; + EFUSE_LOGISTIC_FUNC_PARAM sKv_m; + EFUSE_LOGISTIC_FUNC_PARAM sKv_b; + USHORT usLkgEuseIndex; + UCHAR ucLkgEfuseBitLSB; + UCHAR ucLkgEfuseLength; + ULONG ulLkgEncodeLn_MaxDivMin; + ULONG ulLkgEncodeMax; + ULONG ulLkgEncodeMin; + ULONG ulEfuseLogisticAlpha; + USHORT usPowerDpm0; + USHORT usPowerDpm1; + USHORT usPowerDpm2; + USHORT usPowerDpm3; + USHORT usPowerDpm4; + USHORT usPowerDpm5; + USHORT usPowerDpm6; + USHORT usPowerDpm7; + ULONG ulTdpDerateDPM0; + ULONG ulTdpDerateDPM1; + ULONG ulTdpDerateDPM2; + ULONG ulTdpDerateDPM3; + ULONG ulTdpDerateDPM4; + ULONG ulTdpDerateDPM5; + ULONG ulTdpDerateDPM6; + ULONG ulTdpDerateDPM7; + EFUSE_LINEAR_FUNC_PARAM sRoFuse; + ULONG ulEvvDefaultVddc; + ULONG ulEvvNoCalcVddc; + USHORT usParamNegFlag; + USHORT usSpeed_Model; + ULONG ulSM_A0; + ULONG ulSM_A1; + ULONG ulSM_A2; + ULONG ulSM_A3; + ULONG ulSM_A4; + ULONG ulSM_A5; + ULONG ulSM_A6; + ULONG ulSM_A7; + UCHAR ucSM_A0_sign; + UCHAR ucSM_A1_sign; + UCHAR ucSM_A2_sign; + UCHAR ucSM_A3_sign; + UCHAR ucSM_A4_sign; + UCHAR ucSM_A5_sign; + UCHAR ucSM_A6_sign; + UCHAR ucSM_A7_sign; + ULONG ulMargin_RO_a; + ULONG ulMargin_RO_b; + ULONG ulMargin_RO_c; + ULONG ulMargin_fixed; + ULONG ulMargin_Fmax_mean; + ULONG ulMargin_plat_mean; + ULONG ulMargin_Fmax_sigma; + ULONG ulMargin_plat_sigma; + ULONG ulMargin_DC_sigma; + ULONG ulReserved[8]; // Reserved for future ASIC +}ATOM_ASIC_PROFILING_INFO_V3_4; + typedef struct _ATOM_POWER_SOURCE_OBJECT { UCHAR ucPwrSrcId; // Power source diff --git a/drivers/gpu/drm/amd/include/cgs_common.h b/drivers/gpu/drm/amd/include/cgs_common.h index 992dcd8a5c6a..713aec954692 100644 --- a/drivers/gpu/drm/amd/include/cgs_common.h +++ b/drivers/gpu/drm/amd/include/cgs_common.h @@ -105,6 +105,34 @@ enum cgs_ucode_id { CGS_UCODE_ID_MAXIMUM, }; +enum cgs_system_info_id { + CGS_SYSTEM_INFO_ADAPTER_BDF_ID = 1, + CGS_SYSTEM_INFO_PCIE_GEN_INFO, + CGS_SYSTEM_INFO_PCIE_MLW, + CGS_SYSTEM_INFO_ID_MAXIMUM, +}; + +struct cgs_system_info { + uint64_t size; + uint64_t info_id; + union { + void *ptr; + uint64_t value; + }; + uint64_t padding[13]; +}; + +/* + * enum cgs_resource_type - GPU resource type + */ +enum cgs_resource_type { + CGS_RESOURCE_TYPE_MMIO = 0, + CGS_RESOURCE_TYPE_FB, + CGS_RESOURCE_TYPE_IO, + CGS_RESOURCE_TYPE_DOORBELL, + CGS_RESOURCE_TYPE_ROM, +}; + /** * struct cgs_clock_limits - Clock limits * @@ -127,8 +155,53 @@ struct cgs_firmware_info { void *kptr; }; +struct cgs_mode_info { + uint32_t refresh_rate; + uint32_t ref_clock; + uint32_t vblank_time_us; +}; + +struct cgs_display_info { + uint32_t display_count; + uint32_t active_display_mask; + struct cgs_mode_info *mode_info; +}; + typedef unsigned long cgs_handle_t; +#define CGS_ACPI_METHOD_ATCS 0x53435441 +#define CGS_ACPI_METHOD_ATIF 0x46495441 +#define CGS_ACPI_METHOD_ATPX 0x58505441 +#define CGS_ACPI_FIELD_METHOD_NAME 0x00000001 +#define CGS_ACPI_FIELD_INPUT_ARGUMENT_COUNT 0x00000002 +#define CGS_ACPI_MAX_BUFFER_SIZE 256 +#define CGS_ACPI_TYPE_ANY 0x00 +#define CGS_ACPI_TYPE_INTEGER 0x01 +#define CGS_ACPI_TYPE_STRING 0x02 +#define 
CGS_ACPI_TYPE_BUFFER 0x03 +#define CGS_ACPI_TYPE_PACKAGE 0x04 + +struct cgs_acpi_method_argument { + uint32_t type; + uint32_t method_length; + uint32_t data_length; + union{ + uint32_t value; + void *pointer; + }; +}; + +struct cgs_acpi_method_info { + uint32_t size; + uint32_t field; + uint32_t input_count; + uint32_t name; + struct cgs_acpi_method_argument *pinput_argument; + uint32_t output_count; + struct cgs_acpi_method_argument *poutput_argument; + uint32_t padding[9]; +}; + /** * cgs_gpu_mem_info() - Return information about memory heaps * @cgs_device: opaque device handle @@ -355,6 +428,23 @@ typedef void (*cgs_write_pci_config_word_t)(void *cgs_device, unsigned addr, typedef void (*cgs_write_pci_config_dword_t)(void *cgs_device, unsigned addr, uint32_t value); + +/** + * cgs_get_pci_resource() - provide access to a device resource (PCI BAR) + * @cgs_device: opaque device handle + * @resource_type: Type of Resource (MMIO, IO, ROM, FB, DOORBELL) + * @size: size of the region + * @offset: offset from the start of the region + * @resource_base: base address (not including offset) returned + * + * Return: 0 on success, -errno otherwise + */ +typedef int (*cgs_get_pci_resource_t)(void *cgs_device, + enum cgs_resource_type resource_type, + uint64_t size, + uint64_t offset, + uint64_t *resource_base); + /** * cgs_atom_get_data_table() - Get a pointer to an ATOM BIOS data table * @cgs_device: opaque device handle @@ -493,6 +583,21 @@ typedef int(*cgs_set_clockgating_state)(void *cgs_device, enum amd_ip_block_type block_type, enum amd_clockgating_state state); +typedef int(*cgs_get_active_displays_info)( + void *cgs_device, + struct cgs_display_info *info); + +typedef int (*cgs_call_acpi_method)(void *cgs_device, + uint32_t acpi_method, + uint32_t acpi_function, + void *pinput, void *poutput, + uint32_t output_count, + uint32_t input_size, + uint32_t output_size); + +typedef int (*cgs_query_system_info)(void *cgs_device, + struct cgs_system_info *sys_info); + struct cgs_ops { /* memory management calls (similar to KFD interface) */ cgs_gpu_mem_info_t gpu_mem_info; @@ -516,6 +621,8 @@ struct cgs_ops { cgs_write_pci_config_byte_t write_pci_config_byte; cgs_write_pci_config_word_t write_pci_config_word; cgs_write_pci_config_dword_t write_pci_config_dword; + /* PCI resources */ + cgs_get_pci_resource_t get_pci_resource; /* ATOM BIOS */ cgs_atom_get_data_table_t atom_get_data_table; cgs_atom_get_cmd_table_revs_t atom_get_cmd_table_revs; @@ -533,7 +640,12 @@ struct cgs_ops { /* cg pg interface*/ cgs_set_powergating_state set_powergating_state; cgs_set_clockgating_state set_clockgating_state; - /* ACPI (TODO) */ + /* display manager */ + cgs_get_active_displays_info get_active_displays_info; + /* ACPI */ + cgs_call_acpi_method call_acpi_method; + /* get system info */ + cgs_query_system_info query_system_info; }; struct cgs_os_ops; /* To be define in OS-specific CGS header */ @@ -620,5 +732,15 @@ struct cgs_device CGS_CALL(set_powergating_state, dev, block_type, state) #define cgs_set_clockgating_state(dev, block_type, state) \ CGS_CALL(set_clockgating_state, dev, block_type, state) +#define cgs_get_active_displays_info(dev, info) \ + CGS_CALL(get_active_displays_info, dev, info) +#define cgs_call_acpi_method(dev, acpi_method, acpi_function, pintput, poutput, output_count, input_size, output_size) \ + CGS_CALL(call_acpi_method, dev, acpi_method, acpi_function, pintput, poutput, output_count, input_size, output_size) +#define cgs_query_system_info(dev, sys_info) \ + CGS_CALL(query_system_info, 
dev, sys_info) +#define cgs_get_pci_resource(cgs_device, resource_type, size, offset, \ + resource_base) \ + CGS_CALL(get_pci_resource, cgs_device, resource_type, size, offset, \ + resource_base) #endif /* _CGS_COMMON_H */ diff --git a/drivers/gpu/drm/amd/powerplay/Kconfig b/drivers/gpu/drm/amd/powerplay/Kconfig new file mode 100644 index 000000000000..af380335b425 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/Kconfig @@ -0,0 +1,6 @@ +config DRM_AMD_POWERPLAY + bool "Enable AMD powerplay component" + depends on DRM_AMDGPU + default n + help + select this option will enable AMD powerplay component. diff --git a/drivers/gpu/drm/amd/powerplay/Makefile b/drivers/gpu/drm/amd/powerplay/Makefile new file mode 100644 index 000000000000..e195bf59da86 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/Makefile @@ -0,0 +1,22 @@ + +subdir-ccflags-y += -Iinclude/drm \ + -Idrivers/gpu/drm/amd/powerplay/inc/ \ + -Idrivers/gpu/drm/amd/include/asic_reg \ + -Idrivers/gpu/drm/amd/include \ + -Idrivers/gpu/drm/amd/powerplay/smumgr\ + -Idrivers/gpu/drm/amd/powerplay/hwmgr \ + -Idrivers/gpu/drm/amd/powerplay/eventmgr + +AMD_PP_PATH = ../powerplay + +PP_LIBS = smumgr hwmgr eventmgr + +AMD_POWERPLAY = $(addsuffix /Makefile,$(addprefix drivers/gpu/drm/amd/powerplay/,$(PP_LIBS))) + +include $(AMD_POWERPLAY) + +POWER_MGR = amd_powerplay.o + +AMD_PP_POWER = $(addprefix $(AMD_PP_PATH)/,$(POWER_MGR)) + +AMD_POWERPLAY_FILES += $(AMD_PP_POWER) diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c new file mode 100644 index 000000000000..8f5d5edcf193 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c @@ -0,0 +1,660 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#include +#include +#include +#include +#include "amd_shared.h" +#include "amd_powerplay.h" +#include "pp_instance.h" +#include "power_state.h" +#include "eventmanager.h" + +#define PP_CHECK(handle) \ + do { \ + if ((handle) == NULL || (handle)->pp_valid != PP_VALID) \ + return -EINVAL; \ + } while (0) + +static int pp_early_init(void *handle) +{ + return 0; +} + +static int pp_sw_init(void *handle) +{ + struct pp_instance *pp_handle; + struct pp_hwmgr *hwmgr; + int ret = 0; + + if (handle == NULL) + return -EINVAL; + + pp_handle = (struct pp_instance *)handle; + hwmgr = pp_handle->hwmgr; + + if (hwmgr == NULL || hwmgr->pptable_func == NULL || + hwmgr->hwmgr_func == NULL || + hwmgr->pptable_func->pptable_init == NULL || + hwmgr->hwmgr_func->backend_init == NULL) + return -EINVAL; + + ret = hwmgr->pptable_func->pptable_init(hwmgr); + + if (ret == 0) + ret = hwmgr->hwmgr_func->backend_init(hwmgr); + + return ret; +} + +static int pp_sw_fini(void *handle) +{ + struct pp_instance *pp_handle; + struct pp_hwmgr *hwmgr; + int ret = 0; + + if (handle == NULL) + return -EINVAL; + + pp_handle = (struct pp_instance *)handle; + hwmgr = pp_handle->hwmgr; + + if (hwmgr != NULL || hwmgr->hwmgr_func != NULL || + hwmgr->hwmgr_func->backend_fini != NULL) + ret = hwmgr->hwmgr_func->backend_fini(hwmgr); + + return ret; +} + +static int pp_hw_init(void *handle) +{ + struct pp_instance *pp_handle; + struct pp_smumgr *smumgr; + struct pp_eventmgr *eventmgr; + int ret = 0; + + if (handle == NULL) + return -EINVAL; + + pp_handle = (struct pp_instance *)handle; + smumgr = pp_handle->smu_mgr; + + if (smumgr == NULL || smumgr->smumgr_funcs == NULL || + smumgr->smumgr_funcs->smu_init == NULL || + smumgr->smumgr_funcs->start_smu == NULL) + return -EINVAL; + + ret = smumgr->smumgr_funcs->smu_init(smumgr); + if (ret) { + printk(KERN_ERR "[ powerplay ] smc initialization failed\n"); + return ret; + } + + ret = smumgr->smumgr_funcs->start_smu(smumgr); + if (ret) { + printk(KERN_ERR "[ powerplay ] smc start failed\n"); + smumgr->smumgr_funcs->smu_fini(smumgr); + return ret; + } + + hw_init_power_state_table(pp_handle->hwmgr); + eventmgr = pp_handle->eventmgr; + + if (eventmgr == NULL || eventmgr->pp_eventmgr_init == NULL) + return -EINVAL; + + ret = eventmgr->pp_eventmgr_init(eventmgr); + return 0; +} + +static int pp_hw_fini(void *handle) +{ + struct pp_instance *pp_handle; + struct pp_smumgr *smumgr; + struct pp_eventmgr *eventmgr; + + if (handle == NULL) + return -EINVAL; + + pp_handle = (struct pp_instance *)handle; + eventmgr = pp_handle->eventmgr; + + if (eventmgr != NULL || eventmgr->pp_eventmgr_fini != NULL) + eventmgr->pp_eventmgr_fini(eventmgr); + + smumgr = pp_handle->smu_mgr; + + if (smumgr != NULL || smumgr->smumgr_funcs != NULL || + smumgr->smumgr_funcs->smu_fini != NULL) + smumgr->smumgr_funcs->smu_fini(smumgr); + + return 0; +} + +static bool pp_is_idle(void *handle) +{ + return 0; +} + +static int pp_wait_for_idle(void *handle) +{ + return 0; +} + +static int pp_sw_reset(void *handle) +{ + return 0; +} + +static void pp_print_status(void *handle) +{ + +} + +static int pp_set_clockgating_state(void *handle, + enum amd_clockgating_state state) +{ + return 0; +} + +static int pp_set_powergating_state(void *handle, + enum amd_powergating_state state) +{ + return 0; +} + +static int pp_suspend(void *handle) +{ + struct pp_instance *pp_handle; + struct pp_eventmgr *eventmgr; + struct pem_event_data event_data = { {0} }; + + if (handle == NULL) + return -EINVAL; + + pp_handle = (struct pp_instance 
*)handle; + eventmgr = pp_handle->eventmgr; + pem_handle_event(eventmgr, AMD_PP_EVENT_SUSPEND, &event_data); + return 0; +} + +static int pp_resume(void *handle) +{ + struct pp_instance *pp_handle; + struct pp_eventmgr *eventmgr; + struct pem_event_data event_data = { {0} }; + struct pp_smumgr *smumgr; + int ret; + + if (handle == NULL) + return -EINVAL; + + pp_handle = (struct pp_instance *)handle; + smumgr = pp_handle->smu_mgr; + + if (smumgr == NULL || smumgr->smumgr_funcs == NULL || + smumgr->smumgr_funcs->start_smu == NULL) + return -EINVAL; + + ret = smumgr->smumgr_funcs->start_smu(smumgr); + if (ret) { + printk(KERN_ERR "[ powerplay ] smc start failed\n"); + smumgr->smumgr_funcs->smu_fini(smumgr); + return ret; + } + + eventmgr = pp_handle->eventmgr; + pem_handle_event(eventmgr, AMD_PP_EVENT_RESUME, &event_data); + + return 0; +} + +const struct amd_ip_funcs pp_ip_funcs = { + .early_init = pp_early_init, + .late_init = NULL, + .sw_init = pp_sw_init, + .sw_fini = pp_sw_fini, + .hw_init = pp_hw_init, + .hw_fini = pp_hw_fini, + .suspend = pp_suspend, + .resume = pp_resume, + .is_idle = pp_is_idle, + .wait_for_idle = pp_wait_for_idle, + .soft_reset = pp_sw_reset, + .print_status = pp_print_status, + .set_clockgating_state = pp_set_clockgating_state, + .set_powergating_state = pp_set_powergating_state, +}; + +static int pp_dpm_load_fw(void *handle) +{ + return 0; +} + +static int pp_dpm_fw_loading_complete(void *handle) +{ + return 0; +} + +static int pp_dpm_force_performance_level(void *handle, + enum amd_dpm_forced_level level) +{ + struct pp_instance *pp_handle; + struct pp_hwmgr *hwmgr; + + if (handle == NULL) + return -EINVAL; + + pp_handle = (struct pp_instance *)handle; + + hwmgr = pp_handle->hwmgr; + + if (hwmgr == NULL || hwmgr->hwmgr_func == NULL || + hwmgr->hwmgr_func->force_dpm_level == NULL) + return -EINVAL; + + hwmgr->hwmgr_func->force_dpm_level(hwmgr, level); + + return 0; +} + +static enum amd_dpm_forced_level pp_dpm_get_performance_level( + void *handle) +{ + struct pp_hwmgr *hwmgr; + + if (handle == NULL) + return -EINVAL; + + hwmgr = ((struct pp_instance *)handle)->hwmgr; + + if (hwmgr == NULL) + return -EINVAL; + + return (((struct pp_instance *)handle)->hwmgr->dpm_level); +} + +static int pp_dpm_get_sclk(void *handle, bool low) +{ + struct pp_hwmgr *hwmgr; + + if (handle == NULL) + return -EINVAL; + + hwmgr = ((struct pp_instance *)handle)->hwmgr; + + if (hwmgr == NULL || hwmgr->hwmgr_func == NULL || + hwmgr->hwmgr_func->get_sclk == NULL) + return -EINVAL; + + return hwmgr->hwmgr_func->get_sclk(hwmgr, low); +} + +static int pp_dpm_get_mclk(void *handle, bool low) +{ + struct pp_hwmgr *hwmgr; + + if (handle == NULL) + return -EINVAL; + + hwmgr = ((struct pp_instance *)handle)->hwmgr; + + if (hwmgr == NULL || hwmgr->hwmgr_func == NULL || + hwmgr->hwmgr_func->get_mclk == NULL) + return -EINVAL; + + return hwmgr->hwmgr_func->get_mclk(hwmgr, low); +} + +static int pp_dpm_powergate_vce(void *handle, bool gate) +{ + struct pp_hwmgr *hwmgr; + + if (handle == NULL) + return -EINVAL; + + hwmgr = ((struct pp_instance *)handle)->hwmgr; + + if (hwmgr == NULL || hwmgr->hwmgr_func == NULL || + hwmgr->hwmgr_func->powergate_vce == NULL) + return -EINVAL; + + return hwmgr->hwmgr_func->powergate_vce(hwmgr, gate); +} + +static int pp_dpm_powergate_uvd(void *handle, bool gate) +{ + struct pp_hwmgr *hwmgr; + + if (handle == NULL) + return -EINVAL; + + hwmgr = ((struct pp_instance *)handle)->hwmgr; + + if (hwmgr == NULL || hwmgr->hwmgr_func == NULL || + hwmgr->hwmgr_func->powergate_uvd == 
NULL) + return -EINVAL; + + return hwmgr->hwmgr_func->powergate_uvd(hwmgr, gate); +} + +static enum PP_StateUILabel power_state_convert(enum amd_pm_state_type state) +{ + switch (state) { + case POWER_STATE_TYPE_BATTERY: + return PP_StateUILabel_Battery; + case POWER_STATE_TYPE_BALANCED: + return PP_StateUILabel_Balanced; + case POWER_STATE_TYPE_PERFORMANCE: + return PP_StateUILabel_Performance; + default: + return PP_StateUILabel_None; + } +} + +int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_event event_id, void *input, void *output) +{ + int ret = 0; + struct pp_instance *pp_handle; + struct pem_event_data data = { {0} }; + + pp_handle = (struct pp_instance *)handle; + + if (pp_handle == NULL) + return -EINVAL; + + switch (event_id) { + case AMD_PP_EVENT_DISPLAY_CONFIG_CHANGE: + ret = pem_handle_event(pp_handle->eventmgr, event_id, &data); + break; + case AMD_PP_EVENT_ENABLE_USER_STATE: + { + enum amd_pm_state_type ps; + + if (input == NULL) + return -EINVAL; + ps = *(unsigned long *)input; + + data.requested_ui_label = power_state_convert(ps); + ret = pem_handle_event(pp_handle->eventmgr, event_id, &data); + } + break; + default: + break; + } + return ret; +} + +enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle) +{ + struct pp_hwmgr *hwmgr; + struct pp_power_state *state; + + if (handle == NULL) + return -EINVAL; + + hwmgr = ((struct pp_instance *)handle)->hwmgr; + + if (hwmgr == NULL || hwmgr->current_ps == NULL) + return -EINVAL; + + state = hwmgr->current_ps; + + switch (state->classification.ui_label) { + case PP_StateUILabel_Battery: + return POWER_STATE_TYPE_BATTERY; + case PP_StateUILabel_Balanced: + return POWER_STATE_TYPE_BALANCED; + case PP_StateUILabel_Performance: + return POWER_STATE_TYPE_PERFORMANCE; + default: + return POWER_STATE_TYPE_DEFAULT; + } +} + +static void +pp_debugfs_print_current_performance_level(void *handle, + struct seq_file *m) +{ + struct pp_hwmgr *hwmgr; + + if (handle == NULL) + return; + + hwmgr = ((struct pp_instance *)handle)->hwmgr; + + if (hwmgr == NULL || hwmgr->hwmgr_func == NULL || + hwmgr->hwmgr_func->print_current_perforce_level == NULL) + return; + + hwmgr->hwmgr_func->print_current_perforce_level(hwmgr, m); +} + +static int pp_dpm_set_fan_control_mode(void *handle, uint32_t mode) +{ + struct pp_hwmgr *hwmgr; + + if (handle == NULL) + return -EINVAL; + + hwmgr = ((struct pp_instance *)handle)->hwmgr; + + if (hwmgr == NULL || hwmgr->hwmgr_func == NULL || + hwmgr->hwmgr_func->set_fan_control_mode == NULL) + return -EINVAL; + + return hwmgr->hwmgr_func->set_fan_control_mode(hwmgr, mode); +} + +static int pp_dpm_get_fan_control_mode(void *handle) +{ + struct pp_hwmgr *hwmgr; + + if (handle == NULL) + return -EINVAL; + + hwmgr = ((struct pp_instance *)handle)->hwmgr; + + if (hwmgr == NULL || hwmgr->hwmgr_func == NULL || + hwmgr->hwmgr_func->get_fan_control_mode == NULL) + return -EINVAL; + + return hwmgr->hwmgr_func->get_fan_control_mode(hwmgr); +} + +static int pp_dpm_set_fan_speed_percent(void *handle, uint32_t percent) +{ + struct pp_hwmgr *hwmgr; + + if (handle == NULL) + return -EINVAL; + + hwmgr = ((struct pp_instance *)handle)->hwmgr; + + if (hwmgr == NULL || hwmgr->hwmgr_func == NULL || + hwmgr->hwmgr_func->set_fan_speed_percent == NULL) + return -EINVAL; + + return hwmgr->hwmgr_func->set_fan_speed_percent(hwmgr, percent); +} + +static int pp_dpm_get_fan_speed_percent(void *handle, uint32_t *speed) +{ + struct pp_hwmgr *hwmgr; + + if (handle == NULL) + return -EINVAL; + + hwmgr = ((struct pp_instance 
*)handle)->hwmgr; + + if (hwmgr == NULL || hwmgr->hwmgr_func == NULL || + hwmgr->hwmgr_func->get_fan_speed_percent == NULL) + return -EINVAL; + + return hwmgr->hwmgr_func->get_fan_speed_percent(hwmgr, speed); +} + +static int pp_dpm_get_temperature(void *handle) +{ + struct pp_hwmgr *hwmgr; + + if (handle == NULL) + return -EINVAL; + + hwmgr = ((struct pp_instance *)handle)->hwmgr; + + if (hwmgr == NULL || hwmgr->hwmgr_func == NULL || + hwmgr->hwmgr_func->get_temperature == NULL) + return -EINVAL; + + return hwmgr->hwmgr_func->get_temperature(hwmgr); +} + +const struct amd_powerplay_funcs pp_dpm_funcs = { + .get_temperature = pp_dpm_get_temperature, + .load_firmware = pp_dpm_load_fw, + .wait_for_fw_loading_complete = pp_dpm_fw_loading_complete, + .force_performance_level = pp_dpm_force_performance_level, + .get_performance_level = pp_dpm_get_performance_level, + .get_current_power_state = pp_dpm_get_current_power_state, + .get_sclk = pp_dpm_get_sclk, + .get_mclk = pp_dpm_get_mclk, + .powergate_vce = pp_dpm_powergate_vce, + .powergate_uvd = pp_dpm_powergate_uvd, + .dispatch_tasks = pp_dpm_dispatch_tasks, + .print_current_performance_level = pp_debugfs_print_current_performance_level, + .set_fan_control_mode = pp_dpm_set_fan_control_mode, + .get_fan_control_mode = pp_dpm_get_fan_control_mode, + .set_fan_speed_percent = pp_dpm_set_fan_speed_percent, + .get_fan_speed_percent = pp_dpm_get_fan_speed_percent, +}; + +static int amd_pp_instance_init(struct amd_pp_init *pp_init, + struct amd_powerplay *amd_pp) +{ + int ret; + struct pp_instance *handle; + + handle = kzalloc(sizeof(struct pp_instance), GFP_KERNEL); + if (handle == NULL) + return -ENOMEM; + + handle->pp_valid = PP_VALID; + + ret = smum_init(pp_init, handle); + if (ret) + goto fail_smum; + + ret = hwmgr_init(pp_init, handle); + if (ret) + goto fail_hwmgr; + + ret = eventmgr_init(handle); + if (ret) + goto fail_eventmgr; + + amd_pp->pp_handle = handle; + return 0; + +fail_eventmgr: + hwmgr_fini(handle->hwmgr); +fail_hwmgr: + smum_fini(handle->smu_mgr); +fail_smum: + kfree(handle); + return ret; +} + +static int amd_pp_instance_fini(void *handle) +{ + struct pp_instance *instance = (struct pp_instance *)handle; + + if (instance == NULL) + return -EINVAL; + + eventmgr_fini(instance->eventmgr); + + hwmgr_fini(instance->hwmgr); + + smum_fini(instance->smu_mgr); + + kfree(handle); + return 0; +} + +int amd_powerplay_init(struct amd_pp_init *pp_init, + struct amd_powerplay *amd_pp) +{ + int ret; + + if (pp_init == NULL || amd_pp == NULL) + return -EINVAL; + + ret = amd_pp_instance_init(pp_init, amd_pp); + + if (ret) + return ret; + + amd_pp->ip_funcs = &pp_ip_funcs; + amd_pp->pp_funcs = &pp_dpm_funcs; + + return 0; +} + +int amd_powerplay_fini(void *handle) +{ + amd_pp_instance_fini(handle); + + return 0; +} + +/* export this function to DAL */ + +int amd_powerplay_display_configuration_change(void *handle, const void *input) +{ + struct pp_hwmgr *hwmgr; + const struct amd_pp_display_configuration *display_config = input; + + PP_CHECK((struct pp_instance *)handle); + + hwmgr = ((struct pp_instance *)handle)->hwmgr; + + phm_store_dal_configuration_data(hwmgr, display_config); + + return 0; +} + +int amd_powerplay_get_display_power_level(void *handle, + struct amd_pp_dal_clock_info *output) +{ + struct pp_hwmgr *hwmgr; + + PP_CHECK((struct pp_instance *)handle); + + if (output == NULL) + return -EINVAL; + + hwmgr = ((struct pp_instance *)handle)->hwmgr; + + return phm_get_dal_power_level(hwmgr, output); +} diff --git 
a/drivers/gpu/drm/amd/powerplay/eventmgr/Makefile b/drivers/gpu/drm/amd/powerplay/eventmgr/Makefile new file mode 100644 index 000000000000..7509e3850087 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/eventmgr/Makefile @@ -0,0 +1,11 @@ +# +# Makefile for the 'event manager' sub-component of powerplay. +# It provides the event management services for the driver. + +EVENT_MGR = eventmgr.o eventinit.o eventmanagement.o \ + eventactionchains.o eventsubchains.o eventtasks.o psm.o + +AMD_PP_EVENT = $(addprefix $(AMD_PP_PATH)/eventmgr/,$(EVENT_MGR)) + +AMD_POWERPLAY_FILES += $(AMD_PP_EVENT) + diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c b/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c new file mode 100644 index 000000000000..83be3cf210e0 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c @@ -0,0 +1,289 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#include "eventmgr.h" +#include "eventactionchains.h" +#include "eventsubchains.h" + +static const pem_event_action *initialize_event[] = { + block_adjust_power_state_tasks, + power_budget_tasks, + system_config_tasks, + setup_asic_tasks, + enable_dynamic_state_management_tasks, + enable_clock_power_gatings_tasks, + get_2d_performance_state_tasks, + set_performance_state_tasks, + initialize_thermal_controller_tasks, + conditionally_force_3d_performance_state_tasks, + process_vbios_eventinfo_tasks, + broadcast_power_policy_tasks, + NULL +}; + +const struct action_chain initialize_action_chain = { + "Initialize", + initialize_event +}; + +static const pem_event_action *uninitialize_event[] = { + ungate_all_display_phys_tasks, + uninitialize_display_phy_access_tasks, + disable_gfx_voltage_island_power_gating_tasks, + disable_gfx_clock_gating_tasks, + set_boot_state_tasks, + adjust_power_state_tasks, + disable_dynamic_state_management_tasks, + disable_clock_power_gatings_tasks, + cleanup_asic_tasks, + prepare_for_pnp_stop_tasks, + NULL +}; + +const struct action_chain uninitialize_action_chain = { + "Uninitialize", + uninitialize_event +}; + +static const pem_event_action *power_source_change_event_pp_enabled[] = { + set_power_source_tasks, + set_power_saving_state_tasks, + adjust_power_state_tasks, + enable_disable_fps_tasks, + set_nbmcu_state_tasks, + broadcast_power_policy_tasks, + NULL +}; + +const struct action_chain power_source_change_action_chain_pp_enabled = { + "Power source change - PowerPlay enabled", + power_source_change_event_pp_enabled +}; + +static const pem_event_action *power_source_change_event_pp_disabled[] = { + set_power_source_tasks, + set_nbmcu_state_tasks, + NULL +}; + +const struct action_chain power_source_changes_action_chain_pp_disabled = { + "Power source change - PowerPlay disabled", + power_source_change_event_pp_disabled +}; + +static const pem_event_action *power_source_change_event_hardware_dc[] = { + set_power_source_tasks, + set_power_saving_state_tasks, + adjust_power_state_tasks, + enable_disable_fps_tasks, + reset_hardware_dc_notification_tasks, + set_nbmcu_state_tasks, + broadcast_power_policy_tasks, + NULL +}; + +const struct action_chain power_source_change_action_chain_hardware_dc = { + "Power source change - with Hardware DC switching", + power_source_change_event_hardware_dc +}; + +static const pem_event_action *suspend_event[] = { + reset_display_phy_access_tasks, + unregister_interrupt_tasks, + disable_gfx_voltage_island_power_gating_tasks, + disable_gfx_clock_gating_tasks, + notify_smu_suspend_tasks, + disable_smc_firmware_ctf_tasks, + set_boot_state_tasks, + adjust_power_state_tasks, + disable_fps_tasks, + vari_bright_suspend_tasks, + reset_fan_speed_to_default_tasks, + power_down_asic_tasks, + disable_stutter_mode_tasks, + set_connected_standby_tasks, + block_hw_access_tasks, + NULL +}; + +const struct action_chain suspend_action_chain = { + "Suspend", + suspend_event +}; + +static const pem_event_action *resume_event[] = { + unblock_hw_access_tasks, + resume_connected_standby_tasks, + notify_smu_resume_tasks, + reset_display_configCounter_tasks, + update_dal_configuration_tasks, + vari_bright_resume_tasks, + block_adjust_power_state_tasks, + setup_asic_tasks, + enable_stutter_mode_tasks, /*must do this in boot state and before SMC is started */ + enable_dynamic_state_management_tasks, + enable_clock_power_gatings_tasks, + enable_disable_bapm_tasks, + initialize_thermal_controller_tasks, + reset_boot_state_tasks, + 
adjust_power_state_tasks, + enable_disable_fps_tasks, + notify_hw_power_source_tasks, + process_vbios_event_info_tasks, + enable_gfx_clock_gating_tasks, + enable_gfx_voltage_island_power_gating_tasks, + reset_clock_gating_tasks, + notify_smu_vpu_recovery_end_tasks, + disable_vpu_cap_tasks, + execute_escape_sequence_tasks, + NULL +}; + + +const struct action_chain resume_action_chain = { + "resume", + resume_event +}; + +static const pem_event_action *complete_init_event[] = { + adjust_power_state_tasks, + enable_gfx_clock_gating_tasks, + enable_gfx_voltage_island_power_gating_tasks, + notify_power_state_change_tasks, + NULL +}; + +const struct action_chain complete_init_action_chain = { + "complete init", + complete_init_event +}; + +static const pem_event_action *enable_gfx_clock_gating_event[] = { + enable_gfx_clock_gating_tasks, + NULL +}; + +const struct action_chain enable_gfx_clock_gating_action_chain = { + "enable gfx clock gate", + enable_gfx_clock_gating_event +}; + +static const pem_event_action *disable_gfx_clock_gating_event[] = { + disable_gfx_clock_gating_tasks, + NULL +}; + +const struct action_chain disable_gfx_clock_gating_action_chain = { + "disable gfx clock gate", + disable_gfx_clock_gating_event +}; + +static const pem_event_action *enable_cgpg_event[] = { + enable_cgpg_tasks, + NULL +}; + +const struct action_chain enable_cgpg_action_chain = { + "eable cg pg", + enable_cgpg_event +}; + +static const pem_event_action *disable_cgpg_event[] = { + disable_cgpg_tasks, + NULL +}; + +const struct action_chain disable_cgpg_action_chain = { + "disable cg pg", + disable_cgpg_event +}; + + +/* Enable user _2d performance and activate */ + +static const pem_event_action *enable_user_state_event[] = { + create_new_user_performance_state_tasks, + adjust_power_state_tasks, + NULL +}; + +const struct action_chain enable_user_state_action_chain = { + "Enable user state", + enable_user_state_event +}; + +static const pem_event_action *enable_user_2d_performance_event[] = { + enable_user_2d_performance_tasks, + add_user_2d_performance_state_tasks, + set_performance_state_tasks, + adjust_power_state_tasks, + delete_user_2d_performance_state_tasks, + NULL +}; + +const struct action_chain enable_user_2d_performance_action_chain = { + "enable_user_2d_performance_event_activate", + enable_user_2d_performance_event +}; + + +static const pem_event_action *disable_user_2d_performance_event[] = { + disable_user_2d_performance_tasks, + delete_user_2d_performance_state_tasks, + NULL +}; + +const struct action_chain disable_user_2d_performance_action_chain = { + "disable_user_2d_performance_event", + disable_user_2d_performance_event +}; + + +static const pem_event_action *display_config_change_event[] = { + /* countDisplayConfigurationChangeEventTasks, */ + unblock_adjust_power_state_tasks, + set_cpu_power_state, + notify_hw_power_source_tasks, + /* updateDALConfigurationTasks, + variBrightDisplayConfigurationChangeTasks, */ + adjust_power_state_tasks, + /*enableDisableFPSTasks, + setNBMCUStateTasks, + notifyPCIEDeviceReadyTasks,*/ + NULL +}; + +const struct action_chain display_config_change_action_chain = { + "Display configuration change", + display_config_change_event +}; + +static const pem_event_action *readjust_power_state_event[] = { + adjust_power_state_tasks, + NULL +}; + +const struct action_chain readjust_power_state_action_chain = { + "re-adjust power state", + readjust_power_state_event +}; + diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.h 
b/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.h new file mode 100644 index 000000000000..f181e53cdcda --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.h @@ -0,0 +1,62 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#ifndef _EVENT_ACTION_CHAINS_H_ +#define _EVENT_ACTION_CHAINS_H_ +#include "eventmgr.h" + +extern const struct action_chain initialize_action_chain; + +extern const struct action_chain uninitialize_action_chain; + +extern const struct action_chain power_source_change_action_chain_pp_enabled; + +extern const struct action_chain power_source_changes_action_chain_pp_disabled; + +extern const struct action_chain power_source_change_action_chain_hardware_dc; + +extern const struct action_chain suspend_action_chain; + +extern const struct action_chain resume_action_chain; + +extern const struct action_chain complete_init_action_chain; + +extern const struct action_chain enable_gfx_clock_gating_action_chain; + +extern const struct action_chain disable_gfx_clock_gating_action_chain; + +extern const struct action_chain enable_cgpg_action_chain; + +extern const struct action_chain disable_cgpg_action_chain; + +extern const struct action_chain enable_user_2d_performance_action_chain; + +extern const struct action_chain disable_user_2d_performance_action_chain; + +extern const struct action_chain enable_user_state_action_chain; + +extern const struct action_chain readjust_power_state_action_chain; + +extern const struct action_chain display_config_change_action_chain; + +#endif /*_EVENT_ACTION_CHAINS_H_*/ + diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventinit.c b/drivers/gpu/drm/amd/powerplay/eventmgr/eventinit.c new file mode 100644 index 000000000000..d5ec8ccbe97d --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/eventmgr/eventinit.c @@ -0,0 +1,195 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#include "eventmgr.h" +#include "eventinit.h" +#include "ppinterrupt.h" +#include "hardwaremanager.h" + +void pem_init_feature_info(struct pp_eventmgr *eventmgr) +{ + + /* PowerPlay info */ + eventmgr->ui_state_info[PP_PowerSource_AC].default_ui_lable = + PP_StateUILabel_Performance; + + eventmgr->ui_state_info[PP_PowerSource_AC].current_ui_label = + PP_StateUILabel_Performance; + + eventmgr->ui_state_info[PP_PowerSource_DC].default_ui_lable = + PP_StateUILabel_Battery; + + eventmgr->ui_state_info[PP_PowerSource_DC].current_ui_label = + PP_StateUILabel_Battery; + + if (phm_cap_enabled(eventmgr->platform_descriptor->platformCaps, PHM_PlatformCaps_PowerPlaySupport)) { + eventmgr->features[PP_Feature_PowerPlay].supported = true; + eventmgr->features[PP_Feature_PowerPlay].version = PEM_CURRENT_POWERPLAY_FEATURE_VERSION; + eventmgr->features[PP_Feature_PowerPlay].enabled_default = true; + eventmgr->features[PP_Feature_PowerPlay].enabled = true; + } else { + eventmgr->features[PP_Feature_PowerPlay].supported = false; + eventmgr->features[PP_Feature_PowerPlay].enabled = false; + eventmgr->features[PP_Feature_PowerPlay].enabled_default = false; + } + + eventmgr->features[PP_Feature_Force3DClock].supported = true; + eventmgr->features[PP_Feature_Force3DClock].enabled = false; + eventmgr->features[PP_Feature_Force3DClock].enabled_default = false; + eventmgr->features[PP_Feature_Force3DClock].version = 1; + + /* over drive*/ + eventmgr->features[PP_Feature_User2DPerformance].version = 4; + eventmgr->features[PP_Feature_User3DPerformance].version = 4; + eventmgr->features[PP_Feature_OverdriveTest].version = 4; + + eventmgr->features[PP_Feature_OverDrive].version = 4; + eventmgr->features[PP_Feature_OverDrive].enabled = false; + eventmgr->features[PP_Feature_OverDrive].enabled_default = false; + + eventmgr->features[PP_Feature_User2DPerformance].supported = false; + eventmgr->features[PP_Feature_User2DPerformance].enabled = false; + eventmgr->features[PP_Feature_User2DPerformance].enabled_default = false; + + eventmgr->features[PP_Feature_User3DPerformance].supported = false; + eventmgr->features[PP_Feature_User3DPerformance].enabled = false; + eventmgr->features[PP_Feature_User3DPerformance].enabled_default = false; + + eventmgr->features[PP_Feature_OverdriveTest].supported = false; + eventmgr->features[PP_Feature_OverdriveTest].enabled = false; + eventmgr->features[PP_Feature_OverdriveTest].enabled_default = false; 
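/*
 * Note (illustration only, not part of the patch itself): the feature table
 * filled in by pem_init_feature_info() is plain data; nothing in eventinit.c
 * acts on it.  Event tasks are expected to consult the supported/enabled pair
 * before doing feature-specific work, along the lines of the hypothetical
 * guard below (do_feature_specific_work is a made-up placeholder):
 *
 *	if (eventmgr->features[PP_Feature_PowerPlay].supported &&
 *	    eventmgr->features[PP_Feature_PowerPlay].enabled)
 *		do_feature_specific_work(eventmgr);
 *	else
 *		return 0;
 */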
+ + eventmgr->features[PP_Feature_OverDrive].supported = false; + + eventmgr->features[PP_Feature_PowerBudgetWaiver].enabled_default = false; + eventmgr->features[PP_Feature_PowerBudgetWaiver].version = 1; + eventmgr->features[PP_Feature_PowerBudgetWaiver].supported = false; + eventmgr->features[PP_Feature_PowerBudgetWaiver].enabled = false; + + /* Multi UVD States support */ + eventmgr->features[PP_Feature_MultiUVDState].supported = false; + eventmgr->features[PP_Feature_MultiUVDState].enabled = false; + eventmgr->features[PP_Feature_MultiUVDState].enabled_default = false; + + /* Dynamic UVD States support */ + eventmgr->features[PP_Feature_DynamicUVDState].supported = false; + eventmgr->features[PP_Feature_DynamicUVDState].enabled = false; + eventmgr->features[PP_Feature_DynamicUVDState].enabled_default = false; + + /* VCE DPM support */ + eventmgr->features[PP_Feature_VCEDPM].supported = false; + eventmgr->features[PP_Feature_VCEDPM].enabled = false; + eventmgr->features[PP_Feature_VCEDPM].enabled_default = false; + + /* ACP PowerGating support */ + eventmgr->features[PP_Feature_ACP_POWERGATING].supported = false; + eventmgr->features[PP_Feature_ACP_POWERGATING].enabled = false; + eventmgr->features[PP_Feature_ACP_POWERGATING].enabled_default = false; + + /* PPM support */ + eventmgr->features[PP_Feature_PPM].version = 1; + eventmgr->features[PP_Feature_PPM].supported = false; + eventmgr->features[PP_Feature_PPM].enabled = false; + + /* FFC support (enables fan and temp settings, Gemini needs temp settings) */ + if (phm_cap_enabled(eventmgr->platform_descriptor->platformCaps, PHM_PlatformCaps_ODFuzzyFanControlSupport) || + phm_cap_enabled(eventmgr->platform_descriptor->platformCaps, PHM_PlatformCaps_GeminiRegulatorFanControlSupport)) { + eventmgr->features[PP_Feature_FFC].version = 1; + eventmgr->features[PP_Feature_FFC].supported = true; + eventmgr->features[PP_Feature_FFC].enabled = true; + eventmgr->features[PP_Feature_FFC].enabled_default = true; + } else { + eventmgr->features[PP_Feature_FFC].supported = false; + eventmgr->features[PP_Feature_FFC].enabled = false; + eventmgr->features[PP_Feature_FFC].enabled_default = false; + } + + eventmgr->features[PP_Feature_VariBright].supported = false; + eventmgr->features[PP_Feature_VariBright].enabled = false; + eventmgr->features[PP_Feature_VariBright].enabled_default = false; + + eventmgr->features[PP_Feature_BACO].supported = false; + eventmgr->features[PP_Feature_BACO].enabled = false; + eventmgr->features[PP_Feature_BACO].enabled_default = false; + + /* PowerDown feature support */ + eventmgr->features[PP_Feature_PowerDown].supported = false; + eventmgr->features[PP_Feature_PowerDown].enabled = false; + eventmgr->features[PP_Feature_PowerDown].enabled_default = false; + + eventmgr->features[PP_Feature_FPS].version = 1; + eventmgr->features[PP_Feature_FPS].supported = false; + eventmgr->features[PP_Feature_FPS].enabled_default = false; + eventmgr->features[PP_Feature_FPS].enabled = false; + + eventmgr->features[PP_Feature_ViPG].version = 1; + eventmgr->features[PP_Feature_ViPG].supported = false; + eventmgr->features[PP_Feature_ViPG].enabled_default = false; + eventmgr->features[PP_Feature_ViPG].enabled = false; +} + +static int thermal_interrupt_callback(void *private_data, + unsigned src_id, const uint32_t *iv_entry) +{ + /* TODO: handle PEM_Event_ThermalNotification using (struct pp_eventmgr *)private_data */ + printk(KERN_WARNING "[ powerplay ] current thermal is out of range\n"); + return 0; +} + +int pem_register_interrupts(struct pp_eventmgr *eventmgr) +{ + 
int result = 0; + struct pp_interrupt_registration_info info; + + info.call_back = thermal_interrupt_callback; + info.context = eventmgr; + + result = phm_register_thermal_interrupt(eventmgr->hwmgr, &info); + + /* TODO: + * 2. Register CTF event interrupt + * 3. Register for vbios events interrupt + * 4. Register External Throttle Interrupt + * 5. Register Smc To Host Interrupt + * */ + return result; +} + + +int pem_unregister_interrupts(struct pp_eventmgr *eventmgr) +{ + return 0; +} + + +void pem_uninit_featureInfo(struct pp_eventmgr *eventmgr) +{ + eventmgr->features[PP_Feature_MultiUVDState].supported = false; + eventmgr->features[PP_Feature_VariBright].supported = false; + eventmgr->features[PP_Feature_PowerBudgetWaiver].supported = false; + eventmgr->features[PP_Feature_OverDrive].supported = false; + eventmgr->features[PP_Feature_OverdriveTest].supported = false; + eventmgr->features[PP_Feature_User3DPerformance].supported = false; + eventmgr->features[PP_Feature_User2DPerformance].supported = false; + eventmgr->features[PP_Feature_PowerPlay].supported = false; + eventmgr->features[PP_Feature_Force3DClock].supported = false; +} diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventinit.h b/drivers/gpu/drm/amd/powerplay/eventmgr/eventinit.h new file mode 100644 index 000000000000..9ef96aab3f24 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/eventmgr/eventinit.h @@ -0,0 +1,34 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef _EVENTINIT_H_ +#define _EVENTINIT_H_ + +#define PEM_CURRENT_POWERPLAY_FEATURE_VERSION 4 + +void pem_init_feature_info(struct pp_eventmgr *eventmgr); +void pem_uninit_featureInfo(struct pp_eventmgr *eventmgr); +int pem_register_interrupts(struct pp_eventmgr *eventmgr); +int pem_unregister_interrupts(struct pp_eventmgr *eventmgr); + +#endif /* _EVENTINIT_H_ */ diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventmanagement.c b/drivers/gpu/drm/amd/powerplay/eventmgr/eventmanagement.c new file mode 100644 index 000000000000..1e2ad5603080 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/eventmgr/eventmanagement.c @@ -0,0 +1,215 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#include "eventmanagement.h" +#include "eventmgr.h" +#include "eventactionchains.h" + +int pem_init_event_action_chains(struct pp_eventmgr *eventmgr) +{ + int i; + + for (i = 0; i < AMD_PP_EVENT_MAX; i++) + eventmgr->event_chain[i] = NULL; + + eventmgr->event_chain[AMD_PP_EVENT_SUSPEND] = pem_get_suspend_action_chain(eventmgr); + eventmgr->event_chain[AMD_PP_EVENT_INITIALIZE] = pem_get_initialize_action_chain(eventmgr); + eventmgr->event_chain[AMD_PP_EVENT_UNINITIALIZE] = pem_get_uninitialize_action_chain(eventmgr); + eventmgr->event_chain[AMD_PP_EVENT_POWER_SOURCE_CHANGE] = pem_get_power_source_change_action_chain(eventmgr); + eventmgr->event_chain[AMD_PP_EVENT_HIBERNATE] = pem_get_hibernate_action_chain(eventmgr); + eventmgr->event_chain[AMD_PP_EVENT_RESUME] = pem_get_resume_action_chain(eventmgr); + eventmgr->event_chain[AMD_PP_EVENT_THERMAL_NOTIFICATION] = pem_get_thermal_notification_action_chain(eventmgr); + eventmgr->event_chain[AMD_PP_EVENT_VBIOS_NOTIFICATION] = pem_get_vbios_notification_action_chain(eventmgr); + eventmgr->event_chain[AMD_PP_EVENT_ENTER_THERMAL_STATE] = pem_get_enter_thermal_state_action_chain(eventmgr); + eventmgr->event_chain[AMD_PP_EVENT_EXIT_THERMAL_STATE] = pem_get_exit_thermal_state_action_chain(eventmgr); + eventmgr->event_chain[AMD_PP_EVENT_ENABLE_POWER_PLAY] = pem_get_enable_powerplay_action_chain(eventmgr); + eventmgr->event_chain[AMD_PP_EVENT_DISABLE_POWER_PLAY] = pem_get_disable_powerplay_action_chain(eventmgr); + eventmgr->event_chain[AMD_PP_EVENT_ENABLE_OVER_DRIVE_TEST] = pem_get_enable_overdrive_test_action_chain(eventmgr); + eventmgr->event_chain[AMD_PP_EVENT_DISABLE_OVER_DRIVE_TEST] = pem_get_disable_overdrive_test_action_chain(eventmgr); + eventmgr->event_chain[AMD_PP_EVENT_ENABLE_GFX_CLOCK_GATING] = pem_get_enable_gfx_clock_gating_action_chain(eventmgr); + eventmgr->event_chain[AMD_PP_EVENT_DISABLE_GFX_CLOCK_GATING] = pem_get_disable_gfx_clock_gating_action_chain(eventmgr); + eventmgr->event_chain[AMD_PP_EVENT_ENABLE_CGPG] = pem_get_enable_cgpg_action_chain(eventmgr); + eventmgr->event_chain[AMD_PP_EVENT_DISABLE_CGPG] = pem_get_disable_cgpg_action_chain(eventmgr); + eventmgr->event_chain[AMD_PP_EVENT_COMPLETE_INIT] = pem_get_complete_init_action_chain(eventmgr); + eventmgr->event_chain[AMD_PP_EVENT_SCREEN_ON] = pem_get_screen_on_action_chain(eventmgr); + eventmgr->event_chain[AMD_PP_EVENT_SCREEN_OFF] = pem_get_screen_off_action_chain(eventmgr); + 
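/*
 * For reference (illustration only): pem_handle_event() looks an event up in
 * this event_chain[] table and hands the resulting struct action_chain to
 * pem_excute_event_chain(), which walks the NULL-terminated array of
 * sub-chains and, inside each sub-chain, the NULL-terminated array of
 * pem_event_action tasks.  The first non-zero task result is latched; the
 * current sub-chain still runs to completion, remaining sub-chains are
 * skipped, and the latched value is returned.  Dispatching
 * AMD_PP_EVENT_READJUST_POWER_STATE, for example, resolves as:
 *
 *	pem_handle_event(eventmgr, AMD_PP_EVENT_READJUST_POWER_STATE, &data)
 *	  -> readjust_power_state_action_chain
 *	       -> readjust_power_state_event[]
 *	            -> adjust_power_state_tasks[]
 *	                 -> pem_task_notify_hw_mgr_display_configuration_change()
 *	                 -> pem_task_adjust_power_state()
 *	                 -> pem_task_notify_smc_display_config_after_power_state_adjustment()
 *	                 -> pem_task_update_allowed_performance_levels()
 */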
eventmgr->event_chain[AMD_PP_EVENT_PRE_SUSPEND] = pem_get_pre_suspend_action_chain(eventmgr); + eventmgr->event_chain[AMD_PP_EVENT_PRE_RESUME] = pem_get_pre_resume_action_chain(eventmgr); + eventmgr->event_chain[AMD_PP_EVENT_ENABLE_USER_STATE] = pem_enable_user_state_action_chain(eventmgr); + eventmgr->event_chain[AMD_PP_EVENT_READJUST_POWER_STATE] = pem_readjust_power_state_action_chain(eventmgr); + eventmgr->event_chain[AMD_PP_EVENT_DISPLAY_CONFIG_CHANGE] = pem_display_config_change_action_chain(eventmgr); + return 0; +} + +int pem_excute_event_chain(struct pp_eventmgr *eventmgr, const struct action_chain *event_chain, struct pem_event_data *event_data) +{ + const pem_event_action **paction_chain; + const pem_event_action *psub_chain; + int tmp_result = 0; + int result = 0; + + if (eventmgr == NULL || event_chain == NULL || event_data == NULL) + return -EINVAL; + + for (paction_chain = event_chain->action_chain; NULL != *paction_chain; paction_chain++) { + if (0 != result) + return result; + + for (psub_chain = *paction_chain; NULL != *psub_chain; psub_chain++) { + tmp_result = (*psub_chain)(eventmgr, event_data); + if (0 == result) + result = tmp_result; + } + } + + return result; +} + +const struct action_chain *pem_get_suspend_action_chain(struct pp_eventmgr *eventmgr) +{ + return &suspend_action_chain; +} + +const struct action_chain *pem_get_initialize_action_chain(struct pp_eventmgr *eventmgr) +{ + return &initialize_action_chain; +} + +const struct action_chain *pem_get_uninitialize_action_chain(struct pp_eventmgr *eventmgr) +{ + return &uninitialize_action_chain; +} + +const struct action_chain *pem_get_power_source_change_action_chain(struct pp_eventmgr *eventmgr) +{ + return &power_source_change_action_chain_pp_enabled; /* other case base on feature info*/ +} + +const struct action_chain *pem_get_resume_action_chain(struct pp_eventmgr *eventmgr) +{ + return &resume_action_chain; +} + +const struct action_chain *pem_get_hibernate_action_chain(struct pp_eventmgr *eventmgr) +{ + return NULL; +} + +const struct action_chain *pem_get_thermal_notification_action_chain(struct pp_eventmgr *eventmgr) +{ + return NULL; +} + +const struct action_chain *pem_get_vbios_notification_action_chain(struct pp_eventmgr *eventmgr) +{ + return NULL; +} + +const struct action_chain *pem_get_enter_thermal_state_action_chain(struct pp_eventmgr *eventmgr) +{ + return NULL; +} + +const struct action_chain *pem_get_exit_thermal_state_action_chain(struct pp_eventmgr *eventmgr) +{ + return NULL; +} + +const struct action_chain *pem_get_enable_powerplay_action_chain(struct pp_eventmgr *eventmgr) +{ + return NULL; +} + +const struct action_chain *pem_get_disable_powerplay_action_chain(struct pp_eventmgr *eventmgr) +{ + return NULL; +} + +const struct action_chain *pem_get_enable_overdrive_test_action_chain(struct pp_eventmgr *eventmgr) +{ + return NULL; +} + +const struct action_chain *pem_get_disable_overdrive_test_action_chain(struct pp_eventmgr *eventmgr) +{ + return NULL; +} + +const struct action_chain *pem_get_enable_gfx_clock_gating_action_chain(struct pp_eventmgr *eventmgr) +{ + return &enable_gfx_clock_gating_action_chain; +} + +const struct action_chain *pem_get_disable_gfx_clock_gating_action_chain(struct pp_eventmgr *eventmgr) +{ + return &disable_gfx_clock_gating_action_chain; +} + +const struct action_chain *pem_get_enable_cgpg_action_chain(struct pp_eventmgr *eventmgr) +{ + return &enable_cgpg_action_chain; +} + +const struct action_chain *pem_get_disable_cgpg_action_chain(struct pp_eventmgr 
*eventmgr) +{ + return &disable_cgpg_action_chain; +} + +const struct action_chain *pem_get_complete_init_action_chain(struct pp_eventmgr *eventmgr) +{ + return &complete_init_action_chain; +} + +const struct action_chain *pem_get_screen_on_action_chain(struct pp_eventmgr *eventmgr) +{ + return NULL; +} + +const struct action_chain *pem_get_screen_off_action_chain(struct pp_eventmgr *eventmgr) +{ + return NULL; +} + +const struct action_chain *pem_get_pre_suspend_action_chain(struct pp_eventmgr *eventmgr) +{ + return NULL; +} + +const struct action_chain *pem_get_pre_resume_action_chain(struct pp_eventmgr *eventmgr) +{ + return NULL; +} + +const struct action_chain *pem_enable_user_state_action_chain(struct pp_eventmgr *eventmgr) +{ + return &enable_user_state_action_chain; +} + +const struct action_chain *pem_readjust_power_state_action_chain(struct pp_eventmgr *eventmgr) +{ + return &readjust_power_state_action_chain; +} + +const struct action_chain *pem_display_config_change_action_chain(struct pp_eventmgr *eventmgr) +{ + return &display_config_change_action_chain; +} diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventmanagement.h b/drivers/gpu/drm/amd/powerplay/eventmgr/eventmanagement.h new file mode 100644 index 000000000000..383d4b295aa9 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/eventmgr/eventmanagement.h @@ -0,0 +1,59 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#ifndef _EVENT_MANAGEMENT_H_ +#define _EVENT_MANAGEMENT_H_ + +#include "eventmgr.h" + +int pem_init_event_action_chains(struct pp_eventmgr *eventmgr); +int pem_excute_event_chain(struct pp_eventmgr *eventmgr, const struct action_chain *event_chain, struct pem_event_data *event_data); +const struct action_chain *pem_get_suspend_action_chain(struct pp_eventmgr *eventmgr); +const struct action_chain *pem_get_initialize_action_chain(struct pp_eventmgr *eventmgr); +const struct action_chain *pem_get_uninitialize_action_chain(struct pp_eventmgr *eventmgr); +const struct action_chain *pem_get_power_source_change_action_chain(struct pp_eventmgr *eventmgr); +const struct action_chain *pem_get_resume_action_chain(struct pp_eventmgr *eventmgr); +const struct action_chain *pem_get_hibernate_action_chain(struct pp_eventmgr *eventmgr); +const struct action_chain *pem_get_thermal_notification_action_chain(struct pp_eventmgr *eventmgr); +const struct action_chain *pem_get_vbios_notification_action_chain(struct pp_eventmgr *eventmgr); +const struct action_chain *pem_get_enter_thermal_state_action_chain(struct pp_eventmgr *eventmgr); +const struct action_chain *pem_get_exit_thermal_state_action_chain(struct pp_eventmgr *eventmgr); +const struct action_chain *pem_get_enable_powerplay_action_chain(struct pp_eventmgr *eventmgr); +const struct action_chain *pem_get_disable_powerplay_action_chain(struct pp_eventmgr *eventmgr); +const struct action_chain *pem_get_enable_overdrive_test_action_chain(struct pp_eventmgr *eventmgr); +const struct action_chain *pem_get_disable_overdrive_test_action_chain(struct pp_eventmgr *eventmgr); +const struct action_chain *pem_get_enable_gfx_clock_gating_action_chain(struct pp_eventmgr *eventmgr); +const struct action_chain *pem_get_disable_gfx_clock_gating_action_chain(struct pp_eventmgr *eventmgr); +const struct action_chain *pem_get_enable_cgpg_action_chain(struct pp_eventmgr *eventmgr); +const struct action_chain *pem_get_disable_cgpg_action_chain(struct pp_eventmgr *eventmgr); +const struct action_chain *pem_get_complete_init_action_chain(struct pp_eventmgr *eventmgr); +const struct action_chain *pem_get_screen_on_action_chain(struct pp_eventmgr *eventmgr); +const struct action_chain *pem_get_screen_off_action_chain(struct pp_eventmgr *eventmgr); +const struct action_chain *pem_get_pre_suspend_action_chain(struct pp_eventmgr *eventmgr); +const struct action_chain *pem_get_pre_resume_action_chain(struct pp_eventmgr *eventmgr); + +extern const struct action_chain *pem_enable_user_state_action_chain(struct pp_eventmgr *eventmgr); +extern const struct action_chain *pem_readjust_power_state_action_chain(struct pp_eventmgr *eventmgr); +const struct action_chain *pem_display_config_change_action_chain(struct pp_eventmgr *eventmgr); + + +#endif /* _EVENT_MANAGEMENT_H_ */ diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c b/drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c new file mode 100644 index 000000000000..52a3efc97f05 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c @@ -0,0 +1,114 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#include <linux/types.h> +#include <linux/kernel.h> +#include <linux/slab.h> +#include "eventmgr.h" +#include "hwmgr.h" +#include "eventinit.h" +#include "eventmanagement.h" + +static int pem_init(struct pp_eventmgr *eventmgr) +{ + int result = 0; + struct pem_event_data event_data; + + /* Initialize PowerPlay feature info */ + pem_init_feature_info(eventmgr); + + /* Initialize event action chains */ + pem_init_event_action_chains(eventmgr); + + /* Call initialization event */ + result = pem_handle_event(eventmgr, AMD_PP_EVENT_INITIALIZE, &event_data); + + if (0 != result) + return result; + + /* Register interrupt callback functions */ + result = pem_register_interrupts(eventmgr); + return 0; +} + +static void pem_fini(struct pp_eventmgr *eventmgr) +{ + struct pem_event_data event_data; + + pem_uninit_featureInfo(eventmgr); + pem_unregister_interrupts(eventmgr); + + pem_handle_event(eventmgr, AMD_PP_EVENT_UNINITIALIZE, &event_data); + + if (eventmgr != NULL) + kfree(eventmgr); +} + +int eventmgr_init(struct pp_instance *handle) +{ + int result = 0; + struct pp_eventmgr *eventmgr; + + if (handle == NULL) + return -EINVAL; + + eventmgr = kzalloc(sizeof(struct pp_eventmgr), GFP_KERNEL); + if (eventmgr == NULL) + return -ENOMEM; + + eventmgr->hwmgr = handle->hwmgr; + handle->eventmgr = eventmgr; + + eventmgr->platform_descriptor = &(eventmgr->hwmgr->platform_descriptor); + eventmgr->pp_eventmgr_init = pem_init; + eventmgr->pp_eventmgr_fini = pem_fini; + + return result; +} + +int eventmgr_fini(struct pp_eventmgr *eventmgr) +{ + kfree(eventmgr); + return 0; +} + +static int pem_handle_event_unlocked(struct pp_eventmgr *eventmgr, enum amd_pp_event event, struct pem_event_data *data) +{ + if (eventmgr == NULL || event >= AMD_PP_EVENT_MAX || data == NULL) + return -EINVAL; + + return pem_excute_event_chain(eventmgr, eventmgr->event_chain[event], data); +} + +int pem_handle_event(struct pp_eventmgr *eventmgr, enum amd_pp_event event, struct pem_event_data *event_data) +{ + int r = 0; + + r = pem_handle_event_unlocked(eventmgr, event, event_data); + + return r; +} + +bool pem_is_hw_access_blocked(struct pp_eventmgr *eventmgr) +{ + return (eventmgr->block_adjust_power_state || phm_is_hw_access_blocked(eventmgr->hwmgr)); +} diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventsubchains.c b/drivers/gpu/drm/amd/powerplay/eventmgr/eventsubchains.c new file mode 100644 index 000000000000..9ef2d90e2886 --- /dev/null +++ 
b/drivers/gpu/drm/amd/powerplay/eventmgr/eventsubchains.c @@ -0,0 +1,410 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include "eventmgr.h" +#include "eventsubchains.h" +#include "eventtasks.h" +#include "hardwaremanager.h" + +const pem_event_action reset_display_phy_access_tasks[] = { + pem_task_reset_display_phys_access, + NULL +}; + +const pem_event_action broadcast_power_policy_tasks[] = { + /* PEM_Task_BroadcastPowerPolicyChange, */ + NULL +}; + +const pem_event_action unregister_interrupt_tasks[] = { + pem_task_unregister_interrupts, + NULL +}; + +/* Disable GFX Voltage Islands Power Gating */ +const pem_event_action disable_gfx_voltage_island_powergating_tasks[] = { + pem_task_disable_voltage_island_power_gating, + NULL +}; + +const pem_event_action disable_gfx_clockgating_tasks[] = { + pem_task_disable_gfx_clock_gating, + NULL +}; + +const pem_event_action block_adjust_power_state_tasks[] = { + pem_task_block_adjust_power_state, + NULL +}; + + +const pem_event_action unblock_adjust_power_state_tasks[] = { + pem_task_unblock_adjust_power_state, + NULL +}; + +const pem_event_action set_performance_state_tasks[] = { + pem_task_set_performance_state, + NULL +}; + +const pem_event_action get_2d_performance_state_tasks[] = { + pem_task_get_2D_performance_state_id, + NULL +}; + +const pem_event_action conditionally_force3D_performance_state_tasks[] = { + pem_task_conditionally_force_3d_performance_state, + NULL +}; + +const pem_event_action process_vbios_eventinfo_tasks[] = { + /* PEM_Task_ProcessVbiosEventInfo,*/ + NULL +}; + +const pem_event_action enable_dynamic_state_management_tasks[] = { + /* PEM_Task_ResetBAPMPolicyChangedFlag,*/ + pem_task_get_boot_state_id, + pem_task_enable_dynamic_state_management, + pem_task_register_interrupts, + NULL +}; + +const pem_event_action enable_clock_power_gatings_tasks[] = { + pem_task_enable_clock_power_gatings_tasks, + pem_task_powerdown_uvd_tasks, + pem_task_powerdown_vce_tasks, + NULL +}; + +const pem_event_action setup_asic_tasks[] = { + pem_task_setup_asic, + NULL +}; + +const pem_event_action power_budget_tasks[] = { + /* TODO + * PEM_Task_PowerBudgetWaiverAvailable, + * PEM_Task_PowerBudgetWarningMessage, + * PEM_Task_PruneStatesBasedOnPowerBudget, + */ + NULL +}; + +const pem_event_action system_config_tasks[] = { + /* PEM_Task_PruneStatesBasedOnSystemConfig,*/ + NULL +}; + + +const pem_event_action 
conditionally_force_3d_performance_state_tasks[] = { + pem_task_conditionally_force_3d_performance_state, + NULL +}; + +const pem_event_action ungate_all_display_phys_tasks[] = { + /* PEM_Task_GetDisplayPhyAccessInfo */ + NULL +}; + +const pem_event_action uninitialize_display_phy_access_tasks[] = { + /* PEM_Task_UninitializeDisplayPhysAccess, */ + NULL +}; + +const pem_event_action disable_gfx_voltage_island_power_gating_tasks[] = { + /* PEM_Task_DisableVoltageIslandPowerGating, */ + NULL +}; + +const pem_event_action disable_gfx_clock_gating_tasks[] = { + pem_task_disable_gfx_clock_gating, + NULL +}; + +const pem_event_action set_boot_state_tasks[] = { + pem_task_get_boot_state_id, + pem_task_set_boot_state, + NULL +}; + +const pem_event_action adjust_power_state_tasks[] = { + pem_task_notify_hw_mgr_display_configuration_change, + pem_task_adjust_power_state, + pem_task_notify_smc_display_config_after_power_state_adjustment, + pem_task_update_allowed_performance_levels, + /* to do pem_task_Enable_disable_bapm, */ + NULL +}; + +const pem_event_action disable_dynamic_state_management_tasks[] = { + pem_task_unregister_interrupts, + pem_task_get_boot_state_id, + pem_task_disable_dynamic_state_management, + NULL +}; + +const pem_event_action disable_clock_power_gatings_tasks[] = { + pem_task_disable_clock_power_gatings_tasks, + NULL +}; + +const pem_event_action cleanup_asic_tasks[] = { + /* PEM_Task_DisableFPS,*/ + pem_task_cleanup_asic, + NULL +}; + +const pem_event_action prepare_for_pnp_stop_tasks[] = { + /* PEM_Task_PrepareForPnpStop,*/ + NULL +}; + +const pem_event_action set_power_source_tasks[] = { + pem_task_set_power_source, + pem_task_notify_hw_of_power_source, + NULL +}; + +const pem_event_action set_power_saving_state_tasks[] = { + pem_task_reset_power_saving_state, + pem_task_get_power_saving_state, + pem_task_set_power_saving_state, + /* PEM_Task_ResetODDCState, + * PEM_Task_GetODDCState, + * PEM_Task_SetODDCState,*/ + NULL +}; + +const pem_event_action enable_disable_fps_tasks[] = { + /* PEM_Task_EnableDisableFPS,*/ + NULL +}; + +const pem_event_action set_nbmcu_state_tasks[] = { + /* PEM_Task_NBMCUStateChange,*/ + NULL +}; + +const pem_event_action reset_hardware_dc_notification_tasks[] = { + /* PEM_Task_ResetHardwareDCNotification,*/ + NULL +}; + + +const pem_event_action notify_smu_suspend_tasks[] = { + /* PEM_Task_NotifySMUSuspend,*/ + NULL +}; + +const pem_event_action disable_smc_firmware_ctf_tasks[] = { + /* PEM_Task_DisableSMCFirmwareCTF,*/ + NULL +}; + +const pem_event_action disable_fps_tasks[] = { + /* PEM_Task_DisableFPS,*/ + NULL +}; + +const pem_event_action vari_bright_suspend_tasks[] = { + /* PEM_Task_VariBright_Suspend,*/ + NULL +}; + +const pem_event_action reset_fan_speed_to_default_tasks[] = { + /* PEM_Task_ResetFanSpeedToDefault,*/ + NULL +}; + +const pem_event_action power_down_asic_tasks[] = { + /* PEM_Task_DisableFPS,*/ + pem_task_power_down_asic, + NULL +}; + +const pem_event_action disable_stutter_mode_tasks[] = { + /* PEM_Task_DisableStutterMode,*/ + NULL +}; + +const pem_event_action set_connected_standby_tasks[] = { + /* PEM_Task_SetConnectedStandby,*/ + NULL +}; + +const pem_event_action block_hw_access_tasks[] = { + pem_task_block_hw_access, + NULL +}; + +const pem_event_action unblock_hw_access_tasks[] = { + pem_task_un_block_hw_access, + NULL +}; + +const pem_event_action resume_connected_standby_tasks[] = { + /* PEM_Task_ResumeConnectedStandby,*/ + NULL +}; + +const pem_event_action notify_smu_resume_tasks[] = { + /* 
PEM_Task_NotifySMUResume,*/ + NULL +}; + +const pem_event_action reset_display_configCounter_tasks[] = { + pem_task_reset_display_phys_access, + NULL +}; + +const pem_event_action update_dal_configuration_tasks[] = { + /* PEM_Task_CheckVBlankTime,*/ + NULL +}; + +const pem_event_action vari_bright_resume_tasks[] = { + /* PEM_Task_VariBright_Resume,*/ + NULL +}; + +const pem_event_action notify_hw_power_source_tasks[] = { + pem_task_notify_hw_of_power_source, + NULL +}; + +const pem_event_action process_vbios_event_info_tasks[] = { + /* PEM_Task_ProcessVbiosEventInfo,*/ + NULL +}; + +const pem_event_action enable_gfx_clock_gating_tasks[] = { + pem_task_enable_gfx_clock_gating, + NULL +}; + +const pem_event_action enable_gfx_voltage_island_power_gating_tasks[] = { + pem_task_enable_voltage_island_power_gating, + NULL +}; + +const pem_event_action reset_clock_gating_tasks[] = { + /* PEM_Task_ResetClockGating*/ + NULL +}; + +const pem_event_action notify_smu_vpu_recovery_end_tasks[] = { + /* PEM_Task_NotifySmuVPURecoveryEnd,*/ + NULL +}; + +const pem_event_action disable_vpu_cap_tasks[] = { + /* PEM_Task_DisableVPUCap,*/ + NULL +}; + +const pem_event_action execute_escape_sequence_tasks[] = { + /* PEM_Task_ExecuteEscapesequence,*/ + NULL +}; + +const pem_event_action notify_power_state_change_tasks[] = { + pem_task_notify_power_state_change, + NULL +}; + +const pem_event_action enable_cgpg_tasks[] = { + pem_task_enable_cgpg, + NULL +}; + +const pem_event_action disable_cgpg_tasks[] = { + pem_task_disable_cgpg, + NULL +}; + +const pem_event_action enable_user_2d_performance_tasks[] = { + /* PEM_Task_SetUser2DPerformanceFlag,*/ + /* PEM_Task_UpdateUser2DPerformanceEnableEvents,*/ + NULL +}; + +const pem_event_action add_user_2d_performance_state_tasks[] = { + /* PEM_Task_Get2DPerformanceTemplate,*/ + /* PEM_Task_AllocateNewPowerStateMemory,*/ + /* PEM_Task_CopyNewPowerStateInfo,*/ + /* PEM_Task_UpdateNewPowerStateClocks,*/ + /* PEM_Task_UpdateNewPowerStateUser2DPerformanceFlag,*/ + /* PEM_Task_AddPowerState,*/ + /* PEM_Task_ReleaseNewPowerStateMemory,*/ + NULL +}; + +const pem_event_action delete_user_2d_performance_state_tasks[] = { + /* PEM_Task_GetCurrentUser2DPerformanceStateID,*/ + /* PEM_Task_DeletePowerState,*/ + /* PEM_Task_SetCurrentUser2DPerformanceStateID,*/ + NULL +}; + +const pem_event_action disable_user_2d_performance_tasks[] = { + /* PEM_Task_ResetUser2DPerformanceFlag,*/ + /* PEM_Task_UpdateUser2DPerformanceDisableEvents,*/ + NULL +}; + +const pem_event_action enable_stutter_mode_tasks[] = { + pem_task_enable_stutter_mode, + NULL +}; + +const pem_event_action enable_disable_bapm_tasks[] = { + /*PEM_Task_EnableDisableBAPM,*/ + NULL +}; + +const pem_event_action reset_boot_state_tasks[] = { + pem_task_reset_boot_state, + NULL +}; + +const pem_event_action create_new_user_performance_state_tasks[] = { + pem_task_create_user_performance_state, + NULL +}; + +const pem_event_action initialize_thermal_controller_tasks[] = { + pem_task_initialize_thermal_controller, + NULL +}; + +const pem_event_action uninitialize_thermal_controller_tasks[] = { + pem_task_uninitialize_thermal_controller, + NULL +}; + +const pem_event_action set_cpu_power_state[] = { + pem_task_set_cpu_power_state, + NULL +}; \ No newline at end of file diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventsubchains.h b/drivers/gpu/drm/amd/powerplay/eventmgr/eventsubchains.h new file mode 100644 index 000000000000..7714cb927428 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/eventmgr/eventsubchains.h @@ -0,0 +1,100 
@@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef _EVENT_SUB_CHAINS_H_ +#define _EVENT_SUB_CHAINS_H_ + +#include "eventmgr.h" + +extern const pem_event_action reset_display_phy_access_tasks[]; +extern const pem_event_action broadcast_power_policy_tasks[]; +extern const pem_event_action unregister_interrupt_tasks[]; +extern const pem_event_action disable_GFX_voltage_island_powergating_tasks[]; +extern const pem_event_action disable_GFX_clockgating_tasks[]; +extern const pem_event_action block_adjust_power_state_tasks[]; +extern const pem_event_action unblock_adjust_power_state_tasks[]; +extern const pem_event_action set_performance_state_tasks[]; +extern const pem_event_action get_2D_performance_state_tasks[]; +extern const pem_event_action conditionally_force3D_performance_state_tasks[]; +extern const pem_event_action process_vbios_eventinfo_tasks[]; +extern const pem_event_action enable_dynamic_state_management_tasks[]; +extern const pem_event_action enable_clock_power_gatings_tasks[]; +extern const pem_event_action conditionally_force3D_performance_state_tasks[]; +extern const pem_event_action setup_asic_tasks[]; +extern const pem_event_action power_budget_tasks[]; +extern const pem_event_action system_config_tasks[]; +extern const pem_event_action get_2d_performance_state_tasks[]; +extern const pem_event_action conditionally_force_3d_performance_state_tasks[]; +extern const pem_event_action ungate_all_display_phys_tasks[]; +extern const pem_event_action uninitialize_display_phy_access_tasks[]; +extern const pem_event_action disable_gfx_voltage_island_power_gating_tasks[]; +extern const pem_event_action disable_gfx_clock_gating_tasks[]; +extern const pem_event_action set_boot_state_tasks[]; +extern const pem_event_action adjust_power_state_tasks[]; +extern const pem_event_action disable_dynamic_state_management_tasks[]; +extern const pem_event_action disable_clock_power_gatings_tasks[]; +extern const pem_event_action cleanup_asic_tasks[]; +extern const pem_event_action prepare_for_pnp_stop_tasks[]; +extern const pem_event_action set_power_source_tasks[]; +extern const pem_event_action set_power_saving_state_tasks[]; +extern const pem_event_action enable_disable_fps_tasks[]; +extern const pem_event_action set_nbmcu_state_tasks[]; +extern const pem_event_action reset_hardware_dc_notification_tasks[]; +extern const pem_event_action notify_smu_suspend_tasks[]; +extern const pem_event_action 
disable_smc_firmware_ctf_tasks[]; +extern const pem_event_action disable_fps_tasks[]; +extern const pem_event_action vari_bright_suspend_tasks[]; +extern const pem_event_action reset_fan_speed_to_default_tasks[]; +extern const pem_event_action power_down_asic_tasks[]; +extern const pem_event_action disable_stutter_mode_tasks[]; +extern const pem_event_action set_connected_standby_tasks[]; +extern const pem_event_action block_hw_access_tasks[]; +extern const pem_event_action unblock_hw_access_tasks[]; +extern const pem_event_action resume_connected_standby_tasks[]; +extern const pem_event_action notify_smu_resume_tasks[]; +extern const pem_event_action reset_display_configCounter_tasks[]; +extern const pem_event_action update_dal_configuration_tasks[]; +extern const pem_event_action vari_bright_resume_tasks[]; +extern const pem_event_action notify_hw_power_source_tasks[]; +extern const pem_event_action process_vbios_event_info_tasks[]; +extern const pem_event_action enable_gfx_clock_gating_tasks[]; +extern const pem_event_action enable_gfx_voltage_island_power_gating_tasks[]; +extern const pem_event_action reset_clock_gating_tasks[]; +extern const pem_event_action notify_smu_vpu_recovery_end_tasks[]; +extern const pem_event_action disable_vpu_cap_tasks[]; +extern const pem_event_action execute_escape_sequence_tasks[]; +extern const pem_event_action notify_power_state_change_tasks[]; +extern const pem_event_action enable_cgpg_tasks[]; +extern const pem_event_action disable_cgpg_tasks[]; +extern const pem_event_action enable_user_2d_performance_tasks[]; +extern const pem_event_action add_user_2d_performance_state_tasks[]; +extern const pem_event_action delete_user_2d_performance_state_tasks[]; +extern const pem_event_action disable_user_2d_performance_tasks[]; +extern const pem_event_action enable_stutter_mode_tasks[]; +extern const pem_event_action enable_disable_bapm_tasks[]; +extern const pem_event_action reset_boot_state_tasks[]; +extern const pem_event_action create_new_user_performance_state_tasks[]; +extern const pem_event_action initialize_thermal_controller_tasks[]; +extern const pem_event_action uninitialize_thermal_controller_tasks[]; +extern const pem_event_action set_cpu_power_state[]; +#endif /* _EVENT_SUB_CHAINS_H_ */ diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventtasks.c b/drivers/gpu/drm/amd/powerplay/eventmgr/eventtasks.c new file mode 100644 index 000000000000..5cd123472db4 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/eventmgr/eventtasks.c @@ -0,0 +1,438 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include "eventmgr.h" +#include "eventinit.h" +#include "eventmanagement.h" +#include "eventmanager.h" +#include "hardwaremanager.h" +#include "eventtasks.h" +#include "power_state.h" +#include "hwmgr.h" +#include "amd_powerplay.h" +#include "psm.h" + +#define TEMP_RANGE_MIN (90 * 1000) +#define TEMP_RANGE_MAX (120 * 1000) + +int pem_task_update_allowed_performance_levels(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) +{ + + if (pem_is_hw_access_blocked(eventmgr)) + return 0; + + phm_force_dpm_levels(eventmgr->hwmgr, AMD_DPM_FORCED_LEVEL_AUTO); + + return 0; +} + +/* eventtasks_generic.c */ +int pem_task_adjust_power_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) +{ + struct pp_hwmgr *hwmgr; + + if (pem_is_hw_access_blocked(eventmgr)) + return 0; + + hwmgr = eventmgr->hwmgr; + if (event_data->pnew_power_state != NULL) + hwmgr->request_ps = event_data->pnew_power_state; + + if (phm_cap_enabled(eventmgr->platform_descriptor->platformCaps, PHM_PlatformCaps_DynamicPatchPowerState)) + psm_adjust_power_state_dynamic(eventmgr, event_data->skip_state_adjust_rules); + else + psm_adjust_power_state_static(eventmgr, event_data->skip_state_adjust_rules); + + return 0; +} + +int pem_task_power_down_asic(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) +{ + return phm_power_down_asic(eventmgr->hwmgr); +} + +int pem_task_set_boot_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) +{ + if (pem_is_event_data_valid(event_data->valid_fields, PEM_EventDataValid_RequestedStateID)) + return psm_set_states(eventmgr, &(event_data->requested_state_id)); + + return 0; +} + +int pem_task_reset_boot_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) +{ + /* TODO */ + return 0; +} + +int pem_task_update_new_power_state_clocks(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) +{ + /* TODO */ + return 0; +} + +int pem_task_system_shutdown(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) +{ + /* TODO */ + return 0; +} + +int pem_task_register_interrupts(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) +{ + /* TODO */ + return 0; +} + +int pem_task_unregister_interrupts(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) +{ + return pem_unregister_interrupts(eventmgr); +} + +int pem_task_get_boot_state_id(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) +{ + int result; + + result = psm_get_state_by_classification(eventmgr, + PP_StateClassificationFlag_Boot, + &(event_data->requested_state_id) + ); + + if (0 == result) + pem_set_event_data_valid(event_data->valid_fields, PEM_EventDataValid_RequestedStateID); + else + pem_unset_event_data_valid(event_data->valid_fields, PEM_EventDataValid_RequestedStateID); + + return result; +} + +int pem_task_enable_dynamic_state_management(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) +{ + return phm_enable_dynamic_state_management(eventmgr->hwmgr); +} + +int pem_task_disable_dynamic_state_management(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) +{ + /* TODO */ + return 0; +} + +int pem_task_enable_clock_power_gatings_tasks(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) +{ + 
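+	/* Nothing event-specific to do here; the ASIC-specific gating work happens in the hardware manager behind phm_enable_clock_power_gatings(). */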
return phm_enable_clock_power_gatings(eventmgr->hwmgr); +} + +int pem_task_powerdown_uvd_tasks(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) +{ + return phm_powerdown_uvd(eventmgr->hwmgr); +} + +int pem_task_powerdown_vce_tasks(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) +{ + phm_powergate_uvd(eventmgr->hwmgr, true); + phm_powergate_vce(eventmgr->hwmgr, true); + return 0; +} + +int pem_task_disable_clock_power_gatings_tasks(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) +{ + /* TODO */ + return 0; +} + +int pem_task_start_asic_block_usage(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) +{ + /* TODO */ + return 0; +} + +int pem_task_stop_asic_block_usage(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) +{ + /* TODO */ + return 0; +} + +int pem_task_setup_asic(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) +{ + return phm_setup_asic(eventmgr->hwmgr); +} + +int pem_task_cleanup_asic(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) +{ + /* TODO */ + return 0; +} + +int pem_task_store_dal_configuration(struct pp_eventmgr *eventmgr, const struct amd_display_configuration *display_config) +{ + /* TODO */ + return 0; + /*phm_store_dal_configuration_data(eventmgr->hwmgr, display_config) */ +} + +int pem_task_notify_hw_mgr_display_configuration_change(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) +{ + if (pem_is_hw_access_blocked(eventmgr)) + return 0; + + return phm_display_configuration_changed(eventmgr->hwmgr); +} + +int pem_task_notify_hw_mgr_pre_display_configuration_change(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) +{ + return 0; +} + +int pem_task_notify_smc_display_config_after_power_state_adjustment(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) +{ + if (pem_is_hw_access_blocked(eventmgr)) + return 0; + + return phm_notify_smc_display_config_after_ps_adjustment(eventmgr->hwmgr); +} + +int pem_task_block_adjust_power_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) +{ + eventmgr->block_adjust_power_state = true; + /* to do PHM_ResetIPSCounter(pEventMgr->pHwMgr);*/ + return 0; +} + +int pem_task_unblock_adjust_power_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) +{ + eventmgr->block_adjust_power_state = false; + return 0; +} + +int pem_task_notify_power_state_change(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) +{ + /* TODO */ + return 0; +} + +int pem_task_block_hw_access(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) +{ + /* TODO */ + return 0; +} + +int pem_task_un_block_hw_access(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) +{ + /* TODO */ + return 0; +} + +int pem_task_reset_display_phys_access(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) +{ + /* TODO */ + return 0; +} + +int pem_task_set_cpu_power_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) +{ + return phm_set_cpu_power_state(eventmgr->hwmgr); +} + +/*powersaving*/ + +int pem_task_set_power_source(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) +{ + /* TODO */ + return 0; +} + +int pem_task_notify_hw_of_power_source(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) +{ + /* TODO */ + return 0; +} + +int pem_task_get_power_saving_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) +{ + /* TODO */ + return 0; +} + +int pem_task_reset_power_saving_state(struct 
pp_eventmgr *eventmgr, struct pem_event_data *event_data) +{ + /* TODO */ + return 0; +} + +int pem_task_set_power_saving_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) +{ + /* TODO */ + return 0; +} + +int pem_task_set_screen_state_on(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) +{ + /* TODO */ + return 0; +} + +int pem_task_set_screen_state_off(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) +{ + /* TODO */ + return 0; +} + +int pem_task_enable_voltage_island_power_gating(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) +{ + /* TODO */ + return 0; +} + +int pem_task_disable_voltage_island_power_gating(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) +{ + /* TODO */ + return 0; +} + +int pem_task_enable_cgpg(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) +{ + /* TODO */ + return 0; +} + +int pem_task_disable_cgpg(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) +{ + /* TODO */ + return 0; +} + +int pem_task_enable_clock_power_gating(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) +{ + /* TODO */ + return 0; +} + + +int pem_task_enable_gfx_clock_gating(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) +{ + /* TODO */ + return 0; +} + +int pem_task_disable_gfx_clock_gating(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) +{ + /* TODO */ + return 0; +} + + +/* performance */ +int pem_task_set_performance_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) +{ + if (pem_is_event_data_valid(event_data->valid_fields, PEM_EventDataValid_RequestedStateID)) + return psm_set_states(eventmgr, &(event_data->requested_state_id)); + + return 0; +} + +int pem_task_conditionally_force_3d_performance_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) +{ + /* TODO */ + return 0; +} + +int pem_task_enable_stutter_mode(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) +{ + /* TODO */ + return 0; +} + +int pem_task_get_2D_performance_state_id(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) +{ + int result; + + if (eventmgr->features[PP_Feature_PowerPlay].supported && + !(eventmgr->features[PP_Feature_PowerPlay].enabled)) + result = psm_get_state_by_classification(eventmgr, + PP_StateClassificationFlag_Boot, + &(event_data->requested_state_id)); + else if (eventmgr->features[PP_Feature_User2DPerformance].enabled) + result = psm_get_state_by_classification(eventmgr, + PP_StateClassificationFlag_User2DPerformance, + &(event_data->requested_state_id)); + else + result = psm_get_ui_state(eventmgr, PP_StateUILabel_Performance, + &(event_data->requested_state_id)); + + if (0 == result) + pem_set_event_data_valid(event_data->valid_fields, PEM_EventDataValid_RequestedStateID); + else + pem_unset_event_data_valid(event_data->valid_fields, PEM_EventDataValid_RequestedStateID); + + return result; +} + +int pem_task_create_user_performance_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) +{ + struct pp_power_state *state; + int table_entries; + struct pp_hwmgr *hwmgr = eventmgr->hwmgr; + int i; + + table_entries = hwmgr->num_ps; + state = hwmgr->ps; + +restart_search: + for (i = 0; i < table_entries; i++) { + if (state->classification.ui_label & event_data->requested_ui_label) { + event_data->pnew_power_state = state; + return 0; + } + state = (struct pp_power_state *)((unsigned long)state + hwmgr->ps_size); + } + + switch (event_data->requested_ui_label) { + 
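+	/* No power state in the table carries the requested UI label: Battery and Balanced requests fall back to the Performance label and the search is restarted; any other label fails with -1. */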
case PP_StateUILabel_Battery: + case PP_StateUILabel_Balanced: + event_data->requested_ui_label = PP_StateUILabel_Performance; + goto restart_search; + default: + break; + } + return -1; +} + +int pem_task_initialize_thermal_controller(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) +{ + struct PP_TemperatureRange range; + + range.max = TEMP_RANGE_MAX; + range.min = TEMP_RANGE_MIN; + + if (eventmgr == NULL || eventmgr->platform_descriptor == NULL) + return -EINVAL; + + if (phm_cap_enabled(eventmgr->platform_descriptor->platformCaps, PHM_PlatformCaps_ThermalController)) + return phm_start_thermal_controller(eventmgr->hwmgr, &range); + + return 0; +} + +int pem_task_uninitialize_thermal_controller(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) +{ + return phm_stop_thermal_controller(eventmgr->hwmgr); +} diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventtasks.h b/drivers/gpu/drm/amd/powerplay/eventmgr/eventtasks.h new file mode 100644 index 000000000000..6c6297e3b598 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/eventmgr/eventtasks.h @@ -0,0 +1,88 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#ifndef _EVENT_TASKS_H_ +#define _EVENT_TASKS_H_ +#include "eventmgr.h" + +struct amd_display_configuration; + +/* eventtasks_generic.c */ +int pem_task_adjust_power_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); +int pem_task_power_down_asic(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); +int pem_task_get_boot_state_id(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); +int pem_task_set_boot_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); +int pem_task_reset_boot_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); +int pem_task_update_new_power_state_clocks(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); +int pem_task_system_shutdown(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); +int pem_task_register_interrupts(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); +int pem_task_unregister_interrupts(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); +int pem_task_enable_dynamic_state_management(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); +int pem_task_disable_dynamic_state_management(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); +int pem_task_enable_clock_power_gatings_tasks(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); +int pem_task_powerdown_uvd_tasks(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); +int pem_task_powerdown_vce_tasks(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); +int pem_task_disable_clock_power_gatings_tasks(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); +int pem_task_start_asic_block_usage(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); +int pem_task_stop_asic_block_usage(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); +int pem_task_setup_asic(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); +int pem_task_cleanup_asic(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); +int pem_task_store_dal_configuration (struct pp_eventmgr *eventmgr, const struct amd_display_configuration *display_config); +int pem_task_notify_hw_mgr_display_configuration_change(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); +int pem_task_notify_hw_mgr_pre_display_configuration_change(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); +int pem_task_block_adjust_power_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); +int pem_task_unblock_adjust_power_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); +int pem_task_notify_power_state_change(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); +int pem_task_block_hw_access(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); +int pem_task_un_block_hw_access(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); +int pem_task_reset_display_phys_access(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); +int pem_task_set_cpu_power_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); +int pem_task_notify_smc_display_config_after_power_state_adjustment(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); +/*powersaving*/ + +int pem_task_set_power_source(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); +int pem_task_notify_hw_of_power_source(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); +int 
pem_task_get_power_saving_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); +int pem_task_reset_power_saving_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); +int pem_task_set_power_saving_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); +int pem_task_set_screen_state_on(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); +int pem_task_set_screen_state_off(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); +int pem_task_enable_voltage_island_power_gating(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); +int pem_task_disable_voltage_island_power_gating(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); +int pem_task_enable_cgpg(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); +int pem_task_disable_cgpg(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); +int pem_task_enable_gfx_clock_gating(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); +int pem_task_disable_gfx_clock_gating(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); +int pem_task_enable_stutter_mode(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); + +/* performance */ +int pem_task_set_performance_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); +int pem_task_conditionally_force_3d_performance_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); +int pem_task_get_2D_performance_state_id(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); +int pem_task_create_user_performance_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); +int pem_task_update_allowed_performance_levels(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); +/*thermal */ +int pem_task_initialize_thermal_controller(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); +int pem_task_uninitialize_thermal_controller(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); + +#endif /* _EVENT_TASKS_H_ */ diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/psm.c b/drivers/gpu/drm/amd/powerplay/eventmgr/psm.c new file mode 100644 index 000000000000..a46225c0fc01 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/eventmgr/psm.c @@ -0,0 +1,117 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#include "psm.h" + +int psm_get_ui_state(struct pp_eventmgr *eventmgr, enum PP_StateUILabel ui_label, unsigned long *state_id) +{ + struct pp_power_state *state; + int table_entries; + struct pp_hwmgr *hwmgr = eventmgr->hwmgr; + int i; + + table_entries = hwmgr->num_ps; + state = hwmgr->ps; + + for (i = 0; i < table_entries; i++) { + if (state->classification.ui_label & ui_label) { + *state_id = state->id; + return 0; + } + state = (struct pp_power_state *)((unsigned long)state + hwmgr->ps_size); + } + return -1; +} + +int psm_get_state_by_classification(struct pp_eventmgr *eventmgr, enum PP_StateClassificationFlag flag, unsigned long *state_id) +{ + struct pp_power_state *state; + int table_entries; + struct pp_hwmgr *hwmgr = eventmgr->hwmgr; + int i; + + table_entries = hwmgr->num_ps; + state = hwmgr->ps; + + for (i = 0; i < table_entries; i++) { + if (state->classification.flags & flag) { + *state_id = state->id; + return 0; + } + state = (struct pp_power_state *)((unsigned long)state + hwmgr->ps_size); + } + return -1; +} + +int psm_set_states(struct pp_eventmgr *eventmgr, unsigned long *state_id) +{ + struct pp_power_state *state; + int table_entries; + struct pp_hwmgr *hwmgr = eventmgr->hwmgr; + int i; + + table_entries = hwmgr->num_ps; + state = hwmgr->ps; + + for (i = 0; i < table_entries; i++) { + if (state->id == *state_id) { + hwmgr->request_ps = state; + return 0; + } + state = (struct pp_power_state *)((unsigned long)state + hwmgr->ps_size); + } + return -1; +} + +int psm_adjust_power_state_dynamic(struct pp_eventmgr *eventmgr, bool skip) +{ + + struct pp_power_state *pcurrent; + struct pp_power_state *requested; + struct pp_hwmgr *hwmgr; + bool equal; + + if (skip) + return 0; + + hwmgr = eventmgr->hwmgr; + pcurrent = hwmgr->current_ps; + requested = hwmgr->request_ps; + + if (requested == NULL) + return 0; + + if (pcurrent == NULL || (0 != phm_check_states_equal(hwmgr, &pcurrent->hardware, &requested->hardware, &equal))) + equal = false; + + if (!equal || phm_check_smc_update_required_for_display_configuration(hwmgr)) { + phm_apply_state_adjust_rules(hwmgr, requested, pcurrent); + phm_set_power_state(hwmgr, &pcurrent->hardware, &requested->hardware); + hwmgr->current_ps = requested; + } + return 0; +} + +int psm_adjust_power_state_static(struct pp_eventmgr *eventmgr, bool skip) +{ + return 0; +} diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/psm.h b/drivers/gpu/drm/amd/powerplay/eventmgr/psm.h new file mode 100644 index 000000000000..fbdff3e02aa3 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/eventmgr/psm.h @@ -0,0 +1,38 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#include "eventmgr.h" +#include "eventinit.h" +#include "eventmanagement.h" +#include "eventmanager.h" +#include "power_state.h" +#include "hardwaremanager.h" + +int psm_get_ui_state(struct pp_eventmgr *eventmgr, enum PP_StateUILabel ui_label, unsigned long *state_id); + +int psm_get_state_by_classification(struct pp_eventmgr *eventmgr, enum PP_StateClassificationFlag flag, unsigned long *state_id); + +int psm_set_states(struct pp_eventmgr *eventmgr, unsigned long *state_id); + +int psm_adjust_power_state_dynamic(struct pp_eventmgr *eventmgr, bool skip); + +int psm_adjust_power_state_static(struct pp_eventmgr *eventmgr, bool skip); diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile b/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile new file mode 100644 index 000000000000..b664e34dbcc0 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile @@ -0,0 +1,15 @@ +# +# Makefile for the 'hw manager' sub-component of powerplay. +# It provides the hardware management services for the driver. + +HARDWARE_MGR = hwmgr.o processpptables.o functiontables.o \ + hardwaremanager.o pp_acpi.o cz_hwmgr.o \ + cz_clockpowergating.o \ + tonga_processpptables.o ppatomctrl.o \ + tonga_hwmgr.o pppcielanes.o tonga_thermal.o\ + fiji_powertune.o fiji_hwmgr.o tonga_clockpowergating.o \ + fiji_clockpowergating.o fiji_thermal.o + +AMD_PP_HWMGR = $(addprefix $(AMD_PP_PATH)/hwmgr/,$(HARDWARE_MGR)) + +AMD_POWERPLAY_FILES += $(AMD_PP_HWMGR) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c new file mode 100644 index 000000000000..ad7700822a1c --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c @@ -0,0 +1,252 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#include "hwmgr.h" +#include "cz_clockpowergating.h" +#include "cz_ppsmc.h" + +/* PhyID -> Status Mapping in DDI_PHY_GEN_STATUS + 0 GFX0L (3:0), (27:24), + 1 GFX0H (7:4), (31:28), + 2 GFX1L (3:0), (19:16), + 3 GFX1H (7:4), (23:20), + 4 DDIL (3:0), (11: 8), + 5 DDIH (7:4), (15:12), + 6 DDI2L (3:0), ( 3: 0), + 7 DDI2H (7:4), ( 7: 4), +*/ +#define DDI_PHY_GEN_STATUS_VAL(phyID) (1 << ((3 - ((phyID & 0x07)/2))*8 + (phyID & 0x01)*4)) +#define IS_PHY_ID_USED_BY_PLL(PhyID) (((0xF3 & (1 << PhyID)) & 0xFF) ? true : false) + + +int cz_phm_set_asic_block_gating(struct pp_hwmgr *hwmgr, enum PHM_AsicBlock block, enum PHM_ClockGateSetting gating) +{ + int ret = 0; + + switch (block) { + case PHM_AsicBlock_UVD_MVC: + case PHM_AsicBlock_UVD: + case PHM_AsicBlock_UVD_HD: + case PHM_AsicBlock_UVD_SD: + if (gating == PHM_ClockGateSetting_StaticOff) + ret = cz_dpm_powerdown_uvd(hwmgr); + else + ret = cz_dpm_powerup_uvd(hwmgr); + break; + case PHM_AsicBlock_GFX: + default: + break; + } + + return ret; +} + + +bool cz_phm_is_safe_for_asic_block(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state, enum PHM_AsicBlock block) +{ + return true; +} + + +int cz_phm_enable_disable_gfx_power_gating(struct pp_hwmgr *hwmgr, bool enable) +{ + return 0; +} + +int cz_phm_smu_power_up_down_pcie(struct pp_hwmgr *hwmgr, uint32_t target, bool up, uint32_t args) +{ + /* TODO */ + return 0; +} + +int cz_phm_initialize_display_phy_access(struct pp_hwmgr *hwmgr, bool initialize, bool accesshw) +{ + /* TODO */ + return 0; +} + +int cz_phm_get_display_phy_access_info(struct pp_hwmgr *hwmgr) +{ + /* TODO */ + return 0; +} + +int cz_phm_gate_unused_display_phys(struct pp_hwmgr *hwmgr) +{ + /* TODO */ + return 0; +} + +int cz_phm_ungate_all_display_phys(struct pp_hwmgr *hwmgr) +{ + /* TODO */ + return 0; +} + +static int cz_tf_uvd_power_gating_initialize(struct pp_hwmgr *hwmgr, void *pInput, void *pOutput, void *pStorage, int Result) +{ + return 0; +} + +static int cz_tf_vce_power_gating_initialize(struct pp_hwmgr *hwmgr, void *pInput, void *pOutput, void *pStorage, int Result) +{ + return 0; +} + +int cz_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable) +{ + struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); + uint32_t dpm_features = 0; + + if (enable && + phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_UVDDPM)) { + cz_hwmgr->dpm_flags |= DPMFlags_UVD_Enabled; + dpm_features |= UVD_DPM_MASK; + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_EnableAllSmuFeatures, dpm_features); + } else { + dpm_features |= UVD_DPM_MASK; + cz_hwmgr->dpm_flags &= ~DPMFlags_UVD_Enabled; + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_DisableAllSmuFeatures, dpm_features); + } + return 0; +} + +int cz_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable) +{ + struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); + uint32_t dpm_features = 0; + + if (enable && phm_cap_enabled( + hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_VCEDPM)) { + cz_hwmgr->dpm_flags |= DPMFlags_VCE_Enabled; + dpm_features |= VCE_DPM_MASK; + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_EnableAllSmuFeatures, dpm_features); + } else { + dpm_features |= VCE_DPM_MASK; + cz_hwmgr->dpm_flags &= ~DPMFlags_VCE_Enabled; + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_DisableAllSmuFeatures, dpm_features); + } + + return 0; +} + + +int cz_dpm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate) +{ + struct cz_hwmgr *cz_hwmgr = (struct 
cz_hwmgr *)(hwmgr->backend); + + if (cz_hwmgr->uvd_power_gated == bgate) + return 0; + + cz_hwmgr->uvd_power_gated = bgate; + + if (bgate) { + cgs_set_clockgating_state(hwmgr->device, + AMD_IP_BLOCK_TYPE_UVD, + AMD_CG_STATE_UNGATE); + cgs_set_powergating_state(hwmgr->device, + AMD_IP_BLOCK_TYPE_UVD, + AMD_PG_STATE_GATE); + cz_dpm_update_uvd_dpm(hwmgr, true); + cz_dpm_powerdown_uvd(hwmgr); + } else { + cz_dpm_powerup_uvd(hwmgr); + cgs_set_clockgating_state(hwmgr->device, + AMD_IP_BLOCK_TYPE_UVD, + AMD_PG_STATE_GATE); + cgs_set_powergating_state(hwmgr->device, + AMD_IP_BLOCK_TYPE_UVD, + AMD_CG_STATE_UNGATE); + cz_dpm_update_uvd_dpm(hwmgr, false); + } + + return 0; +} + +int cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate) +{ + struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_VCEPowerGating)) { + if (cz_hwmgr->vce_power_gated != bgate) { + if (bgate) { + cgs_set_clockgating_state( + hwmgr->device, + AMD_IP_BLOCK_TYPE_VCE, + AMD_CG_STATE_UNGATE); + cgs_set_powergating_state( + hwmgr->device, + AMD_IP_BLOCK_TYPE_VCE, + AMD_PG_STATE_GATE); + cz_enable_disable_vce_dpm(hwmgr, false); + /* TODO: to figure out why vce can't be poweroff*/ + cz_hwmgr->vce_power_gated = true; + } else { + cz_dpm_powerup_vce(hwmgr); + cz_hwmgr->vce_power_gated = false; + cgs_set_clockgating_state( + hwmgr->device, + AMD_IP_BLOCK_TYPE_VCE, + AMD_PG_STATE_GATE); + cgs_set_powergating_state( + hwmgr->device, + AMD_IP_BLOCK_TYPE_VCE, + AMD_CG_STATE_UNGATE); + cz_dpm_update_vce_dpm(hwmgr); + cz_enable_disable_vce_dpm(hwmgr, true); + return 0; + } + } + } else { + cz_dpm_update_vce_dpm(hwmgr); + cz_enable_disable_vce_dpm(hwmgr, true); + return 0; + } + + if (!cz_hwmgr->vce_power_gated) + cz_dpm_update_vce_dpm(hwmgr); + + return 0; +} + + +static struct phm_master_table_item cz_enable_clock_power_gatings_list[] = { + /*we don't need an exit table here, because there is only D3 cold on Kv*/ + { phm_cf_want_uvd_power_gating, cz_tf_uvd_power_gating_initialize }, + { phm_cf_want_vce_power_gating, cz_tf_vce_power_gating_initialize }, + /* to do { NULL, cz_tf_xdma_power_gating_enable }, */ + { NULL, NULL } +}; + +struct phm_master_table_header cz_phm_enable_clock_power_gatings_master = { + 0, + PHM_MasterTableFlag_None, + cz_enable_clock_power_gatings_list +}; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.h b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.h new file mode 100644 index 000000000000..bbbc0571320e --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.h @@ -0,0 +1,37 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef _CZ_CLOCK_POWER_GATING_H_ +#define _CZ_CLOCK_POWER_GATING_H_ + +#include "cz_hwmgr.h" +#include "pp_asicblocks.h" + +extern int cz_phm_set_asic_block_gating(struct pp_hwmgr *hwmgr, enum PHM_AsicBlock block, enum PHM_ClockGateSetting gating); +extern struct phm_master_table_header cz_phm_enable_clock_power_gatings_master; +extern struct phm_master_table_header cz_phm_disable_clock_power_gatings_master; +extern int cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate); +extern int cz_dpm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate); +extern int cz_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable); +extern int cz_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable); +#endif /* _CZ_CLOCK_POWER_GATING_H_ */ diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c new file mode 100644 index 000000000000..0874ab42ee95 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c @@ -0,0 +1,1737 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#include <linux/types.h> +#include <linux/kernel.h> +#include <linux/slab.h> +#include "atom-types.h" +#include "atombios.h" +#include "processpptables.h" +#include "pp_debug.h" +#include "cgs_common.h" +#include "smu/smu_8_0_d.h" +#include "smu8_fusion.h" +#include "smu/smu_8_0_sh_mask.h" +#include "smumgr.h" +#include "hwmgr.h" +#include "hardwaremanager.h" +#include "cz_ppsmc.h" +#include "cz_hwmgr.h" +#include "power_state.h" +#include "cz_clockpowergating.h" +#include "pp_debug.h" + +#define ixSMUSVI_NB_CURRENTVID 0xD8230044 +#define CURRENT_NB_VID_MASK 0xff000000 +#define CURRENT_NB_VID__SHIFT 24 +#define ixSMUSVI_GFX_CURRENTVID 0xD8230048 +#define CURRENT_GFX_VID_MASK 0xff000000 +#define CURRENT_GFX_VID__SHIFT 24 + +static const unsigned long PhwCz_Magic = (unsigned long) PHM_Cz_Magic; + +static struct cz_power_state *cast_PhwCzPowerState(struct pp_hw_power_state *hw_ps) +{ + if (PhwCz_Magic != hw_ps->magic) + return NULL; + + return (struct cz_power_state *)hw_ps; +} + +static const struct cz_power_state *cast_const_PhwCzPowerState( + const struct pp_hw_power_state *hw_ps) +{ + if (PhwCz_Magic != hw_ps->magic) + return NULL; + + return (struct cz_power_state *)hw_ps; +} + +uint32_t cz_get_eclk_level(struct pp_hwmgr *hwmgr, + uint32_t clock, uint32_t msg) +{ + int i = 0; + struct phm_vce_clock_voltage_dependency_table *ptable = + hwmgr->dyn_state.vce_clock_voltage_dependency_table; + + switch (msg) { + case PPSMC_MSG_SetEclkSoftMin: + case PPSMC_MSG_SetEclkHardMin: + for (i = 0; i < (int)ptable->count; i++) { + if (clock <= ptable->entries[i].ecclk) + break; + } + break; + + case PPSMC_MSG_SetEclkSoftMax: + case PPSMC_MSG_SetEclkHardMax: + for (i = ptable->count - 1; i >= 0; i--) { + if (clock >= ptable->entries[i].ecclk) + break; + } + break; + + default: + break; + } + + return i; +} + +static uint32_t cz_get_sclk_level(struct pp_hwmgr *hwmgr, + uint32_t clock, uint32_t msg) +{ + int i = 0; + struct phm_clock_voltage_dependency_table *table = + hwmgr->dyn_state.vddc_dependency_on_sclk; + + switch (msg) { + case PPSMC_MSG_SetSclkSoftMin: + case PPSMC_MSG_SetSclkHardMin: + for (i = 0; i < (int)table->count; i++) { + if (clock <= table->entries[i].clk) + break; + } + break; + + case PPSMC_MSG_SetSclkSoftMax: + case PPSMC_MSG_SetSclkHardMax: + for (i = table->count - 1; i >= 0; i--) { + if (clock >= table->entries[i].clk) + break; + } + break; + + default: + break; + } + return i; +} + +static uint32_t cz_get_uvd_level(struct pp_hwmgr *hwmgr, + uint32_t clock, uint32_t msg) +{ + int i = 0; + struct phm_uvd_clock_voltage_dependency_table *ptable = + hwmgr->dyn_state.uvd_clock_voltage_dependency_table; + + switch (msg) { + case PPSMC_MSG_SetUvdSoftMin: + case PPSMC_MSG_SetUvdHardMin: + for (i = 0; i < (int)ptable->count; i++) { + if (clock <= ptable->entries[i].vclk) + break; + } + break; + + case PPSMC_MSG_SetUvdSoftMax: + case PPSMC_MSG_SetUvdHardMax: + for (i = ptable->count - 1; i >= 0; i--) { + if (clock >= ptable->entries[i].vclk) + break; + } + break; + + default: + break; + } + + return i; +} + +static uint32_t cz_get_max_sclk_level(struct pp_hwmgr *hwmgr) +{ + struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); + + if (cz_hwmgr->max_sclk_level == 0) { + smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_GetMaxSclkLevel); + cz_hwmgr->max_sclk_level = smum_get_argument(hwmgr->smumgr) + 1; + } + + return cz_hwmgr->max_sclk_level; +} + +static int cz_initialize_dpm_defaults(struct pp_hwmgr *hwmgr) +{ + struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); + uint32_t i; + + cz_hwmgr->gfx_ramp_step = 
256*25/100; + + cz_hwmgr->gfx_ramp_delay = 1; /* by default, we delay 1us */ + + for (i = 0; i < CZ_MAX_HARDWARE_POWERLEVELS; i++) + cz_hwmgr->activity_target[i] = CZ_AT_DFLT; + + cz_hwmgr->mgcg_cgtt_local0 = 0x00000000; + cz_hwmgr->mgcg_cgtt_local1 = 0x00000000; + + cz_hwmgr->clock_slow_down_freq = 25000; + + cz_hwmgr->skip_clock_slow_down = 1; + + cz_hwmgr->enable_nb_ps_policy = 1; /* disable until UNB is ready, Enabled */ + + cz_hwmgr->voltage_drop_in_dce_power_gating = 0; /* disable until fully verified */ + + cz_hwmgr->voting_rights_clients = 0x00C00033; + + cz_hwmgr->static_screen_threshold = 8; + + cz_hwmgr->ddi_power_gating_disabled = 0; + + cz_hwmgr->bapm_enabled = 1; + + cz_hwmgr->voltage_drop_threshold = 0; + + cz_hwmgr->gfx_power_gating_threshold = 500; + + cz_hwmgr->vce_slow_sclk_threshold = 20000; + + cz_hwmgr->dce_slow_sclk_threshold = 30000; + + cz_hwmgr->disable_driver_thermal_policy = 1; + + cz_hwmgr->disable_nb_ps3_in_battery = 0; + + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ABM); + + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_NonABMSupportInPPLib); + + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_SclkDeepSleep); + + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_DynamicM3Arbiter); + + cz_hwmgr->override_dynamic_mgpg = 1; + + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_DynamicPatchPowerState); + + cz_hwmgr->thermal_auto_throttling_treshold = 0; + + cz_hwmgr->tdr_clock = 0; + + cz_hwmgr->disable_gfx_power_gating_in_uvd = 0; + + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_DynamicUVDState); + + cz_hwmgr->cc6_settings.cpu_cc6_disable = false; + cz_hwmgr->cc6_settings.cpu_pstate_disable = false; + cz_hwmgr->cc6_settings.nb_pstate_switch_disable = false; + cz_hwmgr->cc6_settings.cpu_pstate_separation_time = 0; + + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_DisableVoltageIsland); + + return 0; +} + +static uint32_t cz_convert_8Bit_index_to_voltage( + struct pp_hwmgr *hwmgr, uint16_t voltage) +{ + return 6200 - (voltage * 25); +} + +static int cz_construct_max_power_limits_table(struct pp_hwmgr *hwmgr, + struct phm_clock_and_voltage_limits *table) +{ + struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)hwmgr->backend; + struct cz_sys_info *sys_info = &cz_hwmgr->sys_info; + struct phm_clock_voltage_dependency_table *dep_table = + hwmgr->dyn_state.vddc_dependency_on_sclk; + + if (dep_table->count > 0) { + table->sclk = dep_table->entries[dep_table->count-1].clk; + table->vddc = cz_convert_8Bit_index_to_voltage(hwmgr, + (uint16_t)dep_table->entries[dep_table->count-1].v); + } + table->mclk = sys_info->nbp_memory_clock[0]; + return 0; +} + +static int cz_init_dynamic_state_adjustment_rule_settings( + struct pp_hwmgr *hwmgr, + ATOM_CLK_VOLT_CAPABILITY *disp_voltage_table) +{ + uint32_t table_size = + sizeof(struct phm_clock_voltage_dependency_table) + + (7 * sizeof(struct phm_clock_voltage_dependency_record)); + + struct phm_clock_voltage_dependency_table *table_clk_vlt = + kzalloc(table_size, GFP_KERNEL); + + if (NULL == table_clk_vlt) { + printk(KERN_ERR "[ powerplay ] Can not allocate memory!\n"); + return -ENOMEM; + } + + table_clk_vlt->count = 8; + table_clk_vlt->entries[0].clk = PP_DAL_POWERLEVEL_0; + table_clk_vlt->entries[0].v = 0; + table_clk_vlt->entries[1].clk = PP_DAL_POWERLEVEL_1; + table_clk_vlt->entries[1].v = 1; + table_clk_vlt->entries[2].clk = PP_DAL_POWERLEVEL_2; + 
table_clk_vlt->entries[2].v = 2; + table_clk_vlt->entries[3].clk = PP_DAL_POWERLEVEL_3; + table_clk_vlt->entries[3].v = 3; + table_clk_vlt->entries[4].clk = PP_DAL_POWERLEVEL_4; + table_clk_vlt->entries[4].v = 4; + table_clk_vlt->entries[5].clk = PP_DAL_POWERLEVEL_5; + table_clk_vlt->entries[5].v = 5; + table_clk_vlt->entries[6].clk = PP_DAL_POWERLEVEL_6; + table_clk_vlt->entries[6].v = 6; + table_clk_vlt->entries[7].clk = PP_DAL_POWERLEVEL_7; + table_clk_vlt->entries[7].v = 7; + hwmgr->dyn_state.vddc_dep_on_dal_pwrl = table_clk_vlt; + + return 0; +} + +static int cz_get_system_info_data(struct pp_hwmgr *hwmgr) +{ + struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)hwmgr->backend; + ATOM_INTEGRATED_SYSTEM_INFO_V1_9 *info = NULL; + uint32_t i; + int result = 0; + uint8_t frev, crev; + uint16_t size; + + info = (ATOM_INTEGRATED_SYSTEM_INFO_V1_9 *) cgs_atom_get_data_table( + hwmgr->device, + GetIndexIntoMasterTable(DATA, IntegratedSystemInfo), + &size, &frev, &crev); + + if (crev != 9) { + printk(KERN_ERR "[ powerplay ] Unsupported IGP table: %d %d\n", frev, crev); + return -EINVAL; + } + + if (info == NULL) { + printk(KERN_ERR "[ powerplay ] Could not retrieve the Integrated System Info Table!\n"); + return -EINVAL; + } + + cz_hwmgr->sys_info.bootup_uma_clock = + le32_to_cpu(info->ulBootUpUMAClock); + + cz_hwmgr->sys_info.bootup_engine_clock = + le32_to_cpu(info->ulBootUpEngineClock); + + cz_hwmgr->sys_info.dentist_vco_freq = + le32_to_cpu(info->ulDentistVCOFreq); + + cz_hwmgr->sys_info.system_config = + le32_to_cpu(info->ulSystemConfig); + + cz_hwmgr->sys_info.bootup_nb_voltage_index = + le16_to_cpu(info->usBootUpNBVoltage); + + cz_hwmgr->sys_info.htc_hyst_lmt = + (info->ucHtcHystLmt == 0) ? 5 : info->ucHtcHystLmt; + + cz_hwmgr->sys_info.htc_tmp_lmt = + (info->ucHtcTmpLmt == 0) ? 
203 : info->ucHtcTmpLmt; + + if (cz_hwmgr->sys_info.htc_tmp_lmt <= + cz_hwmgr->sys_info.htc_hyst_lmt) { + printk(KERN_ERR "[ powerplay ] The htcTmpLmt should be larger than htcHystLmt.\n"); + return -EINVAL; + } + + cz_hwmgr->sys_info.nb_dpm_enable = + cz_hwmgr->enable_nb_ps_policy && + (le32_to_cpu(info->ulSystemConfig) >> 3 & 0x1); + + for (i = 0; i < CZ_NUM_NBPSTATES; i++) { + if (i < CZ_NUM_NBPMEMORYCLOCK) { + cz_hwmgr->sys_info.nbp_memory_clock[i] = + le32_to_cpu(info->ulNbpStateMemclkFreq[i]); + } + cz_hwmgr->sys_info.nbp_n_clock[i] = + le32_to_cpu(info->ulNbpStateNClkFreq[i]); + } + + for (i = 0; i < MAX_DISPLAY_CLOCK_LEVEL; i++) { + cz_hwmgr->sys_info.display_clock[i] = + le32_to_cpu(info->sDispClkVoltageMapping[i].ulMaximumSupportedCLK); + } + + /* Here use 4 levels, make sure not exceed */ + for (i = 0; i < CZ_NUM_NBPSTATES; i++) { + cz_hwmgr->sys_info.nbp_voltage_index[i] = + le16_to_cpu(info->usNBPStateVoltage[i]); + } + + if (!cz_hwmgr->sys_info.nb_dpm_enable) { + for (i = 1; i < CZ_NUM_NBPSTATES; i++) { + if (i < CZ_NUM_NBPMEMORYCLOCK) { + cz_hwmgr->sys_info.nbp_memory_clock[i] = + cz_hwmgr->sys_info.nbp_memory_clock[0]; + } + cz_hwmgr->sys_info.nbp_n_clock[i] = + cz_hwmgr->sys_info.nbp_n_clock[0]; + cz_hwmgr->sys_info.nbp_voltage_index[i] = + cz_hwmgr->sys_info.nbp_voltage_index[0]; + } + } + + if (le32_to_cpu(info->ulGPUCapInfo) & + SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS) { + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_EnableDFSBypass); + } + + cz_hwmgr->sys_info.uma_channel_number = info->ucUMAChannelNumber; + + cz_construct_max_power_limits_table (hwmgr, + &hwmgr->dyn_state.max_clock_voltage_on_ac); + + cz_init_dynamic_state_adjustment_rule_settings(hwmgr, + &info->sDISPCLK_Voltage[0]); + + return result; +} + +static int cz_construct_boot_state(struct pp_hwmgr *hwmgr) +{ + struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); + + cz_hwmgr->boot_power_level.engineClock = + cz_hwmgr->sys_info.bootup_engine_clock; + + cz_hwmgr->boot_power_level.vddcIndex = + (uint8_t)cz_hwmgr->sys_info.bootup_nb_voltage_index; + + cz_hwmgr->boot_power_level.dsDividerIndex = 0; + + cz_hwmgr->boot_power_level.ssDividerIndex = 0; + + cz_hwmgr->boot_power_level.allowGnbSlow = 1; + + cz_hwmgr->boot_power_level.forceNBPstate = 0; + + cz_hwmgr->boot_power_level.hysteresis_up = 0; + + cz_hwmgr->boot_power_level.numSIMDToPowerDown = 0; + + cz_hwmgr->boot_power_level.display_wm = 0; + + cz_hwmgr->boot_power_level.vce_wm = 0; + + return 0; +} + +static int cz_tf_reset_active_process_mask(struct pp_hwmgr *hwmgr, void *input, + void *output, void *storage, int result) +{ + return 0; +} + +static int cz_tf_upload_pptable_to_smu(struct pp_hwmgr *hwmgr, void *input, + void *output, void *storage, int result) +{ + struct SMU8_Fusion_ClkTable *clock_table; + int ret; + uint32_t i; + void *table = NULL; + pp_atomctrl_clock_dividers_kong dividers; + + struct phm_clock_voltage_dependency_table *vddc_table = + hwmgr->dyn_state.vddc_dependency_on_sclk; + struct phm_clock_voltage_dependency_table *vdd_gfx_table = + hwmgr->dyn_state.vdd_gfx_dependency_on_sclk; + struct phm_acp_clock_voltage_dependency_table *acp_table = + hwmgr->dyn_state.acp_clock_voltage_dependency_table; + struct phm_uvd_clock_voltage_dependency_table *uvd_table = + hwmgr->dyn_state.uvd_clock_voltage_dependency_table; + struct phm_vce_clock_voltage_dependency_table *vce_table = + hwmgr->dyn_state.vce_clock_voltage_dependency_table; + + if (!hwmgr->need_pp_table_upload) + return 0; + + ret = 
smum_download_powerplay_table(hwmgr->smumgr, &table); + + PP_ASSERT_WITH_CODE((0 == ret && NULL != table), + "Fail to get clock table from SMU!", return -EINVAL;); + + clock_table = (struct SMU8_Fusion_ClkTable *)table; + + /* patch clock table */ + PP_ASSERT_WITH_CODE((vddc_table->count <= CZ_MAX_HARDWARE_POWERLEVELS), + "Dependency table entry exceeds max limit!", return -EINVAL;); + PP_ASSERT_WITH_CODE((vdd_gfx_table->count <= CZ_MAX_HARDWARE_POWERLEVELS), + "Dependency table entry exceeds max limit!", return -EINVAL;); + PP_ASSERT_WITH_CODE((acp_table->count <= CZ_MAX_HARDWARE_POWERLEVELS), + "Dependency table entry exceeds max limit!", return -EINVAL;); + PP_ASSERT_WITH_CODE((uvd_table->count <= CZ_MAX_HARDWARE_POWERLEVELS), + "Dependency table entry exceeds max limit!", return -EINVAL;); + PP_ASSERT_WITH_CODE((vce_table->count <= CZ_MAX_HARDWARE_POWERLEVELS), + "Dependency table entry exceeds max limit!", return -EINVAL;); + + for (i = 0; i < CZ_MAX_HARDWARE_POWERLEVELS; i++) { + + /* vddc_sclk */ + clock_table->SclkBreakdownTable.ClkLevel[i].GnbVid = + (i < vddc_table->count) ? (uint8_t)vddc_table->entries[i].v : 0; + clock_table->SclkBreakdownTable.ClkLevel[i].Frequency = + (i < vddc_table->count) ? vddc_table->entries[i].clk : 0; + + atomctrl_get_engine_pll_dividers_kong(hwmgr, + clock_table->SclkBreakdownTable.ClkLevel[i].Frequency, + &dividers); + + clock_table->SclkBreakdownTable.ClkLevel[i].DfsDid = + (uint8_t)dividers.pll_post_divider; + + /* vddgfx_sclk */ + clock_table->SclkBreakdownTable.ClkLevel[i].GfxVid = + (i < vdd_gfx_table->count) ? (uint8_t)vdd_gfx_table->entries[i].v : 0; + + /* acp breakdown */ + clock_table->AclkBreakdownTable.ClkLevel[i].GfxVid = + (i < acp_table->count) ? (uint8_t)acp_table->entries[i].v : 0; + clock_table->AclkBreakdownTable.ClkLevel[i].Frequency = + (i < acp_table->count) ? acp_table->entries[i].acpclk : 0; + + atomctrl_get_engine_pll_dividers_kong(hwmgr, + clock_table->AclkBreakdownTable.ClkLevel[i].Frequency, + &dividers); + + clock_table->AclkBreakdownTable.ClkLevel[i].DfsDid = + (uint8_t)dividers.pll_post_divider; + + + /* uvd breakdown */ + clock_table->VclkBreakdownTable.ClkLevel[i].GfxVid = + (i < uvd_table->count) ? (uint8_t)uvd_table->entries[i].v : 0; + clock_table->VclkBreakdownTable.ClkLevel[i].Frequency = + (i < uvd_table->count) ? uvd_table->entries[i].vclk : 0; + + atomctrl_get_engine_pll_dividers_kong(hwmgr, + clock_table->VclkBreakdownTable.ClkLevel[i].Frequency, + &dividers); + + clock_table->VclkBreakdownTable.ClkLevel[i].DfsDid = + (uint8_t)dividers.pll_post_divider; + + clock_table->DclkBreakdownTable.ClkLevel[i].GfxVid = + (i < uvd_table->count) ? (uint8_t)uvd_table->entries[i].v : 0; + clock_table->DclkBreakdownTable.ClkLevel[i].Frequency = + (i < uvd_table->count) ? uvd_table->entries[i].dclk : 0; + + atomctrl_get_engine_pll_dividers_kong(hwmgr, + clock_table->DclkBreakdownTable.ClkLevel[i].Frequency, + &dividers); + + clock_table->DclkBreakdownTable.ClkLevel[i].DfsDid = + (uint8_t)dividers.pll_post_divider; + + /* vce breakdown */ + clock_table->EclkBreakdownTable.ClkLevel[i].GfxVid = + (i < vce_table->count) ? (uint8_t)vce_table->entries[i].v : 0; + clock_table->EclkBreakdownTable.ClkLevel[i].Frequency = + (i < vce_table->count) ? 
vce_table->entries[i].ecclk : 0; + + + atomctrl_get_engine_pll_dividers_kong(hwmgr, + clock_table->EclkBreakdownTable.ClkLevel[i].Frequency, + &dividers); + + clock_table->EclkBreakdownTable.ClkLevel[i].DfsDid = + (uint8_t)dividers.pll_post_divider; + + } + ret = smum_upload_powerplay_table(hwmgr->smumgr); + + return ret; +} + +static int cz_tf_init_sclk_limit(struct pp_hwmgr *hwmgr, void *input, + void *output, void *storage, int result) +{ + struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); + struct phm_clock_voltage_dependency_table *table = + hwmgr->dyn_state.vddc_dependency_on_sclk; + unsigned long clock = 0, level; + + if (NULL == table || table->count <= 0) + return -EINVAL; + + cz_hwmgr->sclk_dpm.soft_min_clk = table->entries[0].clk; + cz_hwmgr->sclk_dpm.hard_min_clk = table->entries[0].clk; + + level = cz_get_max_sclk_level(hwmgr) - 1; + + if (level < table->count) + clock = table->entries[level].clk; + else + clock = table->entries[table->count - 1].clk; + + cz_hwmgr->sclk_dpm.soft_max_clk = clock; + cz_hwmgr->sclk_dpm.hard_max_clk = clock; + + return 0; +} + +static int cz_tf_init_uvd_limit(struct pp_hwmgr *hwmgr, void *input, + void *output, void *storage, int result) +{ + struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); + struct phm_uvd_clock_voltage_dependency_table *table = + hwmgr->dyn_state.uvd_clock_voltage_dependency_table; + unsigned long clock = 0, level; + + if (NULL == table || table->count <= 0) + return -EINVAL; + + cz_hwmgr->uvd_dpm.soft_min_clk = 0; + cz_hwmgr->uvd_dpm.hard_min_clk = 0; + + smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_GetMaxUvdLevel); + level = smum_get_argument(hwmgr->smumgr); + + if (level < table->count) + clock = table->entries[level].vclk; + else + clock = table->entries[table->count - 1].vclk; + + cz_hwmgr->uvd_dpm.soft_max_clk = clock; + cz_hwmgr->uvd_dpm.hard_max_clk = clock; + + return 0; +} + +static int cz_tf_init_vce_limit(struct pp_hwmgr *hwmgr, void *input, + void *output, void *storage, int result) +{ + struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); + struct phm_vce_clock_voltage_dependency_table *table = + hwmgr->dyn_state.vce_clock_voltage_dependency_table; + unsigned long clock = 0, level; + + if (NULL == table || table->count <= 0) + return -EINVAL; + + cz_hwmgr->vce_dpm.soft_min_clk = 0; + cz_hwmgr->vce_dpm.hard_min_clk = 0; + + smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_GetMaxEclkLevel); + level = smum_get_argument(hwmgr->smumgr); + + if (level < table->count) + clock = table->entries[level].ecclk; + else + clock = table->entries[table->count - 1].ecclk; + + cz_hwmgr->vce_dpm.soft_max_clk = clock; + cz_hwmgr->vce_dpm.hard_max_clk = clock; + + return 0; +} + +static int cz_tf_init_acp_limit(struct pp_hwmgr *hwmgr, void *input, + void *output, void *storage, int result) +{ + struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); + struct phm_acp_clock_voltage_dependency_table *table = + hwmgr->dyn_state.acp_clock_voltage_dependency_table; + unsigned long clock = 0, level; + + if (NULL == table || table->count <= 0) + return -EINVAL; + + cz_hwmgr->acp_dpm.soft_min_clk = 0; + cz_hwmgr->acp_dpm.hard_min_clk = 0; + + smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_GetMaxAclkLevel); + level = smum_get_argument(hwmgr->smumgr); + + if (level < table->count) + clock = table->entries[level].acpclk; + else + clock = table->entries[table->count - 1].acpclk; + + cz_hwmgr->acp_dpm.soft_max_clk = clock; + cz_hwmgr->acp_dpm.hard_max_clk = clock; + return 0; +} + +static int 
cz_tf_init_power_gate_state(struct pp_hwmgr *hwmgr, void *input, + void *output, void *storage, int result) +{ + struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); + + cz_hwmgr->uvd_power_gated = false; + cz_hwmgr->vce_power_gated = false; + cz_hwmgr->samu_power_gated = false; + cz_hwmgr->acp_power_gated = false; + cz_hwmgr->pgacpinit = true; + + return 0; +} + +static int cz_tf_init_sclk_threshold(struct pp_hwmgr *hwmgr, void *input, + void *output, void *storage, int result) +{ + struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); + + cz_hwmgr->low_sclk_interrupt_threshold = 0; + + return 0; +} +static int cz_tf_update_sclk_limit(struct pp_hwmgr *hwmgr, + void *input, void *output, + void *storage, int result) +{ + struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); + struct phm_clock_voltage_dependency_table *table = + hwmgr->dyn_state.vddc_dependency_on_sclk; + + unsigned long clock = 0; + unsigned long level; + unsigned long stable_pstate_sclk; + struct PP_Clocks clocks; + unsigned long percentage; + + cz_hwmgr->sclk_dpm.soft_min_clk = table->entries[0].clk; + level = cz_get_max_sclk_level(hwmgr) - 1; + + if (level < table->count) + cz_hwmgr->sclk_dpm.soft_max_clk = table->entries[level].clk; + else + cz_hwmgr->sclk_dpm.soft_max_clk = table->entries[table->count - 1].clk; + + /*PECI_GetMinClockSettings(pHwMgr->pPECI, &clocks);*/ + clock = clocks.engineClock; + + if (cz_hwmgr->sclk_dpm.hard_min_clk != clock) { + cz_hwmgr->sclk_dpm.hard_min_clk = clock; + + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_SetSclkHardMin, + cz_get_sclk_level(hwmgr, + cz_hwmgr->sclk_dpm.hard_min_clk, + PPSMC_MSG_SetSclkHardMin)); + } + + clock = cz_hwmgr->sclk_dpm.soft_min_clk; + + /* update minimum clocks for Stable P-State feature */ + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_StablePState)) { + percentage = 75; + /*Sclk - calculate sclk value based on percentage and find FLOOR sclk from VddcDependencyOnSCLK table */ + stable_pstate_sclk = (hwmgr->dyn_state.max_clock_voltage_on_ac.mclk * + percentage) / 100; + + if (clock < stable_pstate_sclk) + clock = stable_pstate_sclk; + } else { + if (clock < hwmgr->gfx_arbiter.sclk) + clock = hwmgr->gfx_arbiter.sclk; + } + + if (cz_hwmgr->sclk_dpm.soft_min_clk != clock) { + cz_hwmgr->sclk_dpm.soft_min_clk = clock; + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_SetSclkSoftMin, + cz_get_sclk_level(hwmgr, + cz_hwmgr->sclk_dpm.soft_min_clk, + PPSMC_MSG_SetSclkSoftMin)); + } + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_StablePState) && + cz_hwmgr->sclk_dpm.soft_max_clk != clock) { + cz_hwmgr->sclk_dpm.soft_max_clk = clock; + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_SetSclkSoftMax, + cz_get_sclk_level(hwmgr, + cz_hwmgr->sclk_dpm.soft_max_clk, + PPSMC_MSG_SetSclkSoftMax)); + } + + return 0; +} + +static int cz_tf_set_deep_sleep_sclk_threshold(struct pp_hwmgr *hwmgr, + void *input, void *output, + void *storage, int result) +{ + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_SclkDeepSleep)) { + uint32_t clks = hwmgr->display_config.min_core_set_clock_in_sr; + if (clks == 0) + clks = CZ_MIN_DEEP_SLEEP_SCLK; + + PP_DBG_LOG("Setting Deep Sleep Clock: %d\n", clks); + + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_SetMinDeepSleepSclk, + clks); + } + + return 0; +} + +static int cz_tf_set_watermark_threshold(struct pp_hwmgr *hwmgr, + void *input, void *output, 
+ void *storage, int result) +{ + struct cz_hwmgr *cz_hwmgr = + (struct cz_hwmgr *)(hwmgr->backend); + + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_SetWatermarkFrequency, + cz_hwmgr->sclk_dpm.soft_max_clk); + + return 0; +} + +static int cz_tf_set_enabled_levels(struct pp_hwmgr *hwmgr, + void *input, void *output, + void *storage, int result) +{ + return 0; +} + + +static int cz_tf_enable_nb_dpm(struct pp_hwmgr *hwmgr, + void *input, void *output, + void *storage, int result) +{ + int ret = 0; + + struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); + unsigned long dpm_features = 0; + + if (!cz_hwmgr->is_nb_dpm_enabled) { + PP_DBG_LOG("enabling ALL SMU features.\n"); + dpm_features |= NB_DPM_MASK; + ret = smum_send_msg_to_smc_with_parameter( + hwmgr->smumgr, + PPSMC_MSG_EnableAllSmuFeatures, + dpm_features); + if (ret == 0) + cz_hwmgr->is_nb_dpm_enabled = true; + } + + return ret; +} + +static int cz_nbdpm_pstate_enable_disable(struct pp_hwmgr *hwmgr, bool enable, bool lock) +{ + struct cz_hwmgr *hw_data = (struct cz_hwmgr *)(hwmgr->backend); + + if (hw_data->is_nb_dpm_enabled) { + if (enable) { + PP_DBG_LOG("enable Low Memory PState.\n"); + + return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_EnableLowMemoryPstate, + (lock ? 1 : 0)); + } else { + PP_DBG_LOG("disable Low Memory PState.\n"); + + return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_DisableLowMemoryPstate, + (lock ? 1 : 0)); + } + } + + return 0; +} + +static int cz_tf_update_low_mem_pstate(struct pp_hwmgr *hwmgr, + void *input, void *output, + void *storage, int result) +{ + bool disable_switch; + bool enable_low_mem_state; + struct cz_hwmgr *hw_data = (struct cz_hwmgr *)(hwmgr->backend); + const struct phm_set_power_state_input *states = (struct phm_set_power_state_input *)input; + const struct cz_power_state *pnew_state = cast_const_PhwCzPowerState(states->pnew_state); + + if (hw_data->sys_info.nb_dpm_enable) { + disable_switch = hw_data->cc6_settings.nb_pstate_switch_disable ? true : false; + enable_low_mem_state = hw_data->cc6_settings.nb_pstate_switch_disable ? 
false : true; + + if (pnew_state->action == FORCE_HIGH) + cz_nbdpm_pstate_enable_disable(hwmgr, false, disable_switch); + else if(pnew_state->action == CANCEL_FORCE_HIGH) + cz_nbdpm_pstate_enable_disable(hwmgr, false, disable_switch); + else + cz_nbdpm_pstate_enable_disable(hwmgr, enable_low_mem_state, disable_switch); + } + return 0; +} + +static struct phm_master_table_item cz_set_power_state_list[] = { + {NULL, cz_tf_update_sclk_limit}, + {NULL, cz_tf_set_deep_sleep_sclk_threshold}, + {NULL, cz_tf_set_watermark_threshold}, + {NULL, cz_tf_set_enabled_levels}, + {NULL, cz_tf_enable_nb_dpm}, + {NULL, cz_tf_update_low_mem_pstate}, + {NULL, NULL} +}; + +static struct phm_master_table_header cz_set_power_state_master = { + 0, + PHM_MasterTableFlag_None, + cz_set_power_state_list +}; + +static struct phm_master_table_item cz_setup_asic_list[] = { + {NULL, cz_tf_reset_active_process_mask}, + {NULL, cz_tf_upload_pptable_to_smu}, + {NULL, cz_tf_init_sclk_limit}, + {NULL, cz_tf_init_uvd_limit}, + {NULL, cz_tf_init_vce_limit}, + {NULL, cz_tf_init_acp_limit}, + {NULL, cz_tf_init_power_gate_state}, + {NULL, cz_tf_init_sclk_threshold}, + {NULL, NULL} +}; + +static struct phm_master_table_header cz_setup_asic_master = { + 0, + PHM_MasterTableFlag_None, + cz_setup_asic_list +}; + +static int cz_tf_power_up_display_clock_sys_pll(struct pp_hwmgr *hwmgr, + void *input, void *output, + void *storage, int result) +{ + struct cz_hwmgr *hw_data = (struct cz_hwmgr *)(hwmgr->backend); + hw_data->disp_clk_bypass_pending = false; + hw_data->disp_clk_bypass = false; + + return 0; +} + +static int cz_tf_clear_nb_dpm_flag(struct pp_hwmgr *hwmgr, + void *input, void *output, + void *storage, int result) +{ + struct cz_hwmgr *hw_data = (struct cz_hwmgr *)(hwmgr->backend); + hw_data->is_nb_dpm_enabled = false; + + return 0; +} + +static int cz_tf_reset_cc6_data(struct pp_hwmgr *hwmgr, + void *input, void *output, + void *storage, int result) +{ + struct cz_hwmgr *hw_data = (struct cz_hwmgr *)(hwmgr->backend); + + hw_data->cc6_settings.cc6_setting_changed = false; + hw_data->cc6_settings.cpu_pstate_separation_time = 0; + hw_data->cc6_settings.cpu_cc6_disable = false; + hw_data->cc6_settings.cpu_pstate_disable = false; + + return 0; +} + +static struct phm_master_table_item cz_power_down_asic_list[] = { + {NULL, cz_tf_power_up_display_clock_sys_pll}, + {NULL, cz_tf_clear_nb_dpm_flag}, + {NULL, cz_tf_reset_cc6_data}, + {NULL, NULL} +}; + +static struct phm_master_table_header cz_power_down_asic_master = { + 0, + PHM_MasterTableFlag_None, + cz_power_down_asic_list +}; + +static int cz_tf_program_voting_clients(struct pp_hwmgr *hwmgr, void *input, + void *output, void *storage, int result) +{ + PHMCZ_WRITE_SMC_REGISTER(hwmgr->device, CG_FREQ_TRAN_VOTING_0, + PPCZ_VOTINGRIGHTSCLIENTS_DFLT0); + return 0; +} + +static int cz_tf_start_dpm(struct pp_hwmgr *hwmgr, void *input, void *output, + void *storage, int result) +{ + int res = 0xff; + struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); + unsigned long dpm_features = 0; + + cz_hwmgr->dpm_flags |= DPMFlags_SCLK_Enabled; + dpm_features |= SCLK_DPM_MASK; + + res = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_EnableAllSmuFeatures, + dpm_features); + + return res; +} + +static int cz_tf_program_bootup_state(struct pp_hwmgr *hwmgr, void *input, + void *output, void *storage, int result) +{ + struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); + + cz_hwmgr->sclk_dpm.soft_min_clk = cz_hwmgr->sys_info.bootup_engine_clock; + 
cz_hwmgr->sclk_dpm.soft_max_clk = cz_hwmgr->sys_info.bootup_engine_clock; + + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_SetSclkSoftMin, + cz_get_sclk_level(hwmgr, + cz_hwmgr->sclk_dpm.soft_min_clk, + PPSMC_MSG_SetSclkSoftMin)); + + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_SetSclkSoftMax, + cz_get_sclk_level(hwmgr, + cz_hwmgr->sclk_dpm.soft_max_clk, + PPSMC_MSG_SetSclkSoftMax)); + + return 0; +} + +int cz_tf_reset_acp_boot_level(struct pp_hwmgr *hwmgr, void *input, + void *output, void *storage, int result) +{ + struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); + + cz_hwmgr->acp_boot_level = 0xff; + return 0; +} + +static bool cz_dpm_check_smu_features(struct pp_hwmgr *hwmgr, + unsigned long check_feature) +{ + int result; + unsigned long features; + + result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_GetFeatureStatus, 0); + if (result == 0) { + features = smum_get_argument(hwmgr->smumgr); + if (features & check_feature) + return true; + } + + return result; +} + +static int cz_tf_check_for_dpm_disabled(struct pp_hwmgr *hwmgr, void *input, + void *output, void *storage, int result) +{ + if (cz_dpm_check_smu_features(hwmgr, SMU_EnabledFeatureScoreboard_SclkDpmOn)) + return PP_Result_TableImmediateExit; + return 0; +} + +static int cz_tf_enable_didt(struct pp_hwmgr *hwmgr, void *input, + void *output, void *storage, int result) +{ + /* TO DO */ + return 0; +} + +static int cz_tf_check_for_dpm_enabled(struct pp_hwmgr *hwmgr, + void *input, void *output, + void *storage, int result) +{ + if (!cz_dpm_check_smu_features(hwmgr, + SMU_EnabledFeatureScoreboard_SclkDpmOn)) + return PP_Result_TableImmediateExit; + return 0; +} + +static struct phm_master_table_item cz_disable_dpm_list[] = { + { NULL, cz_tf_check_for_dpm_enabled}, + {NULL, NULL}, +}; + + +static struct phm_master_table_header cz_disable_dpm_master = { + 0, + PHM_MasterTableFlag_None, + cz_disable_dpm_list +}; + +static struct phm_master_table_item cz_enable_dpm_list[] = { + { NULL, cz_tf_check_for_dpm_disabled }, + { NULL, cz_tf_program_voting_clients }, + { NULL, cz_tf_start_dpm}, + { NULL, cz_tf_program_bootup_state}, + { NULL, cz_tf_enable_didt }, + { NULL, cz_tf_reset_acp_boot_level }, + {NULL, NULL}, +}; + +static struct phm_master_table_header cz_enable_dpm_master = { + 0, + PHM_MasterTableFlag_None, + cz_enable_dpm_list +}; + +static int cz_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, + struct pp_power_state *prequest_ps, + const struct pp_power_state *pcurrent_ps) +{ + struct cz_power_state *cz_ps = + cast_PhwCzPowerState(&prequest_ps->hardware); + + const struct cz_power_state *cz_current_ps = + cast_const_PhwCzPowerState(&pcurrent_ps->hardware); + + struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); + struct PP_Clocks clocks; + bool force_high; + unsigned long num_of_active_displays = 4; + + cz_ps->evclk = hwmgr->vce_arbiter.evclk; + cz_ps->ecclk = hwmgr->vce_arbiter.ecclk; + + cz_ps->need_dfs_bypass = true; + + cz_hwmgr->video_start = (hwmgr->uvd_arbiter.vclk != 0 || hwmgr->uvd_arbiter.dclk != 0 || + hwmgr->vce_arbiter.evclk != 0 || hwmgr->vce_arbiter.ecclk != 0); + + cz_hwmgr->battery_state = (PP_StateUILabel_Battery == prequest_ps->classification.ui_label); + + /* to do PECI_GetMinClockSettings(pHwMgr->pPECI, &clocks); */ + /* PECI_GetNumberOfActiveDisplays(pHwMgr->pPECI, &numOfActiveDisplays); */ + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState)) + clocks.memoryClock = 
hwmgr->dyn_state.max_clock_voltage_on_ac.mclk;
+	else
+		clocks.memoryClock = 0;
+
+	if (clocks.memoryClock < hwmgr->gfx_arbiter.mclk)
+		clocks.memoryClock = hwmgr->gfx_arbiter.mclk;
+
+	force_high = (clocks.memoryClock > cz_hwmgr->sys_info.nbp_memory_clock[CZ_NUM_NBPMEMORYCLOCK - 1])
+			|| (num_of_active_displays >= 3);
+
+	cz_ps->action = cz_current_ps->action;
+
+	if ((force_high == false) && (cz_ps->action == FORCE_HIGH))
+		cz_ps->action = CANCEL_FORCE_HIGH;
+	else if ((force_high == true) && (cz_ps->action != FORCE_HIGH))
+		cz_ps->action = FORCE_HIGH;
+	else
+		cz_ps->action = DO_NOTHING;
+
+	return 0;
+}
+
+static int cz_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
+{
+	int result = 0;
+
+	result = cz_initialize_dpm_defaults(hwmgr);
+	if (result != 0) {
+		printk(KERN_ERR "[ powerplay ] cz_initialize_dpm_defaults failed\n");
+		return result;
+	}
+
+	result = cz_get_system_info_data(hwmgr);
+	if (result != 0) {
+		printk(KERN_ERR "[ powerplay ] cz_get_system_info_data failed\n");
+		return result;
+	}
+
+	cz_construct_boot_state(hwmgr);
+
+	result = phm_construct_table(hwmgr, &cz_setup_asic_master,
+				&(hwmgr->setup_asic));
+	if (result != 0) {
+		printk(KERN_ERR "[ powerplay ] Fail to construct setup ASIC\n");
+		return result;
+	}
+
+	result = phm_construct_table(hwmgr, &cz_power_down_asic_master,
+				&(hwmgr->power_down_asic));
+	if (result != 0) {
+		printk(KERN_ERR "[ powerplay ] Fail to construct power down ASIC\n");
+		return result;
+	}
+
+	result = phm_construct_table(hwmgr, &cz_disable_dpm_master,
+				&(hwmgr->disable_dynamic_state_management));
+	if (result != 0) {
+		printk(KERN_ERR "[ powerplay ] Fail to disable_dynamic_state\n");
+		return result;
+	}
+	result = phm_construct_table(hwmgr, &cz_enable_dpm_master,
+				&(hwmgr->enable_dynamic_state_management));
+	if (result != 0) {
+		printk(KERN_ERR "[ powerplay ] Fail to enable_dynamic_state\n");
+		return result;
+	}
+	result = phm_construct_table(hwmgr, &cz_set_power_state_master,
+				&(hwmgr->set_power_state));
+	if (result != 0) {
+		printk(KERN_ERR "[ powerplay ] Fail to construct set_power_state\n");
+		return result;
+	}
+
+	result = phm_construct_table(hwmgr, &cz_phm_enable_clock_power_gatings_master, &(hwmgr->enable_clock_power_gatings));
+	if (result != 0) {
+		printk(KERN_ERR "[ powerplay ] Fail to construct enable_clock_power_gatings\n");
+		return result;
+	}
+	return result;
+}
+
+static int cz_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
+{
+	/* Both pointers must be valid before the backend can be freed. */
+	if (hwmgr != NULL && hwmgr->backend != NULL) {
+		kfree(hwmgr->backend);
+		kfree(hwmgr);
+	}
+	return 0;
+}
+
+int cz_phm_force_dpm_highest(struct pp_hwmgr *hwmgr)
+{
+	struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
+
+	if (cz_hwmgr->sclk_dpm.soft_min_clk !=
+				cz_hwmgr->sclk_dpm.soft_max_clk)
+		smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+				PPSMC_MSG_SetSclkSoftMin,
+				cz_get_sclk_level(hwmgr,
+				cz_hwmgr->sclk_dpm.soft_max_clk,
+				PPSMC_MSG_SetSclkSoftMin));
+	return 0;
+}
+
+int cz_phm_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
+{
+	struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
+	struct phm_clock_voltage_dependency_table *table =
+				hwmgr->dyn_state.vddc_dependency_on_sclk;
+	unsigned long clock = 0, level;
+
+	if (NULL == table || table->count <= 0)
+		return -EINVAL;
+
+	cz_hwmgr->sclk_dpm.soft_min_clk = table->entries[0].clk;
+	cz_hwmgr->sclk_dpm.hard_min_clk = table->entries[0].clk;
+
+	level = cz_get_max_sclk_level(hwmgr) - 1;
+
+	if (level < table->count)
+		clock = table->entries[level].clk;
+	else
+		clock = table->entries[table->count - 1].clk;
+
+
cz_hwmgr->sclk_dpm.soft_max_clk = clock; + cz_hwmgr->sclk_dpm.hard_max_clk = clock; + + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_SetSclkSoftMin, + cz_get_sclk_level(hwmgr, + cz_hwmgr->sclk_dpm.soft_min_clk, + PPSMC_MSG_SetSclkSoftMin)); + + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_SetSclkSoftMax, + cz_get_sclk_level(hwmgr, + cz_hwmgr->sclk_dpm.soft_max_clk, + PPSMC_MSG_SetSclkSoftMax)); + + return 0; +} + +int cz_phm_force_dpm_lowest(struct pp_hwmgr *hwmgr) +{ + struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); + + if (cz_hwmgr->sclk_dpm.soft_min_clk != + cz_hwmgr->sclk_dpm.soft_max_clk) { + cz_hwmgr->sclk_dpm.soft_max_clk = + cz_hwmgr->sclk_dpm.soft_min_clk; + + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_SetSclkSoftMax, + cz_get_sclk_level(hwmgr, + cz_hwmgr->sclk_dpm.soft_max_clk, + PPSMC_MSG_SetSclkSoftMax)); + } + + return 0; +} + +static int cz_dpm_force_dpm_level(struct pp_hwmgr *hwmgr, + enum amd_dpm_forced_level level) +{ + int ret = 0; + + switch (level) { + case AMD_DPM_FORCED_LEVEL_HIGH: + ret = cz_phm_force_dpm_highest(hwmgr); + if (ret) + return ret; + break; + case AMD_DPM_FORCED_LEVEL_LOW: + ret = cz_phm_force_dpm_lowest(hwmgr); + if (ret) + return ret; + break; + case AMD_DPM_FORCED_LEVEL_AUTO: + ret = cz_phm_unforce_dpm_levels(hwmgr); + if (ret) + return ret; + break; + default: + break; + } + + hwmgr->dpm_level = level; + + return ret; +} + +int cz_dpm_powerdown_uvd(struct pp_hwmgr *hwmgr) +{ + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_UVDPowerGating)) + return smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_UVDPowerOFF); + return 0; +} + +int cz_dpm_powerup_uvd(struct pp_hwmgr *hwmgr) +{ + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_UVDPowerGating)) { + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_UVDDynamicPowerGating)) { + return smum_send_msg_to_smc_with_parameter( + hwmgr->smumgr, + PPSMC_MSG_UVDPowerON, 1); + } else { + return smum_send_msg_to_smc_with_parameter( + hwmgr->smumgr, + PPSMC_MSG_UVDPowerON, 0); + } + } + + return 0; +} + +int cz_dpm_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate) +{ + struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); + struct phm_uvd_clock_voltage_dependency_table *ptable = + hwmgr->dyn_state.uvd_clock_voltage_dependency_table; + + if (!bgate) { + /* Stable Pstate is enabled and we need to set the UVD DPM to highest level */ + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_StablePState)) { + cz_hwmgr->uvd_dpm.hard_min_clk = + ptable->entries[ptable->count - 1].vclk; + + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_SetUvdHardMin, + cz_get_uvd_level(hwmgr, + cz_hwmgr->uvd_dpm.hard_min_clk, + PPSMC_MSG_SetUvdHardMin)); + + cz_enable_disable_uvd_dpm(hwmgr, true); + } else + cz_enable_disable_uvd_dpm(hwmgr, true); + } else + cz_enable_disable_uvd_dpm(hwmgr, false); + + return 0; +} + +int cz_dpm_update_vce_dpm(struct pp_hwmgr *hwmgr) +{ + struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); + struct phm_vce_clock_voltage_dependency_table *ptable = + hwmgr->dyn_state.vce_clock_voltage_dependency_table; + + /* Stable Pstate is enabled and we need to set the VCE DPM to highest level */ + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_StablePState)) { + cz_hwmgr->vce_dpm.hard_min_clk = + ptable->entries[ptable->count - 1].ecclk; + + 
smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_SetEclkHardMin, + cz_get_eclk_level(hwmgr, + cz_hwmgr->vce_dpm.hard_min_clk, + PPSMC_MSG_SetEclkHardMin)); + } else { + /*EPR# 419220 -HW limitation to to */ + cz_hwmgr->vce_dpm.hard_min_clk = hwmgr->vce_arbiter.ecclk; + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_SetEclkHardMin, + cz_get_eclk_level(hwmgr, + cz_hwmgr->vce_dpm.hard_min_clk, + PPSMC_MSG_SetEclkHardMin)); + + } + return 0; +} + +int cz_dpm_powerdown_vce(struct pp_hwmgr *hwmgr) +{ + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_VCEPowerGating)) + return smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_VCEPowerOFF); + return 0; +} + +int cz_dpm_powerup_vce(struct pp_hwmgr *hwmgr) +{ + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_VCEPowerGating)) + return smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_VCEPowerON); + return 0; +} + +static int cz_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low) +{ + struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); + + return cz_hwmgr->sys_info.bootup_uma_clock; +} + +static int cz_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low) +{ + struct pp_power_state *ps; + struct cz_power_state *cz_ps; + + if (hwmgr == NULL) + return -EINVAL; + + ps = hwmgr->request_ps; + + if (ps == NULL) + return -EINVAL; + + cz_ps = cast_PhwCzPowerState(&ps->hardware); + + if (low) + return cz_ps->levels[0].engineClock; + else + return cz_ps->levels[cz_ps->level-1].engineClock; +} + +static int cz_dpm_patch_boot_state(struct pp_hwmgr *hwmgr, + struct pp_hw_power_state *hw_ps) +{ + struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); + struct cz_power_state *cz_ps = cast_PhwCzPowerState(hw_ps); + + cz_ps->level = 1; + cz_ps->nbps_flags = 0; + cz_ps->bapm_flags = 0; + cz_ps->levels[0] = cz_hwmgr->boot_power_level; + + return 0; +} + +static int cz_dpm_get_pp_table_entry_callback( + struct pp_hwmgr *hwmgr, + struct pp_hw_power_state *hw_ps, + unsigned int index, + const void *clock_info) +{ + struct cz_power_state *cz_ps = cast_PhwCzPowerState(hw_ps); + + const ATOM_PPLIB_CZ_CLOCK_INFO *cz_clock_info = clock_info; + + struct phm_clock_voltage_dependency_table *table = + hwmgr->dyn_state.vddc_dependency_on_sclk; + uint8_t clock_info_index = cz_clock_info->index; + + if (clock_info_index > (uint8_t)(hwmgr->platform_descriptor.hardwareActivityPerformanceLevels - 1)) + clock_info_index = (uint8_t)(hwmgr->platform_descriptor.hardwareActivityPerformanceLevels - 1); + + cz_ps->levels[index].engineClock = table->entries[clock_info_index].clk; + cz_ps->levels[index].vddcIndex = (uint8_t)table->entries[clock_info_index].v; + + cz_ps->level = index + 1; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) { + cz_ps->levels[index].dsDividerIndex = 5; + cz_ps->levels[index].ssDividerIndex = 5; + } + + return 0; +} + +static int cz_dpm_get_num_of_pp_table_entries(struct pp_hwmgr *hwmgr) +{ + int result; + unsigned long ret = 0; + + result = pp_tables_get_num_of_entries(hwmgr, &ret); + + return result ? 
0 : ret; +} + +static int cz_dpm_get_pp_table_entry(struct pp_hwmgr *hwmgr, + unsigned long entry, struct pp_power_state *ps) +{ + int result; + struct cz_power_state *cz_ps; + + ps->hardware.magic = PhwCz_Magic; + + cz_ps = cast_PhwCzPowerState(&(ps->hardware)); + + result = pp_tables_get_entry(hwmgr, entry, ps, + cz_dpm_get_pp_table_entry_callback); + + cz_ps->uvd_clocks.vclk = ps->uvd_clocks.VCLK; + cz_ps->uvd_clocks.dclk = ps->uvd_clocks.DCLK; + + return result; +} + +int cz_get_power_state_size(struct pp_hwmgr *hwmgr) +{ + return sizeof(struct cz_power_state); +} + +static void +cz_print_current_perforce_level(struct pp_hwmgr *hwmgr, struct seq_file *m) +{ + struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); + + struct phm_clock_voltage_dependency_table *table = + hwmgr->dyn_state.vddc_dependency_on_sclk; + + struct phm_vce_clock_voltage_dependency_table *vce_table = + hwmgr->dyn_state.vce_clock_voltage_dependency_table; + + struct phm_uvd_clock_voltage_dependency_table *uvd_table = + hwmgr->dyn_state.uvd_clock_voltage_dependency_table; + + uint32_t sclk_index = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixTARGET_AND_CURRENT_PROFILE_INDEX), + TARGET_AND_CURRENT_PROFILE_INDEX, CURR_SCLK_INDEX); + uint32_t uvd_index = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixTARGET_AND_CURRENT_PROFILE_INDEX_2), + TARGET_AND_CURRENT_PROFILE_INDEX_2, CURR_UVD_INDEX); + uint32_t vce_index = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixTARGET_AND_CURRENT_PROFILE_INDEX_2), + TARGET_AND_CURRENT_PROFILE_INDEX_2, CURR_VCE_INDEX); + + uint32_t sclk, vclk, dclk, ecclk, tmp, activity_percent; + uint16_t vddnb, vddgfx; + int result; + + if (sclk_index >= NUM_SCLK_LEVELS) { + seq_printf(m, "\n invalid sclk dpm profile %d\n", sclk_index); + } else { + sclk = table->entries[sclk_index].clk; + seq_printf(m, "\n index: %u sclk: %u MHz\n", sclk_index, sclk/100); + } + + tmp = (cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMUSVI_NB_CURRENTVID) & + CURRENT_NB_VID_MASK) >> CURRENT_NB_VID__SHIFT; + vddnb = cz_convert_8Bit_index_to_voltage(hwmgr, tmp); + tmp = (cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMUSVI_GFX_CURRENTVID) & + CURRENT_GFX_VID_MASK) >> CURRENT_GFX_VID__SHIFT; + vddgfx = cz_convert_8Bit_index_to_voltage(hwmgr, (u16)tmp); + seq_printf(m, "\n vddnb: %u vddgfx: %u\n", vddnb, vddgfx); + + seq_printf(m, "\n uvd %sabled\n", cz_hwmgr->uvd_power_gated ? "dis" : "en"); + if (!cz_hwmgr->uvd_power_gated) { + if (uvd_index >= CZ_MAX_HARDWARE_POWERLEVELS) { + seq_printf(m, "\n invalid uvd dpm level %d\n", uvd_index); + } else { + vclk = uvd_table->entries[uvd_index].vclk; + dclk = uvd_table->entries[uvd_index].dclk; + seq_printf(m, "\n index: %u uvd vclk: %u MHz dclk: %u MHz\n", uvd_index, vclk/100, dclk/100); + } + } + + seq_printf(m, "\n vce %sabled\n", cz_hwmgr->vce_power_gated ? "dis" : "en"); + if (!cz_hwmgr->vce_power_gated) { + if (vce_index >= CZ_MAX_HARDWARE_POWERLEVELS) { + seq_printf(m, "\n invalid vce dpm level %d\n", vce_index); + } else { + ecclk = vce_table->entries[vce_index].ecclk; + seq_printf(m, "\n index: %u vce ecclk: %u MHz\n", vce_index, ecclk/100); + } + } + + result = smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_GetAverageGraphicsActivity); + if (0 == result) { + activity_percent = cgs_read_register(hwmgr->device, mmSMU_MP1_SRBM2P_ARG_0); + activity_percent = activity_percent > 100 ? 
100 : activity_percent; + } else { + activity_percent = 50; + } + + seq_printf(m, "\n [GPU load]: %u %%\n\n", activity_percent); +} + +static void cz_hw_print_display_cfg( + const struct cc6_settings *cc6_settings) +{ + PP_DBG_LOG("New Display Configuration:\n"); + + PP_DBG_LOG(" cpu_cc6_disable: %d\n", + cc6_settings->cpu_cc6_disable); + PP_DBG_LOG(" cpu_pstate_disable: %d\n", + cc6_settings->cpu_pstate_disable); + PP_DBG_LOG(" nb_pstate_switch_disable: %d\n", + cc6_settings->nb_pstate_switch_disable); + PP_DBG_LOG(" cpu_pstate_separation_time: %d\n\n", + cc6_settings->cpu_pstate_separation_time); +} + + static int cz_set_cpu_power_state(struct pp_hwmgr *hwmgr) +{ + struct cz_hwmgr *hw_data = (struct cz_hwmgr *)(hwmgr->backend); + uint32_t data = 0; + + if (hw_data->cc6_settings.cc6_setting_changed == true) { + + hw_data->cc6_settings.cc6_setting_changed = false; + + cz_hw_print_display_cfg(&hw_data->cc6_settings); + + data |= (hw_data->cc6_settings.cpu_pstate_separation_time + & PWRMGT_SEPARATION_TIME_MASK) + << PWRMGT_SEPARATION_TIME_SHIFT; + + data|= (hw_data->cc6_settings.cpu_cc6_disable ? 0x1 : 0x0) + << PWRMGT_DISABLE_CPU_CSTATES_SHIFT; + + data|= (hw_data->cc6_settings.cpu_pstate_disable ? 0x1 : 0x0) + << PWRMGT_DISABLE_CPU_PSTATES_SHIFT; + + PP_DBG_LOG("SetDisplaySizePowerParams data: 0x%X\n", + data); + + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_SetDisplaySizePowerParams, + data); + } + + return 0; +} + + + static int cz_store_cc6_data(struct pp_hwmgr *hwmgr, uint32_t separation_time, + bool cc6_disable, bool pstate_disable, bool pstate_switch_disable) + { + struct cz_hwmgr *hw_data = (struct cz_hwmgr *)(hwmgr->backend); + + if (separation_time != + hw_data->cc6_settings.cpu_pstate_separation_time + || cc6_disable != + hw_data->cc6_settings.cpu_cc6_disable + || pstate_disable != + hw_data->cc6_settings.cpu_pstate_disable + || pstate_switch_disable != + hw_data->cc6_settings.nb_pstate_switch_disable) { + + hw_data->cc6_settings.cc6_setting_changed = true; + + hw_data->cc6_settings.cpu_pstate_separation_time = + separation_time; + hw_data->cc6_settings.cpu_cc6_disable = + cc6_disable; + hw_data->cc6_settings.cpu_pstate_disable = + pstate_disable; + hw_data->cc6_settings.nb_pstate_switch_disable = + pstate_switch_disable; + + } + + return 0; +} + + static int cz_get_dal_power_level(struct pp_hwmgr *hwmgr, + struct amd_pp_dal_clock_info*info) +{ + uint32_t i; + const struct phm_clock_voltage_dependency_table * table = + hwmgr->dyn_state.vddc_dep_on_dal_pwrl; + const struct phm_clock_and_voltage_limits* limits = + &hwmgr->dyn_state.max_clock_voltage_on_ac; + + info->engine_max_clock = limits->sclk; + info->memory_max_clock = limits->mclk; + + for (i = table->count - 1; i > 0; i--) { + + if (limits->vddc >= table->entries[i].v) { + info->level = table->entries[i].clk; + return 0; + } + } + return -EINVAL; +} + +static const struct pp_hwmgr_func cz_hwmgr_funcs = { + .backend_init = cz_hwmgr_backend_init, + .backend_fini = cz_hwmgr_backend_fini, + .asic_setup = NULL, + .apply_state_adjust_rules = cz_apply_state_adjust_rules, + .force_dpm_level = cz_dpm_force_dpm_level, + .get_power_state_size = cz_get_power_state_size, + .powerdown_uvd = cz_dpm_powerdown_uvd, + .powergate_uvd = cz_dpm_powergate_uvd, + .powergate_vce = cz_dpm_powergate_vce, + .get_mclk = cz_dpm_get_mclk, + .get_sclk = cz_dpm_get_sclk, + .patch_boot_state = cz_dpm_patch_boot_state, + .get_pp_table_entry = cz_dpm_get_pp_table_entry, + .get_num_of_pp_table_entries = cz_dpm_get_num_of_pp_table_entries, + 
.print_current_perforce_level = cz_print_current_perforce_level,
+	.set_cpu_power_state = cz_set_cpu_power_state,
+	.store_cc6_data = cz_store_cc6_data,
+	.get_dal_power_level = cz_get_dal_power_level,
+};
+
+int cz_hwmgr_init(struct pp_hwmgr *hwmgr)
+{
+	struct cz_hwmgr *cz_hwmgr;
+	int ret = 0;
+
+	cz_hwmgr = kzalloc(sizeof(struct cz_hwmgr), GFP_KERNEL);
+	if (cz_hwmgr == NULL)
+		return -ENOMEM;
+
+	hwmgr->backend = cz_hwmgr;
+	hwmgr->hwmgr_func = &cz_hwmgr_funcs;
+	hwmgr->pptable_func = &pptable_funcs;
+	return ret;
+}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.h
new file mode 100644
index 000000000000..c477f1cf3f23
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.h
@@ -0,0 +1,326 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ * + */ + +#ifndef _CZ_HWMGR_H_ +#define _CZ_HWMGR_H_ + +#include "cgs_common.h" +#include "ppatomctrl.h" + +#define CZ_NUM_NBPSTATES 4 +#define CZ_NUM_NBPMEMORYCLOCK 2 +#define MAX_DISPLAY_CLOCK_LEVEL 8 +#define CZ_AT_DFLT 30 +#define CZ_MAX_HARDWARE_POWERLEVELS 8 +#define PPCZ_VOTINGRIGHTSCLIENTS_DFLT0 0x3FFFC102 +#define CZ_MIN_DEEP_SLEEP_SCLK 800 + +/* Carrizo device IDs */ +#define DEVICE_ID_CZ_9870 0x9870 +#define DEVICE_ID_CZ_9874 0x9874 +#define DEVICE_ID_CZ_9875 0x9875 +#define DEVICE_ID_CZ_9876 0x9876 +#define DEVICE_ID_CZ_9877 0x9877 + +#define PHMCZ_WRITE_SMC_REGISTER(device, reg, value) \ + cgs_write_ind_register(device, CGS_IND_REG__SMC, ix##reg, value) + +struct cz_dpm_entry { + uint32_t soft_min_clk; + uint32_t hard_min_clk; + uint32_t soft_max_clk; + uint32_t hard_max_clk; +}; + +struct cz_sys_info { + uint32_t bootup_uma_clock; + uint32_t bootup_engine_clock; + uint32_t dentist_vco_freq; + uint32_t nb_dpm_enable; + uint32_t nbp_memory_clock[CZ_NUM_NBPMEMORYCLOCK]; + uint32_t nbp_n_clock[CZ_NUM_NBPSTATES]; + uint16_t nbp_voltage_index[CZ_NUM_NBPSTATES]; + uint32_t display_clock[MAX_DISPLAY_CLOCK_LEVEL]; + uint16_t bootup_nb_voltage_index; + uint8_t htc_tmp_lmt; + uint8_t htc_hyst_lmt; + uint32_t system_config; + uint32_t uma_channel_number; +}; + +#define MAX_DISPLAYPHY_IDS 0x8 +#define DISPLAYPHY_LANEMASK 0xF +#define UNKNOWN_TRANSMITTER_PHY_ID (-1) + +#define DISPLAYPHY_PHYID_SHIFT 24 +#define DISPLAYPHY_LANESELECT_SHIFT 16 + +#define DISPLAYPHY_RX_SELECT 0x1 +#define DISPLAYPHY_TX_SELECT 0x2 +#define DISPLAYPHY_CORE_SELECT 0x4 + +#define DDI_POWERGATING_ARG(phyID, lanemask, rx, tx, core) \ + (((uint32_t)(phyID))<backend); + + data->uvd_power_gated = false; + data->vce_power_gated = false; + data->samu_power_gated = false; + data->acp_power_gated = false; + + return 0; +} + +int fiji_phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate) +{ + struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + + if (data->uvd_power_gated == bgate) + return 0; + + data->uvd_power_gated = bgate; + + if (bgate) + fiji_update_uvd_dpm(hwmgr, true); + else + fiji_update_uvd_dpm(hwmgr, false); + + return 0; +} + +int fiji_phm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate) +{ + struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + struct phm_set_power_state_input states; + const struct pp_power_state *pcurrent; + struct pp_power_state *requested; + + if (data->vce_power_gated == bgate) + return 0; + + data->vce_power_gated = bgate; + + pcurrent = hwmgr->current_ps; + requested = hwmgr->request_ps; + + states.pcurrent_state = &(pcurrent->hardware); + states.pnew_state = &(requested->hardware); + + fiji_update_vce_dpm(hwmgr, &states); + fiji_enable_disable_vce_dpm(hwmgr, !bgate); + + return 0; +} + +int fiji_phm_powergate_samu(struct pp_hwmgr *hwmgr, bool bgate) +{ + struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + + if (data->samu_power_gated == bgate) + return 0; + + data->samu_power_gated = bgate; + + if (bgate) + fiji_update_samu_dpm(hwmgr, true); + else + fiji_update_samu_dpm(hwmgr, false); + + return 0; +} + +int fiji_phm_powergate_acp(struct pp_hwmgr *hwmgr, bool bgate) +{ + struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + + if (data->acp_power_gated == bgate) + return 0; + + data->acp_power_gated = bgate; + + if (bgate) + fiji_update_acp_dpm(hwmgr, true); + else + fiji_update_acp_dpm(hwmgr, false); + + return 0; +} diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_clockpowergating.h 
b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_clockpowergating.h new file mode 100644 index 000000000000..33af5f511ab8 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_clockpowergating.h @@ -0,0 +1,35 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef _FIJI_CLOCK_POWER_GATING_H_ +#define _FIJI_CLOCK_POWER_GATING_H_ + +#include "fiji_hwmgr.h" +#include "pp_asicblocks.h" + +extern int fiji_phm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate); +extern int fiji_phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate); +extern int fiji_phm_powergate_samu(struct pp_hwmgr *hwmgr, bool bgate); +extern int fiji_phm_powergate_acp(struct pp_hwmgr *hwmgr, bool bgate); +extern int fiji_phm_disable_clock_power_gating(struct pp_hwmgr *hwmgr); +#endif /* _TONGA_CLOCK_POWER_GATING_H_ */ diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_dyn_defaults.h b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_dyn_defaults.h new file mode 100644 index 000000000000..32d43e8fecb2 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_dyn_defaults.h @@ -0,0 +1,105 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef FIJI_DYN_DEFAULTS_H +#define FIJI_DYN_DEFAULTS_H + +/** \file +* Volcanic Islands Dynamic default parameters. 
+*/ + +enum FIJIdpm_TrendDetection +{ + FIJIAdpm_TrendDetection_AUTO, + FIJIAdpm_TrendDetection_UP, + FIJIAdpm_TrendDetection_DOWN +}; +typedef enum FIJIdpm_TrendDetection FIJIdpm_TrendDetection; + +/* We need to fill in the default values!!!!!!!!!!!!!!!!!!!!!!! */ + +/* Bit vector representing same fields as hardware register. */ +#define PPFIJI_VOTINGRIGHTSCLIENTS_DFLT0 0x3FFFC102 /* CP_Gfx_busy ???? + * HDP_busy + * IH_busy + * UVD_busy + * VCE_busy + * ACP_busy + * SAMU_busy + * SDMA enabled */ +#define PPFIJI_VOTINGRIGHTSCLIENTS_DFLT1 0x000400 /* FE_Gfx_busy - Intended for primary usage. Rest are for flexibility. ???? + * SH_Gfx_busy + * RB_Gfx_busy + * VCE_busy */ + +#define PPFIJI_VOTINGRIGHTSCLIENTS_DFLT2 0xC00080 /* SH_Gfx_busy - Intended for primary usage. Rest are for flexibility. + * FE_Gfx_busy + * RB_Gfx_busy + * ACP_busy */ + +#define PPFIJI_VOTINGRIGHTSCLIENTS_DFLT3 0xC00200 /* RB_Gfx_busy - Intended for primary usage. Rest are for flexibility. + * FE_Gfx_busy + * SH_Gfx_busy + * UVD_busy */ + +#define PPFIJI_VOTINGRIGHTSCLIENTS_DFLT4 0xC01680 /* UVD_busy + * VCE_busy + * ACP_busy + * SAMU_busy */ + +#define PPFIJI_VOTINGRIGHTSCLIENTS_DFLT5 0xC00033 /* GFX, HDP */ +#define PPFIJI_VOTINGRIGHTSCLIENTS_DFLT6 0xC00033 /* GFX, HDP */ +#define PPFIJI_VOTINGRIGHTSCLIENTS_DFLT7 0x3FFFC000 /* GFX, HDP */ + + +/* thermal protection counter (units). */ +#define PPFIJI_THERMALPROTECTCOUNTER_DFLT 0x200 /* ~19us */ + +/* static screen threshold unit */ +#define PPFIJI_STATICSCREENTHRESHOLDUNIT_DFLT 0 + +/* static screen threshold */ +#define PPFIJI_STATICSCREENTHRESHOLD_DFLT 0x00C8 + +/* gfx idle clock stop threshold */ +#define PPFIJI_GFXIDLECLOCKSTOPTHRESHOLD_DFLT 0x200 /* ~19us with static screen threshold unit of 0 */ + +/* Fixed reference divider to use when building baby stepping tables. */ +#define PPFIJI_REFERENCEDIVIDER_DFLT 4 + +/* ULV voltage change delay time + * Used to be delay_vreg in N.I. split for S.I. + * Using N.I. delay_vreg value as default + * ReferenceClock = 2700 + * VoltageResponseTime = 1000 + * VDDCDelayTime = (VoltageResponseTime * ReferenceClock) / 1600 = 1687 + */ +#define PPFIJI_ULVVOLTAGECHANGEDELAY_DFLT 1687 + +#define PPFIJI_CGULVPARAMETER_DFLT 0x00040035 +#define PPFIJI_CGULVCONTROL_DFLT 0x00007450 +#define PPFIJI_TARGETACTIVITY_DFLT 30 /* 30%*/ +#define PPFIJI_MCLK_TARGETACTIVITY_DFLT 10 /* 10% */ + +#endif + diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c new file mode 100644 index 000000000000..28031a7eddba --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c @@ -0,0 +1,5127 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#include +#include +#include +#include "linux/delay.h" + +#include "hwmgr.h" +#include "fiji_smumgr.h" +#include "atombios.h" +#include "hardwaremanager.h" +#include "ppatomctrl.h" +#include "atombios.h" +#include "cgs_common.h" +#include "fiji_dyn_defaults.h" +#include "fiji_powertune.h" +#include "smu73.h" +#include "smu/smu_7_1_3_d.h" +#include "smu/smu_7_1_3_sh_mask.h" +#include "gmc/gmc_8_1_d.h" +#include "gmc/gmc_8_1_sh_mask.h" +#include "bif/bif_5_0_d.h" +#include "bif/bif_5_0_sh_mask.h" +#include "dce/dce_10_0_d.h" +#include "dce/dce_10_0_sh_mask.h" +#include "pppcielanes.h" +#include "fiji_hwmgr.h" +#include "tonga_processpptables.h" +#include "tonga_pptable.h" +#include "pp_debug.h" +#include "pp_acpi.h" +#include "amd_pcie_helpers.h" +#include "cgs_linux.h" +#include "ppinterrupt.h" + +#include "fiji_clockpowergating.h" +#include "fiji_thermal.h" + +#define VOLTAGE_SCALE 4 +#define SMC_RAM_END 0x40000 +#define VDDC_VDDCI_DELTA 300 + +#define MC_SEQ_MISC0_GDDR5_SHIFT 28 +#define MC_SEQ_MISC0_GDDR5_MASK 0xf0000000 +#define MC_SEQ_MISC0_GDDR5_VALUE 5 + +#define MC_CG_ARB_FREQ_F0 0x0a /* boot-up default */ +#define MC_CG_ARB_FREQ_F1 0x0b +#define MC_CG_ARB_FREQ_F2 0x0c +#define MC_CG_ARB_FREQ_F3 0x0d + +/* From smc_reg.h */ +#define SMC_CG_IND_START 0xc0030000 +#define SMC_CG_IND_END 0xc0040000 /* First byte after SMC_CG_IND */ + +#define VOLTAGE_SCALE 4 +#define VOLTAGE_VID_OFFSET_SCALE1 625 +#define VOLTAGE_VID_OFFSET_SCALE2 100 + +#define VDDC_VDDCI_DELTA 300 + +#define ixSWRST_COMMAND_1 0x1400103 +#define MC_SEQ_CNTL__CAC_EN_MASK 0x40000000 + +/** Values for the CG_THERMAL_CTRL::DPM_EVENT_SRC field. 
*/ +enum DPM_EVENT_SRC { + DPM_EVENT_SRC_ANALOG = 0, /* Internal analog trip point */ + DPM_EVENT_SRC_EXTERNAL = 1, /* External (GPIO 17) signal */ + DPM_EVENT_SRC_DIGITAL = 2, /* Internal digital trip point (DIG_THERM_DPM) */ + DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3, /* Internal analog or external */ + DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL = 4 /* Internal digital or external */ +}; + + +/* [2.5%,~2.5%] Clock stretched is multiple of 2.5% vs + * not and [Fmin, Fmax, LDO_REFSEL, USE_FOR_LOW_FREQ] + */ +uint16_t fiji_clock_stretcher_lookup_table[2][4] = { {600, 1050, 3, 0}, + {600, 1050, 6, 1} }; + +/* [FF, SS] type, [] 4 voltage ranges, and + * [Floor Freq, Boundary Freq, VID min , VID max] + */ +uint32_t fiji_clock_stretcher_ddt_table[2][4][4] = +{ { {265, 529, 120, 128}, {325, 650, 96, 119}, {430, 860, 32, 95}, {0, 0, 0, 31} }, + { {275, 550, 104, 112}, {319, 638, 96, 103}, {360, 720, 64, 95}, {384, 768, 32, 63} } }; + +/* [Use_For_Low_freq] value, [0%, 5%, 10%, 7.14%, 14.28%, 20%] + * (coming from PWR_CKS_CNTL.stretch_amount reg spec) + */ +uint8_t fiji_clock_stretch_amount_conversion[2][6] = { {0, 1, 3, 2, 4, 5}, + {0, 2, 4, 5, 6, 5} }; + +const unsigned long PhwFiji_Magic = (unsigned long)(PHM_VIslands_Magic); + +struct fiji_power_state *cast_phw_fiji_power_state( + struct pp_hw_power_state *hw_ps) +{ + PP_ASSERT_WITH_CODE((PhwFiji_Magic == hw_ps->magic), + "Invalid Powerstate Type!", + return NULL;); + + return (struct fiji_power_state *)hw_ps; +} + +const struct fiji_power_state *cast_const_phw_fiji_power_state( + const struct pp_hw_power_state *hw_ps) +{ + PP_ASSERT_WITH_CODE((PhwFiji_Magic == hw_ps->magic), + "Invalid Powerstate Type!", + return NULL;); + + return (const struct fiji_power_state *)hw_ps; +} + +static bool fiji_is_dpm_running(struct pp_hwmgr *hwmgr) +{ + return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device, + CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON)) + ? true : false; +} + +static void fiji_init_dpm_defaults(struct pp_hwmgr *hwmgr) +{ + struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + struct fiji_ulv_parm *ulv = &data->ulv; + + ulv->cg_ulv_parameter = PPFIJI_CGULVPARAMETER_DFLT; + data->voting_rights_clients0 = PPFIJI_VOTINGRIGHTSCLIENTS_DFLT0; + data->voting_rights_clients1 = PPFIJI_VOTINGRIGHTSCLIENTS_DFLT1; + data->voting_rights_clients2 = PPFIJI_VOTINGRIGHTSCLIENTS_DFLT2; + data->voting_rights_clients3 = PPFIJI_VOTINGRIGHTSCLIENTS_DFLT3; + data->voting_rights_clients4 = PPFIJI_VOTINGRIGHTSCLIENTS_DFLT4; + data->voting_rights_clients5 = PPFIJI_VOTINGRIGHTSCLIENTS_DFLT5; + data->voting_rights_clients6 = PPFIJI_VOTINGRIGHTSCLIENTS_DFLT6; + data->voting_rights_clients7 = PPFIJI_VOTINGRIGHTSCLIENTS_DFLT7; + + data->static_screen_threshold_unit = + PPFIJI_STATICSCREENTHRESHOLDUNIT_DFLT; + data->static_screen_threshold = + PPFIJI_STATICSCREENTHRESHOLD_DFLT; + + /* Unset ABM cap as it moved to DAL. 
+ * Add PHM_PlatformCaps_NonABMSupportInPPLib + * for re-direct ABM related request to DAL + */ + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ABM); + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_NonABMSupportInPPLib); + + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_DynamicACTiming); + + fiji_initialize_power_tune_defaults(hwmgr); + + data->mclk_stutter_mode_threshold = 60000; + data->pcie_gen_performance.max = PP_PCIEGen1; + data->pcie_gen_performance.min = PP_PCIEGen3; + data->pcie_gen_power_saving.max = PP_PCIEGen1; + data->pcie_gen_power_saving.min = PP_PCIEGen3; + data->pcie_lane_performance.max = 0; + data->pcie_lane_performance.min = 16; + data->pcie_lane_power_saving.max = 0; + data->pcie_lane_power_saving.min = 16; + + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_DynamicUVDState); +} + +static int fiji_get_sclk_for_voltage_evv(struct pp_hwmgr *hwmgr, + phm_ppt_v1_voltage_lookup_table *lookup_table, + uint16_t virtual_voltage_id, int32_t *sclk) +{ + uint8_t entryId; + uint8_t voltageId; + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + + PP_ASSERT_WITH_CODE(lookup_table->count != 0, "Lookup table is empty", return -EINVAL); + + /* search for leakage voltage ID 0xff01 ~ 0xff08 and sckl */ + for (entryId = 0; entryId < table_info->vdd_dep_on_sclk->count; entryId++) { + voltageId = table_info->vdd_dep_on_sclk->entries[entryId].vddInd; + if (lookup_table->entries[voltageId].us_vdd == virtual_voltage_id) + break; + } + + PP_ASSERT_WITH_CODE(entryId < table_info->vdd_dep_on_sclk->count, + "Can't find requested voltage id in vdd_dep_on_sclk table!", + return -EINVAL; + ); + + *sclk = table_info->vdd_dep_on_sclk->entries[entryId].clk; + + return 0; +} + +/** +* Get Leakage VDDC based on leakage ID. +* +* @param hwmgr the address of the powerplay hardware manager. +* @return always 0 +*/ +static int fiji_get_evv_voltages(struct pp_hwmgr *hwmgr) +{ + struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + uint16_t vv_id; + uint16_t vddc = 0; + uint16_t evv_default = 1150; + uint16_t i, j; + uint32_t sclk = 0; + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)hwmgr->pptable; + struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table = + table_info->vdd_dep_on_sclk; + int result; + + for (i = 0; i < FIJI_MAX_LEAKAGE_COUNT; i++) { + vv_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i; + if (!fiji_get_sclk_for_voltage_evv(hwmgr, + table_info->vddc_lookup_table, vv_id, &sclk)) { + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ClockStretcher)) { + for (j = 1; j < sclk_table->count; j++) { + if (sclk_table->entries[j].clk == sclk && + sclk_table->entries[j].cks_enable == 0) { + sclk += 5000; + break; + } + } + } + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_EnableDriverEVV)) + result = atomctrl_calculate_voltage_evv_on_sclk(hwmgr, + VOLTAGE_TYPE_VDDC, sclk, vv_id, &vddc, i, true); + else + result = -EINVAL; + + if (result) + result = atomctrl_get_voltage_evv_on_sclk(hwmgr, + VOLTAGE_TYPE_VDDC, sclk,vv_id, &vddc); + + /* need to make sure vddc is less than 2v or else, it could burn the ASIC. 
*/ + PP_ASSERT_WITH_CODE((vddc < 2000), + "Invalid VDDC value, greater than 2v!", result = -EINVAL;); + + if (result) + /* 1.15V is the default safe value for Fiji */ + vddc = evv_default; + + /* the voltage should not be zero nor equal to leakage ID */ + if (vddc != 0 && vddc != vv_id) { + data->vddc_leakage.actual_voltage + [data->vddc_leakage.count] = vddc; + data->vddc_leakage.leakage_id + [data->vddc_leakage.count] = vv_id; + data->vddc_leakage.count++; + } + } + } + return 0; +} + +/** + * Change virtual leakage voltage to actual value. + * + * @param hwmgr the address of the powerplay hardware manager. + * @param pointer to changing voltage + * @param pointer to leakage table + */ +static void fiji_patch_with_vdd_leakage(struct pp_hwmgr *hwmgr, + uint16_t *voltage, struct fiji_leakage_voltage *leakage_table) +{ + uint32_t index; + + /* search for leakage voltage ID 0xff01 ~ 0xff08 */ + for (index = 0; index < leakage_table->count; index++) { + /* if this voltage matches a leakage voltage ID */ + /* patch with actual leakage voltage */ + if (leakage_table->leakage_id[index] == *voltage) { + *voltage = leakage_table->actual_voltage[index]; + break; + } + } + + if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0) + printk(KERN_ERR "Voltage value looks like a Leakage ID but it's not patched \n"); +} + +/** +* Patch voltage lookup table by EVV leakages. +* +* @param hwmgr the address of the powerplay hardware manager. +* @param pointer to voltage lookup table +* @param pointer to leakage table +* @return always 0 +*/ +static int fiji_patch_lookup_table_with_leakage(struct pp_hwmgr *hwmgr, + phm_ppt_v1_voltage_lookup_table *lookup_table, + struct fiji_leakage_voltage *leakage_table) +{ + uint32_t i; + + for (i = 0; i < lookup_table->count; i++) + fiji_patch_with_vdd_leakage(hwmgr, + &lookup_table->entries[i].us_vdd, leakage_table); + + return 0; +} + +static int fiji_patch_clock_voltage_limits_with_vddc_leakage( + struct pp_hwmgr *hwmgr, struct fiji_leakage_voltage *leakage_table, + uint16_t *vddc) +{ + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + fiji_patch_with_vdd_leakage(hwmgr, (uint16_t *)vddc, leakage_table); + hwmgr->dyn_state.max_clock_voltage_on_dc.vddc = + table_info->max_clock_voltage_on_dc.vddc; + return 0; +} + +static int fiji_patch_voltage_dependency_tables_with_lookup_table( + struct pp_hwmgr *hwmgr) +{ + uint8_t entryId; + uint8_t voltageId; + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + + struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table = + table_info->vdd_dep_on_sclk; + struct phm_ppt_v1_clock_voltage_dependency_table *mclk_table = + table_info->vdd_dep_on_mclk; + struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = + table_info->mm_dep_table; + + for (entryId = 0; entryId < sclk_table->count; ++entryId) { + voltageId = sclk_table->entries[entryId].vddInd; + sclk_table->entries[entryId].vddc = + table_info->vddc_lookup_table->entries[voltageId].us_vdd; + } + + for (entryId = 0; entryId < mclk_table->count; ++entryId) { + voltageId = mclk_table->entries[entryId].vddInd; + mclk_table->entries[entryId].vddc = + table_info->vddc_lookup_table->entries[voltageId].us_vdd; + } + + for (entryId = 0; entryId < mm_table->count; ++entryId) { + voltageId = mm_table->entries[entryId].vddcInd; + mm_table->entries[entryId].vddc = + table_info->vddc_lookup_table->entries[voltageId].us_vdd; + } + + return 0; + +} + +static int fiji_calc_voltage_dependency_tables(struct 
pp_hwmgr *hwmgr) +{ + /* Need to determine if we need calculated voltage. */ + return 0; +} + +static int fiji_calc_mm_voltage_dependency_table(struct pp_hwmgr *hwmgr) +{ + /* Need to determine if we need calculated voltage from mm table. */ + return 0; +} + +static int fiji_sort_lookup_table(struct pp_hwmgr *hwmgr, + struct phm_ppt_v1_voltage_lookup_table *lookup_table) +{ + uint32_t table_size, i, j; + struct phm_ppt_v1_voltage_lookup_record tmp_voltage_lookup_record; + table_size = lookup_table->count; + + PP_ASSERT_WITH_CODE(0 != lookup_table->count, + "Lookup table is empty", return -EINVAL); + + /* Sorting voltages */ + for (i = 0; i < table_size - 1; i++) { + for (j = i + 1; j > 0; j--) { + if (lookup_table->entries[j].us_vdd < + lookup_table->entries[j - 1].us_vdd) { + tmp_voltage_lookup_record = lookup_table->entries[j - 1]; + lookup_table->entries[j - 1] = lookup_table->entries[j]; + lookup_table->entries[j] = tmp_voltage_lookup_record; + } + } + } + + return 0; +} + +static int fiji_complete_dependency_tables(struct pp_hwmgr *hwmgr) +{ + int result = 0; + int tmp_result; + struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + + tmp_result = fiji_patch_lookup_table_with_leakage(hwmgr, + table_info->vddc_lookup_table, &(data->vddc_leakage)); + if (tmp_result) + result = tmp_result; + + tmp_result = fiji_patch_clock_voltage_limits_with_vddc_leakage(hwmgr, + &(data->vddc_leakage), &table_info->max_clock_voltage_on_dc.vddc); + if (tmp_result) + result = tmp_result; + + tmp_result = fiji_patch_voltage_dependency_tables_with_lookup_table(hwmgr); + if (tmp_result) + result = tmp_result; + + tmp_result = fiji_calc_voltage_dependency_tables(hwmgr); + if (tmp_result) + result = tmp_result; + + tmp_result = fiji_calc_mm_voltage_dependency_table(hwmgr); + if (tmp_result) + result = tmp_result; + + tmp_result = fiji_sort_lookup_table(hwmgr, table_info->vddc_lookup_table); + if(tmp_result) + result = tmp_result; + + return result; +} + +static int fiji_set_private_data_based_on_pptable(struct pp_hwmgr *hwmgr) +{ + struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + + struct phm_ppt_v1_clock_voltage_dependency_table *allowed_sclk_vdd_table = + table_info->vdd_dep_on_sclk; + struct phm_ppt_v1_clock_voltage_dependency_table *allowed_mclk_vdd_table = + table_info->vdd_dep_on_mclk; + + PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table != NULL, + "VDD dependency on SCLK table is missing. \ + This table is mandatory", return -EINVAL); + PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table->count >= 1, + "VDD dependency on SCLK table has to have is missing. \ + This table is mandatory", return -EINVAL); + + PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table != NULL, + "VDD dependency on MCLK table is missing. \ + This table is mandatory", return -EINVAL); + PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table->count >= 1, + "VDD dependency on MCLK table has to have is missing. 
\ + This table is mandatory", return -EINVAL); + + data->min_vddc_in_pptable = (uint16_t)allowed_sclk_vdd_table->entries[0].vddc; + data->max_vddc_in_pptable = (uint16_t)allowed_sclk_vdd_table-> + entries[allowed_sclk_vdd_table->count - 1].vddc; + + table_info->max_clock_voltage_on_ac.sclk = + allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].clk; + table_info->max_clock_voltage_on_ac.mclk = + allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].clk; + table_info->max_clock_voltage_on_ac.vddc = + allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].vddc; + table_info->max_clock_voltage_on_ac.vddci = + allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].vddci; + + hwmgr->dyn_state.max_clock_voltage_on_ac.sclk = + table_info->max_clock_voltage_on_ac.sclk; + hwmgr->dyn_state.max_clock_voltage_on_ac.mclk = + table_info->max_clock_voltage_on_ac.mclk; + hwmgr->dyn_state.max_clock_voltage_on_ac.vddc = + table_info->max_clock_voltage_on_ac.vddc; + hwmgr->dyn_state.max_clock_voltage_on_ac.vddci = + table_info->max_clock_voltage_on_ac.vddci; + + return 0; +} + +static uint16_t fiji_get_current_pcie_speed(struct pp_hwmgr *hwmgr) +{ + uint32_t speedCntl = 0; + + /* mmPCIE_PORT_INDEX rename as mmPCIE_INDEX */ + speedCntl = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__PCIE, + ixPCIE_LC_SPEED_CNTL); + return((uint16_t)PHM_GET_FIELD(speedCntl, + PCIE_LC_SPEED_CNTL, LC_CURRENT_DATA_RATE)); +} + +static int fiji_get_current_pcie_lane_number(struct pp_hwmgr *hwmgr) +{ + uint32_t link_width; + + /* mmPCIE_PORT_INDEX rename as mmPCIE_INDEX */ + link_width = PHM_READ_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE, + PCIE_LC_LINK_WIDTH_CNTL, LC_LINK_WIDTH_RD); + + PP_ASSERT_WITH_CODE((7 >= link_width), + "Invalid PCIe lane width!", return 0); + + return decode_pcie_lane_width(link_width); +} + +/** Patch the Boot State to match VBIOS boot clocks and voltage. +* +* @param hwmgr Pointer to the hardware manager. +* @param pPowerState The address of the PowerState instance being created. +* +*/ +static int fiji_patch_boot_state(struct pp_hwmgr *hwmgr, + struct pp_hw_power_state *hw_ps) +{ + struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + struct fiji_power_state *ps = (struct fiji_power_state *)hw_ps; + ATOM_FIRMWARE_INFO_V2_2 *fw_info; + uint16_t size; + uint8_t frev, crev; + int index = GetIndexIntoMasterTable(DATA, FirmwareInfo); + + /* First retrieve the Boot clocks and VDDC from the firmware info table. + * We assume here that fw_info is unchanged if this call fails. + */ + fw_info = (ATOM_FIRMWARE_INFO_V2_2 *)cgs_atom_get_data_table( + hwmgr->device, index, + &size, &frev, &crev); + if (!fw_info) + /* During a test, there is no firmware info table. */ + return 0; + + /* Patch the state. 
*/ + data->vbios_boot_state.sclk_bootup_value = + le32_to_cpu(fw_info->ulDefaultEngineClock); + data->vbios_boot_state.mclk_bootup_value = + le32_to_cpu(fw_info->ulDefaultMemoryClock); + data->vbios_boot_state.mvdd_bootup_value = + le16_to_cpu(fw_info->usBootUpMVDDCVoltage); + data->vbios_boot_state.vddc_bootup_value = + le16_to_cpu(fw_info->usBootUpVDDCVoltage); + data->vbios_boot_state.vddci_bootup_value = + le16_to_cpu(fw_info->usBootUpVDDCIVoltage); + data->vbios_boot_state.pcie_gen_bootup_value = + fiji_get_current_pcie_speed(hwmgr); + data->vbios_boot_state.pcie_lane_bootup_value = + (uint16_t)fiji_get_current_pcie_lane_number(hwmgr); + + /* set boot power state */ + ps->performance_levels[0].memory_clock = data->vbios_boot_state.mclk_bootup_value; + ps->performance_levels[0].engine_clock = data->vbios_boot_state.sclk_bootup_value; + ps->performance_levels[0].pcie_gen = data->vbios_boot_state.pcie_gen_bootup_value; + ps->performance_levels[0].pcie_lane = data->vbios_boot_state.pcie_lane_bootup_value; + + return 0; +} + +static int fiji_hwmgr_backend_init(struct pp_hwmgr *hwmgr) +{ + struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + uint32_t i; + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + bool stay_in_boot; + int result; + + data->dll_default_on = false; + data->sram_end = SMC_RAM_END; + + for (i = 0; i < SMU73_MAX_LEVELS_GRAPHICS; i++) + data->activity_target[i] = FIJI_AT_DFLT; + + data->vddc_vddci_delta = VDDC_VDDCI_DELTA; + + data->mclk_activity_target = PPFIJI_MCLK_TARGETACTIVITY_DFLT; + data->mclk_dpm0_activity_target = 0xa; + + data->sclk_dpm_key_disabled = 0; + data->mclk_dpm_key_disabled = 0; + data->pcie_dpm_key_disabled = 0; + + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_UnTabledHardwareInterface); + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_TablelessHardwareInterface); + + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_SclkDeepSleep); + + data->gpio_debug = 0; + + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_DynamicPatchPowerState); + + /* need to set voltage control types before EVV patching */ + data->voltage_control = FIJI_VOLTAGE_CONTROL_NONE; + data->vddci_control = FIJI_VOLTAGE_CONTROL_NONE; + data->mvdd_control = FIJI_VOLTAGE_CONTROL_NONE; + + if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr, + VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2)) + data->voltage_control = FIJI_VOLTAGE_CONTROL_BY_SVID2; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_EnableMVDDControl)) + if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr, + VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT)) + data->mvdd_control = FIJI_VOLTAGE_CONTROL_BY_GPIO; + + if (data->mvdd_control == FIJI_VOLTAGE_CONTROL_NONE) + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_EnableMVDDControl); + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ControlVDDCI)) { + if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr, + VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT)) + data->vddci_control = FIJI_VOLTAGE_CONTROL_BY_GPIO; + else if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr, + VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2)) + data->vddci_control = FIJI_VOLTAGE_CONTROL_BY_SVID2; + } + + if (data->vddci_control == FIJI_VOLTAGE_CONTROL_NONE) + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ControlVDDCI); + + if (table_info && 
table_info->cac_dtp_table->usClockStretchAmount) + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ClockStretcher); + + fiji_init_dpm_defaults(hwmgr); + + /* Get leakage voltage based on leakage ID. */ + fiji_get_evv_voltages(hwmgr); + + /* Patch our voltage dependency table with actual leakage voltage + * We need to perform leakage translation before it's used by other functions + */ + fiji_complete_dependency_tables(hwmgr); + + /* Parse pptable data read from VBIOS */ + fiji_set_private_data_based_on_pptable(hwmgr); + + /* ULV Support */ + data->ulv.ulv_supported = true; /* ULV feature is enabled by default */ + + /* Initalize Dynamic State Adjustment Rule Settings */ + result = tonga_initializa_dynamic_state_adjustment_rule_settings(hwmgr); + + if (!result) { + data->uvd_enabled = false; + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_EnableSMU7ThermalManagement); + data->vddc_phase_shed_control = false; + } + + stay_in_boot = phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_StayInBootState); + + if (0 == result) { + struct cgs_system_info sys_info = {0}; + + data->is_tlu_enabled = 0; + hwmgr->platform_descriptor.hardwareActivityPerformanceLevels = + FIJI_MAX_HARDWARE_POWERLEVELS; + hwmgr->platform_descriptor.hardwarePerformanceLevels = 2; + hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50; + + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_FanSpeedInTableIsRPM); + + if (table_info->cac_dtp_table->usDefaultTargetOperatingTemp && + hwmgr->thermal_controller. + advanceFanControlParameters.ucFanControlMode) { + hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanPWM = + hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanPWM; + hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM = + hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanRPM; + hwmgr->dyn_state.cac_dtp_table->usOperatingTempMinLimit = + table_info->cac_dtp_table->usOperatingTempMinLimit; + hwmgr->dyn_state.cac_dtp_table->usOperatingTempMaxLimit = + table_info->cac_dtp_table->usOperatingTempMaxLimit; + hwmgr->dyn_state.cac_dtp_table->usDefaultTargetOperatingTemp = + table_info->cac_dtp_table->usDefaultTargetOperatingTemp; + hwmgr->dyn_state.cac_dtp_table->usOperatingTempStep = + table_info->cac_dtp_table->usOperatingTempStep; + hwmgr->dyn_state.cac_dtp_table->usTargetOperatingTemp = + table_info->cac_dtp_table->usTargetOperatingTemp; + + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ODFuzzyFanControlSupport); + } + + sys_info.size = sizeof(struct cgs_system_info); + sys_info.info_id = CGS_SYSTEM_INFO_PCIE_GEN_INFO; + result = cgs_query_system_info(hwmgr->device, &sys_info); + if (result) + data->pcie_gen_cap = 0x30007; + else + data->pcie_gen_cap = (uint32_t)sys_info.value; + if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) + data->pcie_spc_cap = 20; + sys_info.size = sizeof(struct cgs_system_info); + sys_info.info_id = CGS_SYSTEM_INFO_PCIE_MLW; + result = cgs_query_system_info(hwmgr->device, &sys_info); + if (result) + data->pcie_lane_cap = 0x2f0000; + else + data->pcie_lane_cap = (uint32_t)sys_info.value; + } else { + /* Ignore return value in here, we are cleaning up a mess. */ + tonga_hwmgr_backend_fini(hwmgr); + } + + return 0; +} + +/** + * Read clock related registers. + * + * @param hwmgr the address of the powerplay hardware manager. 
+ * @return always 0 + */ +static int fiji_read_clock_registers(struct pp_hwmgr *hwmgr) +{ + struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + + data->clock_registers.vCG_SPLL_FUNC_CNTL = + cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_SPLL_FUNC_CNTL); + data->clock_registers.vCG_SPLL_FUNC_CNTL_2 = + cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_SPLL_FUNC_CNTL_2); + data->clock_registers.vCG_SPLL_FUNC_CNTL_3 = + cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_SPLL_FUNC_CNTL_3); + data->clock_registers.vCG_SPLL_FUNC_CNTL_4 = + cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_SPLL_FUNC_CNTL_4); + data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM = + cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_SPLL_SPREAD_SPECTRUM); + data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2 = + cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_SPLL_SPREAD_SPECTRUM_2); + + return 0; +} + +/** + * Find out if memory is GDDR5. + * + * @param hwmgr the address of the powerplay hardware manager. + * @return always 0 + */ +static int fiji_get_memory_type(struct pp_hwmgr *hwmgr) +{ + struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + uint32_t temp; + + temp = cgs_read_register(hwmgr->device, mmMC_SEQ_MISC0); + + data->is_memory_gddr5 = (MC_SEQ_MISC0_GDDR5_VALUE == + ((temp & MC_SEQ_MISC0_GDDR5_MASK) >> + MC_SEQ_MISC0_GDDR5_SHIFT)); + + return 0; +} + +/** + * Enables Dynamic Power Management by SMC + * + * @param hwmgr the address of the powerplay hardware manager. + * @return always 0 + */ +static int fiji_enable_acpi_power_management(struct pp_hwmgr *hwmgr) +{ + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + GENERAL_PWRMGT, STATIC_PM_EN, 1); + + return 0; +} + +/** + * Initialize PowerGating States for different engines + * + * @param hwmgr the address of the powerplay hardware manager. 
+ * @return always 0 + */ +static int fiji_init_power_gate_state(struct pp_hwmgr *hwmgr) +{ + struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + + data->uvd_power_gated = false; + data->vce_power_gated = false; + data->samu_power_gated = false; + data->acp_power_gated = false; + data->pg_acp_init = true; + + return 0; +} + +static int fiji_init_sclk_threshold(struct pp_hwmgr *hwmgr) +{ + struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + data->low_sclk_interrupt_threshold = 0; + + return 0; +} + +static int fiji_setup_asic_task(struct pp_hwmgr *hwmgr) +{ + int tmp_result, result = 0; + + tmp_result = fiji_read_clock_registers(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to read clock registers!", result = tmp_result); + + tmp_result = fiji_get_memory_type(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to get memory type!", result = tmp_result); + + tmp_result = fiji_enable_acpi_power_management(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to enable ACPI power management!", result = tmp_result); + + tmp_result = fiji_init_power_gate_state(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to init power gate state!", result = tmp_result); + + tmp_result = tonga_get_mc_microcode_version(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to get MC microcode version!", result = tmp_result); + + tmp_result = fiji_init_sclk_threshold(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to init sclk threshold!", result = tmp_result); + + return result; +} + +/** +* Checks if we want to support voltage control +* +* @param hwmgr the address of the powerplay hardware manager. +*/ +static bool fiji_voltage_control(const struct pp_hwmgr *hwmgr) +{ + const struct fiji_hwmgr *data = + (const struct fiji_hwmgr *)(hwmgr->backend); + + return (FIJI_VOLTAGE_CONTROL_NONE != data->voltage_control); +} + +/** +* Enable voltage control +* +* @param hwmgr the address of the powerplay hardware manager. +* @return always 0 +*/ +static int fiji_enable_voltage_control(struct pp_hwmgr *hwmgr) +{ + /* enable voltage control */ + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + GENERAL_PWRMGT, VOLT_PWRMGT_EN, 1); + + return 0; +} + +/** +* Remove repeated voltage values and create table with unique values. +* +* @param hwmgr the address of the powerplay hardware manager. 
+* @param vol_table the pointer to changing voltage table +* @return 0 in success +*/ + +static int fiji_trim_voltage_table(struct pp_hwmgr *hwmgr, + struct pp_atomctrl_voltage_table *vol_table) +{ + uint32_t i, j; + uint16_t vvalue; + bool found = false; + struct pp_atomctrl_voltage_table *table; + + PP_ASSERT_WITH_CODE((NULL != vol_table), + "Voltage Table empty.", return -EINVAL); + table = kzalloc(sizeof(struct pp_atomctrl_voltage_table), + GFP_KERNEL); + + if (NULL == table) + return -ENOMEM; + + table->mask_low = vol_table->mask_low; + table->phase_delay = vol_table->phase_delay; + + for (i = 0; i < vol_table->count; i++) { + vvalue = vol_table->entries[i].value; + found = false; + + for (j = 0; j < table->count; j++) { + if (vvalue == table->entries[j].value) { + found = true; + break; + } + } + + if (!found) { + table->entries[table->count].value = vvalue; + table->entries[table->count].smio_low = + vol_table->entries[i].smio_low; + table->count++; + } + } + + memcpy(vol_table, table, sizeof(struct pp_atomctrl_voltage_table)); + kfree(table); + + return 0; +} + +static int fiji_get_svi2_mvdd_voltage_table(struct pp_hwmgr *hwmgr, + phm_ppt_v1_clock_voltage_dependency_table *dep_table) +{ + uint32_t i; + int result; + struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + struct pp_atomctrl_voltage_table *vol_table = &(data->mvdd_voltage_table); + + PP_ASSERT_WITH_CODE((0 != dep_table->count), + "Voltage Dependency Table empty.", return -EINVAL); + + vol_table->mask_low = 0; + vol_table->phase_delay = 0; + vol_table->count = dep_table->count; + + for (i = 0; i < dep_table->count; i++) { + vol_table->entries[i].value = dep_table->entries[i].mvdd; + vol_table->entries[i].smio_low = 0; + } + + result = fiji_trim_voltage_table(hwmgr, vol_table); + PP_ASSERT_WITH_CODE((0 == result), + "Failed to trim MVDD table.", return result); + + return 0; +} + +static int fiji_get_svi2_vddci_voltage_table(struct pp_hwmgr *hwmgr, + phm_ppt_v1_clock_voltage_dependency_table *dep_table) +{ + uint32_t i; + int result; + struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + struct pp_atomctrl_voltage_table *vol_table = &(data->vddci_voltage_table); + + PP_ASSERT_WITH_CODE((0 != dep_table->count), + "Voltage Dependency Table empty.", return -EINVAL); + + vol_table->mask_low = 0; + vol_table->phase_delay = 0; + vol_table->count = dep_table->count; + + for (i = 0; i < dep_table->count; i++) { + vol_table->entries[i].value = dep_table->entries[i].vddci; + vol_table->entries[i].smio_low = 0; + } + + result = fiji_trim_voltage_table(hwmgr, vol_table); + PP_ASSERT_WITH_CODE((0 == result), + "Failed to trim VDDCI table.", return result); + + return 0; +} + +static int fiji_get_svi2_vdd_voltage_table(struct pp_hwmgr *hwmgr, + phm_ppt_v1_voltage_lookup_table *lookup_table) +{ + int i = 0; + struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + struct pp_atomctrl_voltage_table *vol_table = &(data->vddc_voltage_table); + + PP_ASSERT_WITH_CODE((0 != lookup_table->count), + "Voltage Lookup Table empty.", return -EINVAL); + + vol_table->mask_low = 0; + vol_table->phase_delay = 0; + + vol_table->count = lookup_table->count; + + for (i = 0; i < vol_table->count; i++) { + vol_table->entries[i].value = lookup_table->entries[i].us_vdd; + vol_table->entries[i].smio_low = 0; + } + + return 0; +} + +/* ---- Voltage Tables ---- + * If the voltage table would be bigger than + * what will fit into the state table on + * the SMC keep only the higher entries. 
+ */ +static void fiji_trim_voltage_table_to_fit_state_table(struct pp_hwmgr *hwmgr, + uint32_t max_vol_steps, struct pp_atomctrl_voltage_table *vol_table) +{ + unsigned int i, diff; + + if (vol_table->count <= max_vol_steps) + return; + + diff = vol_table->count - max_vol_steps; + + for (i = 0; i < max_vol_steps; i++) + vol_table->entries[i] = vol_table->entries[i + diff]; + + vol_table->count = max_vol_steps; + + return; +} + +/** +* Create Voltage Tables. +* +* @param hwmgr the address of the powerplay hardware manager. +* @return always 0 +*/ +static int fiji_construct_voltage_tables(struct pp_hwmgr *hwmgr) +{ + struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)hwmgr->pptable; + int result; + + if (FIJI_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) { + result = atomctrl_get_voltage_table_v3(hwmgr, + VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT, + &(data->mvdd_voltage_table)); + PP_ASSERT_WITH_CODE((0 == result), + "Failed to retrieve MVDD table.", + return result); + } else if (FIJI_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) { + result = fiji_get_svi2_mvdd_voltage_table(hwmgr, + table_info->vdd_dep_on_mclk); + PP_ASSERT_WITH_CODE((0 == result), + "Failed to retrieve SVI2 MVDD table from dependancy table.", + return result;); + } + + if (FIJI_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) { + result = atomctrl_get_voltage_table_v3(hwmgr, + VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT, + &(data->vddci_voltage_table)); + PP_ASSERT_WITH_CODE((0 == result), + "Failed to retrieve VDDCI table.", + return result); + } else if (FIJI_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) { + result = fiji_get_svi2_vddci_voltage_table(hwmgr, + table_info->vdd_dep_on_mclk); + PP_ASSERT_WITH_CODE((0 == result), + "Failed to retrieve SVI2 VDDCI table from dependancy table.", + return result); + } + + if(FIJI_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) { + result = fiji_get_svi2_vdd_voltage_table(hwmgr, + table_info->vddc_lookup_table); + PP_ASSERT_WITH_CODE((0 == result), + "Failed to retrieve SVI2 VDDC table from lookup table.", + return result); + } + + PP_ASSERT_WITH_CODE( + (data->vddc_voltage_table.count <= (SMU73_MAX_LEVELS_VDDC)), + "Too many voltage values for VDDC. Trimming to fit state table.", + fiji_trim_voltage_table_to_fit_state_table(hwmgr, + SMU73_MAX_LEVELS_VDDC, &(data->vddc_voltage_table))); + + PP_ASSERT_WITH_CODE( + (data->vddci_voltage_table.count <= (SMU73_MAX_LEVELS_VDDCI)), + "Too many voltage values for VDDCI. Trimming to fit state table.", + fiji_trim_voltage_table_to_fit_state_table(hwmgr, + SMU73_MAX_LEVELS_VDDCI, &(data->vddci_voltage_table))); + + PP_ASSERT_WITH_CODE( + (data->mvdd_voltage_table.count <= (SMU73_MAX_LEVELS_MVDD)), + "Too many voltage values for MVDD. 
Trimming to fit state table.", + fiji_trim_voltage_table_to_fit_state_table(hwmgr, + SMU73_MAX_LEVELS_MVDD, &(data->mvdd_voltage_table))); + + return 0; +} + +static int fiji_initialize_mc_reg_table(struct pp_hwmgr *hwmgr) +{ + /* Program additional LP registers + * that are no longer programmed by VBIOS + */ + cgs_write_register(hwmgr->device, mmMC_SEQ_RAS_TIMING_LP, + cgs_read_register(hwmgr->device, mmMC_SEQ_RAS_TIMING)); + cgs_write_register(hwmgr->device, mmMC_SEQ_CAS_TIMING_LP, + cgs_read_register(hwmgr->device, mmMC_SEQ_CAS_TIMING)); + cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2_LP, + cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2)); + cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1_LP, + cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1)); + cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0_LP, + cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0)); + cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1_LP, + cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1)); + cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_TIMING_LP, + cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_TIMING)); + + return 0; +} + +/** +* Programs static screed detection parameters +* +* @param hwmgr the address of the powerplay hardware manager. +* @return always 0 +*/ +static int fiji_program_static_screen_threshold_parameters( + struct pp_hwmgr *hwmgr) +{ + struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + + /* Set static screen threshold unit */ + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD_UNIT, + data->static_screen_threshold_unit); + /* Set static screen threshold */ + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD, + data->static_screen_threshold); + + return 0; +} + +/** +* Setup display gap for glitch free memory clock switching. +* +* @param hwmgr the address of the powerplay hardware manager. +* @return always 0 +*/ +static int fiji_enable_display_gap(struct pp_hwmgr *hwmgr) +{ + uint32_t displayGap = + cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_DISPLAY_GAP_CNTL); + + displayGap = PHM_SET_FIELD(displayGap, CG_DISPLAY_GAP_CNTL, + DISP_GAP, DISPLAY_GAP_IGNORE); + + displayGap = PHM_SET_FIELD(displayGap, CG_DISPLAY_GAP_CNTL, + DISP_GAP_MCHG, DISPLAY_GAP_VBLANK); + + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_DISPLAY_GAP_CNTL, displayGap); + + return 0; +} + +/** +* Programs activity state transition voting clients +* +* @param hwmgr the address of the powerplay hardware manager. 
+* @return always 0 +*/ +static int fiji_program_voting_clients(struct pp_hwmgr *hwmgr) +{ + struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + + /* Clear reset for voting clients before enabling DPM */ + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 0); + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 0); + + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_FREQ_TRAN_VOTING_0, data->voting_rights_clients0); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_FREQ_TRAN_VOTING_1, data->voting_rights_clients1); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_FREQ_TRAN_VOTING_2, data->voting_rights_clients2); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_FREQ_TRAN_VOTING_3, data->voting_rights_clients3); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_FREQ_TRAN_VOTING_4, data->voting_rights_clients4); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_FREQ_TRAN_VOTING_5, data->voting_rights_clients5); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_FREQ_TRAN_VOTING_6, data->voting_rights_clients6); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_FREQ_TRAN_VOTING_7, data->voting_rights_clients7); + + return 0; +} + +/** +* Get the location of various tables inside the FW image. +* +* @param hwmgr the address of the powerplay hardware manager. +* @return always 0 +*/ +static int fiji_process_firmware_header(struct pp_hwmgr *hwmgr) +{ + struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend); + uint32_t tmp; + int result; + bool error = false; + + result = fiji_read_smc_sram_dword(hwmgr->smumgr, + SMU7_FIRMWARE_HEADER_LOCATION + + offsetof(SMU73_Firmware_Header, DpmTable), + &tmp, data->sram_end); + + if (0 == result) + data->dpm_table_start = tmp; + + error |= (0 != result); + + result = fiji_read_smc_sram_dword(hwmgr->smumgr, + SMU7_FIRMWARE_HEADER_LOCATION + + offsetof(SMU73_Firmware_Header, SoftRegisters), + &tmp, data->sram_end); + + if (!result) { + data->soft_regs_start = tmp; + smu_data->soft_regs_start = tmp; + } + + error |= (0 != result); + + result = fiji_read_smc_sram_dword(hwmgr->smumgr, + SMU7_FIRMWARE_HEADER_LOCATION + + offsetof(SMU73_Firmware_Header, mcRegisterTable), + &tmp, data->sram_end); + + if (!result) + data->mc_reg_table_start = tmp; + + result = fiji_read_smc_sram_dword(hwmgr->smumgr, + SMU7_FIRMWARE_HEADER_LOCATION + + offsetof(SMU73_Firmware_Header, FanTable), + &tmp, data->sram_end); + + if (!result) + data->fan_table_start = tmp; + + error |= (0 != result); + + result = fiji_read_smc_sram_dword(hwmgr->smumgr, + SMU7_FIRMWARE_HEADER_LOCATION + + offsetof(SMU73_Firmware_Header, mcArbDramTimingTable), + &tmp, data->sram_end); + + if (!result) + data->arb_table_start = tmp; + + error |= (0 != result); + + result = fiji_read_smc_sram_dword(hwmgr->smumgr, + SMU7_FIRMWARE_HEADER_LOCATION + + offsetof(SMU73_Firmware_Header, Version), + &tmp, data->sram_end); + + if (!result) + hwmgr->microcode_version_info.SMC = tmp; + + error |= (0 != result); + + return error ? -1 : 0; +} + +/* Copy one arb setting to another and then switch the active set. + * arb_src and arb_dest is one of the MC_CG_ARB_FREQ_Fx constants. 
+ */ +static int fiji_copy_and_switch_arb_sets(struct pp_hwmgr *hwmgr, + uint32_t arb_src, uint32_t arb_dest) +{ + uint32_t mc_arb_dram_timing; + uint32_t mc_arb_dram_timing2; + uint32_t burst_time; + uint32_t mc_cg_config; + + switch (arb_src) { + case MC_CG_ARB_FREQ_F0: + mc_arb_dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING); + mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2); + burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0); + break; + case MC_CG_ARB_FREQ_F1: + mc_arb_dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1); + mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1); + burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1); + break; + default: + return -EINVAL; + } + + switch (arb_dest) { + case MC_CG_ARB_FREQ_F0: + cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING, mc_arb_dram_timing); + cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2, mc_arb_dram_timing2); + PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0, burst_time); + break; + case MC_CG_ARB_FREQ_F1: + cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1, mc_arb_dram_timing); + cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2); + PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1, burst_time); + break; + default: + return -EINVAL; + } + + mc_cg_config = cgs_read_register(hwmgr->device, mmMC_CG_CONFIG); + mc_cg_config |= 0x0000000F; + cgs_write_register(hwmgr->device, mmMC_CG_CONFIG, mc_cg_config); + PHM_WRITE_FIELD(hwmgr->device, MC_ARB_CG, CG_ARB_REQ, arb_dest); + + return 0; +} + +/** +* Initial switch from ARB F0->F1 +* +* @param hwmgr the address of the powerplay hardware manager. +* @return always 0 +* This function is to be called from the SetPowerState table. 
+*/ +static int fiji_initial_switch_from_arbf0_to_f1(struct pp_hwmgr *hwmgr) +{ + return fiji_copy_and_switch_arb_sets(hwmgr, + MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1); +} + +static int fiji_reset_single_dpm_table(struct pp_hwmgr *hwmgr, + struct fiji_single_dpm_table *dpm_table, uint32_t count) +{ + int i; + PP_ASSERT_WITH_CODE(count <= MAX_REGULAR_DPM_NUMBER, + "Fatal error, can not set up single DPM table entries " + "to exceed max number!",); + + dpm_table->count = count; + for (i = 0; i < MAX_REGULAR_DPM_NUMBER; i++) + dpm_table->dpm_levels[i].enabled = false; + + return 0; +} + +static void fiji_setup_pcie_table_entry( + struct fiji_single_dpm_table *dpm_table, + uint32_t index, uint32_t pcie_gen, + uint32_t pcie_lanes) +{ + dpm_table->dpm_levels[index].value = pcie_gen; + dpm_table->dpm_levels[index].param1 = pcie_lanes; + dpm_table->dpm_levels[index].enabled = 1; +} + +static int fiji_setup_default_pcie_table(struct pp_hwmgr *hwmgr) +{ + struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + struct phm_ppt_v1_pcie_table *pcie_table = table_info->pcie_table; + uint32_t i, max_entry; + + PP_ASSERT_WITH_CODE((data->use_pcie_performance_levels || + data->use_pcie_power_saving_levels), "No pcie performance levels!", + return -EINVAL); + + if (data->use_pcie_performance_levels && + !data->use_pcie_power_saving_levels) { + data->pcie_gen_power_saving = data->pcie_gen_performance; + data->pcie_lane_power_saving = data->pcie_lane_performance; + } else if (!data->use_pcie_performance_levels && + data->use_pcie_power_saving_levels) { + data->pcie_gen_performance = data->pcie_gen_power_saving; + data->pcie_lane_performance = data->pcie_lane_power_saving; + } + + fiji_reset_single_dpm_table(hwmgr, + &data->dpm_table.pcie_speed_table, SMU73_MAX_LEVELS_LINK); + + if (pcie_table != NULL) { + /* max_entry is used to make sure we reserve one PCIE level + * for boot level (fix for A+A PSPP issue). + * If PCIE table from PPTable have ULV entry + 8 entries, + * then ignore the last entry.*/ + max_entry = (SMU73_MAX_LEVELS_LINK < pcie_table->count) ? 
+ SMU73_MAX_LEVELS_LINK : pcie_table->count; + for (i = 1; i < max_entry; i++) { + fiji_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, i - 1, + get_pcie_gen_support(data->pcie_gen_cap, + pcie_table->entries[i].gen_speed), + get_pcie_lane_support(data->pcie_lane_cap, + pcie_table->entries[i].lane_width)); + } + data->dpm_table.pcie_speed_table.count = max_entry - 1; + } else { + /* Hardcode Pcie Table */ + fiji_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 0, + get_pcie_gen_support(data->pcie_gen_cap, + PP_Min_PCIEGen), + get_pcie_lane_support(data->pcie_lane_cap, + PP_Max_PCIELane)); + fiji_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 1, + get_pcie_gen_support(data->pcie_gen_cap, + PP_Min_PCIEGen), + get_pcie_lane_support(data->pcie_lane_cap, + PP_Max_PCIELane)); + fiji_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 2, + get_pcie_gen_support(data->pcie_gen_cap, + PP_Max_PCIEGen), + get_pcie_lane_support(data->pcie_lane_cap, + PP_Max_PCIELane)); + fiji_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 3, + get_pcie_gen_support(data->pcie_gen_cap, + PP_Max_PCIEGen), + get_pcie_lane_support(data->pcie_lane_cap, + PP_Max_PCIELane)); + fiji_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 4, + get_pcie_gen_support(data->pcie_gen_cap, + PP_Max_PCIEGen), + get_pcie_lane_support(data->pcie_lane_cap, + PP_Max_PCIELane)); + fiji_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 5, + get_pcie_gen_support(data->pcie_gen_cap, + PP_Max_PCIEGen), + get_pcie_lane_support(data->pcie_lane_cap, + PP_Max_PCIELane)); + + data->dpm_table.pcie_speed_table.count = 6; + } + /* Populate last level for boot PCIE level, but do not increment count. */ + fiji_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, + data->dpm_table.pcie_speed_table.count, + get_pcie_gen_support(data->pcie_gen_cap, + PP_Min_PCIEGen), + get_pcie_lane_support(data->pcie_lane_cap, + PP_Max_PCIELane)); + + return 0; +} + +/* + * This function is to initalize all DPM state tables + * for SMU7 based on the dependency table. + * Dynamic state patching function will then trim these + * state tables to the allowed range based + * on the power policy or external client requests, + * such as UVD request, etc. + */ +static int fiji_setup_default_dpm_tables(struct pp_hwmgr *hwmgr) +{ + struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + uint32_t i; + + struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table = + table_info->vdd_dep_on_sclk; + struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table = + table_info->vdd_dep_on_mclk; + + PP_ASSERT_WITH_CODE(dep_sclk_table != NULL, + "SCLK dependency table is missing. This table is mandatory", + return -EINVAL); + PP_ASSERT_WITH_CODE(dep_sclk_table->count >= 1, + "SCLK dependency table has to have is missing. " + "This table is mandatory", + return -EINVAL); + + PP_ASSERT_WITH_CODE(dep_mclk_table != NULL, + "MCLK dependency table is missing. This table is mandatory", + return -EINVAL); + PP_ASSERT_WITH_CODE(dep_mclk_table->count >= 1, + "MCLK dependency table has to have is missing. 
" + "This table is mandatory", + return -EINVAL); + + /* clear the state table to reset everything to default */ + fiji_reset_single_dpm_table(hwmgr, + &data->dpm_table.sclk_table, SMU73_MAX_LEVELS_GRAPHICS); + fiji_reset_single_dpm_table(hwmgr, + &data->dpm_table.mclk_table, SMU73_MAX_LEVELS_MEMORY); + + /* Initialize Sclk DPM table based on allow Sclk values */ + data->dpm_table.sclk_table.count = 0; + for (i = 0; i < dep_sclk_table->count; i++) { + if (i == 0 || data->dpm_table.sclk_table.dpm_levels + [data->dpm_table.sclk_table.count - 1].value != + dep_sclk_table->entries[i].clk) { + data->dpm_table.sclk_table.dpm_levels + [data->dpm_table.sclk_table.count].value = + dep_sclk_table->entries[i].clk; + data->dpm_table.sclk_table.dpm_levels + [data->dpm_table.sclk_table.count].enabled = + (i == 0) ? true : false; + data->dpm_table.sclk_table.count++; + } + } + + /* Initialize Mclk DPM table based on allow Mclk values */ + data->dpm_table.mclk_table.count = 0; + for (i=0; icount; i++) { + if ( i==0 || data->dpm_table.mclk_table.dpm_levels + [data->dpm_table.mclk_table.count - 1].value != + dep_mclk_table->entries[i].clk) { + data->dpm_table.mclk_table.dpm_levels + [data->dpm_table.mclk_table.count].value = + dep_mclk_table->entries[i].clk; + data->dpm_table.mclk_table.dpm_levels + [data->dpm_table.mclk_table.count].enabled = + (i == 0) ? true : false; + data->dpm_table.mclk_table.count++; + } + } + + /* setup PCIE gen speed levels */ + fiji_setup_default_pcie_table(hwmgr); + + /* save a copy of the default DPM table */ + memcpy(&(data->golden_dpm_table), &(data->dpm_table), + sizeof(struct fiji_dpm_table)); + + return 0; +} + +/** + * @brief PhwFiji_GetVoltageOrder + * Returns index of requested voltage record in lookup(table) + * @param lookup_table - lookup list to search in + * @param voltage - voltage to look for + * @return 0 on success + */ +uint8_t fiji_get_voltage_index( + struct phm_ppt_v1_voltage_lookup_table *lookup_table, uint16_t voltage) +{ + uint8_t count = (uint8_t) (lookup_table->count); + uint8_t i; + + PP_ASSERT_WITH_CODE((NULL != lookup_table), + "Lookup Table empty.", return 0); + PP_ASSERT_WITH_CODE((0 != count), + "Lookup Table empty.", return 0); + + for (i = 0; i < lookup_table->count; i++) { + /* find first voltage equal or bigger than requested */ + if (lookup_table->entries[i].us_vdd >= voltage) + return i; + } + /* voltage is bigger than max voltage in the table */ + return i - 1; +} + +/** +* Preparation of vddc and vddgfx CAC tables for SMC. +* +* @param hwmgr the address of the hardware manager +* @param table the SMC DPM table structure to be populated +* @return always 0 +*/ +static int fiji_populate_cac_table(struct pp_hwmgr *hwmgr, + struct SMU73_Discrete_DpmTable *table) +{ + uint32_t count; + uint8_t index; + int result = 0; + struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + struct phm_ppt_v1_voltage_lookup_table *lookup_table = + table_info->vddc_lookup_table; + /* tables is already swapped, so in order to use the value from it, + * we need to swap it back. 
+ * We are populating vddc CAC data to BapmVddc table + * in split and merged mode + */ + for (count = 0; count < lookup_table->count; count++) { + index = fiji_get_voltage_index(lookup_table, + data->vddc_voltage_table.entries[count].value); + table->BapmVddcVidLoSidd[count] = (uint8_t) ((6200 - + (lookup_table->entries[index].us_cac_low * + VOLTAGE_SCALE)) / 25); + table->BapmVddcVidHiSidd[count] = (uint8_t) ((6200 - + (lookup_table->entries[index].us_cac_high * + VOLTAGE_SCALE)) / 25); + } + + return result; +} + +/** +* Preparation of voltage tables for SMC. +* +* @param hwmgr the address of the hardware manager +* @param table the SMC DPM table structure to be populated +* @return always 0 +*/ + +int fiji_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr, + struct SMU73_Discrete_DpmTable *table) +{ + int result; + + result = fiji_populate_cac_table(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "can not populate CAC voltage tables to SMC", + return -EINVAL); + + return 0; +} + +static int fiji_populate_ulv_level(struct pp_hwmgr *hwmgr, + struct SMU73_Discrete_Ulv *state) +{ + int result = 0; + struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + + state->CcPwrDynRm = 0; + state->CcPwrDynRm1 = 0; + + state->VddcOffset = (uint16_t) table_info->us_ulv_voltage_offset; + state->VddcOffsetVid = (uint8_t)( table_info->us_ulv_voltage_offset * + VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1 ); + + state->VddcPhase = (data->vddc_phase_shed_control) ? 0 : 1; + + if (!result) { + CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm); + CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm1); + CONVERT_FROM_HOST_TO_SMC_US(state->VddcOffset); + } + return result; +} + +static int fiji_populate_ulv_state(struct pp_hwmgr *hwmgr, + struct SMU73_Discrete_DpmTable *table) +{ + return fiji_populate_ulv_level(hwmgr, &table->Ulv); +} + +static int32_t fiji_get_dpm_level_enable_mask_value( + struct fiji_single_dpm_table* dpm_table) +{ + int32_t i; + int32_t mask = 0; + + for (i = dpm_table->count; i > 0; i--) { + mask = mask << 1; + if (dpm_table->dpm_levels[i - 1].enabled) + mask |= 0x1; + else + mask &= 0xFFFFFFFE; + } + return mask; +} + +static int fiji_populate_smc_link_level(struct pp_hwmgr *hwmgr, + struct SMU73_Discrete_DpmTable *table) +{ + struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + struct fiji_dpm_table *dpm_table = &data->dpm_table; + int i; + + /* Index (dpm_table->pcie_speed_table.count) + * is reserved for PCIE boot level. 
*/ + for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) { + table->LinkLevel[i].PcieGenSpeed = + (uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value; + table->LinkLevel[i].PcieLaneCount = (uint8_t)encode_pcie_lane_width( + dpm_table->pcie_speed_table.dpm_levels[i].param1); + table->LinkLevel[i].EnabledForActivity = 1; + table->LinkLevel[i].SPC = (uint8_t)(data->pcie_spc_cap & 0xff); + table->LinkLevel[i].DownThreshold = PP_HOST_TO_SMC_UL(5); + table->LinkLevel[i].UpThreshold = PP_HOST_TO_SMC_UL(30); + } + + data->smc_state_table.LinkLevelCount = + (uint8_t)dpm_table->pcie_speed_table.count; + data->dpm_level_enable_mask.pcie_dpm_enable_mask = + fiji_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table); + + return 0; +} + +/** +* Calculates the SCLK dividers using the provided engine clock +* +* @param hwmgr the address of the hardware manager +* @param clock the engine clock to use to populate the structure +* @param sclk the SMC SCLK structure to be populated +*/ +static int fiji_calculate_sclk_params(struct pp_hwmgr *hwmgr, + uint32_t clock, struct SMU73_Discrete_GraphicsLevel *sclk) +{ + const struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + struct pp_atomctrl_clock_dividers_vi dividers; + uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL; + uint32_t spll_func_cntl_3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3; + uint32_t spll_func_cntl_4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4; + uint32_t cg_spll_spread_spectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM; + uint32_t cg_spll_spread_spectrum_2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2; + uint32_t ref_clock; + uint32_t ref_divider; + uint32_t fbdiv; + int result; + + /* get the engine clock dividers for this clock value */ + result = atomctrl_get_engine_pll_dividers_vi(hwmgr, clock, &dividers); + + PP_ASSERT_WITH_CODE(result == 0, + "Error retrieving Engine Clock dividers from VBIOS.", + return result); + + /* To get FBDIV we need to multiply this by 16384 and divide it by Fref. 
*/ + ref_clock = atomctrl_get_reference_clock(hwmgr); + ref_divider = 1 + dividers.uc_pll_ref_div; + + /* low 14 bits is fraction and high 12 bits is divider */ + fbdiv = dividers.ul_fb_div.ul_fb_divider & 0x3FFFFFF; + + /* SPLL_FUNC_CNTL setup */ + spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL, + SPLL_REF_DIV, dividers.uc_pll_ref_div); + spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL, + SPLL_PDIV_A, dividers.uc_pll_post_div); + + /* SPLL_FUNC_CNTL_3 setup*/ + spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3, CG_SPLL_FUNC_CNTL_3, + SPLL_FB_DIV, fbdiv); + + /* set to use fractional accumulation*/ + spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3, CG_SPLL_FUNC_CNTL_3, + SPLL_DITHEN, 1); + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_EngineSpreadSpectrumSupport)) { + struct pp_atomctrl_internal_ss_info ssInfo; + + uint32_t vco_freq = clock * dividers.uc_pll_post_div; + if (!atomctrl_get_engine_clock_spread_spectrum(hwmgr, + vco_freq, &ssInfo)) { + /* + * ss_info.speed_spectrum_percentage -- in unit of 0.01% + * ss_info.speed_spectrum_rate -- in unit of khz + * + * clks = reference_clock * 10 / (REFDIV + 1) / speed_spectrum_rate / 2 + */ + uint32_t clk_s = ref_clock * 5 / + (ref_divider * ssInfo.speed_spectrum_rate); + /* clkv = 2 * D * fbdiv / NS */ + uint32_t clk_v = 4 * ssInfo.speed_spectrum_percentage * + fbdiv / (clk_s * 10000); + + cg_spll_spread_spectrum = PHM_SET_FIELD(cg_spll_spread_spectrum, + CG_SPLL_SPREAD_SPECTRUM, CLKS, clk_s); + cg_spll_spread_spectrum = PHM_SET_FIELD(cg_spll_spread_spectrum, + CG_SPLL_SPREAD_SPECTRUM, SSEN, 1); + cg_spll_spread_spectrum_2 = PHM_SET_FIELD(cg_spll_spread_spectrum_2, + CG_SPLL_SPREAD_SPECTRUM_2, CLKV, clk_v); + } + } + + sclk->SclkFrequency = clock; + sclk->CgSpllFuncCntl3 = spll_func_cntl_3; + sclk->CgSpllFuncCntl4 = spll_func_cntl_4; + sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum; + sclk->SpllSpreadSpectrum2 = cg_spll_spread_spectrum_2; + sclk->SclkDid = (uint8_t)dividers.pll_post_divider; + + return 0; +} + +static uint16_t fiji_find_closest_vddci(struct pp_hwmgr *hwmgr, uint16_t vddci) +{ + uint32_t i; + struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + struct pp_atomctrl_voltage_table *vddci_table = + &(data->vddci_voltage_table); + + for (i = 0; i < vddci_table->count; i++) { + if (vddci_table->entries[i].value >= vddci) + return vddci_table->entries[i].value; + } + + PP_ASSERT_WITH_CODE(false, + "VDDCI is larger than max VDDCI in VDDCI Voltage Table!", + return vddci_table->entries[i].value); +} + +static int fiji_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr, + struct phm_ppt_v1_clock_voltage_dependency_table* dep_table, + uint32_t clock, SMU_VoltageLevel *voltage, uint32_t *mvdd) +{ + uint32_t i; + uint16_t vddci; + struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + + *voltage = *mvdd = 0; + + /* clock - voltage dependency table is empty table */ + if (dep_table->count == 0) + return -EINVAL; + + for (i = 0; i < dep_table->count; i++) { + /* find first sclk bigger than request */ + if (dep_table->entries[i].clk >= clock) { + *voltage |= (dep_table->entries[i].vddc * + VOLTAGE_SCALE) << VDDC_SHIFT; + if (FIJI_VOLTAGE_CONTROL_NONE == data->vddci_control) + *voltage |= (data->vbios_boot_state.vddci_bootup_value * + VOLTAGE_SCALE) << VDDCI_SHIFT; + else if (dep_table->entries[i].vddci) + *voltage |= (dep_table->entries[i].vddci * + VOLTAGE_SCALE) << VDDCI_SHIFT; + else { + vddci = fiji_find_closest_vddci(hwmgr, + 
(dep_table->entries[i].vddc - + (uint16_t)data->vddc_vddci_delta)); + *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT; + } + + if (FIJI_VOLTAGE_CONTROL_NONE == data->mvdd_control) + *mvdd = data->vbios_boot_state.mvdd_bootup_value * + VOLTAGE_SCALE; + else if (dep_table->entries[i].mvdd) + *mvdd = (uint32_t) dep_table->entries[i].mvdd * + VOLTAGE_SCALE; + + *voltage |= 1 << PHASES_SHIFT; + return 0; + } + } + + /* sclk is bigger than max sclk in the dependence table */ + *voltage |= (dep_table->entries[i - 1].vddc * VOLTAGE_SCALE) << VDDC_SHIFT; + + if (FIJI_VOLTAGE_CONTROL_NONE == data->vddci_control) + *voltage |= (data->vbios_boot_state.vddci_bootup_value * + VOLTAGE_SCALE) << VDDCI_SHIFT; + else if (dep_table->entries[i-1].vddci) { + vddci = fiji_find_closest_vddci(hwmgr, + (dep_table->entries[i].vddc - + (uint16_t)data->vddc_vddci_delta)); + *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT; + } + + if (FIJI_VOLTAGE_CONTROL_NONE == data->mvdd_control) + *mvdd = data->vbios_boot_state.mvdd_bootup_value * VOLTAGE_SCALE; + else if (dep_table->entries[i].mvdd) + *mvdd = (uint32_t) dep_table->entries[i - 1].mvdd * VOLTAGE_SCALE; + + return 0; +} +/** +* Populates single SMC SCLK structure using the provided engine clock +* +* @param hwmgr the address of the hardware manager +* @param clock the engine clock to use to populate the structure +* @param sclk the SMC SCLK structure to be populated +*/ + +static int fiji_populate_single_graphic_level(struct pp_hwmgr *hwmgr, + uint32_t clock, uint16_t sclk_al_threshold, + struct SMU73_Discrete_GraphicsLevel *level) +{ + int result; + /* PP_Clocks minClocks; */ + uint32_t threshold, mvdd; + struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + + result = fiji_calculate_sclk_params(hwmgr, clock, level); + + /* populate graphics levels */ + result = fiji_get_dependency_volt_by_clk(hwmgr, + table_info->vdd_dep_on_sclk, clock, + &level->MinVoltage, &mvdd); + PP_ASSERT_WITH_CODE((0 == result), + "can not find VDDC voltage value for " + "VDDC engine clock dependency table", + return result); + + level->SclkFrequency = clock; + level->ActivityLevel = sclk_al_threshold; + level->CcPwrDynRm = 0; + level->CcPwrDynRm1 = 0; + level->EnabledForActivity = 0; + level->EnabledForThrottle = 1; + level->UpHyst = 10; + level->DownHyst = 0; + level->VoltageDownHyst = 0; + level->PowerThrottle = 0; + + threshold = clock * data->fast_watermark_threshold / 100; + + /* + * TODO: get minimum clocks from dal configaration + * PECI_GetMinClockSettings(hwmgr->pPECI, &minClocks); + */ + /* data->DisplayTiming.minClockInSR = minClocks.engineClockInSR; */ + + /* get level->DeepSleepDivId + if (phm_cap_enabled(hwmgr->platformDescriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) + { + level->DeepSleepDivId = PhwFiji_GetSleepDividerIdFromClock(hwmgr, clock, minClocks.engineClockInSR); + } */ + + /* Default to slow, highest DPM level will be + * set to PPSMC_DISPLAY_WATERMARK_LOW later. 
+ */ + level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW; + + CONVERT_FROM_HOST_TO_SMC_UL(level->MinVoltage); + CONVERT_FROM_HOST_TO_SMC_UL(level->SclkFrequency); + CONVERT_FROM_HOST_TO_SMC_US(level->ActivityLevel); + CONVERT_FROM_HOST_TO_SMC_UL(level->CgSpllFuncCntl3); + CONVERT_FROM_HOST_TO_SMC_UL(level->CgSpllFuncCntl4); + CONVERT_FROM_HOST_TO_SMC_UL(level->SpllSpreadSpectrum); + CONVERT_FROM_HOST_TO_SMC_UL(level->SpllSpreadSpectrum2); + CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm); + CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm1); + + return 0; +} +/** +* Populates all SMC SCLK levels' structure based on the trimmed allowed dpm engine clock states +* +* @param hwmgr the address of the hardware manager +*/ +static int fiji_populate_all_graphic_levels(struct pp_hwmgr *hwmgr) +{ + struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + struct fiji_dpm_table *dpm_table = &data->dpm_table; + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + struct phm_ppt_v1_pcie_table *pcie_table = table_info->pcie_table; + uint8_t pcie_entry_cnt = (uint8_t) data->dpm_table.pcie_speed_table.count; + int result = 0; + uint32_t array = data->dpm_table_start + + offsetof(SMU73_Discrete_DpmTable, GraphicsLevel); + uint32_t array_size = sizeof(struct SMU73_Discrete_GraphicsLevel) * + SMU73_MAX_LEVELS_GRAPHICS; + struct SMU73_Discrete_GraphicsLevel *levels = + data->smc_state_table.GraphicsLevel; + uint32_t i, max_entry; + uint8_t hightest_pcie_level_enabled = 0, + lowest_pcie_level_enabled = 0, + mid_pcie_level_enabled = 0, + count = 0; + + for (i = 0; i < dpm_table->sclk_table.count; i++) { + result = fiji_populate_single_graphic_level(hwmgr, + dpm_table->sclk_table.dpm_levels[i].value, + (uint16_t)data->activity_target[i], + &levels[i]); + if (result) + return result; + + /* Making sure only DPM level 0-1 have Deep Sleep Div ID populated. */ + if (i > 1) + levels[i].DeepSleepDivId = 0; + } + + /* Only enable level 0 for now.*/ + levels[0].EnabledForActivity = 1; + + /* set highest level watermark to high */ + levels[dpm_table->sclk_table.count - 1].DisplayWatermark = + PPSMC_DISPLAY_WATERMARK_HIGH; + + data->smc_state_table.GraphicsDpmLevelCount = + (uint8_t)dpm_table->sclk_table.count; + data->dpm_level_enable_mask.sclk_dpm_enable_mask = + fiji_get_dpm_level_enable_mask_value(&dpm_table->sclk_table); + + if (pcie_table != NULL) { + PP_ASSERT_WITH_CODE((1 <= pcie_entry_cnt), + "There must be 1 or more PCIE levels defined in PPTable.", + return -EINVAL); + max_entry = pcie_entry_cnt - 1; + for (i = 0; i < dpm_table->sclk_table.count; i++) + levels[i].pcieDpmLevel = + (uint8_t) ((i < max_entry)? i : max_entry); + } else { + while (data->dpm_level_enable_mask.pcie_dpm_enable_mask && + ((data->dpm_level_enable_mask.pcie_dpm_enable_mask & + (1 << (hightest_pcie_level_enabled + 1))) != 0 )) + hightest_pcie_level_enabled++; + + while (data->dpm_level_enable_mask.pcie_dpm_enable_mask && + ((data->dpm_level_enable_mask.pcie_dpm_enable_mask & + (1 << lowest_pcie_level_enabled)) == 0 )) + lowest_pcie_level_enabled++; + + while ((count < hightest_pcie_level_enabled) && + ((data->dpm_level_enable_mask.pcie_dpm_enable_mask & + (1 << (lowest_pcie_level_enabled + 1 + count))) == 0 )) + count++; + + mid_pcie_level_enabled = (lowest_pcie_level_enabled + 1+ count) < + hightest_pcie_level_enabled? 
+ (lowest_pcie_level_enabled + 1 + count) : + hightest_pcie_level_enabled; + + /* set pcieDpmLevel to hightest_pcie_level_enabled */ + for(i = 2; i < dpm_table->sclk_table.count; i++) + levels[i].pcieDpmLevel = hightest_pcie_level_enabled; + + /* set pcieDpmLevel to lowest_pcie_level_enabled */ + levels[0].pcieDpmLevel = lowest_pcie_level_enabled; + + /* set pcieDpmLevel to mid_pcie_level_enabled */ + levels[1].pcieDpmLevel = mid_pcie_level_enabled; + } + /* level count will send to smc once at init smc table and never change */ + result = fiji_copy_bytes_to_smc(hwmgr->smumgr, array, (uint8_t *)levels, + (uint32_t)array_size, data->sram_end); + + return result; +} + +/** + * MCLK Frequency Ratio + * SEQ_CG_RESP Bit[31:24] - 0x0 + * Bit[27:24] - DDR3 Frequency ratio + * 0x0 <= 100MHz, 450 < 0x8 <= 500MHz + * 100 < 0x1 <= 150MHz, 500 < 0x9 <= 550MHz + * 150 < 0x2 <= 200MHz, 550 < 0xA <= 600MHz + * 200 < 0x3 <= 250MHz, 600 < 0xB <= 650MHz + * 250 < 0x4 <= 300MHz, 650 < 0xC <= 700MHz + * 300 < 0x5 <= 350MHz, 700 < 0xD <= 750MHz + * 350 < 0x6 <= 400MHz, 750 < 0xE <= 800MHz + * 400 < 0x7 <= 450MHz, 800 < 0xF + */ +static uint8_t fiji_get_mclk_frequency_ratio(uint32_t mem_clock) +{ + if (mem_clock <= 10000) return 0x0; + if (mem_clock <= 15000) return 0x1; + if (mem_clock <= 20000) return 0x2; + if (mem_clock <= 25000) return 0x3; + if (mem_clock <= 30000) return 0x4; + if (mem_clock <= 35000) return 0x5; + if (mem_clock <= 40000) return 0x6; + if (mem_clock <= 45000) return 0x7; + if (mem_clock <= 50000) return 0x8; + if (mem_clock <= 55000) return 0x9; + if (mem_clock <= 60000) return 0xa; + if (mem_clock <= 65000) return 0xb; + if (mem_clock <= 70000) return 0xc; + if (mem_clock <= 75000) return 0xd; + if (mem_clock <= 80000) return 0xe; + /* mem_clock > 800MHz */ + return 0xf; +} + +/** +* Populates the SMC MCLK structure using the provided memory clock +* +* @param hwmgr the address of the hardware manager +* @param clock the memory clock to use to populate the structure +* @param mclk the SMC MCLK structure to be populated +*/ +static int fiji_calculate_mclk_params(struct pp_hwmgr *hwmgr, + uint32_t clock, struct SMU73_Discrete_MemoryLevel *mclk) +{ + struct pp_atomctrl_memory_clock_param mem_param; + int result; + + result = atomctrl_get_memory_pll_dividers_vi(hwmgr, clock, &mem_param); + PP_ASSERT_WITH_CODE((0 == result), + "Failed to get Memory PLL Dividers.",); + + /* Save the result data to output memory level structure */ + mclk->MclkFrequency = clock; + mclk->MclkDivider = (uint8_t)mem_param.mpll_post_divider; + mclk->FreqRange = fiji_get_mclk_frequency_ratio(clock); + + return result; +} + +static int fiji_populate_single_memory_level(struct pp_hwmgr *hwmgr, + uint32_t clock, struct SMU73_Discrete_MemoryLevel *mem_level) +{ + struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + int result = 0; + + if (table_info->vdd_dep_on_mclk) { + result = fiji_get_dependency_volt_by_clk(hwmgr, + table_info->vdd_dep_on_mclk, clock, + &mem_level->MinVoltage, &mem_level->MinMvdd); + PP_ASSERT_WITH_CODE((0 == result), + "can not find MinVddc voltage value from memory " + "VDDC voltage dependency table", return result); + } + + mem_level->EnabledForThrottle = 1; + mem_level->EnabledForActivity = 0; + mem_level->UpHyst = 0; + mem_level->DownHyst = 100; + mem_level->VoltageDownHyst = 0; + mem_level->ActivityLevel = (uint16_t)data->mclk_activity_target; + mem_level->StutterEnable = false;
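+ /* The level defaults set above (EnabledForThrottle, UpHyst = 0, + * DownHyst = 100) are applied to every memory level; the display + * watermark, the stutter-mode decision and the MCLK divider settings + * are filled in below. */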
+ + mem_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW; + + /* enable stutter mode if all the follow condition applied + * PECI_GetNumberOfActiveDisplays(hwmgr->pPECI, + * &(data->DisplayTiming.numExistingDisplays)); + */ + data->display_timing.num_existing_displays = 1; + + if ((data->mclk_stutter_mode_threshold) && + (clock <= data->mclk_stutter_mode_threshold) && + (!data->is_uvd_enabled) && + (PHM_READ_FIELD(hwmgr->device, DPG_PIPE_STUTTER_CONTROL, + STUTTER_ENABLE) & 0x1)) + mem_level->StutterEnable = true; + + result = fiji_calculate_mclk_params(hwmgr, clock, mem_level); + if (!result) { + CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinMvdd); + CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MclkFrequency); + CONVERT_FROM_HOST_TO_SMC_US(mem_level->ActivityLevel); + CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinVoltage); + } + return result; +} + +/** +* Populates all SMC MCLK levels' structure based on the trimmed allowed dpm memory clock states +* +* @param hwmgr the address of the hardware manager +*/ +static int fiji_populate_all_memory_levels(struct pp_hwmgr *hwmgr) +{ + struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + struct fiji_dpm_table *dpm_table = &data->dpm_table; + int result; + /* populate MCLK dpm table to SMU7 */ + uint32_t array = data->dpm_table_start + + offsetof(SMU73_Discrete_DpmTable, MemoryLevel); + uint32_t array_size = sizeof(SMU73_Discrete_MemoryLevel) * + SMU73_MAX_LEVELS_MEMORY; + struct SMU73_Discrete_MemoryLevel *levels = + data->smc_state_table.MemoryLevel; + uint32_t i; + + for (i = 0; i < dpm_table->mclk_table.count; i++) { + PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value), + "can not populate memory level as memory clock is zero", + return -EINVAL); + result = fiji_populate_single_memory_level(hwmgr, + dpm_table->mclk_table.dpm_levels[i].value, + &levels[i]); + if (result) + return result; + } + + /* Only enable level 0 for now. */ + levels[0].EnabledForActivity = 1; + + /* in order to prevent MC activity from stutter mode to push DPM up. + * the UVD change complements this by putting the MCLK in + * a higher state by default such that we are not effected by + * up threshold or and MCLK DPM latency. + */ + levels[0].ActivityLevel = (uint16_t)data->mclk_dpm0_activity_target; + CONVERT_FROM_HOST_TO_SMC_US(levels[0].ActivityLevel); + + data->smc_state_table.MemoryDpmLevelCount = + (uint8_t)dpm_table->mclk_table.count; + data->dpm_level_enable_mask.mclk_dpm_enable_mask = + fiji_get_dpm_level_enable_mask_value(&dpm_table->mclk_table); + /* set highest level watermark to high */ + levels[dpm_table->mclk_table.count - 1].DisplayWatermark = + PPSMC_DISPLAY_WATERMARK_HIGH; + + /* level count will send to smc once at init smc table and never change */ + result = fiji_copy_bytes_to_smc(hwmgr->smumgr, array, (uint8_t *)levels, + (uint32_t)array_size, data->sram_end); + + return result; +} + +/** +* Populates the SMC MVDD structure using the provided memory clock. +* +* @param hwmgr the address of the hardware manager +* @param mclk the MCLK value to be used in the decision if MVDD should be high or low. 
+* @param voltage the SMC VOLTAGE structure to be populated +*/ +int fiji_populate_mvdd_value(struct pp_hwmgr *hwmgr, + uint32_t mclk, SMIO_Pattern *smio_pat) +{ + const struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + uint32_t i = 0; + + if (FIJI_VOLTAGE_CONTROL_NONE != data->mvdd_control) { + /* find mvdd value which clock is more than request */ + for (i = 0; i < table_info->vdd_dep_on_mclk->count; i++) { + if (mclk <= table_info->vdd_dep_on_mclk->entries[i].clk) { + smio_pat->Voltage = data->mvdd_voltage_table.entries[i].value; + break; + } + } + PP_ASSERT_WITH_CODE(i < table_info->vdd_dep_on_mclk->count, + "MVDD Voltage is outside the supported range.", + return -EINVAL); + } else + return -EINVAL; + + return 0; +} + +static int fiji_populate_smc_acpi_level(struct pp_hwmgr *hwmgr, + SMU73_Discrete_DpmTable *table) +{ + int result = 0; + const struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + struct pp_atomctrl_clock_dividers_vi dividers; + SMIO_Pattern vol_level; + uint32_t mvdd; + uint16_t us_mvdd; + uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL; + uint32_t spll_func_cntl_2 = data->clock_registers.vCG_SPLL_FUNC_CNTL_2; + + table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC; + + if (!data->sclk_dpm_key_disabled) { + /* Get MinVoltage and Frequency from DPM0, + * already converted to SMC_UL */ + table->ACPILevel.SclkFrequency = + data->dpm_table.sclk_table.dpm_levels[0].value; + result = fiji_get_dependency_volt_by_clk(hwmgr, + table_info->vdd_dep_on_sclk, + table->ACPILevel.SclkFrequency, + &table->ACPILevel.MinVoltage, &mvdd); + PP_ASSERT_WITH_CODE((0 == result), + "Cannot find ACPI VDDC voltage value " + "in Clock Dependency Table",); + } else { + table->ACPILevel.SclkFrequency = + data->vbios_boot_state.sclk_bootup_value; + table->ACPILevel.MinVoltage = + data->vbios_boot_state.vddc_bootup_value * VOLTAGE_SCALE; + } + + /* get the engine clock dividers for this clock value */ + result = atomctrl_get_engine_pll_dividers_vi(hwmgr, + table->ACPILevel.SclkFrequency, &dividers); + PP_ASSERT_WITH_CODE(result == 0, + "Error retrieving Engine Clock dividers from VBIOS.", + return result); + + table->ACPILevel.SclkDid = (uint8_t)dividers.pll_post_divider; + table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW; + table->ACPILevel.DeepSleepDivId = 0; + + spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL, + SPLL_PWRON, 0); + spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL, + SPLL_RESET, 1); + spll_func_cntl_2 = PHM_SET_FIELD(spll_func_cntl_2, CG_SPLL_FUNC_CNTL_2, + SCLK_MUX_SEL, 4); + + table->ACPILevel.CgSpllFuncCntl = spll_func_cntl; + table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2; + table->ACPILevel.CgSpllFuncCntl3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3; + table->ACPILevel.CgSpllFuncCntl4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4; + table->ACPILevel.SpllSpreadSpectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM; + table->ACPILevel.SpllSpreadSpectrum2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2; + table->ACPILevel.CcPwrDynRm = 0; + table->ACPILevel.CcPwrDynRm1 = 0; + + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags); + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkFrequency); + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.MinVoltage); + 
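/* The SMC-side copy of this table is kept in big-endian byte order; the + * CONVERT_FROM_HOST_TO_SMC_* macros byte-swap each field in place before + * the table is uploaded to SMC RAM. */ +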
CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl); + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl2); + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl3); + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl4); + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum); + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum2); + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm); + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1); + + if (!data->mclk_dpm_key_disabled) { + /* Get MinVoltage and Frequency from DPM0, already converted to SMC_UL */ + table->MemoryACPILevel.MclkFrequency = + data->dpm_table.mclk_table.dpm_levels[0].value; + result = fiji_get_dependency_volt_by_clk(hwmgr, + table_info->vdd_dep_on_mclk, + table->MemoryACPILevel.MclkFrequency, + &table->MemoryACPILevel.MinVoltage, &mvdd); + PP_ASSERT_WITH_CODE((0 == result), + "Cannot find ACPI VDDCI voltage value " + "in Clock Dependency Table",); + } else { + table->MemoryACPILevel.MclkFrequency = + data->vbios_boot_state.mclk_bootup_value; + table->MemoryACPILevel.MinVoltage = + data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE; + } + + us_mvdd = 0; + if ((FIJI_VOLTAGE_CONTROL_NONE == data->mvdd_control) || + (data->mclk_dpm_key_disabled)) + us_mvdd = data->vbios_boot_state.mvdd_bootup_value; + else { + if (!fiji_populate_mvdd_value(hwmgr, + data->dpm_table.mclk_table.dpm_levels[0].value, + &vol_level)) + us_mvdd = vol_level.Voltage; + } + + table->MemoryACPILevel.MinMvdd = + PP_HOST_TO_SMC_UL(us_mvdd * VOLTAGE_SCALE); + + table->MemoryACPILevel.EnabledForThrottle = 0; + table->MemoryACPILevel.EnabledForActivity = 0; + table->MemoryACPILevel.UpHyst = 0; + table->MemoryACPILevel.DownHyst = 100; + table->MemoryACPILevel.VoltageDownHyst = 0; + table->MemoryACPILevel.ActivityLevel = + PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target); + + table->MemoryACPILevel.StutterEnable = false; + CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MclkFrequency); + CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MinVoltage); + + return result; +} + +static int fiji_populate_smc_vce_level(struct pp_hwmgr *hwmgr, + SMU73_Discrete_DpmTable *table) +{ + int result = -EINVAL; + uint8_t count; + struct pp_atomctrl_clock_dividers_vi dividers; + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = + table_info->mm_dep_table; + struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + + table->VceLevelCount = (uint8_t)(mm_table->count); + table->VceBootLevel = 0; + + for (count = 0; count < table->VceLevelCount; count++) { + table->VceLevel[count].Frequency = mm_table->entries[count].eclk; + table->VceLevel[count].MinVoltage |= + (mm_table->entries[count].vddc * VOLTAGE_SCALE) << VDDC_SHIFT; + table->VceLevel[count].MinVoltage |= + ((mm_table->entries[count].vddc - data->vddc_vddci_delta) * + VOLTAGE_SCALE) << VDDCI_SHIFT; + table->VceLevel[count].MinVoltage |= 1 << PHASES_SHIFT; + + /* retrieve divider value for VBIOS */ + result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, + table->VceLevel[count].Frequency, &dividers); + PP_ASSERT_WITH_CODE((0 == result), + "can not find divide id for VCE engine clock", + return result); + + table->VceLevel[count].Divider = (uint8_t)dividers.pll_post_divider; + + CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].Frequency); + CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].MinVoltage); + } + return result; +} + 
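/* Note on the multimedia (VCE/ACP/SAMU/UVD) level populators above and + * below: each packs its minimum voltage the same way, with the VDDC value + * (times VOLTAGE_SCALE) shifted to VDDC_SHIFT, a derived VDDCI + * (vddc minus vddc_vddci_delta) shifted to VDDCI_SHIFT, and a single + * phase bit set via PHASES_SHIFT. */ +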
+static int fiji_populate_smc_acp_level(struct pp_hwmgr *hwmgr, + SMU73_Discrete_DpmTable *table) +{ + int result = -EINVAL; + uint8_t count; + struct pp_atomctrl_clock_dividers_vi dividers; + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = + table_info->mm_dep_table; + struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + + table->AcpLevelCount = (uint8_t)(mm_table->count); + table->AcpBootLevel = 0; + + for (count = 0; count < table->AcpLevelCount; count++) { + table->AcpLevel[count].Frequency = mm_table->entries[count].aclk; + table->AcpLevel[count].MinVoltage |= (mm_table->entries[count].vddc * + VOLTAGE_SCALE) << VDDC_SHIFT; + table->AcpLevel[count].MinVoltage |= ((mm_table->entries[count].vddc - + data->vddc_vddci_delta) * VOLTAGE_SCALE) << VDDCI_SHIFT; + table->AcpLevel[count].MinVoltage |= 1 << PHASES_SHIFT; + + /* retrieve divider value for VBIOS */ + result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, + table->AcpLevel[count].Frequency, &dividers); + PP_ASSERT_WITH_CODE((0 == result), + "cannot find divider id for ACP engine clock", return result); + + table->AcpLevel[count].Divider = (uint8_t)dividers.pll_post_divider; + + CONVERT_FROM_HOST_TO_SMC_UL(table->AcpLevel[count].Frequency); + CONVERT_FROM_HOST_TO_SMC_UL(table->AcpLevel[count].MinVoltage); + } + return result; +} + +static int fiji_populate_smc_samu_level(struct pp_hwmgr *hwmgr, + SMU73_Discrete_DpmTable *table) +{ + int result = -EINVAL; + uint8_t count; + struct pp_atomctrl_clock_dividers_vi dividers; + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = + table_info->mm_dep_table; + struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + + table->SamuBootLevel = 0; + table->SamuLevelCount = (uint8_t)(mm_table->count); + + for (count = 0; count < table->SamuLevelCount; count++) { + /* not sure whether we need evclk or not */ + table->SamuLevel[count].Frequency = mm_table->entries[count].samclock; + table->SamuLevel[count].MinVoltage |= (mm_table->entries[count].vddc * + VOLTAGE_SCALE) << VDDC_SHIFT; + table->SamuLevel[count].MinVoltage |= ((mm_table->entries[count].vddc - + data->vddc_vddci_delta) * VOLTAGE_SCALE) << VDDCI_SHIFT; + table->SamuLevel[count].MinVoltage |= 1 << PHASES_SHIFT; + + /* retrieve divider value for VBIOS */ + result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, + table->SamuLevel[count].Frequency, &dividers); + PP_ASSERT_WITH_CODE((0 == result), + "cannot find divider id for SAMU clock", return result); + + table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider; + + CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency); + CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].MinVoltage); + } + return result; +} + +static int fiji_populate_memory_timing_parameters(struct pp_hwmgr *hwmgr, + int32_t eng_clock, int32_t mem_clock, + struct SMU73_Discrete_MCArbDramTimingTableEntry *arb_regs) +{ + uint32_t dram_timing; + uint32_t dram_timing2; + uint32_t burstTime; + ULONG state, trrds, trrdl; + int result; + + result = atomctrl_set_engine_dram_timings_rv770(hwmgr, + eng_clock, mem_clock); + PP_ASSERT_WITH_CODE(result == 0, + "Error calling VBIOS to set DRAM_TIMING.", return result); + + dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING); + dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2); + burstTime = 
cgs_read_register(hwmgr->device, mmMC_ARB_BURST_TIME); + + state = PHM_GET_FIELD(burstTime, MC_ARB_BURST_TIME, STATE0); + trrds = PHM_GET_FIELD(burstTime, MC_ARB_BURST_TIME, TRRDS0); + trrdl = PHM_GET_FIELD(burstTime, MC_ARB_BURST_TIME, TRRDL0); + + arb_regs->McArbDramTiming = PP_HOST_TO_SMC_UL(dram_timing); + arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dram_timing2); + arb_regs->McArbBurstTime = (uint8_t)burstTime; + arb_regs->TRRDS = (uint8_t)trrds; + arb_regs->TRRDL = (uint8_t)trrdl; + + return 0; +} + +static int fiji_program_memory_timing_parameters(struct pp_hwmgr *hwmgr) +{ + struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + struct SMU73_Discrete_MCArbDramTimingTable arb_regs; + uint32_t i, j; + int result = 0; + + for (i = 0; i < data->dpm_table.sclk_table.count; i++) { + for (j = 0; j < data->dpm_table.mclk_table.count; j++) { + result = fiji_populate_memory_timing_parameters(hwmgr, + data->dpm_table.sclk_table.dpm_levels[i].value, + data->dpm_table.mclk_table.dpm_levels[j].value, + &arb_regs.entries[i][j]); + if (result) + break; + } + } + + if (!result) + result = fiji_copy_bytes_to_smc( + hwmgr->smumgr, + data->arb_table_start, + (uint8_t *)&arb_regs, + sizeof(SMU73_Discrete_MCArbDramTimingTable), + data->sram_end); + return result; +} + +static int fiji_populate_smc_uvd_level(struct pp_hwmgr *hwmgr, + struct SMU73_Discrete_DpmTable *table) +{ + int result = -EINVAL; + uint8_t count; + struct pp_atomctrl_clock_dividers_vi dividers; + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = + table_info->mm_dep_table; + struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + + table->UvdLevelCount = (uint8_t)(mm_table->count); + table->UvdBootLevel = 0; + + for (count = 0; count < table->UvdLevelCount; count++) { + table->UvdLevel[count].VclkFrequency = mm_table->entries[count].vclk; + table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk; + table->UvdLevel[count].MinVoltage |= (mm_table->entries[count].vddc * + VOLTAGE_SCALE) << VDDC_SHIFT; + table->UvdLevel[count].MinVoltage |= ((mm_table->entries[count].vddc - + data->vddc_vddci_delta) * VOLTAGE_SCALE) << VDDCI_SHIFT; + table->UvdLevel[count].MinVoltage |= 1 << PHASES_SHIFT; + + /* retrieve divider value for VBIOS */ + result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, + table->UvdLevel[count].VclkFrequency, &dividers); + PP_ASSERT_WITH_CODE((0 == result), + "cannot find divider id for Vclk clock", return result); + + table->UvdLevel[count].VclkDivider = (uint8_t)dividers.pll_post_divider; + + result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, + table->UvdLevel[count].DclkFrequency, &dividers); + PP_ASSERT_WITH_CODE((0 == result), + "cannot find divider id for Dclk clock", return result); + + table->UvdLevel[count].DclkDivider = (uint8_t)dividers.pll_post_divider; + + CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency); + CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency); + CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].MinVoltage); + + } + return result; +} + +static int fiji_find_boot_level(struct fiji_single_dpm_table *table, + uint32_t value, uint32_t *boot_level) +{ + int result = -EINVAL; + uint32_t i; + + for (i = 0; i < table->count; i++) { + if (value == table->dpm_levels[i].value) { + *boot_level = i; + result = 0; + } + } + return result; +} + +static int fiji_populate_smc_boot_level(struct pp_hwmgr *hwmgr, + struct SMU73_Discrete_DpmTable *table) 
+{ + int result = 0; + struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + + table->GraphicsBootLevel = 0; + table->MemoryBootLevel = 0; + + /* find boot level from dpm table */ + result = fiji_find_boot_level(&(data->dpm_table.sclk_table), + data->vbios_boot_state.sclk_bootup_value, + (uint32_t *)&(table->GraphicsBootLevel)); + + result = fiji_find_boot_level(&(data->dpm_table.mclk_table), + data->vbios_boot_state.mclk_bootup_value, + (uint32_t *)&(table->MemoryBootLevel)); + + table->BootVddc = data->vbios_boot_state.vddc_bootup_value * + VOLTAGE_SCALE; + table->BootVddci = data->vbios_boot_state.vddci_bootup_value * + VOLTAGE_SCALE; + table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value * + VOLTAGE_SCALE; + + CONVERT_FROM_HOST_TO_SMC_US(table->BootVddc); + CONVERT_FROM_HOST_TO_SMC_US(table->BootVddci); + CONVERT_FROM_HOST_TO_SMC_US(table->BootMVdd); + + return 0; +} + +static int fiji_populate_smc_initailial_state(struct pp_hwmgr *hwmgr) +{ + struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + uint8_t count, level; + + count = (uint8_t)(table_info->vdd_dep_on_sclk->count); + for (level = 0; level < count; level++) { + if(table_info->vdd_dep_on_sclk->entries[level].clk >= + data->vbios_boot_state.sclk_bootup_value) { + data->smc_state_table.GraphicsBootLevel = level; + break; + } + } + + count = (uint8_t)(table_info->vdd_dep_on_mclk->count); + for (level = 0; level < count; level++) { + if(table_info->vdd_dep_on_mclk->entries[level].clk >= + data->vbios_boot_state.mclk_bootup_value) { + data->smc_state_table.MemoryBootLevel = level; + break; + } + } + + return 0; +} + +static int fiji_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr) +{ + uint32_t ro, efuse, efuse2, clock_freq, volt_without_cks, + volt_with_cks, value; + uint16_t clock_freq_u16; + struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + uint8_t type, i, j, cks_setting, stretch_amount, stretch_amount2, + volt_offset = 0; + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table = + table_info->vdd_dep_on_sclk; + + stretch_amount = (uint8_t)table_info->cac_dtp_table->usClockStretchAmount; + + /* Read SMU_Eefuse to read and calculate RO and determine + * if the part is SS or FF. if RO >= 1660MHz, part is FF. 
+ */ + efuse = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixSMU_EFUSE_0 + (146 * 4)); + efuse2 = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixSMU_EFUSE_0 + (148 * 4)); + efuse &= 0xFF000000; + efuse = efuse >> 24; + efuse2 &= 0xF; + + if (efuse2 == 1) + ro = (2300 - 1350) * efuse / 255 + 1350; + else + ro = (2500 - 1000) * efuse / 255 + 1000; + + if (ro >= 1660) + type = 0; + else + type = 1; + + /* Populate Stretch amount */ + data->smc_state_table.ClockStretcherAmount = stretch_amount; + + /* Populate Sclk_CKS_masterEn0_7 and Sclk_voltageOffset */ + for (i = 0; i < sclk_table->count; i++) { + data->smc_state_table.Sclk_CKS_masterEn0_7 |= + sclk_table->entries[i].cks_enable << i; + volt_without_cks = (uint32_t)((14041 * + (sclk_table->entries[i].clk/100) / 10000 + 3571 + 75 - ro) * 1000 / + (4026 - (13924 * (sclk_table->entries[i].clk/100) / 10000))); + volt_with_cks = (uint32_t)((13946 * + (sclk_table->entries[i].clk/100) / 10000 + 3320 + 45 - ro) * 1000 / + (3664 - (11454 * (sclk_table->entries[i].clk/100) / 10000))); + if (volt_without_cks >= volt_with_cks) + volt_offset = (uint8_t)(((volt_without_cks - volt_with_cks + + sclk_table->entries[i].cks_voffset) * 100 / 625) + 1); + data->smc_state_table.Sclk_voltageOffset[i] = volt_offset; + } + + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE, + STRETCH_ENABLE, 0x0); + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE, + masterReset, 0x1); + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE, + staticEnable, 0x1); + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE, + masterReset, 0x0); + + /* Populate CKS Lookup Table */ + if (stretch_amount == 1 || stretch_amount == 2 || stretch_amount == 5) + stretch_amount2 = 0; + else if (stretch_amount == 3 || stretch_amount == 4) + stretch_amount2 = 1; + else { + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ClockStretcher); + PP_ASSERT_WITH_CODE(false, + "Stretch Amount in PPTable not supported\n", + return -EINVAL); + } + + value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixPWR_CKS_CNTL); + value &= 0xFFC2FF87; + data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].minFreq = + fiji_clock_stretcher_lookup_table[stretch_amount2][0]; + data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].maxFreq = + fiji_clock_stretcher_lookup_table[stretch_amount2][1]; + clock_freq_u16 = (uint16_t)(PP_SMC_TO_HOST_UL(data->smc_state_table. + GraphicsLevel[data->smc_state_table.GraphicsDpmLevelCount - 1]. + SclkFrequency) / 100); + if (fiji_clock_stretcher_lookup_table[stretch_amount2][0] < + clock_freq_u16 && + fiji_clock_stretcher_lookup_table[stretch_amount2][1] > + clock_freq_u16) { + /* Program PWR_CKS_CNTL. CKS_USE_FOR_LOW_FREQ */ + value |= (fiji_clock_stretcher_lookup_table[stretch_amount2][3]) << 16; + /* Program PWR_CKS_CNTL. CKS_LDO_REFSEL */ + value |= (fiji_clock_stretcher_lookup_table[stretch_amount2][2]) << 18; + /* Program PWR_CKS_CNTL. CKS_STRETCH_AMOUNT */ + value |= (fiji_clock_stretch_amount_conversion + [fiji_clock_stretcher_lookup_table[stretch_amount2][3]] + [stretch_amount]) << 3; + } + CONVERT_FROM_HOST_TO_SMC_US(data->smc_state_table.CKS_LOOKUPTable. + CKS_LOOKUPTableEntry[0].minFreq); + CONVERT_FROM_HOST_TO_SMC_US(data->smc_state_table.CKS_LOOKUPTable. 
+ CKS_LOOKUPTableEntry[0].maxFreq); + data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].setting = + fiji_clock_stretcher_lookup_table[stretch_amount2][2] & 0x7F; + data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].setting |= + (fiji_clock_stretcher_lookup_table[stretch_amount2][3]) << 7; + + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixPWR_CKS_CNTL, value); + + /* Populate DDT Lookup Table */ + for (i = 0; i < 4; i++) { + /* Assign the minimum and maximum VID stored + * in the last row of Clock Stretcher Voltage Table. + */ + data->smc_state_table.ClockStretcherDataTable. + ClockStretcherDataTableEntry[i].minVID = + (uint8_t) fiji_clock_stretcher_ddt_table[type][i][2]; + data->smc_state_table.ClockStretcherDataTable. + ClockStretcherDataTableEntry[i].maxVID = + (uint8_t) fiji_clock_stretcher_ddt_table[type][i][3]; + /* Loop through each SCLK and check the frequency + * to see if it lies within the frequency for clock stretcher. + */ + for (j = 0; j < data->smc_state_table.GraphicsDpmLevelCount; j++) { + cks_setting = 0; + clock_freq = PP_SMC_TO_HOST_UL( + data->smc_state_table.GraphicsLevel[j].SclkFrequency); + /* Check the allowed frequency against the sclk level[j]. + * Sclk's endianness has already been converted, + * and it's in 10Khz unit, + * as opposed to Data table, which is in Mhz unit. + */ + if (clock_freq >= + (fiji_clock_stretcher_ddt_table[type][i][0]) * 100) { + cks_setting |= 0x2; + if (clock_freq < + (fiji_clock_stretcher_ddt_table[type][i][1]) * 100) + cks_setting |= 0x1; + } + data->smc_state_table.ClockStretcherDataTable. + ClockStretcherDataTableEntry[i].setting |= cks_setting << (j * 2); + } + CONVERT_FROM_HOST_TO_SMC_US(data->smc_state_table. + ClockStretcherDataTable. + ClockStretcherDataTableEntry[i].setting); + } + + value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL); + value &= 0xFFFFFFFE; + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL, value); + + return 0; +} + +/** +* Populates the SMC VRConfig field in DPM table. 
+* +* @param hwmgr the address of the hardware manager +* @param table the SMC DPM table structure to be populated +* @return always 0 +*/ +static int fiji_populate_vr_config(struct pp_hwmgr *hwmgr, + struct SMU73_Discrete_DpmTable *table) +{ + struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + uint16_t config; + + config = VR_MERGED_WITH_VDDC; + table->VRConfig |= (config << VRCONF_VDDGFX_SHIFT); + + /* Set Vddc Voltage Controller */ + if(FIJI_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) { + config = VR_SVI2_PLANE_1; + table->VRConfig |= config; + } else { + PP_ASSERT_WITH_CODE(false, + "VDDC should be on SVI2 control in merged mode!",); + } + /* Set Vddci Voltage Controller */ + if(FIJI_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) { + config = VR_SVI2_PLANE_2; /* only in merged mode */ + table->VRConfig |= (config << VRCONF_VDDCI_SHIFT); + } else if (FIJI_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) { + config = VR_SMIO_PATTERN_1; + table->VRConfig |= (config << VRCONF_VDDCI_SHIFT); + } else { + config = VR_STATIC_VOLTAGE; + table->VRConfig |= (config << VRCONF_VDDCI_SHIFT); + } + /* Set Mvdd Voltage Controller */ + if(FIJI_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) { + config = VR_SVI2_PLANE_2; + table->VRConfig |= (config << VRCONF_MVDD_SHIFT); + } else if(FIJI_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) { + config = VR_SMIO_PATTERN_2; + table->VRConfig |= (config << VRCONF_MVDD_SHIFT); + } else { + config = VR_STATIC_VOLTAGE; + table->VRConfig |= (config << VRCONF_MVDD_SHIFT); + } + + return 0; +} + +/** +* Initializes the SMC table and uploads it +* +* @param hwmgr the address of the powerplay hardware manager. +* @param pInput the pointer to input data (PowerState) +* @return always 0 +*/ +static int fiji_init_smc_table(struct pp_hwmgr *hwmgr) +{ + int result; + struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + struct SMU73_Discrete_DpmTable *table = &(data->smc_state_table); + const struct fiji_ulv_parm *ulv = &(data->ulv); + uint8_t i; + struct pp_atomctrl_gpio_pin_assignment gpio_pin; + + result = fiji_setup_default_dpm_tables(hwmgr); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to setup default DPM tables!", return result); + + if(FIJI_VOLTAGE_CONTROL_NONE != data->voltage_control) + fiji_populate_smc_voltage_tables(hwmgr, table); + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_AutomaticDCTransition)) + table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_StepVddc)) + table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC; + + if (data->is_memory_gddr5) + table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5; + + if (ulv->ulv_supported && table_info->us_ulv_voltage_offset) { + result = fiji_populate_ulv_state(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize ULV state!", return result); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_ULV_PARAMETER, ulv->cg_ulv_parameter); + } + + result = fiji_populate_smc_link_level(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize Link Level!", return result); + + result = fiji_populate_all_graphic_levels(hwmgr); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize Graphics Level!", return result); + + result = fiji_populate_all_memory_levels(hwmgr); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize Memory Level!", 
return result); + + result = fiji_populate_smc_acpi_level(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize ACPI Level!", return result); + + result = fiji_populate_smc_vce_level(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize VCE Level!", return result); + + result = fiji_populate_smc_acp_level(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize ACP Level!", return result); + + result = fiji_populate_smc_samu_level(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize SAMU Level!", return result); + + /* Since only the initial state is completely set up at this point + * (the other states are just copies of the boot state) we only + * need to populate the ARB settings for the initial state. + */ + result = fiji_program_memory_timing_parameters(hwmgr); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to Write ARB settings for the initial state.", return result); + + result = fiji_populate_smc_uvd_level(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize UVD Level!", return result); + + result = fiji_populate_smc_boot_level(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize Boot Level!", return result); + + result = fiji_populate_smc_initailial_state(hwmgr); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize Boot State!", return result); + + result = fiji_populate_bapm_parameters_in_dpm_table(hwmgr); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to populate BAPM Parameters!", return result); + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ClockStretcher)) { + result = fiji_populate_clock_stretcher_data_table(hwmgr); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to populate Clock Stretcher Data Table!", + return result); + } + + table->GraphicsVoltageChangeEnable = 1; + table->GraphicsThermThrottleEnable = 1; + table->GraphicsInterval = 1; + table->VoltageInterval = 1; + table->ThermalInterval = 1; + table->TemperatureLimitHigh = + table_info->cac_dtp_table->usTargetOperatingTemp * + FIJI_Q88_FORMAT_CONVERSION_UNIT; + table->TemperatureLimitLow = + (table_info->cac_dtp_table->usTargetOperatingTemp - 1) * + FIJI_Q88_FORMAT_CONVERSION_UNIT; + table->MemoryVoltageChangeEnable = 1; + table->MemoryInterval = 1; + table->VoltageResponseTime = 0; + table->PhaseResponseTime = 0; + table->MemoryThermThrottleEnable = 1; + table->PCIeBootLinkLevel = 0; /* 0:Gen1 1:Gen2 2:Gen3*/ + table->PCIeGenInterval = 1; + + result = fiji_populate_vr_config(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to populate VRConfig setting!", return result); + + table->ThermGpio = 17; + table->SclkStepSize = 0x4000; + + if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_VRHOT_GPIO_PINID, &gpio_pin)) { + table->VRHotGpio = gpio_pin.uc_gpio_pin_bit_shift; + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_RegulatorHot); + } else { + table->VRHotGpio = FIJI_UNUSED_GPIO_PIN; + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_RegulatorHot); + } + + if (atomctrl_get_pp_assign_pin(hwmgr, PP_AC_DC_SWITCH_GPIO_PINID, + &gpio_pin)) { + table->AcDcGpio = gpio_pin.uc_gpio_pin_bit_shift; + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_AutomaticDCTransition); + } else { + table->AcDcGpio = FIJI_UNUSED_GPIO_PIN; + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_AutomaticDCTransition); + } + + /* Thermal Output GPIO */ + if (atomctrl_get_pp_assign_pin(hwmgr, 
THERMAL_INT_OUTPUT_GPIO_PINID, + &gpio_pin)) { + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ThermalOutGPIO); + + table->ThermOutGpio = gpio_pin.uc_gpio_pin_bit_shift; + + /* For porlarity read GPIOPAD_A with assigned Gpio pin + * since VBIOS will program this register to set 'inactive state', + * driver can then determine 'active state' from this and + * program SMU with correct polarity + */ + table->ThermOutPolarity = (0 == (cgs_read_register(hwmgr->device, mmGPIOPAD_A) & + (1 << gpio_pin.uc_gpio_pin_bit_shift))) ? 1:0; + table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_ONLY; + + /* if required, combine VRHot/PCC with thermal out GPIO */ + if(phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_RegulatorHot) && + phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_CombinePCCWithThermalSignal)) + table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_VRHOT; + } else { + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ThermalOutGPIO); + table->ThermOutGpio = 17; + table->ThermOutPolarity = 1; + table->ThermOutMode = SMU7_THERM_OUT_MODE_DISABLE; + } + + for (i = 0; i < SMU73_MAX_ENTRIES_SMIO; i++) + table->Smio[i] = PP_HOST_TO_SMC_UL(table->Smio[i]); + + CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags); + CONVERT_FROM_HOST_TO_SMC_UL(table->VRConfig); + CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask1); + CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask2); + CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize); + CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh); + CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow); + CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime); + CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime); + + /* Upload all dpm data to SMC memory.(dpm level, dpm level count etc) */ + result = fiji_copy_bytes_to_smc(hwmgr->smumgr, + data->dpm_table_start + + offsetof(SMU73_Discrete_DpmTable, SystemFlags), + (uint8_t *)&(table->SystemFlags), + sizeof(SMU73_Discrete_DpmTable) - 3 * sizeof(SMU73_PIDController), + data->sram_end); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to upload dpm data to SMC memory!", return result); + + return 0; +} + +/** +* Initialize the ARB DRAM timing table's index field. +* +* @param hwmgr the address of the powerplay hardware manager. +* @return always 0 +*/ +static int fiji_init_arb_table_index(struct pp_hwmgr *hwmgr) +{ + const struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + uint32_t tmp; + int result; + + /* This is a read-modify-write on the first byte of the ARB table. + * The first byte in the SMU73_Discrete_MCArbDramTimingTable structure + * is the field 'current'. + * This solution is ugly, but we never write the whole table only + * individual fields in it. + * In reality this field should not be in that structure + * but in a soft register. 
+ */ + result = fiji_read_smc_sram_dword(hwmgr->smumgr, + data->arb_table_start, &tmp, data->sram_end); + + if (result) + return result; + + tmp &= 0x00FFFFFF; + tmp |= ((uint32_t)MC_CG_ARB_FREQ_F1) << 24; + + return fiji_write_smc_sram_dword(hwmgr->smumgr, + data->arb_table_start, tmp, data->sram_end); +} + +static int fiji_enable_vrhot_gpio_interrupt(struct pp_hwmgr *hwmgr) +{ + if(phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_RegulatorHot)) + return smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_EnableVRHotGPIOInterrupt); + + return 0; +} + +static int fiji_enable_sclk_control(struct pp_hwmgr *hwmgr) +{ + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL, + SCLK_PWRMGT_OFF, 0); + return 0; +} + +static int fiji_enable_ulv(struct pp_hwmgr *hwmgr) +{ + struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + struct fiji_ulv_parm *ulv = &(data->ulv); + + if (ulv->ulv_supported) + return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_EnableULV); + + return 0; +} + +static int fiji_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr) +{ + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_SclkDeepSleep)) { + if (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_MASTER_DeepSleep_ON)) + PP_ASSERT_WITH_CODE(false, + "Attempt to enable Master Deep Sleep switch failed!", + return -1); + } else { + if (smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_MASTER_DeepSleep_OFF)) { + PP_ASSERT_WITH_CODE(false, + "Attempt to disable Master Deep Sleep switch failed!", + return -1); + } + } + + return 0; +} + +static int fiji_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) +{ + struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + uint32_t val, val0, val2; + uint32_t i, cpl_cntl, cpl_threshold, mc_threshold; + + /* enable SCLK dpm */ + if(!data->sclk_dpm_key_disabled) + PP_ASSERT_WITH_CODE( + (0 == smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_DPM_Enable)), + "Failed to enable SCLK DPM during DPM Start Function!", + return -1); + + /* enable MCLK dpm */ + if(0 == data->mclk_dpm_key_disabled) { + cpl_threshold = 0; + mc_threshold = 0; + + /* Read per MCD tile (0 - 7) */ + for (i = 0; i < 8; i++) { + PHM_WRITE_FIELD(hwmgr->device, MC_CONFIG_MCD, MC_RD_ENABLE, i); + val = cgs_read_register(hwmgr->device, mmMC_SEQ_RESERVE_0_S) & 0xf0000000; + if (0xf0000000 != val) { + /* count number of MCQ that has channel(s) enabled */ + cpl_threshold++; + /* only harvest 3 or full 4 supported */ + mc_threshold = val ? 
3 : 4; + } + } + PP_ASSERT_WITH_CODE(0 != cpl_threshold, + "Number of MCQ is zero!", return -EINVAL;); + + mc_threshold = ((mc_threshold & LCAC_MC0_CNTL__MC0_THRESHOLD_MASK) << + LCAC_MC0_CNTL__MC0_THRESHOLD__SHIFT) | + LCAC_MC0_CNTL__MC0_ENABLE_MASK; + cpl_cntl = ((cpl_threshold & LCAC_CPL_CNTL__CPL_THRESHOLD_MASK) << + LCAC_CPL_CNTL__CPL_THRESHOLD__SHIFT) | + LCAC_CPL_CNTL__CPL_ENABLE_MASK; + cpl_cntl = (cpl_cntl | (8 << LCAC_CPL_CNTL__CPL_BLOCK_ID__SHIFT)); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixLCAC_MC0_CNTL, mc_threshold); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixLCAC_MC1_CNTL, mc_threshold); + if (8 == cpl_threshold) { + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixLCAC_MC2_CNTL, mc_threshold); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixLCAC_MC3_CNTL, mc_threshold); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixLCAC_MC4_CNTL, mc_threshold); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixLCAC_MC5_CNTL, mc_threshold); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixLCAC_MC6_CNTL, mc_threshold); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixLCAC_MC7_CNTL, mc_threshold); + } + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixLCAC_CPL_CNTL, cpl_cntl); + + udelay(5); + + mc_threshold = mc_threshold | + (1 << LCAC_MC0_CNTL__MC0_SIGNAL_ID__SHIFT); + cpl_cntl = cpl_cntl | (1 << LCAC_CPL_CNTL__CPL_SIGNAL_ID__SHIFT); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixLCAC_MC0_CNTL, mc_threshold); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixLCAC_MC1_CNTL, mc_threshold); + if (8 == cpl_threshold) { + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixLCAC_MC2_CNTL, mc_threshold); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixLCAC_MC3_CNTL, mc_threshold); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixLCAC_MC4_CNTL, mc_threshold); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixLCAC_MC5_CNTL, mc_threshold); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixLCAC_MC6_CNTL, mc_threshold); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixLCAC_MC7_CNTL, mc_threshold); + } + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixLCAC_CPL_CNTL, cpl_cntl); + + /* Program CAC_EN per MCD (0-7) Tile */ + val0 = val = cgs_read_register(hwmgr->device, mmMC_CONFIG_MCD); + val &= ~(MC_CONFIG_MCD__MCD0_WR_ENABLE_MASK | + MC_CONFIG_MCD__MCD1_WR_ENABLE_MASK | + MC_CONFIG_MCD__MCD2_WR_ENABLE_MASK | + MC_CONFIG_MCD__MCD3_WR_ENABLE_MASK | + MC_CONFIG_MCD__MCD4_WR_ENABLE_MASK | + MC_CONFIG_MCD__MCD5_WR_ENABLE_MASK | + MC_CONFIG_MCD__MCD6_WR_ENABLE_MASK | + MC_CONFIG_MCD__MCD7_WR_ENABLE_MASK | + MC_CONFIG_MCD__MC_RD_ENABLE_MASK); + + for (i = 0; i < 8; i++) { + /* Enable MCD i Tile read & write */ + val2 = (val | (i << MC_CONFIG_MCD__MC_RD_ENABLE__SHIFT) | + (1 << i)); + cgs_write_register(hwmgr->device, mmMC_CONFIG_MCD, val2); + /* Enbale CAC_ON MCD i Tile */ + val2 = cgs_read_register(hwmgr->device, mmMC_SEQ_CNTL); + val2 |= MC_SEQ_CNTL__CAC_EN_MASK; + cgs_write_register(hwmgr->device, mmMC_SEQ_CNTL, val2); + } + /* Set MC_CONFIG_MCD back to its default setting val0 */ + cgs_write_register(hwmgr->device, mmMC_CONFIG_MCD, val0); + + PP_ASSERT_WITH_CODE( + (0 == smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_MCLKDPM_Enable)), + "Failed to enable MCLK DPM during DPM Start Function!", + return -1); + } + return 0; +} + +static int fiji_start_dpm(struct pp_hwmgr 
*hwmgr) +{ + struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + + /*enable general power management */ + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT, + GLOBAL_PWRMGT_EN, 1); + /* enable sclk deep sleep */ + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL, + DYNAMIC_PM_EN, 1); + /* prepare for PCIE DPM */ + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + data->soft_regs_start + offsetof(SMU73_SoftRegisters, + VoltageChangeTimeout), 0x1000); + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE, + SWRST_COMMAND_1, RESETLC, 0x0); + + PP_ASSERT_WITH_CODE( + (0 == smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_Voltage_Cntl_Enable)), + "Failed to enable voltage DPM during DPM Start Function!", + return -1); + + if (fiji_enable_sclk_mclk_dpm(hwmgr)) { + printk(KERN_ERR "Failed to enable Sclk DPM and Mclk DPM!"); + return -1; + } + + /* enable PCIE dpm */ + if(!data->pcie_dpm_key_disabled) { + PP_ASSERT_WITH_CODE( + (0 == smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_PCIeDPM_Enable)), + "Failed to enable pcie DPM during DPM Start Function!", + return -1); + } + + return 0; +} + +static void fiji_set_dpm_event_sources(struct pp_hwmgr *hwmgr, + uint32_t sources) +{ + bool protection; + enum DPM_EVENT_SRC src; + + switch (sources) { + default: + printk(KERN_ERR "Unknown throttling event sources."); + /* fall through */ + case 0: + protection = false; + /* src is unused */ + break; + case (1 << PHM_AutoThrottleSource_Thermal): + protection = true; + src = DPM_EVENT_SRC_DIGITAL; + break; + case (1 << PHM_AutoThrottleSource_External): + protection = true; + src = DPM_EVENT_SRC_EXTERNAL; + break; + case (1 << PHM_AutoThrottleSource_External) | + (1 << PHM_AutoThrottleSource_Thermal): + protection = true; + src = DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL; + break; + } + /* Order matters - don't enable thermal protection for the wrong source. */ + if (protection) { + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_CTRL, + DPM_EVENT_SRC, src); + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT, + THERMAL_PROTECTION_DIS, + phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ThermalController)); + } else + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT, + THERMAL_PROTECTION_DIS, 1); +} + +static int fiji_enable_auto_throttle_source(struct pp_hwmgr *hwmgr, + PHM_AutoThrottleSource source) +{ + struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + + if (!(data->active_auto_throttle_sources & (1 << source))) { + data->active_auto_throttle_sources |= 1 << source; + fiji_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources); + } + return 0; +} + +static int fiji_enable_thermal_auto_throttle(struct pp_hwmgr *hwmgr) +{ + return fiji_enable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal); +} + +static int fiji_enable_dpm_tasks(struct pp_hwmgr *hwmgr) +{ + int tmp_result, result = 0; + + tmp_result = (!fiji_is_dpm_running(hwmgr))? 
0 : -1; + PP_ASSERT_WITH_CODE(tmp_result == 0, + "DPM is already running right now, no need to enable DPM!", + return 0); + + if (fiji_voltage_control(hwmgr)) { + tmp_result = fiji_enable_voltage_control(hwmgr); + PP_ASSERT_WITH_CODE(tmp_result == 0, + "Failed to enable voltage control!", + result = tmp_result); + } + + if (fiji_voltage_control(hwmgr)) { + tmp_result = fiji_construct_voltage_tables(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to construct voltage tables!", + result = tmp_result); + } + + tmp_result = fiji_initialize_mc_reg_table(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to initialize MC reg table!", result = tmp_result); + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_EngineSpreadSpectrumSupport)) + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, 1); + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ThermalController)) + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, 0); + + tmp_result = fiji_program_static_screen_threshold_parameters(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to program static screen threshold parameters!", + result = tmp_result); + + tmp_result = fiji_enable_display_gap(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to enable display gap!", result = tmp_result); + + tmp_result = fiji_program_voting_clients(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to program voting clients!", result = tmp_result); + + tmp_result = fiji_process_firmware_header(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to process firmware header!", result = tmp_result); + + tmp_result = fiji_initial_switch_from_arbf0_to_f1(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to initialize switch from ArbF0 to F1!", + result = tmp_result); + + tmp_result = fiji_init_smc_table(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to initialize SMC table!", result = tmp_result); + + tmp_result = fiji_init_arb_table_index(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to initialize ARB table index!", result = tmp_result); + + tmp_result = fiji_populate_pm_fuses(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to populate PM fuses!", result = tmp_result); + + tmp_result = fiji_enable_vrhot_gpio_interrupt(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to enable VR hot GPIO interrupt!", result = tmp_result); + + tmp_result = tonga_notify_smc_display_change(hwmgr, false); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to notify no display!", result = tmp_result); + + tmp_result = fiji_enable_sclk_control(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to enable SCLK control!", result = tmp_result); + + tmp_result = fiji_enable_ulv(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to enable ULV!", result = tmp_result); + + tmp_result = fiji_enable_deep_sleep_master_switch(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to enable deep sleep master switch!", result = tmp_result); + + tmp_result = fiji_start_dpm(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to start DPM!", result = tmp_result); + + tmp_result = fiji_enable_smc_cac(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to enable SMC CAC!", result = tmp_result); + + tmp_result = fiji_enable_power_containment(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to enable power 
containment!", result = tmp_result); + + tmp_result = fiji_power_control_set_level(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to power control set level!", result = tmp_result); + + tmp_result = fiji_enable_thermal_auto_throttle(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to enable thermal auto throttle!", result = tmp_result); + + return result; +} + +static int fiji_force_dpm_highest(struct pp_hwmgr *hwmgr) +{ + struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + uint32_t level, tmp; + + if (!data->sclk_dpm_key_disabled) { + if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) { + level = 0; + tmp = data->dpm_level_enable_mask.sclk_dpm_enable_mask; + while (tmp >>= 1) + level++; + if (level) + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_SCLKDPM_SetEnabledMask, + (1 << level)); + } + } + + if (!data->mclk_dpm_key_disabled) { + if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) { + level = 0; + tmp = data->dpm_level_enable_mask.mclk_dpm_enable_mask; + while (tmp >>= 1) + level++; + if (level) + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_MCLKDPM_SetEnabledMask, + (1 << level)); + } + } + + if (!data->pcie_dpm_key_disabled) { + if (data->dpm_level_enable_mask.pcie_dpm_enable_mask) { + level = 0; + tmp = data->dpm_level_enable_mask.pcie_dpm_enable_mask; + while (tmp >>= 1) + level++; + if (level) + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_PCIeDPM_ForceLevel, + (1 << level)); + } + } + return 0; +} + +static void fiji_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr) +{ + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)hwmgr->pptable; + struct phm_clock_voltage_dependency_table *table = + table_info->vddc_dep_on_dal_pwrl; + struct phm_ppt_v1_clock_voltage_dependency_table *vddc_table; + enum PP_DAL_POWERLEVEL dal_power_level = hwmgr->dal_power_level; + uint32_t req_vddc = 0, req_volt, i; + + if (!table && !(dal_power_level >= PP_DAL_POWERLEVEL_ULTRALOW && + dal_power_level <= PP_DAL_POWERLEVEL_PERFORMANCE)) + return; + + for (i= 0; i < table->count; i++) { + if (dal_power_level == table->entries[i].clk) { + req_vddc = table->entries[i].v; + break; + } + } + + vddc_table = table_info->vdd_dep_on_sclk; + for (i= 0; i < vddc_table->count; i++) { + if (req_vddc <= vddc_table->entries[i].vddc) { + req_volt = (((uint32_t)vddc_table->entries[i].vddc) * VOLTAGE_SCALE) + << VDDC_SHIFT; + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_VddC_Request, req_volt); + return; + } + } + printk(KERN_ERR "DAL requested level can not" + " found a available voltage in VDDC DPM Table \n"); +} + +static int fiji_upload_dpmlevel_enable_mask(struct pp_hwmgr *hwmgr) +{ + struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + + fiji_apply_dal_min_voltage_request(hwmgr); + + if (!data->sclk_dpm_key_disabled) { + if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_SCLKDPM_SetEnabledMask, + data->dpm_level_enable_mask.sclk_dpm_enable_mask); + } + return 0; +} + +static int fiji_unforce_dpm_levels(struct pp_hwmgr *hwmgr) +{ + struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + + if (!fiji_is_dpm_running(hwmgr)) + return -EINVAL; + + if (!data->pcie_dpm_key_disabled) { + smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_PCIeDPM_UnForceLevel); + } + + return fiji_upload_dpmlevel_enable_mask(hwmgr); +} + +static uint32_t fiji_get_lowest_enabled_level( + struct pp_hwmgr *hwmgr, 
uint32_t mask) +{ + uint32_t level = 0; + + while(0 == (mask & (1 << level))) + level++; + + return level; +} + +static int fiji_force_dpm_lowest(struct pp_hwmgr *hwmgr) +{ + struct fiji_hwmgr *data = + (struct fiji_hwmgr *)(hwmgr->backend); + uint32_t level; + + if (!data->sclk_dpm_key_disabled) + if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) { + level = fiji_get_lowest_enabled_level(hwmgr, + data->dpm_level_enable_mask.sclk_dpm_enable_mask); + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_SCLKDPM_SetEnabledMask, + (1 << level)); + + } + + if (!data->mclk_dpm_key_disabled) { + if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) { + level = fiji_get_lowest_enabled_level(hwmgr, + data->dpm_level_enable_mask.mclk_dpm_enable_mask); + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_MCLKDPM_SetEnabledMask, + (1 << level)); + } + } + + if (!data->pcie_dpm_key_disabled) { + if (data->dpm_level_enable_mask.pcie_dpm_enable_mask) { + level = fiji_get_lowest_enabled_level(hwmgr, + data->dpm_level_enable_mask.pcie_dpm_enable_mask); + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_PCIeDPM_ForceLevel, + (1 << level)); + } + } + + return 0; + +} +static int fiji_dpm_force_dpm_level(struct pp_hwmgr *hwmgr, + enum amd_dpm_forced_level level) +{ + int ret = 0; + + switch (level) { + case AMD_DPM_FORCED_LEVEL_HIGH: + ret = fiji_force_dpm_highest(hwmgr); + if (ret) + return ret; + break; + case AMD_DPM_FORCED_LEVEL_LOW: + ret = fiji_force_dpm_lowest(hwmgr); + if (ret) + return ret; + break; + case AMD_DPM_FORCED_LEVEL_AUTO: + ret = fiji_unforce_dpm_levels(hwmgr); + if (ret) + return ret; + break; + default: + break; + } + + hwmgr->dpm_level = level; + + return ret; +} + +static int fiji_get_power_state_size(struct pp_hwmgr *hwmgr) +{ + return sizeof(struct fiji_power_state); +} + +static int fiji_get_pp_table_entry_callback_func(struct pp_hwmgr *hwmgr, + void *state, struct pp_power_state *power_state, + void *pp_table, uint32_t classification_flag) +{ + struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + struct fiji_power_state *fiji_power_state = + (struct fiji_power_state *)(&(power_state->hardware)); + struct fiji_performance_level *performance_level; + ATOM_Tonga_State *state_entry = (ATOM_Tonga_State *)state; + ATOM_Tonga_POWERPLAYTABLE *powerplay_table = + (ATOM_Tonga_POWERPLAYTABLE *)pp_table; + ATOM_Tonga_SCLK_Dependency_Table *sclk_dep_table = + (ATOM_Tonga_SCLK_Dependency_Table *) + (((unsigned long)powerplay_table) + + le16_to_cpu(powerplay_table->usSclkDependencyTableOffset)); + ATOM_Tonga_MCLK_Dependency_Table *mclk_dep_table = + (ATOM_Tonga_MCLK_Dependency_Table *) + (((unsigned long)powerplay_table) + + le16_to_cpu(powerplay_table->usMclkDependencyTableOffset)); + + /* The following fields are not initialized here: id orderedList allStatesList */ + power_state->classification.ui_label = + (le16_to_cpu(state_entry->usClassification) & + ATOM_PPLIB_CLASSIFICATION_UI_MASK) >> + ATOM_PPLIB_CLASSIFICATION_UI_SHIFT; + power_state->classification.flags = classification_flag; + /* NOTE: There is a classification2 flag in BIOS that is not being used right now */ + + power_state->classification.temporary_state = false; + power_state->classification.to_be_deleted = false; + + power_state->validation.disallowOnDC = + (0 != (le32_to_cpu(state_entry->ulCapsAndSettings) & + ATOM_Tonga_DISALLOW_ON_DC)); + + power_state->pcie.lanes = 0; + + power_state->display.disableFrameModulation = false; + power_state->display.limitRefreshrate = 
false; + power_state->display.enableVariBright = + (0 != (le32_to_cpu(state_entry->ulCapsAndSettings) & + ATOM_Tonga_ENABLE_VARIBRIGHT)); + + power_state->validation.supportedPowerLevels = 0; + power_state->uvd_clocks.VCLK = 0; + power_state->uvd_clocks.DCLK = 0; + power_state->temperatures.min = 0; + power_state->temperatures.max = 0; + + performance_level = &(fiji_power_state->performance_levels + [fiji_power_state->performance_level_count++]); + + PP_ASSERT_WITH_CODE( + (fiji_power_state->performance_level_count < SMU73_MAX_LEVELS_GRAPHICS), + "Performance levels exceeds SMC limit!", + return -1); + + PP_ASSERT_WITH_CODE( + (fiji_power_state->performance_level_count <= + hwmgr->platform_descriptor.hardwareActivityPerformanceLevels), + "Performance levels exceeds Driver limit!", + return -1); + + /* Performance levels are arranged from low to high. */ + performance_level->memory_clock = mclk_dep_table->entries + [state_entry->ucMemoryClockIndexLow].ulMclk; + performance_level->engine_clock = sclk_dep_table->entries + [state_entry->ucEngineClockIndexLow].ulSclk; + performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap, + state_entry->ucPCIEGenLow); + performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap, + state_entry->ucPCIELaneHigh); + + performance_level = &(fiji_power_state->performance_levels + [fiji_power_state->performance_level_count++]); + performance_level->memory_clock = mclk_dep_table->entries + [state_entry->ucMemoryClockIndexHigh].ulMclk; + performance_level->engine_clock = sclk_dep_table->entries + [state_entry->ucEngineClockIndexHigh].ulSclk; + performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap, + state_entry->ucPCIEGenHigh); + performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap, + state_entry->ucPCIELaneHigh); + + return 0; +} + +static int fiji_get_pp_table_entry(struct pp_hwmgr *hwmgr, + unsigned long entry_index, struct pp_power_state *state) +{ + int result; + struct fiji_power_state *ps; + struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table = + table_info->vdd_dep_on_mclk; + + state->hardware.magic = PHM_VIslands_Magic; + + ps = (struct fiji_power_state *)(&state->hardware); + + result = tonga_get_powerplay_table_entry(hwmgr, entry_index, state, + fiji_get_pp_table_entry_callback_func); + + /* This is the earliest time we have all the dependency table and the VBIOS boot state + * as PP_Tables_GetPowerPlayTableEntry retrieves the VBIOS boot state + * if there is only one VDDCI/MCLK level, check if it's the same as VBIOS boot state + */ + if (dep_mclk_table != NULL && dep_mclk_table->count == 1) { + if (dep_mclk_table->entries[0].clk != + data->vbios_boot_state.mclk_bootup_value) + printk(KERN_ERR "Single MCLK entry VDDCI/MCLK dependency table " + "does not match VBIOS boot MCLK level"); + if (dep_mclk_table->entries[0].vddci != + data->vbios_boot_state.vddci_bootup_value) + printk(KERN_ERR "Single VDDCI entry VDDCI/MCLK dependency table " + "does not match VBIOS boot VDDCI level"); + } + + /* set DC compatible flag if this state supports DC */ + if (!state->validation.disallowOnDC) + ps->dc_compatible = true; + + if (state->classification.flags & PP_StateClassificationFlag_ACPI) + data->acpi_pcie_gen = ps->performance_levels[0].pcie_gen; + + ps->uvd_clks.vclk = state->uvd_clocks.VCLK; + ps->uvd_clks.dclk = 
state->uvd_clocks.DCLK; + + if (!result) { + uint32_t i; + + switch (state->classification.ui_label) { + case PP_StateUILabel_Performance: + data->use_pcie_performance_levels = true; + + for (i = 0; i < ps->performance_level_count; i++) { + if (data->pcie_gen_performance.max < + ps->performance_levels[i].pcie_gen) + data->pcie_gen_performance.max = + ps->performance_levels[i].pcie_gen; + + if (data->pcie_gen_performance.min > + ps->performance_levels[i].pcie_gen) + data->pcie_gen_performance.min = + ps->performance_levels[i].pcie_gen; + + if (data->pcie_lane_performance.max < + ps->performance_levels[i].pcie_lane) + data->pcie_lane_performance.max = + ps->performance_levels[i].pcie_lane; + + if (data->pcie_lane_performance.min > + ps->performance_levels[i].pcie_lane) + data->pcie_lane_performance.min = + ps->performance_levels[i].pcie_lane; + } + break; + case PP_StateUILabel_Battery: + data->use_pcie_power_saving_levels = true; + + for (i = 0; i < ps->performance_level_count; i++) { + if (data->pcie_gen_power_saving.max < + ps->performance_levels[i].pcie_gen) + data->pcie_gen_power_saving.max = + ps->performance_levels[i].pcie_gen; + + if (data->pcie_gen_power_saving.min > + ps->performance_levels[i].pcie_gen) + data->pcie_gen_power_saving.min = + ps->performance_levels[i].pcie_gen; + + if (data->pcie_lane_power_saving.max < + ps->performance_levels[i].pcie_lane) + data->pcie_lane_power_saving.max = + ps->performance_levels[i].pcie_lane; + + if (data->pcie_lane_power_saving.min > + ps->performance_levels[i].pcie_lane) + data->pcie_lane_power_saving.min = + ps->performance_levels[i].pcie_lane; + } + break; + default: + break; + } + } + return 0; +} + +static int fiji_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, + struct pp_power_state *request_ps, + const struct pp_power_state *current_ps) +{ + struct fiji_power_state *fiji_ps = + cast_phw_fiji_power_state(&request_ps->hardware); + uint32_t sclk; + uint32_t mclk; + struct PP_Clocks minimum_clocks = {0}; + bool disable_mclk_switching; + bool disable_mclk_switching_for_frame_lock; + struct cgs_display_info info = {0}; + const struct phm_clock_and_voltage_limits *max_limits; + uint32_t i; + struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + int32_t count; + int32_t stable_pstate_sclk = 0, stable_pstate_mclk = 0; + + data->battery_state = (PP_StateUILabel_Battery == + request_ps->classification.ui_label); + + PP_ASSERT_WITH_CODE(fiji_ps->performance_level_count == 2, + "VI should always have 2 performance levels",); + + max_limits = (PP_PowerSource_AC == hwmgr->power_source) ? + &(hwmgr->dyn_state.max_clock_voltage_on_ac) : + &(hwmgr->dyn_state.max_clock_voltage_on_dc); + + /* Cap clock DPM tables at DC MAX if it is in DC. 
*/ + if (PP_PowerSource_DC == hwmgr->power_source) { + for (i = 0; i < fiji_ps->performance_level_count; i++) { + if (fiji_ps->performance_levels[i].memory_clock > max_limits->mclk) + fiji_ps->performance_levels[i].memory_clock = max_limits->mclk; + if (fiji_ps->performance_levels[i].engine_clock > max_limits->sclk) + fiji_ps->performance_levels[i].engine_clock = max_limits->sclk; + } + } + + fiji_ps->vce_clks.evclk = hwmgr->vce_arbiter.evclk; + fiji_ps->vce_clks.ecclk = hwmgr->vce_arbiter.ecclk; + + fiji_ps->acp_clk = hwmgr->acp_arbiter.acpclk; + + cgs_get_active_displays_info(hwmgr->device, &info); + + /*TO DO result = PHM_CheckVBlankTime(hwmgr, &vblankTooShort);*/ + + /* TO DO GetMinClockSettings(hwmgr->pPECI, &minimum_clocks); */ + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_StablePState)) { + max_limits = &(hwmgr->dyn_state.max_clock_voltage_on_ac); + stable_pstate_sclk = (max_limits->sclk * 75) / 100; + + for (count = table_info->vdd_dep_on_sclk->count - 1; + count >= 0; count--) { + if (stable_pstate_sclk >= + table_info->vdd_dep_on_sclk->entries[count].clk) { + stable_pstate_sclk = + table_info->vdd_dep_on_sclk->entries[count].clk; + break; + } + } + + if (count < 0) + stable_pstate_sclk = table_info->vdd_dep_on_sclk->entries[0].clk; + + stable_pstate_mclk = max_limits->mclk; + + minimum_clocks.engineClock = stable_pstate_sclk; + minimum_clocks.memoryClock = stable_pstate_mclk; + } + + if (minimum_clocks.engineClock < hwmgr->gfx_arbiter.sclk) + minimum_clocks.engineClock = hwmgr->gfx_arbiter.sclk; + + if (minimum_clocks.memoryClock < hwmgr->gfx_arbiter.mclk) + minimum_clocks.memoryClock = hwmgr->gfx_arbiter.mclk; + + fiji_ps->sclk_threshold = hwmgr->gfx_arbiter.sclk_threshold; + + if (0 != hwmgr->gfx_arbiter.sclk_over_drive) { + PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.sclk_over_drive <= + hwmgr->platform_descriptor.overdriveLimit.engineClock), + "Overdrive sclk exceeds limit", + hwmgr->gfx_arbiter.sclk_over_drive = + hwmgr->platform_descriptor.overdriveLimit.engineClock); + + if (hwmgr->gfx_arbiter.sclk_over_drive >= hwmgr->gfx_arbiter.sclk) + fiji_ps->performance_levels[1].engine_clock = + hwmgr->gfx_arbiter.sclk_over_drive; + } + + if (0 != hwmgr->gfx_arbiter.mclk_over_drive) { + PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.mclk_over_drive <= + hwmgr->platform_descriptor.overdriveLimit.memoryClock), + "Overdrive mclk exceeds limit", + hwmgr->gfx_arbiter.mclk_over_drive = + hwmgr->platform_descriptor.overdriveLimit.memoryClock); + + if (hwmgr->gfx_arbiter.mclk_over_drive >= hwmgr->gfx_arbiter.mclk) + fiji_ps->performance_levels[1].memory_clock = + hwmgr->gfx_arbiter.mclk_over_drive; + } + + disable_mclk_switching_for_frame_lock = phm_cap_enabled( + hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_DisableMclkSwitchingForFrameLock); + + disable_mclk_switching = (1 < info.display_count) || + disable_mclk_switching_for_frame_lock; + + sclk = fiji_ps->performance_levels[0].engine_clock; + mclk = fiji_ps->performance_levels[0].memory_clock; + + if (disable_mclk_switching) + mclk = fiji_ps->performance_levels + [fiji_ps->performance_level_count - 1].memory_clock; + + if (sclk < minimum_clocks.engineClock) + sclk = (minimum_clocks.engineClock > max_limits->sclk) ? + max_limits->sclk : minimum_clocks.engineClock; + + if (mclk < minimum_clocks.memoryClock) + mclk = (minimum_clocks.memoryClock > max_limits->mclk) ? 
+ max_limits->mclk : minimum_clocks.memoryClock; + + fiji_ps->performance_levels[0].engine_clock = sclk; + fiji_ps->performance_levels[0].memory_clock = mclk; + + fiji_ps->performance_levels[1].engine_clock = + (fiji_ps->performance_levels[1].engine_clock >= + fiji_ps->performance_levels[0].engine_clock) ? + fiji_ps->performance_levels[1].engine_clock : + fiji_ps->performance_levels[0].engine_clock; + + if (disable_mclk_switching) { + if (mclk < fiji_ps->performance_levels[1].memory_clock) + mclk = fiji_ps->performance_levels[1].memory_clock; + + fiji_ps->performance_levels[0].memory_clock = mclk; + fiji_ps->performance_levels[1].memory_clock = mclk; + } else { + if (fiji_ps->performance_levels[1].memory_clock < + fiji_ps->performance_levels[0].memory_clock) + fiji_ps->performance_levels[1].memory_clock = + fiji_ps->performance_levels[0].memory_clock; + } + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_StablePState)) { + for (i = 0; i < fiji_ps->performance_level_count; i++) { + fiji_ps->performance_levels[i].engine_clock = stable_pstate_sclk; + fiji_ps->performance_levels[i].memory_clock = stable_pstate_mclk; + fiji_ps->performance_levels[i].pcie_gen = data->pcie_gen_performance.max; + fiji_ps->performance_levels[i].pcie_lane = data->pcie_gen_performance.max; + } + } + + return 0; +} + +static int fiji_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, const void *input) +{ + const struct phm_set_power_state_input *states = + (const struct phm_set_power_state_input *)input; + const struct fiji_power_state *fiji_ps = + cast_const_phw_fiji_power_state(states->pnew_state); + struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + struct fiji_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table); + uint32_t sclk = fiji_ps->performance_levels + [fiji_ps->performance_level_count - 1].engine_clock; + struct fiji_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table); + uint32_t mclk = fiji_ps->performance_levels + [fiji_ps->performance_level_count - 1].memory_clock; + struct PP_Clocks min_clocks = {0}; + uint32_t i; + struct cgs_display_info info = {0}; + + data->need_update_smu7_dpm_table = 0; + + for (i = 0; i < sclk_table->count; i++) { + if (sclk == sclk_table->dpm_levels[i].value) + break; + } + + if (i >= sclk_table->count) + data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK; + else { + /* TODO: Check SCLK in DAL's minimum clocks + * in case DeepSleep divider update is required. 
+ */ + if(data->display_timing.min_clock_in_sr != min_clocks.engineClockInSR) + data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK; + } + + for (i = 0; i < mclk_table->count; i++) { + if (mclk == mclk_table->dpm_levels[i].value) + break; + } + + if (i >= mclk_table->count) + data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK; + + cgs_get_active_displays_info(hwmgr->device, &info); + + if (data->display_timing.num_existing_displays != info.display_count) + data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK; + + return 0; +} + +static uint16_t fiji_get_maximum_link_speed(struct pp_hwmgr *hwmgr, + const struct fiji_power_state *fiji_ps) +{ + uint32_t i; + uint32_t sclk, max_sclk = 0; + struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + struct fiji_dpm_table *dpm_table = &data->dpm_table; + + for (i = 0; i < fiji_ps->performance_level_count; i++) { + sclk = fiji_ps->performance_levels[i].engine_clock; + if (max_sclk < sclk) + max_sclk = sclk; + } + + for (i = 0; i < dpm_table->sclk_table.count; i++) { + if (dpm_table->sclk_table.dpm_levels[i].value == max_sclk) + return (uint16_t) ((i >= dpm_table->pcie_speed_table.count) ? + dpm_table->pcie_speed_table.dpm_levels + [dpm_table->pcie_speed_table.count - 1].value : + dpm_table->pcie_speed_table.dpm_levels[i].value); + } + + return 0; +} + +static int fiji_request_link_speed_change_before_state_change( + struct pp_hwmgr *hwmgr, const void *input) +{ + const struct phm_set_power_state_input *states = + (const struct phm_set_power_state_input *)input; + struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + const struct fiji_power_state *fiji_nps = + cast_const_phw_fiji_power_state(states->pnew_state); + const struct fiji_power_state *fiji_cps = + cast_const_phw_fiji_power_state(states->pcurrent_state); + + uint16_t target_link_speed = fiji_get_maximum_link_speed(hwmgr, fiji_nps); + uint16_t current_link_speed; + + if (data->force_pcie_gen == PP_PCIEGenInvalid) + current_link_speed = fiji_get_maximum_link_speed(hwmgr, fiji_cps); + else + current_link_speed = data->force_pcie_gen; + + data->force_pcie_gen = PP_PCIEGenInvalid; + data->pspp_notify_required = false; + if (target_link_speed > current_link_speed) { + switch(target_link_speed) { + case PP_PCIEGen3: + if (0 == acpi_pcie_perf_request(hwmgr->device, PCIE_PERF_REQ_GEN3, false)) + break; + data->force_pcie_gen = PP_PCIEGen2; + if (current_link_speed == PP_PCIEGen2) + break; + case PP_PCIEGen2: + if (0 == acpi_pcie_perf_request(hwmgr->device, PCIE_PERF_REQ_GEN2, false)) + break; + default: + data->force_pcie_gen = fiji_get_current_pcie_speed(hwmgr); + break; + } + } else { + if (target_link_speed < current_link_speed) + data->pspp_notify_required = true; + } + + return 0; +} + +static int fiji_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) +{ + struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + + if (0 == data->need_update_smu7_dpm_table) + return 0; + + if ((0 == data->sclk_dpm_key_disabled) && + (data->need_update_smu7_dpm_table & + (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) { + PP_ASSERT_WITH_CODE(true == fiji_is_dpm_running(hwmgr), + "Trying to freeze SCLK DPM when DPM is disabled",); + PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_SCLKDPM_FreezeLevel), + "Failed to freeze SCLK DPM during FreezeSclkMclkDPM Function!", + return -1); + } + + if ((0 == data->mclk_dpm_key_disabled) && + (data->need_update_smu7_dpm_table & + DPMTABLE_OD_UPDATE_MCLK)) { + PP_ASSERT_WITH_CODE(true == 
fiji_is_dpm_running(hwmgr), + "Trying to freeze MCLK DPM when DPM is disabled",); + PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_MCLKDPM_FreezeLevel), + "Failed to freeze MCLK DPM during FreezeSclkMclkDPM Function!", + return -1); + } + + return 0; +} + +static int fiji_populate_and_upload_sclk_mclk_dpm_levels( + struct pp_hwmgr *hwmgr, const void *input) +{ + int result = 0; + const struct phm_set_power_state_input *states = + (const struct phm_set_power_state_input *)input; + const struct fiji_power_state *fiji_ps = + cast_const_phw_fiji_power_state(states->pnew_state); + struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + uint32_t sclk = fiji_ps->performance_levels + [fiji_ps->performance_level_count - 1].engine_clock; + uint32_t mclk = fiji_ps->performance_levels + [fiji_ps->performance_level_count - 1].memory_clock; + struct fiji_dpm_table *dpm_table = &data->dpm_table; + + struct fiji_dpm_table *golden_dpm_table = &data->golden_dpm_table; + uint32_t dpm_count, clock_percent; + uint32_t i; + + if (0 == data->need_update_smu7_dpm_table) + return 0; + + if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK) { + dpm_table->sclk_table.dpm_levels + [dpm_table->sclk_table.count - 1].value = sclk; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_OD6PlusinACSupport) || + phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_OD6PlusinDCSupport)) { + /* Need to do calculation based on the golden DPM table + * as the Heatmap GPU Clock axis is also based on the default values + */ + PP_ASSERT_WITH_CODE( + (golden_dpm_table->sclk_table.dpm_levels + [golden_dpm_table->sclk_table.count - 1].value != 0), + "Divide by 0!", + return -1); + dpm_count = dpm_table->sclk_table.count < 2 ? 
+ 0 : dpm_table->sclk_table.count - 2; + for (i = dpm_count; i > 1; i--) { + if (sclk > golden_dpm_table->sclk_table.dpm_levels + [golden_dpm_table->sclk_table.count-1].value) { + clock_percent = + ((sclk - golden_dpm_table->sclk_table.dpm_levels + [golden_dpm_table->sclk_table.count-1].value) * 100) / + golden_dpm_table->sclk_table.dpm_levels + [golden_dpm_table->sclk_table.count-1].value; + + dpm_table->sclk_table.dpm_levels[i].value = + golden_dpm_table->sclk_table.dpm_levels[i].value + + (golden_dpm_table->sclk_table.dpm_levels[i].value * + clock_percent)/100; + + } else if (golden_dpm_table->sclk_table.dpm_levels + [dpm_table->sclk_table.count-1].value > sclk) { + clock_percent = + ((golden_dpm_table->sclk_table.dpm_levels + [golden_dpm_table->sclk_table.count - 1].value - sclk) * + 100) / + golden_dpm_table->sclk_table.dpm_levels + [golden_dpm_table->sclk_table.count-1].value; + + dpm_table->sclk_table.dpm_levels[i].value = + golden_dpm_table->sclk_table.dpm_levels[i].value - + (golden_dpm_table->sclk_table.dpm_levels[i].value * + clock_percent) / 100; + } else + dpm_table->sclk_table.dpm_levels[i].value = + golden_dpm_table->sclk_table.dpm_levels[i].value; + } + } + } + + if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK) { + dpm_table->mclk_table.dpm_levels + [dpm_table->mclk_table.count - 1].value = mclk; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_OD6PlusinACSupport) || + phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_OD6PlusinDCSupport)) { + + PP_ASSERT_WITH_CODE( + (golden_dpm_table->mclk_table.dpm_levels + [golden_dpm_table->mclk_table.count-1].value != 0), + "Divide by 0!", + return -1); + dpm_count = dpm_table->mclk_table.count < 2 ? + 0 : dpm_table->mclk_table.count - 2; + for (i = dpm_count; i > 1; i--) { + if (mclk > golden_dpm_table->mclk_table.dpm_levels + [golden_dpm_table->mclk_table.count-1].value) { + clock_percent = ((mclk - + golden_dpm_table->mclk_table.dpm_levels + [golden_dpm_table->mclk_table.count-1].value) * 100) / + golden_dpm_table->mclk_table.dpm_levels + [golden_dpm_table->mclk_table.count-1].value; + + dpm_table->mclk_table.dpm_levels[i].value = + golden_dpm_table->mclk_table.dpm_levels[i].value + + (golden_dpm_table->mclk_table.dpm_levels[i].value * + clock_percent) / 100; + + } else if (golden_dpm_table->mclk_table.dpm_levels + [dpm_table->mclk_table.count-1].value > mclk) { + clock_percent = ((golden_dpm_table->mclk_table.dpm_levels + [golden_dpm_table->mclk_table.count-1].value - mclk) * 100) / + golden_dpm_table->mclk_table.dpm_levels + [golden_dpm_table->mclk_table.count-1].value; + + dpm_table->mclk_table.dpm_levels[i].value = + golden_dpm_table->mclk_table.dpm_levels[i].value - + (golden_dpm_table->mclk_table.dpm_levels[i].value * + clock_percent) / 100; + } else + dpm_table->mclk_table.dpm_levels[i].value = + golden_dpm_table->mclk_table.dpm_levels[i].value; + } + } + } + + if (data->need_update_smu7_dpm_table & + (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK)) { + result = fiji_populate_all_memory_levels(hwmgr); + PP_ASSERT_WITH_CODE((0 == result), + "Failed to populate SCLK during PopulateNewDPMClocksStates Function!", + return result); + } + + if (data->need_update_smu7_dpm_table & + (DPMTABLE_OD_UPDATE_MCLK + DPMTABLE_UPDATE_MCLK)) { + /*populate MCLK dpm table to SMU7 */ + result = fiji_populate_all_memory_levels(hwmgr); + PP_ASSERT_WITH_CODE((0 == result), + "Failed to populate MCLK during PopulateNewDPMClocksStates Function!", + return result); + } + + 
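/*
 * Worked example of the golden-table scaling performed above, using
 * hypothetical numbers (clocks are in the driver's 10 kHz units): if the
 * golden top-level SCLK is 105000 (1050 MHz) and the overdriven target
 * sclk is 115500 (1155 MHz), then
 *
 *	clock_percent = ((115500 - 105000) * 100) / 105000 = 10
 *
 * and each intermediate level i is moved to
 *
 *	golden[i].value + (golden[i].value * 10) / 100
 *
 * i.e. scaled up by the same 10%; an underclocked target scales the levels
 * down symmetrically. This is only an illustration of the arithmetic in the
 * loops above.
 */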
return result; +} + +static int fiji_trim_single_dpm_states(struct pp_hwmgr *hwmgr, + struct fiji_single_dpm_table * dpm_table, + uint32_t low_limit, uint32_t high_limit) +{ + uint32_t i; + + for (i = 0; i < dpm_table->count; i++) { + if ((dpm_table->dpm_levels[i].value < low_limit) || + (dpm_table->dpm_levels[i].value > high_limit)) + dpm_table->dpm_levels[i].enabled = false; + else + dpm_table->dpm_levels[i].enabled = true; + } + return 0; +} + +static int fiji_trim_dpm_states(struct pp_hwmgr *hwmgr, + const struct fiji_power_state *fiji_ps) +{ + int result = 0; + struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + uint32_t high_limit_count; + + PP_ASSERT_WITH_CODE((fiji_ps->performance_level_count >= 1), + "power state did not have any performance level", + return -1); + + high_limit_count = (1 == fiji_ps->performance_level_count) ? 0 : 1; + + fiji_trim_single_dpm_states(hwmgr, + &(data->dpm_table.sclk_table), + fiji_ps->performance_levels[0].engine_clock, + fiji_ps->performance_levels[high_limit_count].engine_clock); + + fiji_trim_single_dpm_states(hwmgr, + &(data->dpm_table.mclk_table), + fiji_ps->performance_levels[0].memory_clock, + fiji_ps->performance_levels[high_limit_count].memory_clock); + + return result; +} + +static int fiji_generate_dpm_level_enable_mask( + struct pp_hwmgr *hwmgr, const void *input) +{ + int result; + const struct phm_set_power_state_input *states = + (const struct phm_set_power_state_input *)input; + struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + const struct fiji_power_state *fiji_ps = + cast_const_phw_fiji_power_state(states->pnew_state); + + result = fiji_trim_dpm_states(hwmgr, fiji_ps); + if (result) + return result; + + data->dpm_level_enable_mask.sclk_dpm_enable_mask = + fiji_get_dpm_level_enable_mask_value(&data->dpm_table.sclk_table); + data->dpm_level_enable_mask.mclk_dpm_enable_mask = + fiji_get_dpm_level_enable_mask_value(&data->dpm_table.mclk_table); + data->last_mclk_dpm_enable_mask = + data->dpm_level_enable_mask.mclk_dpm_enable_mask; + + if (data->uvd_enabled) { + if (data->dpm_level_enable_mask.mclk_dpm_enable_mask & 1) + data->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE; + } + + data->dpm_level_enable_mask.pcie_dpm_enable_mask = + fiji_get_dpm_level_enable_mask_value(&data->dpm_table.pcie_speed_table); + + return 0; +} + +int fiji_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable) +{ + return smum_send_msg_to_smc(hwmgr->smumgr, enable ? + (PPSMC_Msg)PPSMC_MSG_UVDDPM_Enable : + (PPSMC_Msg)PPSMC_MSG_UVDDPM_Disable); +} + +int fiji_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable) +{ + return smum_send_msg_to_smc(hwmgr->smumgr, enable? + PPSMC_MSG_VCEDPM_Enable : + PPSMC_MSG_VCEDPM_Disable); +} + +int fiji_enable_disable_samu_dpm(struct pp_hwmgr *hwmgr, bool enable) +{ + return smum_send_msg_to_smc(hwmgr->smumgr, enable? + PPSMC_MSG_SAMUDPM_Enable : + PPSMC_MSG_SAMUDPM_Disable); +} + +int fiji_enable_disable_acp_dpm(struct pp_hwmgr *hwmgr, bool enable) +{ + return smum_send_msg_to_smc(hwmgr->smumgr, enable? 
+ PPSMC_MSG_ACPDPM_Enable : + PPSMC_MSG_ACPDPM_Disable); +} + +int fiji_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate) +{ + struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + uint32_t mm_boot_level_offset, mm_boot_level_value; + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + + if (!bgate) { + data->smc_state_table.UvdBootLevel = 0; + if (table_info->mm_dep_table->count > 0) + data->smc_state_table.UvdBootLevel = + (uint8_t) (table_info->mm_dep_table->count - 1); + mm_boot_level_offset = data->dpm_table_start + + offsetof(SMU73_Discrete_DpmTable, UvdBootLevel); + mm_boot_level_offset /= 4; + mm_boot_level_offset *= 4; + mm_boot_level_value = cgs_read_ind_register(hwmgr->device, + CGS_IND_REG__SMC, mm_boot_level_offset); + mm_boot_level_value &= 0x00FFFFFF; + mm_boot_level_value |= data->smc_state_table.UvdBootLevel << 24; + cgs_write_ind_register(hwmgr->device, + CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value); + + if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_UVDDPM) || + phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_StablePState)) + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_UVDDPM_SetEnabledMask, + (uint32_t)(1 << data->smc_state_table.UvdBootLevel)); + } + + return fiji_enable_disable_uvd_dpm(hwmgr, !bgate); +} + +int fiji_update_vce_dpm(struct pp_hwmgr *hwmgr, const void *input) +{ + const struct phm_set_power_state_input *states = + (const struct phm_set_power_state_input *)input; + struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + const struct fiji_power_state *fiji_nps = + cast_const_phw_fiji_power_state(states->pnew_state); + const struct fiji_power_state *fiji_cps = + cast_const_phw_fiji_power_state(states->pcurrent_state); + + uint32_t mm_boot_level_offset, mm_boot_level_value; + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + + if (fiji_nps->vce_clks.evclk >0 && + (fiji_cps == NULL || fiji_cps->vce_clks.evclk == 0)) { + data->smc_state_table.VceBootLevel = + (uint8_t) (table_info->mm_dep_table->count - 1); + + mm_boot_level_offset = data->dpm_table_start + + offsetof(SMU73_Discrete_DpmTable, VceBootLevel); + mm_boot_level_offset /= 4; + mm_boot_level_offset *= 4; + mm_boot_level_value = cgs_read_ind_register(hwmgr->device, + CGS_IND_REG__SMC, mm_boot_level_offset); + mm_boot_level_value &= 0xFF00FFFF; + mm_boot_level_value |= data->smc_state_table.VceBootLevel << 16; + cgs_write_ind_register(hwmgr->device, + CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value); + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_StablePState)) { + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_VCEDPM_SetEnabledMask, + (uint32_t)1 << data->smc_state_table.VceBootLevel); + + fiji_enable_disable_vce_dpm(hwmgr, true); + } else if (fiji_nps->vce_clks.evclk == 0 && + fiji_cps != NULL && + fiji_cps->vce_clks.evclk > 0) + fiji_enable_disable_vce_dpm(hwmgr, false); + } + + return 0; +} + +int fiji_update_samu_dpm(struct pp_hwmgr *hwmgr, bool bgate) +{ + struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + uint32_t mm_boot_level_offset, mm_boot_level_value; + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + + if (!bgate) { + data->smc_state_table.SamuBootLevel = + (uint8_t) (table_info->mm_dep_table->count - 1); + mm_boot_level_offset = 
data->dpm_table_start + + offsetof(SMU73_Discrete_DpmTable, SamuBootLevel); + mm_boot_level_offset /= 4; + mm_boot_level_offset *= 4; + mm_boot_level_value = cgs_read_ind_register(hwmgr->device, + CGS_IND_REG__SMC, mm_boot_level_offset); + mm_boot_level_value &= 0xFFFFFF00; + mm_boot_level_value |= data->smc_state_table.SamuBootLevel << 0; + cgs_write_ind_register(hwmgr->device, + CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value); + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_StablePState)) + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_SAMUDPM_SetEnabledMask, + (uint32_t)(1 << data->smc_state_table.SamuBootLevel)); + } + + return fiji_enable_disable_samu_dpm(hwmgr, !bgate); +} + +int fiji_update_acp_dpm(struct pp_hwmgr *hwmgr, bool bgate) +{ + struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + uint32_t mm_boot_level_offset, mm_boot_level_value; + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + + if (!bgate) { + data->smc_state_table.AcpBootLevel = + (uint8_t) (table_info->mm_dep_table->count - 1); + mm_boot_level_offset = data->dpm_table_start + + offsetof(SMU73_Discrete_DpmTable, AcpBootLevel); + mm_boot_level_offset /= 4; + mm_boot_level_offset *= 4; + mm_boot_level_value = cgs_read_ind_register(hwmgr->device, + CGS_IND_REG__SMC, mm_boot_level_offset); + mm_boot_level_value &= 0xFFFF00FF; + mm_boot_level_value |= data->smc_state_table.AcpBootLevel << 8; + cgs_write_ind_register(hwmgr->device, + CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value); + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_StablePState)) + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_ACPDPM_SetEnabledMask, + (uint32_t)(1 << data->smc_state_table.AcpBootLevel)); + } + + return fiji_enable_disable_acp_dpm(hwmgr, !bgate); +} + +static int fiji_update_sclk_threshold(struct pp_hwmgr *hwmgr) +{ + struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + + int result = 0; + uint32_t low_sclk_interrupt_threshold = 0; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_SclkThrottleLowNotification) + && (hwmgr->gfx_arbiter.sclk_threshold != + data->low_sclk_interrupt_threshold)) { + data->low_sclk_interrupt_threshold = + hwmgr->gfx_arbiter.sclk_threshold; + low_sclk_interrupt_threshold = + data->low_sclk_interrupt_threshold; + + CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold); + + result = fiji_copy_bytes_to_smc( + hwmgr->smumgr, + data->dpm_table_start + + offsetof(SMU73_Discrete_DpmTable, + LowSclkInterruptThreshold), + (uint8_t *)&low_sclk_interrupt_threshold, + sizeof(uint32_t), + data->sram_end); + } + + return result; +} + +static int fiji_program_mem_timing_parameters(struct pp_hwmgr *hwmgr) +{ + struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + + if (data->need_update_smu7_dpm_table & + (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_OD_UPDATE_MCLK)) + return fiji_program_memory_timing_parameters(hwmgr); + + return 0; +} + +static int fiji_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) +{ + struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + + if (0 == data->need_update_smu7_dpm_table) + return 0; + + if ((0 == data->sclk_dpm_key_disabled) && + (data->need_update_smu7_dpm_table & + (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) { + + PP_ASSERT_WITH_CODE(true == fiji_is_dpm_running(hwmgr), + "Trying to Unfreeze SCLK DPM when DPM is disabled",); + 
PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_SCLKDPM_UnfreezeLevel), + "Failed to unfreeze SCLK DPM during UnFreezeSclkMclkDPM Function!", + return -1); + } + + if ((0 == data->mclk_dpm_key_disabled) && + (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) { + + PP_ASSERT_WITH_CODE(true == fiji_is_dpm_running(hwmgr), + "Trying to Unfreeze MCLK DPM when DPM is disabled",); + PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_MCLKDPM_UnfreezeLevel), + "Failed to unfreeze MCLK DPM during UnFreezeSclkMclkDPM Function!", + return -1); + } + + data->need_update_smu7_dpm_table = 0; + + return 0; +} + +/* Look up the voltage based on DAL's requested level + * and then send the requested VDDC voltage to SMC. + */ +static void fiji_apply_dal_minimum_voltage_request(struct pp_hwmgr *hwmgr) +{ + return; +} + +int fiji_upload_dpm_level_enable_mask(struct pp_hwmgr *hwmgr) +{ + int result; + struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + + /* Apply minimum voltage based on DAL's request level */ + fiji_apply_dal_minimum_voltage_request(hwmgr); + + if (0 == data->sclk_dpm_key_disabled) { + /* Checking if DPM is running. If we discover a hang because of this, + * we should skip this message.
+ */ + if (!fiji_is_dpm_running(hwmgr)) + printk(KERN_ERR "[ powerplay ]" + " Trying to set Enable Mask when DPM is disabled \n"); + + if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) { + result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_MCLKDPM_SetEnabledMask, + data->dpm_level_enable_mask.mclk_dpm_enable_mask); + PP_ASSERT_WITH_CODE((0 == result), + "Set Mclk Dpm enable Mask failed", return -1); + } + } + + return 0; +} + +static int fiji_notify_link_speed_change_after_state_change( + struct pp_hwmgr *hwmgr, const void *input) +{ + const struct phm_set_power_state_input *states = + (const struct phm_set_power_state_input *)input; + struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + const struct fiji_power_state *fiji_ps = + cast_const_phw_fiji_power_state(states->pnew_state); + uint16_t target_link_speed = fiji_get_maximum_link_speed(hwmgr, fiji_ps); + uint8_t request; + + if (data->pspp_notify_required) { + if (target_link_speed == PP_PCIEGen3) + request = PCIE_PERF_REQ_GEN3; + else if (target_link_speed == PP_PCIEGen2) + request = PCIE_PERF_REQ_GEN2; + else + request = PCIE_PERF_REQ_GEN1; + + if(request == PCIE_PERF_REQ_GEN1 && + fiji_get_current_pcie_speed(hwmgr) > 0) + return 0; + + if (acpi_pcie_perf_request(hwmgr->device, request, false)) { + if (PP_PCIEGen2 == target_link_speed) + printk("PSPP request to switch to Gen2 from Gen3 Failed!"); + else + printk("PSPP request to switch to Gen1 from Gen2 Failed!"); + } + } + + return 0; +} + +static int fiji_set_power_state_tasks(struct pp_hwmgr *hwmgr, + const void *input) +{ + int tmp_result, result = 0; + + tmp_result = fiji_find_dpm_states_clocks_in_dpm_table(hwmgr, input); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to find DPM states clocks in DPM table!", + result = tmp_result); + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_PCIEPerformanceRequest)) { + tmp_result = + fiji_request_link_speed_change_before_state_change(hwmgr, input); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to request link speed change before state change!", + result = tmp_result); + } + + tmp_result = fiji_freeze_sclk_mclk_dpm(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to freeze SCLK MCLK DPM!", result = tmp_result); + + tmp_result = fiji_populate_and_upload_sclk_mclk_dpm_levels(hwmgr, input); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to populate and upload SCLK MCLK DPM levels!", + result = tmp_result); + + tmp_result = fiji_generate_dpm_level_enable_mask(hwmgr, input); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to generate DPM level enabled mask!", + result = tmp_result); + + tmp_result = fiji_update_vce_dpm(hwmgr, input); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to update VCE DPM!", + result = tmp_result); + + tmp_result = fiji_update_sclk_threshold(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to update SCLK threshold!", + result = tmp_result); + + tmp_result = fiji_program_mem_timing_parameters(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to program memory timing parameters!", + result = tmp_result); + + tmp_result = fiji_unfreeze_sclk_mclk_dpm(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to unfreeze SCLK MCLK DPM!", + result = tmp_result); + + tmp_result = fiji_upload_dpm_level_enable_mask(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to upload DPM level enabled mask!", + result = tmp_result); + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + 
PHM_PlatformCaps_PCIEPerformanceRequest)) { + tmp_result = + fiji_notify_link_speed_change_after_state_change(hwmgr, input); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to notify link speed change after state change!", + result = tmp_result); + } + + return result; +} + +static int fiji_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low) +{ + struct pp_power_state *ps; + struct fiji_power_state *fiji_ps; + + if (hwmgr == NULL) + return -EINVAL; + + ps = hwmgr->request_ps; + + if (ps == NULL) + return -EINVAL; + + fiji_ps = cast_phw_fiji_power_state(&ps->hardware); + + if (low) + return fiji_ps->performance_levels[0].engine_clock; + else + return fiji_ps->performance_levels + [fiji_ps->performance_level_count-1].engine_clock; +} + +static int fiji_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low) +{ + struct pp_power_state *ps; + struct fiji_power_state *fiji_ps; + + if (hwmgr == NULL) + return -EINVAL; + + ps = hwmgr->request_ps; + + if (ps == NULL) + return -EINVAL; + + fiji_ps = cast_phw_fiji_power_state(&ps->hardware); + + if (low) + return fiji_ps->performance_levels[0].memory_clock; + else + return fiji_ps->performance_levels + [fiji_ps->performance_level_count-1].memory_clock; +} + +static void fiji_print_current_perforce_level( + struct pp_hwmgr *hwmgr, struct seq_file *m) +{ + uint32_t sclk, mclk, activity_percent = 0; + uint32_t offset; + struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + + smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetSclkFrequency); + + sclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0); + + smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetMclkFrequency); + + mclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0); + seq_printf(m, "\n [ mclk ]: %u MHz\n\n [ sclk ]: %u MHz\n", + mclk / 100, sclk / 100); + + offset = data->soft_regs_start + offsetof(SMU73_SoftRegisters, AverageGraphicsActivity); + activity_percent = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset); + activity_percent += 0x80; + activity_percent >>= 8; + + seq_printf(m, "\n [GPU load]: %u%%\n\n", activity_percent > 100 ? 100 : activity_percent); +} + +static int fiji_program_display_gap(struct pp_hwmgr *hwmgr) +{ + struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + uint32_t num_active_displays = 0; + uint32_t display_gap = cgs_read_ind_register(hwmgr->device, + CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL); + uint32_t display_gap2; + uint32_t pre_vbi_time_in_us; + uint32_t frame_time_in_us; + uint32_t ref_clock; + uint32_t refresh_rate = 0; + struct cgs_display_info info = {0}; + struct cgs_mode_info mode_info; + + info.mode_info = &mode_info; + + cgs_get_active_displays_info(hwmgr->device, &info); + num_active_displays = info.display_count; + + display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL, + DISP_GAP, (num_active_displays > 0)? 
+ DISPLAY_GAP_VBLANK_OR_WM : DISPLAY_GAP_IGNORE); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_DISPLAY_GAP_CNTL, display_gap); + + ref_clock = mode_info.ref_clock; + refresh_rate = mode_info.refresh_rate; + + if (refresh_rate == 0) + refresh_rate = 60; + + frame_time_in_us = 1000000 / refresh_rate; + + pre_vbi_time_in_us = frame_time_in_us - 200 - mode_info.vblank_time_us; + display_gap2 = pre_vbi_time_in_us * (ref_clock / 100); + + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_DISPLAY_GAP_CNTL2, display_gap2); + + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + data->soft_regs_start + + offsetof(SMU73_SoftRegisters, PreVBlankGap), 0x64); + + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + data->soft_regs_start + + offsetof(SMU73_SoftRegisters, VBlankTimeout), + (frame_time_in_us - pre_vbi_time_in_us)); + + if (num_active_displays == 1) + tonga_notify_smc_display_change(hwmgr, true); + + return 0; +} + +int fiji_display_configuration_changed_task(struct pp_hwmgr *hwmgr) +{ + return fiji_program_display_gap(hwmgr); +} + +static int fiji_set_max_fan_pwm_output(struct pp_hwmgr *hwmgr, + uint16_t us_max_fan_pwm) +{ + hwmgr->thermal_controller. + advanceFanControlParameters.usMaxFanPWM = us_max_fan_pwm; + + if (phm_is_hw_access_blocked(hwmgr)) + return 0; + + return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_SetFanPwmMax, us_max_fan_pwm); +} + +static int fiji_set_max_fan_rpm_output(struct pp_hwmgr *hwmgr, + uint16_t us_max_fan_rpm) +{ + hwmgr->thermal_controller. + advanceFanControlParameters.usMaxFanRPM = us_max_fan_rpm; + + if (phm_is_hw_access_blocked(hwmgr)) + return 0; + + return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_SetFanRpmMax, us_max_fan_rpm); +} + +int fiji_dpm_set_interrupt_state(void *private_data, + unsigned src_id, unsigned type, + int enabled) +{ + uint32_t cg_thermal_int; + struct pp_hwmgr *hwmgr = ((struct pp_eventmgr *)private_data)->hwmgr; + + if (hwmgr == NULL) + return -EINVAL; + + switch (type) { + case AMD_THERMAL_IRQ_LOW_TO_HIGH: + if (enabled) { + cg_thermal_int = cgs_read_ind_register(hwmgr->device, + CGS_IND_REG__SMC, ixCG_THERMAL_INT); + cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK; + cgs_write_ind_register(hwmgr->device, + CGS_IND_REG__SMC, ixCG_THERMAL_INT, cg_thermal_int); + } else { + cg_thermal_int = cgs_read_ind_register(hwmgr->device, + CGS_IND_REG__SMC, ixCG_THERMAL_INT); + cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK; + cgs_write_ind_register(hwmgr->device, + CGS_IND_REG__SMC, ixCG_THERMAL_INT, cg_thermal_int); + } + break; + + case AMD_THERMAL_IRQ_HIGH_TO_LOW: + if (enabled) { + cg_thermal_int = cgs_read_ind_register(hwmgr->device, + CGS_IND_REG__SMC, ixCG_THERMAL_INT); + cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK; + cgs_write_ind_register(hwmgr->device, + CGS_IND_REG__SMC, ixCG_THERMAL_INT, cg_thermal_int); + } else { + cg_thermal_int = cgs_read_ind_register(hwmgr->device, + CGS_IND_REG__SMC, ixCG_THERMAL_INT); + cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK; + cgs_write_ind_register(hwmgr->device, + CGS_IND_REG__SMC, ixCG_THERMAL_INT, cg_thermal_int); + } + break; + default: + break; + } + return 0; +} + +int fiji_register_internal_thermal_interrupt(struct pp_hwmgr *hwmgr, + const void *thermal_interrupt_info) +{ + int result; + const struct pp_interrupt_registration_info *info = + (const struct pp_interrupt_registration_info *) + thermal_interrupt_info; + + if (info == NULL) + return 
-EINVAL; + + result = cgs_add_irq_source(hwmgr->device, 230, AMD_THERMAL_IRQ_LAST, + fiji_dpm_set_interrupt_state, + info->call_back, info->context); + + if (result) + return -EINVAL; + + result = cgs_add_irq_source(hwmgr->device, 231, AMD_THERMAL_IRQ_LAST, + fiji_dpm_set_interrupt_state, + info->call_back, info->context); + + if (result) + return -EINVAL; + + return 0; +} + +static int fiji_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode) +{ + if (mode) { + /* stop auto-manage */ + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_MicrocodeFanControl)) + fiji_fan_ctrl_stop_smc_fan_control(hwmgr); + fiji_fan_ctrl_set_static_mode(hwmgr, mode); + } else + /* restart auto-manage */ + fiji_fan_ctrl_reset_fan_speed_to_default(hwmgr); + + return 0; +} + +static int fiji_get_fan_control_mode(struct pp_hwmgr *hwmgr) +{ + if (hwmgr->fan_ctrl_is_in_default_mode) + return hwmgr->fan_ctrl_default_mode; + else + return PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + CG_FDO_CTRL2, FDO_PWM_MODE); +} + +static const struct pp_hwmgr_func fiji_hwmgr_funcs = { + .backend_init = &fiji_hwmgr_backend_init, + .backend_fini = &tonga_hwmgr_backend_fini, + .asic_setup = &fiji_setup_asic_task, + .dynamic_state_management_enable = &fiji_enable_dpm_tasks, + .force_dpm_level = &fiji_dpm_force_dpm_level, + .get_num_of_pp_table_entries = &tonga_get_number_of_powerplay_table_entries, + .get_power_state_size = &fiji_get_power_state_size, + .get_pp_table_entry = &fiji_get_pp_table_entry, + .patch_boot_state = &fiji_patch_boot_state, + .apply_state_adjust_rules = &fiji_apply_state_adjust_rules, + .power_state_set = &fiji_set_power_state_tasks, + .get_sclk = &fiji_dpm_get_sclk, + .get_mclk = &fiji_dpm_get_mclk, + .print_current_perforce_level = &fiji_print_current_perforce_level, + .powergate_uvd = &fiji_phm_powergate_uvd, + .powergate_vce = &fiji_phm_powergate_vce, + .disable_clock_power_gating = &fiji_phm_disable_clock_power_gating, + .notify_smc_display_config_after_ps_adjustment = + &tonga_notify_smc_display_config_after_ps_adjustment, + .display_config_changed = &fiji_display_configuration_changed_task, + .set_max_fan_pwm_output = fiji_set_max_fan_pwm_output, + .set_max_fan_rpm_output = fiji_set_max_fan_rpm_output, + .get_temperature = fiji_thermal_get_temperature, + .stop_thermal_controller = fiji_thermal_stop_thermal_controller, + .get_fan_speed_info = fiji_fan_ctrl_get_fan_speed_info, + .get_fan_speed_percent = fiji_fan_ctrl_get_fan_speed_percent, + .set_fan_speed_percent = fiji_fan_ctrl_set_fan_speed_percent, + .reset_fan_speed_to_default = fiji_fan_ctrl_reset_fan_speed_to_default, + .get_fan_speed_rpm = fiji_fan_ctrl_get_fan_speed_rpm, + .set_fan_speed_rpm = fiji_fan_ctrl_set_fan_speed_rpm, + .uninitialize_thermal_controller = fiji_thermal_ctrl_uninitialize_thermal_controller, + .register_internal_thermal_interrupt = fiji_register_internal_thermal_interrupt, + .set_fan_control_mode = fiji_set_fan_control_mode, + .get_fan_control_mode = fiji_get_fan_control_mode, +}; + +int fiji_hwmgr_init(struct pp_hwmgr *hwmgr) +{ + struct fiji_hwmgr *data; + int ret = 0; + + data = kzalloc(sizeof(struct fiji_hwmgr), GFP_KERNEL); + if (data == NULL) + return -ENOMEM; + + hwmgr->backend = data; + hwmgr->hwmgr_func = &fiji_hwmgr_funcs; + hwmgr->pptable_func = &tonga_pptable_funcs; + pp_fiji_thermal_initialize(hwmgr); + return ret; +} diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.h new file mode 100644 index 
000000000000..22e273b1c1c5 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.h @@ -0,0 +1,361 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef _FIJI_HWMGR_H_ +#define _FIJI_HWMGR_H_ + +#include "hwmgr.h" +#include "smu73.h" +#include "smu73_discrete.h" +#include "ppatomctrl.h" +#include "fiji_ppsmc.h" + +#define FIJI_MAX_HARDWARE_POWERLEVELS 2 +#define FIJI_AT_DFLT 30 + +#define FIJI_VOLTAGE_CONTROL_NONE 0x0 +#define FIJI_VOLTAGE_CONTROL_BY_GPIO 0x1 +#define FIJI_VOLTAGE_CONTROL_BY_SVID2 0x2 +#define FIJI_VOLTAGE_CONTROL_MERGED 0x3 + +#define DPMTABLE_OD_UPDATE_SCLK 0x00000001 +#define DPMTABLE_OD_UPDATE_MCLK 0x00000002 +#define DPMTABLE_UPDATE_SCLK 0x00000004 +#define DPMTABLE_UPDATE_MCLK 0x00000008 + +struct fiji_performance_level { + uint32_t memory_clock; + uint32_t engine_clock; + uint16_t pcie_gen; + uint16_t pcie_lane; +}; + +struct fiji_uvd_clocks { + uint32_t vclk; + uint32_t dclk; +}; + +struct fiji_vce_clocks { + uint32_t evclk; + uint32_t ecclk; +}; + +struct fiji_power_state { + uint32_t magic; + struct fiji_uvd_clocks uvd_clks; + struct fiji_vce_clocks vce_clks; + uint32_t sam_clk; + uint32_t acp_clk; + uint16_t performance_level_count; + bool dc_compatible; + uint32_t sclk_threshold; + struct fiji_performance_level performance_levels[FIJI_MAX_HARDWARE_POWERLEVELS]; +}; + +struct fiji_dpm_level { + bool enabled; + uint32_t value; + uint32_t param1; +}; + +#define FIJI_MAX_DEEPSLEEP_DIVIDER_ID 5 +#define MAX_REGULAR_DPM_NUMBER 8 +#define FIJI_MINIMUM_ENGINE_CLOCK 2500 + +struct fiji_single_dpm_table { + uint32_t count; + struct fiji_dpm_level dpm_levels[MAX_REGULAR_DPM_NUMBER]; +}; + +struct fiji_dpm_table { + struct fiji_single_dpm_table sclk_table; + struct fiji_single_dpm_table mclk_table; + struct fiji_single_dpm_table pcie_speed_table; + struct fiji_single_dpm_table vddc_table; + struct fiji_single_dpm_table vddci_table; + struct fiji_single_dpm_table mvdd_table; +}; + +struct fiji_clock_registers { + uint32_t vCG_SPLL_FUNC_CNTL; + uint32_t vCG_SPLL_FUNC_CNTL_2; + uint32_t vCG_SPLL_FUNC_CNTL_3; + uint32_t vCG_SPLL_FUNC_CNTL_4; + uint32_t vCG_SPLL_SPREAD_SPECTRUM; + uint32_t vCG_SPLL_SPREAD_SPECTRUM_2; + uint32_t vDLL_CNTL; + uint32_t vMCLK_PWRMGT_CNTL; + uint32_t vMPLL_AD_FUNC_CNTL; + uint32_t vMPLL_DQ_FUNC_CNTL; + uint32_t vMPLL_FUNC_CNTL; + uint32_t vMPLL_FUNC_CNTL_1; + uint32_t vMPLL_FUNC_CNTL_2; + uint32_t vMPLL_SS1; + uint32_t vMPLL_SS2; +}; + 
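The DPMTABLE_* defines above are single-bit flags; fiji_hwmgr.c ORs them into need_update_smu7_dpm_table and later tests groups of them with expressions such as (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK), which only works because the bits never overlap. A minimal sketch of the same test written with the more conventional bitwise OR (the helper name is made up for illustration, it is not part of the patch):

static bool sclk_levels_need_reupload(uint32_t need_update_smu7_dpm_table)
{
	/* Same condition as in fiji_freeze_sclk_mclk_dpm() and
	 * fiji_populate_and_upload_sclk_mclk_dpm_levels(), using '|'
	 * instead of '+': true when either an overdriven SCLK or a
	 * DeepSleep-related SCLK change requires re-uploading the levels.
	 */
	return (need_update_smu7_dpm_table &
		(DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK)) != 0;
}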
+struct fiji_voltage_smio_registers { + uint32_t vS0_VID_LOWER_SMIO_CNTL; +}; + +#define FIJI_MAX_LEAKAGE_COUNT 8 +struct fiji_leakage_voltage { + uint16_t count; + uint16_t leakage_id[FIJI_MAX_LEAKAGE_COUNT]; + uint16_t actual_voltage[FIJI_MAX_LEAKAGE_COUNT]; +}; + +struct fiji_vbios_boot_state { + uint16_t mvdd_bootup_value; + uint16_t vddc_bootup_value; + uint16_t vddci_bootup_value; + uint32_t sclk_bootup_value; + uint32_t mclk_bootup_value; + uint16_t pcie_gen_bootup_value; + uint16_t pcie_lane_bootup_value; +}; + +struct fiji_bacos { + uint32_t best_match; + uint32_t baco_flags; + struct fiji_performance_level performance_level; +}; + +/* Ultra Low Voltage parameter structure */ +struct fiji_ulv_parm { + bool ulv_supported; + uint32_t cg_ulv_parameter; + uint32_t ulv_volt_change_delay; + struct fiji_performance_level ulv_power_level; +}; + +struct fiji_display_timing { + uint32_t min_clock_in_sr; + uint32_t num_existing_displays; +}; + +struct fiji_dpmlevel_enable_mask { + uint32_t uvd_dpm_enable_mask; + uint32_t vce_dpm_enable_mask; + uint32_t acp_dpm_enable_mask; + uint32_t samu_dpm_enable_mask; + uint32_t sclk_dpm_enable_mask; + uint32_t mclk_dpm_enable_mask; + uint32_t pcie_dpm_enable_mask; +}; + +struct fiji_pcie_perf_range { + uint16_t max; + uint16_t min; +}; + +struct fiji_hwmgr { + struct fiji_dpm_table dpm_table; + struct fiji_dpm_table golden_dpm_table; + + uint32_t voting_rights_clients0; + uint32_t voting_rights_clients1; + uint32_t voting_rights_clients2; + uint32_t voting_rights_clients3; + uint32_t voting_rights_clients4; + uint32_t voting_rights_clients5; + uint32_t voting_rights_clients6; + uint32_t voting_rights_clients7; + uint32_t static_screen_threshold_unit; + uint32_t static_screen_threshold; + uint32_t voltage_control; + uint32_t vddc_vddci_delta; + + uint32_t active_auto_throttle_sources; + + struct fiji_clock_registers clock_registers; + struct fiji_voltage_smio_registers voltage_smio_registers; + + bool is_memory_gddr5; + uint16_t acpi_vddc; + bool pspp_notify_required; + uint16_t force_pcie_gen; + uint16_t acpi_pcie_gen; + uint32_t pcie_gen_cap; + uint32_t pcie_lane_cap; + uint32_t pcie_spc_cap; + struct fiji_leakage_voltage vddc_leakage; + struct fiji_leakage_voltage Vddci_leakage; + + uint32_t mvdd_control; + uint32_t vddc_mask_low; + uint32_t mvdd_mask_low; + uint16_t max_vddc_in_pptable; + uint16_t min_vddc_in_pptable; + uint16_t max_vddci_in_pptable; + uint16_t min_vddci_in_pptable; + uint32_t mclk_strobe_mode_threshold; + uint32_t mclk_stutter_mode_threshold; + uint32_t mclk_edc_enable_threshold; + uint32_t mclk_edcwr_enable_threshold; + bool is_uvd_enabled; + struct fiji_vbios_boot_state vbios_boot_state; + + bool battery_state; + bool is_tlu_enabled; + + /* ---- SMC SRAM Address of firmware header tables ---- */ + uint32_t sram_end; + uint32_t dpm_table_start; + uint32_t soft_regs_start; + uint32_t mc_reg_table_start; + uint32_t fan_table_start; + uint32_t arb_table_start; + struct SMU73_Discrete_DpmTable smc_state_table; + struct SMU73_Discrete_Ulv ulv_setting; + + /* ---- Stuff originally coming from Evergreen ---- */ + uint32_t vddci_control; + struct pp_atomctrl_voltage_table vddc_voltage_table; + struct pp_atomctrl_voltage_table vddci_voltage_table; + struct pp_atomctrl_voltage_table mvdd_voltage_table; + + uint32_t mgcg_cgtt_local2; + uint32_t mgcg_cgtt_local3; + uint32_t gpio_debug; + uint32_t mc_micro_code_feature; + uint32_t highest_mclk; + uint16_t acpi_vddci; + uint8_t mvdd_high_index; + uint8_t mvdd_low_index; + bool dll_default_on; + 
bool performance_request_registered; + + /* ---- Low Power Features ---- */ + struct fiji_bacos bacos; + struct fiji_ulv_parm ulv; + + /* ---- CAC Stuff ---- */ + uint32_t cac_table_start; + bool cac_configuration_required; + bool driver_calculate_cac_leakage; + bool cac_enabled; + + /* ---- DPM2 Parameters ---- */ + uint32_t power_containment_features; + bool enable_dte_feature; + bool enable_tdc_limit_feature; + bool enable_pkg_pwr_tracking_feature; + bool disable_uvd_power_tune_feature; + struct fiji_pt_defaults *power_tune_defaults; + struct SMU73_Discrete_PmFuses power_tune_table; + uint32_t dte_tj_offset; + uint32_t fast_watermark_threshold; + + /* ---- Phase Shedding ---- */ + bool vddc_phase_shed_control; + + /* ---- DI/DT ---- */ + struct fiji_display_timing display_timing; + + /* ---- Thermal Temperature Setting ---- */ + struct fiji_dpmlevel_enable_mask dpm_level_enable_mask; + uint32_t need_update_smu7_dpm_table; + uint32_t sclk_dpm_key_disabled; + uint32_t mclk_dpm_key_disabled; + uint32_t pcie_dpm_key_disabled; + uint32_t min_engine_clocks; + struct fiji_pcie_perf_range pcie_gen_performance; + struct fiji_pcie_perf_range pcie_lane_performance; + struct fiji_pcie_perf_range pcie_gen_power_saving; + struct fiji_pcie_perf_range pcie_lane_power_saving; + bool use_pcie_performance_levels; + bool use_pcie_power_saving_levels; + uint32_t activity_target[SMU73_MAX_LEVELS_GRAPHICS]; + uint32_t mclk_activity_target; + uint32_t mclk_dpm0_activity_target; + uint32_t low_sclk_interrupt_threshold; + uint32_t last_mclk_dpm_enable_mask; + bool uvd_enabled; + + /* ---- Power Gating States ---- */ + bool uvd_power_gated; + bool vce_power_gated; + bool samu_power_gated; + bool acp_power_gated; + bool pg_acp_init; + bool frtc_enabled; + bool frtc_status_changed; +}; + +/* To convert to Q8.8 format for firmware */ +#define FIJI_Q88_FORMAT_CONVERSION_UNIT 256 + +enum Fiji_I2CLineID { + Fiji_I2CLineID_DDC1 = 0x90, + Fiji_I2CLineID_DDC2 = 0x91, + Fiji_I2CLineID_DDC3 = 0x92, + Fiji_I2CLineID_DDC4 = 0x93, + Fiji_I2CLineID_DDC5 = 0x94, + Fiji_I2CLineID_DDC6 = 0x95, + Fiji_I2CLineID_SCLSDA = 0x96, + Fiji_I2CLineID_DDCVGA = 0x97 +}; + +#define Fiji_I2C_DDC1DATA 0 +#define Fiji_I2C_DDC1CLK 1 +#define Fiji_I2C_DDC2DATA 2 +#define Fiji_I2C_DDC2CLK 3 +#define Fiji_I2C_DDC3DATA 4 +#define Fiji_I2C_DDC3CLK 5 +#define Fiji_I2C_SDA 40 +#define Fiji_I2C_SCL 41 +#define Fiji_I2C_DDC4DATA 65 +#define Fiji_I2C_DDC4CLK 66 +#define Fiji_I2C_DDC5DATA 0x48 +#define Fiji_I2C_DDC5CLK 0x49 +#define Fiji_I2C_DDC6DATA 0x4a +#define Fiji_I2C_DDC6CLK 0x4b +#define Fiji_I2C_DDCVGADATA 0x4c +#define Fiji_I2C_DDCVGACLK 0x4d + +#define FIJI_UNUSED_GPIO_PIN 0x7F + +extern int tonga_initializa_dynamic_state_adjustment_rule_settings(struct pp_hwmgr *hwmgr); +extern int tonga_hwmgr_backend_fini(struct pp_hwmgr *hwmgr); +extern int tonga_get_mc_microcode_version (struct pp_hwmgr *hwmgr); +extern int tonga_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr); +extern int tonga_notify_smc_display_change(struct pp_hwmgr *hwmgr, bool has_display); +int fiji_update_vce_dpm(struct pp_hwmgr *hwmgr, const void *input); +int fiji_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate); +int fiji_update_samu_dpm(struct pp_hwmgr *hwmgr, bool bgate); +int fiji_update_acp_dpm(struct pp_hwmgr *hwmgr, bool bgate); +int fiji_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable); + +#define PP_HOST_TO_SMC_UL(X) cpu_to_be32(X) +#define PP_SMC_TO_HOST_UL(X) be32_to_cpu(X) + +#define PP_HOST_TO_SMC_US(X) cpu_to_be16(X) +#define 
PP_SMC_TO_HOST_US(X) be16_to_cpu(X) + +#define CONVERT_FROM_HOST_TO_SMC_UL(X) ((X) = PP_HOST_TO_SMC_UL(X)) +#define CONVERT_FROM_SMC_TO_HOST_UL(X) ((X) = PP_SMC_TO_HOST_UL(X)) + +#define CONVERT_FROM_HOST_TO_SMC_US(X) ((X) = PP_HOST_TO_SMC_US(X)) + +#endif /* _FIJI_HWMGR_H_ */ diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.c new file mode 100644 index 000000000000..6efcb2bac45f --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.c @@ -0,0 +1,553 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include "hwmgr.h" +#include "smumgr.h" +#include "fiji_hwmgr.h" +#include "fiji_powertune.h" +#include "fiji_smumgr.h" +#include "smu73_discrete.h" +#include "pp_debug.h" + +#define VOLTAGE_SCALE 4 +#define POWERTUNE_DEFAULT_SET_MAX 1 + +struct fiji_pt_defaults fiji_power_tune_data_set_array[POWERTUNE_DEFAULT_SET_MAX] = { + /*sviLoadLIneEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc */ + {1, 0xF, 0xFD, + /* TDC_MAWt, TdcWaterfallCtl, DTEAmbientTempBase */ + 0x19, 5, 45} +}; + +void fiji_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr) +{ + struct fiji_hwmgr *fiji_hwmgr = (struct fiji_hwmgr *)(hwmgr->backend); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + uint32_t tmp = 0; + + if(table_info && + table_info->cac_dtp_table->usPowerTuneDataSetID <= POWERTUNE_DEFAULT_SET_MAX && + table_info->cac_dtp_table->usPowerTuneDataSetID) + fiji_hwmgr->power_tune_defaults = + &fiji_power_tune_data_set_array + [table_info->cac_dtp_table->usPowerTuneDataSetID - 1]; + else + fiji_hwmgr->power_tune_defaults = &fiji_power_tune_data_set_array[0]; + + /* Assume disabled */ + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_PowerContainment); + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_CAC); + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_SQRamping); + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_DBRamping); + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_TDRamping); + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_TCPRamping); + + fiji_hwmgr->dte_tj_offset = tmp; + + if (!tmp) { + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_PowerContainment); + + 
phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_CAC); + + fiji_hwmgr->fast_watermark_threshold = 100; + + tmp = 1; + fiji_hwmgr->enable_dte_feature = tmp ? false : true; + fiji_hwmgr->enable_tdc_limit_feature = tmp ? true : false; + fiji_hwmgr->enable_pkg_pwr_tracking_feature = tmp ? true : false; + } +} + +/* PPGen has the gain setting generated in x * 100 unit + * This function is to convert the unit to x * 4096(0x1000) unit. + * This is the unit expected by SMC firmware + */ +static uint16_t scale_fan_gain_settings(uint16_t raw_setting) +{ + uint32_t tmp; + tmp = raw_setting * 4096 / 100; + return (uint16_t)tmp; +} + +static void get_scl_sda_value(uint8_t line, uint8_t *scl, uint8_t* sda) +{ + switch (line) { + case Fiji_I2CLineID_DDC1 : + *scl = Fiji_I2C_DDC1CLK; + *sda = Fiji_I2C_DDC1DATA; + break; + case Fiji_I2CLineID_DDC2 : + *scl = Fiji_I2C_DDC2CLK; + *sda = Fiji_I2C_DDC2DATA; + break; + case Fiji_I2CLineID_DDC3 : + *scl = Fiji_I2C_DDC3CLK; + *sda = Fiji_I2C_DDC3DATA; + break; + case Fiji_I2CLineID_DDC4 : + *scl = Fiji_I2C_DDC4CLK; + *sda = Fiji_I2C_DDC4DATA; + break; + case Fiji_I2CLineID_DDC5 : + *scl = Fiji_I2C_DDC5CLK; + *sda = Fiji_I2C_DDC5DATA; + break; + case Fiji_I2CLineID_DDC6 : + *scl = Fiji_I2C_DDC6CLK; + *sda = Fiji_I2C_DDC6DATA; + break; + case Fiji_I2CLineID_SCLSDA : + *scl = Fiji_I2C_SCL; + *sda = Fiji_I2C_SDA; + break; + case Fiji_I2CLineID_DDCVGA : + *scl = Fiji_I2C_DDCVGACLK; + *sda = Fiji_I2C_DDCVGADATA; + break; + default: + *scl = 0; + *sda = 0; + break; + } +} + +int fiji_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr) +{ + struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + struct fiji_pt_defaults *defaults = data->power_tune_defaults; + SMU73_Discrete_DpmTable *dpm_table = &(data->smc_state_table); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + struct phm_cac_tdp_table *cac_dtp_table = table_info->cac_dtp_table; + struct pp_advance_fan_control_parameters *fan_table= + &hwmgr->thermal_controller.advanceFanControlParameters; + uint8_t uc_scl, uc_sda; + + /* TDP number of fraction bits are changed from 8 to 7 for Fiji + * as requested by SMC team + */ + dpm_table->DefaultTdp = PP_HOST_TO_SMC_US( + (uint16_t)(cac_dtp_table->usTDP * 128)); + dpm_table->TargetTdp = PP_HOST_TO_SMC_US( + (uint16_t)(cac_dtp_table->usTDP * 128)); + + PP_ASSERT_WITH_CODE(cac_dtp_table->usTargetOperatingTemp <= 255, + "Target Operating Temp is out of Range!",); + + dpm_table->GpuTjMax = (uint8_t)(cac_dtp_table->usTargetOperatingTemp); + dpm_table->GpuTjHyst = 8; + + dpm_table->DTEAmbientTempBase = defaults->DTEAmbientTempBase; + + /* The following are for new Fiji Multi-input fan/thermal control */ + dpm_table->TemperatureLimitEdge = PP_HOST_TO_SMC_US( + cac_dtp_table->usTargetOperatingTemp * 256); + dpm_table->TemperatureLimitHotspot = PP_HOST_TO_SMC_US( + cac_dtp_table->usTemperatureLimitHotspot * 256); + dpm_table->TemperatureLimitLiquid1 = PP_HOST_TO_SMC_US( + cac_dtp_table->usTemperatureLimitLiquid1 * 256); + dpm_table->TemperatureLimitLiquid2 = PP_HOST_TO_SMC_US( + cac_dtp_table->usTemperatureLimitLiquid2 * 256); + dpm_table->TemperatureLimitVrVddc = PP_HOST_TO_SMC_US( + cac_dtp_table->usTemperatureLimitVrVddc * 256); + dpm_table->TemperatureLimitVrMvdd = PP_HOST_TO_SMC_US( + cac_dtp_table->usTemperatureLimitVrMvdd * 256); + dpm_table->TemperatureLimitPlx = PP_HOST_TO_SMC_US( + cac_dtp_table->usTemperatureLimitPlx * 256); + + dpm_table->FanGainEdge = PP_HOST_TO_SMC_US( + 
scale_fan_gain_settings(fan_table->usFanGainEdge)); + dpm_table->FanGainHotspot = PP_HOST_TO_SMC_US( + scale_fan_gain_settings(fan_table->usFanGainHotspot)); + dpm_table->FanGainLiquid = PP_HOST_TO_SMC_US( + scale_fan_gain_settings(fan_table->usFanGainLiquid)); + dpm_table->FanGainVrVddc = PP_HOST_TO_SMC_US( + scale_fan_gain_settings(fan_table->usFanGainVrVddc)); + dpm_table->FanGainVrMvdd = PP_HOST_TO_SMC_US( + scale_fan_gain_settings(fan_table->usFanGainVrMvdd)); + dpm_table->FanGainPlx = PP_HOST_TO_SMC_US( + scale_fan_gain_settings(fan_table->usFanGainPlx)); + dpm_table->FanGainHbm = PP_HOST_TO_SMC_US( + scale_fan_gain_settings(fan_table->usFanGainHbm)); + + dpm_table->Liquid1_I2C_address = cac_dtp_table->ucLiquid1_I2C_address; + dpm_table->Liquid2_I2C_address = cac_dtp_table->ucLiquid2_I2C_address; + dpm_table->Vr_I2C_address = cac_dtp_table->ucVr_I2C_address; + dpm_table->Plx_I2C_address = cac_dtp_table->ucPlx_I2C_address; + + get_scl_sda_value(cac_dtp_table->ucLiquid_I2C_Line, &uc_scl, &uc_sda); + dpm_table->Liquid_I2C_LineSCL = uc_scl; + dpm_table->Liquid_I2C_LineSDA = uc_sda; + + get_scl_sda_value(cac_dtp_table->ucVr_I2C_Line, &uc_scl, &uc_sda); + dpm_table->Vr_I2C_LineSCL = uc_scl; + dpm_table->Vr_I2C_LineSDA = uc_sda; + + get_scl_sda_value(cac_dtp_table->ucPlx_I2C_Line, &uc_scl, &uc_sda); + dpm_table->Plx_I2C_LineSCL = uc_scl; + dpm_table->Plx_I2C_LineSDA = uc_sda; + + return 0; +} + +static int fiji_populate_svi_load_line(struct pp_hwmgr *hwmgr) +{ + struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + struct fiji_pt_defaults *defaults = data->power_tune_defaults; + + data->power_tune_table.SviLoadLineEn = defaults->SviLoadLineEn; + data->power_tune_table.SviLoadLineVddC = defaults->SviLoadLineVddC; + data->power_tune_table.SviLoadLineTrimVddC = 3; + data->power_tune_table.SviLoadLineOffsetVddC = 0; + + return 0; +} + +static int fiji_populate_tdc_limit(struct pp_hwmgr *hwmgr) +{ + uint16_t tdc_limit; + struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + struct fiji_pt_defaults *defaults = data->power_tune_defaults; + + /* TDC number of fraction bits are changed from 8 to 7 + * for Fiji as requested by SMC team + */ + tdc_limit = (uint16_t)(table_info->cac_dtp_table->usTDC * 128); + data->power_tune_table.TDC_VDDC_PkgLimit = + CONVERT_FROM_HOST_TO_SMC_US(tdc_limit); + data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc = + defaults->TDC_VDDC_ThrottleReleaseLimitPerc; + data->power_tune_table.TDC_MAWt = defaults->TDC_MAWt; + + return 0; +} + +static int fiji_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset) +{ + struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + struct fiji_pt_defaults *defaults = data->power_tune_defaults; + uint32_t temp; + + if (fiji_read_smc_sram_dword(hwmgr->smumgr, + fuse_table_offset + + offsetof(SMU73_Discrete_PmFuses, TdcWaterfallCtl), + (uint32_t *)&temp, data->sram_end)) + PP_ASSERT_WITH_CODE(false, + "Attempt to read PmFuses.DW6 (SviLoadLineEn) from SMC Failed!", + return -EINVAL); + else { + data->power_tune_table.TdcWaterfallCtl = defaults->TdcWaterfallCtl; + data->power_tune_table.LPMLTemperatureMin = + (uint8_t)((temp >> 16) & 0xff); + data->power_tune_table.LPMLTemperatureMax = + (uint8_t)((temp >> 8) & 0xff); + data->power_tune_table.Reserved = (uint8_t)(temp & 0xff); + } + return 0; +} + +static int fiji_populate_temperature_scaler(struct pp_hwmgr *hwmgr) +{ + int i; + struct 
fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + + /* Currently not used. Set all to zero. */ + for (i = 0; i < 16; i++) + data->power_tune_table.LPMLTemperatureScaler[i] = 0; + + return 0; +} + +static int fiji_populate_fuzzy_fan(struct pp_hwmgr *hwmgr) +{ + struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + + if( (hwmgr->thermal_controller.advanceFanControlParameters. + usFanOutputSensitivity & (1 << 15)) || + 0 == hwmgr->thermal_controller.advanceFanControlParameters. + usFanOutputSensitivity ) + hwmgr->thermal_controller.advanceFanControlParameters. + usFanOutputSensitivity = hwmgr->thermal_controller. + advanceFanControlParameters.usDefaultFanOutputSensitivity; + + data->power_tune_table.FuzzyFan_PwmSetDelta = + PP_HOST_TO_SMC_US(hwmgr->thermal_controller. + advanceFanControlParameters.usFanOutputSensitivity); + return 0; +} + +static int fiji_populate_gnb_lpml(struct pp_hwmgr *hwmgr) +{ + int i; + struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + + /* Currently not used. Set all to zero. */ + for (i = 0; i < 16; i++) + data->power_tune_table.GnbLPML[i] = 0; + + return 0; +} + +static int fiji_min_max_vgnb_lpml_id_from_bapm_vddc(struct pp_hwmgr *hwmgr) +{ + /* int i, min, max; + struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + uint8_t * pHiVID = data->power_tune_table.BapmVddCVidHiSidd; + uint8_t * pLoVID = data->power_tune_table.BapmVddCVidLoSidd; + + min = max = pHiVID[0]; + for (i = 0; i < 8; i++) { + if (0 != pHiVID[i]) { + if (min > pHiVID[i]) + min = pHiVID[i]; + if (max < pHiVID[i]) + max = pHiVID[i]; + } + + if (0 != pLoVID[i]) { + if (min > pLoVID[i]) + min = pLoVID[i]; + if (max < pLoVID[i]) + max = pLoVID[i]; + } + } + + PP_ASSERT_WITH_CODE((0 != min) && (0 != max), "BapmVddcVidSidd table does not exist!", return int_Failed); + data->power_tune_table.GnbLPMLMaxVid = (uint8_t)max; + data->power_tune_table.GnbLPMLMinVid = (uint8_t)min; +*/ + return 0; +} + +static int fiji_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr) +{ + struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + uint16_t HiSidd = data->power_tune_table.BapmVddCBaseLeakageHiSidd; + uint16_t LoSidd = data->power_tune_table.BapmVddCBaseLeakageLoSidd; + struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table; + + HiSidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256); + LoSidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256); + + data->power_tune_table.BapmVddCBaseLeakageHiSidd = + CONVERT_FROM_HOST_TO_SMC_US(HiSidd); + data->power_tune_table.BapmVddCBaseLeakageLoSidd = + CONVERT_FROM_HOST_TO_SMC_US(LoSidd); + + return 0; +} + +int fiji_populate_pm_fuses(struct pp_hwmgr *hwmgr) +{ + struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + uint32_t pm_fuse_table_offset; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_PowerContainment)) { + if (fiji_read_smc_sram_dword(hwmgr->smumgr, + SMU7_FIRMWARE_HEADER_LOCATION + + offsetof(SMU73_Firmware_Header, PmFuseTable), + &pm_fuse_table_offset, data->sram_end)) + PP_ASSERT_WITH_CODE(false, + "Attempt to get pm_fuse_table_offset Failed!", + return -EINVAL); + + /* DW6 */ + if (fiji_populate_svi_load_line(hwmgr)) + PP_ASSERT_WITH_CODE(false, + "Attempt to populate SviLoadLine Failed!", + return -EINVAL); + /* DW7 */ + if (fiji_populate_tdc_limit(hwmgr)) + PP_ASSERT_WITH_CODE(false, + "Attempt to populate TDCLimit Failed!", 
return -EINVAL); + /* DW8 */ + if (fiji_populate_dw8(hwmgr, pm_fuse_table_offset)) + PP_ASSERT_WITH_CODE(false, + "Attempt to populate TdcWaterfallCtl, " + "LPMLTemperature Min and Max Failed!", + return -EINVAL); + + /* DW9-DW12 */ + if (0 != fiji_populate_temperature_scaler(hwmgr)) + PP_ASSERT_WITH_CODE(false, + "Attempt to populate LPMLTemperatureScaler Failed!", + return -EINVAL); + + /* DW13-DW14 */ + if(fiji_populate_fuzzy_fan(hwmgr)) + PP_ASSERT_WITH_CODE(false, + "Attempt to populate Fuzzy Fan Control parameters Failed!", + return -EINVAL); + + /* DW15-DW18 */ + if (fiji_populate_gnb_lpml(hwmgr)) + PP_ASSERT_WITH_CODE(false, + "Attempt to populate GnbLPML Failed!", + return -EINVAL); + + /* DW19 */ + if (fiji_min_max_vgnb_lpml_id_from_bapm_vddc(hwmgr)) + PP_ASSERT_WITH_CODE(false, + "Attempt to populate GnbLPML Min and Max Vid Failed!", + return -EINVAL); + + /* DW20 */ + if (fiji_populate_bapm_vddc_base_leakage_sidd(hwmgr)) + PP_ASSERT_WITH_CODE(false, + "Attempt to populate BapmVddCBaseLeakage Hi and Lo " + "Sidd Failed!", return -EINVAL); + + if (fiji_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset, + (uint8_t *)&data->power_tune_table, + sizeof(struct SMU73_Discrete_PmFuses), data->sram_end)) + PP_ASSERT_WITH_CODE(false, + "Attempt to download PmFuseTable Failed!", + return -EINVAL); + } + return 0; +} + +int fiji_enable_smc_cac(struct pp_hwmgr *hwmgr) +{ + struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + int result = 0; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_CAC)) { + int smc_result; + smc_result = smum_send_msg_to_smc(hwmgr->smumgr, + (uint16_t)(PPSMC_MSG_EnableCac)); + PP_ASSERT_WITH_CODE((0 == smc_result), + "Failed to enable CAC in SMC.", result = -1); + + data->cac_enabled = (0 == smc_result) ? 
true : false; + } + return result; +} + +int fiji_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n) +{ + struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + + if(data->power_containment_features & + POWERCONTAINMENT_FEATURE_PkgPwrLimit) + return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_PkgPwrSetLimit, n); + return 0; +} + +static int fiji_set_overdriver_target_tdp(struct pp_hwmgr *pHwMgr, uint32_t target_tdp) +{ + return smum_send_msg_to_smc_with_parameter(pHwMgr->smumgr, + PPSMC_MSG_OverDriveSetTargetTdp, target_tdp); +} + +int fiji_enable_power_containment(struct pp_hwmgr *hwmgr) +{ + struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + int smc_result; + int result = 0; + + data->power_containment_features = 0; + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_PowerContainment)) { + if (data->enable_dte_feature) { + smc_result = smum_send_msg_to_smc(hwmgr->smumgr, + (uint16_t)(PPSMC_MSG_EnableDTE)); + PP_ASSERT_WITH_CODE((0 == smc_result), + "Failed to enable DTE in SMC.", result = -1;); + if (0 == smc_result) + data->power_containment_features |= POWERCONTAINMENT_FEATURE_DTE; + } + + if (data->enable_tdc_limit_feature) { + smc_result = smum_send_msg_to_smc(hwmgr->smumgr, + (uint16_t)(PPSMC_MSG_TDCLimitEnable)); + PP_ASSERT_WITH_CODE((0 == smc_result), + "Failed to enable TDCLimit in SMC.", result = -1;); + if (0 == smc_result) + data->power_containment_features |= + POWERCONTAINMENT_FEATURE_TDCLimit; + } + + if (data->enable_pkg_pwr_tracking_feature) { + smc_result = smum_send_msg_to_smc(hwmgr->smumgr, + (uint16_t)(PPSMC_MSG_PkgPwrLimitEnable)); + PP_ASSERT_WITH_CODE((0 == smc_result), + "Failed to enable PkgPwrTracking in SMC.", result = -1;); + if (0 == smc_result) { + struct phm_cac_tdp_table *cac_table = + table_info->cac_dtp_table; + uint32_t default_limit = + (uint32_t)(cac_table->usMaximumPowerDeliveryLimit * 256); + + data->power_containment_features |= + POWERCONTAINMENT_FEATURE_PkgPwrLimit; + + if (fiji_set_power_limit(hwmgr, default_limit)) + printk(KERN_ERR "Failed to set Default Power Limit in SMC!"); + } + } + } + return result; +} + +int fiji_power_control_set_level(struct pp_hwmgr *hwmgr) +{ + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table; + int adjust_percent, target_tdp; + int result = 0; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_PowerContainment)) { + /* adjustment percentage has already been validated */ + adjust_percent = hwmgr->platform_descriptor.TDPAdjustmentPolarity ? + hwmgr->platform_descriptor.TDPAdjustment : + (-1 * hwmgr->platform_descriptor.TDPAdjustment); + /* SMC requested that target_tdp to be 7 bit fraction in DPM table + * but message to be 8 bit fraction for messages + */ + target_tdp = ((100 + adjust_percent) * (int)(cac_table->usTDP * 256)) / 100; + result = fiji_set_overdriver_target_tdp(hwmgr, (uint32_t)target_tdp); + } + + return result; +} diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.h b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.h new file mode 100644 index 000000000000..55e58200f33a --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.h @@ -0,0 +1,66 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#ifndef FIJI_POWERTUNE_H +#define FIJI_POWERTUNE_H + +enum fiji_pt_config_reg_type { + FIJI_CONFIGREG_MMR = 0, + FIJI_CONFIGREG_SMC_IND, + FIJI_CONFIGREG_DIDT_IND, + FIJI_CONFIGREG_CACHE, + FIJI_CONFIGREG_MAX +}; + +/* PowerContainment Features */ +#define POWERCONTAINMENT_FEATURE_DTE 0x00000001 +#define POWERCONTAINMENT_FEATURE_TDCLimit 0x00000002 +#define POWERCONTAINMENT_FEATURE_PkgPwrLimit 0x00000004 + +struct fiji_pt_config_reg { + uint32_t offset; + uint32_t mask; + uint32_t shift; + uint32_t value; + enum fiji_pt_config_reg_type type; +}; + +struct fiji_pt_defaults +{ + uint8_t SviLoadLineEn; + uint8_t SviLoadLineVddC; + uint8_t TDC_VDDC_ThrottleReleaseLimitPerc; + uint8_t TDC_MAWt; + uint8_t TdcWaterfallCtl; + uint8_t DTEAmbientTempBase; +}; + +void fiji_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr); +int fiji_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr); +int fiji_populate_pm_fuses(struct pp_hwmgr *hwmgr); +int fiji_enable_smc_cac(struct pp_hwmgr *hwmgr); +int fiji_enable_power_containment(struct pp_hwmgr *hwmgr); +int fiji_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n); +int fiji_power_control_set_level(struct pp_hwmgr *hwmgr); + +#endif /* FIJI_POWERTUNE_H */ + diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_thermal.c new file mode 100644 index 000000000000..e76a7de9aa32 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_thermal.c @@ -0,0 +1,687 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include <asm/div64.h>	/* for do_div() */
+#include "fiji_thermal.h"
+#include "fiji_hwmgr.h"
+#include "fiji_smumgr.h"
+#include "fiji_ppsmc.h"
+#include "smu/smu_7_1_3_d.h"
+#include "smu/smu_7_1_3_sh_mask.h"
+
+int fiji_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr,
+		struct phm_fan_speed_info *fan_speed_info)
+{
+
+	if (hwmgr->thermal_controller.fanInfo.bNoFan)
+		return 0;
+
+	fan_speed_info->supports_percent_read = true;
+	fan_speed_info->supports_percent_write = true;
+	fan_speed_info->min_percent = 0;
+	fan_speed_info->max_percent = 100;
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_FanSpeedInTableIsRPM) &&
+			hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution) {
+		fan_speed_info->supports_rpm_read = true;
+		fan_speed_info->supports_rpm_write = true;
+		fan_speed_info->min_rpm = hwmgr->thermal_controller.fanInfo.ulMinRPM;
+		fan_speed_info->max_rpm = hwmgr->thermal_controller.fanInfo.ulMaxRPM;
+	} else {
+		fan_speed_info->min_rpm = 0;
+		fan_speed_info->max_rpm = 0;
+	}
+
+	return 0;
+}
+
+int fiji_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr,
+		uint32_t *speed)
+{
+	uint32_t duty100;
+	uint32_t duty;
+	uint64_t tmp64;
+
+	if (hwmgr->thermal_controller.fanInfo.bNoFan)
+		return 0;
+
+	duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+			CG_FDO_CTRL1, FMAX_DUTY100);
+	duty = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+			CG_THERMAL_STATUS, FDO_PWM_DUTY);
+
+	if (duty100 == 0)
+		return -EINVAL;
+
+	tmp64 = (uint64_t)duty * 100;
+	do_div(tmp64, duty100);
+	*speed = (uint32_t)tmp64;
+
+	if (*speed > 100)
+		*speed = 100;
+
+	return 0;
+}
+
+int fiji_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed)
+{
+	uint32_t tach_period;
+	uint32_t crystal_clock_freq;
+
+	if (hwmgr->thermal_controller.fanInfo.bNoFan ||
+			(hwmgr->thermal_controller.fanInfo.
+			ucTachometerPulsesPerRevolution == 0))
+		return 0;
+
+	tach_period = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+			CG_TACH_STATUS, TACH_PERIOD);
+
+	if (tach_period == 0)
+		return -EINVAL;
+
+	crystal_clock_freq = tonga_get_xclk(hwmgr);
+
+	*speed = 60 * crystal_clock_freq * 10000 / tach_period;
+
+	return 0;
+}
+
+/**
+* Set Fan Speed Control to static mode, so that the user can decide what speed to use.
+* @param hwmgr the address of the powerplay hardware manager.
+* @param mode the fan control mode: 0 = default, 1 = by percent, 5 = by RPM
+* @exception Should always succeed.
+*/
+int fiji_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
+{
+
+	if (hwmgr->fan_ctrl_is_in_default_mode) {
+		hwmgr->fan_ctrl_default_mode =
+			PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+				CG_FDO_CTRL2, FDO_PWM_MODE);
+		hwmgr->tmin =
+			PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+				CG_FDO_CTRL2, TMIN);
+		hwmgr->fan_ctrl_is_in_default_mode = false;
+	}
+
+	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+			CG_FDO_CTRL2, TMIN, 0);
+	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+			CG_FDO_CTRL2, FDO_PWM_MODE, mode);
+
+	return 0;
+}
+
+/**
+* Reset Fan Speed Control to default mode.
+* @param hwmgr the address of the powerplay hardware manager.
+* @exception Should always succeed.
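+*
+* A minimal usage sketch (illustrative only; the 50% value is arbitrary and
+* error handling is omitted):
+*   fiji_fan_ctrl_set_fan_speed_percent(hwmgr, 50);
+*   ... the fan now runs at a fixed duty cycle in static mode ...
+*   fiji_fan_ctrl_set_default_mode(hwmgr);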
+*/ +int fiji_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr) +{ + if (!hwmgr->fan_ctrl_is_in_default_mode) { + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + CG_FDO_CTRL2, FDO_PWM_MODE, hwmgr->fan_ctrl_default_mode); + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + CG_FDO_CTRL2, TMIN, hwmgr->tmin); + hwmgr->fan_ctrl_is_in_default_mode = true; + } + + return 0; +} + +int fiji_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr) +{ + int result; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ODFuzzyFanControlSupport)) { + cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, FAN_CONTROL_FUZZY); + result = smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StartFanControl); + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_FanSpeedInTableIsRPM)) + hwmgr->hwmgr_func->set_max_fan_rpm_output(hwmgr, + hwmgr->thermal_controller. + advanceFanControlParameters.usMaxFanRPM); + else + hwmgr->hwmgr_func->set_max_fan_pwm_output(hwmgr, + hwmgr->thermal_controller. + advanceFanControlParameters.usMaxFanPWM); + + } else { + cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, FAN_CONTROL_TABLE); + result = smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StartFanControl); + } + + if (!result && hwmgr->thermal_controller. + advanceFanControlParameters.ucTargetTemperature) + result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_SetFanTemperatureTarget, + hwmgr->thermal_controller. + advanceFanControlParameters.ucTargetTemperature); + + return result; +} + + +int fiji_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr) +{ + return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StopFanControl); +} + +/** +* Set Fan Speed in percent. +* @param hwmgr the address of the powerplay hardware manager. +* @param speed is the percentage value (0% - 100%) to be set. +* @exception Fails is the 100% setting appears to be 0. +*/ +int fiji_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr, + uint32_t speed) +{ + uint32_t duty100; + uint32_t duty; + uint64_t tmp64; + + if (hwmgr->thermal_controller.fanInfo.bNoFan) + return 0; + + if (speed > 100) + speed = 100; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_MicrocodeFanControl)) + fiji_fan_ctrl_stop_smc_fan_control(hwmgr); + + duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + CG_FDO_CTRL1, FMAX_DUTY100); + + if (duty100 == 0) + return -EINVAL; + + tmp64 = (uint64_t)speed * 100; + do_div(tmp64, duty100); + duty = (uint32_t)tmp64; + + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + CG_FDO_CTRL0, FDO_STATIC_DUTY, duty); + + return fiji_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC); +} + +/** +* Reset Fan Speed to default. +* @param hwmgr the address of the powerplay hardware manager. +* @exception Always succeeds. +*/ +int fiji_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr) +{ + int result; + + if (hwmgr->thermal_controller.fanInfo.bNoFan) + return 0; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_MicrocodeFanControl)) { + result = fiji_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC); + if (!result) + result = fiji_fan_ctrl_start_smc_fan_control(hwmgr); + } else + result = fiji_fan_ctrl_set_default_mode(hwmgr); + + return result; +} + +/** +* Set Fan Speed in RPM. +* @param hwmgr the address of the powerplay hardware manager. +* @param speed is the percentage value (min - max) to be set. 
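+* (the value is interpreted as an RPM target; it is converted to a
+*  tachometer period, 60 * crystal_clock_freq * 10000 / (8 * speed),
+*  and written to the CG_TACH_STATUS register)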
+* @exception Fails is the speed not lie between min and max. +*/ +int fiji_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed) +{ + uint32_t tach_period; + uint32_t crystal_clock_freq; + + if (hwmgr->thermal_controller.fanInfo.bNoFan || + (hwmgr->thermal_controller.fanInfo. + ucTachometerPulsesPerRevolution == 0) || + (speed < hwmgr->thermal_controller.fanInfo.ulMinRPM) || + (speed > hwmgr->thermal_controller.fanInfo.ulMaxRPM)) + return 0; + + crystal_clock_freq = tonga_get_xclk(hwmgr); + + tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed); + + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + CG_TACH_STATUS, TACH_PERIOD, tach_period); + + return fiji_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC); +} + +/** +* Reads the remote temperature from the SIslands thermal controller. +* +* @param hwmgr The address of the hardware manager. +*/ +int fiji_thermal_get_temperature(struct pp_hwmgr *hwmgr) +{ + int temp; + + temp = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + CG_MULT_THERMAL_STATUS, CTF_TEMP); + + /* Bit 9 means the reading is lower than the lowest usable value. */ + if (temp & 0x200) + temp = FIJI_THERMAL_MAXIMUM_TEMP_READING; + else + temp = temp & 0x1ff; + + temp *= PP_TEMPERATURE_UNITS_PER_CENTIGRADES; + + return temp; +} + +/** +* Set the requested temperature range for high and low alert signals +* +* @param hwmgr The address of the hardware manager. +* @param range Temperature range to be programmed for high and low alert signals +* @exception PP_Result_BadInput if the input data is not valid. +*/ +static int fiji_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, + uint32_t low_temp, uint32_t high_temp) +{ + uint32_t low = FIJI_THERMAL_MINIMUM_ALERT_TEMP * + PP_TEMPERATURE_UNITS_PER_CENTIGRADES; + uint32_t high = FIJI_THERMAL_MAXIMUM_ALERT_TEMP * + PP_TEMPERATURE_UNITS_PER_CENTIGRADES; + + if (low < low_temp) + low = low_temp; + if (high > high_temp) + high = high_temp; + + if (low > high) + return -EINVAL; + + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + CG_THERMAL_INT, DIG_THERM_INTH, + (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)); + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + CG_THERMAL_INT, DIG_THERM_INTL, + (low / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)); + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + CG_THERMAL_CTRL, DIG_THERM_DPM, + (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)); + + return 0; +} + +/** +* Programs thermal controller one-time setting registers +* +* @param hwmgr The address of the hardware manager. +*/ +static int fiji_thermal_initialize(struct pp_hwmgr *hwmgr) +{ + if (hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution) + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + CG_TACH_CTRL, EDGE_PER_REV, + hwmgr->thermal_controller.fanInfo. + ucTachometerPulsesPerRevolution - 1); + + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + CG_FDO_CTRL2, TACH_PWM_RESP_RATE, 0x28); + + return 0; +} + +/** +* Enable thermal alerts on the RV770 thermal controller. +* +* @param hwmgr The address of the hardware manager. 
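+*
+* Implementation note: the function clears FIJI_THERMAL_HIGH_ALERT_MASK and
+* FIJI_THERMAL_LOW_ALERT_MASK in CG_THERMAL_INT to unmask both interrupts,
+* then sends PPSMC_MSG_Thermal_Cntl_Enable so the SMU turns on its internal
+* thermal interrupts.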
+*/ +static int fiji_thermal_enable_alert(struct pp_hwmgr *hwmgr) +{ + uint32_t alert; + + alert = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + CG_THERMAL_INT, THERM_INT_MASK); + alert &= ~(FIJI_THERMAL_HIGH_ALERT_MASK | FIJI_THERMAL_LOW_ALERT_MASK); + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + CG_THERMAL_INT, THERM_INT_MASK, alert); + + /* send message to SMU to enable internal thermal interrupts */ + return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Thermal_Cntl_Enable); +} + +/** +* Disable thermal alerts on the RV770 thermal controller. +* @param hwmgr The address of the hardware manager. +*/ +static int fiji_thermal_disable_alert(struct pp_hwmgr *hwmgr) +{ + uint32_t alert; + + alert = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + CG_THERMAL_INT, THERM_INT_MASK); + alert |= (FIJI_THERMAL_HIGH_ALERT_MASK | FIJI_THERMAL_LOW_ALERT_MASK); + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + CG_THERMAL_INT, THERM_INT_MASK, alert); + + /* send message to SMU to disable internal thermal interrupts */ + return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Thermal_Cntl_Disable); +} + +/** +* Uninitialize the thermal controller. +* Currently just disables alerts. +* @param hwmgr The address of the hardware manager. +*/ +int fiji_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr) +{ + int result = fiji_thermal_disable_alert(hwmgr); + + if (hwmgr->thermal_controller.fanInfo.bNoFan) + fiji_fan_ctrl_set_default_mode(hwmgr); + + return result; +} + +/** +* Set up the fan table to control the fan using the SMC. +* @param hwmgr the address of the powerplay hardware manager. +* @param pInput the pointer to input data +* @param pOutput the pointer to output data +* @param pStorage the pointer to temporary storage +* @param Result the last failure code +* @return result from set temperature range routine +*/ +int tf_fiji_thermal_setup_fan_table(struct pp_hwmgr *hwmgr, + void *input, void *output, void *storage, int result) +{ + struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + SMU73_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE }; + uint32_t duty100; + uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2; + uint16_t fdo_min, slope1, slope2; + uint32_t reference_clock; + int res; + uint64_t tmp64; + + if (data->fan_table_start == 0) { + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_MicrocodeFanControl); + return 0; + } + + duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + CG_FDO_CTRL1, FMAX_DUTY100); + + if (duty100 == 0) { + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_MicrocodeFanControl); + return 0; + } + + tmp64 = hwmgr->thermal_controller.advanceFanControlParameters. 
+ usPWMMin * duty100; + do_div(tmp64, 10000); + fdo_min = (uint16_t)tmp64; + + t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed - + hwmgr->thermal_controller.advanceFanControlParameters.usTMin; + t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh - + hwmgr->thermal_controller.advanceFanControlParameters.usTMed; + + pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed - + hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin; + pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh - + hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed; + + slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100); + slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100); + + fan_table.TempMin = cpu_to_be16((50 + hwmgr-> + thermal_controller.advanceFanControlParameters.usTMin) / 100); + fan_table.TempMed = cpu_to_be16((50 + hwmgr-> + thermal_controller.advanceFanControlParameters.usTMed) / 100); + fan_table.TempMax = cpu_to_be16((50 + hwmgr-> + thermal_controller.advanceFanControlParameters.usTMax) / 100); + + fan_table.Slope1 = cpu_to_be16(slope1); + fan_table.Slope2 = cpu_to_be16(slope2); + + fan_table.FdoMin = cpu_to_be16(fdo_min); + + fan_table.HystDown = cpu_to_be16(hwmgr-> + thermal_controller.advanceFanControlParameters.ucTHyst); + + fan_table.HystUp = cpu_to_be16(1); + + fan_table.HystSlope = cpu_to_be16(1); + + fan_table.TempRespLim = cpu_to_be16(5); + + reference_clock = tonga_get_xclk(hwmgr); + + fan_table.RefreshPeriod = cpu_to_be32((hwmgr-> + thermal_controller.advanceFanControlParameters.ulCycleDelay * + reference_clock) / 1600); + + fan_table.FdoMax = cpu_to_be16((uint16_t)duty100); + + fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD( + hwmgr->device, CGS_IND_REG__SMC, + CG_MULT_THERMAL_CTRL, TEMP_SEL); + + res = fiji_copy_bytes_to_smc(hwmgr->smumgr, data->fan_table_start, + (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table), + data->sram_end); + + if (!res && hwmgr->thermal_controller. + advanceFanControlParameters.ucMinimumPWMLimit) + res = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_SetFanMinPwm, + hwmgr->thermal_controller. + advanceFanControlParameters.ucMinimumPWMLimit); + + if (!res && hwmgr->thermal_controller. + advanceFanControlParameters.ulMinFanSCLKAcousticLimit) + res = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_SetFanSclkTarget, + hwmgr->thermal_controller. + advanceFanControlParameters.ulMinFanSCLKAcousticLimit); + + if (res) + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_MicrocodeFanControl); + + return 0; +} + +/** +* Start the fan control on the SMC. +* @param hwmgr the address of the powerplay hardware manager. +* @param pInput the pointer to input data +* @param pOutput the pointer to output data +* @param pStorage the pointer to temporary storage +* @param Result the last failure code +* @return result from set temperature range routine +*/ +int tf_fiji_thermal_start_smc_fan_control(struct pp_hwmgr *hwmgr, + void *input, void *output, void *storage, int result) +{ +/* If the fantable setup has failed we could have disabled + * PHM_PlatformCaps_MicrocodeFanControl even after + * this function was included in the table. + * Make sure that we still think controlling the fan is OK. 
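+ * (tf_fiji_thermal_setup_fan_table clears that cap with phm_cap_unset()
+ * when fan_table_start or FMAX_DUTY100 reads back as zero, or when
+ * downloading the fan table to the SMC fails.)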
+*/ + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_MicrocodeFanControl)) { + fiji_fan_ctrl_start_smc_fan_control(hwmgr); + fiji_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC); + } + + return 0; +} + +/** +* Set temperature range for high and low alerts +* @param hwmgr the address of the powerplay hardware manager. +* @param pInput the pointer to input data +* @param pOutput the pointer to output data +* @param pStorage the pointer to temporary storage +* @param Result the last failure code +* @return result from set temperature range routine +*/ +int tf_fiji_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, + void *input, void *output, void *storage, int result) +{ + struct PP_TemperatureRange *range = (struct PP_TemperatureRange *)input; + + if (range == NULL) + return -EINVAL; + + return fiji_thermal_set_temperature_range(hwmgr, range->min, range->max); +} + +/** +* Programs one-time setting registers +* @param hwmgr the address of the powerplay hardware manager. +* @param pInput the pointer to input data +* @param pOutput the pointer to output data +* @param pStorage the pointer to temporary storage +* @param Result the last failure code +* @return result from initialize thermal controller routine +*/ +int tf_fiji_thermal_initialize(struct pp_hwmgr *hwmgr, + void *input, void *output, void *storage, int result) +{ + return fiji_thermal_initialize(hwmgr); +} + +/** +* Enable high and low alerts +* @param hwmgr the address of the powerplay hardware manager. +* @param pInput the pointer to input data +* @param pOutput the pointer to output data +* @param pStorage the pointer to temporary storage +* @param Result the last failure code +* @return result from enable alert routine +*/ +int tf_fiji_thermal_enable_alert(struct pp_hwmgr *hwmgr, + void *input, void *output, void *storage, int result) +{ + return fiji_thermal_enable_alert(hwmgr); +} + +/** +* Disable high and low alerts +* @param hwmgr the address of the powerplay hardware manager. +* @param pInput the pointer to input data +* @param pOutput the pointer to output data +* @param pStorage the pointer to temporary storage +* @param Result the last failure code +* @return result from disable alert routine +*/ +static int tf_fiji_thermal_disable_alert(struct pp_hwmgr *hwmgr, + void *input, void *output, void *storage, int result) +{ + return fiji_thermal_disable_alert(hwmgr); +} + +static struct phm_master_table_item +fiji_thermal_start_thermal_controller_master_list[] = { + {NULL, tf_fiji_thermal_initialize}, + {NULL, tf_fiji_thermal_set_temperature_range}, + {NULL, tf_fiji_thermal_enable_alert}, +/* We should restrict performance levels to low before we halt the SMC. + * On the other hand we are still in boot state when we do this + * so it would be pointless. + * If this assumption changes we have to revisit this table. 
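+ *
+ * For reference, the start sequence dispatched from this table is:
+ * initialize, set_temperature_range, enable_alert, setup_fan_table,
+ * start_smc_fan_control.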
+ */ + {NULL, tf_fiji_thermal_setup_fan_table}, + {NULL, tf_fiji_thermal_start_smc_fan_control}, + {NULL, NULL} +}; + +static struct phm_master_table_header +fiji_thermal_start_thermal_controller_master = { + 0, + PHM_MasterTableFlag_None, + fiji_thermal_start_thermal_controller_master_list +}; + +static struct phm_master_table_item +fiji_thermal_set_temperature_range_master_list[] = { + {NULL, tf_fiji_thermal_disable_alert}, + {NULL, tf_fiji_thermal_set_temperature_range}, + {NULL, tf_fiji_thermal_enable_alert}, + {NULL, NULL} +}; + +struct phm_master_table_header +fiji_thermal_set_temperature_range_master = { + 0, + PHM_MasterTableFlag_None, + fiji_thermal_set_temperature_range_master_list +}; + +int fiji_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr) +{ + if (!hwmgr->thermal_controller.fanInfo.bNoFan) + fiji_fan_ctrl_set_default_mode(hwmgr); + return 0; +} + +/** +* Initializes the thermal controller related functions in the Hardware Manager structure. +* @param hwmgr The address of the hardware manager. +* @exception Any error code from the low-level communication. +*/ +int pp_fiji_thermal_initialize(struct pp_hwmgr *hwmgr) +{ + int result; + + result = phm_construct_table(hwmgr, + &fiji_thermal_set_temperature_range_master, + &(hwmgr->set_temperature_range)); + + if (!result) { + result = phm_construct_table(hwmgr, + &fiji_thermal_start_thermal_controller_master, + &(hwmgr->start_thermal_controller)); + if (result) + phm_destroy_table(hwmgr, &(hwmgr->set_temperature_range)); + } + + if (!result) + hwmgr->fan_ctrl_is_in_default_mode = true; + return result; +} + diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_thermal.h b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_thermal.h new file mode 100644 index 000000000000..8621493b8574 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_thermal.h @@ -0,0 +1,62 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#ifndef FIJI_THERMAL_H +#define FIJI_THERMAL_H + +#include "hwmgr.h" + +#define FIJI_THERMAL_HIGH_ALERT_MASK 0x1 +#define FIJI_THERMAL_LOW_ALERT_MASK 0x2 + +#define FIJI_THERMAL_MINIMUM_TEMP_READING -256 +#define FIJI_THERMAL_MAXIMUM_TEMP_READING 255 + +#define FIJI_THERMAL_MINIMUM_ALERT_TEMP 0 +#define FIJI_THERMAL_MAXIMUM_ALERT_TEMP 255 + +#define FDO_PWM_MODE_STATIC 1 +#define FDO_PWM_MODE_STATIC_RPM 5 + + +extern int tf_fiji_thermal_initialize(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result); +extern int tf_fiji_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result); +extern int tf_fiji_thermal_enable_alert(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result); + +extern int fiji_thermal_get_temperature(struct pp_hwmgr *hwmgr); +extern int fiji_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr); +extern int fiji_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr, struct phm_fan_speed_info *fan_speed_info); +extern int fiji_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t *speed); +extern int fiji_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr); +extern int fiji_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode); +extern int fiji_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t speed); +extern int fiji_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr); +extern int pp_fiji_thermal_initialize(struct pp_hwmgr *hwmgr); +extern int fiji_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr); +extern int fiji_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed); +extern int fiji_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed); +extern int fiji_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr); +extern uint32_t tonga_get_xclk(struct pp_hwmgr *hwmgr); + +#endif + diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/functiontables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/functiontables.c new file mode 100644 index 000000000000..9deadabbc81c --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/functiontables.c @@ -0,0 +1,155 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#include +#include +#include +#include "hwmgr.h" + +static int phm_run_table(struct pp_hwmgr *hwmgr, + struct phm_runtime_table_header *rt_table, + void *input, + void *output, + void *temp_storage) +{ + int result = 0; + phm_table_function *function; + + for (function = rt_table->function_list; NULL != *function; function++) { + int tmp = (*function)(hwmgr, input, output, temp_storage, result); + + if (tmp == PP_Result_TableImmediateExit) + break; + if (tmp) { + if (0 == result) + result = tmp; + if (rt_table->exit_error) + break; + } + } + + return result; +} + +int phm_dispatch_table(struct pp_hwmgr *hwmgr, + struct phm_runtime_table_header *rt_table, + void *input, void *output) +{ + int result = 0; + void *temp_storage = NULL; + + if (hwmgr == NULL || rt_table == NULL || rt_table->function_list == NULL) { + printk(KERN_ERR "[ powerplay ] Invalid Parameter!\n"); + return 0; /*temp return ture because some function not implement on some asic */ + } + + if (0 != rt_table->storage_size) { + temp_storage = kzalloc(rt_table->storage_size, GFP_KERNEL); + if (temp_storage == NULL) { + printk(KERN_ERR "[ powerplay ] Could not allocate table temporary storage\n"); + return -ENOMEM; + } + } + + result = phm_run_table(hwmgr, rt_table, input, output, temp_storage); + + if (NULL != temp_storage) + kfree(temp_storage); + + return result; +} + +int phm_construct_table(struct pp_hwmgr *hwmgr, + struct phm_master_table_header *master_table, + struct phm_runtime_table_header *rt_table) +{ + uint32_t function_count = 0; + const struct phm_master_table_item *table_item; + uint32_t size; + phm_table_function *run_time_list; + phm_table_function *rtf; + + if (hwmgr == NULL || master_table == NULL || rt_table == NULL) { + printk(KERN_ERR "[ powerplay ] Invalid Parameter!\n"); + return -EINVAL; + } + + for (table_item = master_table->master_list; + NULL != table_item->tableFunction; table_item++) { + if ((NULL == table_item->isFunctionNeededInRuntimeTable) || + (table_item->isFunctionNeededInRuntimeTable(hwmgr))) + function_count++; + } + + size = (function_count + 1) * sizeof(phm_table_function); + run_time_list = kzalloc(size, GFP_KERNEL); + + if (NULL == run_time_list) + return -ENOMEM; + + rtf = run_time_list; + for (table_item = master_table->master_list; + NULL != table_item->tableFunction; table_item++) { + if ((rtf - run_time_list) > function_count) { + printk(KERN_ERR "[ powerplay ] Check function results have changed\n"); + kfree(run_time_list); + return -EINVAL; + } + + if ((NULL == table_item->isFunctionNeededInRuntimeTable) || + (table_item->isFunctionNeededInRuntimeTable(hwmgr))) { + *(rtf++) = table_item->tableFunction; + } + } + + if ((rtf - run_time_list) > function_count) { + printk(KERN_ERR "[ powerplay ] Check function results have changed\n"); + kfree(run_time_list); + return -EINVAL; + } + + *rtf = NULL; + rt_table->function_list = run_time_list; + rt_table->exit_error = (0 != (master_table->flags & PHM_MasterTableFlag_ExitOnError)); + rt_table->storage_size = master_table->storage_size; + return 0; +} + +int phm_destroy_table(struct pp_hwmgr *hwmgr, + struct phm_runtime_table_header *rt_table) +{ + if (hwmgr == NULL || rt_table == NULL) { + printk(KERN_ERR "[ powerplay ] Invalid Parameter\n"); + return -EINVAL; + } + + if (NULL == rt_table->function_list) + return 0; + + kfree(rt_table->function_list); + + rt_table->function_list = NULL; + rt_table->storage_size = 0; + rt_table->exit_error = false; + + return 0; +} diff --git 
a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c new file mode 100644 index 000000000000..0f2d5e4bc241 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c @@ -0,0 +1,334 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#include +#include "hwmgr.h" +#include "hardwaremanager.h" +#include "power_state.h" +#include "pp_acpi.h" +#include "amd_acpi.h" +#include "amd_powerplay.h" + +#define PHM_FUNC_CHECK(hw) \ + do { \ + if ((hw) == NULL || (hw)->hwmgr_func == NULL) \ + return -EINVAL; \ + } while (0) + +void phm_init_dynamic_caps(struct pp_hwmgr *hwmgr) +{ + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableVoltageTransition); + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableEngineTransition); + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableMemoryTransition); + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableMGClockGating); + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableMGCGTSSM); + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableLSClockGating); + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_Force3DClockSupport); + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableLightSleep); + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableMCLS); + phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisablePowerGating); + + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableDPM); + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableSMUUVDHandshake); + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_ThermalAutoThrottling); + + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PCIEPerformanceRequest); + + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_NoOD5Support); + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_UserMaxClockForMultiDisplays); + + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_VpuRecoveryInProgress); + + if (acpi_atcs_functions_supported(hwmgr->device, ATCS_FUNCTION_PCIE_PERFORMANCE_REQUEST) && + acpi_atcs_functions_supported(hwmgr->device, 
ATCS_FUNCTION_PCIE_DEVICE_READY_NOTIFICATION)) + phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PCIEPerformanceRequest); +} + +bool phm_is_hw_access_blocked(struct pp_hwmgr *hwmgr) +{ + return hwmgr->block_hw_access; +} + +int phm_block_hw_access(struct pp_hwmgr *hwmgr, bool block) +{ + hwmgr->block_hw_access = block; + return 0; +} + +int phm_setup_asic(struct pp_hwmgr *hwmgr) +{ + PHM_FUNC_CHECK(hwmgr); + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_TablelessHardwareInterface)) { + if (NULL != hwmgr->hwmgr_func->asic_setup) + return hwmgr->hwmgr_func->asic_setup(hwmgr); + } else { + return phm_dispatch_table(hwmgr, &(hwmgr->setup_asic), + NULL, NULL); + } + + return 0; +} + +int phm_power_down_asic(struct pp_hwmgr *hwmgr) +{ + PHM_FUNC_CHECK(hwmgr); + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_TablelessHardwareInterface)) { + if (NULL != hwmgr->hwmgr_func->power_off_asic) + return hwmgr->hwmgr_func->power_off_asic(hwmgr); + } else { + return phm_dispatch_table(hwmgr, &(hwmgr->power_down_asic), + NULL, NULL); + } + + return 0; +} + +int phm_set_power_state(struct pp_hwmgr *hwmgr, + const struct pp_hw_power_state *pcurrent_state, + const struct pp_hw_power_state *pnew_power_state) +{ + struct phm_set_power_state_input states; + + PHM_FUNC_CHECK(hwmgr); + + states.pcurrent_state = pcurrent_state; + states.pnew_state = pnew_power_state; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_TablelessHardwareInterface)) { + if (NULL != hwmgr->hwmgr_func->power_state_set) + return hwmgr->hwmgr_func->power_state_set(hwmgr, &states); + } else { + return phm_dispatch_table(hwmgr, &(hwmgr->set_power_state), &states, NULL); + } + + return 0; +} + +int phm_enable_dynamic_state_management(struct pp_hwmgr *hwmgr) +{ + PHM_FUNC_CHECK(hwmgr); + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_TablelessHardwareInterface)) { + if (NULL != hwmgr->hwmgr_func->dynamic_state_management_enable) + return hwmgr->hwmgr_func->dynamic_state_management_enable(hwmgr); + } else { + return phm_dispatch_table(hwmgr, + &(hwmgr->enable_dynamic_state_management), + NULL, NULL); + } + return 0; +} + +int phm_force_dpm_levels(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level) +{ + PHM_FUNC_CHECK(hwmgr); + + if (hwmgr->hwmgr_func->force_dpm_level != NULL) + return hwmgr->hwmgr_func->force_dpm_level(hwmgr, level); + + return 0; +} + +int phm_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, + struct pp_power_state *adjusted_ps, + const struct pp_power_state *current_ps) +{ + PHM_FUNC_CHECK(hwmgr); + + if (hwmgr->hwmgr_func->apply_state_adjust_rules != NULL) + return hwmgr->hwmgr_func->apply_state_adjust_rules( + hwmgr, + adjusted_ps, + current_ps); + return 0; +} + +int phm_powerdown_uvd(struct pp_hwmgr *hwmgr) +{ + PHM_FUNC_CHECK(hwmgr); + + if (hwmgr->hwmgr_func->powerdown_uvd != NULL) + return hwmgr->hwmgr_func->powerdown_uvd(hwmgr); + return 0; +} + +int phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool gate) +{ + PHM_FUNC_CHECK(hwmgr); + + if (hwmgr->hwmgr_func->powergate_uvd != NULL) + return hwmgr->hwmgr_func->powergate_uvd(hwmgr, gate); + return 0; +} + +int phm_powergate_vce(struct pp_hwmgr *hwmgr, bool gate) +{ + PHM_FUNC_CHECK(hwmgr); + + if (hwmgr->hwmgr_func->powergate_vce != NULL) + return hwmgr->hwmgr_func->powergate_vce(hwmgr, gate); + return 0; +} + +int phm_enable_clock_power_gatings(struct pp_hwmgr *hwmgr) +{ + PHM_FUNC_CHECK(hwmgr); + + if 
(phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_TablelessHardwareInterface)) { + if (NULL != hwmgr->hwmgr_func->enable_clock_power_gating) + return hwmgr->hwmgr_func->enable_clock_power_gating(hwmgr); + } else { + return phm_dispatch_table(hwmgr, &(hwmgr->enable_clock_power_gatings), NULL, NULL); + } + return 0; +} + +int phm_display_configuration_changed(struct pp_hwmgr *hwmgr) +{ + PHM_FUNC_CHECK(hwmgr); + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_TablelessHardwareInterface)) { + if (NULL != hwmgr->hwmgr_func->display_config_changed) + hwmgr->hwmgr_func->display_config_changed(hwmgr); + } else + return phm_dispatch_table(hwmgr, &hwmgr->display_configuration_changed, NULL, NULL); + return 0; +} + +int phm_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr) +{ + PHM_FUNC_CHECK(hwmgr); + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_TablelessHardwareInterface)) + if (NULL != hwmgr->hwmgr_func->notify_smc_display_config_after_ps_adjustment) + hwmgr->hwmgr_func->notify_smc_display_config_after_ps_adjustment(hwmgr); + + return 0; +} + +int phm_stop_thermal_controller(struct pp_hwmgr *hwmgr) +{ + PHM_FUNC_CHECK(hwmgr); + + if (hwmgr->hwmgr_func->stop_thermal_controller == NULL) + return -EINVAL; + + return hwmgr->hwmgr_func->stop_thermal_controller(hwmgr); +} + +int phm_register_thermal_interrupt(struct pp_hwmgr *hwmgr, const void *info) +{ + PHM_FUNC_CHECK(hwmgr); + + if (hwmgr->hwmgr_func->register_internal_thermal_interrupt == NULL) + return -EINVAL; + + return hwmgr->hwmgr_func->register_internal_thermal_interrupt(hwmgr, info); +} + +/** +* Initializes the thermal controller subsystem. +* +* @param pHwMgr the address of the powerplay hardware manager. +* @param pTemperatureRange the address of the structure holding the temperature range. +* @exception PP_Result_Failed if any of the paramters is NULL, otherwise the return value from the dispatcher. 
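+*
+* A minimal usage sketch (hypothetical values; the range uses the same
+* scaled units as PP_TEMPERATURE_UNITS_PER_CENTIGRADES):
+*   struct PP_TemperatureRange range = { .min = low, .max = high };
+*   phm_start_thermal_controller(hwmgr, &range);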
+*/ +int phm_start_thermal_controller(struct pp_hwmgr *hwmgr, struct PP_TemperatureRange *temperature_range) +{ + return phm_dispatch_table(hwmgr, &(hwmgr->start_thermal_controller), temperature_range, NULL); +} + + +bool phm_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr) +{ + PHM_FUNC_CHECK(hwmgr); + + if (hwmgr->hwmgr_func->check_smc_update_required_for_display_configuration == NULL) + return -EINVAL; + + return hwmgr->hwmgr_func->check_smc_update_required_for_display_configuration(hwmgr); +} + + +int phm_check_states_equal(struct pp_hwmgr *hwmgr, + const struct pp_hw_power_state *pstate1, + const struct pp_hw_power_state *pstate2, + bool *equal) +{ + PHM_FUNC_CHECK(hwmgr); + + if (hwmgr->hwmgr_func->check_states_equal == NULL) + return -EINVAL; + + return hwmgr->hwmgr_func->check_states_equal(hwmgr, pstate1, pstate2, equal); +} + +int phm_store_dal_configuration_data(struct pp_hwmgr *hwmgr, + const struct amd_pp_display_configuration *display_config) +{ + PHM_FUNC_CHECK(hwmgr); + + if (hwmgr->hwmgr_func->store_cc6_data == NULL) + return -EINVAL; + + hwmgr->display_config = *display_config; + /* to do pass other display configuration in furture */ + + if (hwmgr->hwmgr_func->store_cc6_data) + hwmgr->hwmgr_func->store_cc6_data(hwmgr, + display_config->cpu_pstate_separation_time, + display_config->cpu_cc6_disable, + display_config->cpu_pstate_disable, + display_config->nb_pstate_switch_disable); + + return 0; +} + +int phm_get_dal_power_level(struct pp_hwmgr *hwmgr, + struct amd_pp_dal_clock_info *info) +{ + PHM_FUNC_CHECK(hwmgr); + + if (info == NULL || hwmgr->hwmgr_func->get_dal_power_level == NULL) + return -EINVAL; + + return hwmgr->hwmgr_func->get_dal_power_level(hwmgr, info); +} + +int phm_set_cpu_power_state(struct pp_hwmgr *hwmgr) +{ + PHM_FUNC_CHECK(hwmgr); + + if (hwmgr->hwmgr_func->set_cpu_power_state != NULL) + return hwmgr->hwmgr_func->set_cpu_power_state(hwmgr); + + return 0; +} diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c new file mode 100644 index 000000000000..5fb98aa2e719 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c @@ -0,0 +1,563 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#include "linux/delay.h" +#include +#include +#include +#include "cgs_common.h" +#include "power_state.h" +#include "hwmgr.h" +#include "pppcielanes.h" +#include "pp_debug.h" +#include "ppatomctrl.h" + +extern int cz_hwmgr_init(struct pp_hwmgr *hwmgr); +extern int tonga_hwmgr_init(struct pp_hwmgr *hwmgr); +extern int fiji_hwmgr_init(struct pp_hwmgr *hwmgr); + +int hwmgr_init(struct amd_pp_init *pp_init, struct pp_instance *handle) +{ + struct pp_hwmgr *hwmgr; + + if ((handle == NULL) || (pp_init == NULL)) + return -EINVAL; + + hwmgr = kzalloc(sizeof(struct pp_hwmgr), GFP_KERNEL); + if (hwmgr == NULL) + return -ENOMEM; + + handle->hwmgr = hwmgr; + hwmgr->smumgr = handle->smu_mgr; + hwmgr->device = pp_init->device; + hwmgr->chip_family = pp_init->chip_family; + hwmgr->chip_id = pp_init->chip_id; + hwmgr->hw_revision = pp_init->rev_id; + hwmgr->usec_timeout = AMD_MAX_USEC_TIMEOUT; + hwmgr->power_source = PP_PowerSource_AC; + + switch (hwmgr->chip_family) { + case AMD_FAMILY_CZ: + cz_hwmgr_init(hwmgr); + break; + case AMD_FAMILY_VI: + switch (hwmgr->chip_id) { + case CHIP_TONGA: + tonga_hwmgr_init(hwmgr); + break; + case CHIP_FIJI: + fiji_hwmgr_init(hwmgr); + break; + default: + return -EINVAL; + } + break; + default: + return -EINVAL; + } + + phm_init_dynamic_caps(hwmgr); + + return 0; +} + +int hwmgr_fini(struct pp_hwmgr *hwmgr) +{ + if (hwmgr == NULL || hwmgr->ps == NULL) + return -EINVAL; + + kfree(hwmgr->ps); + kfree(hwmgr); + return 0; +} + +int hw_init_power_state_table(struct pp_hwmgr *hwmgr) +{ + int result; + unsigned int i; + unsigned int table_entries; + struct pp_power_state *state; + int size; + + if (hwmgr->hwmgr_func->get_num_of_pp_table_entries == NULL) + return -EINVAL; + + if (hwmgr->hwmgr_func->get_power_state_size == NULL) + return -EINVAL; + + hwmgr->num_ps = table_entries = hwmgr->hwmgr_func->get_num_of_pp_table_entries(hwmgr); + + hwmgr->ps_size = size = hwmgr->hwmgr_func->get_power_state_size(hwmgr) + + sizeof(struct pp_power_state); + + hwmgr->ps = kzalloc(size * table_entries, GFP_KERNEL); + + if (hwmgr->ps == NULL) + return -ENOMEM; + + state = hwmgr->ps; + + for (i = 0; i < table_entries; i++) { + result = hwmgr->hwmgr_func->get_pp_table_entry(hwmgr, i, state); + + if (state->classification.flags & PP_StateClassificationFlag_Boot) { + hwmgr->boot_ps = state; + hwmgr->current_ps = hwmgr->request_ps = state; + } + + state->id = i + 1; /* assigned unique num for every power state id */ + + if (state->classification.flags & PP_StateClassificationFlag_Uvd) + hwmgr->uvd_ps = state; + state = (struct pp_power_state *)((unsigned long)state + size); + } + + return 0; +} + + +/** + * Returns once the part of the register indicated by the mask has + * reached the given value. 
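+ *
+ * Polls once per microsecond for up to hwmgr->usec_timeout iterations and
+ * returns 0 on success or -1 on timeout. Illustrative call (the register
+ * index and mask are placeholders):
+ *   if (phm_wait_on_register(hwmgr, ix_status_reg, 0x1, 0x1))
+ *           pr_err("register never reached the expected value\n");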
+ */ +int phm_wait_on_register(struct pp_hwmgr *hwmgr, uint32_t index, + uint32_t value, uint32_t mask) +{ + uint32_t i; + uint32_t cur_value; + + if (hwmgr == NULL || hwmgr->device == NULL) { + printk(KERN_ERR "[ powerplay ] Invalid Hardware Manager!"); + return -EINVAL; + } + + for (i = 0; i < hwmgr->usec_timeout; i++) { + cur_value = cgs_read_register(hwmgr->device, index); + if ((cur_value & mask) == (value & mask)) + break; + udelay(1); + } + + /* timeout means wrong logic*/ + if (i == hwmgr->usec_timeout) + return -1; + return 0; +} + +int phm_wait_for_register_unequal(struct pp_hwmgr *hwmgr, + uint32_t index, uint32_t value, uint32_t mask) +{ + uint32_t i; + uint32_t cur_value; + + if (hwmgr == NULL || hwmgr->device == NULL) { + printk(KERN_ERR "[ powerplay ] Invalid Hardware Manager!"); + return -EINVAL; + } + + for (i = 0; i < hwmgr->usec_timeout; i++) { + cur_value = cgs_read_register(hwmgr->device, index); + if ((cur_value & mask) != (value & mask)) + break; + udelay(1); + } + + /* timeout means wrong logic*/ + if (i == hwmgr->usec_timeout) + return -1; + return 0; +} + + +/** + * Returns once the part of the register indicated by the mask has + * reached the given value.The indirect space is described by giving + * the memory-mapped index of the indirect index register. + */ +void phm_wait_on_indirect_register(struct pp_hwmgr *hwmgr, + uint32_t indirect_port, + uint32_t index, + uint32_t value, + uint32_t mask) +{ + if (hwmgr == NULL || hwmgr->device == NULL) { + printk(KERN_ERR "[ powerplay ] Invalid Hardware Manager!"); + return; + } + + cgs_write_register(hwmgr->device, indirect_port, index); + phm_wait_on_register(hwmgr, indirect_port + 1, mask, value); +} + +void phm_wait_for_indirect_register_unequal(struct pp_hwmgr *hwmgr, + uint32_t indirect_port, + uint32_t index, + uint32_t value, + uint32_t mask) +{ + if (hwmgr == NULL || hwmgr->device == NULL) { + printk(KERN_ERR "[ powerplay ] Invalid Hardware Manager!"); + return; + } + + cgs_write_register(hwmgr->device, indirect_port, index); + phm_wait_for_register_unequal(hwmgr, indirect_port + 1, + value, mask); +} + +bool phm_cf_want_uvd_power_gating(struct pp_hwmgr *hwmgr) +{ + return phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_UVDPowerGating); +} + +bool phm_cf_want_vce_power_gating(struct pp_hwmgr *hwmgr) +{ + return phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_VCEPowerGating); +} + + +int phm_trim_voltage_table(struct pp_atomctrl_voltage_table *vol_table) +{ + uint32_t i, j; + uint16_t vvalue; + bool found = false; + struct pp_atomctrl_voltage_table *table; + + PP_ASSERT_WITH_CODE((NULL != vol_table), + "Voltage Table empty.", return -EINVAL); + + table = kzalloc(sizeof(struct pp_atomctrl_voltage_table), + GFP_KERNEL); + + if (NULL == table) + return -EINVAL; + + table->mask_low = vol_table->mask_low; + table->phase_delay = vol_table->phase_delay; + + for (i = 0; i < vol_table->count; i++) { + vvalue = vol_table->entries[i].value; + found = false; + + for (j = 0; j < table->count; j++) { + if (vvalue == table->entries[j].value) { + found = true; + break; + } + } + + if (!found) { + table->entries[table->count].value = vvalue; + table->entries[table->count].smio_low = + vol_table->entries[i].smio_low; + table->count++; + } + } + + memcpy(vol_table, table, sizeof(struct pp_atomctrl_voltage_table)); + kfree(table); + + return 0; +} + +int phm_get_svi2_mvdd_voltage_table(struct pp_atomctrl_voltage_table *vol_table, + phm_ppt_v1_clock_voltage_dependency_table 
*dep_table) +{ + uint32_t i; + int result; + + PP_ASSERT_WITH_CODE((0 != dep_table->count), + "Voltage Dependency Table empty.", return -EINVAL); + + PP_ASSERT_WITH_CODE((NULL != vol_table), + "vol_table empty.", return -EINVAL); + + vol_table->mask_low = 0; + vol_table->phase_delay = 0; + vol_table->count = dep_table->count; + + for (i = 0; i < dep_table->count; i++) { + vol_table->entries[i].value = dep_table->entries[i].mvdd; + vol_table->entries[i].smio_low = 0; + } + + result = phm_trim_voltage_table(vol_table); + PP_ASSERT_WITH_CODE((0 == result), + "Failed to trim MVDD table.", return result); + + return 0; +} + +int phm_get_svi2_vddci_voltage_table(struct pp_atomctrl_voltage_table *vol_table, + phm_ppt_v1_clock_voltage_dependency_table *dep_table) +{ + uint32_t i; + int result; + + PP_ASSERT_WITH_CODE((0 != dep_table->count), + "Voltage Dependency Table empty.", return -EINVAL); + + PP_ASSERT_WITH_CODE((NULL != vol_table), + "vol_table empty.", return -EINVAL); + + vol_table->mask_low = 0; + vol_table->phase_delay = 0; + vol_table->count = dep_table->count; + + for (i = 0; i < dep_table->count; i++) { + vol_table->entries[i].value = dep_table->entries[i].vddci; + vol_table->entries[i].smio_low = 0; + } + + result = phm_trim_voltage_table(vol_table); + PP_ASSERT_WITH_CODE((0 == result), + "Failed to trim VDDCI table.", return result); + + return 0; +} + +int phm_get_svi2_vdd_voltage_table(struct pp_atomctrl_voltage_table *vol_table, + phm_ppt_v1_voltage_lookup_table *lookup_table) +{ + int i = 0; + + PP_ASSERT_WITH_CODE((0 != lookup_table->count), + "Voltage Lookup Table empty.", return -EINVAL); + + PP_ASSERT_WITH_CODE((NULL != vol_table), + "vol_table empty.", return -EINVAL); + + vol_table->mask_low = 0; + vol_table->phase_delay = 0; + + vol_table->count = lookup_table->count; + + for (i = 0; i < vol_table->count; i++) { + vol_table->entries[i].value = lookup_table->entries[i].us_vdd; + vol_table->entries[i].smio_low = 0; + } + + return 0; +} + +void phm_trim_voltage_table_to_fit_state_table(uint32_t max_vol_steps, + struct pp_atomctrl_voltage_table *vol_table) +{ + unsigned int i, diff; + + if (vol_table->count <= max_vol_steps) + return; + + diff = vol_table->count - max_vol_steps; + + for (i = 0; i < max_vol_steps; i++) + vol_table->entries[i] = vol_table->entries[i + diff]; + + vol_table->count = max_vol_steps; + + return; +} + +int phm_reset_single_dpm_table(void *table, + uint32_t count, int max) +{ + int i; + + struct vi_dpm_table *dpm_table = (struct vi_dpm_table *)table; + + PP_ASSERT_WITH_CODE(count <= max, + "Fatal error, can not set up single DPM table entries to exceed max number!", + ); + + dpm_table->count = count; + for (i = 0; i < max; i++) + dpm_table->dpm_level[i].enabled = false; + + return 0; +} + +void phm_setup_pcie_table_entry( + void *table, + uint32_t index, uint32_t pcie_gen, + uint32_t pcie_lanes) +{ + struct vi_dpm_table *dpm_table = (struct vi_dpm_table *)table; + dpm_table->dpm_level[index].value = pcie_gen; + dpm_table->dpm_level[index].param1 = pcie_lanes; + dpm_table->dpm_level[index].enabled = 1; +} + +int32_t phm_get_dpm_level_enable_mask_value(void *table) +{ + int32_t i; + int32_t mask = 0; + struct vi_dpm_table *dpm_table = (struct vi_dpm_table *)table; + + for (i = dpm_table->count; i > 0; i--) { + mask = mask << 1; + if (dpm_table->dpm_level[i - 1].enabled) + mask |= 0x1; + else + mask &= 0xFFFFFFFE; + } + + return mask; +} + +uint8_t phm_get_voltage_index( + struct phm_ppt_v1_voltage_lookup_table *lookup_table, uint16_t voltage) +{ + 
uint8_t count = (uint8_t) (lookup_table->count); + uint8_t i; + + PP_ASSERT_WITH_CODE((NULL != lookup_table), + "Lookup Table empty.", return 0); + PP_ASSERT_WITH_CODE((0 != count), + "Lookup Table empty.", return 0); + + for (i = 0; i < lookup_table->count; i++) { + /* find first voltage equal or bigger than requested */ + if (lookup_table->entries[i].us_vdd >= voltage) + return i; + } + /* voltage is bigger than max voltage in the table */ + return i - 1; +} + +uint16_t phm_find_closest_vddci(struct pp_atomctrl_voltage_table *vddci_table, uint16_t vddci) +{ + uint32_t i; + + for (i = 0; i < vddci_table->count; i++) { + if (vddci_table->entries[i].value >= vddci) + return vddci_table->entries[i].value; + } + + PP_ASSERT_WITH_CODE(false, + "VDDCI is larger than max VDDCI in VDDCI Voltage Table!", + return vddci_table->entries[i].value); +} + +int phm_find_boot_level(void *table, + uint32_t value, uint32_t *boot_level) +{ + int result = -EINVAL; + uint32_t i; + struct vi_dpm_table *dpm_table = (struct vi_dpm_table *)table; + + for (i = 0; i < dpm_table->count; i++) { + if (value == dpm_table->dpm_level[i].value) { + *boot_level = i; + result = 0; + } + } + + return result; +} + +int phm_get_sclk_for_voltage_evv(struct pp_hwmgr *hwmgr, + phm_ppt_v1_voltage_lookup_table *lookup_table, + uint16_t virtual_voltage_id, int32_t *sclk) +{ + uint8_t entryId; + uint8_t voltageId; + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + + PP_ASSERT_WITH_CODE(lookup_table->count != 0, "Lookup table is empty", return -EINVAL); + + /* search for leakage voltage ID 0xff01 ~ 0xff08 and sckl */ + for (entryId = 0; entryId < table_info->vdd_dep_on_sclk->count; entryId++) { + voltageId = table_info->vdd_dep_on_sclk->entries[entryId].vddInd; + if (lookup_table->entries[voltageId].us_vdd == virtual_voltage_id) + break; + } + + PP_ASSERT_WITH_CODE(entryId < table_info->vdd_dep_on_sclk->count, + "Can't find requested voltage id in vdd_dep_on_sclk table!", + return -EINVAL; + ); + + *sclk = table_info->vdd_dep_on_sclk->entries[entryId].clk; + + return 0; +} + +/** + * Initialize Dynamic State Adjustment Rule Settings + * + * @param hwmgr the address of the powerplay hardware manager. + */ +int phm_initializa_dynamic_state_adjustment_rule_settings(struct pp_hwmgr *hwmgr) +{ + uint32_t table_size; + struct phm_clock_voltage_dependency_table *table_clk_vlt; + struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); + + /* initialize vddc_dep_on_dal_pwrl table */ + table_size = sizeof(uint32_t) + 4 * sizeof(struct phm_clock_voltage_dependency_record); + table_clk_vlt = (struct phm_clock_voltage_dependency_table *)kzalloc(table_size, GFP_KERNEL); + + if (NULL == table_clk_vlt) { + printk(KERN_ERR "[ powerplay ] Can not allocate space for vddc_dep_on_dal_pwrl! 
\n"); + return -ENOMEM; + } else { + table_clk_vlt->count = 4; + table_clk_vlt->entries[0].clk = PP_DAL_POWERLEVEL_ULTRALOW; + table_clk_vlt->entries[0].v = 0; + table_clk_vlt->entries[1].clk = PP_DAL_POWERLEVEL_LOW; + table_clk_vlt->entries[1].v = 720; + table_clk_vlt->entries[2].clk = PP_DAL_POWERLEVEL_NOMINAL; + table_clk_vlt->entries[2].v = 810; + table_clk_vlt->entries[3].clk = PP_DAL_POWERLEVEL_PERFORMANCE; + table_clk_vlt->entries[3].v = 900; + pptable_info->vddc_dep_on_dal_pwrl = table_clk_vlt; + hwmgr->dyn_state.vddc_dep_on_dal_pwrl = table_clk_vlt; + } + + return 0; +} + +int phm_hwmgr_backend_fini(struct pp_hwmgr *hwmgr) +{ + if (NULL != hwmgr->dyn_state.vddc_dep_on_dal_pwrl) { + kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl); + hwmgr->dyn_state.vddc_dep_on_dal_pwrl = NULL; + } + + if (NULL != hwmgr->backend) { + kfree(hwmgr->backend); + hwmgr->backend = NULL; + } + + return 0; +} + +uint32_t phm_get_lowest_enabled_level(struct pp_hwmgr *hwmgr, uint32_t mask) +{ + uint32_t level = 0; + + while (0 == (mask & (1 << level))) + level++; + + return level; +} diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr_ppt.h b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr_ppt.h new file mode 100644 index 000000000000..c9e6c2d80ea6 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr_ppt.h @@ -0,0 +1,105 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef PP_HWMGR_PPT_H +#define PP_HWMGR_PPT_H + +#include "hardwaremanager.h" +#include "smumgr.h" +#include "atom-types.h" + +struct phm_ppt_v1_clock_voltage_dependency_record { + uint32_t clk; + uint8_t vddInd; + uint16_t vdd_offset; + uint16_t vddc; + uint16_t vddgfx; + uint16_t vddci; + uint16_t mvdd; + uint8_t phases; + uint8_t cks_enable; + uint8_t cks_voffset; +}; + +typedef struct phm_ppt_v1_clock_voltage_dependency_record phm_ppt_v1_clock_voltage_dependency_record; + +struct phm_ppt_v1_clock_voltage_dependency_table { + uint32_t count; /* Number of entries. */ + phm_ppt_v1_clock_voltage_dependency_record entries[1]; /* Dynamically allocate count entries. 
*/ +}; + +typedef struct phm_ppt_v1_clock_voltage_dependency_table phm_ppt_v1_clock_voltage_dependency_table; + + +/* Multimedia Clock Voltage Dependency records and table */ +struct phm_ppt_v1_mm_clock_voltage_dependency_record { + uint32_t dclk; /* UVD D-clock */ + uint32_t vclk; /* UVD V-clock */ + uint32_t eclk; /* VCE clock */ + uint32_t aclk; /* ACP clock */ + uint32_t samclock; /* SAMU clock */ + uint8_t vddcInd; + uint16_t vddgfx_offset; + uint16_t vddc; + uint16_t vddgfx; + uint8_t phases; +}; +typedef struct phm_ppt_v1_mm_clock_voltage_dependency_record phm_ppt_v1_mm_clock_voltage_dependency_record; + +struct phm_ppt_v1_mm_clock_voltage_dependency_table { + uint32_t count; /* Number of entries. */ + phm_ppt_v1_mm_clock_voltage_dependency_record entries[1]; /* Dynamically allocate count entries. */ +}; +typedef struct phm_ppt_v1_mm_clock_voltage_dependency_table phm_ppt_v1_mm_clock_voltage_dependency_table; + +struct phm_ppt_v1_voltage_lookup_record { + uint16_t us_calculated; + uint16_t us_vdd; /* Base voltage */ + uint16_t us_cac_low; + uint16_t us_cac_mid; + uint16_t us_cac_high; +}; +typedef struct phm_ppt_v1_voltage_lookup_record phm_ppt_v1_voltage_lookup_record; + +struct phm_ppt_v1_voltage_lookup_table { + uint32_t count; + phm_ppt_v1_voltage_lookup_record entries[1]; /* Dynamically allocate count entries. */ +}; +typedef struct phm_ppt_v1_voltage_lookup_table phm_ppt_v1_voltage_lookup_table; + +/* PCIE records and Table */ + +struct phm_ppt_v1_pcie_record { + uint8_t gen_speed; + uint8_t lane_width; +}; +typedef struct phm_ppt_v1_pcie_record phm_ppt_v1_pcie_record; + +struct phm_ppt_v1_pcie_table { + uint32_t count; /* Number of entries. */ + phm_ppt_v1_pcie_record entries[1]; /* Dynamically allocate count entries. */ +}; +typedef struct phm_ppt_v1_pcie_table phm_ppt_v1_pcie_table; + +#endif + diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c new file mode 100644 index 000000000000..7b2d5000292d --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c @@ -0,0 +1,76 @@ +#include +#include "linux/delay.h" +#include "hwmgr.h" +#include "amd_acpi.h" + +bool acpi_atcs_functions_supported(void *device, uint32_t index) +{ + int32_t result; + struct atcs_verify_interface output_buf = {0}; + + int32_t temp_buffer = 1; + + result = cgs_call_acpi_method(device, CGS_ACPI_METHOD_ATCS, + ATCS_FUNCTION_VERIFY_INTERFACE, + &temp_buffer, + &output_buf, + 1, + sizeof(temp_buffer), + sizeof(output_buf)); + + return result == 0 ? 
(output_buf.function_bits & (1 << (index - 1))) != 0 : false; +} + +int acpi_pcie_perf_request(void *device, uint8_t perf_req, bool advertise) +{ + struct atcs_pref_req_input atcs_input; + struct atcs_pref_req_output atcs_output; + u32 retry = 3; + int result; + struct cgs_system_info info = {0}; + + if (!acpi_atcs_functions_supported(device, ATCS_FUNCTION_PCIE_PERFORMANCE_REQUEST)) + return -EINVAL; + + info.size = sizeof(struct cgs_system_info); + info.info_id = CGS_SYSTEM_INFO_ADAPTER_BDF_ID; + result = cgs_query_system_info(device, &info); + if (result != 0) + return -EINVAL; + atcs_input.client_id = (uint16_t)info.value; + atcs_input.size = sizeof(struct atcs_pref_req_input); + atcs_input.valid_flags_mask = ATCS_VALID_FLAGS_MASK; + atcs_input.flags = ATCS_WAIT_FOR_COMPLETION; + if (advertise) + atcs_input.flags |= ATCS_ADVERTISE_CAPS; + atcs_input.req_type = ATCS_PCIE_LINK_SPEED; + atcs_input.perf_req = perf_req; + + atcs_output.size = sizeof(struct atcs_pref_req_input); + + while (retry--) { + result = cgs_call_acpi_method(device, + CGS_ACPI_METHOD_ATCS, + ATCS_FUNCTION_PCIE_PERFORMANCE_REQUEST, + &atcs_input, + &atcs_output, + 0, + sizeof(atcs_input), + sizeof(atcs_output)); + if (result != 0) + return -EIO; + + switch (atcs_output.ret_val) { + case ATCS_REQUEST_REFUSED: + default: + return -EINVAL; + case ATCS_REQUEST_COMPLETE: + return 0; + case ATCS_REQUEST_IN_PROGRESS: + udelay(10); + break; + } + } + + return 0; +} diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c new file mode 100644 index 000000000000..2a83a4af2904 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c @@ -0,0 +1,1207 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#include +#include +#include + +#include "ppatomctrl.h" +#include "atombios.h" +#include "cgs_common.h" +#include "pp_debug.h" +#include "ppevvmath.h" + +#define MEM_ID_MASK 0xff000000 +#define MEM_ID_SHIFT 24 +#define CLOCK_RANGE_MASK 0x00ffffff +#define CLOCK_RANGE_SHIFT 0 +#define LOW_NIBBLE_MASK 0xf +#define DATA_EQU_PREV 0 +#define DATA_FROM_TABLE 4 + +union voltage_object_info { + struct _ATOM_VOLTAGE_OBJECT_INFO v1; + struct _ATOM_VOLTAGE_OBJECT_INFO_V2 v2; + struct _ATOM_VOLTAGE_OBJECT_INFO_V3_1 v3; +}; + +static int atomctrl_retrieve_ac_timing( + uint8_t index, + ATOM_INIT_REG_BLOCK *reg_block, + pp_atomctrl_mc_reg_table *table) +{ + uint32_t i, j; + uint8_t tmem_id; + ATOM_MEMORY_SETTING_DATA_BLOCK *reg_data = (ATOM_MEMORY_SETTING_DATA_BLOCK *) + ((uint8_t *)reg_block + (2 * sizeof(uint16_t)) + le16_to_cpu(reg_block->usRegIndexTblSize)); + + uint8_t num_ranges = 0; + + while (*(uint32_t *)reg_data != END_OF_REG_DATA_BLOCK && + num_ranges < VBIOS_MAX_AC_TIMING_ENTRIES) { + tmem_id = (uint8_t)((*(uint32_t *)reg_data & MEM_ID_MASK) >> MEM_ID_SHIFT); + + if (index == tmem_id) { + table->mc_reg_table_entry[num_ranges].mclk_max = + (uint32_t)((*(uint32_t *)reg_data & CLOCK_RANGE_MASK) >> + CLOCK_RANGE_SHIFT); + + for (i = 0, j = 1; i < table->last; i++) { + if ((table->mc_reg_address[i].uc_pre_reg_data & + LOW_NIBBLE_MASK) == DATA_FROM_TABLE) { + table->mc_reg_table_entry[num_ranges].mc_data[i] = + (uint32_t)*((uint32_t *)reg_data + j); + j++; + } else if ((table->mc_reg_address[i].uc_pre_reg_data & + LOW_NIBBLE_MASK) == DATA_EQU_PREV) { + table->mc_reg_table_entry[num_ranges].mc_data[i] = + table->mc_reg_table_entry[num_ranges].mc_data[i-1]; + } + } + num_ranges++; + } + + reg_data = (ATOM_MEMORY_SETTING_DATA_BLOCK *) + ((uint8_t *)reg_data + le16_to_cpu(reg_block->usRegDataBlkSize)) ; + } + + PP_ASSERT_WITH_CODE((*(uint32_t *)reg_data == END_OF_REG_DATA_BLOCK), + "Invalid VramInfo table.", return -1); + table->num_entries = num_ranges; + + return 0; +} + +/** + * Get memory clock AC timing registers index from VBIOS table + * VBIOS set end of memory clock AC timing registers by ucPreRegDataLength bit6 = 1 + * @param reg_block the address ATOM_INIT_REG_BLOCK + * @param table the address of MCRegTable + * @return 0 + */ +static int atomctrl_set_mc_reg_address_table( + ATOM_INIT_REG_BLOCK *reg_block, + pp_atomctrl_mc_reg_table *table) +{ + uint8_t i = 0; + uint8_t num_entries = (uint8_t)((le16_to_cpu(reg_block->usRegIndexTblSize)) + / sizeof(ATOM_INIT_REG_INDEX_FORMAT)); + ATOM_INIT_REG_INDEX_FORMAT *format = ®_block->asRegIndexBuf[0]; + + num_entries--; /* subtract 1 data end mark entry */ + + PP_ASSERT_WITH_CODE((num_entries <= VBIOS_MC_REGISTER_ARRAY_SIZE), + "Invalid VramInfo table.", return -1); + + /* ucPreRegDataLength bit6 = 1 is the end of memory clock AC timing registers */ + while ((!(format->ucPreRegDataLength & ACCESS_PLACEHOLDER)) && + (i < num_entries)) { + table->mc_reg_address[i].s1 = + (uint16_t)(le16_to_cpu(format->usRegIndex)); + table->mc_reg_address[i].uc_pre_reg_data = + format->ucPreRegDataLength; + + i++; + format = (ATOM_INIT_REG_INDEX_FORMAT *) + ((uint8_t *)format + sizeof(ATOM_INIT_REG_INDEX_FORMAT)); + } + + table->last = i; + return 0; +} + + +int atomctrl_initialize_mc_reg_table( + struct pp_hwmgr *hwmgr, + uint8_t module_index, + pp_atomctrl_mc_reg_table *table) +{ + ATOM_VRAM_INFO_HEADER_V2_1 *vram_info; + ATOM_INIT_REG_BLOCK *reg_block; + int result = 0; + u8 frev, crev; + u16 size; + + vram_info = (ATOM_VRAM_INFO_HEADER_V2_1 *) + 
cgs_atom_get_data_table(hwmgr->device, + GetIndexIntoMasterTable(DATA, VRAM_Info), &size, &frev, &crev); + + if (module_index >= vram_info->ucNumOfVRAMModule) { + printk(KERN_ERR "[ powerplay ] Invalid VramInfo table."); + result = -1; + } else if (vram_info->sHeader.ucTableFormatRevision < 2) { + printk(KERN_ERR "[ powerplay ] Invalid VramInfo table."); + result = -1; + } + + if (0 == result) { + reg_block = (ATOM_INIT_REG_BLOCK *) + ((uint8_t *)vram_info + le16_to_cpu(vram_info->usMemClkPatchTblOffset)); + result = atomctrl_set_mc_reg_address_table(reg_block, table); + } + + if (0 == result) { + result = atomctrl_retrieve_ac_timing(module_index, + reg_block, table); + } + + return result; +} + +/** + * Set DRAM timings based on engine clock and memory clock. + */ +int atomctrl_set_engine_dram_timings_rv770( + struct pp_hwmgr *hwmgr, + uint32_t engine_clock, + uint32_t memory_clock) +{ + SET_ENGINE_CLOCK_PS_ALLOCATION engine_clock_parameters; + + /* They are both in 10KHz Units. */ + engine_clock_parameters.ulTargetEngineClock = + (uint32_t) engine_clock & SET_CLOCK_FREQ_MASK; + engine_clock_parameters.ulTargetEngineClock |= + (COMPUTE_ENGINE_PLL_PARAM << 24); + + /* in 10 khz units.*/ + engine_clock_parameters.sReserved.ulClock = + (uint32_t) memory_clock & SET_CLOCK_FREQ_MASK; + return cgs_atom_exec_cmd_table(hwmgr->device, + GetIndexIntoMasterTable(COMMAND, DynamicMemorySettings), + &engine_clock_parameters); +} + +/** + * Private Function to get the PowerPlay Table Address. + * WARNING: The tabled returned by this function is in + * dynamically allocated memory. + * The caller has to release if by calling kfree. + */ +static ATOM_VOLTAGE_OBJECT_INFO *get_voltage_info_table(void *device) +{ + int index = GetIndexIntoMasterTable(DATA, VoltageObjectInfo); + u8 frev, crev; + u16 size; + union voltage_object_info *voltage_info; + + voltage_info = (union voltage_object_info *) + cgs_atom_get_data_table(device, index, + &size, &frev, &crev); + + if (voltage_info != NULL) + return (ATOM_VOLTAGE_OBJECT_INFO *) &(voltage_info->v3); + else + return NULL; +} + +static const ATOM_VOLTAGE_OBJECT_V3 *atomctrl_lookup_voltage_type_v3( + const ATOM_VOLTAGE_OBJECT_INFO_V3_1 * voltage_object_info_table, + uint8_t voltage_type, uint8_t voltage_mode) +{ + unsigned int size = le16_to_cpu(voltage_object_info_table->sHeader.usStructureSize); + unsigned int offset = offsetof(ATOM_VOLTAGE_OBJECT_INFO_V3_1, asVoltageObj[0]); + uint8_t *start = (uint8_t *)voltage_object_info_table; + + while (offset < size) { + const ATOM_VOLTAGE_OBJECT_V3 *voltage_object = + (const ATOM_VOLTAGE_OBJECT_V3 *)(start + offset); + + if (voltage_type == voltage_object->asGpioVoltageObj.sHeader.ucVoltageType && + voltage_mode == voltage_object->asGpioVoltageObj.sHeader.ucVoltageMode) + return voltage_object; + + offset += le16_to_cpu(voltage_object->asGpioVoltageObj.sHeader.usSize); + } + + return NULL; +} + +/** atomctrl_get_memory_pll_dividers_si(). 
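+ * Queries the VBIOS ComputeMemoryClockParam command table for the memory
+ * PLL settings that match the requested clock and unpacks the reply into
+ * the caller-supplied mpll_param structure.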
+ * + * @param hwmgr input parameter: pointer to HwMgr + * @param clock_value input parameter: memory clock + * @param dividers output parameter: memory PLL dividers + * @param strobe_mode input parameter: 1 for strobe mode, 0 for performance mode + */ +int atomctrl_get_memory_pll_dividers_si( + struct pp_hwmgr *hwmgr, + uint32_t clock_value, + pp_atomctrl_memory_clock_param *mpll_param, + bool strobe_mode) +{ + COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_1 mpll_parameters; + int result; + + mpll_parameters.ulClock = (uint32_t) clock_value; + mpll_parameters.ucInputFlag = (uint8_t)((strobe_mode) ? 1 : 0); + + result = cgs_atom_exec_cmd_table + (hwmgr->device, + GetIndexIntoMasterTable(COMMAND, ComputeMemoryClockParam), + &mpll_parameters); + + if (0 == result) { + mpll_param->mpll_fb_divider.clk_frac = + mpll_parameters.ulFbDiv.usFbDivFrac; + mpll_param->mpll_fb_divider.cl_kf = + mpll_parameters.ulFbDiv.usFbDiv; + mpll_param->mpll_post_divider = + (uint32_t)mpll_parameters.ucPostDiv; + mpll_param->vco_mode = + (uint32_t)(mpll_parameters.ucPllCntlFlag & + MPLL_CNTL_FLAG_VCO_MODE_MASK); + mpll_param->yclk_sel = + (uint32_t)((mpll_parameters.ucPllCntlFlag & + MPLL_CNTL_FLAG_BYPASS_DQ_PLL) ? 1 : 0); + mpll_param->qdr = + (uint32_t)((mpll_parameters.ucPllCntlFlag & + MPLL_CNTL_FLAG_QDR_ENABLE) ? 1 : 0); + mpll_param->half_rate = + (uint32_t)((mpll_parameters.ucPllCntlFlag & + MPLL_CNTL_FLAG_AD_HALF_RATE) ? 1 : 0); + mpll_param->dll_speed = + (uint32_t)(mpll_parameters.ucDllSpeed); + mpll_param->bw_ctrl = + (uint32_t)(mpll_parameters.ucBWCntl); + } + + return result; +} + +/** atomctrl_get_memory_pll_dividers_vi(). + * + * @param hwmgr input parameter: pointer to HwMgr + * @param clock_value input parameter: memory clock + * @param dividers output parameter: memory PLL dividers + */ +int atomctrl_get_memory_pll_dividers_vi(struct pp_hwmgr *hwmgr, + uint32_t clock_value, pp_atomctrl_memory_clock_param *mpll_param) +{ + COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_2 mpll_parameters; + int result; + + mpll_parameters.ulClock.ulClock = (uint32_t)clock_value; + + result = cgs_atom_exec_cmd_table(hwmgr->device, + GetIndexIntoMasterTable(COMMAND, ComputeMemoryClockParam), + &mpll_parameters); + + if (!result) + mpll_param->mpll_post_divider = + (uint32_t)mpll_parameters.ulClock.ucPostDiv; + + return result; +} + +int atomctrl_get_engine_pll_dividers_kong(struct pp_hwmgr *hwmgr, + uint32_t clock_value, + pp_atomctrl_clock_dividers_kong *dividers) +{ + COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4 pll_parameters; + int result; + + pll_parameters.ulClock = clock_value; + + result = cgs_atom_exec_cmd_table + (hwmgr->device, + GetIndexIntoMasterTable(COMMAND, ComputeMemoryEnginePLL), + &pll_parameters); + + if (0 == result) { + dividers->pll_post_divider = pll_parameters.ucPostDiv; + dividers->real_clock = pll_parameters.ulClock; + } + + return result; +} + +int atomctrl_get_engine_pll_dividers_vi( + struct pp_hwmgr *hwmgr, + uint32_t clock_value, + pp_atomctrl_clock_dividers_vi *dividers) +{ + COMPUTE_GPU_CLOCK_OUTPUT_PARAMETERS_V1_6 pll_patameters; + int result; + + pll_patameters.ulClock.ulClock = clock_value; + pll_patameters.ulClock.ucPostDiv = COMPUTE_GPUCLK_INPUT_FLAG_SCLK; + + result = cgs_atom_exec_cmd_table + (hwmgr->device, + GetIndexIntoMasterTable(COMMAND, ComputeMemoryEnginePLL), + &pll_patameters); + + if (0 == result) { + dividers->pll_post_divider = + pll_patameters.ulClock.ucPostDiv; + dividers->real_clock = + pll_patameters.ulClock.ulClock; + + dividers->ul_fb_div.ul_fb_div_frac = + 
pll_patameters.ulFbDiv.usFbDivFrac; + dividers->ul_fb_div.ul_fb_div = + pll_patameters.ulFbDiv.usFbDiv; + + dividers->uc_pll_ref_div = + pll_patameters.ucPllRefDiv; + dividers->uc_pll_post_div = + pll_patameters.ucPllPostDiv; + dividers->uc_pll_cntl_flag = + pll_patameters.ucPllCntlFlag; + } + + return result; +} + +int atomctrl_get_dfs_pll_dividers_vi( + struct pp_hwmgr *hwmgr, + uint32_t clock_value, + pp_atomctrl_clock_dividers_vi *dividers) +{ + COMPUTE_GPU_CLOCK_OUTPUT_PARAMETERS_V1_6 pll_patameters; + int result; + + pll_patameters.ulClock.ulClock = clock_value; + pll_patameters.ulClock.ucPostDiv = + COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK; + + result = cgs_atom_exec_cmd_table + (hwmgr->device, + GetIndexIntoMasterTable(COMMAND, ComputeMemoryEnginePLL), + &pll_patameters); + + if (0 == result) { + dividers->pll_post_divider = + pll_patameters.ulClock.ucPostDiv; + dividers->real_clock = + pll_patameters.ulClock.ulClock; + + dividers->ul_fb_div.ul_fb_div_frac = + pll_patameters.ulFbDiv.usFbDivFrac; + dividers->ul_fb_div.ul_fb_div = + pll_patameters.ulFbDiv.usFbDiv; + + dividers->uc_pll_ref_div = + pll_patameters.ucPllRefDiv; + dividers->uc_pll_post_div = + pll_patameters.ucPllPostDiv; + dividers->uc_pll_cntl_flag = + pll_patameters.ucPllCntlFlag; + } + + return result; +} + +/** + * Get the reference clock in 10KHz + */ +uint32_t atomctrl_get_reference_clock(struct pp_hwmgr *hwmgr) +{ + ATOM_FIRMWARE_INFO *fw_info; + u8 frev, crev; + u16 size; + uint32_t clock; + + fw_info = (ATOM_FIRMWARE_INFO *) + cgs_atom_get_data_table(hwmgr->device, + GetIndexIntoMasterTable(DATA, FirmwareInfo), + &size, &frev, &crev); + + if (fw_info == NULL) + clock = 2700; + else + clock = (uint32_t)(le16_to_cpu(fw_info->usReferenceClock)); + + return clock; +} + +/** + * Returns true if the given voltage type is controlled by GPIO pins. + * voltage_type is one of SET_VOLTAGE_TYPE_ASIC_VDDC, + * SET_VOLTAGE_TYPE_ASIC_MVDDC, SET_VOLTAGE_TYPE_ASIC_MVDDQ. + * voltage_mode is one of ATOM_SET_VOLTAGE, ATOM_SET_VOLTAGE_PHASE + */ +bool atomctrl_is_voltage_controled_by_gpio_v3( + struct pp_hwmgr *hwmgr, + uint8_t voltage_type, + uint8_t voltage_mode) +{ + ATOM_VOLTAGE_OBJECT_INFO_V3_1 *voltage_info = + (ATOM_VOLTAGE_OBJECT_INFO_V3_1 *)get_voltage_info_table(hwmgr->device); + bool ret; + + PP_ASSERT_WITH_CODE((NULL != voltage_info), + "Could not find Voltage Table in BIOS.", return false;); + + ret = (NULL != atomctrl_lookup_voltage_type_v3 + (voltage_info, voltage_type, voltage_mode)) ? 
true : false; + + return ret; +} + +int atomctrl_get_voltage_table_v3( + struct pp_hwmgr *hwmgr, + uint8_t voltage_type, + uint8_t voltage_mode, + pp_atomctrl_voltage_table *voltage_table) +{ + ATOM_VOLTAGE_OBJECT_INFO_V3_1 *voltage_info = + (ATOM_VOLTAGE_OBJECT_INFO_V3_1 *)get_voltage_info_table(hwmgr->device); + const ATOM_VOLTAGE_OBJECT_V3 *voltage_object; + unsigned int i; + + PP_ASSERT_WITH_CODE((NULL != voltage_info), + "Could not find Voltage Table in BIOS.", return -1;); + + voltage_object = atomctrl_lookup_voltage_type_v3 + (voltage_info, voltage_type, voltage_mode); + + if (voltage_object == NULL) + return -1; + + PP_ASSERT_WITH_CODE( + (voltage_object->asGpioVoltageObj.ucGpioEntryNum <= + PP_ATOMCTRL_MAX_VOLTAGE_ENTRIES), + "Too many voltage entries!", + return -1; + ); + + for (i = 0; i < voltage_object->asGpioVoltageObj.ucGpioEntryNum; i++) { + voltage_table->entries[i].value = + voltage_object->asGpioVoltageObj.asVolGpioLut[i].usVoltageValue; + voltage_table->entries[i].smio_low = + voltage_object->asGpioVoltageObj.asVolGpioLut[i].ulVoltageId; + } + + voltage_table->mask_low = + voltage_object->asGpioVoltageObj.ulGpioMaskVal; + voltage_table->count = + voltage_object->asGpioVoltageObj.ucGpioEntryNum; + voltage_table->phase_delay = + voltage_object->asGpioVoltageObj.ucPhaseDelay; + + return 0; +} + +static bool atomctrl_lookup_gpio_pin( + ATOM_GPIO_PIN_LUT * gpio_lookup_table, + const uint32_t pinId, + pp_atomctrl_gpio_pin_assignment *gpio_pin_assignment) +{ + unsigned int size = le16_to_cpu(gpio_lookup_table->sHeader.usStructureSize); + unsigned int offset = offsetof(ATOM_GPIO_PIN_LUT, asGPIO_Pin[0]); + uint8_t *start = (uint8_t *)gpio_lookup_table; + + while (offset < size) { + const ATOM_GPIO_PIN_ASSIGNMENT *pin_assignment = + (const ATOM_GPIO_PIN_ASSIGNMENT *)(start + offset); + + if (pinId == pin_assignment->ucGPIO_ID) { + gpio_pin_assignment->uc_gpio_pin_bit_shift = + pin_assignment->ucGpioPinBitShift; + gpio_pin_assignment->us_gpio_pin_aindex = + le16_to_cpu(pin_assignment->usGpioPin_AIndex); + return false; + } + + offset += offsetof(ATOM_GPIO_PIN_ASSIGNMENT, ucGPIO_ID) + 1; + } + + return true; +} + +/** + * Private Function to get the PowerPlay Table Address. + * WARNING: The tabled returned by this function is in + * dynamically allocated memory. + * The caller has to release if by calling kfree. + */ +static ATOM_GPIO_PIN_LUT *get_gpio_lookup_table(void *device) +{ + u8 frev, crev; + u16 size; + void *table_address; + + table_address = (ATOM_GPIO_PIN_LUT *) + cgs_atom_get_data_table(device, + GetIndexIntoMasterTable(DATA, GPIO_Pin_LUT), + &size, &frev, &crev); + + PP_ASSERT_WITH_CODE((NULL != table_address), + "Error retrieving BIOS Table Address!", return NULL;); + + return (ATOM_GPIO_PIN_LUT *)table_address; +} + +/** + * Returns 1 if the given pin id find in lookup table. 
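+ * The lookup is done against the VBIOS GPIO_Pin_LUT data table; on a match
+ * the pin's bit shift and register index are copied into the supplied
+ * gpio_pin_assignment.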
+ */ +bool atomctrl_get_pp_assign_pin( + struct pp_hwmgr *hwmgr, + const uint32_t pinId, + pp_atomctrl_gpio_pin_assignment *gpio_pin_assignment) +{ + bool bRet = 0; + ATOM_GPIO_PIN_LUT *gpio_lookup_table = + get_gpio_lookup_table(hwmgr->device); + + PP_ASSERT_WITH_CODE((NULL != gpio_lookup_table), + "Could not find GPIO lookup Table in BIOS.", return -1); + + bRet = atomctrl_lookup_gpio_pin(gpio_lookup_table, pinId, + gpio_pin_assignment); + + return bRet; +} + +int atomctrl_calculate_voltage_evv_on_sclk( + struct pp_hwmgr *hwmgr, + uint8_t voltage_type, + uint32_t sclk, + uint16_t virtual_voltage_Id, + uint16_t *voltage, + uint16_t dpm_level, + bool debug) +{ + ATOM_ASIC_PROFILING_INFO_V3_4 *getASICProfilingInfo; + + EFUSE_LINEAR_FUNC_PARAM sRO_fuse; + EFUSE_LINEAR_FUNC_PARAM sCACm_fuse; + EFUSE_LINEAR_FUNC_PARAM sCACb_fuse; + EFUSE_LOGISTIC_FUNC_PARAM sKt_Beta_fuse; + EFUSE_LOGISTIC_FUNC_PARAM sKv_m_fuse; + EFUSE_LOGISTIC_FUNC_PARAM sKv_b_fuse; + EFUSE_INPUT_PARAMETER sInput_FuseValues; + READ_EFUSE_VALUE_PARAMETER sOutput_FuseValues; + + uint32_t ul_RO_fused, ul_CACb_fused, ul_CACm_fused, ul_Kt_Beta_fused, ul_Kv_m_fused, ul_Kv_b_fused; + fInt fSM_A0, fSM_A1, fSM_A2, fSM_A3, fSM_A4, fSM_A5, fSM_A6, fSM_A7; + fInt fMargin_RO_a, fMargin_RO_b, fMargin_RO_c, fMargin_fixed, fMargin_FMAX_mean, fMargin_Plat_mean, fMargin_FMAX_sigma, fMargin_Plat_sigma, fMargin_DC_sigma; + fInt fLkg_FT, repeat; + fInt fMicro_FMAX, fMicro_CR, fSigma_FMAX, fSigma_CR, fSigma_DC, fDC_SCLK, fSquared_Sigma_DC, fSquared_Sigma_CR, fSquared_Sigma_FMAX; + fInt fRLL_LoadLine, fPowerDPMx, fDerateTDP, fVDDC_base, fA_Term, fC_Term, fB_Term, fRO_DC_margin; + fInt fRO_fused, fCACm_fused, fCACb_fused, fKv_m_fused, fKv_b_fused, fKt_Beta_fused, fFT_Lkg_V0NORM; + fInt fSclk_margin, fSclk, fEVV_V; + fInt fV_min, fV_max, fT_prod, fLKG_Factor, fT_FT, fV_FT, fV_x, fTDP_Power, fTDP_Power_right, fTDP_Power_left, fTDP_Current, fV_NL; + uint32_t ul_FT_Lkg_V0NORM; + fInt fLn_MaxDivMin, fMin, fAverage, fRange; + fInt fRoots[2]; + fInt fStepSize = GetScaledFraction(625, 100000); + + int result; + + getASICProfilingInfo = (ATOM_ASIC_PROFILING_INFO_V3_4 *) + cgs_atom_get_data_table(hwmgr->device, + GetIndexIntoMasterTable(DATA, ASIC_ProfilingInfo), + NULL, NULL, NULL); + + if (!getASICProfilingInfo) + return -1; + + if(getASICProfilingInfo->asHeader.ucTableFormatRevision < 3 || + (getASICProfilingInfo->asHeader.ucTableFormatRevision == 3 && + getASICProfilingInfo->asHeader.ucTableContentRevision < 4)) + return -1; + + /*----------------------------------------------------------- + *GETTING MULTI-STEP PARAMETERS RELATED TO CURRENT DPM LEVEL + *----------------------------------------------------------- + */ + fRLL_LoadLine = Divide(getASICProfilingInfo->ulLoadLineSlop, 1000); + + switch (dpm_level) { + case 1: + fPowerDPMx = Convert_ULONG_ToFraction(getASICProfilingInfo->usPowerDpm1); + fDerateTDP = GetScaledFraction(getASICProfilingInfo->ulTdpDerateDPM1, 1000); + break; + case 2: + fPowerDPMx = Convert_ULONG_ToFraction(getASICProfilingInfo->usPowerDpm2); + fDerateTDP = GetScaledFraction(getASICProfilingInfo->ulTdpDerateDPM2, 1000); + break; + case 3: + fPowerDPMx = Convert_ULONG_ToFraction(getASICProfilingInfo->usPowerDpm3); + fDerateTDP = GetScaledFraction(getASICProfilingInfo->ulTdpDerateDPM3, 1000); + break; + case 4: + fPowerDPMx = Convert_ULONG_ToFraction(getASICProfilingInfo->usPowerDpm4); + fDerateTDP = GetScaledFraction(getASICProfilingInfo->ulTdpDerateDPM4, 1000); + break; + case 5: + fPowerDPMx = 
Convert_ULONG_ToFraction(getASICProfilingInfo->usPowerDpm5); + fDerateTDP = GetScaledFraction(getASICProfilingInfo->ulTdpDerateDPM5, 1000); + break; + case 6: + fPowerDPMx = Convert_ULONG_ToFraction(getASICProfilingInfo->usPowerDpm6); + fDerateTDP = GetScaledFraction(getASICProfilingInfo->ulTdpDerateDPM6, 1000); + break; + case 7: + fPowerDPMx = Convert_ULONG_ToFraction(getASICProfilingInfo->usPowerDpm7); + fDerateTDP = GetScaledFraction(getASICProfilingInfo->ulTdpDerateDPM7, 1000); + break; + default: + printk(KERN_ERR "DPM Level not supported\n"); + fPowerDPMx = Convert_ULONG_ToFraction(1); + fDerateTDP = GetScaledFraction(getASICProfilingInfo->ulTdpDerateDPM0, 1000); + } + + /*------------------------- + * DECODING FUSE VALUES + * ------------------------ + */ + /*Decode RO_Fused*/ + sRO_fuse = getASICProfilingInfo->sRoFuse; + + sInput_FuseValues.usEfuseIndex = sRO_fuse.usEfuseIndex; + sInput_FuseValues.ucBitShift = sRO_fuse.ucEfuseBitLSB; + sInput_FuseValues.ucBitLength = sRO_fuse.ucEfuseLength; + + sOutput_FuseValues.sEfuse = sInput_FuseValues; + + result = cgs_atom_exec_cmd_table(hwmgr->device, + GetIndexIntoMasterTable(COMMAND, ReadEfuseValue), + &sOutput_FuseValues); + + if (result) + return result; + + /* Finally, the actual fuse value */ + ul_RO_fused = sOutput_FuseValues.ulEfuseValue; + fMin = GetScaledFraction(sRO_fuse.ulEfuseMin, 1); + fRange = GetScaledFraction(sRO_fuse.ulEfuseEncodeRange, 1); + fRO_fused = fDecodeLinearFuse(ul_RO_fused, fMin, fRange, sRO_fuse.ucEfuseLength); + + sCACm_fuse = getASICProfilingInfo->sCACm; + + sInput_FuseValues.usEfuseIndex = sCACm_fuse.usEfuseIndex; + sInput_FuseValues.ucBitShift = sCACm_fuse.ucEfuseBitLSB; + sInput_FuseValues.ucBitLength = sCACm_fuse.ucEfuseLength; + + sOutput_FuseValues.sEfuse = sInput_FuseValues; + + result = cgs_atom_exec_cmd_table(hwmgr->device, + GetIndexIntoMasterTable(COMMAND, ReadEfuseValue), + &sOutput_FuseValues); + + if (result) + return result; + + ul_CACm_fused = sOutput_FuseValues.ulEfuseValue; + fMin = GetScaledFraction(sCACm_fuse.ulEfuseMin, 1000); + fRange = GetScaledFraction(sCACm_fuse.ulEfuseEncodeRange, 1000); + + fCACm_fused = fDecodeLinearFuse(ul_CACm_fused, fMin, fRange, sCACm_fuse.ucEfuseLength); + + sCACb_fuse = getASICProfilingInfo->sCACb; + + sInput_FuseValues.usEfuseIndex = sCACb_fuse.usEfuseIndex; + sInput_FuseValues.ucBitShift = sCACb_fuse.ucEfuseBitLSB; + sInput_FuseValues.ucBitLength = sCACb_fuse.ucEfuseLength; + sOutput_FuseValues.sEfuse = sInput_FuseValues; + + result = cgs_atom_exec_cmd_table(hwmgr->device, + GetIndexIntoMasterTable(COMMAND, ReadEfuseValue), + &sOutput_FuseValues); + + if (result) + return result; + + ul_CACb_fused = sOutput_FuseValues.ulEfuseValue; + fMin = GetScaledFraction(sCACb_fuse.ulEfuseMin, 1000); + fRange = GetScaledFraction(sCACb_fuse.ulEfuseEncodeRange, 1000); + + fCACb_fused = fDecodeLinearFuse(ul_CACb_fused, fMin, fRange, sCACb_fuse.ucEfuseLength); + + sKt_Beta_fuse = getASICProfilingInfo->sKt_b; + + sInput_FuseValues.usEfuseIndex = sKt_Beta_fuse.usEfuseIndex; + sInput_FuseValues.ucBitShift = sKt_Beta_fuse.ucEfuseBitLSB; + sInput_FuseValues.ucBitLength = sKt_Beta_fuse.ucEfuseLength; + + sOutput_FuseValues.sEfuse = sInput_FuseValues; + + result = cgs_atom_exec_cmd_table(hwmgr->device, + GetIndexIntoMasterTable(COMMAND, ReadEfuseValue), + &sOutput_FuseValues); + + if (result) + return result; + + ul_Kt_Beta_fused = sOutput_FuseValues.ulEfuseValue; + fAverage = GetScaledFraction(sKt_Beta_fuse.ulEfuseEncodeAverage, 1000); + fRange = 
GetScaledFraction(sKt_Beta_fuse.ulEfuseEncodeRange, 1000); + + fKt_Beta_fused = fDecodeLogisticFuse(ul_Kt_Beta_fused, + fAverage, fRange, sKt_Beta_fuse.ucEfuseLength); + + sKv_m_fuse = getASICProfilingInfo->sKv_m; + + sInput_FuseValues.usEfuseIndex = sKv_m_fuse.usEfuseIndex; + sInput_FuseValues.ucBitShift = sKv_m_fuse.ucEfuseBitLSB; + sInput_FuseValues.ucBitLength = sKv_m_fuse.ucEfuseLength; + + sOutput_FuseValues.sEfuse = sInput_FuseValues; + + result = cgs_atom_exec_cmd_table(hwmgr->device, + GetIndexIntoMasterTable(COMMAND, ReadEfuseValue), + &sOutput_FuseValues); + if (result) + return result; + + ul_Kv_m_fused = sOutput_FuseValues.ulEfuseValue; + fAverage = GetScaledFraction(sKv_m_fuse.ulEfuseEncodeAverage, 1000); + fRange = GetScaledFraction((sKv_m_fuse.ulEfuseEncodeRange & 0x7fffffff), 1000); + fRange = fMultiply(fRange, ConvertToFraction(-1)); + + fKv_m_fused = fDecodeLogisticFuse(ul_Kv_m_fused, + fAverage, fRange, sKv_m_fuse.ucEfuseLength); + + sKv_b_fuse = getASICProfilingInfo->sKv_b; + + sInput_FuseValues.usEfuseIndex = sKv_b_fuse.usEfuseIndex; + sInput_FuseValues.ucBitShift = sKv_b_fuse.ucEfuseBitLSB; + sInput_FuseValues.ucBitLength = sKv_b_fuse.ucEfuseLength; + sOutput_FuseValues.sEfuse = sInput_FuseValues; + + result = cgs_atom_exec_cmd_table(hwmgr->device, + GetIndexIntoMasterTable(COMMAND, ReadEfuseValue), + &sOutput_FuseValues); + + if (result) + return result; + + ul_Kv_b_fused = sOutput_FuseValues.ulEfuseValue; + fAverage = GetScaledFraction(sKv_b_fuse.ulEfuseEncodeAverage, 1000); + fRange = GetScaledFraction(sKv_b_fuse.ulEfuseEncodeRange, 1000); + + fKv_b_fused = fDecodeLogisticFuse(ul_Kv_b_fused, + fAverage, fRange, sKv_b_fuse.ucEfuseLength); + + /* Decoding the Leakage - No special struct container */ + /* + * usLkgEuseIndex=56 + * ucLkgEfuseBitLSB=6 + * ucLkgEfuseLength=10 + * ulLkgEncodeLn_MaxDivMin=69077 + * ulLkgEncodeMax=1000000 + * ulLkgEncodeMin=1000 + * ulEfuseLogisticAlpha=13 + */ + + sInput_FuseValues.usEfuseIndex = getASICProfilingInfo->usLkgEuseIndex; + sInput_FuseValues.ucBitShift = getASICProfilingInfo->ucLkgEfuseBitLSB; + sInput_FuseValues.ucBitLength = getASICProfilingInfo->ucLkgEfuseLength; + + sOutput_FuseValues.sEfuse = sInput_FuseValues; + + result = cgs_atom_exec_cmd_table(hwmgr->device, + GetIndexIntoMasterTable(COMMAND, ReadEfuseValue), + &sOutput_FuseValues); + + if (result) + return result; + + ul_FT_Lkg_V0NORM = sOutput_FuseValues.ulEfuseValue; + fLn_MaxDivMin = GetScaledFraction(getASICProfilingInfo->ulLkgEncodeLn_MaxDivMin, 10000); + fMin = GetScaledFraction(getASICProfilingInfo->ulLkgEncodeMin, 10000); + + fFT_Lkg_V0NORM = fDecodeLeakageID(ul_FT_Lkg_V0NORM, + fLn_MaxDivMin, fMin, getASICProfilingInfo->ucLkgEfuseLength); + fLkg_FT = fFT_Lkg_V0NORM; + + /*------------------------------------------- + * PART 2 - Grabbing all required values + *------------------------------------------- + */ + fSM_A0 = fMultiply(GetScaledFraction(getASICProfilingInfo->ulSM_A0, 1000000), + ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A0_sign))); + fSM_A1 = fMultiply(GetScaledFraction(getASICProfilingInfo->ulSM_A1, 1000000), + ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A1_sign))); + fSM_A2 = fMultiply(GetScaledFraction(getASICProfilingInfo->ulSM_A2, 100000), + ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A2_sign))); + fSM_A3 = fMultiply(GetScaledFraction(getASICProfilingInfo->ulSM_A3, 1000000), + ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A3_sign))); + fSM_A4 = fMultiply(GetScaledFraction(getASICProfilingInfo->ulSM_A4, 
1000000), + ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A4_sign))); + fSM_A5 = fMultiply(GetScaledFraction(getASICProfilingInfo->ulSM_A5, 1000), + ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A5_sign))); + fSM_A6 = fMultiply(GetScaledFraction(getASICProfilingInfo->ulSM_A6, 1000), + ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A6_sign))); + fSM_A7 = fMultiply(GetScaledFraction(getASICProfilingInfo->ulSM_A7, 1000), + ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A7_sign))); + + fMargin_RO_a = ConvertToFraction(getASICProfilingInfo->ulMargin_RO_a); + fMargin_RO_b = ConvertToFraction(getASICProfilingInfo->ulMargin_RO_b); + fMargin_RO_c = ConvertToFraction(getASICProfilingInfo->ulMargin_RO_c); + + fMargin_fixed = ConvertToFraction(getASICProfilingInfo->ulMargin_fixed); + + fMargin_FMAX_mean = GetScaledFraction( + getASICProfilingInfo->ulMargin_Fmax_mean, 10000); + fMargin_Plat_mean = GetScaledFraction( + getASICProfilingInfo->ulMargin_plat_mean, 10000); + fMargin_FMAX_sigma = GetScaledFraction( + getASICProfilingInfo->ulMargin_Fmax_sigma, 10000); + fMargin_Plat_sigma = GetScaledFraction( + getASICProfilingInfo->ulMargin_plat_sigma, 10000); + + fMargin_DC_sigma = GetScaledFraction( + getASICProfilingInfo->ulMargin_DC_sigma, 100); + fMargin_DC_sigma = fDivide(fMargin_DC_sigma, ConvertToFraction(1000)); + + fCACm_fused = fDivide(fCACm_fused, ConvertToFraction(100)); + fCACb_fused = fDivide(fCACb_fused, ConvertToFraction(100)); + fKt_Beta_fused = fDivide(fKt_Beta_fused, ConvertToFraction(100)); + fKv_m_fused = fNegate(fDivide(fKv_m_fused, ConvertToFraction(100))); + fKv_b_fused = fDivide(fKv_b_fused, ConvertToFraction(10)); + + fSclk = GetScaledFraction(sclk, 100); + + fV_max = fDivide(GetScaledFraction( + getASICProfilingInfo->ulMaxVddc, 1000), ConvertToFraction(4)); + fT_prod = GetScaledFraction(getASICProfilingInfo->ulBoardCoreTemp, 10); + fLKG_Factor = GetScaledFraction(getASICProfilingInfo->ulEvvLkgFactor, 100); + fT_FT = GetScaledFraction(getASICProfilingInfo->ulLeakageTemp, 10); + fV_FT = fDivide(GetScaledFraction( + getASICProfilingInfo->ulLeakageVoltage, 1000), ConvertToFraction(4)); + fV_min = fDivide(GetScaledFraction( + getASICProfilingInfo->ulMinVddc, 1000), ConvertToFraction(4)); + + /*----------------------- + * PART 3 + *----------------------- + */ + + fA_Term = fAdd(fMargin_RO_a, fAdd(fMultiply(fSM_A4,fSclk), fSM_A5)); + fB_Term = fAdd(fAdd(fMultiply(fSM_A2, fSclk), fSM_A6), fMargin_RO_b); + fC_Term = fAdd(fMargin_RO_c, + fAdd(fMultiply(fSM_A0,fLkg_FT), + fAdd(fMultiply(fSM_A1, fMultiply(fLkg_FT,fSclk)), + fAdd(fMultiply(fSM_A3, fSclk), + fSubtract(fSM_A7,fRO_fused))))); + + fVDDC_base = fSubtract(fRO_fused, + fSubtract(fMargin_RO_c, + fSubtract(fSM_A3, fMultiply(fSM_A1, fSclk)))); + fVDDC_base = fDivide(fVDDC_base, fAdd(fMultiply(fSM_A0,fSclk), fSM_A2)); + + repeat = fSubtract(fVDDC_base, + fDivide(fMargin_DC_sigma, ConvertToFraction(1000))); + + fRO_DC_margin = fAdd(fMultiply(fMargin_RO_a, + fGetSquare(repeat)), + fAdd(fMultiply(fMargin_RO_b, repeat), + fMargin_RO_c)); + + fDC_SCLK = fSubtract(fRO_fused, + fSubtract(fRO_DC_margin, + fSubtract(fSM_A3, + fMultiply(fSM_A2, repeat)))); + fDC_SCLK = fDivide(fDC_SCLK, fAdd(fMultiply(fSM_A0,repeat), fSM_A1)); + + fSigma_DC = fSubtract(fSclk, fDC_SCLK); + + fMicro_FMAX = fMultiply(fSclk, fMargin_FMAX_mean); + fMicro_CR = fMultiply(fSclk, fMargin_Plat_mean); + fSigma_FMAX = fMultiply(fSclk, fMargin_FMAX_sigma); + fSigma_CR = fMultiply(fSclk, fMargin_Plat_sigma); + + fSquared_Sigma_DC = 
fGetSquare(fSigma_DC); + fSquared_Sigma_CR = fGetSquare(fSigma_CR); + fSquared_Sigma_FMAX = fGetSquare(fSigma_FMAX); + + fSclk_margin = fAdd(fMicro_FMAX, + fAdd(fMicro_CR, + fAdd(fMargin_fixed, + fSqrt(fAdd(fSquared_Sigma_FMAX, + fAdd(fSquared_Sigma_DC, fSquared_Sigma_CR)))))); + /* + fA_Term = fSM_A4 * (fSclk + fSclk_margin) + fSM_A5; + fB_Term = fSM_A2 * (fSclk + fSclk_margin) + fSM_A6; + fC_Term = fRO_DC_margin + fSM_A0 * fLkg_FT + fSM_A1 * fLkg_FT * (fSclk + fSclk_margin) + fSM_A3 * (fSclk + fSclk_margin) + fSM_A7 - fRO_fused; + */ + + fA_Term = fAdd(fMultiply(fSM_A4, fAdd(fSclk, fSclk_margin)), fSM_A5); + fB_Term = fAdd(fMultiply(fSM_A2, fAdd(fSclk, fSclk_margin)), fSM_A6); + fC_Term = fAdd(fRO_DC_margin, + fAdd(fMultiply(fSM_A0, fLkg_FT), + fAdd(fMultiply(fMultiply(fSM_A1, fLkg_FT), + fAdd(fSclk, fSclk_margin)), + fAdd(fMultiply(fSM_A3, + fAdd(fSclk, fSclk_margin)), + fSubtract(fSM_A7, fRO_fused))))); + + SolveQuadracticEqn(fA_Term, fB_Term, fC_Term, fRoots); + + if (GreaterThan(fRoots[0], fRoots[1])) + fEVV_V = fRoots[1]; + else + fEVV_V = fRoots[0]; + + if (GreaterThan(fV_min, fEVV_V)) + fEVV_V = fV_min; + else if (GreaterThan(fEVV_V, fV_max)) + fEVV_V = fSubtract(fV_max, fStepSize); + + fEVV_V = fRoundUpByStepSize(fEVV_V, fStepSize, 0); + + /*----------------- + * PART 4 + *----------------- + */ + + fV_x = fV_min; + + while (GreaterThan(fAdd(fV_max, fStepSize), fV_x)) { + fTDP_Power_left = fMultiply(fMultiply(fMultiply(fAdd( + fMultiply(fCACm_fused, fV_x), fCACb_fused), fSclk), + fGetSquare(fV_x)), fDerateTDP); + + fTDP_Power_right = fMultiply(fFT_Lkg_V0NORM, fMultiply(fLKG_Factor, + fMultiply(fExponential(fMultiply(fAdd(fMultiply(fKv_m_fused, + fT_prod), fKv_b_fused), fV_x)), fV_x))); + fTDP_Power_right = fMultiply(fTDP_Power_right, fExponential(fMultiply( + fKt_Beta_fused, fT_prod))); + fTDP_Power_right = fDivide(fTDP_Power_right, fExponential(fMultiply( + fAdd(fMultiply(fKv_m_fused, fT_prod), fKv_b_fused), fV_FT))); + fTDP_Power_right = fDivide(fTDP_Power_right, fExponential(fMultiply( + fKt_Beta_fused, fT_FT))); + + fTDP_Power = fAdd(fTDP_Power_left, fTDP_Power_right); + + fTDP_Current = fDivide(fTDP_Power, fV_x); + + fV_NL = fAdd(fV_x, fDivide(fMultiply(fTDP_Current, fRLL_LoadLine), + ConvertToFraction(10))); + + fV_NL = fRoundUpByStepSize(fV_NL, fStepSize, 0); + + if (GreaterThan(fV_max, fV_NL) && + (GreaterThan(fV_NL,fEVV_V) || + Equal(fV_NL, fEVV_V))) { + fV_NL = fMultiply(fV_NL, ConvertToFraction(1000)); + + *voltage = (uint16_t)fV_NL.partial.real; + break; + } else + fV_x = fAdd(fV_x, fStepSize); + } + + return result; +} + +/** atomctrl_get_voltage_evv_on_sclk gets voltage via call to ATOM COMMAND table. + * @param hwmgr input: pointer to hwManager + * @param voltage_type input: type of EVV voltage VDDC or VDDGFX + * @param sclk input: in 10Khz unit. DPM state SCLK frequency + * which is define in PPTable SCLK/VDDC dependence + * table associated with this virtual_voltage_Id + * @param virtual_voltage_Id input: voltage id which match per voltage DPM state: 0xff01, 0xff02.. 
0xff08 + * @param voltage output: real voltage level in unit of mv + */ +int atomctrl_get_voltage_evv_on_sclk( + struct pp_hwmgr *hwmgr, + uint8_t voltage_type, + uint32_t sclk, uint16_t virtual_voltage_Id, + uint16_t *voltage) +{ + int result; + GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_2 get_voltage_info_param_space; + + get_voltage_info_param_space.ucVoltageType = + voltage_type; + get_voltage_info_param_space.ucVoltageMode = + ATOM_GET_VOLTAGE_EVV_VOLTAGE; + get_voltage_info_param_space.usVoltageLevel = + virtual_voltage_Id; + get_voltage_info_param_space.ulSCLKFreq = + sclk; + + result = cgs_atom_exec_cmd_table(hwmgr->device, + GetIndexIntoMasterTable(COMMAND, GetVoltageInfo), + &get_voltage_info_param_space); + + if (0 != result) + return result; + + *voltage = ((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_2 *) + (&get_voltage_info_param_space))->usVoltageLevel; + + return result; +} + +/** + * Get the mpll reference clock in 10KHz + */ +uint32_t atomctrl_get_mpll_reference_clock(struct pp_hwmgr *hwmgr) +{ + ATOM_COMMON_TABLE_HEADER *fw_info; + uint32_t clock; + u8 frev, crev; + u16 size; + + fw_info = (ATOM_COMMON_TABLE_HEADER *) + cgs_atom_get_data_table(hwmgr->device, + GetIndexIntoMasterTable(DATA, FirmwareInfo), + &size, &frev, &crev); + + if (fw_info == NULL) + clock = 2700; + else { + if ((fw_info->ucTableFormatRevision == 2) && + (le16_to_cpu(fw_info->usStructureSize) >= sizeof(ATOM_FIRMWARE_INFO_V2_1))) { + ATOM_FIRMWARE_INFO_V2_1 *fwInfo_2_1 = + (ATOM_FIRMWARE_INFO_V2_1 *)fw_info; + clock = (uint32_t)(le16_to_cpu(fwInfo_2_1->usMemoryReferenceClock)); + } else { + ATOM_FIRMWARE_INFO *fwInfo_0_0 = + (ATOM_FIRMWARE_INFO *)fw_info; + clock = (uint32_t)(le16_to_cpu(fwInfo_0_0->usReferenceClock)); + } + } + + return clock; +} + +/** + * Get the asic internal spread spectrum table + */ +static ATOM_ASIC_INTERNAL_SS_INFO *asic_internal_ss_get_ss_table(void *device) +{ + ATOM_ASIC_INTERNAL_SS_INFO *table = NULL; + u8 frev, crev; + u16 size; + + table = (ATOM_ASIC_INTERNAL_SS_INFO *) + cgs_atom_get_data_table(device, + GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info), + &size, &frev, &crev); + + return table; +} + +/** + * Get the asic internal spread spectrum assignment + */ +static int asic_internal_ss_get_ss_asignment(struct pp_hwmgr *hwmgr, + const uint8_t clockSource, + const uint32_t clockSpeed, + pp_atomctrl_internal_ss_info *ssEntry) +{ + ATOM_ASIC_INTERNAL_SS_INFO *table; + ATOM_ASIC_SS_ASSIGNMENT *ssInfo; + int entry_found = 0; + + memset(ssEntry, 0x00, sizeof(pp_atomctrl_internal_ss_info)); + + table = asic_internal_ss_get_ss_table(hwmgr->device); + + if (NULL == table) + return -1; + + ssInfo = &table->asSpreadSpectrum[0]; + + while (((uint8_t *)ssInfo - (uint8_t *)table) < + le16_to_cpu(table->sHeader.usStructureSize)) { + if ((clockSource == ssInfo->ucClockIndication) && + ((uint32_t)clockSpeed <= le32_to_cpu(ssInfo->ulTargetClockRange))) { + entry_found = 1; + break; + } + + ssInfo = (ATOM_ASIC_SS_ASSIGNMENT *)((uint8_t *)ssInfo + + sizeof(ATOM_ASIC_SS_ASSIGNMENT)); + } + + if (entry_found) { + ssEntry->speed_spectrum_percentage = + ssInfo->usSpreadSpectrumPercentage; + ssEntry->speed_spectrum_rate = ssInfo->usSpreadRateInKhz; + + if (((GET_DATA_TABLE_MAJOR_REVISION(table) == 2) && + (GET_DATA_TABLE_MINOR_REVISION(table) >= 2)) || + (GET_DATA_TABLE_MAJOR_REVISION(table) == 3)) { + ssEntry->speed_spectrum_rate /= 100; + } + + switch (ssInfo->ucSpreadSpectrumMode) { + case 0: + ssEntry->speed_spectrum_mode = + pp_atomctrl_spread_spectrum_mode_down; + break; + case 1: + 
ssEntry->speed_spectrum_mode = + pp_atomctrl_spread_spectrum_mode_center; + break; + default: + ssEntry->speed_spectrum_mode = + pp_atomctrl_spread_spectrum_mode_down; + break; + } + } + + return entry_found ? 0 : 1; +} + +/** + * Get the memory clock spread spectrum info + */ +int atomctrl_get_memory_clock_spread_spectrum( + struct pp_hwmgr *hwmgr, + const uint32_t memory_clock, + pp_atomctrl_internal_ss_info *ssInfo) +{ + return asic_internal_ss_get_ss_asignment(hwmgr, + ASIC_INTERNAL_MEMORY_SS, memory_clock, ssInfo); +} +/** + * Get the engine clock spread spectrum info + */ +int atomctrl_get_engine_clock_spread_spectrum( + struct pp_hwmgr *hwmgr, + const uint32_t engine_clock, + pp_atomctrl_internal_ss_info *ssInfo) +{ + return asic_internal_ss_get_ss_asignment(hwmgr, + ASIC_INTERNAL_ENGINE_SS, engine_clock, ssInfo); +} + +int atomctrl_read_efuse(void *device, uint16_t start_index, + uint16_t end_index, uint32_t mask, uint32_t *efuse) +{ + int result; + READ_EFUSE_VALUE_PARAMETER efuse_param; + + efuse_param.sEfuse.usEfuseIndex = (start_index / 32) * 4; + efuse_param.sEfuse.ucBitShift = (uint8_t) + (start_index - ((start_index / 32) * 32)); + efuse_param.sEfuse.ucBitLength = (uint8_t) + ((end_index - start_index) + 1); + + result = cgs_atom_exec_cmd_table(device, + GetIndexIntoMasterTable(COMMAND, ReadEfuseValue), + &efuse_param); + if (!result) + *efuse = efuse_param.ulEfuseValue & mask; + + return result; +} diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h new file mode 100644 index 000000000000..627420b80a5f --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h @@ -0,0 +1,246 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef PP_ATOMVOLTAGECTRL_H +#define PP_ATOMVOLTAGECTRL_H + +#include "hwmgr.h" + +#define MEM_TYPE_GDDR5 0x50 +#define MEM_TYPE_GDDR4 0x40 +#define MEM_TYPE_GDDR3 0x30 +#define MEM_TYPE_DDR2 0x20 +#define MEM_TYPE_GDDR1 0x10 +#define MEM_TYPE_DDR3 0xb0 +#define MEM_TYPE_MASK 0xF0 + + +/* As returned from PowerConnectorDetectionTable. */ +#define PP_ATOM_POWER_BUDGET_DISABLE_OVERDRIVE 0x80 +#define PP_ATOM_POWER_BUDGET_SHOW_WARNING 0x40 +#define PP_ATOM_POWER_BUDGET_SHOW_WAIVER 0x20 +#define PP_ATOM_POWER_POWER_BUDGET_BEHAVIOUR 0x0F + +/* New functions for Evergreen and beyond. 
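+ * PP_ATOMCTRL_MAX_VOLTAGE_ENTRIES bounds both pp_atomctrl_voltage_table and
+ * the GPIO voltage objects parsed from the VBIOS by
+ * atomctrl_get_voltage_table_v3().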
*/ +#define PP_ATOMCTRL_MAX_VOLTAGE_ENTRIES 32 + +struct pp_atomctrl_clock_dividers { + uint32_t pll_post_divider; + uint32_t pll_feedback_divider; + uint32_t pll_ref_divider; + bool enable_post_divider; +}; + +typedef struct pp_atomctrl_clock_dividers pp_atomctrl_clock_dividers; + +union pp_atomctrl_tcipll_fb_divider { + struct { + uint32_t ul_fb_div_frac : 14; + uint32_t ul_fb_div : 12; + uint32_t un_used : 6; + }; + uint32_t ul_fb_divider; +}; + +typedef union pp_atomctrl_tcipll_fb_divider pp_atomctrl_tcipll_fb_divider; + +struct pp_atomctrl_clock_dividers_rv730 { + uint32_t pll_post_divider; + pp_atomctrl_tcipll_fb_divider mpll_feedback_divider; + uint32_t pll_ref_divider; + bool enable_post_divider; + bool enable_dithen; + uint32_t vco_mode; +}; +typedef struct pp_atomctrl_clock_dividers_rv730 pp_atomctrl_clock_dividers_rv730; + + +struct pp_atomctrl_clock_dividers_kong { + uint32_t pll_post_divider; + uint32_t real_clock; +}; +typedef struct pp_atomctrl_clock_dividers_kong pp_atomctrl_clock_dividers_kong; + +struct pp_atomctrl_clock_dividers_ci { + uint32_t pll_post_divider; /* post divider value */ + uint32_t real_clock; + pp_atomctrl_tcipll_fb_divider ul_fb_div; /* Output Parameter: PLL FB divider */ + uint8_t uc_pll_ref_div; /* Output Parameter: PLL ref divider */ + uint8_t uc_pll_post_div; /* Output Parameter: PLL post divider */ + uint8_t uc_pll_cntl_flag; /*Output Flags: control flag */ +}; +typedef struct pp_atomctrl_clock_dividers_ci pp_atomctrl_clock_dividers_ci; + +struct pp_atomctrl_clock_dividers_vi { + uint32_t pll_post_divider; /* post divider value */ + uint32_t real_clock; + pp_atomctrl_tcipll_fb_divider ul_fb_div; /*Output Parameter: PLL FB divider */ + uint8_t uc_pll_ref_div; /*Output Parameter: PLL ref divider */ + uint8_t uc_pll_post_div; /*Output Parameter: PLL post divider */ + uint8_t uc_pll_cntl_flag; /*Output Flags: control flag */ +}; +typedef struct pp_atomctrl_clock_dividers_vi pp_atomctrl_clock_dividers_vi; + +union pp_atomctrl_s_mpll_fb_divider { + struct { + uint32_t cl_kf : 12; + uint32_t clk_frac : 12; + uint32_t un_used : 8; + }; + uint32_t ul_fb_divider; +}; +typedef union pp_atomctrl_s_mpll_fb_divider pp_atomctrl_s_mpll_fb_divider; + +enum pp_atomctrl_spread_spectrum_mode { + pp_atomctrl_spread_spectrum_mode_down = 0, + pp_atomctrl_spread_spectrum_mode_center +}; +typedef enum pp_atomctrl_spread_spectrum_mode pp_atomctrl_spread_spectrum_mode; + +struct pp_atomctrl_memory_clock_param { + pp_atomctrl_s_mpll_fb_divider mpll_fb_divider; + uint32_t mpll_post_divider; + uint32_t bw_ctrl; + uint32_t dll_speed; + uint32_t vco_mode; + uint32_t yclk_sel; + uint32_t qdr; + uint32_t half_rate; +}; +typedef struct pp_atomctrl_memory_clock_param pp_atomctrl_memory_clock_param; + +struct pp_atomctrl_internal_ss_info { + uint32_t speed_spectrum_percentage; /* in 1/100 percentage */ + uint32_t speed_spectrum_rate; /* in KHz */ + pp_atomctrl_spread_spectrum_mode speed_spectrum_mode; +}; +typedef struct pp_atomctrl_internal_ss_info pp_atomctrl_internal_ss_info; + +#ifndef NUMBER_OF_M3ARB_PARAMS +#define NUMBER_OF_M3ARB_PARAMS 3 +#endif + +#ifndef NUMBER_OF_M3ARB_PARAM_SETS +#define NUMBER_OF_M3ARB_PARAM_SETS 10 +#endif + +struct pp_atomctrl_kong_system_info { + uint32_t ul_bootup_uma_clock; /* in 10kHz unit */ + uint16_t us_max_nb_voltage; /* high NB voltage, calculated using current VDDNB (D24F2xDC) and VDDNB offset fuse; */ + uint16_t us_min_nb_voltage; /* low NB voltage, calculated using current VDDNB (D24F2xDC) and VDDNB offset fuse; */ + uint16_t 
us_bootup_nb_voltage; /* boot up NB voltage */ + uint8_t uc_htc_tmp_lmt; /* bit [22:16] of D24F3x64 Hardware Thermal Control (HTC) Register, may not be needed, TBD */ + uint8_t uc_tj_offset; /* bit [28:22] of D24F3xE4 Thermtrip Status Register,may not be needed, TBD */ + /* 0: default 1: uvd 2: fs-3d */ + uint32_t ul_csr_m3_srb_cntl[NUMBER_OF_M3ARB_PARAM_SETS][NUMBER_OF_M3ARB_PARAMS];/* arrays with values for CSR M3 arbiter for default */ +}; +typedef struct pp_atomctrl_kong_system_info pp_atomctrl_kong_system_info; + +struct pp_atomctrl_memory_info { + uint8_t memory_vendor; + uint8_t memory_type; +}; +typedef struct pp_atomctrl_memory_info pp_atomctrl_memory_info; + +#define MAX_AC_TIMING_ENTRIES 16 + +struct pp_atomctrl_memory_clock_range_table { + uint8_t num_entries; + uint8_t rsv[3]; + + uint32_t mclk[MAX_AC_TIMING_ENTRIES]; +}; +typedef struct pp_atomctrl_memory_clock_range_table pp_atomctrl_memory_clock_range_table; + +struct pp_atomctrl_voltage_table_entry { + uint16_t value; + uint32_t smio_low; +}; + +typedef struct pp_atomctrl_voltage_table_entry pp_atomctrl_voltage_table_entry; + +struct pp_atomctrl_voltage_table { + uint32_t count; + uint32_t mask_low; + uint32_t phase_delay; /* Used for ATOM_GPIO_VOLTAGE_OBJECT_V3 and later */ + pp_atomctrl_voltage_table_entry entries[PP_ATOMCTRL_MAX_VOLTAGE_ENTRIES]; +}; + +typedef struct pp_atomctrl_voltage_table pp_atomctrl_voltage_table; + +#define VBIOS_MC_REGISTER_ARRAY_SIZE 32 +#define VBIOS_MAX_AC_TIMING_ENTRIES 20 + +struct pp_atomctrl_mc_reg_entry { + uint32_t mclk_max; + uint32_t mc_data[VBIOS_MC_REGISTER_ARRAY_SIZE]; +}; +typedef struct pp_atomctrl_mc_reg_entry pp_atomctrl_mc_reg_entry; + +struct pp_atomctrl_mc_register_address { + uint16_t s1; + uint8_t uc_pre_reg_data; +}; + +typedef struct pp_atomctrl_mc_register_address pp_atomctrl_mc_register_address; + +struct pp_atomctrl_mc_reg_table { + uint8_t last; /* number of registers */ + uint8_t num_entries; /* number of AC timing entries */ + pp_atomctrl_mc_reg_entry mc_reg_table_entry[VBIOS_MAX_AC_TIMING_ENTRIES]; + pp_atomctrl_mc_register_address mc_reg_address[VBIOS_MC_REGISTER_ARRAY_SIZE]; +}; +typedef struct pp_atomctrl_mc_reg_table pp_atomctrl_mc_reg_table; + +struct pp_atomctrl_gpio_pin_assignment { + uint16_t us_gpio_pin_aindex; + uint8_t uc_gpio_pin_bit_shift; +}; +typedef struct pp_atomctrl_gpio_pin_assignment pp_atomctrl_gpio_pin_assignment; + +extern bool atomctrl_get_pp_assign_pin(struct pp_hwmgr *hwmgr, const uint32_t pinId, pp_atomctrl_gpio_pin_assignment *gpio_pin_assignment); +extern int atomctrl_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type, uint32_t sclk, uint16_t virtual_voltage_Id, uint16_t *voltage); +extern uint32_t atomctrl_get_mpll_reference_clock(struct pp_hwmgr *hwmgr); +extern int atomctrl_get_memory_clock_spread_spectrum(struct pp_hwmgr *hwmgr, const uint32_t memory_clock, pp_atomctrl_internal_ss_info *ssInfo); +extern int atomctrl_get_engine_clock_spread_spectrum(struct pp_hwmgr *hwmgr, const uint32_t engine_clock, pp_atomctrl_internal_ss_info *ssInfo); +extern int atomctrl_initialize_mc_reg_table(struct pp_hwmgr *hwmgr, uint8_t module_index, pp_atomctrl_mc_reg_table *table); +extern int atomctrl_set_engine_dram_timings_rv770(struct pp_hwmgr *hwmgr, uint32_t engine_clock, uint32_t memory_clock); +extern uint32_t atomctrl_get_reference_clock(struct pp_hwmgr *hwmgr); +extern int atomctrl_get_memory_pll_dividers_si(struct pp_hwmgr *hwmgr, uint32_t clock_value, pp_atomctrl_memory_clock_param *mpll_param, bool strobe_mode); +extern 
int atomctrl_get_engine_pll_dividers_vi(struct pp_hwmgr *hwmgr, uint32_t clock_value, pp_atomctrl_clock_dividers_vi *dividers); +extern int atomctrl_get_dfs_pll_dividers_vi(struct pp_hwmgr *hwmgr, uint32_t clock_value, pp_atomctrl_clock_dividers_vi *dividers); +extern bool atomctrl_is_voltage_controled_by_gpio_v3(struct pp_hwmgr *hwmgr, uint8_t voltage_type, uint8_t voltage_mode); +extern int atomctrl_get_voltage_table_v3(struct pp_hwmgr *hwmgr, uint8_t voltage_type, uint8_t voltage_mode, pp_atomctrl_voltage_table *voltage_table); +extern int atomctrl_get_memory_pll_dividers_vi(struct pp_hwmgr *hwmgr, + uint32_t clock_value, pp_atomctrl_memory_clock_param *mpll_param); +extern int atomctrl_get_engine_pll_dividers_kong(struct pp_hwmgr *hwmgr, + uint32_t clock_value, + pp_atomctrl_clock_dividers_kong *dividers); +extern int atomctrl_read_efuse(void *device, uint16_t start_index, + uint16_t end_index, uint32_t mask, uint32_t *efuse); +extern int atomctrl_calculate_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type, + uint32_t sclk, uint16_t virtual_voltage_Id, uint16_t *voltage, uint16_t dpm_level, bool debug); + + +#endif + diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppevvmath.h b/drivers/gpu/drm/amd/powerplay/hwmgr/ppevvmath.h new file mode 100644 index 000000000000..b7429a527828 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppevvmath.h @@ -0,0 +1,612 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#include + +#define SHIFT_AMOUNT 16 /* We multiply all original integers with 2^SHIFT_AMOUNT to get the fInt representation */ + +#define PRECISION 5 /* Change this value to change the number of decimal places in the final output - 5 is a good default */ + +#define SHIFTED_2 (2 << SHIFT_AMOUNT) +#define MAX (1 << (SHIFT_AMOUNT - 1)) - 1 /* 32767 - Might change in the future */ + +/* ------------------------------------------------------------------------------- + * NEW TYPE - fINT + * ------------------------------------------------------------------------------- + * A variable of type fInt can be accessed in 3 ways using the dot (.) operator + * fInt A; + * A.full => The full number as it is. 
Generally not easy to read + * A.partial.real => Only the integer portion + * A.partial.decimal => Only the fractional portion + */ +typedef union _fInt { + int full; + struct _partial { + unsigned int decimal: SHIFT_AMOUNT; /*Needs to always be unsigned*/ + int real: 32 - SHIFT_AMOUNT; + } partial; +} fInt; + +/* ------------------------------------------------------------------------------- + * Function Declarations + * ------------------------------------------------------------------------------- + */ +fInt ConvertToFraction(int); /* Use this to convert an INT to a FINT */ +fInt Convert_ULONG_ToFraction(uint32_t); /* Use this to convert an uint32_t to a FINT */ +fInt GetScaledFraction(int, int); /* Use this to convert an INT to a FINT after scaling it by a factor */ +int ConvertBackToInteger(fInt); /* Convert a FINT back to an INT that is scaled by 1000 (i.e. last 3 digits are the decimal digits) */ + +fInt fNegate(fInt); /* Returns -1 * input fInt value */ +fInt fAdd (fInt, fInt); /* Returns the sum of two fInt numbers */ +fInt fSubtract (fInt A, fInt B); /* Returns A-B - Sometimes easier than Adding negative numbers */ +fInt fMultiply (fInt, fInt); /* Returns the product of two fInt numbers */ +fInt fDivide (fInt A, fInt B); /* Returns A/B */ +fInt fGetSquare(fInt); /* Returns the square of a fInt number */ +fInt fSqrt(fInt); /* Returns the Square Root of a fInt number */ + +int uAbs(int); /* Returns the Absolute value of the Int */ +fInt fAbs(fInt); /* Returns the Absolute value of the fInt */ +int uPow(int base, int exponent); /* Returns base^exponent an INT */ + +void SolveQuadracticEqn(fInt, fInt, fInt, fInt[]); /* Returns the 2 roots via the array */ +bool Equal(fInt, fInt); /* Returns true if two fInts are equal to each other */ +bool GreaterThan(fInt A, fInt B); /* Returns true if A > B */ + +fInt fExponential(fInt exponent); /* Can be used to calculate e^exponent */ +fInt fNaturalLog(fInt value); /* Can be used to calculate ln(value) */ + +/* Fuse decoding functions + * ------------------------------------------------------------------------------------- + */ +fInt fDecodeLinearFuse(uint32_t fuse_value, fInt f_min, fInt f_range, uint32_t bitlength); +fInt fDecodeLogisticFuse(uint32_t fuse_value, fInt f_average, fInt f_range, uint32_t bitlength); +fInt fDecodeLeakageID (uint32_t leakageID_fuse, fInt ln_max_div_min, fInt f_min, uint32_t bitlength); + +/* Internal Support Functions - Use these ONLY for testing or adding to internal functions + * ------------------------------------------------------------------------------------- + * Some of the following functions take two INTs as their input - This is unsafe for a variety of reasons. + */ +fInt Add (int, int); /* Add two INTs and return Sum as FINT */ +fInt Multiply (int, int); /* Multiply two INTs and return Product as FINT */ +fInt Divide (int, int); /* You get the idea... */ +fInt fNegate(fInt); + +int uGetScaledDecimal (fInt); /* Internal function */ +int GetReal (fInt A); /* Internal function */ + +/* Future Additions and Incomplete Functions + * ------------------------------------------------------------------------------------- + */ +int GetRoundedValue(fInt); /* Incomplete function - Useful only when Precision is lacking */ + /* Let us say we have 2.126 but can only handle 2 decimal points. 
We could */ + /* either chop of 6 and keep 2.12 or use this function to get 2.13, which is more accurate */ + +/* ------------------------------------------------------------------------------------- + * TROUBLESHOOTING INFORMATION + * ------------------------------------------------------------------------------------- + * 1) ConvertToFraction - InputOutOfRangeException: Only accepts numbers smaller than MAX (default: 32767) + * 2) fAdd - OutputOutOfRangeException: Output bigger than MAX (default: 32767) + * 3) fMultiply - OutputOutOfRangeException: + * 4) fGetSquare - OutputOutOfRangeException: + * 5) fDivide - DivideByZeroException + * 6) fSqrt - NegativeSquareRootException: Input cannot be a negative number + */ + +/* ------------------------------------------------------------------------------------- + * START OF CODE + * ------------------------------------------------------------------------------------- + */ +fInt fExponential(fInt exponent) /*Can be used to calculate e^exponent*/ +{ + uint32_t i; + bool bNegated = false; + + fInt fPositiveOne = ConvertToFraction(1); + fInt fZERO = ConvertToFraction(0); + + fInt lower_bound = Divide(78, 10000); + fInt solution = fPositiveOne; /*Starting off with baseline of 1 */ + fInt error_term; + + uint32_t k_array[11] = {55452, 27726, 13863, 6931, 4055, 2231, 1178, 606, 308, 155, 78}; + uint32_t expk_array[11] = {2560000, 160000, 40000, 20000, 15000, 12500, 11250, 10625, 10313, 10156, 10078}; + + if (GreaterThan(fZERO, exponent)) { + exponent = fNegate(exponent); + bNegated = true; + } + + while (GreaterThan(exponent, lower_bound)) { + for (i = 0; i < 11; i++) { + if (GreaterThan(exponent, GetScaledFraction(k_array[i], 10000))) { + exponent = fSubtract(exponent, GetScaledFraction(k_array[i], 10000)); + solution = fMultiply(solution, GetScaledFraction(expk_array[i], 10000)); + } + } + } + + error_term = fAdd(fPositiveOne, exponent); + + solution = fMultiply(solution, error_term); + + if (bNegated) + solution = fDivide(fPositiveOne, solution); + + return solution; +} + +fInt fNaturalLog(fInt value) +{ + uint32_t i; + fInt upper_bound = Divide(8, 1000); + fInt fNegativeOne = ConvertToFraction(-1); + fInt solution = ConvertToFraction(0); /*Starting off with baseline of 0 */ + fInt error_term; + + uint32_t k_array[10] = {160000, 40000, 20000, 15000, 12500, 11250, 10625, 10313, 10156, 10078}; + uint32_t logk_array[10] = {27726, 13863, 6931, 4055, 2231, 1178, 606, 308, 155, 78}; + + while (GreaterThan(fAdd(value, fNegativeOne), upper_bound)) { + for (i = 0; i < 10; i++) { + if (GreaterThan(value, GetScaledFraction(k_array[i], 10000))) { + value = fDivide(value, GetScaledFraction(k_array[i], 10000)); + solution = fAdd(solution, GetScaledFraction(logk_array[i], 10000)); + } + } + } + + error_term = fAdd(fNegativeOne, value); + + return (fAdd(solution, error_term)); +} + +fInt fDecodeLinearFuse(uint32_t fuse_value, fInt f_min, fInt f_range, uint32_t bitlength) +{ + fInt f_fuse_value = Convert_ULONG_ToFraction(fuse_value); + fInt f_bit_max_value = Convert_ULONG_ToFraction((uPow(2, bitlength)) - 1); + + fInt f_decoded_value; + + f_decoded_value = fDivide(f_fuse_value, f_bit_max_value); + f_decoded_value = fMultiply(f_decoded_value, f_range); + f_decoded_value = fAdd(f_decoded_value, f_min); + + return f_decoded_value; +} + + +fInt fDecodeLogisticFuse(uint32_t fuse_value, fInt f_average, fInt f_range, uint32_t bitlength) +{ + fInt f_fuse_value = Convert_ULONG_ToFraction(fuse_value); + fInt f_bit_max_value = Convert_ULONG_ToFraction((uPow(2, bitlength)) - 
1); + + fInt f_CONSTANT_NEG13 = ConvertToFraction(-13); + fInt f_CONSTANT1 = ConvertToFraction(1); + + fInt f_decoded_value; + + f_decoded_value = fSubtract(fDivide(f_bit_max_value, f_fuse_value), f_CONSTANT1); + f_decoded_value = fNaturalLog(f_decoded_value); + f_decoded_value = fMultiply(f_decoded_value, fDivide(f_range, f_CONSTANT_NEG13)); + f_decoded_value = fAdd(f_decoded_value, f_average); + + return f_decoded_value; +} + +fInt fDecodeLeakageID (uint32_t leakageID_fuse, fInt ln_max_div_min, fInt f_min, uint32_t bitlength) +{ + fInt fLeakage; + fInt f_bit_max_value = Convert_ULONG_ToFraction((uPow(2, bitlength)) - 1); + + fLeakage = fMultiply(ln_max_div_min, Convert_ULONG_ToFraction(leakageID_fuse)); + fLeakage = fDivide(fLeakage, f_bit_max_value); + fLeakage = fExponential(fLeakage); + fLeakage = fMultiply(fLeakage, f_min); + + return fLeakage; +} + +fInt ConvertToFraction(int X) /*Add all range checking here. Is it possible to make fInt a private declaration? */ +{ + fInt temp; + + if (X <= MAX) + temp.full = (X << SHIFT_AMOUNT); + else + temp.full = 0; + + return temp; +} + +fInt fNegate(fInt X) +{ + fInt CONSTANT_NEGONE = ConvertToFraction(-1); + return (fMultiply(X, CONSTANT_NEGONE)); +} + +fInt Convert_ULONG_ToFraction(uint32_t X) +{ + fInt temp; + + if (X <= MAX) + temp.full = (X << SHIFT_AMOUNT); + else + temp.full = 0; + + return temp; +} + +fInt GetScaledFraction(int X, int factor) +{ + int times_shifted, factor_shifted; + bool bNEGATED; + fInt fValue; + + times_shifted = 0; + factor_shifted = 0; + bNEGATED = false; + + if (X < 0) { + X = -1*X; + bNEGATED = true; + } + + if (factor < 0) { + factor = -1*factor; + bNEGATED = !bNEGATED; /*If bNEGATED = true due to X < 0, this will cover the case of negative cancelling negative */ + } + + if ((X > MAX) || factor > MAX) { + if ((X/factor) <= MAX) { + while (X > MAX) { + X = X >> 1; + times_shifted++; + } + + while (factor > MAX) { + factor = factor >> 1; + factor_shifted++; + } + } else { + fValue.full = 0; + return fValue; + } + } + + if (factor == 1) + return (ConvertToFraction(X)); + + fValue = fDivide(ConvertToFraction(X * uPow(-1, bNEGATED)), ConvertToFraction(factor)); + + fValue.full = fValue.full << times_shifted; + fValue.full = fValue.full >> factor_shifted; + + return fValue; +} + +/* Addition using two fInts */ +fInt fAdd (fInt X, fInt Y) +{ + fInt Sum; + + Sum.full = X.full + Y.full; + + return Sum; +} + +/* Addition using two fInts */ +fInt fSubtract (fInt X, fInt Y) +{ + fInt Difference; + + Difference.full = X.full - Y.full; + + return Difference; +} + +bool Equal(fInt A, fInt B) +{ + if (A.full == B.full) + return true; + else + return false; +} + +bool GreaterThan(fInt A, fInt B) +{ + if (A.full > B.full) + return true; + else + return false; +} + +fInt fMultiply (fInt X, fInt Y) /* Uses 64-bit integers (int64_t) */ +{ + fInt Product; + int64_t tempProduct; + bool X_LessThanOne, Y_LessThanOne; + + X_LessThanOne = (X.partial.real == 0 && X.partial.decimal != 0 && X.full >= 0); + Y_LessThanOne = (Y.partial.real == 0 && Y.partial.decimal != 0 && Y.full >= 0); + + /*The following is for a very specific common case: Non-zero number with ONLY fractional portion*/ + /* TEMPORARILY DISABLED - CAN BE USED TO IMPROVE PRECISION + + if (X_LessThanOne && Y_LessThanOne) { + Product.full = X.full * Y.full; + return Product + }*/ + + tempProduct = ((int64_t)X.full) * ((int64_t)Y.full); /*Q(16,16)*Q(16,16) = Q(32, 32) - Might become a negative number! 
*/ + tempProduct = tempProduct >> 16; /*Remove lagging 16 bits - Will lose some precision from decimal; */ + Product.full = (int)tempProduct; /*The int64_t will lose the leading 16 bits that were part of the integer portion */ + + return Product; +} + +fInt fDivide (fInt X, fInt Y) +{ + fInt fZERO, fQuotient; + int64_t longlongX, longlongY; + + fZERO = ConvertToFraction(0); + + if (Equal(Y, fZERO)) + return fZERO; + + longlongX = (int64_t)X.full; + longlongY = (int64_t)Y.full; + + longlongX = longlongX << 16; /*Q(16,16) -> Q(32,32) */ + + /* div64_s64() returns the quotient and does not modify its arguments, so use its return value directly */ + fQuotient.full = (int)div64_s64(longlongX, longlongY); /*Q(32,32) divided by Q(16,16) = Q(16,16) Back to original format */ + + return fQuotient; +} + +int ConvertBackToInteger (fInt A) /*THIS is the function that will be used to check with the Golden settings table*/ +{ + fInt fullNumber, scaledDecimal, scaledReal; + + scaledReal.full = GetReal(A) * uPow(10, PRECISION-1); /* DOUBLE CHECK THISSSS!!! */ + + scaledDecimal.full = uGetScaledDecimal(A); + + fullNumber = fAdd(scaledDecimal,scaledReal); + + return fullNumber.full; +} + +fInt fGetSquare(fInt A) +{ + return fMultiply(A,A); +} + +/* x_new = x_old - (x_old^2 - C) / (2 * x_old) */ +fInt fSqrt(fInt num) +{ + fInt F_divide_Fprime, Fprime; + fInt test; + fInt twoShifted; + int seed, counter, error; + fInt x_new, x_old, C, y; + + fInt fZERO = ConvertToFraction(0); + + /* (0 > num) is the same as (num < 0), i.e., num is negative */ + + if (GreaterThan(fZERO, num) || Equal(fZERO, num)) + return fZERO; + + C = num; + + if (num.partial.real > 3000) + seed = 60; + else if (num.partial.real > 1000) + seed = 30; + else if (num.partial.real > 100) + seed = 10; + else + seed = 2; + + counter = 0; + + if (Equal(num, fZERO)) /*Square Root of Zero is zero */ + return fZERO; + + twoShifted = ConvertToFraction(2); + x_new = ConvertToFraction(seed); + + do { + counter++; + + x_old.full = x_new.full; + + test = fGetSquare(x_old); /*1.75*1.75 is reverting back to 1 when shifted down */ + y = fSubtract(test, C); /*y = f(x) = x^2 - C; */ + + Fprime = fMultiply(twoShifted, x_old); + F_divide_Fprime = fDivide(y, Fprime); + + x_new = fSubtract(x_old, F_divide_Fprime); + + error = ConvertBackToInteger(x_new) - ConvertBackToInteger(x_old); + + if (counter > 20) /*20 is already way too many iterations. 
If we don't have an answer by then, we never will*/ + return x_new; + + } while (uAbs(error) > 0); + + return (x_new); +} + +void SolveQuadracticEqn(fInt A, fInt B, fInt C, fInt Roots[]) +{ + fInt *pRoots = &Roots[0]; + fInt temp, root_first, root_second; + fInt f_CONSTANT10, f_CONSTANT100; + + f_CONSTANT100 = ConvertToFraction(100); + f_CONSTANT10 = ConvertToFraction(10); + + while (GreaterThan(A, f_CONSTANT100) || GreaterThan(B, f_CONSTANT100) || GreaterThan(C, f_CONSTANT100)) { + A = fDivide(A, f_CONSTANT10); + B = fDivide(B, f_CONSTANT10); + C = fDivide(C, f_CONSTANT10); + } + + temp = fMultiply(ConvertToFraction(4), A); /* root = 4*A */ + temp = fMultiply(temp, C); /* root = 4*A*C */ + temp = fSubtract(fGetSquare(B), temp); /* root = b^2 - 4AC */ + temp = fSqrt(temp); /*root = Sqrt (b^2 - 4AC); */ + + root_first = fSubtract(fNegate(B), temp); /* b - Sqrt(b^2 - 4AC) */ + root_second = fAdd(fNegate(B), temp); /* b + Sqrt(b^2 - 4AC) */ + + root_first = fDivide(root_first, ConvertToFraction(2)); /* [b +- Sqrt(b^2 - 4AC)]/[2] */ + root_first = fDivide(root_first, A); /*[b +- Sqrt(b^2 - 4AC)]/[2*A] */ + + root_second = fDivide(root_second, ConvertToFraction(2)); /* [b +- Sqrt(b^2 - 4AC)]/[2] */ + root_second = fDivide(root_second, A); /*[b +- Sqrt(b^2 - 4AC)]/[2*A] */ + + *(pRoots + 0) = root_first; + *(pRoots + 1) = root_second; +} + +/* ----------------------------------------------------------------------------- + * SUPPORT FUNCTIONS + * ----------------------------------------------------------------------------- + */ + +/* Addition using two normal ints - Temporary - Use only for testing purposes? */ +fInt Add (int X, int Y) +{ + fInt A, B, Sum; + + A.full = (X << SHIFT_AMOUNT); + B.full = (Y << SHIFT_AMOUNT); + + Sum.full = A.full + B.full; + + return Sum; +} + +/* Conversion Functions */ +int GetReal (fInt A) +{ + return (A.full >> SHIFT_AMOUNT); +} + +/* Temporarily Disabled */ +int GetRoundedValue(fInt A) /*For now, round the 3rd decimal place */ +{ + /* ROUNDING TEMPORARILY DISABLED + int temp = A.full; + int decimal_cutoff, decimal_mask = 0x000001FF; + decimal_cutoff = temp & decimal_mask; + if (decimal_cutoff > 0x147) { + temp += 673; + }*/ + + return ConvertBackToInteger(A)/10000; /*Temporary - in case this was used somewhere else */ +} + +fInt Multiply (int X, int Y) +{ + fInt A, B, Product; + + A.full = X << SHIFT_AMOUNT; + B.full = Y << SHIFT_AMOUNT; + + Product = fMultiply(A, B); + + return Product; +} + +fInt Divide (int X, int Y) +{ + fInt A, B, Quotient; + + A.full = X << SHIFT_AMOUNT; + B.full = Y << SHIFT_AMOUNT; + + Quotient = fDivide(A, B); + + return Quotient; +} + +int uGetScaledDecimal (fInt A) /*Converts the fractional portion to whole integers - Costly function */ +{ + int dec[PRECISION]; + int i, scaledDecimal = 0, tmp = A.partial.decimal; + + for (i = 0; i < PRECISION; i++) { + dec[i] = tmp / (1 << SHIFT_AMOUNT); + tmp = tmp - ((1 << SHIFT_AMOUNT)*dec[i]); + tmp *= 10; + scaledDecimal = scaledDecimal + dec[i]*uPow(10, PRECISION - 1 -i); + } + + return scaledDecimal; +} + +int uPow(int base, int power) +{ + if (power == 0) + return 1; + else + return (base)*uPow(base, power - 1); +} + +fInt fAbs(fInt A) +{ + if (A.partial.real < 0) + return (fMultiply(A, ConvertToFraction(-1))); + else + return A; +} + +int uAbs(int X) +{ + if (X < 0) + return (X * -1); + else + return X; +} + +fInt fRoundUpByStepSize(fInt A, fInt fStepSize, bool error_term) +{ + fInt solution; + + solution = fDivide(A, fStepSize); + solution.partial.decimal = 0; /*All fractional digits change 
to 0 */ + + if (error_term) + solution.partial.real += 1; /*Error term of 1 added */ + + solution = fMultiply(solution, fStepSize); + solution = fAdd(solution, fStepSize); + + return solution; +} + diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/pppcielanes.c b/drivers/gpu/drm/amd/powerplay/hwmgr/pppcielanes.c new file mode 100644 index 000000000000..186496a34cbe --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/pppcielanes.c @@ -0,0 +1,64 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include +#include "atom-types.h" +#include "atombios.h" +#include "pppcielanes.h" + +/** \file + * Functions related to PCIe lane changes. + */ + +/* For converting from number of lanes to lane bits. */ +static const unsigned char pp_r600_encode_lanes[] = { + 0, /* 0 Not Supported */ + 1, /* 1 Lane */ + 2, /* 2 Lanes */ + 0, /* 3 Not Supported */ + 3, /* 4 Lanes */ + 0, /* 5 Not Supported */ + 0, /* 6 Not Supported */ + 0, /* 7 Not Supported */ + 4, /* 8 Lanes */ + 0, /* 9 Not Supported */ + 0, /* 10 Not Supported */ + 0, /* 11 Not Supported */ + 5, /* 12 Lanes (Not actually supported) */ + 0, /* 13 Not Supported */ + 0, /* 14 Not Supported */ + 0, /* 15 Not Supported */ + 6 /* 16 Lanes */ +}; + +static const unsigned char pp_r600_decoded_lanes[8] = { 16, 1, 2, 4, 8, 12, 16, }; + +uint8_t encode_pcie_lane_width(uint32_t num_lanes) +{ + return pp_r600_encode_lanes[num_lanes]; +} + +uint8_t decode_pcie_lane_width(uint32_t num_lanes) +{ + return pp_r600_decoded_lanes[num_lanes]; +} diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/pppcielanes.h b/drivers/gpu/drm/amd/powerplay/hwmgr/pppcielanes.h new file mode 100644 index 000000000000..70b163b35570 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/pppcielanes.h @@ -0,0 +1,31 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef PP_PCIELANES_H +#define PP_PCIELANES_H + +extern uint8_t encode_pcie_lane_width(uint32_t num_lanes); +extern uint8_t decode_pcie_lane_width(uint32_t num_lanes); + +#endif + diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c new file mode 100644 index 000000000000..2f1a14fe05b1 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c @@ -0,0 +1,1688 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#include +#include +#include + +#include "processpptables.h" +#include +#include +#include "pp_debug.h" +#include "pptable.h" +#include "power_state.h" +#include "hwmgr.h" +#include "hardwaremanager.h" + + +#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2 12 +#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3 14 +#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4 16 +#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5 18 +#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6 20 +#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7 22 +#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8 24 +#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V9 26 + +#define NUM_BITS_CLOCK_INFO_ARRAY_INDEX 6 + +static uint16_t get_vce_table_offset(struct pp_hwmgr *hwmgr, + const ATOM_PPLIB_POWERPLAYTABLE *powerplay_table) +{ + uint16_t vce_table_offset = 0; + + if (le16_to_cpu(powerplay_table->usTableSize) >= + sizeof(ATOM_PPLIB_POWERPLAYTABLE3)) { + const ATOM_PPLIB_POWERPLAYTABLE3 *powerplay_table3 = + (const ATOM_PPLIB_POWERPLAYTABLE3 *)powerplay_table; + + if (powerplay_table3->usExtendendedHeaderOffset > 0) { + const ATOM_PPLIB_EXTENDEDHEADER *extended_header = + (const ATOM_PPLIB_EXTENDEDHEADER *) + (((unsigned long)powerplay_table3) + + le16_to_cpu(powerplay_table3->usExtendendedHeaderOffset)); + if (le16_to_cpu(extended_header->usSize) >= + SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2) + vce_table_offset = le16_to_cpu(extended_header->usVCETableOffset); + } + } + + return vce_table_offset; +} + +static uint16_t get_vce_clock_info_array_offset(struct pp_hwmgr *hwmgr, + const ATOM_PPLIB_POWERPLAYTABLE *powerplay_table) +{ + uint16_t table_offset = get_vce_table_offset(hwmgr, + powerplay_table); + + if (table_offset > 0) + return table_offset + 1; + + return 0; +} + +static uint16_t get_vce_clock_info_array_size(struct pp_hwmgr *hwmgr, + const ATOM_PPLIB_POWERPLAYTABLE *powerplay_table) +{ + uint16_t table_offset = get_vce_clock_info_array_offset(hwmgr, + powerplay_table); + uint16_t table_size = 0; + + if (table_offset > 0) { + const VCEClockInfoArray *p = (const VCEClockInfoArray *) + (((unsigned long) powerplay_table) + table_offset); + table_size = sizeof(uint8_t) + p->ucNumEntries * sizeof(VCEClockInfo); + } + + return table_size; +} + +static uint16_t get_vce_clock_voltage_limit_table_offset(struct pp_hwmgr *hwmgr, + const ATOM_PPLIB_POWERPLAYTABLE *powerplay_table) +{ + uint16_t table_offset = get_vce_clock_info_array_offset(hwmgr, + powerplay_table); + + if (table_offset > 0) + return table_offset + get_vce_clock_info_array_size(hwmgr, + powerplay_table); + + return 0; +} + +static uint16_t get_vce_clock_voltage_limit_table_size(struct pp_hwmgr *hwmgr, + const ATOM_PPLIB_POWERPLAYTABLE *powerplay_table) +{ + uint16_t table_offset = get_vce_clock_voltage_limit_table_offset(hwmgr, powerplay_table); + uint16_t table_size = 0; + + if (table_offset > 0) { + const ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *ptable = + (const ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *)(((unsigned long) powerplay_table) + table_offset); + + table_size = sizeof(uint8_t) + ptable->numEntries * sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record); + } + return table_size; +} + +static uint16_t get_vce_state_table_offset(struct pp_hwmgr *hwmgr, const ATOM_PPLIB_POWERPLAYTABLE *powerplay_table) +{ + uint16_t table_offset = get_vce_clock_voltage_limit_table_offset(hwmgr, powerplay_table); + + if (table_offset > 0) + return table_offset + get_vce_clock_voltage_limit_table_size(hwmgr, powerplay_table); + + return 0; +} + +static const ATOM_PPLIB_VCE_State_Table *get_vce_state_table( + 
struct pp_hwmgr *hwmgr, + const ATOM_PPLIB_POWERPLAYTABLE *powerplay_table) +{ + uint16_t table_offset = get_vce_state_table_offset(hwmgr, powerplay_table); + + if (table_offset > 0) + return (const ATOM_PPLIB_VCE_State_Table *)(((unsigned long) powerplay_table) + table_offset); + + return NULL; +} + +static uint16_t get_uvd_table_offset(struct pp_hwmgr *hwmgr, + const ATOM_PPLIB_POWERPLAYTABLE *powerplay_table) +{ + uint16_t uvd_table_offset = 0; + + if (le16_to_cpu(powerplay_table->usTableSize) >= + sizeof(ATOM_PPLIB_POWERPLAYTABLE3)) { + const ATOM_PPLIB_POWERPLAYTABLE3 *powerplay_table3 = + (const ATOM_PPLIB_POWERPLAYTABLE3 *)powerplay_table; + if (powerplay_table3->usExtendendedHeaderOffset > 0) { + const ATOM_PPLIB_EXTENDEDHEADER *extended_header = + (const ATOM_PPLIB_EXTENDEDHEADER *) + (((unsigned long)powerplay_table3) + + le16_to_cpu(powerplay_table3->usExtendendedHeaderOffset)); + if (le16_to_cpu(extended_header->usSize) >= + SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3) + uvd_table_offset = le16_to_cpu(extended_header->usUVDTableOffset); + } + } + return uvd_table_offset; +} + +static uint16_t get_uvd_clock_info_array_offset(struct pp_hwmgr *hwmgr, + const ATOM_PPLIB_POWERPLAYTABLE *powerplay_table) +{ + uint16_t table_offset = get_uvd_table_offset(hwmgr, + powerplay_table); + + if (table_offset > 0) + return table_offset + 1; + return 0; +} + +static uint16_t get_uvd_clock_info_array_size(struct pp_hwmgr *hwmgr, + const ATOM_PPLIB_POWERPLAYTABLE *powerplay_table) +{ + uint16_t table_offset = get_uvd_clock_info_array_offset(hwmgr, + powerplay_table); + uint16_t table_size = 0; + + if (table_offset > 0) { + const UVDClockInfoArray *p = (const UVDClockInfoArray *) + (((unsigned long) powerplay_table) + + table_offset); + table_size = sizeof(UCHAR) + + p->ucNumEntries * sizeof(UVDClockInfo); + } + + return table_size; +} + +static uint16_t get_uvd_clock_voltage_limit_table_offset( + struct pp_hwmgr *hwmgr, + const ATOM_PPLIB_POWERPLAYTABLE *powerplay_table) +{ + uint16_t table_offset = get_uvd_clock_info_array_offset(hwmgr, + powerplay_table); + + if (table_offset > 0) + return table_offset + + get_uvd_clock_info_array_size(hwmgr, powerplay_table); + + return 0; +} + +static uint16_t get_samu_table_offset(struct pp_hwmgr *hwmgr, + const ATOM_PPLIB_POWERPLAYTABLE *powerplay_table) +{ + uint16_t samu_table_offset = 0; + + if (le16_to_cpu(powerplay_table->usTableSize) >= + sizeof(ATOM_PPLIB_POWERPLAYTABLE3)) { + const ATOM_PPLIB_POWERPLAYTABLE3 *powerplay_table3 = + (const ATOM_PPLIB_POWERPLAYTABLE3 *)powerplay_table; + if (powerplay_table3->usExtendendedHeaderOffset > 0) { + const ATOM_PPLIB_EXTENDEDHEADER *extended_header = + (const ATOM_PPLIB_EXTENDEDHEADER *) + (((unsigned long)powerplay_table3) + + le16_to_cpu(powerplay_table3->usExtendendedHeaderOffset)); + if (le16_to_cpu(extended_header->usSize) >= + SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4) + samu_table_offset = le16_to_cpu(extended_header->usSAMUTableOffset); + } + } + + return samu_table_offset; +} + +static uint16_t get_samu_clock_voltage_limit_table_offset( + struct pp_hwmgr *hwmgr, + const ATOM_PPLIB_POWERPLAYTABLE *powerplay_table) +{ + uint16_t table_offset = get_samu_table_offset(hwmgr, + powerplay_table); + + if (table_offset > 0) + return table_offset + 1; + + return 0; +} + +static uint16_t get_acp_table_offset(struct pp_hwmgr *hwmgr, + const ATOM_PPLIB_POWERPLAYTABLE *powerplay_table) +{ + uint16_t acp_table_offset = 0; + + if (le16_to_cpu(powerplay_table->usTableSize) >= + sizeof(ATOM_PPLIB_POWERPLAYTABLE3)) { + const 
ATOM_PPLIB_POWERPLAYTABLE3 *powerplay_table3 = + (const ATOM_PPLIB_POWERPLAYTABLE3 *)powerplay_table; + if (powerplay_table3->usExtendendedHeaderOffset > 0) { + const ATOM_PPLIB_EXTENDEDHEADER *pExtendedHeader = + (const ATOM_PPLIB_EXTENDEDHEADER *) + (((unsigned long)powerplay_table3) + + le16_to_cpu(powerplay_table3->usExtendendedHeaderOffset)); + if (le16_to_cpu(pExtendedHeader->usSize) >= + SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6) + acp_table_offset = le16_to_cpu(pExtendedHeader->usACPTableOffset); + } + } + + return acp_table_offset; +} + +static uint16_t get_acp_clock_voltage_limit_table_offset( + struct pp_hwmgr *hwmgr, + const ATOM_PPLIB_POWERPLAYTABLE *powerplay_table) +{ + uint16_t tableOffset = get_acp_table_offset(hwmgr, powerplay_table); + + if (tableOffset > 0) + return tableOffset + 1; + + return 0; +} + +static uint16_t get_cacp_tdp_table_offset( + struct pp_hwmgr *hwmgr, + const ATOM_PPLIB_POWERPLAYTABLE *powerplay_table) +{ + uint16_t cacTdpTableOffset = 0; + + if (le16_to_cpu(powerplay_table->usTableSize) >= + sizeof(ATOM_PPLIB_POWERPLAYTABLE3)) { + const ATOM_PPLIB_POWERPLAYTABLE3 *powerplay_table3 = + (const ATOM_PPLIB_POWERPLAYTABLE3 *)powerplay_table; + if (powerplay_table3->usExtendendedHeaderOffset > 0) { + const ATOM_PPLIB_EXTENDEDHEADER *pExtendedHeader = + (const ATOM_PPLIB_EXTENDEDHEADER *) + (((unsigned long)powerplay_table3) + + le16_to_cpu(powerplay_table3->usExtendendedHeaderOffset)); + if (le16_to_cpu(pExtendedHeader->usSize) >= + SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7) + cacTdpTableOffset = le16_to_cpu(pExtendedHeader->usPowerTuneTableOffset); + } + } + + return cacTdpTableOffset; +} + +static int get_cac_tdp_table(struct pp_hwmgr *hwmgr, + struct phm_cac_tdp_table **ptable, + const ATOM_PowerTune_Table *table, + uint16_t us_maximum_power_delivery_limit) +{ + unsigned long table_size; + struct phm_cac_tdp_table *tdp_table; + + table_size = sizeof(unsigned long) + sizeof(struct phm_cac_tdp_table); + + tdp_table = kzalloc(table_size, GFP_KERNEL); + if (NULL == tdp_table) + return -ENOMEM; + + tdp_table->usTDP = le16_to_cpu(table->usTDP); + tdp_table->usConfigurableTDP = le16_to_cpu(table->usConfigurableTDP); + tdp_table->usTDC = le16_to_cpu(table->usTDC); + tdp_table->usBatteryPowerLimit = le16_to_cpu(table->usBatteryPowerLimit); + tdp_table->usSmallPowerLimit = le16_to_cpu(table->usSmallPowerLimit); + tdp_table->usLowCACLeakage = le16_to_cpu(table->usLowCACLeakage); + tdp_table->usHighCACLeakage = le16_to_cpu(table->usHighCACLeakage); + tdp_table->usMaximumPowerDeliveryLimit = us_maximum_power_delivery_limit; + + *ptable = tdp_table; + + return 0; +} + +static uint16_t get_sclk_vdd_gfx_table_offset(struct pp_hwmgr *hwmgr, + const ATOM_PPLIB_POWERPLAYTABLE *powerplay_table) +{ + uint16_t sclk_vdd_gfx_table_offset = 0; + + if (le16_to_cpu(powerplay_table->usTableSize) >= + sizeof(ATOM_PPLIB_POWERPLAYTABLE3)) { + const ATOM_PPLIB_POWERPLAYTABLE3 *powerplay_table3 = + (const ATOM_PPLIB_POWERPLAYTABLE3 *)powerplay_table; + if (powerplay_table3->usExtendendedHeaderOffset > 0) { + const ATOM_PPLIB_EXTENDEDHEADER *pExtendedHeader = + (const ATOM_PPLIB_EXTENDEDHEADER *) + (((unsigned long)powerplay_table3) + + le16_to_cpu(powerplay_table3->usExtendendedHeaderOffset)); + if (le16_to_cpu(pExtendedHeader->usSize) >= + SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8) + sclk_vdd_gfx_table_offset = + le16_to_cpu(pExtendedHeader->usSclkVddgfxTableOffset); + } + } + + return sclk_vdd_gfx_table_offset; +} + +static uint16_t get_sclk_vdd_gfx_clock_voltage_dependency_table_offset( + struct 
pp_hwmgr *hwmgr, + const ATOM_PPLIB_POWERPLAYTABLE *powerplay_table) +{ + uint16_t tableOffset = get_sclk_vdd_gfx_table_offset(hwmgr, powerplay_table); + + if (tableOffset > 0) + return tableOffset; + + return 0; +} + + +static int get_clock_voltage_dependency_table(struct pp_hwmgr *hwmgr, + struct phm_clock_voltage_dependency_table **ptable, + const ATOM_PPLIB_Clock_Voltage_Dependency_Table *table) +{ + + unsigned long table_size, i; + struct phm_clock_voltage_dependency_table *dep_table; + + table_size = sizeof(unsigned long) + + sizeof(struct phm_clock_voltage_dependency_table) + * table->ucNumEntries; + + dep_table = kzalloc(table_size, GFP_KERNEL); + if (NULL == dep_table) + return -ENOMEM; + + dep_table->count = (unsigned long)table->ucNumEntries; + + for (i = 0; i < dep_table->count; i++) { + dep_table->entries[i].clk = + ((unsigned long)table->entries[i].ucClockHigh << 16) | + le16_to_cpu(table->entries[i].usClockLow); + dep_table->entries[i].v = + (unsigned long)le16_to_cpu(table->entries[i].usVoltage); + } + + *ptable = dep_table; + + return 0; +} + +static int get_valid_clk(struct pp_hwmgr *hwmgr, + struct phm_clock_array **ptable, + const struct phm_clock_voltage_dependency_table *table) +{ + unsigned long table_size, i; + struct phm_clock_array *clock_table; + + table_size = sizeof(unsigned long) + sizeof(unsigned long) * table->count; + clock_table = kzalloc(table_size, GFP_KERNEL); + if (NULL == clock_table) + return -ENOMEM; + + clock_table->count = (unsigned long)table->count; + + for (i = 0; i < clock_table->count; i++) + clock_table->values[i] = (unsigned long)table->entries[i].clk; + + *ptable = clock_table; + + return 0; +} + +static int get_clock_voltage_limit(struct pp_hwmgr *hwmgr, + struct phm_clock_and_voltage_limits *limits, + const ATOM_PPLIB_Clock_Voltage_Limit_Table *table) +{ + limits->sclk = ((unsigned long)table->entries[0].ucSclkHigh << 16) | + le16_to_cpu(table->entries[0].usSclkLow); + limits->mclk = ((unsigned long)table->entries[0].ucMclkHigh << 16) | + le16_to_cpu(table->entries[0].usMclkLow); + limits->vddc = (unsigned long)le16_to_cpu(table->entries[0].usVddc); + limits->vddci = (unsigned long)le16_to_cpu(table->entries[0].usVddci); + + return 0; +} + + +static void set_hw_cap(struct pp_hwmgr *hwmgr, bool enable, + enum phm_platform_caps cap) +{ + if (enable) + phm_cap_set(hwmgr->platform_descriptor.platformCaps, cap); + else + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, cap); +} + +static int set_platform_caps(struct pp_hwmgr *hwmgr, + unsigned long powerplay_caps) +{ + set_hw_cap( + hwmgr, + 0 != (powerplay_caps & ATOM_PP_PLATFORM_CAP_POWERPLAY), + PHM_PlatformCaps_PowerPlaySupport + ); + + set_hw_cap( + hwmgr, + 0 != (powerplay_caps & ATOM_PP_PLATFORM_CAP_SBIOSPOWERSOURCE), + PHM_PlatformCaps_BiosPowerSourceControl + ); + + set_hw_cap( + hwmgr, + 0 != (powerplay_caps & ATOM_PP_PLATFORM_CAP_ASPM_L0s), + PHM_PlatformCaps_EnableASPML0s + ); + + set_hw_cap( + hwmgr, + 0 != (powerplay_caps & ATOM_PP_PLATFORM_CAP_ASPM_L1), + PHM_PlatformCaps_EnableASPML1 + ); + + set_hw_cap( + hwmgr, + 0 != (powerplay_caps & ATOM_PP_PLATFORM_CAP_BACKBIAS), + PHM_PlatformCaps_EnableBackbias + ); + + set_hw_cap( + hwmgr, + 0 != (powerplay_caps & ATOM_PP_PLATFORM_CAP_HARDWAREDC), + PHM_PlatformCaps_AutomaticDCTransition + ); + + set_hw_cap( + hwmgr, + 0 != (powerplay_caps & ATOM_PP_PLATFORM_CAP_GEMINIPRIMARY), + PHM_PlatformCaps_GeminiPrimary + ); + + set_hw_cap( + hwmgr, + 0 != (powerplay_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC), + PHM_PlatformCaps_StepVddc + ); 
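+ + /* Descriptive note (added for readability): each set_hw_cap() call in this sequence mirrors one ATOM_PP_PLATFORM_CAP_* bit reported by the VBIOS powerplay table into the corresponding PHM_PlatformCaps_* flag in hwmgr->platform_descriptor.platformCaps; no other state is modified here. */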
+ + set_hw_cap( + hwmgr, + 0 != (powerplay_caps & ATOM_PP_PLATFORM_CAP_VOLTAGECONTROL), + PHM_PlatformCaps_EnableVoltageControl + ); + + set_hw_cap( + hwmgr, + 0 != (powerplay_caps & ATOM_PP_PLATFORM_CAP_SIDEPORTCONTROL), + PHM_PlatformCaps_EnableSideportControl + ); + + set_hw_cap( + hwmgr, + 0 != (powerplay_caps & ATOM_PP_PLATFORM_CAP_TURNOFFPLL_ASPML1), + PHM_PlatformCaps_TurnOffPll_ASPML1 + ); + + set_hw_cap( + hwmgr, + 0 != (powerplay_caps & ATOM_PP_PLATFORM_CAP_HTLINKCONTROL), + PHM_PlatformCaps_EnableHTLinkControl + ); + + set_hw_cap( + hwmgr, + 0 != (powerplay_caps & ATOM_PP_PLATFORM_CAP_MVDDCONTROL), + PHM_PlatformCaps_EnableMVDDControl + ); + + set_hw_cap( + hwmgr, + 0 != (powerplay_caps & ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL), + PHM_PlatformCaps_ControlVDDCI + ); + + set_hw_cap( + hwmgr, + 0 != (powerplay_caps & ATOM_PP_PLATFORM_CAP_REGULATOR_HOT), + PHM_PlatformCaps_RegulatorHot + ); + + set_hw_cap( + hwmgr, + 0 != (powerplay_caps & ATOM_PP_PLATFORM_CAP_GOTO_BOOT_ON_ALERT), + PHM_PlatformCaps_BootStateOnAlert + ); + + set_hw_cap( + hwmgr, + 0 != (powerplay_caps & ATOM_PP_PLATFORM_CAP_DONT_WAIT_FOR_VBLANK_ON_ALERT), + PHM_PlatformCaps_DontWaitForVBlankOnAlert + ); + + set_hw_cap( + hwmgr, + 0 != (powerplay_caps & ATOM_PP_PLATFORM_CAP_BACO), + PHM_PlatformCaps_BACO + ); + + set_hw_cap( + hwmgr, + 0 != (powerplay_caps & ATOM_PP_PLATFORM_CAP_NEW_CAC_VOLTAGE), + PHM_PlatformCaps_NewCACVoltage + ); + + set_hw_cap( + hwmgr, + 0 != (powerplay_caps & ATOM_PP_PLATFORM_CAP_REVERT_GPIO5_POLARITY), + PHM_PlatformCaps_RevertGPIO5Polarity + ); + + set_hw_cap( + hwmgr, + 0 != (powerplay_caps & ATOM_PP_PLATFORM_CAP_OUTPUT_THERMAL2GPIO17), + PHM_PlatformCaps_Thermal2GPIO17 + ); + + set_hw_cap( + hwmgr, + 0 != (powerplay_caps & ATOM_PP_PLATFORM_CAP_VRHOT_GPIO_CONFIGURABLE), + PHM_PlatformCaps_VRHotGPIOConfigurable + ); + + set_hw_cap( + hwmgr, + 0 != (powerplay_caps & ATOM_PP_PLATFORM_CAP_TEMP_INVERSION), + PHM_PlatformCaps_TempInversion + ); + + set_hw_cap( + hwmgr, + 0 != (powerplay_caps & ATOM_PP_PLATFORM_CAP_EVV), + PHM_PlatformCaps_EVV + ); + + set_hw_cap( + hwmgr, + 0 != (powerplay_caps & ATOM_PP_PLATFORM_COMBINE_PCC_WITH_THERMAL_SIGNAL), + PHM_PlatformCaps_CombinePCCWithThermalSignal + ); + + set_hw_cap( + hwmgr, + 0 != (powerplay_caps & ATOM_PP_PLATFORM_LOAD_POST_PRODUCTION_FIRMWARE), + PHM_PlatformCaps_LoadPostProductionFirmware + ); + + set_hw_cap( + hwmgr, + 0 != (powerplay_caps & ATOM_PP_PLATFORM_CAP_DISABLE_USING_ACTUAL_TEMPERATURE_FOR_POWER_CALC), + PHM_PlatformCaps_DisableUsingActualTemperatureForPowerCalc + ); + + return 0; +} + +static PP_StateClassificationFlags make_classification_flags( + struct pp_hwmgr *hwmgr, + USHORT classification, + USHORT classification2) +{ + PP_StateClassificationFlags result = 0; + + if (classification & ATOM_PPLIB_CLASSIFICATION_BOOT) + result |= PP_StateClassificationFlag_Boot; + + if (classification & ATOM_PPLIB_CLASSIFICATION_THERMAL) + result |= PP_StateClassificationFlag_Thermal; + + if (classification & + ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE) + result |= PP_StateClassificationFlag_LimitedPowerSource; + + if (classification & ATOM_PPLIB_CLASSIFICATION_REST) + result |= PP_StateClassificationFlag_Rest; + + if (classification & ATOM_PPLIB_CLASSIFICATION_FORCED) + result |= PP_StateClassificationFlag_Forced; + + if (classification & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE) + result |= PP_StateClassificationFlag_3DPerformance; + + + if (classification & ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE) + result |= 
PP_StateClassificationFlag_ACOverdriveTemplate; + + if (classification & ATOM_PPLIB_CLASSIFICATION_UVDSTATE) + result |= PP_StateClassificationFlag_Uvd; + + if (classification & ATOM_PPLIB_CLASSIFICATION_HDSTATE) + result |= PP_StateClassificationFlag_UvdHD; + + if (classification & ATOM_PPLIB_CLASSIFICATION_SDSTATE) + result |= PP_StateClassificationFlag_UvdSD; + + if (classification & ATOM_PPLIB_CLASSIFICATION_HD2STATE) + result |= PP_StateClassificationFlag_HD2; + + if (classification & ATOM_PPLIB_CLASSIFICATION_ACPI) + result |= PP_StateClassificationFlag_ACPI; + + if (classification2 & ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2) + result |= PP_StateClassificationFlag_LimitedPowerSource_2; + + + if (classification2 & ATOM_PPLIB_CLASSIFICATION2_ULV) + result |= PP_StateClassificationFlag_ULV; + + if (classification2 & ATOM_PPLIB_CLASSIFICATION2_MVC) + result |= PP_StateClassificationFlag_UvdMVC; + + return result; +} + +static int init_non_clock_fields(struct pp_hwmgr *hwmgr, + struct pp_power_state *ps, + uint8_t version, + const ATOM_PPLIB_NONCLOCK_INFO *pnon_clock_info) { + unsigned long rrr_index; + unsigned long tmp; + + ps->classification.ui_label = (le16_to_cpu(pnon_clock_info->usClassification) & + ATOM_PPLIB_CLASSIFICATION_UI_MASK) >> ATOM_PPLIB_CLASSIFICATION_UI_SHIFT; + ps->classification.flags = make_classification_flags(hwmgr, + le16_to_cpu(pnon_clock_info->usClassification), + le16_to_cpu(pnon_clock_info->usClassification2)); + + ps->classification.temporary_state = false; + ps->classification.to_be_deleted = false; + tmp = le32_to_cpu(pnon_clock_info->ulCapsAndSettings) & + ATOM_PPLIB_SINGLE_DISPLAY_ONLY; + + ps->validation.singleDisplayOnly = (0 != tmp); + + tmp = le32_to_cpu(pnon_clock_info->ulCapsAndSettings) & + ATOM_PPLIB_DISALLOW_ON_DC; + + ps->validation.disallowOnDC = (0 != tmp); + + ps->pcie.lanes = ((le32_to_cpu(pnon_clock_info->ulCapsAndSettings) & + ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> + ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1; + + ps->pcie.lanes = 0; + + ps->display.disableFrameModulation = false; + + rrr_index = (le32_to_cpu(pnon_clock_info->ulCapsAndSettings) & + ATOM_PPLIB_LIMITED_REFRESHRATE_VALUE_MASK) >> + ATOM_PPLIB_LIMITED_REFRESHRATE_VALUE_SHIFT; + + if (rrr_index != ATOM_PPLIB_LIMITED_REFRESHRATE_UNLIMITED) { + static const uint8_t look_up[(ATOM_PPLIB_LIMITED_REFRESHRATE_VALUE_MASK >> ATOM_PPLIB_LIMITED_REFRESHRATE_VALUE_SHIFT) + 1] = \ + { 0, 50, 0 }; + + ps->display.refreshrateSource = PP_RefreshrateSource_Explicit; + ps->display.explicitRefreshrate = look_up[rrr_index]; + ps->display.limitRefreshrate = true; + + if (ps->display.explicitRefreshrate == 0) + ps->display.limitRefreshrate = false; + } else + ps->display.limitRefreshrate = false; + + tmp = le32_to_cpu(pnon_clock_info->ulCapsAndSettings) & + ATOM_PPLIB_ENABLE_VARIBRIGHT; + + ps->display.enableVariBright = (0 != tmp); + + tmp = le32_to_cpu(pnon_clock_info->ulCapsAndSettings) & + ATOM_PPLIB_SWSTATE_MEMORY_DLL_OFF; + + ps->memory.dllOff = (0 != tmp); + + ps->memory.m3arb = (le32_to_cpu(pnon_clock_info->ulCapsAndSettings) & + ATOM_PPLIB_M3ARB_MASK) >> ATOM_PPLIB_M3ARB_SHIFT; + + ps->temperatures.min = PP_TEMPERATURE_UNITS_PER_CENTIGRADES * + pnon_clock_info->ucMinTemperature; + + ps->temperatures.max = PP_TEMPERATURE_UNITS_PER_CENTIGRADES * + pnon_clock_info->ucMaxTemperature; + + tmp = le32_to_cpu(pnon_clock_info->ulCapsAndSettings) & + ATOM_PPLIB_SOFTWARE_DISABLE_LOADBALANCING; + + ps->software.disableLoadBalancing = tmp; + + tmp = le32_to_cpu(pnon_clock_info->ulCapsAndSettings) & + 
ATOM_PPLIB_SOFTWARE_ENABLE_SLEEP_FOR_TIMESTAMPS; + + ps->software.enableSleepForTimestamps = (0 != tmp); + + ps->validation.supportedPowerLevels = pnon_clock_info->ucRequiredPower; + + if (ATOM_PPLIB_NONCLOCKINFO_VER1 < version) { + ps->uvd_clocks.VCLK = pnon_clock_info->ulVCLK; + ps->uvd_clocks.DCLK = pnon_clock_info->ulDCLK; + } else { + ps->uvd_clocks.VCLK = 0; + ps->uvd_clocks.DCLK = 0; + } + + return 0; +} + +static ULONG size_of_entry_v2(ULONG num_dpm_levels) +{ + return (sizeof(UCHAR) + sizeof(UCHAR) + + (num_dpm_levels * sizeof(UCHAR))); +} + +static const ATOM_PPLIB_STATE_V2 *get_state_entry_v2( + const StateArray * pstate_arrays, + ULONG entry_index) +{ + ULONG i; + const ATOM_PPLIB_STATE_V2 *pstate; + + pstate = pstate_arrays->states; + if (entry_index <= pstate_arrays->ucNumEntries) { + for (i = 0; i < entry_index; i++) + pstate = (ATOM_PPLIB_STATE_V2 *)( + (unsigned long)pstate + + size_of_entry_v2(pstate->ucNumDPMLevels)); + } + return pstate; +} + + +static const ATOM_PPLIB_POWERPLAYTABLE *get_powerplay_table( + struct pp_hwmgr *hwmgr) +{ + const void *table_addr = NULL; + uint8_t frev, crev; + uint16_t size; + + table_addr = cgs_atom_get_data_table(hwmgr->device, + GetIndexIntoMasterTable(DATA, PowerPlayInfo), + &size, &frev, &crev); + + hwmgr->soft_pp_table = table_addr; + + return (const ATOM_PPLIB_POWERPLAYTABLE *)table_addr; +} + + +int pp_tables_get_num_of_entries(struct pp_hwmgr *hwmgr, + unsigned long *num_of_entries) +{ + const StateArray *pstate_arrays; + const ATOM_PPLIB_POWERPLAYTABLE *powerplay_table = get_powerplay_table(hwmgr); + + if (powerplay_table == NULL) + return -1; + + if (powerplay_table->sHeader.ucTableFormatRevision >= 6) { + pstate_arrays = (StateArray *)(((unsigned long)powerplay_table) + + le16_to_cpu(powerplay_table->usStateArrayOffset)); + + *num_of_entries = (unsigned long)(pstate_arrays->ucNumEntries); + } else + *num_of_entries = (unsigned long)(powerplay_table->ucNumStates); + + return 0; +} + +int pp_tables_get_entry(struct pp_hwmgr *hwmgr, + unsigned long entry_index, + struct pp_power_state *ps, + pp_tables_hw_clock_info_callback func) +{ + int i; + const StateArray *pstate_arrays; + const ATOM_PPLIB_STATE_V2 *pstate_entry_v2; + const ATOM_PPLIB_NONCLOCK_INFO *pnon_clock_info; + const ATOM_PPLIB_POWERPLAYTABLE *powerplay_table = get_powerplay_table(hwmgr); + int result = 0; + int res = 0; + + const ClockInfoArray *pclock_arrays; + + const NonClockInfoArray *pnon_clock_arrays; + + const ATOM_PPLIB_STATE *pstate_entry; + + if (powerplay_table == NULL) + return -1; + + ps->classification.bios_index = entry_index; + + if (powerplay_table->sHeader.ucTableFormatRevision >= 6) { + pstate_arrays = (StateArray *)(((unsigned long)powerplay_table) + + le16_to_cpu(powerplay_table->usStateArrayOffset)); + + if (entry_index > pstate_arrays->ucNumEntries) + return -1; + + pstate_entry_v2 = get_state_entry_v2(pstate_arrays, entry_index); + pclock_arrays = (ClockInfoArray *)(((unsigned long)powerplay_table) + + le16_to_cpu(powerplay_table->usClockInfoArrayOffset)); + + pnon_clock_arrays = (NonClockInfoArray *)(((unsigned long)powerplay_table) + + le16_to_cpu(powerplay_table->usNonClockInfoArrayOffset)); + + pnon_clock_info = (ATOM_PPLIB_NONCLOCK_INFO *)((unsigned long)(pnon_clock_arrays->nonClockInfo) + + (pstate_entry_v2->nonClockInfoIndex * pnon_clock_arrays->ucEntrySize)); + + result = init_non_clock_fields(hwmgr, ps, pnon_clock_arrays->ucEntrySize, pnon_clock_info); + + for (i = 0; i < pstate_entry_v2->ucNumDPMLevels; i++) { + const void *pclock_info 
= (const void *)( + (unsigned long)(pclock_arrays->clockInfo) + + (pstate_entry_v2->clockInfoIndex[i] * pclock_arrays->ucEntrySize)); + res = func(hwmgr, &ps->hardware, i, pclock_info); + if ((0 == result) && (0 != res)) + result = res; + } + } else { + if (entry_index > powerplay_table->ucNumStates) + return -1; + + pstate_entry = (ATOM_PPLIB_STATE *)((unsigned long)powerplay_table + powerplay_table->usStateArrayOffset + + entry_index * powerplay_table->ucStateEntrySize); + + pnon_clock_info = (ATOM_PPLIB_NONCLOCK_INFO *)((unsigned long)powerplay_table + + le16_to_cpu(powerplay_table->usNonClockInfoArrayOffset) + + pstate_entry->ucNonClockStateIndex * + powerplay_table->ucNonClockSize); + + result = init_non_clock_fields(hwmgr, ps, + powerplay_table->ucNonClockSize, + pnon_clock_info); + + for (i = 0; i < powerplay_table->ucStateEntrySize-1; i++) { + const void *pclock_info = (const void *)((unsigned long)powerplay_table + + le16_to_cpu(powerplay_table->usClockInfoArrayOffset) + + pstate_entry->ucClockStateIndices[i] * + powerplay_table->ucClockInfoSize); + + int res = func(hwmgr, &ps->hardware, i, pclock_info); + + if ((0 == result) && (0 != res)) + result = res; + } + } + + if ((0 == result) && + (0 != (ps->classification.flags & PP_StateClassificationFlag_Boot))) + result = hwmgr->hwmgr_func->patch_boot_state(hwmgr, &(ps->hardware)); + + return result; +} + + + +static int init_powerplay_tables( + struct pp_hwmgr *hwmgr, + const ATOM_PPLIB_POWERPLAYTABLE *powerplay_table +) +{ + return 0; +} + + +static int init_thermal_controller( + struct pp_hwmgr *hwmgr, + const ATOM_PPLIB_POWERPLAYTABLE *powerplay_table) +{ + return 0; +} + +static int init_overdrive_limits_V1_4(struct pp_hwmgr *hwmgr, + const ATOM_PPLIB_POWERPLAYTABLE *powerplay_table, + const ATOM_FIRMWARE_INFO_V1_4 *fw_info) +{ + hwmgr->platform_descriptor.overdriveLimit.engineClock = + le32_to_cpu(fw_info->ulASICMaxEngineClock); + + hwmgr->platform_descriptor.overdriveLimit.memoryClock = + le32_to_cpu(fw_info->ulASICMaxMemoryClock); + + hwmgr->platform_descriptor.maxOverdriveVDDC = + le32_to_cpu(fw_info->ul3DAccelerationEngineClock) & 0x7FF; + + hwmgr->platform_descriptor.minOverdriveVDDC = + le16_to_cpu(fw_info->usBootUpVDDCVoltage); + + hwmgr->platform_descriptor.maxOverdriveVDDC = + le16_to_cpu(fw_info->usBootUpVDDCVoltage); + + hwmgr->platform_descriptor.overdriveVDDCStep = 0; + return 0; +} + +static int init_overdrive_limits_V2_1(struct pp_hwmgr *hwmgr, + const ATOM_PPLIB_POWERPLAYTABLE *powerplay_table, + const ATOM_FIRMWARE_INFO_V2_1 *fw_info) +{ + const ATOM_PPLIB_POWERPLAYTABLE3 *powerplay_table3; + const ATOM_PPLIB_EXTENDEDHEADER *header; + + if (le16_to_cpu(powerplay_table->usTableSize) < + sizeof(ATOM_PPLIB_POWERPLAYTABLE3)) + return 0; + + powerplay_table3 = (const ATOM_PPLIB_POWERPLAYTABLE3 *)powerplay_table; + + if (0 == powerplay_table3->usExtendendedHeaderOffset) + return 0; + + header = (ATOM_PPLIB_EXTENDEDHEADER *)(((unsigned long) powerplay_table) + + le16_to_cpu(powerplay_table3->usExtendendedHeaderOffset)); + + hwmgr->platform_descriptor.overdriveLimit.engineClock = le32_to_cpu(header->ulMaxEngineClock); + hwmgr->platform_descriptor.overdriveLimit.memoryClock = le32_to_cpu(header->ulMaxMemoryClock); + + + hwmgr->platform_descriptor.minOverdriveVDDC = 0; + hwmgr->platform_descriptor.maxOverdriveVDDC = 0; + hwmgr->platform_descriptor.overdriveVDDCStep = 0; + + return 0; +} + +static int init_overdrive_limits(struct pp_hwmgr *hwmgr, + const ATOM_PPLIB_POWERPLAYTABLE *powerplay_table) +{ + int result; + 
uint8_t frev, crev; + uint16_t size; + + const ATOM_COMMON_TABLE_HEADER *fw_info = NULL; + + hwmgr->platform_descriptor.overdriveLimit.engineClock = 0; + hwmgr->platform_descriptor.overdriveLimit.memoryClock = 0; + hwmgr->platform_descriptor.minOverdriveVDDC = 0; + hwmgr->platform_descriptor.maxOverdriveVDDC = 0; + + /* We assume here that fw_info is unchanged if this call fails.*/ + fw_info = cgs_atom_get_data_table(hwmgr->device, + GetIndexIntoMasterTable(DATA, FirmwareInfo), + &size, &frev, &crev); + + if ((fw_info->ucTableFormatRevision == 1) + && (fw_info->usStructureSize >= sizeof(ATOM_FIRMWARE_INFO_V1_4))) + result = init_overdrive_limits_V1_4(hwmgr, + powerplay_table, + (const ATOM_FIRMWARE_INFO_V1_4 *)fw_info); + + else if ((fw_info->ucTableFormatRevision == 2) + && (fw_info->usStructureSize >= sizeof(ATOM_FIRMWARE_INFO_V2_1))) + result = init_overdrive_limits_V2_1(hwmgr, + powerplay_table, + (const ATOM_FIRMWARE_INFO_V2_1 *)fw_info); + + if (hwmgr->platform_descriptor.overdriveLimit.engineClock > 0 + && hwmgr->platform_descriptor.overdriveLimit.memoryClock > 0 + && !phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_OverdriveDisabledByPowerBudget)) + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ACOverdriveSupport); + + return result; +} + +static int get_uvd_clock_voltage_limit_table(struct pp_hwmgr *hwmgr, + struct phm_uvd_clock_voltage_dependency_table **ptable, + const ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *table, + const UVDClockInfoArray *array) +{ + unsigned long table_size, i; + struct phm_uvd_clock_voltage_dependency_table *uvd_table; + + table_size = sizeof(unsigned long) + + sizeof(struct phm_uvd_clock_voltage_dependency_table) * + table->numEntries; + + uvd_table = kzalloc(table_size, GFP_KERNEL); + if (NULL == uvd_table) + return -ENOMEM; + + uvd_table->count = table->numEntries; + + for (i = 0; i < table->numEntries; i++) { + const UVDClockInfo *entry = + &array->entries[table->entries[i].ucUVDClockInfoIndex]; + uvd_table->entries[i].v = (unsigned long)le16_to_cpu(table->entries[i].usVoltage); + uvd_table->entries[i].vclk = ((unsigned long)entry->ucVClkHigh << 16) + | le16_to_cpu(entry->usVClkLow); + uvd_table->entries[i].dclk = ((unsigned long)entry->ucDClkHigh << 16) + | le16_to_cpu(entry->usDClkLow); + } + + *ptable = uvd_table; + + return 0; +} + +static int get_vce_clock_voltage_limit_table(struct pp_hwmgr *hwmgr, + struct phm_vce_clock_voltage_dependency_table **ptable, + const ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *table, + const VCEClockInfoArray *array) +{ + unsigned long table_size, i; + struct phm_vce_clock_voltage_dependency_table *vce_table = NULL; + + table_size = sizeof(unsigned long) + + sizeof(struct phm_vce_clock_voltage_dependency_table) + * table->numEntries; + + vce_table = kzalloc(table_size, GFP_KERNEL); + if (NULL == vce_table) + return -ENOMEM; + + vce_table->count = table->numEntries; + for (i = 0; i < table->numEntries; i++) { + const VCEClockInfo *entry = &array->entries[table->entries[i].ucVCEClockInfoIndex]; + + vce_table->entries[i].v = (unsigned long)le16_to_cpu(table->entries[i].usVoltage); + vce_table->entries[i].evclk = ((unsigned long)entry->ucEVClkHigh << 16) + | le16_to_cpu(entry->usEVClkLow); + vce_table->entries[i].ecclk = ((unsigned long)entry->ucECClkHigh << 16) + | le16_to_cpu(entry->usECClkLow); + } + + *ptable = vce_table; + + return 0; +} + +static int get_samu_clock_voltage_limit_table(struct pp_hwmgr *hwmgr, + struct phm_samu_clock_voltage_dependency_table 
**ptable, + const ATOM_PPLIB_SAMClk_Voltage_Limit_Table *table) +{ + unsigned long table_size, i; + struct phm_samu_clock_voltage_dependency_table *samu_table; + + table_size = sizeof(unsigned long) + + sizeof(struct phm_samu_clock_voltage_dependency_table) * + table->numEntries; + + samu_table = kzalloc(table_size, GFP_KERNEL); + if (NULL == samu_table) + return -ENOMEM; + + samu_table->count = table->numEntries; + + for (i = 0; i < table->numEntries; i++) { + samu_table->entries[i].v = (unsigned long)le16_to_cpu(table->entries[i].usVoltage); + samu_table->entries[i].samclk = ((unsigned long)table->entries[i].ucSAMClockHigh << 16) + | le16_to_cpu(table->entries[i].usSAMClockLow); + } + + *ptable = samu_table; + + return 0; +} + +static int get_acp_clock_voltage_limit_table(struct pp_hwmgr *hwmgr, + struct phm_acp_clock_voltage_dependency_table **ptable, + const ATOM_PPLIB_ACPClk_Voltage_Limit_Table *table) +{ + unsigned table_size, i; + struct phm_acp_clock_voltage_dependency_table *acp_table; + + table_size = sizeof(unsigned long) + + sizeof(struct phm_acp_clock_voltage_dependency_table) * + table->numEntries; + + acp_table = kzalloc(table_size, GFP_KERNEL); + if (NULL == acp_table) + return -ENOMEM; + + acp_table->count = (unsigned long)table->numEntries; + + for (i = 0; i < table->numEntries; i++) { + acp_table->entries[i].v = (unsigned long)le16_to_cpu(table->entries[i].usVoltage); + acp_table->entries[i].acpclk = ((unsigned long)table->entries[i].ucACPClockHigh << 16) + | le16_to_cpu(table->entries[i].usACPClockLow); + } + + *ptable = acp_table; + + return 0; +} + +static int init_clock_voltage_dependency(struct pp_hwmgr *hwmgr, + const ATOM_PPLIB_POWERPLAYTABLE *powerplay_table) +{ + ATOM_PPLIB_Clock_Voltage_Dependency_Table *table; + ATOM_PPLIB_Clock_Voltage_Limit_Table *limit_table; + int result = 0; + + uint16_t vce_clock_info_array_offset; + uint16_t uvd_clock_info_array_offset; + uint16_t table_offset; + + hwmgr->dyn_state.vddc_dependency_on_sclk = NULL; + hwmgr->dyn_state.vddci_dependency_on_mclk = NULL; + hwmgr->dyn_state.vddc_dependency_on_mclk = NULL; + hwmgr->dyn_state.vddc_dep_on_dal_pwrl = NULL; + hwmgr->dyn_state.mvdd_dependency_on_mclk = NULL; + hwmgr->dyn_state.vce_clock_voltage_dependency_table = NULL; + hwmgr->dyn_state.uvd_clock_voltage_dependency_table = NULL; + hwmgr->dyn_state.samu_clock_voltage_dependency_table = NULL; + hwmgr->dyn_state.acp_clock_voltage_dependency_table = NULL; + hwmgr->dyn_state.ppm_parameter_table = NULL; + hwmgr->dyn_state.vdd_gfx_dependency_on_sclk = NULL; + + vce_clock_info_array_offset = get_vce_clock_info_array_offset( + hwmgr, powerplay_table); + table_offset = get_vce_clock_voltage_limit_table_offset(hwmgr, + powerplay_table); + if (vce_clock_info_array_offset > 0 && table_offset > 0) { + const VCEClockInfoArray *array = (const VCEClockInfoArray *) + (((unsigned long) powerplay_table) + + vce_clock_info_array_offset); + const ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *table = + (const ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *) + (((unsigned long) powerplay_table) + table_offset); + result = get_vce_clock_voltage_limit_table(hwmgr, + &hwmgr->dyn_state.vce_clock_voltage_dependency_table, + table, array); + } + + uvd_clock_info_array_offset = get_uvd_clock_info_array_offset(hwmgr, powerplay_table); + table_offset = get_uvd_clock_voltage_limit_table_offset(hwmgr, powerplay_table); + + if (uvd_clock_info_array_offset > 0 && table_offset > 0) { + const UVDClockInfoArray *array = (const UVDClockInfoArray *) + (((unsigned long) 
powerplay_table) + + uvd_clock_info_array_offset); + const ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *ptable = + (const ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *) + (((unsigned long) powerplay_table) + table_offset); + result = get_uvd_clock_voltage_limit_table(hwmgr, + &hwmgr->dyn_state.uvd_clock_voltage_dependency_table, ptable, array); + } + + table_offset = get_samu_clock_voltage_limit_table_offset(hwmgr, + powerplay_table); + + if (table_offset > 0) { + const ATOM_PPLIB_SAMClk_Voltage_Limit_Table *ptable = + (const ATOM_PPLIB_SAMClk_Voltage_Limit_Table *) + (((unsigned long) powerplay_table) + table_offset); + result = get_samu_clock_voltage_limit_table(hwmgr, + &hwmgr->dyn_state.samu_clock_voltage_dependency_table, ptable); + } + + table_offset = get_acp_clock_voltage_limit_table_offset(hwmgr, + powerplay_table); + + if (table_offset > 0) { + const ATOM_PPLIB_ACPClk_Voltage_Limit_Table *ptable = + (const ATOM_PPLIB_ACPClk_Voltage_Limit_Table *) + (((unsigned long) powerplay_table) + table_offset); + result = get_acp_clock_voltage_limit_table(hwmgr, + &hwmgr->dyn_state.acp_clock_voltage_dependency_table, ptable); + } + + table_offset = get_cacp_tdp_table_offset(hwmgr, powerplay_table); + if (table_offset > 0) { + UCHAR rev_id = *(UCHAR *)(((unsigned long)powerplay_table) + table_offset); + + if (rev_id > 0) { + const ATOM_PPLIB_POWERTUNE_Table_V1 *tune_table = + (const ATOM_PPLIB_POWERTUNE_Table_V1 *) + (((unsigned long) powerplay_table) + table_offset); + result = get_cac_tdp_table(hwmgr, &hwmgr->dyn_state.cac_dtp_table, + &tune_table->power_tune_table, + le16_to_cpu(tune_table->usMaximumPowerDeliveryLimit)); + hwmgr->dyn_state.cac_dtp_table->usDefaultTargetOperatingTemp = + le16_to_cpu(tune_table->usTjMax); + } else { + const ATOM_PPLIB_POWERTUNE_Table *tune_table = + (const ATOM_PPLIB_POWERTUNE_Table *) + (((unsigned long) powerplay_table) + table_offset); + result = get_cac_tdp_table(hwmgr, + &hwmgr->dyn_state.cac_dtp_table, + &tune_table->power_tune_table, 255); + } + } + + if (le16_to_cpu(powerplay_table->usTableSize) >= + sizeof(ATOM_PPLIB_POWERPLAYTABLE4)) { + const ATOM_PPLIB_POWERPLAYTABLE4 *powerplay_table4 = + (const ATOM_PPLIB_POWERPLAYTABLE4 *)powerplay_table; + if (0 != powerplay_table4->usVddcDependencyOnSCLKOffset) { + table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *) + (((unsigned long) powerplay_table4) + + powerplay_table4->usVddcDependencyOnSCLKOffset); + result = get_clock_voltage_dependency_table(hwmgr, + &hwmgr->dyn_state.vddc_dependency_on_sclk, table); + } + + if (result == 0 && (0 != powerplay_table4->usVddciDependencyOnMCLKOffset)) { + table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *) + (((unsigned long) powerplay_table4) + + powerplay_table4->usVddciDependencyOnMCLKOffset); + result = get_clock_voltage_dependency_table(hwmgr, + &hwmgr->dyn_state.vddci_dependency_on_mclk, table); + } + + if (result == 0 && (0 != powerplay_table4->usVddcDependencyOnMCLKOffset)) { + table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *) + (((unsigned long) powerplay_table4) + + powerplay_table4->usVddcDependencyOnMCLKOffset); + result = get_clock_voltage_dependency_table(hwmgr, + &hwmgr->dyn_state.vddc_dependency_on_mclk, table); + } + + if (result == 0 && (0 != powerplay_table4->usMaxClockVoltageOnDCOffset)) { + limit_table = (ATOM_PPLIB_Clock_Voltage_Limit_Table *) + (((unsigned long) powerplay_table4) + + powerplay_table4->usMaxClockVoltageOnDCOffset); + result = get_clock_voltage_limit(hwmgr, + &hwmgr->dyn_state.max_clock_voltage_on_dc, limit_table); + } + + if 
(result == 0 && (NULL != hwmgr->dyn_state.vddc_dependency_on_mclk) && + (0 != hwmgr->dyn_state.vddc_dependency_on_mclk->count)) + result = get_valid_clk(hwmgr, &hwmgr->dyn_state.valid_mclk_values, + hwmgr->dyn_state.vddc_dependency_on_mclk); + + if(result == 0 && (NULL != hwmgr->dyn_state.vddc_dependency_on_sclk) && + (0 != hwmgr->dyn_state.vddc_dependency_on_sclk->count)) + result = get_valid_clk(hwmgr, + &hwmgr->dyn_state.valid_sclk_values, + hwmgr->dyn_state.vddc_dependency_on_sclk); + + if (result == 0 && (0 != powerplay_table4->usMvddDependencyOnMCLKOffset)) { + table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *) + (((unsigned long) powerplay_table4) + + powerplay_table4->usMvddDependencyOnMCLKOffset); + result = get_clock_voltage_dependency_table(hwmgr, + &hwmgr->dyn_state.mvdd_dependency_on_mclk, table); + } + } + + table_offset = get_sclk_vdd_gfx_clock_voltage_dependency_table_offset(hwmgr, + powerplay_table); + + if (table_offset > 0) { + table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *) + (((unsigned long) powerplay_table) + table_offset); + result = get_clock_voltage_dependency_table(hwmgr, + &hwmgr->dyn_state.vdd_gfx_dependency_on_sclk, table); + } + + return result; +} + +static int get_cac_leakage_table(struct pp_hwmgr *hwmgr, + struct phm_cac_leakage_table **ptable, + const ATOM_PPLIB_CAC_Leakage_Table *table) +{ + struct phm_cac_leakage_table *cac_leakage_table; + unsigned long table_size, i; + + if (hwmgr == NULL || table == NULL || ptable == NULL) + return -EINVAL; + + table_size = sizeof(ULONG) + + (sizeof(struct phm_cac_leakage_table) * table->ucNumEntries); + + cac_leakage_table = kzalloc(table_size, GFP_KERNEL); + + if (cac_leakage_table == NULL) + return -ENOMEM; + + cac_leakage_table->count = (ULONG)table->ucNumEntries; + + for (i = 0; i < cac_leakage_table->count; i++) { + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_EVV)) { + cac_leakage_table->entries[i].Vddc1 = le16_to_cpu(table->entries[i].usVddc1); + cac_leakage_table->entries[i].Vddc2 = le16_to_cpu(table->entries[i].usVddc2); + cac_leakage_table->entries[i].Vddc3 = le16_to_cpu(table->entries[i].usVddc3); + } else { + cac_leakage_table->entries[i].Vddc = le16_to_cpu(table->entries[i].usVddc); + cac_leakage_table->entries[i].Leakage = le32_to_cpu(table->entries[i].ulLeakageValue); + } + } + + *ptable = cac_leakage_table; + + return 0; +} + +static int get_platform_power_management_table(struct pp_hwmgr *hwmgr, + ATOM_PPLIB_PPM_Table *atom_ppm_table) +{ + struct phm_ppm_table *ptr = kzalloc(sizeof(struct phm_ppm_table), GFP_KERNEL); + + if (NULL == ptr) + return -ENOMEM; + + ptr->ppm_design = atom_ppm_table->ucPpmDesign; + ptr->cpu_core_number = le16_to_cpu(atom_ppm_table->usCpuCoreNumber); + ptr->platform_tdp = le32_to_cpu(atom_ppm_table->ulPlatformTDP); + ptr->small_ac_platform_tdp = le32_to_cpu(atom_ppm_table->ulSmallACPlatformTDP); + ptr->platform_tdc = le32_to_cpu(atom_ppm_table->ulPlatformTDC); + ptr->small_ac_platform_tdc = le32_to_cpu(atom_ppm_table->ulSmallACPlatformTDC); + ptr->apu_tdp = le32_to_cpu(atom_ppm_table->ulApuTDP); + ptr->dgpu_tdp = le32_to_cpu(atom_ppm_table->ulDGpuTDP); + ptr->dgpu_ulv_power = le32_to_cpu(atom_ppm_table->ulDGpuUlvPower); + ptr->tj_max = le32_to_cpu(atom_ppm_table->ulTjmax); + hwmgr->dyn_state.ppm_parameter_table = ptr; + + return 0; +} + +static int init_dpm2_parameters(struct pp_hwmgr *hwmgr, + const ATOM_PPLIB_POWERPLAYTABLE *powerplay_table) +{ + int result = 0; + + if (le16_to_cpu(powerplay_table->usTableSize) >= + 
sizeof(ATOM_PPLIB_POWERPLAYTABLE5)) { + const ATOM_PPLIB_POWERPLAYTABLE5 *ptable5 = + (const ATOM_PPLIB_POWERPLAYTABLE5 *)powerplay_table; + const ATOM_PPLIB_POWERPLAYTABLE4 *ptable4 = + (const ATOM_PPLIB_POWERPLAYTABLE4 *) + (&ptable5->basicTable4); + const ATOM_PPLIB_POWERPLAYTABLE3 *ptable3 = + (const ATOM_PPLIB_POWERPLAYTABLE3 *) + (&ptable4->basicTable3); + const ATOM_PPLIB_EXTENDEDHEADER *extended_header; + uint16_t table_offset; + ATOM_PPLIB_PPM_Table *atom_ppm_table; + + hwmgr->platform_descriptor.TDPLimit = le32_to_cpu(ptable5->ulTDPLimit); + hwmgr->platform_descriptor.nearTDPLimit = le32_to_cpu(ptable5->ulNearTDPLimit); + + hwmgr->platform_descriptor.TDPODLimit = le16_to_cpu(ptable5->usTDPODLimit); + hwmgr->platform_descriptor.TDPAdjustment = 0; + + hwmgr->platform_descriptor.VidAdjustment = 0; + hwmgr->platform_descriptor.VidAdjustmentPolarity = 0; + hwmgr->platform_descriptor.VidMinLimit = 0; + hwmgr->platform_descriptor.VidMaxLimit = 1500000; + hwmgr->platform_descriptor.VidStep = 6250; + + hwmgr->platform_descriptor.nearTDPLimitAdjusted = le32_to_cpu(ptable5->ulNearTDPLimit); + + if (hwmgr->platform_descriptor.TDPODLimit != 0) + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_PowerControl); + + hwmgr->platform_descriptor.SQRampingThreshold = le32_to_cpu(ptable5->ulSQRampingThreshold); + + hwmgr->platform_descriptor.CACLeakage = le32_to_cpu(ptable5->ulCACLeakage); + + hwmgr->dyn_state.cac_leakage_table = NULL; + + if (0 != ptable5->usCACLeakageTableOffset) { + const ATOM_PPLIB_CAC_Leakage_Table *pCAC_leakage_table = + (ATOM_PPLIB_CAC_Leakage_Table *)(((unsigned long)ptable5) + + le16_to_cpu(ptable5->usCACLeakageTableOffset)); + result = get_cac_leakage_table(hwmgr, + &hwmgr->dyn_state.cac_leakage_table, pCAC_leakage_table); + } + + hwmgr->platform_descriptor.LoadLineSlope = le16_to_cpu(ptable5->usLoadLineSlope); + + hwmgr->dyn_state.ppm_parameter_table = NULL; + + if (0 != ptable3->usExtendendedHeaderOffset) { + extended_header = (const ATOM_PPLIB_EXTENDEDHEADER *) + (((unsigned long)powerplay_table) + + le16_to_cpu(ptable3->usExtendendedHeaderOffset)); + if ((extended_header->usPPMTableOffset > 0) && + le16_to_cpu(extended_header->usSize) >= + SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5) { + table_offset = le16_to_cpu(extended_header->usPPMTableOffset); + atom_ppm_table = (ATOM_PPLIB_PPM_Table *) + (((unsigned long)powerplay_table) + table_offset); + if (0 == get_platform_power_management_table(hwmgr, atom_ppm_table)) + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_EnablePlatformPowerManagement); + } + } + } + return result; +} + +static int init_phase_shedding_table(struct pp_hwmgr *hwmgr, + const ATOM_PPLIB_POWERPLAYTABLE *powerplay_table) +{ + if (le16_to_cpu(powerplay_table->usTableSize) >= + sizeof(ATOM_PPLIB_POWERPLAYTABLE4)) { + const ATOM_PPLIB_POWERPLAYTABLE4 *powerplay_table4 = + (const ATOM_PPLIB_POWERPLAYTABLE4 *)powerplay_table; + + if (0 != powerplay_table4->usVddcPhaseShedLimitsTableOffset) { + const ATOM_PPLIB_PhaseSheddingLimits_Table *ptable = + (ATOM_PPLIB_PhaseSheddingLimits_Table *) + (((unsigned long)powerplay_table4) + + le16_to_cpu(powerplay_table4->usVddcPhaseShedLimitsTableOffset)); + struct phm_phase_shedding_limits_table *table; + unsigned long size, i; + + + size = sizeof(unsigned long) + + (sizeof(struct phm_phase_shedding_limits_table) * + ptable->ucNumEntries); + + table = kzalloc(size, GFP_KERNEL); + + if (table == NULL) + return -ENOMEM; + + table->count = (unsigned long)ptable->ucNumEntries; + + for 
(i = 0; i < table->count; i++) { + table->entries[i].Voltage = (unsigned long)le16_to_cpu(ptable->entries[i].usVoltage); + table->entries[i].Sclk = ((unsigned long)ptable->entries[i].ucSclkHigh << 16) + | le16_to_cpu(ptable->entries[i].usSclkLow); + table->entries[i].Mclk = ((unsigned long)ptable->entries[i].ucMclkHigh << 16) + | le16_to_cpu(ptable->entries[i].usMclkLow); + } + hwmgr->dyn_state.vddc_phase_shed_limits_table = table; + } + } + + return 0; +} + +int get_number_of_vce_state_table_entries( + struct pp_hwmgr *hwmgr) +{ + const ATOM_PPLIB_POWERPLAYTABLE *table = + get_powerplay_table(hwmgr); + const ATOM_PPLIB_VCE_State_Table *vce_table = + get_vce_state_table(hwmgr, table); + + if (vce_table > 0) + return vce_table->numEntries; + + return 0; +} + +int get_vce_state_table_entry(struct pp_hwmgr *hwmgr, + unsigned long i, + struct PP_VCEState *vce_state, + void **clock_info, + unsigned long *flag) +{ + const ATOM_PPLIB_POWERPLAYTABLE *powerplay_table = get_powerplay_table(hwmgr); + + const ATOM_PPLIB_VCE_State_Table *vce_state_table = get_vce_state_table(hwmgr, powerplay_table); + + unsigned short vce_clock_info_array_offset = get_vce_clock_info_array_offset(hwmgr, powerplay_table); + + const VCEClockInfoArray *vce_clock_info_array = (const VCEClockInfoArray *)(((unsigned long) powerplay_table) + vce_clock_info_array_offset); + + const ClockInfoArray *clock_arrays = (ClockInfoArray *)(((unsigned long)powerplay_table) + powerplay_table->usClockInfoArrayOffset); + + const ATOM_PPLIB_VCE_State_Record *record = &vce_state_table->entries[i]; + + const VCEClockInfo *vce_clock_info = &vce_clock_info_array->entries[record->ucVCEClockInfoIndex]; + + unsigned long clockInfoIndex = record->ucClockInfoIndex & 0x3F; + + *flag = (record->ucClockInfoIndex >> NUM_BITS_CLOCK_INFO_ARRAY_INDEX); + + vce_state->evclk = ((uint32_t)vce_clock_info->ucEVClkHigh << 16) | vce_clock_info->usEVClkLow; + vce_state->ecclk = ((uint32_t)vce_clock_info->ucECClkHigh << 16) | vce_clock_info->usECClkLow; + + *clock_info = (void *)((unsigned long)(clock_arrays->clockInfo) + (clockInfoIndex * clock_arrays->ucEntrySize)); + + return 0; +} + + +static int pp_tables_initialize(struct pp_hwmgr *hwmgr) +{ + int result; + const ATOM_PPLIB_POWERPLAYTABLE *powerplay_table; + + hwmgr->need_pp_table_upload = true; + + powerplay_table = get_powerplay_table(hwmgr); + + result = init_powerplay_tables(hwmgr, powerplay_table); + + PP_ASSERT_WITH_CODE((result == 0), + "init_powerplay_tables failed", return result); + + result = set_platform_caps(hwmgr, + le32_to_cpu(powerplay_table->ulPlatformCaps)); + + PP_ASSERT_WITH_CODE((result == 0), + "set_platform_caps failed", return result); + + result = init_thermal_controller(hwmgr, powerplay_table); + + PP_ASSERT_WITH_CODE((result == 0), + "init_thermal_controller failed", return result); + + result = init_overdrive_limits(hwmgr, powerplay_table); + + PP_ASSERT_WITH_CODE((result == 0), + "init_overdrive_limits failed", return result); + + result = init_clock_voltage_dependency(hwmgr, + powerplay_table); + + PP_ASSERT_WITH_CODE((result == 0), + "init_clock_voltage_dependency failed", return result); + + result = init_dpm2_parameters(hwmgr, powerplay_table); + + PP_ASSERT_WITH_CODE((result == 0), + "init_dpm2_parameters failed", return result); + + result = init_phase_shedding_table(hwmgr, powerplay_table); + + PP_ASSERT_WITH_CODE((result == 0), + "init_phase_shedding_table failed", return result); + + return result; +} + +static int pp_tables_uninitialize(struct pp_hwmgr *hwmgr) +{ + if 
(NULL != hwmgr->soft_pp_table) { + kfree(hwmgr->soft_pp_table); + hwmgr->soft_pp_table = NULL; + } + + if (NULL != hwmgr->dyn_state.vddc_dependency_on_sclk) { + kfree(hwmgr->dyn_state.vddc_dependency_on_sclk); + hwmgr->dyn_state.vddc_dependency_on_sclk = NULL; + } + + if (NULL != hwmgr->dyn_state.vddci_dependency_on_mclk) { + kfree(hwmgr->dyn_state.vddci_dependency_on_mclk); + hwmgr->dyn_state.vddci_dependency_on_mclk = NULL; + } + + if (NULL != hwmgr->dyn_state.vddc_dependency_on_mclk) { + kfree(hwmgr->dyn_state.vddc_dependency_on_mclk); + hwmgr->dyn_state.vddc_dependency_on_mclk = NULL; + } + + if (NULL != hwmgr->dyn_state.mvdd_dependency_on_mclk) { + kfree(hwmgr->dyn_state.mvdd_dependency_on_mclk); + hwmgr->dyn_state.mvdd_dependency_on_mclk = NULL; + } + + if (NULL != hwmgr->dyn_state.valid_mclk_values) { + kfree(hwmgr->dyn_state.valid_mclk_values); + hwmgr->dyn_state.valid_mclk_values = NULL; + } + + if (NULL != hwmgr->dyn_state.valid_sclk_values) { + kfree(hwmgr->dyn_state.valid_sclk_values); + hwmgr->dyn_state.valid_sclk_values = NULL; + } + + if (NULL != hwmgr->dyn_state.cac_leakage_table) { + kfree(hwmgr->dyn_state.cac_leakage_table); + hwmgr->dyn_state.cac_leakage_table = NULL; + } + + if (NULL != hwmgr->dyn_state.vddc_phase_shed_limits_table) { + kfree(hwmgr->dyn_state.vddc_phase_shed_limits_table); + hwmgr->dyn_state.vddc_phase_shed_limits_table = NULL; + } + + if (NULL != hwmgr->dyn_state.vce_clock_voltage_dependency_table) { + kfree(hwmgr->dyn_state.vce_clock_voltage_dependency_table); + hwmgr->dyn_state.vce_clock_voltage_dependency_table = NULL; + } + + if (NULL != hwmgr->dyn_state.uvd_clock_voltage_dependency_table) { + kfree(hwmgr->dyn_state.uvd_clock_voltage_dependency_table); + hwmgr->dyn_state.uvd_clock_voltage_dependency_table = NULL; + } + + if (NULL != hwmgr->dyn_state.samu_clock_voltage_dependency_table) { + kfree(hwmgr->dyn_state.samu_clock_voltage_dependency_table); + hwmgr->dyn_state.samu_clock_voltage_dependency_table = NULL; + } + + if (NULL != hwmgr->dyn_state.acp_clock_voltage_dependency_table) { + kfree(hwmgr->dyn_state.acp_clock_voltage_dependency_table); + hwmgr->dyn_state.acp_clock_voltage_dependency_table = NULL; + } + + if (NULL != hwmgr->dyn_state.cac_dtp_table) { + kfree(hwmgr->dyn_state.cac_dtp_table); + hwmgr->dyn_state.cac_dtp_table = NULL; + } + + if (NULL != hwmgr->dyn_state.ppm_parameter_table) { + kfree(hwmgr->dyn_state.ppm_parameter_table); + hwmgr->dyn_state.ppm_parameter_table = NULL; + } + + if (NULL != hwmgr->dyn_state.vdd_gfx_dependency_on_sclk) { + kfree(hwmgr->dyn_state.vdd_gfx_dependency_on_sclk); + hwmgr->dyn_state.vdd_gfx_dependency_on_sclk = NULL; + } + + if (NULL != hwmgr->dyn_state.vq_budgeting_table) { + kfree(hwmgr->dyn_state.vq_budgeting_table); + hwmgr->dyn_state.vq_budgeting_table = NULL; + } + + return 0; +} + +const struct pp_table_func pptable_funcs = { + .pptable_init = pp_tables_initialize, + .pptable_fini = pp_tables_uninitialize, + .pptable_get_number_of_vce_state_table_entries = + get_number_of_vce_state_table_entries, + .pptable_get_vce_state_table_entry = + get_vce_state_table_entry, +}; + diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.h b/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.h new file mode 100644 index 000000000000..30434802417e --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.h @@ -0,0 +1,47 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * Interface Functions related to the BIOS PowerPlay Tables. + * + */ + +#ifndef PROCESSPPTABLES_H +#define PROCESSPPTABLES_H + +struct pp_hwmgr; +struct pp_power_state; +struct pp_hw_power_state; + +extern const struct pp_table_func pptable_funcs; + +typedef int (*pp_tables_hw_clock_info_callback)(struct pp_hwmgr *hwmgr, + struct pp_hw_power_state *hw_ps, + unsigned int index, + const void *clock_info); + +int pp_tables_get_num_of_entries(struct pp_hwmgr *hwmgr, + unsigned long *num_of_entries); + +int pp_tables_get_entry(struct pp_hwmgr *hwmgr, + unsigned long entry_index, + struct pp_power_state *ps, + pp_tables_hw_clock_info_callback func); + +#endif diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_clockpowergating.c new file mode 100644 index 000000000000..e58d038a997b --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_clockpowergating.c @@ -0,0 +1,350 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#include "hwmgr.h" +#include "tonga_clockpowergating.h" +#include "tonga_ppsmc.h" +#include "tonga_hwmgr.h" + +int tonga_phm_powerdown_uvd(struct pp_hwmgr *hwmgr) +{ + if (phm_cf_want_uvd_power_gating(hwmgr)) + return smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_UVDPowerOFF); + return 0; +} + +int tonga_phm_powerup_uvd(struct pp_hwmgr *hwmgr) +{ + if (phm_cf_want_uvd_power_gating(hwmgr)) { + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_UVDDynamicPowerGating)) { + return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_UVDPowerON, 1); + } else { + return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_UVDPowerON, 0); + } + } + + return 0; +} + +int tonga_phm_powerdown_vce(struct pp_hwmgr *hwmgr) +{ + if (phm_cf_want_vce_power_gating(hwmgr)) + return smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_VCEPowerOFF); + return 0; +} + +int tonga_phm_powerup_vce(struct pp_hwmgr *hwmgr) +{ + if (phm_cf_want_vce_power_gating(hwmgr)) + return smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_VCEPowerON); + return 0; +} + +int tonga_phm_set_asic_block_gating(struct pp_hwmgr *hwmgr, enum PHM_AsicBlock block, enum PHM_ClockGateSetting gating) +{ + int ret = 0; + + switch (block) { + case PHM_AsicBlock_UVD_MVC: + case PHM_AsicBlock_UVD: + case PHM_AsicBlock_UVD_HD: + case PHM_AsicBlock_UVD_SD: + if (gating == PHM_ClockGateSetting_StaticOff) + ret = tonga_phm_powerdown_uvd(hwmgr); + else + ret = tonga_phm_powerup_uvd(hwmgr); + break; + case PHM_AsicBlock_GFX: + default: + break; + } + + return ret; +} + +int tonga_phm_disable_clock_power_gating(struct pp_hwmgr *hwmgr) +{ + struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); + + data->uvd_power_gated = false; + data->vce_power_gated = false; + + tonga_phm_powerup_uvd(hwmgr); + tonga_phm_powerup_vce(hwmgr); + + return 0; +} + +int tonga_phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate) +{ + struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); + + if (data->uvd_power_gated == bgate) + return 0; + + data->uvd_power_gated = bgate; + + if (bgate) { + cgs_set_clockgating_state(hwmgr->device, + AMD_IP_BLOCK_TYPE_UVD, + AMD_CG_STATE_UNGATE); + cgs_set_powergating_state(hwmgr->device, + AMD_IP_BLOCK_TYPE_UVD, + AMD_PG_STATE_GATE); + tonga_update_uvd_dpm(hwmgr, true); + tonga_phm_powerdown_uvd(hwmgr); + } else { + tonga_phm_powerup_uvd(hwmgr); + cgs_set_powergating_state(hwmgr->device, + AMD_IP_BLOCK_TYPE_UVD, + AMD_PG_STATE_UNGATE); + cgs_set_clockgating_state(hwmgr->device, + AMD_IP_BLOCK_TYPE_UVD, + AMD_PG_STATE_GATE); + + tonga_update_uvd_dpm(hwmgr, false); + } + + return 0; +} + +int tonga_phm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate) +{ + struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); + struct phm_set_power_state_input states; + const struct pp_power_state *pcurrent; + struct pp_power_state *requested; + + pcurrent = hwmgr->current_ps; + requested = hwmgr->request_ps; + + states.pcurrent_state = &(pcurrent->hardware); + states.pnew_state = &(requested->hardware); + + if (phm_cf_want_vce_power_gating(hwmgr)) { + if (data->vce_power_gated != bgate) { + if (bgate) { + cgs_set_clockgating_state( + hwmgr->device, + AMD_IP_BLOCK_TYPE_VCE, + AMD_CG_STATE_UNGATE); + cgs_set_powergating_state( + hwmgr->device, + AMD_IP_BLOCK_TYPE_VCE, + AMD_PG_STATE_GATE); + tonga_enable_disable_vce_dpm(hwmgr, false); + data->vce_power_gated = true; + } else { + tonga_phm_powerup_vce(hwmgr); + data->vce_power_gated = false; + cgs_set_powergating_state( 
+ hwmgr->device, + AMD_IP_BLOCK_TYPE_VCE, + AMD_PG_STATE_UNGATE); + cgs_set_clockgating_state( + hwmgr->device, + AMD_IP_BLOCK_TYPE_VCE, + AMD_PG_STATE_GATE); + + tonga_update_vce_dpm(hwmgr, &states); + tonga_enable_disable_vce_dpm(hwmgr, true); + return 0; + } + } + } else { + tonga_update_vce_dpm(hwmgr, &states); + tonga_enable_disable_vce_dpm(hwmgr, true); + return 0; + } + + if (!data->vce_power_gated) + tonga_update_vce_dpm(hwmgr, &states); + + return 0; +} + +int tonga_phm_update_clock_gatings(struct pp_hwmgr *hwmgr, + const uint32_t *msg_id) +{ + PPSMC_Msg msg; + uint32_t value; + + switch ((*msg_id & PP_GROUP_MASK) >> PP_GROUP_SHIFT) { + case PP_GROUP_GFX: + switch ((*msg_id & PP_BLOCK_MASK) >> PP_BLOCK_SHIFT) { + case PP_BLOCK_GFX_CG: + if (PP_STATE_SUPPORT_CG & *msg_id) { + msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG) + ? PPSMC_MSG_EnableClockGatingFeature + : PPSMC_MSG_DisableClockGatingFeature; + value = CG_GFX_CGCG_MASK; + + if (0 != smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg, value)) + return -1; + } + if (PP_STATE_SUPPORT_LS & *msg_id) { + msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS + ? PPSMC_MSG_EnableClockGatingFeature + : PPSMC_MSG_DisableClockGatingFeature; + value = CG_GFX_CGLS_MASK; + + if (0 != smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg, value)) + return -1; + } + break; + + case PP_BLOCK_GFX_MG: + /* For GFX MGCG, there are three different ones; + * CPF, RLC, and all others. CPF MGCG will not be used for Tonga. + * For GFX MGLS, Tonga will not support it. + * */ + if (PP_STATE_SUPPORT_CG & *msg_id) { + msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG) + ? PPSMC_MSG_EnableClockGatingFeature + : PPSMC_MSG_DisableClockGatingFeature; + value = (CG_RLC_MGCG_MASK | CG_GFX_OTHERS_MGCG_MASK); + + if (0 != smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg, value)) + return -1; + } + break; + + default: + return -1; + } + break; + + case PP_GROUP_SYS: + switch ((*msg_id & PP_BLOCK_MASK) >> PP_BLOCK_SHIFT) { + case PP_BLOCK_SYS_BIF: + if (PP_STATE_SUPPORT_LS & *msg_id) { + msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS + ? PPSMC_MSG_EnableClockGatingFeature + : PPSMC_MSG_DisableClockGatingFeature; + value = CG_SYS_BIF_MGLS_MASK; + + if (0 != smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg, value)) + return -1; + } + break; + + case PP_BLOCK_SYS_MC: + if (PP_STATE_SUPPORT_CG & *msg_id) { + msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG) + ? PPSMC_MSG_EnableClockGatingFeature + : PPSMC_MSG_DisableClockGatingFeature; + value = CG_SYS_MC_MGCG_MASK; + + if (0 != smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg, value)) + return -1; + } + + if (PP_STATE_SUPPORT_LS & *msg_id) { + msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS + ? PPSMC_MSG_EnableClockGatingFeature + : PPSMC_MSG_DisableClockGatingFeature; + value = CG_SYS_MC_MGLS_MASK; + + if (0 != smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg, value)) + return -1; + + } + break; + + case PP_BLOCK_SYS_HDP: + if (PP_STATE_SUPPORT_CG & *msg_id) { + msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG) + ? PPSMC_MSG_EnableClockGatingFeature + : PPSMC_MSG_DisableClockGatingFeature; + value = CG_SYS_HDP_MGCG_MASK; + + if (0 != smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg, value)) + return -1; + } + + if (PP_STATE_SUPPORT_LS & *msg_id) { + msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS + ? 
PPSMC_MSG_EnableClockGatingFeature + : PPSMC_MSG_DisableClockGatingFeature; + + value = CG_SYS_HDP_MGLS_MASK; + + if (0 != smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg, value)) + return -1; + } + break; + + case PP_BLOCK_SYS_SDMA: + if (PP_STATE_SUPPORT_CG & *msg_id) { + msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG) + ? PPSMC_MSG_EnableClockGatingFeature + : PPSMC_MSG_DisableClockGatingFeature; + value = CG_SYS_SDMA_MGCG_MASK; + + if (0 != smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg, value)) + return -1; + } + + if (PP_STATE_SUPPORT_LS & *msg_id) { + msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS + ? PPSMC_MSG_EnableClockGatingFeature + : PPSMC_MSG_DisableClockGatingFeature; + + value = CG_SYS_SDMA_MGLS_MASK; + + if (0 != smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg, value)) + return -1; + } + break; + + case PP_BLOCK_SYS_ROM: + if (PP_STATE_SUPPORT_CG & *msg_id) { + msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG) + ? PPSMC_MSG_EnableClockGatingFeature + : PPSMC_MSG_DisableClockGatingFeature; + value = CG_SYS_ROM_MASK; + + if (0 != smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg, value)) + return -1; + } + break; + + default: + return -1; + + } + break; + + default: + return -1; + + } + + return 0; +} diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_clockpowergating.h b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_clockpowergating.h new file mode 100644 index 000000000000..8bc38cb17b7f --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_clockpowergating.h @@ -0,0 +1,36 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#ifndef _TONGA_CLOCK_POWER_GATING_H_ +#define _TONGA_CLOCK_POWER_GATING_H_ + +#include "tonga_hwmgr.h" +#include "pp_asicblocks.h" + +extern int tonga_phm_set_asic_block_gating(struct pp_hwmgr *hwmgr, enum PHM_AsicBlock block, enum PHM_ClockGateSetting gating); +extern int tonga_phm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate); +extern int tonga_phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate); +extern int tonga_phm_powerdown_uvd(struct pp_hwmgr *hwmgr); +extern int tonga_phm_disable_clock_power_gating(struct pp_hwmgr *hwmgr); +extern int tonga_phm_update_clock_gatings(struct pp_hwmgr *hwmgr, const uint32_t *msg_id); +#endif /* _TONGA_CLOCK_POWER_GATING_H_ */ diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_dyn_defaults.h b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_dyn_defaults.h new file mode 100644 index 000000000000..080d69d77f04 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_dyn_defaults.h @@ -0,0 +1,107 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#ifndef TONGA_DYN_DEFAULTS_H +#define TONGA_DYN_DEFAULTS_H + + +/** \file + * Volcanic Islands Dynamic default parameters. + */ + +enum TONGAdpm_TrendDetection { + TONGAdpm_TrendDetection_AUTO, + TONGAdpm_TrendDetection_UP, + TONGAdpm_TrendDetection_DOWN +}; +typedef enum TONGAdpm_TrendDetection TONGAdpm_TrendDetection; + +/* Bit vector representing same fields as hardware register. */ +#define PPTONGA_VOTINGRIGHTSCLIENTS_DFLT0 0x3FFFC102 /* CP_Gfx_busy */ +/* HDP_busy */ +/* IH_busy */ +/* DRM_busy */ +/* DRMDMA_busy */ +/* UVD_busy */ +/* VCE_busy */ +/* ACP_busy */ +/* SAMU_busy */ +/* AVP_busy */ +/* SDMA enabled */ +#define PPTONGA_VOTINGRIGHTSCLIENTS_DFLT1 0x000400 /* FE_Gfx_busy - Intended for primary usage. Rest are for flexibility. */ +/* SH_Gfx_busy */ +/* RB_Gfx_busy */ +/* VCE_busy */ + +#define PPTONGA_VOTINGRIGHTSCLIENTS_DFLT2 0xC00080 /* SH_Gfx_busy - Intended for primary usage. Rest are for flexibility. */ +/* FE_Gfx_busy */ +/* RB_Gfx_busy */ +/* ACP_busy */ + +#define PPTONGA_VOTINGRIGHTSCLIENTS_DFLT3 0xC00200 /* RB_Gfx_busy - Intended for primary usage. Rest are for flexibility. 
*/ +/* FE_Gfx_busy */ +/* SH_Gfx_busy */ +/* UVD_busy */ + +#define PPTONGA_VOTINGRIGHTSCLIENTS_DFLT4 0xC01680 /* UVD_busy */ +/* VCE_busy */ +/* ACP_busy */ +/* SAMU_busy */ + +#define PPTONGA_VOTINGRIGHTSCLIENTS_DFLT5 0xC00033 /* GFX, HDP, DRMDMA */ +#define PPTONGA_VOTINGRIGHTSCLIENTS_DFLT6 0xC00033 /* GFX, HDP, DRMDMA */ +#define PPTONGA_VOTINGRIGHTSCLIENTS_DFLT7 0x3FFFC000 /* GFX, HDP, DRMDMA */ + + +/* thermal protection counter (units).*/ +#define PPTONGA_THERMALPROTECTCOUNTER_DFLT 0x200 /* ~19us */ + +/* static screen threshold unit */ +#define PPTONGA_STATICSCREENTHRESHOLDUNIT_DFLT 0 + +/* static screen threshold */ +#define PPTONGA_STATICSCREENTHRESHOLD_DFLT 0x00C8 + +/* gfx idle clock stop threshold */ +#define PPTONGA_GFXIDLECLOCKSTOPTHRESHOLD_DFLT 0x200 /* ~19us with static screen threshold unit of 0 */ + +/* Fixed reference divider to use when building baby stepping tables. */ +#define PPTONGA_REFERENCEDIVIDER_DFLT 4 + +/* + * ULV voltage change delay time + * Used to be delay_vreg in N.I. split for S.I. + * Using N.I. delay_vreg value as default + * ReferenceClock = 2700 + * VoltageResponseTime = 1000 + * VDDCDelayTime = (VoltageResponseTime * ReferenceClock) / 1600 = 1687 + */ + +#define PPTONGA_ULVVOLTAGECHANGEDELAY_DFLT 1687 + +#define PPTONGA_CGULVPARAMETER_DFLT 0x00040035 +#define PPTONGA_CGULVCONTROL_DFLT 0x00007450 +#define PPTONGA_TARGETACTIVITY_DFLT 30 /*30% */ +#define PPTONGA_MCLK_TARGETACTIVITY_DFLT 10 /*10% */ + +#endif + diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c new file mode 100644 index 000000000000..44a925006479 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c @@ -0,0 +1,6075 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#include +#include +#include +#include "linux/delay.h" +#include "pp_acpi.h" +#include "hwmgr.h" +#include +#include "tonga_hwmgr.h" +#include "pptable.h" +#include "processpptables.h" +#include "tonga_processpptables.h" +#include "tonga_pptable.h" +#include "pp_debug.h" +#include "tonga_ppsmc.h" +#include "cgs_common.h" +#include "pppcielanes.h" +#include "tonga_dyn_defaults.h" +#include "smumgr.h" +#include "tonga_smumgr.h" +#include "tonga_clockpowergating.h" +#include "tonga_thermal.h" + +#include "smu/smu_7_1_2_d.h" +#include "smu/smu_7_1_2_sh_mask.h" + +#include "gmc/gmc_8_1_d.h" +#include "gmc/gmc_8_1_sh_mask.h" + +#include "bif/bif_5_0_d.h" +#include "bif/bif_5_0_sh_mask.h" + +#include "cgs_linux.h" +#include "eventmgr.h" +#include "amd_pcie_helpers.h" + +#define MC_CG_ARB_FREQ_F0 0x0a +#define MC_CG_ARB_FREQ_F1 0x0b +#define MC_CG_ARB_FREQ_F2 0x0c +#define MC_CG_ARB_FREQ_F3 0x0d + +#define MC_CG_SEQ_DRAMCONF_S0 0x05 +#define MC_CG_SEQ_DRAMCONF_S1 0x06 +#define MC_CG_SEQ_YCLK_SUSPEND 0x04 +#define MC_CG_SEQ_YCLK_RESUME 0x0a + +#define PCIE_BUS_CLK 10000 +#define TCLK (PCIE_BUS_CLK / 10) + +#define SMC_RAM_END 0x40000 +#define SMC_CG_IND_START 0xc0030000 +#define SMC_CG_IND_END 0xc0040000 /* First byte after SMC_CG_IND*/ + +#define VOLTAGE_SCALE 4 +#define VOLTAGE_VID_OFFSET_SCALE1 625 +#define VOLTAGE_VID_OFFSET_SCALE2 100 + +#define VDDC_VDDCI_DELTA 200 +#define VDDC_VDDGFX_DELTA 300 + +#define MC_SEQ_MISC0_GDDR5_SHIFT 28 +#define MC_SEQ_MISC0_GDDR5_MASK 0xf0000000 +#define MC_SEQ_MISC0_GDDR5_VALUE 5 + +typedef uint32_t PECI_RegistryValue; + +/* [2.5%,~2.5%] Clock stretched is multiple of 2.5% vs not and [Fmin, Fmax, LDO_REFSEL, USE_FOR_LOW_FREQ] */ +uint16_t PP_ClockStretcherLookupTable[2][4] = { + {600, 1050, 3, 0}, + {600, 1050, 6, 1} }; + +/* [FF, SS] type, [] 4 voltage ranges, and [Floor Freq, Boundary Freq, VID min , VID max] */ +uint32_t PP_ClockStretcherDDTTable[2][4][4] = { + { {265, 529, 120, 128}, {325, 650, 96, 119}, {430, 860, 32, 95}, {0, 0, 0, 31} }, + { {275, 550, 104, 112}, {319, 638, 96, 103}, {360, 720, 64, 95}, {384, 768, 32, 63} } }; + +/* [Use_For_Low_freq] value, [0%, 5%, 10%, 7.14%, 14.28%, 20%] (coming from PWR_CKS_CNTL.stretch_amount reg spec) */ +uint8_t PP_ClockStretchAmountConversion[2][6] = { + {0, 1, 3, 2, 4, 5}, + {0, 2, 4, 5, 6, 5} }; + +/* Values for the CG_THERMAL_CTRL::DPM_EVENT_SRC field. 
*/ +enum DPM_EVENT_SRC { + DPM_EVENT_SRC_ANALOG = 0, /* Internal analog trip point */ + DPM_EVENT_SRC_EXTERNAL = 1, /* External (GPIO 17) signal */ + DPM_EVENT_SRC_DIGITAL = 2, /* Internal digital trip point (DIG_THERM_DPM) */ + DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3, /* Internal analog or external */ + DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL = 4 /* Internal digital or external */ +}; +typedef enum DPM_EVENT_SRC DPM_EVENT_SRC; + +const unsigned long PhwTonga_Magic = (unsigned long)(PHM_VIslands_Magic); + +struct tonga_power_state *cast_phw_tonga_power_state( + struct pp_hw_power_state *hw_ps) +{ + if (hw_ps == NULL) + return NULL; + + PP_ASSERT_WITH_CODE((PhwTonga_Magic == hw_ps->magic), + "Invalid Powerstate Type!", + return NULL); + + return (struct tonga_power_state *)hw_ps; +} + +const struct tonga_power_state *cast_const_phw_tonga_power_state( + const struct pp_hw_power_state *hw_ps) +{ + if (hw_ps == NULL) + return NULL; + + PP_ASSERT_WITH_CODE((PhwTonga_Magic == hw_ps->magic), + "Invalid Powerstate Type!", + return NULL); + + return (const struct tonga_power_state *)hw_ps; +} + +int tonga_add_voltage(struct pp_hwmgr *hwmgr, + phm_ppt_v1_voltage_lookup_table *look_up_table, + phm_ppt_v1_voltage_lookup_record *record) +{ + uint32_t i; + PP_ASSERT_WITH_CODE((NULL != look_up_table), + "Lookup Table empty.", return -1;); + PP_ASSERT_WITH_CODE((0 != look_up_table->count), + "Lookup Table empty.", return -1;); + PP_ASSERT_WITH_CODE((SMU72_MAX_LEVELS_VDDGFX >= look_up_table->count), + "Lookup Table is full.", return -1;); + + /* This is to avoid entering duplicate calculated records. */ + for (i = 0; i < look_up_table->count; i++) { + if (look_up_table->entries[i].us_vdd == record->us_vdd) { + if (look_up_table->entries[i].us_calculated == 1) + return 0; + else + break; + } + } + + look_up_table->entries[i].us_calculated = 1; + look_up_table->entries[i].us_vdd = record->us_vdd; + look_up_table->entries[i].us_cac_low = record->us_cac_low; + look_up_table->entries[i].us_cac_mid = record->us_cac_mid; + look_up_table->entries[i].us_cac_high = record->us_cac_high; + /* Only increment the count when we're appending, not replacing duplicate entry. */ + if (i == look_up_table->count) + look_up_table->count++; + + return 0; +} + +int tonga_notify_smc_display_change(struct pp_hwmgr *hwmgr, bool has_display) +{ + PPSMC_Msg msg = has_display? (PPSMC_Msg)PPSMC_HasDisplay : (PPSMC_Msg)PPSMC_NoDisplay; + + return (smum_send_msg_to_smc(hwmgr->smumgr, msg) == 0) ? 
0 : -1; +} + +uint8_t tonga_get_voltage_id(pp_atomctrl_voltage_table *voltage_table, + uint32_t voltage) +{ + uint8_t count = (uint8_t) (voltage_table->count); + uint8_t i = 0; + + PP_ASSERT_WITH_CODE((NULL != voltage_table), + "Voltage Table empty.", return 0;); + PP_ASSERT_WITH_CODE((0 != count), + "Voltage Table empty.", return 0;); + + for (i = 0; i < count; i++) { + /* find first voltage bigger than requested */ + if (voltage_table->entries[i].value >= voltage) + return i; + } + + /* voltage is bigger than max voltage in the table */ + return i - 1; +} + +/** + * @brief tonga_get_voltage_index + * Returns the index of the requested voltage record in the lookup table. + * @param look_up_table - lookup list to search in + * @param voltage - voltage to look for + * @return index of the first record with a voltage equal to or bigger than + * the requested one, or the last index if the request exceeds all entries + */ +uint8_t tonga_get_voltage_index(phm_ppt_v1_voltage_lookup_table *look_up_table, + uint16_t voltage) +{ + uint8_t count = (uint8_t) (look_up_table->count); + uint8_t i; + + PP_ASSERT_WITH_CODE((NULL != look_up_table), "Lookup Table empty.", return 0;); + PP_ASSERT_WITH_CODE((0 != count), "Lookup Table empty.", return 0;); + + for (i = 0; i < count; i++) { + /* find first voltage equal or bigger than requested */ + if (look_up_table->entries[i].us_vdd >= voltage) + return i; + } + + /* voltage is bigger than max voltage in the table */ + return i-1; +} + +bool tonga_is_dpm_running(struct pp_hwmgr *hwmgr) +{ + /* + * We return the status of Voltage Control instead of checking SCLK/MCLK DPM + * because we may have test scenarios that need us to intentionally disable SCLK/MCLK DPM, + * whereas voltage control is a fundamental change that will not be disabled + */ + + return (0 == PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + FEATURE_STATUS, VOLTAGE_CONTROLLER_ON) ? 1 : 0); +} + +/** + * Re-generate the DPM level enable mask value + * @param dpm_table the single DPM table to build the mask from + */ +static uint32_t tonga_get_dpm_level_enable_mask_value( + struct tonga_single_dpm_table * dpm_table) +{ + uint32_t i; + uint32_t mask_value = 0; + + /* walk the levels from highest to lowest so that bit n matches dpm_levels[n] */ + for (i = dpm_table->count; i > 0; i--) { + mask_value = mask_value << 1; + + if (dpm_table->dpm_levels[i-1].enabled) + mask_value |= 0x1; + else + mask_value &= 0xFFFFFFFE; + } + return mask_value; +} + +/** + * Retrieve DPM default values from registry (if available) + * + * @param hwmgr the address of the powerplay hardware manager. 
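+ * + * Note: the registry reads in this function are stubbed out (the local + * "tmp" is always 0), so the compile-time defaults from tonga_dyn_defaults.h + * are what actually take effect.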
+ */ +void tonga_initialize_dpm_defaults(struct pp_hwmgr *hwmgr) +{ + tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); + phw_tonga_ulv_parm *ulv = &(data->ulv); + uint32_t tmp; + + ulv->ch_ulv_parameter = PPTONGA_CGULVPARAMETER_DFLT; + data->voting_rights_clients0 = PPTONGA_VOTINGRIGHTSCLIENTS_DFLT0; + data->voting_rights_clients1 = PPTONGA_VOTINGRIGHTSCLIENTS_DFLT1; + data->voting_rights_clients2 = PPTONGA_VOTINGRIGHTSCLIENTS_DFLT2; + data->voting_rights_clients3 = PPTONGA_VOTINGRIGHTSCLIENTS_DFLT3; + data->voting_rights_clients4 = PPTONGA_VOTINGRIGHTSCLIENTS_DFLT4; + data->voting_rights_clients5 = PPTONGA_VOTINGRIGHTSCLIENTS_DFLT5; + data->voting_rights_clients6 = PPTONGA_VOTINGRIGHTSCLIENTS_DFLT6; + data->voting_rights_clients7 = PPTONGA_VOTINGRIGHTSCLIENTS_DFLT7; + + data->static_screen_threshold_unit = PPTONGA_STATICSCREENTHRESHOLDUNIT_DFLT; + data->static_screen_threshold = PPTONGA_STATICSCREENTHRESHOLD_DFLT; + + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ABM); + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_NonABMSupportInPPLib); + + tmp = 0; + if (tmp == 0) + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_DynamicACTiming); + + tmp = 0; + if (0 != tmp) + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_DisableMemoryTransition); + + data->mclk_strobe_mode_threshold = 40000; + data->mclk_stutter_mode_threshold = 30000; + data->mclk_edc_enable_threshold = 40000; + data->mclk_edc_wr_enable_threshold = 40000; + + tmp = 0; + if (tmp != 0) + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_DisableMCLS); + + data->pcie_gen_performance.max = PP_PCIEGen1; + data->pcie_gen_performance.min = PP_PCIEGen3; + data->pcie_gen_power_saving.max = PP_PCIEGen1; + data->pcie_gen_power_saving.min = PP_PCIEGen3; + + data->pcie_lane_performance.max = 0; + data->pcie_lane_performance.min = 16; + data->pcie_lane_power_saving.max = 0; + data->pcie_lane_power_saving.min = 16; + + tmp = 0; + + if (tmp) + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_SclkThrottleLowNotification); + + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_DynamicUVDState); + +} + +int tonga_update_sclk_threshold(struct pp_hwmgr *hwmgr) +{ + tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); + + int result = 0; + uint32_t low_sclk_interrupt_threshold = 0; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_SclkThrottleLowNotification) + && (hwmgr->gfx_arbiter.sclk_threshold != data->low_sclk_interrupt_threshold)) { + data->low_sclk_interrupt_threshold = hwmgr->gfx_arbiter.sclk_threshold; + low_sclk_interrupt_threshold = data->low_sclk_interrupt_threshold; + + CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold); + + result = tonga_copy_bytes_to_smc( + hwmgr->smumgr, + data->dpm_table_start + offsetof(SMU72_Discrete_DpmTable, + LowSclkInterruptThreshold), + (uint8_t *)&low_sclk_interrupt_threshold, + sizeof(uint32_t), + data->sram_end + ); + } + + return result; +} + +/** + * Find SCLK value that is associated with specified virtual_voltage_Id. + * + * @param hwmgr the address of the powerplay hardware manager. + * @param virtual_voltage_Id voltageId to look for. + * @param sclk output value . 
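+ * + * The SCLK is resolved by scanning the vdd_dep_on_sclk dependency table for + * the first entry whose voltage lookup index matches the requested virtual + * (leakage) voltage id.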
+ * @return 0 on success, -1 if the association is not found + */ +static int tonga_get_sclk_for_voltage_evv(struct pp_hwmgr *hwmgr, + phm_ppt_v1_voltage_lookup_table *lookup_table, + uint16_t virtual_voltage_id, uint32_t *sclk) +{ + uint8_t entryId; + uint8_t voltageId; + struct phm_ppt_v1_information *pptable_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + + PP_ASSERT_WITH_CODE(lookup_table->count != 0, "Lookup table is empty", return -1); + + /* search for leakage voltage ID 0xff01 ~ 0xff08 and the matching sclk */ + for (entryId = 0; entryId < pptable_info->vdd_dep_on_sclk->count; entryId++) { + voltageId = pptable_info->vdd_dep_on_sclk->entries[entryId].vddInd; + if (lookup_table->entries[voltageId].us_vdd == virtual_voltage_id) + break; + } + + PP_ASSERT_WITH_CODE(entryId < pptable_info->vdd_dep_on_sclk->count, + "Can't find requested voltage id in vdd_dep_on_sclk table!", + return -1; + ); + + *sclk = pptable_info->vdd_dep_on_sclk->entries[entryId].clk; + + return 0; +} + +/** + * Get Leakage VDDC based on leakage ID. + * + * @param hwmgr the address of the powerplay hardware manager. + * @return 0 on success, -1 if an invalid VDDGFX value (zero or not below 2V) + * is read back from the VBIOS + */ +int tonga_get_evv_voltage(struct pp_hwmgr *hwmgr) +{ + tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); + struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); + phm_ppt_v1_clock_voltage_dependency_table *sclk_table = pptable_info->vdd_dep_on_sclk; + uint16_t virtual_voltage_id; + uint16_t vddc = 0; + uint16_t vddgfx = 0; + uint16_t i, j; + uint32_t sclk = 0; + + /* retrieve voltage for leakage ID (0xff01 + i) */ + for (i = 0; i < TONGA_MAX_LEAKAGE_COUNT; i++) { + virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i; + + /* in split mode we should have only vddgfx EVV leakages */ + if (data->vdd_gfx_control == TONGA_VOLTAGE_CONTROL_BY_SVID2) { + if (0 == tonga_get_sclk_for_voltage_evv(hwmgr, + pptable_info->vddgfx_lookup_table, virtual_voltage_id, &sclk)) { + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ClockStretcher)) { + for (j = 1; j < sclk_table->count; j++) { + if (sclk_table->entries[j].clk == sclk && + sclk_table->entries[j].cks_enable == 0) { + sclk += 5000; + break; + } + } + } + PP_ASSERT_WITH_CODE(0 == atomctrl_get_voltage_evv_on_sclk + (hwmgr, VOLTAGE_TYPE_VDDGFX, sclk, + virtual_voltage_id, &vddgfx), + "Error retrieving EVV voltage value!", continue); + + /* need to make sure vddgfx is less than 2V or else it could burn the ASIC. */ + PP_ASSERT_WITH_CODE((vddgfx < 2000 && vddgfx != 0), "Invalid VDDGFX value!", return -1); + + /* the voltage should not be zero nor equal to leakage ID */ + if (vddgfx != 0 && vddgfx != virtual_voltage_id) { + data->vddcgfx_leakage.actual_voltage[data->vddcgfx_leakage.count] = vddgfx; + data->vddcgfx_leakage.leakage_id[data->vddcgfx_leakage.count] = virtual_voltage_id; + data->vddcgfx_leakage.count++; + } + } + } else { + /* in merged mode we have only vddc EVV leakages */ + if (0 == tonga_get_sclk_for_voltage_evv(hwmgr, + pptable_info->vddc_lookup_table, + virtual_voltage_id, &sclk)) { + PP_ASSERT_WITH_CODE(0 == atomctrl_get_voltage_evv_on_sclk + (hwmgr, VOLTAGE_TYPE_VDDC, sclk, + virtual_voltage_id, &vddc), + "Error retrieving EVV voltage value!", continue); + + /* need to make sure vddc is less than 2V or else it could burn the ASIC. */ + if (vddc > 2000) + printk(KERN_ERR "[ powerplay ] Invalid VDDC value! 
\n"); + + /* the voltage should not be zero nor equal to leakage ID */ + if (vddc != 0 && vddc != virtual_voltage_id) { + data->vddc_leakage.actual_voltage[data->vddc_leakage.count] = vddc; + data->vddc_leakage.leakage_id[data->vddc_leakage.count] = virtual_voltage_id; + data->vddc_leakage.count++; + } + } + } + } + + return 0; +} + +int tonga_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) +{ + tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); + + /* enable SCLK dpm */ + if (0 == data->sclk_dpm_key_disabled) { + PP_ASSERT_WITH_CODE( + (0 == smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_DPM_Enable)), + "Failed to enable SCLK DPM during DPM Start Function!", + return -1); + } + + /* enable MCLK dpm */ + if (0 == data->mclk_dpm_key_disabled) { + PP_ASSERT_WITH_CODE( + (0 == smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_MCLKDPM_Enable)), + "Failed to enable MCLK DPM during DPM Start Function!", + return -1); + + PHM_WRITE_FIELD(hwmgr->device, MC_SEQ_CNTL_3, CAC_EN, 0x1); + + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixLCAC_MC0_CNTL, 0x05);/* CH0,1 read */ + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixLCAC_MC1_CNTL, 0x05);/* CH2,3 read */ + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixLCAC_CPL_CNTL, 0x100005);/*Read */ + + udelay(10); + + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixLCAC_MC0_CNTL, 0x400005);/* CH0,1 write */ + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixLCAC_MC1_CNTL, 0x400005);/* CH2,3 write */ + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixLCAC_CPL_CNTL, 0x500005);/* write */ + + } + + return 0; +} + +int tonga_start_dpm(struct pp_hwmgr *hwmgr) +{ + tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); + + /* enable general power management */ + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT, GLOBAL_PWRMGT_EN, 1); + /* enable sclk deep sleep */ + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL, DYNAMIC_PM_EN, 1); + + /* prepare for PCIE DPM */ + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, data->soft_regs_start + + offsetof(SMU72_SoftRegisters, VoltageChangeTimeout), 0x1000); + + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE, SWRST_COMMAND_1, RESETLC, 0x0); + + PP_ASSERT_WITH_CODE( + (0 == smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_Voltage_Cntl_Enable)), + "Failed to enable voltage DPM during DPM Start Function!", + return -1); + + if (0 != tonga_enable_sclk_mclk_dpm(hwmgr)) { + PP_ASSERT_WITH_CODE(0, "Failed to enable Sclk DPM and Mclk DPM!", return -1); + } + + /* enable PCIE dpm */ + if (0 == data->pcie_dpm_key_disabled) { + PP_ASSERT_WITH_CODE( + (0 == smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_PCIeDPM_Enable)), + "Failed to enable pcie DPM during DPM Start Function!", + return -1 + ); + } + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_Falcon_QuickTransition)) { + smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_EnableACDCGPIOInterrupt); + } + + return 0; +} + +int tonga_disable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) +{ + tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); + + /* disable SCLK dpm */ + if (0 == data->sclk_dpm_key_disabled) { + /* Checking if DPM is running. 
If we discover hang because of this, we should skip this message.*/ + PP_ASSERT_WITH_CODE( + (0 == tonga_is_dpm_running(hwmgr)), + "Trying to Disable SCLK DPM when DPM is disabled", + return -1 + ); + + PP_ASSERT_WITH_CODE( + (0 == smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_DPM_Disable)), + "Failed to disable SCLK DPM during DPM stop Function!", + return -1); + } + + /* disable MCLK dpm */ + if (0 == data->mclk_dpm_key_disabled) { + /* Checking if DPM is running. If we discover hang because of this, we should skip this message. */ + PP_ASSERT_WITH_CODE( + (0 == tonga_is_dpm_running(hwmgr)), + "Trying to Disable MCLK DPM when DPM is disabled", + return -1 + ); + + PP_ASSERT_WITH_CODE( + (0 == smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_MCLKDPM_Disable)), + "Failed to Disable MCLK DPM during DPM stop Function!", + return -1); + } + + return 0; +} + +int tonga_stop_dpm(struct pp_hwmgr *hwmgr) +{ + tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); + + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT, GLOBAL_PWRMGT_EN, 0); + /* disable sclk deep sleep*/ + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL, DYNAMIC_PM_EN, 0); + + /* disable PCIE dpm */ + if (0 == data->pcie_dpm_key_disabled) { + /* Checking if DPM is running. If we discover hang because of this, we should skip this message.*/ + PP_ASSERT_WITH_CODE( + (0 == tonga_is_dpm_running(hwmgr)), + "Trying to Disable PCIE DPM when DPM is disabled", + return -1 + ); + PP_ASSERT_WITH_CODE( + (0 == smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_PCIeDPM_Disable)), + "Failed to disable pcie DPM during DPM stop Function!", + return -1); + } + + if (0 != tonga_disable_sclk_mclk_dpm(hwmgr)) + PP_ASSERT_WITH_CODE(0, "Failed to disable Sclk DPM and Mclk DPM!", return -1); + + /* Checking if DPM is running. If we discover hang because of this, we should skip this message.*/ + PP_ASSERT_WITH_CODE( + (0 == tonga_is_dpm_running(hwmgr)), + "Trying to Disable Voltage CNTL when DPM is disabled", + return -1 + ); + + PP_ASSERT_WITH_CODE( + (0 == smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_Voltage_Cntl_Disable)), + "Failed to disable voltage DPM during DPM stop Function!", + return -1); + + return 0; +} + +int tonga_enable_sclk_control(struct pp_hwmgr *hwmgr) +{ + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL, SCLK_PWRMGT_OFF, 0); + + return 0; +} + +/** + * Send a message to the SMC and return a parameter + * + * @param hwmgr: the address of the powerplay hardware manager. + * @param msg: the message to send. + * @param parameter: pointer to the received parameter + * @return The response that came from the SMC. + */ +PPSMC_Result tonga_send_msg_to_smc_return_parameter( + struct pp_hwmgr *hwmgr, + PPSMC_Msg msg, + uint32_t *parameter) +{ + int result; + + result = smum_send_msg_to_smc(hwmgr->smumgr, msg); + + if ((0 == result) && parameter) { + *parameter = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0); + } + + return result; +} + +/** + * force DPM power State + * + * @param hwmgr: the address of the powerplay hardware manager. + * @param n : DPM level + * @return The response that came from the SMC. + */ +int tonga_dpm_force_state(struct pp_hwmgr *hwmgr, uint32_t n) +{ + tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); + uint32_t level_mask = 1 << n; + + /* Checking if DPM is running. If we discover hang because of this, we should skip this message. 
*/ + PP_ASSERT_WITH_CODE(0 == tonga_is_dpm_running(hwmgr), + "Trying to force SCLK when DPM is disabled", return -1;); + if (0 == data->sclk_dpm_key_disabled) + return (0 == smum_send_msg_to_smc_with_parameter( + hwmgr->smumgr, + (PPSMC_Msg)(PPSMC_MSG_SCLKDPM_SetEnabledMask), + level_mask) ? 0 : 1); + + return 0; +} + +/** + * force DPM power State + * + * @param hwmgr: the address of the powerplay hardware manager. + * @param n : DPM level + * @return The response that came from the SMC. + */ +int tonga_dpm_force_state_mclk(struct pp_hwmgr *hwmgr, uint32_t n) +{ + tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); + uint32_t level_mask = 1 << n; + + /* Checking if DPM is running. If we discover hang because of this, we should skip this message. */ + PP_ASSERT_WITH_CODE(0 == tonga_is_dpm_running(hwmgr), + "Trying to Force MCLK when DPM is disabled", return -1;); + if (0 == data->mclk_dpm_key_disabled) + return (0 == smum_send_msg_to_smc_with_parameter( + hwmgr->smumgr, + (PPSMC_Msg)(PPSMC_MSG_MCLKDPM_SetEnabledMask), + level_mask) ? 0 : 1); + + return 0; +} + +/** + * force DPM power State + * + * @param hwmgr: the address of the powerplay hardware manager. + * @param n : DPM level + * @return The response that came from the SMC. + */ +int tonga_dpm_force_state_pcie(struct pp_hwmgr *hwmgr, uint32_t n) +{ + tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); + + /* Checking if DPM is running. If we discover hang because of this, we should skip this message.*/ + PP_ASSERT_WITH_CODE(0 == tonga_is_dpm_running(hwmgr), + "Trying to Force PCIE level when DPM is disabled", return -1;); + if (0 == data->pcie_dpm_key_disabled) + return (0 == smum_send_msg_to_smc_with_parameter( + hwmgr->smumgr, + (PPSMC_Msg)(PPSMC_MSG_PCIeDPM_ForceLevel), + n) ? 0 : 1); + + return 0; +} + +/** + * Set the initial state by calling SMC to switch to this state directly + * + * @param hwmgr the address of the powerplay hardware manager. + * @return always 0 + */ +int tonga_set_boot_state(struct pp_hwmgr *hwmgr) +{ + /* + * SMC only stores one state that SW will ask to switch too, + * so we switch the the just uploaded one + */ + return (0 == tonga_disable_sclk_mclk_dpm(hwmgr)) ? 0 : 1; +} + +/** + * Get the location of various tables inside the FW image. + * + * @param hwmgr the address of the powerplay hardware manager. 
+ * @return always 0 + */ +int tonga_process_firmware_header(struct pp_hwmgr *hwmgr) +{ + tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); + struct tonga_smumgr *tonga_smu = (struct tonga_smumgr *)(hwmgr->smumgr->backend); + + uint32_t tmp; + int result; + bool error = 0; + + result = tonga_read_smc_sram_dword(hwmgr->smumgr, + SMU72_FIRMWARE_HEADER_LOCATION + + offsetof(SMU72_Firmware_Header, DpmTable), + &tmp, data->sram_end); + + if (0 == result) { + data->dpm_table_start = tmp; + } + + error |= (0 != result); + + result = tonga_read_smc_sram_dword(hwmgr->smumgr, + SMU72_FIRMWARE_HEADER_LOCATION + + offsetof(SMU72_Firmware_Header, SoftRegisters), + &tmp, data->sram_end); + + if (0 == result) { + data->soft_regs_start = tmp; + tonga_smu->ulSoftRegsStart = tmp; + } + + error |= (0 != result); + + + result = tonga_read_smc_sram_dword(hwmgr->smumgr, + SMU72_FIRMWARE_HEADER_LOCATION + + offsetof(SMU72_Firmware_Header, mcRegisterTable), + &tmp, data->sram_end); + + if (0 == result) { + data->mc_reg_table_start = tmp; + } + + result = tonga_read_smc_sram_dword(hwmgr->smumgr, + SMU72_FIRMWARE_HEADER_LOCATION + + offsetof(SMU72_Firmware_Header, FanTable), + &tmp, data->sram_end); + + if (0 == result) { + data->fan_table_start = tmp; + } + + error |= (0 != result); + + result = tonga_read_smc_sram_dword(hwmgr->smumgr, + SMU72_FIRMWARE_HEADER_LOCATION + + offsetof(SMU72_Firmware_Header, mcArbDramTimingTable), + &tmp, data->sram_end); + + if (0 == result) { + data->arb_table_start = tmp; + } + + error |= (0 != result); + + + result = tonga_read_smc_sram_dword(hwmgr->smumgr, + SMU72_FIRMWARE_HEADER_LOCATION + + offsetof(SMU72_Firmware_Header, Version), + &tmp, data->sram_end); + + if (0 == result) { + hwmgr->microcode_version_info.SMC = tmp; + } + + error |= (0 != result); + + return error ? 1 : 0; +} + +/** + * Read clock related registers. + * + * @param hwmgr the address of the powerplay hardware manager. 
+ * @return always 0 + */ +int tonga_read_clock_registers(struct pp_hwmgr *hwmgr) +{ + tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); + + data->clock_registers.vCG_SPLL_FUNC_CNTL = + cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL); + data->clock_registers.vCG_SPLL_FUNC_CNTL_2 = + cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_2); + data->clock_registers.vCG_SPLL_FUNC_CNTL_3 = + cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_3); + data->clock_registers.vCG_SPLL_FUNC_CNTL_4 = + cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_4); + data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM = + cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_SPREAD_SPECTRUM); + data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2 = + cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_SPREAD_SPECTRUM_2); + data->clock_registers.vDLL_CNTL = + cgs_read_register(hwmgr->device, mmDLL_CNTL); + data->clock_registers.vMCLK_PWRMGT_CNTL = + cgs_read_register(hwmgr->device, mmMCLK_PWRMGT_CNTL); + data->clock_registers.vMPLL_AD_FUNC_CNTL = + cgs_read_register(hwmgr->device, mmMPLL_AD_FUNC_CNTL); + data->clock_registers.vMPLL_DQ_FUNC_CNTL = + cgs_read_register(hwmgr->device, mmMPLL_DQ_FUNC_CNTL); + data->clock_registers.vMPLL_FUNC_CNTL = + cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL); + data->clock_registers.vMPLL_FUNC_CNTL_1 = + cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL_1); + data->clock_registers.vMPLL_FUNC_CNTL_2 = + cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL_2); + data->clock_registers.vMPLL_SS1 = + cgs_read_register(hwmgr->device, mmMPLL_SS1); + data->clock_registers.vMPLL_SS2 = + cgs_read_register(hwmgr->device, mmMPLL_SS2); + + return 0; +} + +/** + * Find out if memory is GDDR5. + * + * @param hwmgr the address of the powerplay hardware manager. + * @return always 0 + */ +int tonga_get_memory_type(struct pp_hwmgr *hwmgr) +{ + tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); + uint32_t temp; + + temp = cgs_read_register(hwmgr->device, mmMC_SEQ_MISC0); + + data->is_memory_GDDR5 = (MC_SEQ_MISC0_GDDR5_VALUE == + ((temp & MC_SEQ_MISC0_GDDR5_MASK) >> + MC_SEQ_MISC0_GDDR5_SHIFT)); + + return 0; +} + +/** + * Enables Dynamic Power Management by SMC + * + * @param hwmgr the address of the powerplay hardware manager. + * @return always 0 + */ +int tonga_enable_acpi_power_management(struct pp_hwmgr *hwmgr) +{ + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT, STATIC_PM_EN, 1); + + return 0; +} + +/** + * Initialize PowerGating States for different engines + * + * @param hwmgr the address of the powerplay hardware manager. + * @return always 0 + */ +int tonga_init_power_gate_state(struct pp_hwmgr *hwmgr) +{ + tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); + + data->uvd_power_gated = 0; + data->vce_power_gated = 0; + data->samu_power_gated = 0; + data->acp_power_gated = 0; + data->pg_acp_init = 1; + + return 0; +} + +/** + * Checks if DPM is enabled + * + * @param hwmgr the address of the powerplay hardware manager. + * @return always 0 + */ +int tonga_check_for_dpm_running(struct pp_hwmgr *hwmgr) +{ + /* + * We return the status of Voltage Control instead of checking SCLK/MCLK DPM + * because we may have test scenarios that need us intentionly disable SCLK/MCLK DPM, + * whereas voltage control is a fundemental change that will not be disabled + */ + return (0 == tonga_is_dpm_running(hwmgr) ? 
0 : 1);
+}
+
+/**
+ * Checks if DPM is stopped
+ *
+ * @param hwmgr the address of the powerplay hardware manager.
+ * @return 1 if DPM is found to be stopped, 0 otherwise
+ */
+int tonga_check_for_dpm_stopped(struct pp_hwmgr *hwmgr)
+{
+ tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
+
+ if (0 != tonga_is_dpm_running(hwmgr)) {
+ /* If HW Virtualization is enabled, dpm_table_start will not have a valid value */
+ if (!data->dpm_table_start) {
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * Remove repeated voltage values and create table with unique values.
+ *
+ * @param hwmgr the address of the powerplay hardware manager.
+ * @param voltage_table the pointer to changing voltage table
+ * @return 0 on success
+ */
+
+static int tonga_trim_voltage_table(struct pp_hwmgr *hwmgr,
+ pp_atomctrl_voltage_table *voltage_table)
+{
+ uint32_t table_size, i, j;
+ uint16_t vvalue;
+ bool bVoltageFound = 0;
+ pp_atomctrl_voltage_table *table;
+
+ PP_ASSERT_WITH_CODE((NULL != voltage_table), "Voltage Table empty.", return -1;);
+ table_size = sizeof(pp_atomctrl_voltage_table);
+ table = kzalloc(table_size, GFP_KERNEL);
+
+ if (NULL == table)
+ return -ENOMEM;
+
+ memset(table, 0x00, table_size);
+ table->mask_low = voltage_table->mask_low;
+ table->phase_delay = voltage_table->phase_delay;
+
+ for (i = 0; i < voltage_table->count; i++) {
+ vvalue = voltage_table->entries[i].value;
+ bVoltageFound = 0;
+
+ for (j = 0; j < table->count; j++) {
+ if (vvalue == table->entries[j].value) {
+ bVoltageFound = 1;
+ break;
+ }
+ }
+
+ if (!bVoltageFound) {
+ table->entries[table->count].value = vvalue;
+ table->entries[table->count].smio_low =
+ voltage_table->entries[i].smio_low;
+ table->count++;
+ }
+ }
+
+ /* copy the deduplicated table back into the caller's table */
+ memcpy(voltage_table, table, sizeof(pp_atomctrl_voltage_table));
+
+ kfree(table);
+
+ return 0;
+}
+
+static int tonga_get_svi2_vdd_ci_voltage_table(
+ struct pp_hwmgr *hwmgr,
+ phm_ppt_v1_clock_voltage_dependency_table *voltage_dependency_table)
+{
+ uint32_t i;
+ int result;
+ tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
+ pp_atomctrl_voltage_table *vddci_voltage_table = &(data->vddci_voltage_table);
+
+ PP_ASSERT_WITH_CODE((0 != voltage_dependency_table->count),
+ "Voltage Dependency Table empty.", return -1;);
+
+ vddci_voltage_table->mask_low = 0;
+ vddci_voltage_table->phase_delay = 0;
+ vddci_voltage_table->count = voltage_dependency_table->count;
+
+ for (i = 0; i < voltage_dependency_table->count; i++) {
+ vddci_voltage_table->entries[i].value =
+ voltage_dependency_table->entries[i].vddci;
+ vddci_voltage_table->entries[i].smio_low = 0;
+ }
+
+ result = tonga_trim_voltage_table(hwmgr, vddci_voltage_table);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "Failed to trim VDDCI table.", return result;);
+
+ return 0;
+}
+
+
+
+static int tonga_get_svi2_vdd_voltage_table(
+ struct pp_hwmgr *hwmgr,
+ phm_ppt_v1_voltage_lookup_table *look_up_table,
+ pp_atomctrl_voltage_table *voltage_table)
+{
+ uint8_t i = 0;
+
+ PP_ASSERT_WITH_CODE((0 != look_up_table->count),
+ "Voltage Lookup Table empty.", return -1;);
+
+ voltage_table->mask_low = 0;
+ voltage_table->phase_delay = 0;
+
+ voltage_table->count = look_up_table->count;
+
+ for (i = 0; i < voltage_table->count; i++) {
+ voltage_table->entries[i].value = look_up_table->entries[i].us_vdd;
+ voltage_table->entries[i].smio_low = 0;
+ }
+
+ return 0;
+}
+
+/*
+ * -------------------------------------------------------- Voltage Tables --------------------------------------------------------------------------
+ * If the voltage table would be bigger than what will fit into the state
table on the SMC keep only the higher entries. + */ + +static void tonga_trim_voltage_table_to_fit_state_table( + struct pp_hwmgr *hwmgr, + uint32_t max_voltage_steps, + pp_atomctrl_voltage_table *voltage_table) +{ + unsigned int i, diff; + + if (voltage_table->count <= max_voltage_steps) { + return; + } + + diff = voltage_table->count - max_voltage_steps; + + for (i = 0; i < max_voltage_steps; i++) { + voltage_table->entries[i] = voltage_table->entries[i + diff]; + } + + voltage_table->count = max_voltage_steps; + + return; +} + +/** + * Create Voltage Tables. + * + * @param hwmgr the address of the powerplay hardware manager. + * @return always 0 + */ +int tonga_construct_voltage_tables(struct pp_hwmgr *hwmgr) +{ + tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); + struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); + int result; + + /* MVDD has only GPIO voltage control */ + if (TONGA_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) { + result = atomctrl_get_voltage_table_v3(hwmgr, + VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT, &(data->mvdd_voltage_table)); + PP_ASSERT_WITH_CODE((0 == result), + "Failed to retrieve MVDD table.", return result;); + } + + if (TONGA_VOLTAGE_CONTROL_BY_GPIO == data->vdd_ci_control) { + /* GPIO voltage */ + result = atomctrl_get_voltage_table_v3(hwmgr, + VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT, &(data->vddci_voltage_table)); + PP_ASSERT_WITH_CODE((0 == result), + "Failed to retrieve VDDCI table.", return result;); + } else if (TONGA_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_ci_control) { + /* SVI2 voltage */ + result = tonga_get_svi2_vdd_ci_voltage_table(hwmgr, + pptable_info->vdd_dep_on_mclk); + PP_ASSERT_WITH_CODE((0 == result), + "Failed to retrieve SVI2 VDDCI table from dependancy table.", return result;); + } + + if (TONGA_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_gfx_control) { + /* VDDGFX has only SVI2 voltage control */ + result = tonga_get_svi2_vdd_voltage_table(hwmgr, + pptable_info->vddgfx_lookup_table, &(data->vddgfx_voltage_table)); + PP_ASSERT_WITH_CODE((0 == result), + "Failed to retrieve SVI2 VDDGFX table from lookup table.", return result;); + } + + if (TONGA_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) { + /* VDDC has only SVI2 voltage control */ + result = tonga_get_svi2_vdd_voltage_table(hwmgr, + pptable_info->vddc_lookup_table, &(data->vddc_voltage_table)); + PP_ASSERT_WITH_CODE((0 == result), + "Failed to retrieve SVI2 VDDC table from lookup table.", return result;); + } + + PP_ASSERT_WITH_CODE( + (data->vddc_voltage_table.count <= (SMU72_MAX_LEVELS_VDDC)), + "Too many voltage values for VDDC. Trimming to fit state table.", + tonga_trim_voltage_table_to_fit_state_table(hwmgr, + SMU72_MAX_LEVELS_VDDC, &(data->vddc_voltage_table)); + ); + + PP_ASSERT_WITH_CODE( + (data->vddgfx_voltage_table.count <= (SMU72_MAX_LEVELS_VDDGFX)), + "Too many voltage values for VDDGFX. Trimming to fit state table.", + tonga_trim_voltage_table_to_fit_state_table(hwmgr, + SMU72_MAX_LEVELS_VDDGFX, &(data->vddgfx_voltage_table)); + ); + + PP_ASSERT_WITH_CODE( + (data->vddci_voltage_table.count <= (SMU72_MAX_LEVELS_VDDCI)), + "Too many voltage values for VDDCI. Trimming to fit state table.", + tonga_trim_voltage_table_to_fit_state_table(hwmgr, + SMU72_MAX_LEVELS_VDDCI, &(data->vddci_voltage_table)); + ); + + PP_ASSERT_WITH_CODE( + (data->mvdd_voltage_table.count <= (SMU72_MAX_LEVELS_MVDD)), + "Too many voltage values for MVDD. 
Trimming to fit state table.", + tonga_trim_voltage_table_to_fit_state_table(hwmgr, + SMU72_MAX_LEVELS_MVDD, &(data->mvdd_voltage_table)); + ); + + return 0; +} + +/** + * Vddc table preparation for SMC. + * + * @param hwmgr the address of the hardware manager + * @param table the SMC DPM table structure to be populated + * @return always 0 + */ +static int tonga_populate_smc_vddc_table(struct pp_hwmgr *hwmgr, + SMU72_Discrete_DpmTable *table) +{ + unsigned int count; + tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); + + if (TONGA_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) { + table->VddcLevelCount = data->vddc_voltage_table.count; + for (count = 0; count < table->VddcLevelCount; count++) { + table->VddcTable[count] = + PP_HOST_TO_SMC_US(data->vddc_voltage_table.entries[count].value * VOLTAGE_SCALE); + } + CONVERT_FROM_HOST_TO_SMC_UL(table->VddcLevelCount); + } + return 0; +} + +/** + * VddGfx table preparation for SMC. + * + * @param hwmgr the address of the hardware manager + * @param table the SMC DPM table structure to be populated + * @return always 0 + */ +static int tonga_populate_smc_vdd_gfx_table(struct pp_hwmgr *hwmgr, + SMU72_Discrete_DpmTable *table) +{ + unsigned int count; + tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); + + if (TONGA_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_gfx_control) { + table->VddGfxLevelCount = data->vddgfx_voltage_table.count; + for (count = 0; count < data->vddgfx_voltage_table.count; count++) { + table->VddGfxTable[count] = + PP_HOST_TO_SMC_US(data->vddgfx_voltage_table.entries[count].value * VOLTAGE_SCALE); + } + CONVERT_FROM_HOST_TO_SMC_UL(table->VddGfxLevelCount); + } + return 0; +} + +/** + * Vddci table preparation for SMC. + * + * @param *hwmgr The address of the hardware manager. + * @param *table The SMC DPM table structure to be populated. + * @return 0 + */ +static int tonga_populate_smc_vdd_ci_table(struct pp_hwmgr *hwmgr, + SMU72_Discrete_DpmTable *table) +{ + tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); + uint32_t count; + + table->VddciLevelCount = data->vddci_voltage_table.count; + for (count = 0; count < table->VddciLevelCount; count++) { + if (TONGA_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_ci_control) { + table->VddciTable[count] = + PP_HOST_TO_SMC_US(data->vddci_voltage_table.entries[count].value * VOLTAGE_SCALE); + } else if (TONGA_VOLTAGE_CONTROL_BY_GPIO == data->vdd_ci_control) { + table->SmioTable1.Pattern[count].Voltage = + PP_HOST_TO_SMC_US(data->vddci_voltage_table.entries[count].value * VOLTAGE_SCALE); + /* Index into DpmTable.Smio. Drive bits from Smio entry to get this voltage level. */ + table->SmioTable1.Pattern[count].Smio = + (uint8_t) count; + table->Smio[count] |= + data->vddci_voltage_table.entries[count].smio_low; + table->VddciTable[count] = + PP_HOST_TO_SMC_US(data->vddci_voltage_table.entries[count].value * VOLTAGE_SCALE); + } + } + + table->SmioMask1 = data->vddci_voltage_table.mask_low; + CONVERT_FROM_HOST_TO_SMC_UL(table->VddciLevelCount); + + return 0; +} + +/** + * Mvdd table preparation for SMC. + * + * @param *hwmgr The address of the hardware manager. + * @param *table The SMC DPM table structure to be populated. 
+ * @return 0 + */ +static int tonga_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr, + SMU72_Discrete_DpmTable *table) +{ + tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); + uint32_t count; + + if (TONGA_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) { + table->MvddLevelCount = data->mvdd_voltage_table.count; + for (count = 0; count < table->MvddLevelCount; count++) { + table->SmioTable2.Pattern[count].Voltage = + PP_HOST_TO_SMC_US(data->mvdd_voltage_table.entries[count].value * VOLTAGE_SCALE); + /* Index into DpmTable.Smio. Drive bits from Smio entry to get this voltage level.*/ + table->SmioTable2.Pattern[count].Smio = + (uint8_t) count; + table->Smio[count] |= + data->mvdd_voltage_table.entries[count].smio_low; + } + table->SmioMask2 = data->vddci_voltage_table.mask_low; + + CONVERT_FROM_HOST_TO_SMC_UL(table->MvddLevelCount); + } + + return 0; +} + +/** + * Convert a voltage value in mv unit to VID number required by SMU firmware + */ +static uint8_t convert_to_vid(uint16_t vddc) +{ + return (uint8_t) ((6200 - (vddc * VOLTAGE_SCALE)) / 25); +} + + +/** + * Preparation of vddc and vddgfx CAC tables for SMC. + * + * @param hwmgr the address of the hardware manager + * @param table the SMC DPM table structure to be populated + * @return always 0 + */ +static int tonga_populate_cac_tables(struct pp_hwmgr *hwmgr, + SMU72_Discrete_DpmTable *table) +{ + uint32_t count; + uint8_t index; + int result = 0; + tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); + struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); + struct phm_ppt_v1_voltage_lookup_table *vddgfx_lookup_table = pptable_info->vddgfx_lookup_table; + struct phm_ppt_v1_voltage_lookup_table *vddc_lookup_table = pptable_info->vddc_lookup_table; + + /* pTables is already swapped, so in order to use the value from it, we need to swap it back. 
*/ + uint32_t vddcLevelCount = PP_SMC_TO_HOST_UL(table->VddcLevelCount); + uint32_t vddgfxLevelCount = PP_SMC_TO_HOST_UL(table->VddGfxLevelCount); + + for (count = 0; count < vddcLevelCount; count++) { + /* We are populating vddc CAC data to BapmVddc table in split and merged mode */ + index = tonga_get_voltage_index(vddc_lookup_table, + data->vddc_voltage_table.entries[count].value); + table->BapmVddcVidLoSidd[count] = + convert_to_vid(vddc_lookup_table->entries[index].us_cac_low); + table->BapmVddcVidHiSidd[count] = + convert_to_vid(vddc_lookup_table->entries[index].us_cac_mid); + table->BapmVddcVidHiSidd2[count] = + convert_to_vid(vddc_lookup_table->entries[index].us_cac_high); + } + + if ((data->vdd_gfx_control == TONGA_VOLTAGE_CONTROL_BY_SVID2)) { + /* We are populating vddgfx CAC data to BapmVddgfx table in split mode */ + for (count = 0; count < vddgfxLevelCount; count++) { + index = tonga_get_voltage_index(vddgfx_lookup_table, + data->vddgfx_voltage_table.entries[count].value); + table->BapmVddGfxVidLoSidd[count] = + convert_to_vid(vddgfx_lookup_table->entries[index].us_cac_low); + table->BapmVddGfxVidHiSidd[count] = + convert_to_vid(vddgfx_lookup_table->entries[index].us_cac_mid); + table->BapmVddGfxVidHiSidd2[count] = + convert_to_vid(vddgfx_lookup_table->entries[index].us_cac_high); + } + } else { + for (count = 0; count < vddcLevelCount; count++) { + index = tonga_get_voltage_index(vddc_lookup_table, + data->vddc_voltage_table.entries[count].value); + table->BapmVddGfxVidLoSidd[count] = + convert_to_vid(vddc_lookup_table->entries[index].us_cac_low); + table->BapmVddGfxVidHiSidd[count] = + convert_to_vid(vddc_lookup_table->entries[index].us_cac_mid); + table->BapmVddGfxVidHiSidd2[count] = + convert_to_vid(vddc_lookup_table->entries[index].us_cac_high); + } + } + + return result; +} + + +/** + * Preparation of voltage tables for SMC. + * + * @param hwmgr the address of the hardware manager + * @param table the SMC DPM table structure to be populated + * @return always 0 + */ + +int tonga_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr, + SMU72_Discrete_DpmTable *table) +{ + int result; + + result = tonga_populate_smc_vddc_table(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "can not populate VDDC voltage table to SMC", return -1); + + result = tonga_populate_smc_vdd_ci_table(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "can not populate VDDCI voltage table to SMC", return -1); + + result = tonga_populate_smc_vdd_gfx_table(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "can not populate VDDGFX voltage table to SMC", return -1); + + result = tonga_populate_smc_mvdd_table(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "can not populate MVDD voltage table to SMC", return -1); + + result = tonga_populate_cac_tables(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "can not populate CAC voltage tables to SMC", return -1); + + return 0; +} + +/** + * Populates the SMC VRConfig field in DPM table. 
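+ * VRConfig encodes, for each rail (VDDC, VDDGFX, VDDCI and MVDD), which
+ * regulator source drives it: an SVI2 plane, an SMIO pattern, or merged
+ * with VDDC. The per-rail values are shifted into their fields below.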
+ *
+ * @param hwmgr the address of the hardware manager
+ * @param table the SMC DPM table structure to be populated
+ * @return always 0
+ */
+static int tonga_populate_vr_config(struct pp_hwmgr *hwmgr,
+ SMU72_Discrete_DpmTable *table)
+{
+ tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
+ uint16_t config;
+
+ if (TONGA_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_gfx_control) {
+ /* Split mode */
+ config = VR_SVI2_PLANE_1;
+ table->VRConfig |= (config << VRCONF_VDDGFX_SHIFT);
+
+ if (TONGA_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
+ config = VR_SVI2_PLANE_2;
+ table->VRConfig |= config;
+ } else {
+ printk(KERN_ERR "[ powerplay ] VDDC and VDDGFX should both be on SVI2 control in split mode! \n");
+ }
+ } else {
+ /* Merged mode */
+ config = VR_MERGED_WITH_VDDC;
+ table->VRConfig |= (config << VRCONF_VDDGFX_SHIFT);
+
+ /* Set Vddc Voltage Controller */
+ if (TONGA_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
+ config = VR_SVI2_PLANE_1;
+ table->VRConfig |= config;
+ } else {
+ printk(KERN_ERR "[ powerplay ] VDDC should be on SVI2 control in merged mode! \n");
+ }
+ }
+
+ /* Set Vddci Voltage Controller */
+ if (TONGA_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_ci_control) {
+ config = VR_SVI2_PLANE_2; /* only in merged mode */
+ table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
+ } else if (TONGA_VOLTAGE_CONTROL_BY_GPIO == data->vdd_ci_control) {
+ config = VR_SMIO_PATTERN_1;
+ table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
+ }
+
+ /* Set Mvdd Voltage Controller */
+ if (TONGA_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
+ config = VR_SMIO_PATTERN_2;
+ table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
+ }
+
+ return 0;
+}
+
+static int tonga_get_dependecy_volt_by_clk(struct pp_hwmgr *hwmgr,
+ phm_ppt_v1_clock_voltage_dependency_table *allowed_clock_voltage_table,
+ uint32_t clock, SMU_VoltageLevel *voltage, uint32_t *mvdd)
+{
+ uint32_t i = 0;
+ tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+ /* clock - voltage dependency table is empty table */
+ if (allowed_clock_voltage_table->count == 0)
+ return -1;
+
+ for (i = 0; i < allowed_clock_voltage_table->count; i++) {
+ /* find first sclk bigger than request */
+ if (allowed_clock_voltage_table->entries[i].clk >= clock) {
+ voltage->VddGfx = tonga_get_voltage_index(pptable_info->vddgfx_lookup_table,
+ allowed_clock_voltage_table->entries[i].vddgfx);
+
+ voltage->Vddc = tonga_get_voltage_index(pptable_info->vddc_lookup_table,
+ allowed_clock_voltage_table->entries[i].vddc);
+
+ if (allowed_clock_voltage_table->entries[i].vddci) {
+ voltage->Vddci = tonga_get_voltage_id(&data->vddci_voltage_table,
+ allowed_clock_voltage_table->entries[i].vddci);
+ } else {
+ voltage->Vddci = tonga_get_voltage_id(&data->vddci_voltage_table,
+ allowed_clock_voltage_table->entries[i].vddc - data->vddc_vddci_delta);
+ }
+
+ if (allowed_clock_voltage_table->entries[i].mvdd) {
+ *mvdd = (uint32_t) allowed_clock_voltage_table->entries[i].mvdd;
+ }
+
+ voltage->Phases = 1;
+ return 0;
+ }
+ }
+
+ /* sclk is bigger than max sclk in the dependence table */
+ voltage->VddGfx = tonga_get_voltage_index(pptable_info->vddgfx_lookup_table,
+ allowed_clock_voltage_table->entries[i-1].vddgfx);
+ voltage->Vddc = tonga_get_voltage_index(pptable_info->vddc_lookup_table,
+ allowed_clock_voltage_table->entries[i-1].vddc);
+
+ if (allowed_clock_voltage_table->entries[i-1].vddci) {
+ voltage->Vddci = tonga_get_voltage_id(&data->vddci_voltage_table,
+ allowed_clock_voltage_table->entries[i-1].vddci);
+ }
+ if (allowed_clock_voltage_table->entries[i-1].mvdd) {
+ *mvdd = (uint32_t) allowed_clock_voltage_table->entries[i-1].mvdd;
+ }
+
+ return 0;
+}
+
+/**
+ * Call SMC to reset S0/S1 to S1 and Reset SMIO to initial value
+ *
+ * @param hwmgr the address of the powerplay hardware manager.
+ * @return 0 on success, 1 otherwise
+ */
+int tonga_reset_to_default(struct pp_hwmgr *hwmgr)
+{
+ return (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_ResetToDefaults) == 0) ?
0 : 1; +} + +int tonga_populate_memory_timing_parameters( + struct pp_hwmgr *hwmgr, + uint32_t engine_clock, + uint32_t memory_clock, + struct SMU72_Discrete_MCArbDramTimingTableEntry *arb_regs + ) +{ + uint32_t dramTiming; + uint32_t dramTiming2; + uint32_t burstTime; + int result; + + result = atomctrl_set_engine_dram_timings_rv770(hwmgr, + engine_clock, memory_clock); + + PP_ASSERT_WITH_CODE(result == 0, + "Error calling VBIOS to set DRAM_TIMING.", return result); + + dramTiming = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING); + dramTiming2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2); + burstTime = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0); + + arb_regs->McArbDramTiming = PP_HOST_TO_SMC_UL(dramTiming); + arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dramTiming2); + arb_regs->McArbBurstTime = (uint8_t)burstTime; + + return 0; +} + +/** + * Setup parameters for the MC ARB. + * + * @param hwmgr the address of the powerplay hardware manager. + * @return always 0 + * This function is to be called from the SetPowerState table. + */ +int tonga_program_memory_timing_parameters(struct pp_hwmgr *hwmgr) +{ + tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); + int result = 0; + SMU72_Discrete_MCArbDramTimingTable arb_regs; + uint32_t i, j; + + memset(&arb_regs, 0x00, sizeof(SMU72_Discrete_MCArbDramTimingTable)); + + for (i = 0; i < data->dpm_table.sclk_table.count; i++) { + for (j = 0; j < data->dpm_table.mclk_table.count; j++) { + result = tonga_populate_memory_timing_parameters + (hwmgr, data->dpm_table.sclk_table.dpm_levels[i].value, + data->dpm_table.mclk_table.dpm_levels[j].value, + &arb_regs.entries[i][j]); + + if (0 != result) { + break; + } + } + } + + if (0 == result) { + result = tonga_copy_bytes_to_smc( + hwmgr->smumgr, + data->arb_table_start, + (uint8_t *)&arb_regs, + sizeof(SMU72_Discrete_MCArbDramTimingTable), + data->sram_end + ); + } + + return result; +} + +static int tonga_populate_smc_link_level(struct pp_hwmgr *hwmgr, SMU72_Discrete_DpmTable *table) +{ + tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); + struct tonga_dpm_table *dpm_table = &data->dpm_table; + uint32_t i; + + /* Index (dpm_table->pcie_speed_table.count) is reserved for PCIE boot level. 
*/
+ for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) {
+ table->LinkLevel[i].PcieGenSpeed =
+ (uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value;
+ table->LinkLevel[i].PcieLaneCount =
+ (uint8_t)encode_pcie_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1);
+ table->LinkLevel[i].EnabledForActivity =
+ 1;
+ table->LinkLevel[i].SPC =
+ (uint8_t)(data->pcie_spc_cap & 0xff);
+ table->LinkLevel[i].DownThreshold =
+ PP_HOST_TO_SMC_UL(5);
+ table->LinkLevel[i].UpThreshold =
+ PP_HOST_TO_SMC_UL(30);
+ }
+
+ data->smc_state_table.LinkLevelCount =
+ (uint8_t)dpm_table->pcie_speed_table.count;
+ data->dpm_level_enable_mask.pcie_dpm_enable_mask =
+ tonga_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
+
+ return 0;
+}
+
+static int tonga_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
+ SMU72_Discrete_DpmTable *table)
+{
+ int result = 0;
+
+ uint8_t count;
+ pp_atomctrl_clock_dividers_vi dividers;
+ tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = pptable_info->mm_dep_table;
+
+ table->UvdLevelCount = (uint8_t) (mm_table->count);
+ table->UvdBootLevel = 0;
+
+ for (count = 0; count < table->UvdLevelCount; count++) {
+ table->UvdLevel[count].VclkFrequency = mm_table->entries[count].vclk;
+ table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk;
+ table->UvdLevel[count].MinVoltage.Vddc =
+ tonga_get_voltage_index(pptable_info->vddc_lookup_table,
+ mm_table->entries[count].vddc);
+ table->UvdLevel[count].MinVoltage.VddGfx =
+ (data->vdd_gfx_control == TONGA_VOLTAGE_CONTROL_BY_SVID2) ?
+ tonga_get_voltage_index(pptable_info->vddgfx_lookup_table,
+ mm_table->entries[count].vddgfx) : 0;
+ table->UvdLevel[count].MinVoltage.Vddci =
+ tonga_get_voltage_id(&data->vddci_voltage_table,
+ mm_table->entries[count].vddc - data->vddc_vddci_delta);
+ table->UvdLevel[count].MinVoltage.Phases = 1;
+
+ /* retrieve divider value for VBIOS */
+ result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+ table->UvdLevel[count].VclkFrequency, &dividers);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "can not find divide id for Vclk clock", return result);
+
+ table->UvdLevel[count].VclkDivider = (uint8_t)dividers.pll_post_divider;
+
+ result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+ table->UvdLevel[count].DclkFrequency, &dividers);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "can not find divide id for Dclk clock", return result);
+
+ table->UvdLevel[count].DclkDivider = (uint8_t)dividers.pll_post_divider;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency);
+ //CONVERT_FROM_HOST_TO_SMC_UL((uint32_t)table->UvdLevel[count].MinVoltage);
+ }
+
+ return result;
+
+}
+
+static int tonga_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
+ SMU72_Discrete_DpmTable *table)
+{
+ int result = 0;
+
+ uint8_t count;
+ pp_atomctrl_clock_dividers_vi dividers;
+ tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = pptable_info->mm_dep_table;
+
+ table->VceLevelCount = (uint8_t) (mm_table->count);
+ table->VceBootLevel = 0;
+
+ for (count = 0; count < table->VceLevelCount; count++) {
+ table->VceLevel[count].Frequency =
+ mm_table->entries[count].eclk;
+ table->VceLevel[count].MinVoltage.Vddc =
+ tonga_get_voltage_index(pptable_info->vddc_lookup_table,
+ mm_table->entries[count].vddc);
+ table->VceLevel[count].MinVoltage.VddGfx =
+ (data->vdd_gfx_control == TONGA_VOLTAGE_CONTROL_BY_SVID2) ?
+ tonga_get_voltage_index(pptable_info->vddgfx_lookup_table,
+ mm_table->entries[count].vddgfx) : 0;
+ table->VceLevel[count].MinVoltage.Vddci =
+ tonga_get_voltage_id(&data->vddci_voltage_table,
+ mm_table->entries[count].vddc - data->vddc_vddci_delta);
+ table->VceLevel[count].MinVoltage.Phases = 1;
+
+ /* retrieve divider value for VBIOS */
+ result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+ table->VceLevel[count].Frequency, &dividers);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "can not find divide id for VCE engine clock", return result);
+
+ table->VceLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].Frequency);
+ }
+
+ return result;
+}
+
+static int tonga_populate_smc_acp_level(struct pp_hwmgr *hwmgr,
+ SMU72_Discrete_DpmTable *table)
+{
+ int result = 0;
+ uint8_t count;
+ pp_atomctrl_clock_dividers_vi dividers;
+ tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = pptable_info->mm_dep_table;
+
+ table->AcpLevelCount = (uint8_t) (mm_table->count);
+ table->AcpBootLevel = 0;
+
+ for (count = 0; count < table->AcpLevelCount; count++) {
+ table->AcpLevel[count].Frequency =
+ pptable_info->mm_dep_table->entries[count].aclk;
+ table->AcpLevel[count].MinVoltage.Vddc =
+ tonga_get_voltage_index(pptable_info->vddc_lookup_table,
+ mm_table->entries[count].vddc);
+ table->AcpLevel[count].MinVoltage.VddGfx =
+ (data->vdd_gfx_control == TONGA_VOLTAGE_CONTROL_BY_SVID2) ?
+ tonga_get_voltage_index(pptable_info->vddgfx_lookup_table,
+ mm_table->entries[count].vddgfx) : 0;
+ table->AcpLevel[count].MinVoltage.Vddci =
+ tonga_get_voltage_id(&data->vddci_voltage_table,
+ mm_table->entries[count].vddc - data->vddc_vddci_delta);
+ table->AcpLevel[count].MinVoltage.Phases = 1;
+
+ /* retrieve divider value for VBIOS */
+ result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+ table->AcpLevel[count].Frequency, &dividers);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "can not find divide id for engine clock", return result);
+
+ table->AcpLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->AcpLevel[count].Frequency);
+ }
+
+ return result;
+}
+
+static int tonga_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
+ SMU72_Discrete_DpmTable *table)
+{
+ int result = 0;
+ uint8_t count;
+ pp_atomctrl_clock_dividers_vi dividers;
+ tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = pptable_info->mm_dep_table;
+
+ table->SamuBootLevel = 0;
+ table->SamuLevelCount = (uint8_t) (mm_table->count);
+
+ for (count = 0; count < table->SamuLevelCount; count++) {
+ /* not sure whether we need evclk or not */
+ table->SamuLevel[count].Frequency =
+ pptable_info->mm_dep_table->entries[count].samclock;
+ table->SamuLevel[count].MinVoltage.Vddc =
+ tonga_get_voltage_index(pptable_info->vddc_lookup_table,
+ mm_table->entries[count].vddc);
+ table->SamuLevel[count].MinVoltage.VddGfx =
+ (data->vdd_gfx_control == TONGA_VOLTAGE_CONTROL_BY_SVID2) ?
+ tonga_get_voltage_index(pptable_info->vddgfx_lookup_table,
+ mm_table->entries[count].vddgfx) : 0;
+ table->SamuLevel[count].MinVoltage.Vddci =
+ tonga_get_voltage_id(&data->vddci_voltage_table,
+ mm_table->entries[count].vddc - data->vddc_vddci_delta);
+ table->SamuLevel[count].MinVoltage.Phases = 1;
+
+ /* retrieve divider value for VBIOS */
+ result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+ table->SamuLevel[count].Frequency, &dividers);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "can not find divide id for samu clock", return result);
+
+ table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency);
+ }
+
+ return result;
+}
+
+/**
+ * Populates the SMC MCLK structure using the provided memory clock
+ *
+ * @param hwmgr the address of the hardware manager
+ * @param memory_clock the memory clock to use to populate the structure
+ * @param mclk the SMC memory level structure to be populated
+ */
+static int tonga_calculate_mclk_params(
+ struct pp_hwmgr *hwmgr,
+ uint32_t memory_clock,
+ SMU72_Discrete_MemoryLevel *mclk,
+ bool strobe_mode,
+ bool dllStateOn
+ )
+{
+ const tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
+ uint32_t dll_cntl = data->clock_registers.vDLL_CNTL;
+ uint32_t mclk_pwrmgt_cntl = data->clock_registers.vMCLK_PWRMGT_CNTL;
+ uint32_t mpll_ad_func_cntl = data->clock_registers.vMPLL_AD_FUNC_CNTL;
+ uint32_t mpll_dq_func_cntl = data->clock_registers.vMPLL_DQ_FUNC_CNTL;
+ uint32_t mpll_func_cntl = data->clock_registers.vMPLL_FUNC_CNTL;
+ uint32_t mpll_func_cntl_1 = data->clock_registers.vMPLL_FUNC_CNTL_1;
+ uint32_t mpll_func_cntl_2 = data->clock_registers.vMPLL_FUNC_CNTL_2;
+ uint32_t mpll_ss1 = data->clock_registers.vMPLL_SS1;
+ uint32_t mpll_ss2 = data->clock_registers.vMPLL_SS2;
+
+ pp_atomctrl_memory_clock_param mpll_param;
+ int result;
+
+ result = atomctrl_get_memory_pll_dividers_si(hwmgr,
+ memory_clock, &mpll_param, strobe_mode);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Error retrieving Memory Clock Parameters from VBIOS.", return result);
+
+ /* MPLL_FUNC_CNTL setup*/
+ mpll_func_cntl = PHM_SET_FIELD(mpll_func_cntl, MPLL_FUNC_CNTL, BWCTRL, mpll_param.bw_ctrl);
+
+ /* MPLL_FUNC_CNTL_1 setup*/
+ mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1,
+ MPLL_FUNC_CNTL_1, CLKF, mpll_param.mpll_fb_divider.cl_kf);
+ mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1,
+ MPLL_FUNC_CNTL_1, CLKFRAC, mpll_param.mpll_fb_divider.clk_frac);
+ mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1,
+ MPLL_FUNC_CNTL_1, VCO_MODE, mpll_param.vco_mode);
+
+ /* MPLL_AD_FUNC_CNTL setup*/
+ mpll_ad_func_cntl = PHM_SET_FIELD(mpll_ad_func_cntl,
+ MPLL_AD_FUNC_CNTL, YCLK_POST_DIV, mpll_param.mpll_post_divider);
+
+ if (data->is_memory_GDDR5) {
+ /* MPLL_DQ_FUNC_CNTL setup*/
+ mpll_dq_func_cntl = PHM_SET_FIELD(mpll_dq_func_cntl,
+ MPLL_DQ_FUNC_CNTL, YCLK_SEL, mpll_param.yclk_sel);
+ mpll_dq_func_cntl = PHM_SET_FIELD(mpll_dq_func_cntl,
+ MPLL_DQ_FUNC_CNTL, YCLK_POST_DIV, mpll_param.mpll_post_divider);
+ }
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_MemorySpreadSpectrumSupport)) {
+ /*
+ ************************************
+ Fref = Reference Frequency
+ NF = Feedback divider ratio
+ NR = Reference divider ratio
+ Fnom = Nominal VCO output frequency = Fref * NF / NR
+ Fs = Spreading Rate
+ D = Percentage down-spread / 2
+ Fint = Reference input frequency to PFD = Fref / NR
+ NS = Spreading rate divider ratio = int(Fint / (2 * Fs))
+ CLKS = NS - 1 = ISS_STEP_NUM[11:0]
+ NV = D * Fs / Fnom * 4 *
((Fnom/Fref * NR) ^ 2) + CLKV = 65536 * NV = ISS_STEP_SIZE[25:0] + ************************************* + */ + pp_atomctrl_internal_ss_info ss_info; + uint32_t freq_nom; + uint32_t tmp; + uint32_t reference_clock = atomctrl_get_mpll_reference_clock(hwmgr); + + /* for GDDR5 for all modes and DDR3 */ + if (1 == mpll_param.qdr) + freq_nom = memory_clock * 4 * (1 << mpll_param.mpll_post_divider); + else + freq_nom = memory_clock * 2 * (1 << mpll_param.mpll_post_divider); + + /* tmp = (freq_nom / reference_clock * reference_divider) ^ 2 Note: S.I. reference_divider = 1*/ + tmp = (freq_nom / reference_clock); + tmp = tmp * tmp; + + if (0 == atomctrl_get_memory_clock_spread_spectrum(hwmgr, freq_nom, &ss_info)) { + /* ss_info.speed_spectrum_percentage -- in unit of 0.01% */ + /* ss.Info.speed_spectrum_rate -- in unit of khz */ + /* CLKS = reference_clock / (2 * speed_spectrum_rate * reference_divider) * 10 */ + /* = reference_clock * 5 / speed_spectrum_rate */ + uint32_t clks = reference_clock * 5 / ss_info.speed_spectrum_rate; + + /* CLKV = 65536 * speed_spectrum_percentage / 2 * spreadSpecrumRate / freq_nom * 4 / 100000 * ((freq_nom / reference_clock) ^ 2) */ + /* = 131 * speed_spectrum_percentage * speed_spectrum_rate / 100 * ((freq_nom / reference_clock) ^ 2) / freq_nom */ + uint32_t clkv = + (uint32_t)((((131 * ss_info.speed_spectrum_percentage * + ss_info.speed_spectrum_rate) / 100) * tmp) / freq_nom); + + mpll_ss1 = PHM_SET_FIELD(mpll_ss1, MPLL_SS1, CLKV, clkv); + mpll_ss2 = PHM_SET_FIELD(mpll_ss2, MPLL_SS2, CLKS, clks); + } + } + + /* MCLK_PWRMGT_CNTL setup */ + mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl, + MCLK_PWRMGT_CNTL, DLL_SPEED, mpll_param.dll_speed); + mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl, + MCLK_PWRMGT_CNTL, MRDCK0_PDNB, dllStateOn); + mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl, + MCLK_PWRMGT_CNTL, MRDCK1_PDNB, dllStateOn); + + + /* Save the result data to outpupt memory level structure */ + mclk->MclkFrequency = memory_clock; + mclk->MpllFuncCntl = mpll_func_cntl; + mclk->MpllFuncCntl_1 = mpll_func_cntl_1; + mclk->MpllFuncCntl_2 = mpll_func_cntl_2; + mclk->MpllAdFuncCntl = mpll_ad_func_cntl; + mclk->MpllDqFuncCntl = mpll_dq_func_cntl; + mclk->MclkPwrmgtCntl = mclk_pwrmgt_cntl; + mclk->DllCntl = dll_cntl; + mclk->MpllSs1 = mpll_ss1; + mclk->MpllSs2 = mpll_ss2; + + return 0; +} + +static uint8_t tonga_get_mclk_frequency_ratio(uint32_t memory_clock, + bool strobe_mode) +{ + uint8_t mc_para_index; + + if (strobe_mode) { + if (memory_clock < 12500) { + mc_para_index = 0x00; + } else if (memory_clock > 47500) { + mc_para_index = 0x0f; + } else { + mc_para_index = (uint8_t)((memory_clock - 10000) / 2500); + } + } else { + if (memory_clock < 65000) { + mc_para_index = 0x00; + } else if (memory_clock > 135000) { + mc_para_index = 0x0f; + } else { + mc_para_index = (uint8_t)((memory_clock - 60000) / 5000); + } + } + + return mc_para_index; +} + +static uint8_t tonga_get_ddr3_mclk_frequency_ratio(uint32_t memory_clock) +{ + uint8_t mc_para_index; + + if (memory_clock < 10000) { + mc_para_index = 0; + } else if (memory_clock >= 80000) { + mc_para_index = 0x0f; + } else { + mc_para_index = (uint8_t)((memory_clock - 10000) / 5000 + 1); + } + + return mc_para_index; +} + +static int tonga_populate_single_memory_level( + struct pp_hwmgr *hwmgr, + uint32_t memory_clock, + SMU72_Discrete_MemoryLevel *memory_level + ) +{ + uint32_t minMvdd = 0; + tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); + struct phm_ppt_v1_information *pptable_info = (struct 
phm_ppt_v1_information *)(hwmgr->pptable); + int result = 0; + bool dllStateOn; + struct cgs_display_info info = {0}; + + + if (NULL != pptable_info->vdd_dep_on_mclk) { + result = tonga_get_dependecy_volt_by_clk(hwmgr, + pptable_info->vdd_dep_on_mclk, memory_clock, &memory_level->MinVoltage, &minMvdd); + PP_ASSERT_WITH_CODE((0 == result), + "can not find MinVddc voltage value from memory VDDC voltage dependency table", return result); + } + + if (data->mvdd_control == TONGA_VOLTAGE_CONTROL_NONE) { + memory_level->MinMvdd = data->vbios_boot_state.mvdd_bootup_value; + } else { + memory_level->MinMvdd = minMvdd; + } + memory_level->EnabledForThrottle = 1; + memory_level->EnabledForActivity = 0; + memory_level->UpHyst = 0; + memory_level->DownHyst = 100; + memory_level->VoltageDownHyst = 0; + + /* Indicates maximum activity level for this performance level.*/ + memory_level->ActivityLevel = (uint16_t)data->mclk_activity_target; + memory_level->StutterEnable = 0; + memory_level->StrobeEnable = 0; + memory_level->EdcReadEnable = 0; + memory_level->EdcWriteEnable = 0; + memory_level->RttEnable = 0; + + /* default set to low watermark. Highest level will be set to high later.*/ + memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW; + + cgs_get_active_displays_info(hwmgr->device, &info); + data->display_timing.num_existing_displays = info.display_count; + + if ((data->mclk_stutter_mode_threshold != 0) && + (memory_clock <= data->mclk_stutter_mode_threshold) && + (data->is_uvd_enabled == 0) +#if defined(LINUX) + && (PHM_READ_FIELD(hwmgr->device, DPG_PIPE_STUTTER_CONTROL, STUTTER_ENABLE) & 0x1) + && (data->display_timing.num_existing_displays <= 2) + && (data->display_timing.num_existing_displays != 0) +#endif + ) + memory_level->StutterEnable = 1; + + /* decide strobe mode*/ + memory_level->StrobeEnable = (data->mclk_strobe_mode_threshold != 0) && + (memory_clock <= data->mclk_strobe_mode_threshold); + + /* decide EDC mode and memory clock ratio*/ + if (data->is_memory_GDDR5) { + memory_level->StrobeRatio = tonga_get_mclk_frequency_ratio(memory_clock, + memory_level->StrobeEnable); + + if ((data->mclk_edc_enable_threshold != 0) && + (memory_clock > data->mclk_edc_enable_threshold)) { + memory_level->EdcReadEnable = 1; + } + + if ((data->mclk_edc_wr_enable_threshold != 0) && + (memory_clock > data->mclk_edc_wr_enable_threshold)) { + memory_level->EdcWriteEnable = 1; + } + + if (memory_level->StrobeEnable) { + if (tonga_get_mclk_frequency_ratio(memory_clock, 1) >= + ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC7) >> 16) & 0xf)) { + dllStateOn = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0; + } else { + dllStateOn = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC6) >> 1) & 0x1) ? 1 : 0; + } + + } else { + dllStateOn = data->dll_defaule_on; + } + } else { + memory_level->StrobeRatio = + tonga_get_ddr3_mclk_frequency_ratio(memory_clock); + dllStateOn = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 
1 : 0; + } + + result = tonga_calculate_mclk_params(hwmgr, + memory_clock, memory_level, memory_level->StrobeEnable, dllStateOn); + + if (0 == result) { + CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MinMvdd); + /* MCLK frequency in units of 10KHz*/ + CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkFrequency); + /* Indicates maximum activity level for this performance level.*/ + CONVERT_FROM_HOST_TO_SMC_US(memory_level->ActivityLevel); + CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl); + CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_1); + CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_2); + CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllAdFuncCntl); + CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllDqFuncCntl); + CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkPwrmgtCntl); + CONVERT_FROM_HOST_TO_SMC_UL(memory_level->DllCntl); + CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs1); + CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs2); + } + + return result; +} + +/** + * Populates the SMC MVDD structure using the provided memory clock. + * + * @param hwmgr the address of the hardware manager + * @param mclk the MCLK value to be used in the decision if MVDD should be high or low. + * @param voltage the SMC VOLTAGE structure to be populated + */ +int tonga_populate_mvdd_value(struct pp_hwmgr *hwmgr, uint32_t mclk, SMIO_Pattern *smio_pattern) +{ + const tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); + struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); + uint32_t i = 0; + + if (TONGA_VOLTAGE_CONTROL_NONE != data->mvdd_control) { + /* find mvdd value which clock is more than request */ + for (i = 0; i < pptable_info->vdd_dep_on_mclk->count; i++) { + if (mclk <= pptable_info->vdd_dep_on_mclk->entries[i].clk) { + /* Always round to higher voltage. 
*/
+ smio_pattern->Voltage = data->mvdd_voltage_table.entries[i].value;
+ break;
+ }
+ }
+
+ PP_ASSERT_WITH_CODE(i < pptable_info->vdd_dep_on_mclk->count,
+ "MVDD Voltage is outside the supported range.", return -1);
+
+ } else {
+ return -1;
+ }
+
+ return 0;
+}
+
+
+static int tonga_populate_smv_acpi_level(struct pp_hwmgr *hwmgr,
+ SMU72_Discrete_DpmTable *table)
+{
+ int result = 0;
+ const tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
+ pp_atomctrl_clock_dividers_vi dividers;
+ SMIO_Pattern voltage_level;
+ uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL;
+ uint32_t spll_func_cntl_2 = data->clock_registers.vCG_SPLL_FUNC_CNTL_2;
+ uint32_t dll_cntl = data->clock_registers.vDLL_CNTL;
+ uint32_t mclk_pwrmgt_cntl = data->clock_registers.vMCLK_PWRMGT_CNTL;
+
+ /* The ACPI state should not do DPM on DC (or ever).*/
+ table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
+
+ table->ACPILevel.MinVoltage = data->smc_state_table.GraphicsLevel[0].MinVoltage;
+
+ /* assign zero for now*/
+ table->ACPILevel.SclkFrequency = atomctrl_get_reference_clock(hwmgr);
+
+ /* get the engine clock dividers for this clock value*/
+ result = atomctrl_get_engine_pll_dividers_vi(hwmgr,
+ table->ACPILevel.SclkFrequency, &dividers);
+
+ PP_ASSERT_WITH_CODE(result == 0,
+ "Error retrieving Engine Clock dividers from VBIOS.", return result);
+
+ /* divider ID for required SCLK*/
+ table->ACPILevel.SclkDid = (uint8_t)dividers.pll_post_divider;
+ table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
+ table->ACPILevel.DeepSleepDivId = 0;
+
+ spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
+ CG_SPLL_FUNC_CNTL, SPLL_PWRON, 0);
+ spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
+ CG_SPLL_FUNC_CNTL, SPLL_RESET, 1);
+ spll_func_cntl_2 = PHM_SET_FIELD(spll_func_cntl_2,
+ CG_SPLL_FUNC_CNTL_2, SCLK_MUX_SEL, 4);
+
+ table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
+ table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
+ table->ACPILevel.CgSpllFuncCntl3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
+ table->ACPILevel.CgSpllFuncCntl4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
+ table->ACPILevel.SpllSpreadSpectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
+ table->ACPILevel.SpllSpreadSpectrum2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
+ table->ACPILevel.CcPwrDynRm = 0;
+ table->ACPILevel.CcPwrDynRm1 = 0;
+
+
+ /* For various features to be enabled/disabled while this level is active.*/
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags);
+ /* SCLK frequency in units of 10KHz*/
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkFrequency);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl2);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl3);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl4);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum2);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1);
+
+ /* table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;*/
+ table->MemoryACPILevel.MinVoltage = data->smc_state_table.MemoryLevel[0].MinVoltage;
+
+ /* CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MinVoltage);*/
+
+ if (0 == tonga_populate_mvdd_value(hwmgr, 0, &voltage_level))
+ table->MemoryACPILevel.MinMvdd =
+ PP_HOST_TO_SMC_UL(voltage_level.Voltage * VOLTAGE_SCALE);
+ else
+ table->MemoryACPILevel.MinMvdd = 0;
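+
+ /*
+ * The register programming below targets the ACPI memory level: both
+ * memory channel DLLs are held in reset (MRDCK*_RESET set) and powered
+ * down (MRDCK*_PDNB cleared) before the values are handed to the SMC.
+ */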
+ + /* Force reset on DLL*/ + mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl, + MCLK_PWRMGT_CNTL, MRDCK0_RESET, 0x1); + mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl, + MCLK_PWRMGT_CNTL, MRDCK1_RESET, 0x1); + + /* Disable DLL in ACPIState*/ + mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl, + MCLK_PWRMGT_CNTL, MRDCK0_PDNB, 0); + mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl, + MCLK_PWRMGT_CNTL, MRDCK1_PDNB, 0); + + /* Enable DLL bypass signal*/ + dll_cntl = PHM_SET_FIELD(dll_cntl, + DLL_CNTL, MRDCK0_BYPASS, 0); + dll_cntl = PHM_SET_FIELD(dll_cntl, + DLL_CNTL, MRDCK1_BYPASS, 0); + + table->MemoryACPILevel.DllCntl = + PP_HOST_TO_SMC_UL(dll_cntl); + table->MemoryACPILevel.MclkPwrmgtCntl = + PP_HOST_TO_SMC_UL(mclk_pwrmgt_cntl); + table->MemoryACPILevel.MpllAdFuncCntl = + PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_AD_FUNC_CNTL); + table->MemoryACPILevel.MpllDqFuncCntl = + PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_DQ_FUNC_CNTL); + table->MemoryACPILevel.MpllFuncCntl = + PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL); + table->MemoryACPILevel.MpllFuncCntl_1 = + PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_1); + table->MemoryACPILevel.MpllFuncCntl_2 = + PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_2); + table->MemoryACPILevel.MpllSs1 = + PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS1); + table->MemoryACPILevel.MpllSs2 = + PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS2); + + table->MemoryACPILevel.EnabledForThrottle = 0; + table->MemoryACPILevel.EnabledForActivity = 0; + table->MemoryACPILevel.UpHyst = 0; + table->MemoryACPILevel.DownHyst = 100; + table->MemoryACPILevel.VoltageDownHyst = 0; + /* Indicates maximum activity level for this performance level.*/ + table->MemoryACPILevel.ActivityLevel = PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target); + + table->MemoryACPILevel.StutterEnable = 0; + table->MemoryACPILevel.StrobeEnable = 0; + table->MemoryACPILevel.EdcReadEnable = 0; + table->MemoryACPILevel.EdcWriteEnable = 0; + table->MemoryACPILevel.RttEnable = 0; + + return result; +} + +static int tonga_find_boot_level(struct tonga_single_dpm_table *table, uint32_t value, uint32_t *boot_level) +{ + int result = 0; + uint32_t i; + + for (i = 0; i < table->count; i++) { + if (value == table->dpm_levels[i].value) { + *boot_level = i; + result = 0; + } + } + return result; +} + +static int tonga_populate_smc_boot_level(struct pp_hwmgr *hwmgr, + SMU72_Discrete_DpmTable *table) +{ + int result = 0; + tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); + + table->GraphicsBootLevel = 0; /* 0 == DPM[0] (low), etc. */ + table->MemoryBootLevel = 0; /* 0 == DPM[0] (low), etc. */ + + /* find boot level from dpm table*/ + result = tonga_find_boot_level(&(data->dpm_table.sclk_table), + data->vbios_boot_state.sclk_bootup_value, + (uint32_t *)&(data->smc_state_table.GraphicsBootLevel)); + + if (0 != result) { + data->smc_state_table.GraphicsBootLevel = 0; + printk(KERN_ERR "[ powerplay ] VBIOS did not find boot engine clock value \ + in dependency table. Using Graphics DPM level 0!"); + result = 0; + } + + result = tonga_find_boot_level(&(data->dpm_table.mclk_table), + data->vbios_boot_state.mclk_bootup_value, + (uint32_t *)&(data->smc_state_table.MemoryBootLevel)); + + if (0 != result) { + data->smc_state_table.MemoryBootLevel = 0; + printk(KERN_ERR "[ powerplay ] VBIOS did not find boot engine clock value \ + in dependency table. 
Using Memory DPM level 0!");
+ result = 0;
+ }
+
+ table->BootVoltage.Vddc =
+ tonga_get_voltage_id(&(data->vddc_voltage_table),
+ data->vbios_boot_state.vddc_bootup_value);
+ table->BootVoltage.VddGfx =
+ tonga_get_voltage_id(&(data->vddgfx_voltage_table),
+ data->vbios_boot_state.vddgfx_bootup_value);
+ table->BootVoltage.Vddci =
+ tonga_get_voltage_id(&(data->vddci_voltage_table),
+ data->vbios_boot_state.vddci_bootup_value);
+ table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value;
+
+ CONVERT_FROM_HOST_TO_SMC_US(table->BootMVdd);
+
+ return result;
+}
+
+
+/**
+ * Calculates the SCLK dividers using the provided engine clock
+ *
+ * @param hwmgr the address of the hardware manager
+ * @param engine_clock the engine clock to use to populate the structure
+ * @param sclk the SMC SCLK structure to be populated
+ */
+int tonga_calculate_sclk_params(struct pp_hwmgr *hwmgr,
+ uint32_t engine_clock, SMU72_Discrete_GraphicsLevel *sclk)
+{
+ const tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
+ pp_atomctrl_clock_dividers_vi dividers;
+ uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL;
+ uint32_t spll_func_cntl_3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
+ uint32_t spll_func_cntl_4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
+ uint32_t cg_spll_spread_spectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
+ uint32_t cg_spll_spread_spectrum_2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
+ uint32_t reference_clock;
+ uint32_t reference_divider;
+ uint32_t fbdiv;
+ int result;
+
+ /* get the engine clock dividers for this clock value*/
+ result = atomctrl_get_engine_pll_dividers_vi(hwmgr, engine_clock, &dividers);
+
+ PP_ASSERT_WITH_CODE(result == 0,
+ "Error retrieving Engine Clock dividers from VBIOS.", return result);
+
+ /* To get FBDIV we need to multiply this by 16384 and divide it by Fref.*/
+ reference_clock = atomctrl_get_reference_clock(hwmgr);
+
+ reference_divider = 1 + dividers.uc_pll_ref_div;
+
+ /* low 14 bits is fraction and high 12 bits is divider*/
+ fbdiv = dividers.ul_fb_div.ul_fb_divider & 0x3FFFFFF;
+
+ /* SPLL_FUNC_CNTL setup*/
+ spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
+ CG_SPLL_FUNC_CNTL, SPLL_REF_DIV, dividers.uc_pll_ref_div);
+ spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
+ CG_SPLL_FUNC_CNTL, SPLL_PDIV_A, dividers.uc_pll_post_div);
+
+ /* SPLL_FUNC_CNTL_3 setup*/
+ spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3,
+ CG_SPLL_FUNC_CNTL_3, SPLL_FB_DIV, fbdiv);
+
+ /* set to use fractional accumulation*/
+ spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3,
+ CG_SPLL_FUNC_CNTL_3, SPLL_DITHEN, 1);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_EngineSpreadSpectrumSupport)) {
+ pp_atomctrl_internal_ss_info ss_info;
+
+ uint32_t vcoFreq = engine_clock * dividers.uc_pll_post_div;
+ if (0 == atomctrl_get_engine_clock_spread_spectrum(hwmgr, vcoFreq, &ss_info)) {
+ /*
+ * ss_info.speed_spectrum_percentage -- in unit of 0.01%
+ * ss_info.speed_spectrum_rate -- in unit of khz
+ */
+ /* clks = reference_clock * 10 / (REFDIV + 1) / speed_spectrum_rate / 2 */
+ uint32_t clkS = reference_clock * 5 / (reference_divider * ss_info.speed_spectrum_rate);
+
+ /* clkv = 2 * D * fbdiv / NS */
+ uint32_t clkV = 4 * ss_info.speed_spectrum_percentage * fbdiv / (clkS * 10000);
+
+ cg_spll_spread_spectrum =
+ PHM_SET_FIELD(cg_spll_spread_spectrum, CG_SPLL_SPREAD_SPECTRUM, CLKS, clkS);
+ cg_spll_spread_spectrum =
+ PHM_SET_FIELD(cg_spll_spread_spectrum, CG_SPLL_SPREAD_SPECTRUM, SSEN, 1);
+ cg_spll_spread_spectrum_2 =
+ 
PHM_SET_FIELD(cg_spll_spread_spectrum_2, CG_SPLL_SPREAD_SPECTRUM_2, CLKV, clkV); + } + } + + sclk->SclkFrequency = engine_clock; + sclk->CgSpllFuncCntl3 = spll_func_cntl_3; + sclk->CgSpllFuncCntl4 = spll_func_cntl_4; + sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum; + sclk->SpllSpreadSpectrum2 = cg_spll_spread_spectrum_2; + sclk->SclkDid = (uint8_t)dividers.pll_post_divider; + + return 0; +} + +/** + * Populates single SMC SCLK structure using the provided engine clock + * + * @param hwmgr the address of the hardware manager + * @param engine_clock the engine clock to use to populate the structure + * @param sclk the SMC SCLK structure to be populated + */ +static int tonga_populate_single_graphic_level(struct pp_hwmgr *hwmgr, uint32_t engine_clock, uint16_t sclk_activity_level_threshold, SMU72_Discrete_GraphicsLevel *graphic_level) +{ + int result; + uint32_t threshold; + uint32_t mvdd; + tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); + struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); + + result = tonga_calculate_sclk_params(hwmgr, engine_clock, graphic_level); + + + /* populate graphics levels*/ + result = tonga_get_dependecy_volt_by_clk(hwmgr, + pptable_info->vdd_dep_on_sclk, engine_clock, + &graphic_level->MinVoltage, &mvdd); + PP_ASSERT_WITH_CODE((0 == result), + "can not find VDDC voltage value for VDDC \ + engine clock dependency table", return result); + + /* SCLK frequency in units of 10KHz*/ + graphic_level->SclkFrequency = engine_clock; + + /* Indicates maximum activity level for this performance level. 50% for now*/ + graphic_level->ActivityLevel = sclk_activity_level_threshold; + + graphic_level->CcPwrDynRm = 0; + graphic_level->CcPwrDynRm1 = 0; + /* this level can be used if activity is high enough.*/ + graphic_level->EnabledForActivity = 0; + /* this level can be used for throttling.*/ + graphic_level->EnabledForThrottle = 1; + graphic_level->UpHyst = 0; + graphic_level->DownHyst = 0; + graphic_level->VoltageDownHyst = 0; + graphic_level->PowerThrottle = 0; + + threshold = engine_clock * data->fast_watemark_threshold / 100; +/* + *get the DAL clock. do it in funture. 
+ PECI_GetMinClockSettings(hwmgr->peci, &minClocks); + data->display_timing.min_clock_insr = minClocks.engineClockInSR; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) + { + graphic_level->DeepSleepDivId = PhwTonga_GetSleepDividerIdFromClock(hwmgr, engine_clock, minClocks.engineClockInSR); + } +*/ + + /* Default to slow, highest DPM level will be set to PPSMC_DISPLAY_WATERMARK_LOW later.*/ + graphic_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW; + + if (0 == result) { + /* CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->MinVoltage);*/ + /* CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->MinVddcPhases);*/ + CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SclkFrequency); + CONVERT_FROM_HOST_TO_SMC_US(graphic_level->ActivityLevel); + CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CgSpllFuncCntl3); + CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CgSpllFuncCntl4); + CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SpllSpreadSpectrum); + CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SpllSpreadSpectrum2); + CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CcPwrDynRm); + CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CcPwrDynRm1); + } + + return result; +} + +/** + * Populates all SMC SCLK levels' structure based on the trimmed allowed dpm engine clock states + * + * @param hwmgr the address of the hardware manager + */ +static int tonga_populate_all_graphic_levels(struct pp_hwmgr *hwmgr) +{ + tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); + struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); + struct tonga_dpm_table *dpm_table = &data->dpm_table; + phm_ppt_v1_pcie_table *pcie_table = pptable_info->pcie_table; + uint8_t pcie_entry_count = (uint8_t) data->dpm_table.pcie_speed_table.count; + int result = 0; + uint32_t level_array_adress = data->dpm_table_start + + offsetof(SMU72_Discrete_DpmTable, GraphicsLevel); + uint32_t level_array_size = sizeof(SMU72_Discrete_GraphicsLevel) * + SMU72_MAX_LEVELS_GRAPHICS; /* 64 -> long; 32 -> int*/ + SMU72_Discrete_GraphicsLevel *levels = data->smc_state_table.GraphicsLevel; + uint32_t i, maxEntry; + uint8_t highest_pcie_level_enabled = 0, lowest_pcie_level_enabled = 0, mid_pcie_level_enabled = 0, count = 0; + PECI_RegistryValue reg_value; + memset(levels, 0x00, level_array_size); + + for (i = 0; i < dpm_table->sclk_table.count; i++) { + result = tonga_populate_single_graphic_level(hwmgr, + dpm_table->sclk_table.dpm_levels[i].value, + (uint16_t)data->activity_target[i], + &(data->smc_state_table.GraphicsLevel[i])); + + if (0 != result) + return result; + + /* Making sure only DPM level 0-1 have Deep Sleep Div ID populated. */ + if (i > 1) + data->smc_state_table.GraphicsLevel[i].DeepSleepDivId = 0; + + if (0 == i) { + reg_value = 0; + if (reg_value != 0) + data->smc_state_table.GraphicsLevel[0].UpHyst = (uint8_t)reg_value; + } + + if (1 == i) { + reg_value = 0; + if (reg_value != 0) + data->smc_state_table.GraphicsLevel[1].UpHyst = (uint8_t)reg_value; + } + } + + /* Only enable level 0 for now. 
*/
+ data->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1;
+
+ /* set highest level watermark to high */
+ if (dpm_table->sclk_table.count > 1)
+ data->smc_state_table.GraphicsLevel[dpm_table->sclk_table.count-1].DisplayWatermark =
+ PPSMC_DISPLAY_WATERMARK_HIGH;
+
+ data->smc_state_table.GraphicsDpmLevelCount =
+ (uint8_t)dpm_table->sclk_table.count;
+ data->dpm_level_enable_mask.sclk_dpm_enable_mask =
+ tonga_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
+
+ if (pcie_table != NULL) {
+ PP_ASSERT_WITH_CODE((pcie_entry_count >= 1),
+ "There must be 1 or more PCIE levels defined in PPTable.", return -1);
+ maxEntry = pcie_entry_count - 1; /* for indexing, we need to decrement by 1.*/
+ for (i = 0; i < dpm_table->sclk_table.count; i++) {
+ data->smc_state_table.GraphicsLevel[i].pcieDpmLevel =
+ (uint8_t) ((i < maxEntry) ? i : maxEntry);
+ }
+ } else {
+ if (0 == data->dpm_level_enable_mask.pcie_dpm_enable_mask)
+ printk(KERN_ERR "[ powerplay ] Pcie Dpm Enablemask is 0!");
+
+ while (data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
+ ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
+ (1<<(highest_pcie_level_enabled+1))) != 0)) {
+ highest_pcie_level_enabled++;
+ }
+
+ while (data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
+ ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
+ (1<<lowest_pcie_level_enabled)) == 0)) {
+ lowest_pcie_level_enabled++;
+ }
+
+ while ((count < highest_pcie_level_enabled) &&
+ ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
+ (1<<(lowest_pcie_level_enabled+1+count))) == 0)) {
+ count++;
+ }
+ mid_pcie_level_enabled = (lowest_pcie_level_enabled+1+count) < highest_pcie_level_enabled ?
+ (lowest_pcie_level_enabled+1+count) : highest_pcie_level_enabled;
+
+
+ /* set pcieDpmLevel to highest_pcie_level_enabled*/
+ for (i = 2; i < dpm_table->sclk_table.count; i++) {
+ data->smc_state_table.GraphicsLevel[i].pcieDpmLevel = highest_pcie_level_enabled;
+ }
+
+ /* set pcieDpmLevel to lowest_pcie_level_enabled*/
+ data->smc_state_table.GraphicsLevel[0].pcieDpmLevel = lowest_pcie_level_enabled;
+
+ /* set pcieDpmLevel to mid_pcie_level_enabled*/
+ data->smc_state_table.GraphicsLevel[1].pcieDpmLevel = mid_pcie_level_enabled;
+ }
+ /* level count will send to smc once at init smc table and never change*/
+ result = tonga_copy_bytes_to_smc(hwmgr->smumgr, level_array_adress, (uint8_t *)levels, (uint32_t)level_array_size, data->sram_end);
+
+ if (0 != result)
+ return result;
+
+ return 0;
+}
+
+/**
+ * Populates all SMC MCLK levels' structure based on the trimmed allowed dpm memory clock states
+ *
+ * @param hwmgr the address of the hardware manager
+ */
+
+static int tonga_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
+{
+ tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
+ struct tonga_dpm_table *dpm_table = &data->dpm_table;
+ int result;
+ /* populate MCLK dpm table to SMU7 */
+ uint32_t level_array_adress = data->dpm_table_start + offsetof(SMU72_Discrete_DpmTable, MemoryLevel);
+ uint32_t level_array_size = sizeof(SMU72_Discrete_MemoryLevel) * SMU72_MAX_LEVELS_MEMORY;
+ SMU72_Discrete_MemoryLevel *levels = data->smc_state_table.MemoryLevel;
+ uint32_t i;
+
+ memset(levels, 0x00, level_array_size);
+
+ for (i = 0; i < dpm_table->mclk_table.count; i++) {
+ PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value),
+ "can not populate memory level as memory clock is zero", return -1);
+ result = tonga_populate_single_memory_level(hwmgr, dpm_table->mclk_table.dpm_levels[i].value,
+ &(data->smc_state_table.MemoryLevel[i]));
+ if (0 != result) {
+ return result;
+ }
+ }
+
+ /* Only enable level 0 for now.*/
+ 
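The PCIe branch above walks pcie_dpm_enable_mask bit by bit to find the highest, lowest and mid enabled levels; the same pattern reappears later in tonga_get_lowest_enable_level() and tonga_force_dpm_highest(). The following standalone sketch is an editor's illustration of that mask walk, not part of the patch; the memory-level population continues directly below it.

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: lowest and highest enabled level in a DPM enable mask. */
static uint32_t lowest_enabled_level(uint32_t mask)
{
	uint32_t level = 0;

	while (mask && !(mask & (1u << level)))
		level++;
	return level;
}

static uint32_t highest_enabled_level(uint32_t mask)
{
	uint32_t level = 0;

	while (mask >>= 1)
		level++;
	return level;
}

int main(void)
{
	uint32_t mask = 0x1c;	/* levels 2, 3 and 4 enabled */

	printf("lowest %u, highest %u\n",
	       lowest_enabled_level(mask), highest_enabled_level(mask));
	return 0;
}

As in the driver, an all-zero mask is a caller-side error; both helpers assume at least one level is enabled.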
data->smc_state_table.MemoryLevel[0].EnabledForActivity = 1; + + /* + * in order to prevent MC activity from stutter mode to push DPM up. + * the UVD change complements this by putting the MCLK in a higher state + * by default such that we are not effected by up threshold or and MCLK DPM latency. + */ + data->smc_state_table.MemoryLevel[0].ActivityLevel = 0x1F; + CONVERT_FROM_HOST_TO_SMC_US(data->smc_state_table.MemoryLevel[0].ActivityLevel); + + data->smc_state_table.MemoryDpmLevelCount = (uint8_t)dpm_table->mclk_table.count; + data->dpm_level_enable_mask.mclk_dpm_enable_mask = tonga_get_dpm_level_enable_mask_value(&dpm_table->mclk_table); + /* set highest level watermark to high*/ + data->smc_state_table.MemoryLevel[dpm_table->mclk_table.count-1].DisplayWatermark = PPSMC_DISPLAY_WATERMARK_HIGH; + + /* level count will send to smc once at init smc table and never change*/ + result = tonga_copy_bytes_to_smc(hwmgr->smumgr, + level_array_adress, (uint8_t *)levels, (uint32_t)level_array_size, data->sram_end); + + if (0 != result) { + return result; + } + + return 0; +} + +struct TONGA_DLL_SPEED_SETTING { + uint16_t Min; /* Minimum Data Rate*/ + uint16_t Max; /* Maximum Data Rate*/ + uint32_t dll_speed; /* The desired DLL_SPEED setting*/ +}; + +static int tonga_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr) +{ + return 0; +} + +/* ---------------------------------------- ULV related functions ----------------------------------------------------*/ + + +static int tonga_reset_single_dpm_table( + struct pp_hwmgr *hwmgr, + struct tonga_single_dpm_table *dpm_table, + uint32_t count) +{ + uint32_t i; + if (!(count <= MAX_REGULAR_DPM_NUMBER)) + printk(KERN_ERR "[ powerplay ] Fatal error, can not set up single DPM \ + table entries to exceed max number! \n"); + + dpm_table->count = count; + for (i = 0; i < MAX_REGULAR_DPM_NUMBER; i++) { + dpm_table->dpm_levels[i].enabled = 0; + } + + return 0; +} + +static void tonga_setup_pcie_table_entry( + struct tonga_single_dpm_table *dpm_table, + uint32_t index, uint32_t pcie_gen, + uint32_t pcie_lanes) +{ + dpm_table->dpm_levels[index].value = pcie_gen; + dpm_table->dpm_levels[index].param1 = pcie_lanes; + dpm_table->dpm_levels[index].enabled = 1; +} + +static int tonga_setup_default_pcie_tables(struct pp_hwmgr *hwmgr) +{ + tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); + struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); + phm_ppt_v1_pcie_table *pcie_table = pptable_info->pcie_table; + uint32_t i, maxEntry; + + if (data->use_pcie_performance_levels && !data->use_pcie_power_saving_levels) { + data->pcie_gen_power_saving = data->pcie_gen_performance; + data->pcie_lane_power_saving = data->pcie_lane_performance; + } else if (!data->use_pcie_performance_levels && data->use_pcie_power_saving_levels) { + data->pcie_gen_performance = data->pcie_gen_power_saving; + data->pcie_lane_performance = data->pcie_lane_power_saving; + } + + tonga_reset_single_dpm_table(hwmgr, &data->dpm_table.pcie_speed_table, SMU72_MAX_LEVELS_LINK); + + if (pcie_table != NULL) { + /* + * maxEntry is used to make sure we reserve one PCIE level for boot level (fix for A+A PSPP issue). + * If PCIE table from PPTable have ULV entry + 8 entries, then ignore the last entry. + */ + maxEntry = (SMU72_MAX_LEVELS_LINK < pcie_table->count) ? 
+ SMU72_MAX_LEVELS_LINK : pcie_table->count; + for (i = 1; i < maxEntry; i++) { + tonga_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, i-1, + get_pcie_gen_support(data->pcie_gen_cap, pcie_table->entries[i].gen_speed), + get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane)); + } + data->dpm_table.pcie_speed_table.count = maxEntry - 1; + } else { + /* Hardcode Pcie Table */ + tonga_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 0, + get_pcie_gen_support(data->pcie_gen_cap, PP_Min_PCIEGen), + get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane)); + tonga_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 1, + get_pcie_gen_support(data->pcie_gen_cap, PP_Min_PCIEGen), + get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane)); + tonga_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 2, + get_pcie_gen_support(data->pcie_gen_cap, PP_Max_PCIEGen), + get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane)); + tonga_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 3, + get_pcie_gen_support(data->pcie_gen_cap, PP_Max_PCIEGen), + get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane)); + tonga_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 4, + get_pcie_gen_support(data->pcie_gen_cap, PP_Max_PCIEGen), + get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane)); + tonga_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 5, + get_pcie_gen_support(data->pcie_gen_cap, PP_Max_PCIEGen), + get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane)); + data->dpm_table.pcie_speed_table.count = 6; + } + /* Populate last level for boot PCIE level, but do not increment count. */ + tonga_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, + data->dpm_table.pcie_speed_table.count, + get_pcie_gen_support(data->pcie_gen_cap, PP_Min_PCIEGen), + get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane)); + + return 0; + +} + +/* + * This function is to initalize all DPM state tables for SMU7 based on the dependency table. + * Dynamic state patching function will then trim these state tables to the allowed range based + * on the power policy or external client requests, such as UVD request, etc. + */ +static int tonga_setup_default_dpm_tables(struct pp_hwmgr *hwmgr) +{ + tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); + struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); + uint32_t i; + + phm_ppt_v1_clock_voltage_dependency_table *allowed_vdd_sclk_table = + pptable_info->vdd_dep_on_sclk; + phm_ppt_v1_clock_voltage_dependency_table *allowed_vdd_mclk_table = + pptable_info->vdd_dep_on_mclk; + + PP_ASSERT_WITH_CODE(allowed_vdd_sclk_table != NULL, + "SCLK dependency table is missing. This table is mandatory", return -1); + PP_ASSERT_WITH_CODE(allowed_vdd_sclk_table->count >= 1, + "SCLK dependency table has to have is missing. This table is mandatory", return -1); + + PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table != NULL, + "MCLK dependency table is missing. This table is mandatory", return -1); + PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table->count >= 1, + "VMCLK dependency table has to have is missing. 
This table is mandatory", return -1); + + /* clear the state table to reset everything to default */ + memset(&(data->dpm_table), 0x00, sizeof(data->dpm_table)); + tonga_reset_single_dpm_table(hwmgr, &data->dpm_table.sclk_table, SMU72_MAX_LEVELS_GRAPHICS); + tonga_reset_single_dpm_table(hwmgr, &data->dpm_table.mclk_table, SMU72_MAX_LEVELS_MEMORY); + /* tonga_reset_single_dpm_table(hwmgr, &tonga_hwmgr->dpm_table.VddcTable, SMU72_MAX_LEVELS_VDDC); */ + /* tonga_reset_single_dpm_table(hwmgr, &tonga_hwmgr->dpm_table.vdd_gfx_table, SMU72_MAX_LEVELS_VDDGFX);*/ + /* tonga_reset_single_dpm_table(hwmgr, &tonga_hwmgr->dpm_table.vdd_ci_table, SMU72_MAX_LEVELS_VDDCI);*/ + /* tonga_reset_single_dpm_table(hwmgr, &tonga_hwmgr->dpm_table.mvdd_table, SMU72_MAX_LEVELS_MVDD);*/ + + PP_ASSERT_WITH_CODE(allowed_vdd_sclk_table != NULL, + "SCLK dependency table is missing. This table is mandatory", return -1); + /* Initialize Sclk DPM table based on allow Sclk values*/ + data->dpm_table.sclk_table.count = 0; + + for (i = 0; i < allowed_vdd_sclk_table->count; i++) { + if (i == 0 || data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count-1].value != + allowed_vdd_sclk_table->entries[i].clk) { + data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].value = + allowed_vdd_sclk_table->entries[i].clk; + data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled = 1; /*(i==0) ? 1 : 0; to do */ + data->dpm_table.sclk_table.count++; + } + } + + PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table != NULL, + "MCLK dependency table is missing. This table is mandatory", return -1); + /* Initialize Mclk DPM table based on allow Mclk values */ + data->dpm_table.mclk_table.count = 0; + for (i = 0; i < allowed_vdd_mclk_table->count; i++) { + if (i == 0 || data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count-1].value != + allowed_vdd_mclk_table->entries[i].clk) { + data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].value = + allowed_vdd_mclk_table->entries[i].clk; + data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].enabled = 1; /*(i==0) ? 1 : 0; */ + data->dpm_table.mclk_table.count++; + } + } + + /* Initialize Vddc DPM table based on allow Vddc values. And populate corresponding std values. 
*/ + for (i = 0; i < allowed_vdd_sclk_table->count; i++) { + data->dpm_table.vddc_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].vddc; + /* tonga_hwmgr->dpm_table.VddcTable.dpm_levels[i].param1 = stdVoltageTable->entries[i].Leakage; */ + /* param1 is for corresponding std voltage */ + data->dpm_table.vddc_table.dpm_levels[i].enabled = 1; + } + data->dpm_table.vddc_table.count = allowed_vdd_sclk_table->count; + + if (NULL != allowed_vdd_mclk_table) { + /* Initialize Vddci DPM table based on allow Mclk values */ + for (i = 0; i < allowed_vdd_mclk_table->count; i++) { + data->dpm_table.vdd_ci_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].vddci; + data->dpm_table.vdd_ci_table.dpm_levels[i].enabled = 1; + data->dpm_table.mvdd_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].mvdd; + data->dpm_table.mvdd_table.dpm_levels[i].enabled = 1; + } + data->dpm_table.vdd_ci_table.count = allowed_vdd_mclk_table->count; + data->dpm_table.mvdd_table.count = allowed_vdd_mclk_table->count; + } + + /* setup PCIE gen speed levels*/ + tonga_setup_default_pcie_tables(hwmgr); + + /* save a copy of the default DPM table*/ + memcpy(&(data->golden_dpm_table), &(data->dpm_table), sizeof(struct tonga_dpm_table)); + + return 0; +} + +int tonga_populate_smc_initial_state(struct pp_hwmgr *hwmgr, + const struct tonga_power_state *bootState) +{ + tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); + struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); + uint8_t count, level; + + count = (uint8_t) (pptable_info->vdd_dep_on_sclk->count); + for (level = 0; level < count; level++) { + if (pptable_info->vdd_dep_on_sclk->entries[level].clk >= + bootState->performance_levels[0].engine_clock) { + data->smc_state_table.GraphicsBootLevel = level; + break; + } + } + + count = (uint8_t) (pptable_info->vdd_dep_on_mclk->count); + for (level = 0; level < count; level++) { + if (pptable_info->vdd_dep_on_mclk->entries[level].clk >= + bootState->performance_levels[0].memory_clock) { + data->smc_state_table.MemoryBootLevel = level; + break; + } + } + + return 0; +} + +/** + * Initializes the SMC table and uploads it + * + * @param hwmgr the address of the powerplay hardware manager. 
+ * @param pInput the pointer to input data (PowerState) + * @return always 0 + */ +int tonga_init_smc_table(struct pp_hwmgr *hwmgr) +{ + int result; + tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); + struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); + SMU72_Discrete_DpmTable *table = &(data->smc_state_table); + const phw_tonga_ulv_parm *ulv = &(data->ulv); + uint8_t i; + PECI_RegistryValue reg_value; + pp_atomctrl_gpio_pin_assignment gpio_pin_assignment; + + result = tonga_setup_default_dpm_tables(hwmgr); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to setup default DPM tables!", return result;); + memset(&(data->smc_state_table), 0x00, sizeof(data->smc_state_table)); + if (TONGA_VOLTAGE_CONTROL_NONE != data->voltage_control) { + tonga_populate_smc_voltage_tables(hwmgr, table); + } + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_AutomaticDCTransition)) { + table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC; + } + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_StepVddc)) { + table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC; + } + + if (data->is_memory_GDDR5) { + table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5; + } + + i = PHM_READ_FIELD(hwmgr->device, CC_MC_MAX_CHANNEL, NOOFCHAN); + + if (i == 1 || i == 0) { + table->SystemFlags |= PPSMC_SYSTEMFLAG_12CHANNEL; + } + + if (ulv->ulv_supported && pptable_info->us_ulv_voltage_offset) { + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize ULV state!", return result;); + + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_ULV_PARAMETER, ulv->ch_ulv_parameter); + } + + result = tonga_populate_smc_link_level(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize Link Level!", return result;); + + result = tonga_populate_all_graphic_levels(hwmgr); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize Graphics Level!", return result;); + + result = tonga_populate_all_memory_levels(hwmgr); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize Memory Level!", return result;); + + result = tonga_populate_smv_acpi_level(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize ACPI Level!", return result;); + + result = tonga_populate_smc_vce_level(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize VCE Level!", return result;); + + result = tonga_populate_smc_acp_level(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize ACP Level!", return result;); + + result = tonga_populate_smc_samu_level(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize SAMU Level!", return result;); + + /* Since only the initial state is completely set up at this point (the other states are just copies of the boot state) we only */ + /* need to populate the ARB settings for the initial state. 
*/ + result = tonga_program_memory_timing_parameters(hwmgr); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to Write ARB settings for the initial state.", return result;); + + result = tonga_populate_smc_uvd_level(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize UVD Level!", return result;); + + result = tonga_populate_smc_boot_level(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize Boot Level!", return result;); + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ClockStretcher)) { + result = tonga_populate_clock_stretcher_data_table(hwmgr); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to populate Clock Stretcher Data Table!", return result;); + } + table->GraphicsVoltageChangeEnable = 1; + table->GraphicsThermThrottleEnable = 1; + table->GraphicsInterval = 1; + table->VoltageInterval = 1; + table->ThermalInterval = 1; + table->TemperatureLimitHigh = + pptable_info->cac_dtp_table->usTargetOperatingTemp * + TONGA_Q88_FORMAT_CONVERSION_UNIT; + table->TemperatureLimitLow = + (pptable_info->cac_dtp_table->usTargetOperatingTemp - 1) * + TONGA_Q88_FORMAT_CONVERSION_UNIT; + table->MemoryVoltageChangeEnable = 1; + table->MemoryInterval = 1; + table->VoltageResponseTime = 0; + table->PhaseResponseTime = 0; + table->MemoryThermThrottleEnable = 1; + + /* + * Cail reads current link status and reports it as cap (we cannot change this due to some previous issues we had) + * SMC drops the link status to lowest level after enabling DPM by PowerPlay. After pnp or toggling CF, driver gets reloaded again + * but this time Cail reads current link status which was set to low by SMC and reports it as cap to powerplay + * To avoid it, we set PCIeBootLinkLevel to highest dpm level + */ + PP_ASSERT_WITH_CODE((1 <= data->dpm_table.pcie_speed_table.count), + "There must be 1 or more PCIE levels defined in PPTable.", + return -1); + + table->PCIeBootLinkLevel = (uint8_t) (data->dpm_table.pcie_speed_table.count); + + table->PCIeGenInterval = 1; + + result = tonga_populate_vr_config(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to populate VRConfig setting!", return result); + + table->ThermGpio = 17; + table->SclkStepSize = 0x4000; + + reg_value = 0; + if ((0 == reg_value) && + (0 == atomctrl_get_pp_assign_pin(hwmgr, + VDDC_VRHOT_GPIO_PINID, &gpio_pin_assignment))) { + table->VRHotGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift; + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_RegulatorHot); + } else { + table->VRHotGpio = TONGA_UNUSED_GPIO_PIN; + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_RegulatorHot); + } + + /* ACDC Switch GPIO */ + reg_value = 0; + if ((0 == reg_value) && + (0 == atomctrl_get_pp_assign_pin(hwmgr, + PP_AC_DC_SWITCH_GPIO_PINID, &gpio_pin_assignment))) { + table->AcDcGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift; + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_AutomaticDCTransition); + } else { + table->AcDcGpio = TONGA_UNUSED_GPIO_PIN; + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_AutomaticDCTransition); + } + + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_Falcon_QuickTransition); + + reg_value = 0; + if (1 == reg_value) { + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_AutomaticDCTransition); + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_Falcon_QuickTransition); + } + + reg_value = 0; + if ((0 == reg_value) 
&& + (0 == atomctrl_get_pp_assign_pin(hwmgr, + THERMAL_INT_OUTPUT_GPIO_PINID, &gpio_pin_assignment))) { + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ThermalOutGPIO); + + table->ThermOutGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift; + + table->ThermOutPolarity = + (0 == (cgs_read_register(hwmgr->device, mmGPIOPAD_A) & + (1 << gpio_pin_assignment.uc_gpio_pin_bit_shift))) ? 1:0; + + table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_ONLY; + + /* if required, combine VRHot/PCC with thermal out GPIO*/ + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_RegulatorHot) && + phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_CombinePCCWithThermalSignal)){ + table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_VRHOT; + } + } else { + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ThermalOutGPIO); + + table->ThermOutGpio = 17; + table->ThermOutPolarity = 1; + table->ThermOutMode = SMU7_THERM_OUT_MODE_DISABLE; + } + + for (i = 0; i < SMU72_MAX_ENTRIES_SMIO; i++) { + table->Smio[i] = PP_HOST_TO_SMC_UL(table->Smio[i]); + } + CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags); + CONVERT_FROM_HOST_TO_SMC_UL(table->VRConfig); + CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask1); + CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask2); + CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize); + CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh); + CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow); + CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime); + CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime); + + /* Upload all dpm data to SMC memory.(dpm level, dpm level count etc) */ + result = tonga_copy_bytes_to_smc(hwmgr->smumgr, data->dpm_table_start + + offsetof(SMU72_Discrete_DpmTable, SystemFlags), + (uint8_t *)&(table->SystemFlags), + sizeof(SMU72_Discrete_DpmTable)-3 * sizeof(SMU72_PIDController), + data->sram_end); + + PP_ASSERT_WITH_CODE(0 == result, + "Failed to upload dpm data to SMC memory!", return result;); + + return result; +} + +/* Look up the voltaged based on DAL's requested level. and then send the requested VDDC voltage to SMC*/ +static void tonga_apply_dal_minimum_voltage_request(struct pp_hwmgr *hwmgr) +{ + return; +} + +int tonga_upload_dpm_level_enable_mask(struct pp_hwmgr *hwmgr) +{ + PPSMC_Result result; + tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); + + /* Apply minimum voltage based on DAL's request level */ + tonga_apply_dal_minimum_voltage_request(hwmgr); + + if (0 == data->sclk_dpm_key_disabled) { + /* Checking if DPM is running. If we discover hang because of this, we should skip this message.*/ + if (0 != tonga_is_dpm_running(hwmgr)) + printk(KERN_ERR "[ powerplay ] Trying to set Enable Mask when DPM is disabled \n"); + + if (0 != data->dpm_level_enable_mask.sclk_dpm_enable_mask) { + result = smum_send_msg_to_smc_with_parameter( + hwmgr->smumgr, + (PPSMC_Msg)PPSMC_MSG_SCLKDPM_SetEnabledMask, + data->dpm_level_enable_mask.sclk_dpm_enable_mask); + PP_ASSERT_WITH_CODE((0 == result), + "Set Sclk Dpm enable Mask failed", return -1); + } + } + + if (0 == data->mclk_dpm_key_disabled) { + /* Checking if DPM is running. 
If we discover hang because of this, we should skip this message.*/ + if (0 != tonga_is_dpm_running(hwmgr)) + printk(KERN_ERR "[ powerplay ] Trying to set Enable Mask when DPM is disabled \n"); + + if (0 != data->dpm_level_enable_mask.mclk_dpm_enable_mask) { + result = smum_send_msg_to_smc_with_parameter( + hwmgr->smumgr, + (PPSMC_Msg)PPSMC_MSG_MCLKDPM_SetEnabledMask, + data->dpm_level_enable_mask.mclk_dpm_enable_mask); + PP_ASSERT_WITH_CODE((0 == result), + "Set Mclk Dpm enable Mask failed", return -1); + } + } + + return 0; +} + + +int tonga_force_dpm_highest(struct pp_hwmgr *hwmgr) +{ + uint32_t level, tmp; + tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); + + if (0 == data->pcie_dpm_key_disabled) { + /* PCIE */ + if (data->dpm_level_enable_mask.pcie_dpm_enable_mask != 0) { + level = 0; + tmp = data->dpm_level_enable_mask.pcie_dpm_enable_mask; + while (tmp >>= 1) + level++ ; + + if (0 != level) { + PP_ASSERT_WITH_CODE((0 == tonga_dpm_force_state_pcie(hwmgr, level)), + "force highest pcie dpm state failed!", return -1); + } + } + } + + if (0 == data->sclk_dpm_key_disabled) { + /* SCLK */ + if (data->dpm_level_enable_mask.sclk_dpm_enable_mask != 0) { + level = 0; + tmp = data->dpm_level_enable_mask.sclk_dpm_enable_mask; + while (tmp >>= 1) + level++ ; + + if (0 != level) { + PP_ASSERT_WITH_CODE((0 == tonga_dpm_force_state(hwmgr, level)), + "force highest sclk dpm state failed!", return -1); + if (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, + CGS_IND_REG__SMC, TARGET_AND_CURRENT_PROFILE_INDEX, CURR_SCLK_INDEX) != level) + printk(KERN_ERR "[ powerplay ] Target_and_current_Profile_Index. \ + Curr_Sclk_Index does not match the level \n"); + + } + } + } + + if (0 == data->mclk_dpm_key_disabled) { + /* MCLK */ + if (data->dpm_level_enable_mask.mclk_dpm_enable_mask != 0) { + level = 0; + tmp = data->dpm_level_enable_mask.mclk_dpm_enable_mask; + while (tmp >>= 1) + level++ ; + + if (0 != level) { + PP_ASSERT_WITH_CODE((0 == tonga_dpm_force_state_mclk(hwmgr, level)), + "force highest mclk dpm state failed!", return -1); + if (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + TARGET_AND_CURRENT_PROFILE_INDEX, CURR_MCLK_INDEX) != level) + printk(KERN_ERR "[ powerplay ] Target_and_current_Profile_Index. \ + Curr_Mclk_Index does not match the level \n"); + } + } + } + + return 0; +} + +/** + * Find the MC microcode version and store it in the HwMgr struct + * + * @param hwmgr the address of the powerplay hardware manager. + * @return always 0 + */ +int tonga_get_mc_microcode_version (struct pp_hwmgr *hwmgr) +{ + cgs_write_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_INDEX, 0x9F); + + hwmgr->microcode_version_info.MC = cgs_read_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_DATA); + + return 0; +} + +/** + * Initialize Dynamic State Adjustment Rule Settings + * + * @param hwmgr the address of the powerplay hardware manager. 
+ */ +int tonga_initializa_dynamic_state_adjustment_rule_settings(struct pp_hwmgr *hwmgr) +{ + uint32_t table_size; + struct phm_clock_voltage_dependency_table *table_clk_vlt; + struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); + + hwmgr->dyn_state.mclk_sclk_ratio = 4; + hwmgr->dyn_state.sclk_mclk_delta = 15000; /* 150 MHz */ + hwmgr->dyn_state.vddc_vddci_delta = 200; /* 200mV */ + + /* initialize vddc_dep_on_dal_pwrl table */ + table_size = sizeof(uint32_t) + 4 * sizeof(struct phm_clock_voltage_dependency_record); + table_clk_vlt = (struct phm_clock_voltage_dependency_table *)kzalloc(table_size, GFP_KERNEL); + + if (NULL == table_clk_vlt) { + printk(KERN_ERR "[ powerplay ] Can not allocate space for vddc_dep_on_dal_pwrl! \n"); + return -ENOMEM; + } else { + table_clk_vlt->count = 4; + table_clk_vlt->entries[0].clk = PP_DAL_POWERLEVEL_ULTRALOW; + table_clk_vlt->entries[0].v = 0; + table_clk_vlt->entries[1].clk = PP_DAL_POWERLEVEL_LOW; + table_clk_vlt->entries[1].v = 720; + table_clk_vlt->entries[2].clk = PP_DAL_POWERLEVEL_NOMINAL; + table_clk_vlt->entries[2].v = 810; + table_clk_vlt->entries[3].clk = PP_DAL_POWERLEVEL_PERFORMANCE; + table_clk_vlt->entries[3].v = 900; + pptable_info->vddc_dep_on_dal_pwrl = table_clk_vlt; + hwmgr->dyn_state.vddc_dep_on_dal_pwrl = table_clk_vlt; + } + + return 0; +} + +static int tonga_set_private_var_based_on_pptale(struct pp_hwmgr *hwmgr) +{ + tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); + struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); + + phm_ppt_v1_clock_voltage_dependency_table *allowed_sclk_vdd_table = + pptable_info->vdd_dep_on_sclk; + phm_ppt_v1_clock_voltage_dependency_table *allowed_mclk_vdd_table = + pptable_info->vdd_dep_on_mclk; + + PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table != NULL, + "VDD dependency on SCLK table is missing. \ + This table is mandatory", return -1); + PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table->count >= 1, + "VDD dependency on SCLK table has to have is missing. \ + This table is mandatory", return -1); + + PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table != NULL, + "VDD dependency on MCLK table is missing. \ + This table is mandatory", return -1); + PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table->count >= 1, + "VDD dependency on MCLK table has to have is missing. 
\ + This table is mandatory", return -1); + + data->min_vddc_in_pp_table = (uint16_t)allowed_sclk_vdd_table->entries[0].vddc; + data->max_vddc_in_pp_table = (uint16_t)allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].vddc; + + pptable_info->max_clock_voltage_on_ac.sclk = + allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].clk; + pptable_info->max_clock_voltage_on_ac.mclk = + allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].clk; + pptable_info->max_clock_voltage_on_ac.vddc = + allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].vddc; + pptable_info->max_clock_voltage_on_ac.vddci = + allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].vddci; + + hwmgr->dyn_state.max_clock_voltage_on_ac.sclk = + pptable_info->max_clock_voltage_on_ac.sclk; + hwmgr->dyn_state.max_clock_voltage_on_ac.mclk = + pptable_info->max_clock_voltage_on_ac.mclk; + hwmgr->dyn_state.max_clock_voltage_on_ac.vddc = + pptable_info->max_clock_voltage_on_ac.vddc; + hwmgr->dyn_state.max_clock_voltage_on_ac.vddci = + pptable_info->max_clock_voltage_on_ac.vddci; + + return 0; +} + +int tonga_unforce_dpm_levels(struct pp_hwmgr *hwmgr) +{ + tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); + int result = 1; + + PP_ASSERT_WITH_CODE (0 == tonga_is_dpm_running(hwmgr), + "Trying to Unforce DPM when DPM is disabled. Returning without sending SMC message.", + return result); + + if (0 == data->pcie_dpm_key_disabled) { + PP_ASSERT_WITH_CODE((0 == smum_send_msg_to_smc( + hwmgr->smumgr, + PPSMC_MSG_PCIeDPM_UnForceLevel)), + "unforce pcie level failed!", + return -1); + } + + result = tonga_upload_dpm_level_enable_mask(hwmgr); + + return result; +} + +static uint32_t tonga_get_lowest_enable_level( + struct pp_hwmgr *hwmgr, uint32_t level_mask) +{ + uint32_t level = 0; + + while (0 == (level_mask & (1 << level))) + level++; + + return level; +} + +static int tonga_force_dpm_lowest(struct pp_hwmgr *hwmgr) +{ + uint32_t level; + tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); + + if (0 == data->pcie_dpm_key_disabled) { + /* PCIE */ + if (data->dpm_level_enable_mask.pcie_dpm_enable_mask != 0) { + level = tonga_get_lowest_enable_level(hwmgr, + data->dpm_level_enable_mask.pcie_dpm_enable_mask); + PP_ASSERT_WITH_CODE((0 == tonga_dpm_force_state_pcie(hwmgr, level)), + "force lowest pcie dpm state failed!", return -1); + } + } + + if (0 == data->sclk_dpm_key_disabled) { + /* SCLK */ + if (0 != data->dpm_level_enable_mask.sclk_dpm_enable_mask) { + level = tonga_get_lowest_enable_level(hwmgr, + data->dpm_level_enable_mask.sclk_dpm_enable_mask); + + PP_ASSERT_WITH_CODE((0 == tonga_dpm_force_state(hwmgr, level)), + "force sclk dpm state failed!", return -1); + + if (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, + CGS_IND_REG__SMC, TARGET_AND_CURRENT_PROFILE_INDEX, CURR_SCLK_INDEX) != level) + printk(KERN_ERR "[ powerplay ] Target_and_current_Profile_Index. \ + Curr_Sclk_Index does not match the level \n"); + } + } + + if (0 == data->mclk_dpm_key_disabled) { + /* MCLK */ + if (data->dpm_level_enable_mask.mclk_dpm_enable_mask != 0) { + level = tonga_get_lowest_enable_level(hwmgr, + data->dpm_level_enable_mask.mclk_dpm_enable_mask); + PP_ASSERT_WITH_CODE((0 == tonga_dpm_force_state_mclk(hwmgr, level)), + "force lowest mclk dpm state failed!", return -1); + if (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + TARGET_AND_CURRENT_PROFILE_INDEX, CURR_MCLK_INDEX) != level) + printk(KERN_ERR "[ powerplay ] Target_and_current_Profile_Index. 
\ + Curr_Mclk_Index does not match the level \n"); + } + } + + return 0; +} + +static int tonga_patch_voltage_dependency_tables_with_lookup_table(struct pp_hwmgr *hwmgr) +{ + uint8_t entryId; + uint8_t voltageId; + tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); + struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); + + phm_ppt_v1_clock_voltage_dependency_table *sclk_table = pptable_info->vdd_dep_on_sclk; + phm_ppt_v1_clock_voltage_dependency_table *mclk_table = pptable_info->vdd_dep_on_mclk; + phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = pptable_info->mm_dep_table; + + if (data->vdd_gfx_control == TONGA_VOLTAGE_CONTROL_BY_SVID2) { + for (entryId = 0; entryId < sclk_table->count; ++entryId) { + voltageId = sclk_table->entries[entryId].vddInd; + sclk_table->entries[entryId].vddgfx = + pptable_info->vddgfx_lookup_table->entries[voltageId].us_vdd; + } + } else { + for (entryId = 0; entryId < sclk_table->count; ++entryId) { + voltageId = sclk_table->entries[entryId].vddInd; + sclk_table->entries[entryId].vddc = + pptable_info->vddc_lookup_table->entries[voltageId].us_vdd; + } + } + + for (entryId = 0; entryId < mclk_table->count; ++entryId) { + voltageId = mclk_table->entries[entryId].vddInd; + mclk_table->entries[entryId].vddc = + pptable_info->vddc_lookup_table->entries[voltageId].us_vdd; + } + + for (entryId = 0; entryId < mm_table->count; ++entryId) { + voltageId = mm_table->entries[entryId].vddcInd; + mm_table->entries[entryId].vddc = + pptable_info->vddc_lookup_table->entries[voltageId].us_vdd; + } + + return 0; + +} + +static int tonga_calc_voltage_dependency_tables(struct pp_hwmgr *hwmgr) +{ + uint8_t entryId; + phm_ppt_v1_voltage_lookup_record v_record; + tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); + struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); + + phm_ppt_v1_clock_voltage_dependency_table *sclk_table = pptable_info->vdd_dep_on_sclk; + phm_ppt_v1_clock_voltage_dependency_table *mclk_table = pptable_info->vdd_dep_on_mclk; + + if (data->vdd_gfx_control == TONGA_VOLTAGE_CONTROL_BY_SVID2) { + for (entryId = 0; entryId < sclk_table->count; ++entryId) { + if (sclk_table->entries[entryId].vdd_offset & (1 << 15)) + v_record.us_vdd = sclk_table->entries[entryId].vddgfx + + sclk_table->entries[entryId].vdd_offset - 0xFFFF; + else + v_record.us_vdd = sclk_table->entries[entryId].vddgfx + + sclk_table->entries[entryId].vdd_offset; + + sclk_table->entries[entryId].vddc = + v_record.us_cac_low = v_record.us_cac_mid = + v_record.us_cac_high = v_record.us_vdd; + + tonga_add_voltage(hwmgr, pptable_info->vddc_lookup_table, &v_record); + } + + for (entryId = 0; entryId < mclk_table->count; ++entryId) { + if (mclk_table->entries[entryId].vdd_offset & (1 << 15)) + v_record.us_vdd = mclk_table->entries[entryId].vddc + + mclk_table->entries[entryId].vdd_offset - 0xFFFF; + else + v_record.us_vdd = mclk_table->entries[entryId].vddc + + mclk_table->entries[entryId].vdd_offset; + + mclk_table->entries[entryId].vddgfx = v_record.us_cac_low = + v_record.us_cac_mid = v_record.us_cac_high = v_record.us_vdd; + tonga_add_voltage(hwmgr, pptable_info->vddgfx_lookup_table, &v_record); + } + } + + return 0; + +} + +static int tonga_calc_mm_voltage_dependency_table(struct pp_hwmgr *hwmgr) +{ + uint32_t entryId; + phm_ppt_v1_voltage_lookup_record v_record; + tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); + struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information 
*)(hwmgr->pptable); + phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = pptable_info->mm_dep_table; + + if (data->vdd_gfx_control == TONGA_VOLTAGE_CONTROL_BY_SVID2) { + for (entryId = 0; entryId < mm_table->count; entryId++) { + if (mm_table->entries[entryId].vddgfx_offset & (1 << 15)) + v_record.us_vdd = mm_table->entries[entryId].vddc + + mm_table->entries[entryId].vddgfx_offset - 0xFFFF; + else + v_record.us_vdd = mm_table->entries[entryId].vddc + + mm_table->entries[entryId].vddgfx_offset; + + /* Add the calculated VDDGFX to the VDDGFX lookup table */ + mm_table->entries[entryId].vddgfx = v_record.us_cac_low = + v_record.us_cac_mid = v_record.us_cac_high = v_record.us_vdd; + tonga_add_voltage(hwmgr, pptable_info->vddgfx_lookup_table, &v_record); + } + } + return 0; +} + + +/** + * Change virtual leakage voltage to actual value. + * + * @param hwmgr the address of the powerplay hardware manager. + * @param pointer to changing voltage + * @param pointer to leakage table + */ +static void tonga_patch_with_vdd_leakage(struct pp_hwmgr *hwmgr, + uint16_t *voltage, phw_tonga_leakage_voltage *pLeakageTable) +{ + uint32_t leakage_index; + + /* search for leakage voltage ID 0xff01 ~ 0xff08 */ + for (leakage_index = 0; leakage_index < pLeakageTable->count; leakage_index++) { + /* if this voltage matches a leakage voltage ID */ + /* patch with actual leakage voltage */ + if (pLeakageTable->leakage_id[leakage_index] == *voltage) { + *voltage = pLeakageTable->actual_voltage[leakage_index]; + break; + } + } + + if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0) + printk(KERN_ERR "[ powerplay ] Voltage value looks like a Leakage ID but it's not patched \n"); +} + +/** + * Patch voltage lookup table by EVV leakages. + * + * @param hwmgr the address of the powerplay hardware manager. 
+ * @param pointer to voltage lookup table + * @param pointer to leakage table + * @return always 0 + */ +static int tonga_patch_lookup_table_with_leakage(struct pp_hwmgr *hwmgr, + phm_ppt_v1_voltage_lookup_table *lookup_table, + phw_tonga_leakage_voltage *pLeakageTable) +{ + uint32_t i; + + for (i = 0; i < lookup_table->count; i++) { + tonga_patch_with_vdd_leakage(hwmgr, + &lookup_table->entries[i].us_vdd, pLeakageTable); + } + + return 0; +} + +static int tonga_patch_clock_voltage_lomits_with_vddc_leakage(struct pp_hwmgr *hwmgr, + phw_tonga_leakage_voltage *pLeakageTable, uint16_t *Vddc) +{ + struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); + + tonga_patch_with_vdd_leakage(hwmgr, (uint16_t *)Vddc, pLeakageTable); + hwmgr->dyn_state.max_clock_voltage_on_dc.vddc = + pptable_info->max_clock_voltage_on_dc.vddc; + + return 0; +} + +static int tonga_patch_clock_voltage_limits_with_vddgfx_leakage( + struct pp_hwmgr *hwmgr, phw_tonga_leakage_voltage *pLeakageTable, + uint16_t *Vddgfx) +{ + tonga_patch_with_vdd_leakage(hwmgr, (uint16_t *)Vddgfx, pLeakageTable); + return 0; +} + +int tonga_sort_lookup_table(struct pp_hwmgr *hwmgr, + phm_ppt_v1_voltage_lookup_table *lookup_table) +{ + uint32_t table_size, i, j; + phm_ppt_v1_voltage_lookup_record tmp_voltage_lookup_record; + table_size = lookup_table->count; + + PP_ASSERT_WITH_CODE(0 != lookup_table->count, + "Lookup table is empty", return -1); + + /* Sorting voltages */ + for (i = 0; i < table_size - 1; i++) { + for (j = i + 1; j > 0; j--) { + if (lookup_table->entries[j].us_vdd < lookup_table->entries[j-1].us_vdd) { + tmp_voltage_lookup_record = lookup_table->entries[j-1]; + lookup_table->entries[j-1] = lookup_table->entries[j]; + lookup_table->entries[j] = tmp_voltage_lookup_record; + } + } + } + + return 0; +} + +static int tonga_complete_dependency_tables(struct pp_hwmgr *hwmgr) +{ + int result = 0; + int tmp_result; + tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); + struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); + + if (data->vdd_gfx_control == TONGA_VOLTAGE_CONTROL_BY_SVID2) { + tmp_result = tonga_patch_lookup_table_with_leakage(hwmgr, + pptable_info->vddgfx_lookup_table, &(data->vddcgfx_leakage)); + if (tmp_result != 0) + result = tmp_result; + + tmp_result = tonga_patch_clock_voltage_limits_with_vddgfx_leakage(hwmgr, + &(data->vddcgfx_leakage), &pptable_info->max_clock_voltage_on_dc.vddgfx); + if (tmp_result != 0) + result = tmp_result; + } else { + tmp_result = tonga_patch_lookup_table_with_leakage(hwmgr, + pptable_info->vddc_lookup_table, &(data->vddc_leakage)); + if (tmp_result != 0) + result = tmp_result; + + tmp_result = tonga_patch_clock_voltage_lomits_with_vddc_leakage(hwmgr, + &(data->vddc_leakage), &pptable_info->max_clock_voltage_on_dc.vddc); + if (tmp_result != 0) + result = tmp_result; + } + + tmp_result = tonga_patch_voltage_dependency_tables_with_lookup_table(hwmgr); + if (tmp_result != 0) + result = tmp_result; + + tmp_result = tonga_calc_voltage_dependency_tables(hwmgr); + if (tmp_result != 0) + result = tmp_result; + + tmp_result = tonga_calc_mm_voltage_dependency_table(hwmgr); + if (tmp_result != 0) + result = tmp_result; + + tmp_result = tonga_sort_lookup_table(hwmgr, pptable_info->vddgfx_lookup_table); + if (tmp_result != 0) + result = tmp_result; + + tmp_result = tonga_sort_lookup_table(hwmgr, pptable_info->vddc_lookup_table); + if (tmp_result != 0) + result = tmp_result; + + return result; +} + +int 
tonga_init_sclk_threshold(struct pp_hwmgr *hwmgr) +{ + tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); + data->low_sclk_interrupt_threshold = 0; + + return 0; +} + +int tonga_setup_asic_task(struct pp_hwmgr *hwmgr) +{ + int tmp_result, result = 0; + + tmp_result = tonga_read_clock_registers(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to read clock registers!", result = tmp_result); + + tmp_result = tonga_get_memory_type(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to get memory type!", result = tmp_result); + + tmp_result = tonga_enable_acpi_power_management(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to enable ACPI power management!", result = tmp_result); + + tmp_result = tonga_init_power_gate_state(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to init power gate state!", result = tmp_result); + + tmp_result = tonga_get_mc_microcode_version(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to get MC microcode version!", result = tmp_result); + + tmp_result = tonga_init_sclk_threshold(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to init sclk threshold!", result = tmp_result); + + return result; +} + +/** + * Enable voltage control + * + * @param hwmgr the address of the powerplay hardware manager. + * @return always 0 + */ +int tonga_enable_voltage_control(struct pp_hwmgr *hwmgr) +{ + /* enable voltage control */ + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT, VOLT_PWRMGT_EN, 1); + + return 0; +} + +/** + * Checks if we want to support voltage control + * + * @param hwmgr the address of the powerplay hardware manager. + */ +bool cf_tonga_voltage_control(const struct pp_hwmgr *hwmgr) +{ + const struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); + + return(TONGA_VOLTAGE_CONTROL_NONE != data->voltage_control); +} + +/*---------------------------MC----------------------------*/ + +uint8_t tonga_get_memory_modile_index(struct pp_hwmgr *hwmgr) +{ + return (uint8_t) (0xFF & (cgs_read_register(hwmgr->device, mmBIOS_SCRATCH_4) >> 16)); +} + +bool tonga_check_s0_mc_reg_index(uint16_t inReg, uint16_t *outReg) +{ + bool result = 1; + + switch (inReg) { + case mmMC_SEQ_RAS_TIMING: + *outReg = mmMC_SEQ_RAS_TIMING_LP; + break; + + case mmMC_SEQ_DLL_STBY: + *outReg = mmMC_SEQ_DLL_STBY_LP; + break; + + case mmMC_SEQ_G5PDX_CMD0: + *outReg = mmMC_SEQ_G5PDX_CMD0_LP; + break; + + case mmMC_SEQ_G5PDX_CMD1: + *outReg = mmMC_SEQ_G5PDX_CMD1_LP; + break; + + case mmMC_SEQ_G5PDX_CTRL: + *outReg = mmMC_SEQ_G5PDX_CTRL_LP; + break; + + case mmMC_SEQ_CAS_TIMING: + *outReg = mmMC_SEQ_CAS_TIMING_LP; + break; + + case mmMC_SEQ_MISC_TIMING: + *outReg = mmMC_SEQ_MISC_TIMING_LP; + break; + + case mmMC_SEQ_MISC_TIMING2: + *outReg = mmMC_SEQ_MISC_TIMING2_LP; + break; + + case mmMC_SEQ_PMG_DVS_CMD: + *outReg = mmMC_SEQ_PMG_DVS_CMD_LP; + break; + + case mmMC_SEQ_PMG_DVS_CTL: + *outReg = mmMC_SEQ_PMG_DVS_CTL_LP; + break; + + case mmMC_SEQ_RD_CTL_D0: + *outReg = mmMC_SEQ_RD_CTL_D0_LP; + break; + + case mmMC_SEQ_RD_CTL_D1: + *outReg = mmMC_SEQ_RD_CTL_D1_LP; + break; + + case mmMC_SEQ_WR_CTL_D0: + *outReg = mmMC_SEQ_WR_CTL_D0_LP; + break; + + case mmMC_SEQ_WR_CTL_D1: + *outReg = mmMC_SEQ_WR_CTL_D1_LP; + break; + + case mmMC_PMG_CMD_EMRS: + *outReg = mmMC_SEQ_PMG_CMD_EMRS_LP; + break; + + case mmMC_PMG_CMD_MRS: + *outReg = mmMC_SEQ_PMG_CMD_MRS_LP; + break; + + case mmMC_PMG_CMD_MRS1: + *outReg = mmMC_SEQ_PMG_CMD_MRS1_LP; + break; + + case mmMC_SEQ_PMG_TIMING: + *outReg = mmMC_SEQ_PMG_TIMING_LP; + 
break; + + case mmMC_PMG_CMD_MRS2: + *outReg = mmMC_SEQ_PMG_CMD_MRS2_LP; + break; + + case mmMC_SEQ_WR_CTL_2: + *outReg = mmMC_SEQ_WR_CTL_2_LP; + break; + + default: + result = 0; + break; + } + + return result; +} + +int tonga_set_s0_mc_reg_index(phw_tonga_mc_reg_table *table) +{ + uint32_t i; + uint16_t address; + + for (i = 0; i < table->last; i++) { + table->mc_reg_address[i].s0 = + tonga_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address) + ? address : table->mc_reg_address[i].s1; + } + return 0; +} + +int tonga_copy_vbios_smc_reg_table(const pp_atomctrl_mc_reg_table *table, phw_tonga_mc_reg_table *ni_table) +{ + uint8_t i, j; + + PP_ASSERT_WITH_CODE((table->last <= SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE), + "Invalid VramInfo table.", return -1); + PP_ASSERT_WITH_CODE((table->num_entries <= MAX_AC_TIMING_ENTRIES), + "Invalid VramInfo table.", return -1); + + for (i = 0; i < table->last; i++) { + ni_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1; + } + ni_table->last = table->last; + + for (i = 0; i < table->num_entries; i++) { + ni_table->mc_reg_table_entry[i].mclk_max = + table->mc_reg_table_entry[i].mclk_max; + for (j = 0; j < table->last; j++) { + ni_table->mc_reg_table_entry[i].mc_data[j] = + table->mc_reg_table_entry[i].mc_data[j]; + } + } + + ni_table->num_entries = table->num_entries; + + return 0; +} + +/** + * VBIOS omits some information to reduce size, we need to recover them here. + * 1. when we see mmMC_SEQ_MISC1, bit[31:16] EMRS1, need to be write to mmMC_PMG_CMD_EMRS /_LP[15:0]. + * Bit[15:0] MRS, need to be update mmMC_PMG_CMD_MRS/_LP[15:0] + * 2. when we see mmMC_SEQ_RESERVE_M, bit[15:0] EMRS2, need to be write to mmMC_PMG_CMD_MRS1/_LP[15:0]. + * 3. need to set these data for each clock range + * + * @param hwmgr the address of the powerplay hardware manager. + * @param table the address of MCRegTable + * @return always 0 + */ +int tonga_set_mc_special_registers(struct pp_hwmgr *hwmgr, phw_tonga_mc_reg_table *table) +{ + uint8_t i, j, k; + uint32_t temp_reg; + const tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); + + for (i = 0, j = table->last; i < table->last; i++) { + PP_ASSERT_WITH_CODE((j < SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE), + "Invalid VramInfo table.", return -1); + switch (table->mc_reg_address[i].s1) { + /* + * mmMC_SEQ_MISC1, bit[31:16] EMRS1, need to be write to mmMC_PMG_CMD_EMRS /_LP[15:0]. 
+ * Bit[15:0] MRS, need to be update mmMC_PMG_CMD_MRS/_LP[15:0]
+ */
+ case mmMC_SEQ_MISC1:
+ temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_EMRS);
+ table->mc_reg_address[j].s1 = mmMC_PMG_CMD_EMRS;
+ table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_EMRS_LP;
+ for (k = 0; k < table->num_entries; k++) {
+ table->mc_reg_table_entry[k].mc_data[j] =
+ ((temp_reg & 0xffff0000)) |
+ ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
+ }
+ j++;
+ PP_ASSERT_WITH_CODE((j < SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+ "Invalid VramInfo table.", return -1);
+
+ temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS);
+ table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS;
+ table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS_LP;
+ for (k = 0; k < table->num_entries; k++) {
+ table->mc_reg_table_entry[k].mc_data[j] =
+ (temp_reg & 0xffff0000) |
+ (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
+
+ if (!data->is_memory_GDDR5) {
+ table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
+ }
+ }
+ j++;
+ PP_ASSERT_WITH_CODE((j <= SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+ "Invalid VramInfo table.", return -1);
+
+ if (!data->is_memory_GDDR5) {
+ table->mc_reg_address[j].s1 = mmMC_PMG_AUTO_CMD;
+ table->mc_reg_address[j].s0 = mmMC_PMG_AUTO_CMD;
+ for (k = 0; k < table->num_entries; k++) {
+ table->mc_reg_table_entry[k].mc_data[j] =
+ (table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
+ }
+ j++;
+ PP_ASSERT_WITH_CODE((j <= SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+ "Invalid VramInfo table.", return -1);
+ }
+
+ break;
+
+ case mmMC_SEQ_RESERVE_M:
+ temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1);
+ table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS1;
+ table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS1_LP;
+ for (k = 0; k < table->num_entries; k++) {
+ table->mc_reg_table_entry[k].mc_data[j] =
+ (temp_reg & 0xffff0000) |
+ (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
+ }
+ j++;
+ PP_ASSERT_WITH_CODE((j <= SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+ "Invalid VramInfo table.", return -1);
+ break;
+
+ default:
+ break;
+ }
+
+ }
+
+ table->last = j;
+
+ return 0;
+}
+
+int tonga_set_valid_flag(phw_tonga_mc_reg_table *table)
+{
+ uint8_t i, j;
+ for (i = 0; i < table->last; i++) {
+ for (j = 1; j < table->num_entries; j++) {
+ if (table->mc_reg_table_entry[j-1].mc_data[i] !=
+ table->mc_reg_table_entry[j].mc_data[i]) {
+ table->validflag |= (1<<i);
+ break;
+ }
+ }
+ }
+
+ return 0;
+}
+
+int tonga_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
+{
+ int result;
+ tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
+ pp_atomctrl_mc_reg_table *table;
+ phw_tonga_mc_reg_table *ni_table = &data->tonga_mc_reg_table;
+ uint8_t module_index = tonga_get_memory_modile_index(hwmgr);
+
+ table = kzalloc(sizeof(pp_atomctrl_mc_reg_table), GFP_KERNEL);
+
+ if (NULL == table)
+ return -ENOMEM;
+
+ /* Program additional LP registers that are no longer programmed by VBIOS */
+ cgs_write_register(hwmgr->device, mmMC_SEQ_RAS_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RAS_TIMING));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_CAS_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_CAS_TIMING));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_DLL_STBY_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_DLL_STBY));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD_LP, 
cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD)); + cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL)); + cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING)); + cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2)); + cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_EMRS_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_EMRS)); + cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS)); + cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS1_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1)); + cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0)); + cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1)); + cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0)); + cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1)); + cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_TIMING)); + cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS2_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS2)); + cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_2_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_2)); + + memset(table, 0x00, sizeof(pp_atomctrl_mc_reg_table)); + + result = atomctrl_initialize_mc_reg_table(hwmgr, module_index, table); + + if (0 == result) + result = tonga_copy_vbios_smc_reg_table(table, ni_table); + + if (0 == result) { + tonga_set_s0_mc_reg_index(ni_table); + result = tonga_set_mc_special_registers(hwmgr, ni_table); + } + + if (0 == result) + tonga_set_valid_flag(ni_table); + + kfree(table); + return result; +} + +/* +* Copy one arb setting to another and then switch the active set. +* arbFreqSrc and arbFreqDest is one of the MC_CG_ARB_FREQ_Fx constants. 
+*/ +int tonga_copy_and_switch_arb_sets(struct pp_hwmgr *hwmgr, + uint32_t arbFreqSrc, uint32_t arbFreqDest) +{ + uint32_t mc_arb_dram_timing; + uint32_t mc_arb_dram_timing2; + uint32_t burst_time; + uint32_t mc_cg_config; + + switch (arbFreqSrc) { + case MC_CG_ARB_FREQ_F0: + mc_arb_dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING); + mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2); + burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0); + break; + + case MC_CG_ARB_FREQ_F1: + mc_arb_dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1); + mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1); + burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1); + break; + + default: + return -1; + } + + switch (arbFreqDest) { + case MC_CG_ARB_FREQ_F0: + cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING, mc_arb_dram_timing); + cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2, mc_arb_dram_timing2); + PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0, burst_time); + break; + + case MC_CG_ARB_FREQ_F1: + cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1, mc_arb_dram_timing); + cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2); + PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1, burst_time); + break; + + default: + return -1; + } + + mc_cg_config = cgs_read_register(hwmgr->device, mmMC_CG_CONFIG); + mc_cg_config |= 0x0000000F; + cgs_write_register(hwmgr->device, mmMC_CG_CONFIG, mc_cg_config); + PHM_WRITE_FIELD(hwmgr->device, MC_ARB_CG, CG_ARB_REQ, arbFreqDest); + + return 0; +} + +/** + * Initial switch from ARB F0->F1 + * + * @param hwmgr the address of the powerplay hardware manager. + * @return always 0 + * This function is to be called from the SetPowerState table. + */ +int tonga_initial_switch_from_arb_f0_to_f1(struct pp_hwmgr *hwmgr) +{ + return tonga_copy_and_switch_arb_sets(hwmgr, MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1); +} + +/** + * Initialize the ARB DRAM timing table's index field. + * + * @param hwmgr the address of the powerplay hardware manager. + * @return always 0 + */ +int tonga_init_arb_table_index(struct pp_hwmgr *hwmgr) +{ + const tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); + uint32_t tmp; + int result; + + /* + * This is a read-modify-write on the first byte of the ARB table. + * The first byte in the SMU72_Discrete_MCArbDramTimingTable structure is the field 'current'. + * This solution is ugly, but we never write the whole table only individual fields in it. + * In reality this field should not be in that structure but in a soft register. 
+ */ + result = tonga_read_smc_sram_dword(hwmgr->smumgr, + data->arb_table_start, &tmp, data->sram_end); + + if (0 != result) + return result; + + tmp &= 0x00FFFFFF; + tmp |= ((uint32_t)MC_CG_ARB_FREQ_F1) << 24; + + return tonga_write_smc_sram_dword(hwmgr->smumgr, + data->arb_table_start, tmp, data->sram_end); +} + +int tonga_populate_mc_reg_address(struct pp_hwmgr *hwmgr, SMU72_Discrete_MCRegisters *mc_reg_table) +{ + const struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); + + uint32_t i, j; + + for (i = 0, j = 0; j < data->tonga_mc_reg_table.last; j++) { + if (data->tonga_mc_reg_table.validflag & 1<<j) { + PP_ASSERT_WITH_CODE((i < SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE), + "Index of mc_reg_table->address[] array out of boundary", return -1); + mc_reg_table->address[i].s0 = + PP_HOST_TO_SMC_US(data->tonga_mc_reg_table.mc_reg_address[j].s0); + mc_reg_table->address[i].s1 = + PP_HOST_TO_SMC_US(data->tonga_mc_reg_table.mc_reg_address[j].s1); + i++; + } + } + + mc_reg_table->last = (uint8_t)i; + + return 0; +} + +/*convert register values from driver to SMC format */ +void tonga_convert_mc_registers( + const phw_tonga_mc_reg_entry * pEntry, + SMU72_Discrete_MCRegisterSet *pData, + uint32_t numEntries, uint32_t validflag) +{ + uint32_t i, j; + + for (i = 0, j = 0; j < numEntries; j++) { + if (validflag & 1<<j) { + pData->value[i] = PP_HOST_TO_SMC_UL(pEntry->mc_data[j]); + i++; + } + } +} + +/* find the entry in the memory range table, then populate the value to SMC's tonga_mc_reg_table */ +int tonga_convert_mc_reg_table_entry_to_smc( + struct pp_hwmgr *hwmgr, + const uint32_t memory_clock, + SMU72_Discrete_MCRegisterSet *mc_reg_table_data + ) +{ + const tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); + uint32_t i = 0; + + for (i = 0; i < data->tonga_mc_reg_table.num_entries; i++) { + if (memory_clock <= + data->tonga_mc_reg_table.mc_reg_table_entry[i].mclk_max) { + break; + } + } + + if ((i == data->tonga_mc_reg_table.num_entries) && (i > 0)) + --i; + + tonga_convert_mc_registers(&data->tonga_mc_reg_table.mc_reg_table_entry[i], + mc_reg_table_data, data->tonga_mc_reg_table.last, data->tonga_mc_reg_table.validflag); + + return 0; +} + +int tonga_convert_mc_reg_table_to_smc(struct pp_hwmgr *hwmgr, + SMU72_Discrete_MCRegisters *mc_reg_table) +{ + int result = 0; + tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); + int res; + uint32_t i; + + for (i = 0; i < data->dpm_table.mclk_table.count; i++) { + res = tonga_convert_mc_reg_table_entry_to_smc( + hwmgr, + data->dpm_table.mclk_table.dpm_levels[i].value, + &mc_reg_table->data[i] + ); + + if (0 != res) + result = res; + } + + return result; +} + +int tonga_populate_initial_mc_reg_table(struct pp_hwmgr *hwmgr) +{ + int result; + struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); + + memset(&data->mc_reg_table, 0x00, sizeof(SMU72_Discrete_MCRegisters)); + result = tonga_populate_mc_reg_address(hwmgr, &(data->mc_reg_table)); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize MCRegTable for the MC register addresses!", return result;); + + result = tonga_convert_mc_reg_table_to_smc(hwmgr, &data->mc_reg_table); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize MCRegTable for driver state!", return result;); + + return tonga_copy_bytes_to_smc(hwmgr->smumgr, data->mc_reg_table_start, + (uint8_t *)&data->mc_reg_table, sizeof(SMU72_Discrete_MCRegisters), data->sram_end); +} + +/** + * Programs static screen detection parameters + * + * @param hwmgr the address of the powerplay hardware manager.
+ * @return always 0 + */ +int tonga_program_static_screen_threshold_parameters(struct pp_hwmgr *hwmgr) +{ + tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); + + /* Set static screen threshold unit*/ + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, + CGS_IND_REG__SMC, CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD_UNIT, + data->static_screen_threshold_unit); + /* Set static screen threshold*/ + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, + CGS_IND_REG__SMC, CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD, + data->static_screen_threshold); + + return 0; +} + +/** + * Setup display gap for glitch free memory clock switching. + * + * @param hwmgr the address of the powerplay hardware manager. + * @return always 0 + */ +int tonga_enable_display_gap(struct pp_hwmgr *hwmgr) +{ + uint32_t display_gap = cgs_read_ind_register(hwmgr->device, + CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL); + + display_gap = PHM_SET_FIELD(display_gap, + CG_DISPLAY_GAP_CNTL, DISP_GAP, DISPLAY_GAP_IGNORE); + + display_gap = PHM_SET_FIELD(display_gap, + CG_DISPLAY_GAP_CNTL, DISP_GAP_MCHG, DISPLAY_GAP_VBLANK); + + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_DISPLAY_GAP_CNTL, display_gap); + + return 0; +} + +/** + * Programs activity state transition voting clients + * + * @param hwmgr the address of the powerplay hardware manager. + * @return always 0 + */ +int tonga_program_voting_clients(struct pp_hwmgr *hwmgr) +{ + tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); + + /* Clear reset for voting clients before enabling DPM */ + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 0); + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 0); + + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_FREQ_TRAN_VOTING_0, data->voting_rights_clients0); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_FREQ_TRAN_VOTING_1, data->voting_rights_clients1); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_FREQ_TRAN_VOTING_2, data->voting_rights_clients2); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_FREQ_TRAN_VOTING_3, data->voting_rights_clients3); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_FREQ_TRAN_VOTING_4, data->voting_rights_clients4); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_FREQ_TRAN_VOTING_5, data->voting_rights_clients5); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_FREQ_TRAN_VOTING_6, data->voting_rights_clients6); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_FREQ_TRAN_VOTING_7, data->voting_rights_clients7); + + return 0; +} + + +int tonga_enable_dpm_tasks(struct pp_hwmgr *hwmgr) +{ + int tmp_result, result = 0; + + tmp_result = tonga_check_for_dpm_stopped(hwmgr); + + if (cf_tonga_voltage_control(hwmgr)) { + tmp_result = tonga_enable_voltage_control(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to enable voltage control!", result = tmp_result); + + tmp_result = tonga_construct_voltage_tables(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to contruct voltage tables!", result = tmp_result); + } + + tmp_result = tonga_initialize_mc_reg_table(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to initialize MC reg table!", result = tmp_result); + + tmp_result = tonga_program_static_screen_threshold_parameters(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to program static screen threshold parameters!", result = 
tmp_result); + + tmp_result = tonga_enable_display_gap(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to enable display gap!", result = tmp_result); + + tmp_result = tonga_program_voting_clients(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to program voting clients!", result = tmp_result); + + tmp_result = tonga_process_firmware_header(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to process firmware header!", result = tmp_result); + + tmp_result = tonga_initial_switch_from_arb_f0_to_f1(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to initialize switch from ArbF0 to F1!", result = tmp_result); + + tmp_result = tonga_init_smc_table(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to initialize SMC table!", result = tmp_result); + + tmp_result = tonga_init_arb_table_index(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to initialize ARB table index!", result = tmp_result); + + tmp_result = tonga_populate_initial_mc_reg_table(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to populate initialize MC Reg table!", result = tmp_result); + + tmp_result = tonga_notify_smc_display_change(hwmgr, false); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to notify no display!", result = tmp_result); + + /* enable SCLK control */ + tmp_result = tonga_enable_sclk_control(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to enable SCLK control!", result = tmp_result); + + /* enable DPM */ + tmp_result = tonga_start_dpm(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to start DPM!", result = tmp_result); + + return result; +} + +int tonga_disable_dpm_tasks(struct pp_hwmgr *hwmgr) +{ + int tmp_result, result = 0; + + tmp_result = tonga_check_for_dpm_running(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "SMC is still running!", return 0); + + tmp_result = tonga_stop_dpm(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to stop DPM!", result = tmp_result); + + tmp_result = tonga_reset_to_default(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to reset to default!", result = tmp_result); + + return result; +} + +int tonga_reset_asic_tasks(struct pp_hwmgr *hwmgr) +{ + int result; + + result = tonga_set_boot_state(hwmgr); + if (0 != result) + printk(KERN_ERR "[ powerplay ] Failed to reset asic via set boot state! \n"); + + return result; +} + +int tonga_hwmgr_backend_fini(struct pp_hwmgr *hwmgr) +{ + if (NULL != hwmgr->dyn_state.vddc_dep_on_dal_pwrl) { + kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl); + hwmgr->dyn_state.vddc_dep_on_dal_pwrl = NULL; + } + + if (NULL != hwmgr->backend) { + kfree(hwmgr->backend); + hwmgr->backend = NULL; + } + + return 0; +} + +/** + * Initializes the Volcanic Islands Hardware Manager + * + * @param hwmgr the address of the powerplay hardware manager. + * @return 1 if success; otherwise appropriate error code. 
+ */ +int tonga_hwmgr_backend_init(struct pp_hwmgr *hwmgr) +{ + int result = 0; + SMU72_Discrete_DpmTable *table = NULL; + tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); + pp_atomctrl_gpio_pin_assignment gpio_pin_assignment; + struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); + phw_tonga_ulv_parm *ulv; + + PP_ASSERT_WITH_CODE((NULL != hwmgr), + "Invalid Parameter!", return -1;); + + data->dll_defaule_on = 0; + data->sram_end = SMC_RAM_END; + + data->activity_target[0] = PPTONGA_TARGETACTIVITY_DFLT; + data->activity_target[1] = PPTONGA_TARGETACTIVITY_DFLT; + data->activity_target[2] = PPTONGA_TARGETACTIVITY_DFLT; + data->activity_target[3] = PPTONGA_TARGETACTIVITY_DFLT; + data->activity_target[4] = PPTONGA_TARGETACTIVITY_DFLT; + data->activity_target[5] = PPTONGA_TARGETACTIVITY_DFLT; + data->activity_target[6] = PPTONGA_TARGETACTIVITY_DFLT; + data->activity_target[7] = PPTONGA_TARGETACTIVITY_DFLT; + + data->vddc_vddci_delta = VDDC_VDDCI_DELTA; + data->vddc_vddgfx_delta = VDDC_VDDGFX_DELTA; + data->mclk_activity_target = PPTONGA_MCLK_TARGETACTIVITY_DFLT; + + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_DisableVoltageIsland); + + data->sclk_dpm_key_disabled = 0; + data->mclk_dpm_key_disabled = 0; + data->pcie_dpm_key_disabled = 0; + data->pcc_monitor_enabled = 0; + + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_UnTabledHardwareInterface); + + data->gpio_debug = 0; + data->engine_clock_data = 0; + data->memory_clock_data = 0; + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_DynamicPatchPowerState); + + /* need to set voltage control types before EVV patching*/ + data->voltage_control = TONGA_VOLTAGE_CONTROL_NONE; + data->vdd_ci_control = TONGA_VOLTAGE_CONTROL_NONE; + data->vdd_gfx_control = TONGA_VOLTAGE_CONTROL_NONE; + data->mvdd_control = TONGA_VOLTAGE_CONTROL_NONE; + + if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr, + VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2)) { + data->voltage_control = TONGA_VOLTAGE_CONTROL_BY_SVID2; + } + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ControlVDDGFX)) { + if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr, + VOLTAGE_TYPE_VDDGFX, VOLTAGE_OBJ_SVID2)) { + data->vdd_gfx_control = TONGA_VOLTAGE_CONTROL_BY_SVID2; + } + } + + if (TONGA_VOLTAGE_CONTROL_NONE == data->vdd_gfx_control) { + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ControlVDDGFX); + } + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_EnableMVDDControl)) { + if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr, + VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT)) { + data->mvdd_control = TONGA_VOLTAGE_CONTROL_BY_GPIO; + } + } + + if (TONGA_VOLTAGE_CONTROL_NONE == data->mvdd_control) { + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_EnableMVDDControl); + } + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ControlVDDCI)) { + if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr, + VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT)) + data->vdd_ci_control = TONGA_VOLTAGE_CONTROL_BY_GPIO; + else if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr, + VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2)) + data->vdd_ci_control = TONGA_VOLTAGE_CONTROL_BY_SVID2; + } + + if (TONGA_VOLTAGE_CONTROL_NONE == data->vdd_ci_control) + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ControlVDDCI); + + 
phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_TablelessHardwareInterface); + + if (pptable_info->cac_dtp_table->usClockStretchAmount != 0) + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ClockStretcher); + + /* Initializes DPM default values*/ + tonga_initialize_dpm_defaults(hwmgr); + + /* Get leakage voltage based on leakage ID.*/ + PP_ASSERT_WITH_CODE((0 == tonga_get_evv_voltage(hwmgr)), + "Get EVV Voltage Failed. Abort Driver loading!", return -1); + + tonga_complete_dependency_tables(hwmgr); + + /* Parse pptable data read from VBIOS*/ + tonga_set_private_var_based_on_pptale(hwmgr); + + /* ULV Support*/ + ulv = &(data->ulv); + ulv->ulv_supported = 0; + + /* Initalize Dynamic State Adjustment Rule Settings*/ + result = tonga_initializa_dynamic_state_adjustment_rule_settings(hwmgr); + if (result) + printk(KERN_ERR "[ powerplay ] tonga_initializa_dynamic_state_adjustment_rule_settings failed!\n"); + data->uvd_enabled = 0; + + table = &(data->smc_state_table); + + /* + * if ucGPIO_ID=VDDC_PCC_GPIO_PINID in GPIO_LUTable, + * Peak Current Control feature is enabled and we should program PCC HW register + */ + if (0 == atomctrl_get_pp_assign_pin(hwmgr, VDDC_PCC_GPIO_PINID, &gpio_pin_assignment)) { + uint32_t temp_reg = cgs_read_ind_register(hwmgr->device, + CGS_IND_REG__SMC, ixCNB_PWRMGT_CNTL); + + switch (gpio_pin_assignment.uc_gpio_pin_bit_shift) { + case 0: + temp_reg = PHM_SET_FIELD(temp_reg, + CNB_PWRMGT_CNTL, GNB_SLOW_MODE, 0x1); + break; + case 1: + temp_reg = PHM_SET_FIELD(temp_reg, + CNB_PWRMGT_CNTL, GNB_SLOW_MODE, 0x2); + break; + case 2: + temp_reg = PHM_SET_FIELD(temp_reg, + CNB_PWRMGT_CNTL, GNB_SLOW, 0x1); + break; + case 3: + temp_reg = PHM_SET_FIELD(temp_reg, + CNB_PWRMGT_CNTL, FORCE_NB_PS1, 0x1); + break; + case 4: + temp_reg = PHM_SET_FIELD(temp_reg, + CNB_PWRMGT_CNTL, DPM_ENABLED, 0x1); + break; + default: + printk(KERN_ERR "[ powerplay ] Failed to setup PCC HW register! \ + Wrong GPIO assigned for VDDC_PCC_GPIO_PINID! \n"); + break; + } + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCNB_PWRMGT_CNTL, temp_reg); + } + + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_EnableSMU7ThermalManagement); + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_SMU7); + + data->vddc_phase_shed_control = 0; + + if (0 == result) { + struct cgs_system_info sys_info = {0}; + + data->is_tlu_enabled = 0; + hwmgr->platform_descriptor.hardwareActivityPerformanceLevels = + TONGA_MAX_HARDWARE_POWERLEVELS; + hwmgr->platform_descriptor.hardwarePerformanceLevels = 2; + hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50; + + sys_info.size = sizeof(struct cgs_system_info); + sys_info.info_id = CGS_SYSTEM_INFO_PCIE_GEN_INFO; + result = cgs_query_system_info(hwmgr->device, &sys_info); + if (result) + data->pcie_gen_cap = 0x30007; + else + data->pcie_gen_cap = (uint32_t)sys_info.value; + if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) + data->pcie_spc_cap = 20; + sys_info.size = sizeof(struct cgs_system_info); + sys_info.info_id = CGS_SYSTEM_INFO_PCIE_MLW; + result = cgs_query_system_info(hwmgr->device, &sys_info); + if (result) + data->pcie_lane_cap = 0x2f0000; + else + data->pcie_lane_cap = (uint32_t)sys_info.value; + } else { + /* Ignore return value in here, we are cleaning up a mess. 
*/ + tonga_hwmgr_backend_fini(hwmgr); + } + + return result; +} + +static int tonga_force_dpm_level(struct pp_hwmgr *hwmgr, + enum amd_dpm_forced_level level) +{ + int ret = 0; + + switch (level) { + case AMD_DPM_FORCED_LEVEL_HIGH: + ret = tonga_force_dpm_highest(hwmgr); + if (ret) + return ret; + break; + case AMD_DPM_FORCED_LEVEL_LOW: + ret = tonga_force_dpm_lowest(hwmgr); + if (ret) + return ret; + break; + case AMD_DPM_FORCED_LEVEL_AUTO: + ret = tonga_unforce_dpm_levels(hwmgr); + if (ret) + return ret; + break; + default: + break; + } + + hwmgr->dpm_level = level; + return ret; +} + +static int tonga_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, + struct pp_power_state *prequest_ps, + const struct pp_power_state *pcurrent_ps) +{ + struct tonga_power_state *tonga_ps = + cast_phw_tonga_power_state(&prequest_ps->hardware); + + uint32_t sclk; + uint32_t mclk; + struct PP_Clocks minimum_clocks = {0}; + bool disable_mclk_switching; + bool disable_mclk_switching_for_frame_lock; + struct cgs_display_info info = {0}; + const struct phm_clock_and_voltage_limits *max_limits; + uint32_t i; + tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); + struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); + + int32_t count; + int32_t stable_pstate_sclk = 0, stable_pstate_mclk = 0; + + data->battery_state = (PP_StateUILabel_Battery == prequest_ps->classification.ui_label); + + PP_ASSERT_WITH_CODE(tonga_ps->performance_level_count == 2, + "VI should always have 2 performance levels", + ); + + max_limits = (PP_PowerSource_AC == hwmgr->power_source) ? + &(hwmgr->dyn_state.max_clock_voltage_on_ac) : + &(hwmgr->dyn_state.max_clock_voltage_on_dc); + + if (PP_PowerSource_DC == hwmgr->power_source) { + for (i = 0; i < tonga_ps->performance_level_count; i++) { + if (tonga_ps->performance_levels[i].memory_clock > max_limits->mclk) + tonga_ps->performance_levels[i].memory_clock = max_limits->mclk; + if (tonga_ps->performance_levels[i].engine_clock > max_limits->sclk) + tonga_ps->performance_levels[i].engine_clock = max_limits->sclk; + } + } + + tonga_ps->vce_clocks.EVCLK = hwmgr->vce_arbiter.evclk; + tonga_ps->vce_clocks.ECCLK = hwmgr->vce_arbiter.ecclk; + + tonga_ps->acp_clk = hwmgr->acp_arbiter.acpclk; + + cgs_get_active_displays_info(hwmgr->device, &info); + + /*TO DO result = PHM_CheckVBlankTime(hwmgr, &vblankTooShort);*/ + + /* TO DO GetMinClockSettings(hwmgr->pPECI, &minimum_clocks); */ + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState)) { + + max_limits = &(hwmgr->dyn_state.max_clock_voltage_on_ac); + stable_pstate_sclk = (max_limits->sclk * 75) / 100; + + for (count = pptable_info->vdd_dep_on_sclk->count-1; count >= 0; count--) { + if (stable_pstate_sclk >= pptable_info->vdd_dep_on_sclk->entries[count].clk) { + stable_pstate_sclk = pptable_info->vdd_dep_on_sclk->entries[count].clk; + break; + } + } + + if (count < 0) + stable_pstate_sclk = pptable_info->vdd_dep_on_sclk->entries[0].clk; + + stable_pstate_mclk = max_limits->mclk; + + minimum_clocks.engineClock = stable_pstate_sclk; + minimum_clocks.memoryClock = stable_pstate_mclk; + } + + if (minimum_clocks.engineClock < hwmgr->gfx_arbiter.sclk) + minimum_clocks.engineClock = hwmgr->gfx_arbiter.sclk; + + if (minimum_clocks.memoryClock < hwmgr->gfx_arbiter.mclk) + minimum_clocks.memoryClock = hwmgr->gfx_arbiter.mclk; + + tonga_ps->sclk_threshold = hwmgr->gfx_arbiter.sclk_threshold; + + if (0 != hwmgr->gfx_arbiter.sclk_over_drive) { + 
PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.sclk_over_drive <= hwmgr->platform_descriptor.overdriveLimit.engineClock), + "Overdrive sclk exceeds limit", + hwmgr->gfx_arbiter.sclk_over_drive = hwmgr->platform_descriptor.overdriveLimit.engineClock); + + if (hwmgr->gfx_arbiter.sclk_over_drive >= hwmgr->gfx_arbiter.sclk) + tonga_ps->performance_levels[1].engine_clock = hwmgr->gfx_arbiter.sclk_over_drive; + } + + if (0 != hwmgr->gfx_arbiter.mclk_over_drive) { + PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.mclk_over_drive <= hwmgr->platform_descriptor.overdriveLimit.memoryClock), + "Overdrive mclk exceeds limit", + hwmgr->gfx_arbiter.mclk_over_drive = hwmgr->platform_descriptor.overdriveLimit.memoryClock); + + if (hwmgr->gfx_arbiter.mclk_over_drive >= hwmgr->gfx_arbiter.mclk) + tonga_ps->performance_levels[1].memory_clock = hwmgr->gfx_arbiter.mclk_over_drive; + } + + disable_mclk_switching_for_frame_lock = phm_cap_enabled( + hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_DisableMclkSwitchingForFrameLock); + + disable_mclk_switching = (1 < info.display_count) || + disable_mclk_switching_for_frame_lock; + + sclk = tonga_ps->performance_levels[0].engine_clock; + mclk = tonga_ps->performance_levels[0].memory_clock; + + if (disable_mclk_switching) + mclk = tonga_ps->performance_levels[tonga_ps->performance_level_count - 1].memory_clock; + + if (sclk < minimum_clocks.engineClock) + sclk = (minimum_clocks.engineClock > max_limits->sclk) ? max_limits->sclk : minimum_clocks.engineClock; + + if (mclk < minimum_clocks.memoryClock) + mclk = (minimum_clocks.memoryClock > max_limits->mclk) ? max_limits->mclk : minimum_clocks.memoryClock; + + tonga_ps->performance_levels[0].engine_clock = sclk; + tonga_ps->performance_levels[0].memory_clock = mclk; + + tonga_ps->performance_levels[1].engine_clock = + (tonga_ps->performance_levels[1].engine_clock >= tonga_ps->performance_levels[0].engine_clock) ? 
+ tonga_ps->performance_levels[1].engine_clock : + tonga_ps->performance_levels[0].engine_clock; + + if (disable_mclk_switching) { + if (mclk < tonga_ps->performance_levels[1].memory_clock) + mclk = tonga_ps->performance_levels[1].memory_clock; + + tonga_ps->performance_levels[0].memory_clock = mclk; + tonga_ps->performance_levels[1].memory_clock = mclk; + } else { + if (tonga_ps->performance_levels[1].memory_clock < tonga_ps->performance_levels[0].memory_clock) + tonga_ps->performance_levels[1].memory_clock = tonga_ps->performance_levels[0].memory_clock; + } + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState)) { + for (i=0; i < tonga_ps->performance_level_count; i++) { + tonga_ps->performance_levels[i].engine_clock = stable_pstate_sclk; + tonga_ps->performance_levels[i].memory_clock = stable_pstate_mclk; + tonga_ps->performance_levels[i].pcie_gen = data->pcie_gen_performance.max; + tonga_ps->performance_levels[i].pcie_lane = data->pcie_gen_performance.max; + } + } + + return 0; +} + +int tonga_get_power_state_size(struct pp_hwmgr *hwmgr) +{ + return sizeof(struct tonga_power_state); +} + +static int tonga_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low) +{ + struct pp_power_state *ps; + struct tonga_power_state *tonga_ps; + + if (hwmgr == NULL) + return -EINVAL; + + ps = hwmgr->request_ps; + + if (ps == NULL) + return -EINVAL; + + tonga_ps = cast_phw_tonga_power_state(&ps->hardware); + + if (low) + return tonga_ps->performance_levels[0].memory_clock; + else + return tonga_ps->performance_levels[tonga_ps->performance_level_count-1].memory_clock; +} + +static int tonga_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low) +{ + struct pp_power_state *ps; + struct tonga_power_state *tonga_ps; + + if (hwmgr == NULL) + return -EINVAL; + + ps = hwmgr->request_ps; + + if (ps == NULL) + return -EINVAL; + + tonga_ps = cast_phw_tonga_power_state(&ps->hardware); + + if (low) + return tonga_ps->performance_levels[0].engine_clock; + else + return tonga_ps->performance_levels[tonga_ps->performance_level_count-1].engine_clock; +} + +static uint16_t tonga_get_current_pcie_speed( + struct pp_hwmgr *hwmgr) +{ + uint32_t speed_cntl = 0; + + speed_cntl = cgs_read_ind_register(hwmgr->device, + CGS_IND_REG__PCIE, + ixPCIE_LC_SPEED_CNTL); + return((uint16_t)PHM_GET_FIELD(speed_cntl, + PCIE_LC_SPEED_CNTL, LC_CURRENT_DATA_RATE)); +} + +static int tonga_get_current_pcie_lane_number( + struct pp_hwmgr *hwmgr) +{ + uint32_t link_width; + + link_width = PHM_READ_INDIRECT_FIELD(hwmgr->device, + CGS_IND_REG__PCIE, + PCIE_LC_LINK_WIDTH_CNTL, + LC_LINK_WIDTH_RD); + + PP_ASSERT_WITH_CODE((7 >= link_width), + "Invalid PCIe lane width!", return 0); + + return decode_pcie_lane_width(link_width); +} + +static int tonga_dpm_patch_boot_state(struct pp_hwmgr *hwmgr, + struct pp_hw_power_state *hw_ps) +{ + struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); + struct tonga_power_state *ps = (struct tonga_power_state *)hw_ps; + ATOM_FIRMWARE_INFO_V2_2 *fw_info; + uint16_t size; + uint8_t frev, crev; + int index = GetIndexIntoMasterTable(DATA, FirmwareInfo); + + /* First retrieve the Boot clocks and VDDC from the firmware info table. + * We assume here that fw_info is unchanged if this call fails. + */ + fw_info = (ATOM_FIRMWARE_INFO_V2_2 *)cgs_atom_get_data_table( + hwmgr->device, index, + &size, &frev, &crev); + if (!fw_info) + /* During a test, there is no firmware info table. */ + return 0; + + /* Patch the state. 
*/ + data->vbios_boot_state.sclk_bootup_value = le32_to_cpu(fw_info->ulDefaultEngineClock); + data->vbios_boot_state.mclk_bootup_value = le32_to_cpu(fw_info->ulDefaultMemoryClock); + data->vbios_boot_state.mvdd_bootup_value = le16_to_cpu(fw_info->usBootUpMVDDCVoltage); + data->vbios_boot_state.vddc_bootup_value = le16_to_cpu(fw_info->usBootUpVDDCVoltage); + data->vbios_boot_state.vddci_bootup_value = le16_to_cpu(fw_info->usBootUpVDDCIVoltage); + data->vbios_boot_state.pcie_gen_bootup_value = tonga_get_current_pcie_speed(hwmgr); + data->vbios_boot_state.pcie_lane_bootup_value = + (uint16_t)tonga_get_current_pcie_lane_number(hwmgr); + + /* set boot power state */ + ps->performance_levels[0].memory_clock = data->vbios_boot_state.mclk_bootup_value; + ps->performance_levels[0].engine_clock = data->vbios_boot_state.sclk_bootup_value; + ps->performance_levels[0].pcie_gen = data->vbios_boot_state.pcie_gen_bootup_value; + ps->performance_levels[0].pcie_lane = data->vbios_boot_state.pcie_lane_bootup_value; + + return 0; +} + +static int tonga_get_pp_table_entry_callback_func(struct pp_hwmgr *hwmgr, + void *state, struct pp_power_state *power_state, + void *pp_table, uint32_t classification_flag) +{ + struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); + + struct tonga_power_state *tonga_ps = + (struct tonga_power_state *)(&(power_state->hardware)); + + struct tonga_performance_level *performance_level; + + ATOM_Tonga_State *state_entry = (ATOM_Tonga_State *)state; + + ATOM_Tonga_POWERPLAYTABLE *powerplay_table = + (ATOM_Tonga_POWERPLAYTABLE *)pp_table; + + ATOM_Tonga_SCLK_Dependency_Table *sclk_dep_table = + (ATOM_Tonga_SCLK_Dependency_Table *) + (((unsigned long)powerplay_table) + + le16_to_cpu(powerplay_table->usSclkDependencyTableOffset)); + + ATOM_Tonga_MCLK_Dependency_Table *mclk_dep_table = + (ATOM_Tonga_MCLK_Dependency_Table *) + (((unsigned long)powerplay_table) + + le16_to_cpu(powerplay_table->usMclkDependencyTableOffset)); + + /* The following fields are not initialized here: id orderedList allStatesList */ + power_state->classification.ui_label = + (le16_to_cpu(state_entry->usClassification) & + ATOM_PPLIB_CLASSIFICATION_UI_MASK) >> + ATOM_PPLIB_CLASSIFICATION_UI_SHIFT; + power_state->classification.flags = classification_flag; + /* NOTE: There is a classification2 flag in BIOS that is not being used right now */ + + power_state->classification.temporary_state = false; + power_state->classification.to_be_deleted = false; + + power_state->validation.disallowOnDC = + (0 != (le32_to_cpu(state_entry->ulCapsAndSettings) & ATOM_Tonga_DISALLOW_ON_DC)); + + power_state->pcie.lanes = 0; + + power_state->display.disableFrameModulation = false; + power_state->display.limitRefreshrate = false; + power_state->display.enableVariBright = + (0 != (le32_to_cpu(state_entry->ulCapsAndSettings) & ATOM_Tonga_ENABLE_VARIBRIGHT)); + + power_state->validation.supportedPowerLevels = 0; + power_state->uvd_clocks.VCLK = 0; + power_state->uvd_clocks.DCLK = 0; + power_state->temperatures.min = 0; + power_state->temperatures.max = 0; + + performance_level = &(tonga_ps->performance_levels + [tonga_ps->performance_level_count++]); + + PP_ASSERT_WITH_CODE( + (tonga_ps->performance_level_count < SMU72_MAX_LEVELS_GRAPHICS), + "Performance levels exceeds SMC limit!", + return -1); + + PP_ASSERT_WITH_CODE( + (tonga_ps->performance_level_count <= + hwmgr->platform_descriptor.hardwareActivityPerformanceLevels), + "Performance levels exceeds Driver limit!", + return -1); + + /* Performance levels are arranged 
from low to high. */ + performance_level->memory_clock = + le32_to_cpu(mclk_dep_table->entries[state_entry->ucMemoryClockIndexLow].ulMclk); + + performance_level->engine_clock = + le32_to_cpu(sclk_dep_table->entries[state_entry->ucEngineClockIndexLow].ulSclk); + + performance_level->pcie_gen = get_pcie_gen_support( + data->pcie_gen_cap, + state_entry->ucPCIEGenLow); + + performance_level->pcie_lane = get_pcie_lane_support( + data->pcie_lane_cap, + state_entry->ucPCIELaneHigh); + + performance_level = + &(tonga_ps->performance_levels[tonga_ps->performance_level_count++]); + + performance_level->memory_clock = + le32_to_cpu(mclk_dep_table->entries[state_entry->ucMemoryClockIndexHigh].ulMclk); + + performance_level->engine_clock = + le32_to_cpu(sclk_dep_table->entries[state_entry->ucEngineClockIndexHigh].ulSclk); + + performance_level->pcie_gen = get_pcie_gen_support( + data->pcie_gen_cap, + state_entry->ucPCIEGenHigh); + + performance_level->pcie_lane = get_pcie_lane_support( + data->pcie_lane_cap, + state_entry->ucPCIELaneHigh); + + return 0; +} + +static int tonga_get_pp_table_entry(struct pp_hwmgr *hwmgr, + unsigned long entry_index, struct pp_power_state *ps) +{ + int result; + struct tonga_power_state *tonga_ps; + struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); + + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + + struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table = + table_info->vdd_dep_on_mclk; + + ps->hardware.magic = PhwTonga_Magic; + + tonga_ps = cast_phw_tonga_power_state(&(ps->hardware)); + + result = tonga_get_powerplay_table_entry(hwmgr, entry_index, ps, + tonga_get_pp_table_entry_callback_func); + + /* This is the earliest time we have all the dependency table and the VBIOS boot state + * as PP_Tables_GetPowerPlayTableEntry retrieves the VBIOS boot state + * if there is only one VDDCI/MCLK level, check if it's the same as VBIOS boot state + */ + if (dep_mclk_table != NULL && dep_mclk_table->count == 1) { + if (dep_mclk_table->entries[0].clk != + data->vbios_boot_state.mclk_bootup_value) + printk(KERN_ERR "Single MCLK entry VDDCI/MCLK dependency table " + "does not match VBIOS boot MCLK level"); + if (dep_mclk_table->entries[0].vddci != + data->vbios_boot_state.vddci_bootup_value) + printk(KERN_ERR "Single VDDCI entry VDDCI/MCLK dependency table " + "does not match VBIOS boot VDDCI level"); + } + + /* set DC compatible flag if this state supports DC */ + if (!ps->validation.disallowOnDC) + tonga_ps->dc_compatible = true; + + if (ps->classification.flags & PP_StateClassificationFlag_ACPI) + data->acpi_pcie_gen = tonga_ps->performance_levels[0].pcie_gen; + else if (ps->classification.flags & PP_StateClassificationFlag_Boot) { + if (data->bacos.best_match == 0xffff) { + /* For V.I. 
use boot state as base BACO state */ + data->bacos.best_match = PP_StateClassificationFlag_Boot; + data->bacos.performance_level = tonga_ps->performance_levels[0]; + } + } + + tonga_ps->uvd_clocks.VCLK = ps->uvd_clocks.VCLK; + tonga_ps->uvd_clocks.DCLK = ps->uvd_clocks.DCLK; + + if (!result) { + uint32_t i; + + switch (ps->classification.ui_label) { + case PP_StateUILabel_Performance: + data->use_pcie_performance_levels = true; + + for (i = 0; i < tonga_ps->performance_level_count; i++) { + if (data->pcie_gen_performance.max < + tonga_ps->performance_levels[i].pcie_gen) + data->pcie_gen_performance.max = + tonga_ps->performance_levels[i].pcie_gen; + + if (data->pcie_gen_performance.min > + tonga_ps->performance_levels[i].pcie_gen) + data->pcie_gen_performance.min = + tonga_ps->performance_levels[i].pcie_gen; + + if (data->pcie_lane_performance.max < + tonga_ps->performance_levels[i].pcie_lane) + data->pcie_lane_performance.max = + tonga_ps->performance_levels[i].pcie_lane; + + if (data->pcie_lane_performance.min > + tonga_ps->performance_levels[i].pcie_lane) + data->pcie_lane_performance.min = + tonga_ps->performance_levels[i].pcie_lane; + } + break; + case PP_StateUILabel_Battery: + data->use_pcie_power_saving_levels = true; + + for (i = 0; i < tonga_ps->performance_level_count; i++) { + if (data->pcie_gen_power_saving.max < + tonga_ps->performance_levels[i].pcie_gen) + data->pcie_gen_power_saving.max = + tonga_ps->performance_levels[i].pcie_gen; + + if (data->pcie_gen_power_saving.min > + tonga_ps->performance_levels[i].pcie_gen) + data->pcie_gen_power_saving.min = + tonga_ps->performance_levels[i].pcie_gen; + + if (data->pcie_lane_power_saving.max < + tonga_ps->performance_levels[i].pcie_lane) + data->pcie_lane_power_saving.max = + tonga_ps->performance_levels[i].pcie_lane; + + if (data->pcie_lane_power_saving.min > + tonga_ps->performance_levels[i].pcie_lane) + data->pcie_lane_power_saving.min = + tonga_ps->performance_levels[i].pcie_lane; + } + break; + default: + break; + } + } + return 0; +} + +static void +tonga_print_current_perforce_level(struct pp_hwmgr *hwmgr, struct seq_file *m) +{ + uint32_t sclk, mclk, activity_percent; + uint32_t offset; + struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); + + smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)(PPSMC_MSG_API_GetSclkFrequency)); + + sclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0); + + smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)(PPSMC_MSG_API_GetMclkFrequency)); + + mclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0); + seq_printf(m, "\n [ mclk ]: %u MHz\n\n [ sclk ]: %u MHz\n", mclk/100, sclk/100); + + + offset = data->soft_regs_start + offsetof(SMU72_SoftRegisters, AverageGraphicsActivity); + activity_percent = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset); + activity_percent += 0x80; + activity_percent >>= 8; + + seq_printf(m, "\n [GPU load]: %u%%\n\n", activity_percent > 100 ? 
100 : activity_percent); + +} + +static int tonga_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, const void *input) +{ + const struct phm_set_power_state_input *states = (const struct phm_set_power_state_input *)input; + const struct tonga_power_state *tonga_ps = cast_const_phw_tonga_power_state(states->pnew_state); + struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); + struct tonga_single_dpm_table *psclk_table = &(data->dpm_table.sclk_table); + uint32_t sclk = tonga_ps->performance_levels[tonga_ps->performance_level_count-1].engine_clock; + struct tonga_single_dpm_table *pmclk_table = &(data->dpm_table.mclk_table); + uint32_t mclk = tonga_ps->performance_levels[tonga_ps->performance_level_count-1].memory_clock; + struct PP_Clocks min_clocks = {0}; + uint32_t i; + struct cgs_display_info info = {0}; + + data->need_update_smu7_dpm_table = 0; + + for (i = 0; i < psclk_table->count; i++) { + if (sclk == psclk_table->dpm_levels[i].value) + break; + } + + if (i >= psclk_table->count) + data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK; + else { + /* TODO: Check SCLK in DAL's minimum clocks in case DeepSleep divider update is required.*/ + if(data->display_timing.min_clock_insr != min_clocks.engineClockInSR) + data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK; + } + + for (i=0; i < pmclk_table->count; i++) { + if (mclk == pmclk_table->dpm_levels[i].value) + break; + } + + if (i >= pmclk_table->count) + data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK; + + cgs_get_active_displays_info(hwmgr->device, &info); + + if (data->display_timing.num_existing_displays != info.display_count) + data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK; + + return 0; +} + +static uint16_t tonga_get_maximum_link_speed(struct pp_hwmgr *hwmgr, const struct tonga_power_state *hw_ps) +{ + uint32_t i; + uint32_t sclk, max_sclk = 0; + struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); + struct tonga_dpm_table *pdpm_table = &data->dpm_table; + + for (i = 0; i < hw_ps->performance_level_count; i++) { + sclk = hw_ps->performance_levels[i].engine_clock; + if (max_sclk < sclk) + max_sclk = sclk; + } + + for (i = 0; i < pdpm_table->sclk_table.count; i++) { + if (pdpm_table->sclk_table.dpm_levels[i].value == max_sclk) + return (uint16_t) ((i >= pdpm_table->pcie_speed_table.count) ? 
+ pdpm_table->pcie_speed_table.dpm_levels[pdpm_table->pcie_speed_table.count-1].value : + pdpm_table->pcie_speed_table.dpm_levels[i].value); + } + + return 0; +} + +static int tonga_request_link_speed_change_before_state_change(struct pp_hwmgr *hwmgr, const void *input) +{ + const struct phm_set_power_state_input *states = (const struct phm_set_power_state_input *)input; + struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); + const struct tonga_power_state *tonga_nps = cast_const_phw_tonga_power_state(states->pnew_state); + const struct tonga_power_state *tonga_cps = cast_const_phw_tonga_power_state(states->pcurrent_state); + + uint16_t target_link_speed = tonga_get_maximum_link_speed(hwmgr, tonga_nps); + uint16_t current_link_speed; + + if (data->force_pcie_gen == PP_PCIEGenInvalid) + current_link_speed = tonga_get_maximum_link_speed(hwmgr, tonga_cps); + else + current_link_speed = data->force_pcie_gen; + + data->force_pcie_gen = PP_PCIEGenInvalid; + data->pspp_notify_required = false; + if (target_link_speed > current_link_speed) { + switch(target_link_speed) { + case PP_PCIEGen3: + if (0 == acpi_pcie_perf_request(hwmgr->device, PCIE_PERF_REQ_GEN3, false)) + break; + data->force_pcie_gen = PP_PCIEGen2; + if (current_link_speed == PP_PCIEGen2) + break; + case PP_PCIEGen2: + if (0 == acpi_pcie_perf_request(hwmgr->device, PCIE_PERF_REQ_GEN2, false)) + break; + default: + data->force_pcie_gen = tonga_get_current_pcie_speed(hwmgr); + break; + } + } else { + if (target_link_speed < current_link_speed) + data->pspp_notify_required = true; + } + + return 0; +} + +static int tonga_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) +{ + struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); + + if (0 == data->need_update_smu7_dpm_table) + return 0; + + if ((0 == data->sclk_dpm_key_disabled) && + (data->need_update_smu7_dpm_table & + (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) { + PP_ASSERT_WITH_CODE( + true == tonga_is_dpm_running(hwmgr), + "Trying to freeze SCLK DPM when DPM is disabled", + ); + PP_ASSERT_WITH_CODE( + 0 == smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_SCLKDPM_FreezeLevel), + "Failed to freeze SCLK DPM during FreezeSclkMclkDPM Function!", + return -1); + } + + if ((0 == data->mclk_dpm_key_disabled) && + (data->need_update_smu7_dpm_table & + DPMTABLE_OD_UPDATE_MCLK)) { + PP_ASSERT_WITH_CODE(true == tonga_is_dpm_running(hwmgr), + "Trying to freeze MCLK DPM when DPM is disabled", + ); + PP_ASSERT_WITH_CODE( + 0 == smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_MCLKDPM_FreezeLevel), + "Failed to freeze MCLK DPM during FreezeSclkMclkDPM Function!", + return -1); + } + + return 0; +} + +static int tonga_populate_and_upload_sclk_mclk_dpm_levels(struct pp_hwmgr *hwmgr, const void *input) +{ + int result = 0; + + const struct phm_set_power_state_input *states = (const struct phm_set_power_state_input *)input; + const struct tonga_power_state *tonga_ps = cast_const_phw_tonga_power_state(states->pnew_state); + struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); + uint32_t sclk = tonga_ps->performance_levels[tonga_ps->performance_level_count-1].engine_clock; + uint32_t mclk = tonga_ps->performance_levels[tonga_ps->performance_level_count-1].memory_clock; + struct tonga_dpm_table *pdpm_table = &data->dpm_table; + + struct tonga_dpm_table *pgolden_dpm_table = &data->golden_dpm_table; + uint32_t dpm_count, clock_percent; + uint32_t i; + + if (0 == data->need_update_smu7_dpm_table) + return 0; + + if (data->need_update_smu7_dpm_table & 
DPMTABLE_OD_UPDATE_SCLK) { + pdpm_table->sclk_table.dpm_levels[pdpm_table->sclk_table.count-1].value = sclk; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinACSupport) || + phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinDCSupport)) { + /* Need to do calculation based on the golden DPM table + * as the Heatmap GPU Clock axis is also based on the default values + */ + PP_ASSERT_WITH_CODE( + (pgolden_dpm_table->sclk_table.dpm_levels[pgolden_dpm_table->sclk_table.count-1].value != 0), + "Divide by 0!", + return -1); + dpm_count = pdpm_table->sclk_table.count < 2 ? 0 : pdpm_table->sclk_table.count-2; + for (i = dpm_count; i > 1; i--) { + if (sclk > pgolden_dpm_table->sclk_table.dpm_levels[pgolden_dpm_table->sclk_table.count-1].value) { + clock_percent = ((sclk - pgolden_dpm_table->sclk_table.dpm_levels[pgolden_dpm_table->sclk_table.count-1].value)*100) / + pgolden_dpm_table->sclk_table.dpm_levels[pgolden_dpm_table->sclk_table.count-1].value; + + pdpm_table->sclk_table.dpm_levels[i].value = + pgolden_dpm_table->sclk_table.dpm_levels[i].value + + (pgolden_dpm_table->sclk_table.dpm_levels[i].value * clock_percent)/100; + + } else if (pgolden_dpm_table->sclk_table.dpm_levels[pdpm_table->sclk_table.count-1].value > sclk) { + clock_percent = ((pgolden_dpm_table->sclk_table.dpm_levels[pgolden_dpm_table->sclk_table.count-1].value - sclk)*100) / + pgolden_dpm_table->sclk_table.dpm_levels[pgolden_dpm_table->sclk_table.count-1].value; + + pdpm_table->sclk_table.dpm_levels[i].value = + pgolden_dpm_table->sclk_table.dpm_levels[i].value - + (pgolden_dpm_table->sclk_table.dpm_levels[i].value * clock_percent)/100; + } else + pdpm_table->sclk_table.dpm_levels[i].value = + pgolden_dpm_table->sclk_table.dpm_levels[i].value; + } + } + } + + if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK) { + pdpm_table->mclk_table.dpm_levels[pdpm_table->mclk_table.count-1].value = mclk; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinACSupport) || + phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinDCSupport)) { + + PP_ASSERT_WITH_CODE( + (pgolden_dpm_table->mclk_table.dpm_levels[pgolden_dpm_table->mclk_table.count-1].value != 0), + "Divide by 0!", + return -1); + dpm_count = pdpm_table->mclk_table.count < 2? 
0 : pdpm_table->mclk_table.count-2; + for (i = dpm_count; i > 1; i--) { + if (mclk > pgolden_dpm_table->mclk_table.dpm_levels[pgolden_dpm_table->mclk_table.count-1].value) { + clock_percent = ((mclk - pgolden_dpm_table->mclk_table.dpm_levels[pgolden_dpm_table->mclk_table.count-1].value)*100) / + pgolden_dpm_table->mclk_table.dpm_levels[pgolden_dpm_table->mclk_table.count-1].value; + + pdpm_table->mclk_table.dpm_levels[i].value = + pgolden_dpm_table->mclk_table.dpm_levels[i].value + + (pgolden_dpm_table->mclk_table.dpm_levels[i].value * clock_percent)/100; + + } else if (pgolden_dpm_table->mclk_table.dpm_levels[pdpm_table->mclk_table.count-1].value > mclk) { + clock_percent = ((pgolden_dpm_table->mclk_table.dpm_levels[pgolden_dpm_table->mclk_table.count-1].value - mclk)*100) / + pgolden_dpm_table->mclk_table.dpm_levels[pgolden_dpm_table->mclk_table.count-1].value; + + pdpm_table->mclk_table.dpm_levels[i].value = + pgolden_dpm_table->mclk_table.dpm_levels[i].value - + (pgolden_dpm_table->mclk_table.dpm_levels[i].value * clock_percent)/100; + } else + pdpm_table->mclk_table.dpm_levels[i].value = pgolden_dpm_table->mclk_table.dpm_levels[i].value; + } + } + } + + if (data->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK)) { + result = tonga_populate_all_memory_levels(hwmgr); + PP_ASSERT_WITH_CODE((0 == result), + "Failed to populate SCLK during PopulateNewDPMClocksStates Function!", + return result); + } + + if (data->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_MCLK + DPMTABLE_UPDATE_MCLK)) { + /*populate MCLK dpm table to SMU7 */ + result = tonga_populate_all_memory_levels(hwmgr); + PP_ASSERT_WITH_CODE((0 == result), + "Failed to populate MCLK during PopulateNewDPMClocksStates Function!", + return result); + } + + return result; +} + +static int tonga_trim_single_dpm_states(struct pp_hwmgr *hwmgr, + struct tonga_single_dpm_table * pdpm_table, + uint32_t low_limit, uint32_t high_limit) +{ + uint32_t i; + + for (i = 0; i < pdpm_table->count; i++) { + if ((pdpm_table->dpm_levels[i].value < low_limit) || + (pdpm_table->dpm_levels[i].value > high_limit)) + pdpm_table->dpm_levels[i].enabled = false; + else + pdpm_table->dpm_levels[i].enabled = true; + } + return 0; +} + +static int tonga_trim_dpm_states(struct pp_hwmgr *hwmgr, const struct tonga_power_state *hw_state) +{ + int result = 0; + struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); + uint32_t high_limit_count; + + PP_ASSERT_WITH_CODE((hw_state->performance_level_count >= 1), + "power state did not have any performance level", + return -1); + + high_limit_count = (1 == hw_state->performance_level_count) ? 
0: 1; + + tonga_trim_single_dpm_states(hwmgr, + &(data->dpm_table.sclk_table), + hw_state->performance_levels[0].engine_clock, + hw_state->performance_levels[high_limit_count].engine_clock); + + tonga_trim_single_dpm_states(hwmgr, + &(data->dpm_table.mclk_table), + hw_state->performance_levels[0].memory_clock, + hw_state->performance_levels[high_limit_count].memory_clock); + + return result; +} + +static int tonga_generate_dpm_level_enable_mask(struct pp_hwmgr *hwmgr, const void *input) +{ + int result; + const struct phm_set_power_state_input *states = (const struct phm_set_power_state_input *)input; + struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); + const struct tonga_power_state *tonga_ps = cast_const_phw_tonga_power_state(states->pnew_state); + + result = tonga_trim_dpm_states(hwmgr, tonga_ps); + if (0 != result) + return result; + + data->dpm_level_enable_mask.sclk_dpm_enable_mask = tonga_get_dpm_level_enable_mask_value(&data->dpm_table.sclk_table); + data->dpm_level_enable_mask.mclk_dpm_enable_mask = tonga_get_dpm_level_enable_mask_value(&data->dpm_table.mclk_table); + data->last_mclk_dpm_enable_mask = data->dpm_level_enable_mask.mclk_dpm_enable_mask; + if (data->uvd_enabled) + data->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE; + + data->dpm_level_enable_mask.pcie_dpm_enable_mask = tonga_get_dpm_level_enable_mask_value(&data->dpm_table.pcie_speed_table); + + return 0; +} + +int tonga_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable) +{ + return smum_send_msg_to_smc(hwmgr->smumgr, enable ? + (PPSMC_Msg)PPSMC_MSG_VCEDPM_Enable : + (PPSMC_Msg)PPSMC_MSG_VCEDPM_Disable); +} + +int tonga_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable) +{ + return smum_send_msg_to_smc(hwmgr->smumgr, enable ? + (PPSMC_Msg)PPSMC_MSG_UVDDPM_Enable : + (PPSMC_Msg)PPSMC_MSG_UVDDPM_Disable); +} + +int tonga_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate) +{ + struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); + uint32_t mm_boot_level_offset, mm_boot_level_value; + struct phm_ppt_v1_information *ptable_information = (struct phm_ppt_v1_information *)(hwmgr->pptable); + + if (!bgate) { + data->smc_state_table.UvdBootLevel = (uint8_t) (ptable_information->mm_dep_table->count - 1); + mm_boot_level_offset = data->dpm_table_start + offsetof(SMU72_Discrete_DpmTable, UvdBootLevel); + mm_boot_level_offset /= 4; + mm_boot_level_offset *= 4; + mm_boot_level_value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, mm_boot_level_offset); + mm_boot_level_value &= 0x00FFFFFF; + mm_boot_level_value |= data->smc_state_table.UvdBootLevel << 24; + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value); + + if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_UVDDPM) || + phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState)) + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_UVDDPM_SetEnabledMask, + (uint32_t)(1 << data->smc_state_table.UvdBootLevel)); + } + + return tonga_enable_disable_uvd_dpm(hwmgr, !bgate); +} + +int tonga_update_vce_dpm(struct pp_hwmgr *hwmgr, const void *input) +{ + const struct phm_set_power_state_input *states = (const struct phm_set_power_state_input *)input; + struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); + const struct tonga_power_state *tonga_nps = cast_const_phw_tonga_power_state(states->pnew_state); + const struct tonga_power_state *tonga_cps = 
cast_const_phw_tonga_power_state(states->pcurrent_state); + + uint32_t mm_boot_level_offset, mm_boot_level_value; + struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); + + if (tonga_nps->vce_clocks.EVCLK > 0 && (tonga_cps == NULL || tonga_cps->vce_clocks.EVCLK == 0)) { + data->smc_state_table.VceBootLevel = (uint8_t) (pptable_info->mm_dep_table->count - 1); + + mm_boot_level_offset = data->dpm_table_start + offsetof(SMU72_Discrete_DpmTable, VceBootLevel); + mm_boot_level_offset /= 4; + mm_boot_level_offset *= 4; + mm_boot_level_value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, mm_boot_level_offset); + mm_boot_level_value &= 0xFF00FFFF; + mm_boot_level_value |= data->smc_state_table.VceBootLevel << 16; + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value); + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState)) + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_VCEDPM_SetEnabledMask, + (uint32_t)(1 << data->smc_state_table.VceBootLevel)); + + tonga_enable_disable_vce_dpm(hwmgr, true); + } else if (tonga_nps->vce_clocks.EVCLK == 0 && tonga_cps != NULL && tonga_cps->vce_clocks.EVCLK > 0) + tonga_enable_disable_vce_dpm(hwmgr, false); + + return 0; +} + +static int tonga_update_and_upload_mc_reg_table(struct pp_hwmgr *hwmgr) +{ + struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); + + uint32_t address; + int32_t result; + + if (0 == (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) + return 0; + + + memset(&data->mc_reg_table, 0, sizeof(SMU72_Discrete_MCRegisters)); + + result = tonga_convert_mc_reg_table_to_smc(hwmgr, &(data->mc_reg_table)); + + if(result != 0) + return result; + + + address = data->mc_reg_table_start + (uint32_t)offsetof(SMU72_Discrete_MCRegisters, data[0]); + + return tonga_copy_bytes_to_smc(hwmgr->smumgr, address, + (uint8_t *)&data->mc_reg_table.data[0], + sizeof(SMU72_Discrete_MCRegisterSet) * data->dpm_table.mclk_table.count, + data->sram_end); +} + +static int tonga_program_memory_timing_parameters_conditionally(struct pp_hwmgr *hwmgr) +{ + struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); + + if (data->need_update_smu7_dpm_table & + (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_OD_UPDATE_MCLK)) + return tonga_program_memory_timing_parameters(hwmgr); + + return 0; +} + +static int tonga_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) +{ + struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); + + if (0 == data->need_update_smu7_dpm_table) + return 0; + + if ((0 == data->sclk_dpm_key_disabled) && + (data->need_update_smu7_dpm_table & + (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) { + + PP_ASSERT_WITH_CODE(true == tonga_is_dpm_running(hwmgr), + "Trying to Unfreeze SCLK DPM when DPM is disabled", + ); + PP_ASSERT_WITH_CODE( + 0 == smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_SCLKDPM_UnfreezeLevel), + "Failed to unfreeze SCLK DPM during UnFreezeSclkMclkDPM Function!", + return -1); + } + + if ((0 == data->mclk_dpm_key_disabled) && + (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) { + + PP_ASSERT_WITH_CODE( + true == tonga_is_dpm_running(hwmgr), + "Trying to Unfreeze MCLK DPM when DPM is disabled", + ); + PP_ASSERT_WITH_CODE( + 0 == smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_SCLKDPM_UnfreezeLevel), + "Failed to unfreeze MCLK DPM during UnFreezeSclkMclkDPM Function!", + return -1); + } + + data->need_update_smu7_dpm_table = 0; + + return 0; +} + 
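+/**
+* Notify the platform of the PCIe link speed selected for the new power state.
+*
+* Issues an ACPI PCIe performance request (Gen1/Gen2/Gen3) once the state
+* change has completed, if a PSPP notification or performance request is pending.
+*
+* @param hwmgr the address of the powerplay hardware manager.
+* @param input the pointer to the set power state input data.
+* @return always 0
+*/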
+static int tonga_notify_link_speed_change_after_state_change(struct pp_hwmgr *hwmgr, const void *input) +{ + const struct phm_set_power_state_input *states = (const struct phm_set_power_state_input *)input; + struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); + const struct tonga_power_state *tonga_ps = cast_const_phw_tonga_power_state(states->pnew_state); + uint16_t target_link_speed = tonga_get_maximum_link_speed(hwmgr, tonga_ps); + uint8_t request; + + if (data->pspp_notify_required || + data->pcie_performance_request) { + if (target_link_speed == PP_PCIEGen3) + request = PCIE_PERF_REQ_GEN3; + else if (target_link_speed == PP_PCIEGen2) + request = PCIE_PERF_REQ_GEN2; + else + request = PCIE_PERF_REQ_GEN1; + + if(request == PCIE_PERF_REQ_GEN1 && tonga_get_current_pcie_speed(hwmgr) > 0) { + data->pcie_performance_request = false; + return 0; + } + + if (0 != acpi_pcie_perf_request(hwmgr->device, request, false)) { + if (PP_PCIEGen2 == target_link_speed) + printk("PSPP request to switch to Gen2 from Gen3 Failed!"); + else + printk("PSPP request to switch to Gen1 from Gen2 Failed!"); + } + } + + data->pcie_performance_request = false; + return 0; +} + +static int tonga_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input) +{ + int tmp_result, result = 0; + + tmp_result = tonga_find_dpm_states_clocks_in_dpm_table(hwmgr, input); + PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to find DPM states clocks in DPM table!", result = tmp_result); + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PCIEPerformanceRequest)) { + tmp_result = tonga_request_link_speed_change_before_state_change(hwmgr, input); + PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to request link speed change before state change!", result = tmp_result); + } + + tmp_result = tonga_freeze_sclk_mclk_dpm(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to freeze SCLK MCLK DPM!", result = tmp_result); + + tmp_result = tonga_populate_and_upload_sclk_mclk_dpm_levels(hwmgr, input); + PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to populate and upload SCLK MCLK DPM levels!", result = tmp_result); + + tmp_result = tonga_generate_dpm_level_enable_mask(hwmgr, input); + PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to generate DPM level enabled mask!", result = tmp_result); + + tmp_result = tonga_update_vce_dpm(hwmgr, input); + PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to update VCE DPM!", result = tmp_result); + + tmp_result = tonga_update_sclk_threshold(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to update SCLK threshold!", result = tmp_result); + + tmp_result = tonga_update_and_upload_mc_reg_table(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to upload MC reg table!", result = tmp_result); + + tmp_result = tonga_program_memory_timing_parameters_conditionally(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to program memory timing parameters!", result = tmp_result); + + tmp_result = tonga_unfreeze_sclk_mclk_dpm(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to unfreeze SCLK MCLK DPM!", result = tmp_result); + + tmp_result = tonga_upload_dpm_level_enable_mask(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to upload DPM level enabled mask!", result = tmp_result); + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PCIEPerformanceRequest)) { + tmp_result = tonga_notify_link_speed_change_after_state_change(hwmgr, input); + PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to notify link 
speed change after state change!", result = tmp_result); + } + + return result; +} + +/** +* Set maximum target operating fan output PWM +* +* @param hwmgr: the address of the powerplay hardware manager. +* @param us_max_fan_pwm: max operating fan PWM in percent +* @return The response that came from the SMC. +*/ +static int tonga_set_max_fan_pwm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_pwm) +{ + hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanPWM = us_max_fan_pwm; + + if (phm_is_hw_access_blocked(hwmgr)) + return 0; + + return (0 == smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_SetFanPwmMax, us_max_fan_pwm) ? 0 : -1); +} + +int tonga_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr) +{ + uint32_t num_active_displays = 0; + struct cgs_display_info info = {0}; + info.mode_info = NULL; + + cgs_get_active_displays_info(hwmgr->device, &info); + + num_active_displays = info.display_count; + + if (num_active_displays > 1) /* to do && (pHwMgr->pPECI->displayConfiguration.bMultiMonitorInSync != TRUE)) */ + tonga_notify_smc_display_change(hwmgr, false); + else + tonga_notify_smc_display_change(hwmgr, true); + + return 0; +} + +/** +* Programs the display gap +* +* @param hwmgr the address of the powerplay hardware manager. +* @return always OK +*/ +int tonga_program_display_gap(struct pp_hwmgr *hwmgr) +{ + struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); + uint32_t num_active_displays = 0; + uint32_t display_gap = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL); + uint32_t display_gap2; + uint32_t pre_vbi_time_in_us; + uint32_t frame_time_in_us; + uint32_t ref_clock; + uint32_t refresh_rate = 0; + struct cgs_display_info info = {0}; + struct cgs_mode_info mode_info; + + info.mode_info = &mode_info; + + cgs_get_active_displays_info(hwmgr->device, &info); + num_active_displays = info.display_count; + + display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL, DISP_GAP, (num_active_displays > 0) ? DISPLAY_GAP_VBLANK_OR_WM : DISPLAY_GAP_IGNORE); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL, display_gap); + + ref_clock = mode_info.ref_clock; + refresh_rate = mode_info.refresh_rate; + + if (0 == refresh_rate) + refresh_rate = 60; + + frame_time_in_us = 1000000 / refresh_rate; + + pre_vbi_time_in_us = frame_time_in_us - 200 - mode_info.vblank_time_us; + display_gap2 = pre_vbi_time_in_us * (ref_clock / 100); + + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL2, display_gap2); + + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, data->soft_regs_start + offsetof(SMU72_SoftRegisters, PreVBlankGap), 0x64); + + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, data->soft_regs_start + offsetof(SMU72_SoftRegisters, VBlankTimeout), (frame_time_in_us - pre_vbi_time_in_us)); + + if (num_active_displays == 1) + tonga_notify_smc_display_change(hwmgr, true); + + return 0; +} + +int tonga_display_configuration_changed_task(struct pp_hwmgr *hwmgr) +{ + tonga_program_display_gap(hwmgr); + + /* to do PhwTonga_CacUpdateDisplayConfiguration(pHwMgr); */ + return 0; +} + +/** +* Set maximum target operating fan output RPM +* +* @param hwmgr: the address of the powerplay hardware manager. +* @param us_max_fan_pwm: max operating fan RPM value. +* @return The response that came from the SMC. 
+*/ +static int tonga_set_max_fan_rpm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_pwm) +{ + hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM = us_max_fan_pwm; + + if (phm_is_hw_access_blocked(hwmgr)) + return 0; + + return (0 == smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_SetFanRpmMax, us_max_fan_pwm) ? 0 : -1); +} + +uint32_t tonga_get_xclk(struct pp_hwmgr *hwmgr) +{ + uint32_t reference_clock; + uint32_t tc; + uint32_t divide; + + ATOM_FIRMWARE_INFO *fw_info; + uint16_t size; + uint8_t frev, crev; + int index = GetIndexIntoMasterTable(DATA, FirmwareInfo); + + tc = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_CLKPIN_CNTL_2, MUX_TCLK_TO_XCLK); + + if (tc) + return TCLK; + + fw_info = (ATOM_FIRMWARE_INFO *)cgs_atom_get_data_table(hwmgr->device, index, + &size, &frev, &crev); + + if (!fw_info) + return 0; + + reference_clock = le16_to_cpu(fw_info->usMinPixelClockPLL_Output); + + divide = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_CLKPIN_CNTL, XTALIN_DIVIDE); + + if (0 != divide) + return reference_clock / 4; + + return reference_clock; +} + +int tonga_dpm_set_interrupt_state(void *private_data, + unsigned src_id, unsigned type, + int enabled) +{ + uint32_t cg_thermal_int; + struct pp_hwmgr *hwmgr = ((struct pp_eventmgr *)private_data)->hwmgr; + + if (hwmgr == NULL) + return -EINVAL; + + switch (type) { + case AMD_THERMAL_IRQ_LOW_TO_HIGH: + if (enabled) { + cg_thermal_int = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT); + cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK; + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT, cg_thermal_int); + } else { + cg_thermal_int = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT); + cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK; + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT, cg_thermal_int); + } + break; + + case AMD_THERMAL_IRQ_HIGH_TO_LOW: + if (enabled) { + cg_thermal_int = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT); + cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK; + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT, cg_thermal_int); + } else { + cg_thermal_int = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT); + cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK; + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT, cg_thermal_int); + } + break; + default: + break; + } + return 0; +} + +int tonga_register_internal_thermal_interrupt(struct pp_hwmgr *hwmgr, + const void *thermal_interrupt_info) +{ + int result; + const struct pp_interrupt_registration_info *info = + (const struct pp_interrupt_registration_info *)thermal_interrupt_info; + + if (info == NULL) + return -EINVAL; + + result = cgs_add_irq_source(hwmgr->device, 230, AMD_THERMAL_IRQ_LAST, + tonga_dpm_set_interrupt_state, + info->call_back, info->context); + + if (result) + return -EINVAL; + + result = cgs_add_irq_source(hwmgr->device, 231, AMD_THERMAL_IRQ_LAST, + tonga_dpm_set_interrupt_state, + info->call_back, info->context); + + if (result) + return -EINVAL; + + return 0; +} + +bool tonga_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr) +{ + struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); + bool is_update_required = false; + struct cgs_display_info info = {0,0,NULL}; + + cgs_get_active_displays_info(hwmgr->device, 
&info); + + if (data->display_timing.num_existing_displays != info.display_count) + is_update_required = true; +/* TO DO NEED TO GET DEEP SLEEP CLOCK FROM DAL + if (phm_cap_enabled(hwmgr->hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) { + cgs_get_min_clock_settings(hwmgr->device, &min_clocks); + if(min_clocks.engineClockInSR != data->display_timing.minClockInSR) + is_update_required = true; +*/ + return is_update_required; +} + +static inline bool tonga_are_power_levels_equal(const struct tonga_performance_level *pl1, + const struct tonga_performance_level *pl2) +{ + return ((pl1->memory_clock == pl2->memory_clock) && + (pl1->engine_clock == pl2->engine_clock) && + (pl1->pcie_gen == pl2->pcie_gen) && + (pl1->pcie_lane == pl2->pcie_lane)); +} + +int tonga_check_states_equal(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *pstate1, const struct pp_hw_power_state *pstate2, bool *equal) +{ + const struct tonga_power_state *psa = cast_const_phw_tonga_power_state(pstate1); + const struct tonga_power_state *psb = cast_const_phw_tonga_power_state(pstate2); + int i; + + if (equal == NULL || psa == NULL || psb == NULL) + return -EINVAL; + + /* If the two states don't even have the same number of performance levels they cannot be the same state. */ + if (psa->performance_level_count != psb->performance_level_count) { + *equal = false; + return 0; + } + + for (i = 0; i < psa->performance_level_count; i++) { + if (!tonga_are_power_levels_equal(&(psa->performance_levels[i]), &(psb->performance_levels[i]))) { + /* If we have found even one performance level pair that is different the states are different. */ + *equal = false; + return 0; + } + } + + /* If all performance levels are the same try to use the UVD clocks to break the tie.*/ + *equal = ((psa->uvd_clocks.VCLK == psb->uvd_clocks.VCLK) && (psa->uvd_clocks.DCLK == psb->uvd_clocks.DCLK)); + *equal &= ((psa->vce_clocks.EVCLK == psb->vce_clocks.EVCLK) && (psa->vce_clocks.ECCLK == psb->vce_clocks.ECCLK)); + *equal &= (psa->sclk_threshold == psb->sclk_threshold); + *equal &= (psa->acp_clk == psb->acp_clk); + + return 0; +} + +static int tonga_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode) +{ + if (mode) { + /* stop auto-manage */ + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_MicrocodeFanControl)) + tonga_fan_ctrl_stop_smc_fan_control(hwmgr); + tonga_fan_ctrl_set_static_mode(hwmgr, mode); + } else + /* restart auto-manage */ + tonga_fan_ctrl_reset_fan_speed_to_default(hwmgr); + + return 0; +} + +static int tonga_get_fan_control_mode(struct pp_hwmgr *hwmgr) +{ + if (hwmgr->fan_ctrl_is_in_default_mode) + return hwmgr->fan_ctrl_default_mode; + else + return PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + CG_FDO_CTRL2, FDO_PWM_MODE); +} + +static const struct pp_hwmgr_func tonga_hwmgr_funcs = { + .backend_init = &tonga_hwmgr_backend_init, + .backend_fini = &tonga_hwmgr_backend_fini, + .asic_setup = &tonga_setup_asic_task, + .dynamic_state_management_enable = &tonga_enable_dpm_tasks, + .apply_state_adjust_rules = tonga_apply_state_adjust_rules, + .force_dpm_level = &tonga_force_dpm_level, + .power_state_set = tonga_set_power_state_tasks, + .get_power_state_size = tonga_get_power_state_size, + .get_mclk = tonga_dpm_get_mclk, + .get_sclk = tonga_dpm_get_sclk, + .patch_boot_state = tonga_dpm_patch_boot_state, + .get_pp_table_entry = tonga_get_pp_table_entry, + .get_num_of_pp_table_entries = tonga_get_number_of_powerplay_table_entries, + 
.print_current_perforce_level = tonga_print_current_perforce_level, + .powerdown_uvd = tonga_phm_powerdown_uvd, + .powergate_uvd = tonga_phm_powergate_uvd, + .powergate_vce = tonga_phm_powergate_vce, + .disable_clock_power_gating = tonga_phm_disable_clock_power_gating, + .notify_smc_display_config_after_ps_adjustment = tonga_notify_smc_display_config_after_ps_adjustment, + .display_config_changed = tonga_display_configuration_changed_task, + .set_max_fan_pwm_output = tonga_set_max_fan_pwm_output, + .set_max_fan_rpm_output = tonga_set_max_fan_rpm_output, + .get_temperature = tonga_thermal_get_temperature, + .stop_thermal_controller = tonga_thermal_stop_thermal_controller, + .get_fan_speed_info = tonga_fan_ctrl_get_fan_speed_info, + .get_fan_speed_percent = tonga_fan_ctrl_get_fan_speed_percent, + .set_fan_speed_percent = tonga_fan_ctrl_set_fan_speed_percent, + .reset_fan_speed_to_default = tonga_fan_ctrl_reset_fan_speed_to_default, + .get_fan_speed_rpm = tonga_fan_ctrl_get_fan_speed_rpm, + .set_fan_speed_rpm = tonga_fan_ctrl_set_fan_speed_rpm, + .uninitialize_thermal_controller = tonga_thermal_ctrl_uninitialize_thermal_controller, + .register_internal_thermal_interrupt = tonga_register_internal_thermal_interrupt, + .check_smc_update_required_for_display_configuration = tonga_check_smc_update_required_for_display_configuration, + .check_states_equal = tonga_check_states_equal, + .set_fan_control_mode = tonga_set_fan_control_mode, + .get_fan_control_mode = tonga_get_fan_control_mode, +}; + +int tonga_hwmgr_init(struct pp_hwmgr *hwmgr) +{ + tonga_hwmgr *data; + + data = kzalloc (sizeof(tonga_hwmgr), GFP_KERNEL); + if (data == NULL) + return -ENOMEM; + memset(data, 0x00, sizeof(tonga_hwmgr)); + + hwmgr->backend = data; + hwmgr->hwmgr_func = &tonga_hwmgr_funcs; + hwmgr->pptable_func = &tonga_pptable_funcs; + pp_tonga_thermal_initialize(hwmgr); + return 0; +} + diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.h new file mode 100644 index 000000000000..49168d262ccc --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.h @@ -0,0 +1,408 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#ifndef TONGA_HWMGR_H +#define TONGA_HWMGR_H + +#include "hwmgr.h" +#include "smu72_discrete.h" +#include "ppatomctrl.h" +#include "ppinterrupt.h" +#include "tonga_powertune.h" + +#define TONGA_MAX_HARDWARE_POWERLEVELS 2 +#define TONGA_DYNCLK_NUMBER_OF_TREND_COEFFICIENTS 15 + +struct tonga_performance_level { + uint32_t memory_clock; + uint32_t engine_clock; + uint16_t pcie_gen; + uint16_t pcie_lane; +}; + +struct _phw_tonga_bacos { + uint32_t best_match; + uint32_t baco_flags; + struct tonga_performance_level performance_level; +}; +typedef struct _phw_tonga_bacos phw_tonga_bacos; + +struct _phw_tonga_uvd_clocks { + uint32_t VCLK; + uint32_t DCLK; +}; + +typedef struct _phw_tonga_uvd_clocks phw_tonga_uvd_clocks; + +struct _phw_tonga_vce_clocks { + uint32_t EVCLK; + uint32_t ECCLK; +}; + +typedef struct _phw_tonga_vce_clocks phw_tonga_vce_clocks; + +struct tonga_power_state { + uint32_t magic; + phw_tonga_uvd_clocks uvd_clocks; + phw_tonga_vce_clocks vce_clocks; + uint32_t sam_clk; + uint32_t acp_clk; + uint16_t performance_level_count; + bool dc_compatible; + uint32_t sclk_threshold; + struct tonga_performance_level performance_levels[TONGA_MAX_HARDWARE_POWERLEVELS]; +}; + +struct _phw_tonga_dpm_level { + bool enabled; + uint32_t value; + uint32_t param1; +}; +typedef struct _phw_tonga_dpm_level phw_tonga_dpm_level; + +#define TONGA_MAX_DEEPSLEEP_DIVIDER_ID 5 +#define MAX_REGULAR_DPM_NUMBER 8 +#define TONGA_MINIMUM_ENGINE_CLOCK 2500 + +struct tonga_single_dpm_table { + uint32_t count; + phw_tonga_dpm_level dpm_levels[MAX_REGULAR_DPM_NUMBER]; +}; + +struct tonga_dpm_table { + struct tonga_single_dpm_table sclk_table; + struct tonga_single_dpm_table mclk_table; + struct tonga_single_dpm_table pcie_speed_table; + struct tonga_single_dpm_table vddc_table; + struct tonga_single_dpm_table vdd_gfx_table; + struct tonga_single_dpm_table vdd_ci_table; + struct tonga_single_dpm_table mvdd_table; +}; +typedef struct _phw_tonga_dpm_table phw_tonga_dpm_table; + + +struct _phw_tonga_clock_regisiters { + uint32_t vCG_SPLL_FUNC_CNTL; + uint32_t vCG_SPLL_FUNC_CNTL_2; + uint32_t vCG_SPLL_FUNC_CNTL_3; + uint32_t vCG_SPLL_FUNC_CNTL_4; + uint32_t vCG_SPLL_SPREAD_SPECTRUM; + uint32_t vCG_SPLL_SPREAD_SPECTRUM_2; + uint32_t vDLL_CNTL; + uint32_t vMCLK_PWRMGT_CNTL; + uint32_t vMPLL_AD_FUNC_CNTL; + uint32_t vMPLL_DQ_FUNC_CNTL; + uint32_t vMPLL_FUNC_CNTL; + uint32_t vMPLL_FUNC_CNTL_1; + uint32_t vMPLL_FUNC_CNTL_2; + uint32_t vMPLL_SS1; + uint32_t vMPLL_SS2; +}; +typedef struct _phw_tonga_clock_regisiters phw_tonga_clock_registers; + +struct _phw_tonga_voltage_smio_registers { + uint32_t vs0_vid_lower_smio_cntl; +}; +typedef struct _phw_tonga_voltage_smio_registers phw_tonga_voltage_smio_registers; + + +struct _phw_tonga_mc_reg_entry { + uint32_t mclk_max; + uint32_t mc_data[SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE]; +}; +typedef struct _phw_tonga_mc_reg_entry phw_tonga_mc_reg_entry; + +struct _phw_tonga_mc_reg_table { + uint8_t last; /* number of registers*/ + uint8_t num_entries; /* number of entries in mc_reg_table_entry used*/ + uint16_t validflag; /* indicate the corresponding register is valid or not. 1: valid, 0: invalid. 
bit0->address[0], bit1->address[1], etc.*/ + phw_tonga_mc_reg_entry mc_reg_table_entry[MAX_AC_TIMING_ENTRIES]; + SMU72_Discrete_MCRegisterAddress mc_reg_address[SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE]; +}; +typedef struct _phw_tonga_mc_reg_table phw_tonga_mc_reg_table; + +#define DISABLE_MC_LOADMICROCODE 1 +#define DISABLE_MC_CFGPROGRAMMING 2 + +/*Ultra Low Voltage parameter structure */ +struct _phw_tonga_ulv_parm{ + bool ulv_supported; + uint32_t ch_ulv_parameter; + uint32_t ulv_volt_change_delay; + struct tonga_performance_level ulv_power_level; +}; +typedef struct _phw_tonga_ulv_parm phw_tonga_ulv_parm; + +#define TONGA_MAX_LEAKAGE_COUNT 8 + +struct _phw_tonga_leakage_voltage { + uint16_t count; + uint16_t leakage_id[TONGA_MAX_LEAKAGE_COUNT]; + uint16_t actual_voltage[TONGA_MAX_LEAKAGE_COUNT]; +}; +typedef struct _phw_tonga_leakage_voltage phw_tonga_leakage_voltage; + +struct _phw_tonga_display_timing { + uint32_t min_clock_insr; + uint32_t num_existing_displays; +}; +typedef struct _phw_tonga_display_timing phw_tonga_display_timing; + +struct _phw_tonga_dpmlevel_enable_mask { + uint32_t uvd_dpm_enable_mask; + uint32_t vce_dpm_enable_mask; + uint32_t acp_dpm_enable_mask; + uint32_t samu_dpm_enable_mask; + uint32_t sclk_dpm_enable_mask; + uint32_t mclk_dpm_enable_mask; + uint32_t pcie_dpm_enable_mask; +}; +typedef struct _phw_tonga_dpmlevel_enable_mask phw_tonga_dpmlevel_enable_mask; + +struct _phw_tonga_pcie_perf_range { + uint16_t max; + uint16_t min; +}; +typedef struct _phw_tonga_pcie_perf_range phw_tonga_pcie_perf_range; + +struct _phw_tonga_vbios_boot_state { + uint16_t mvdd_bootup_value; + uint16_t vddc_bootup_value; + uint16_t vddci_bootup_value; + uint16_t vddgfx_bootup_value; + uint32_t sclk_bootup_value; + uint32_t mclk_bootup_value; + uint16_t pcie_gen_bootup_value; + uint16_t pcie_lane_bootup_value; +}; +typedef struct _phw_tonga_vbios_boot_state phw_tonga_vbios_boot_state; + +#define DPMTABLE_OD_UPDATE_SCLK 0x00000001 +#define DPMTABLE_OD_UPDATE_MCLK 0x00000002 +#define DPMTABLE_UPDATE_SCLK 0x00000004 +#define DPMTABLE_UPDATE_MCLK 0x00000008 + +/* We need to review which fields are needed. */ +/* This is mostly a copy of the RV7xx/Evergreen structure which is close, but not identical to the N.Islands one. 
*/ +struct tonga_hwmgr { + struct tonga_dpm_table dpm_table; + struct tonga_dpm_table golden_dpm_table; + + uint32_t voting_rights_clients0; + uint32_t voting_rights_clients1; + uint32_t voting_rights_clients2; + uint32_t voting_rights_clients3; + uint32_t voting_rights_clients4; + uint32_t voting_rights_clients5; + uint32_t voting_rights_clients6; + uint32_t voting_rights_clients7; + uint32_t static_screen_threshold_unit; + uint32_t static_screen_threshold; + uint32_t voltage_control; + uint32_t vdd_gfx_control; + + uint32_t vddc_vddci_delta; + uint32_t vddc_vddgfx_delta; + + struct pp_interrupt_registration_info internal_high_thermal_interrupt_info; + struct pp_interrupt_registration_info internal_low_thermal_interrupt_info; + struct pp_interrupt_registration_info smc_to_host_interrupt_info; + uint32_t active_auto_throttle_sources; + + struct pp_interrupt_registration_info external_throttle_interrupt; + irq_handler_func_t external_throttle_callback; + void *external_throttle_context; + + struct pp_interrupt_registration_info ctf_interrupt_info; + irq_handler_func_t ctf_callback; + void *ctf_context; + + phw_tonga_clock_registers clock_registers; + phw_tonga_voltage_smio_registers voltage_smio_registers; + + bool is_memory_GDDR5; + uint16_t acpi_vddc; + bool pspp_notify_required; /* Flag to indicate if PSPP notification to SBIOS is required */ + uint16_t force_pcie_gen; /* The forced PCI-E speed if not 0xffff */ + uint16_t acpi_pcie_gen; /* The PCI-E speed at ACPI time */ + uint32_t pcie_gen_cap; /* The PCI-E speed capabilities bitmap from CAIL */ + uint32_t pcie_lane_cap; /* The PCI-E lane capabilities bitmap from CAIL */ + uint32_t pcie_spc_cap; /* Symbol Per Clock Capabilities from registry */ + phw_tonga_leakage_voltage vddc_leakage; /* The Leakage VDDC supported (based on leakage ID).*/ + phw_tonga_leakage_voltage vddcgfx_leakage; /* The Leakage VDDC supported (based on leakage ID). */ + phw_tonga_leakage_voltage vddci_leakage; /* The Leakage VDDCI supported (based on leakage ID). */ + + uint32_t mvdd_control; + uint32_t vddc_mask_low; + uint32_t mvdd_mask_low; + uint16_t max_vddc_in_pp_table; /* the maximum VDDC value in the powerplay table*/ + uint16_t min_vddc_in_pp_table; + uint16_t max_vddci_in_pp_table; /* the maximum VDDCI value in the powerplay table */ + uint16_t min_vddci_in_pp_table; + uint32_t mclk_strobe_mode_threshold; + uint32_t mclk_stutter_mode_threshold; + uint32_t mclk_edc_enable_threshold; + uint32_t mclk_edc_wr_enable_threshold; + bool is_uvd_enabled; + bool is_xdma_enabled; + phw_tonga_vbios_boot_state vbios_boot_state; + + bool battery_state; + bool is_tlu_enabled; + bool pcie_performance_request; + + /* -------------- SMC SRAM Address of firmware header tables ----------------*/ + uint32_t sram_end; /* The first address after the SMC SRAM. */ + uint32_t dpm_table_start; /* The start of the dpm table in the SMC SRAM. */ + uint32_t soft_regs_start; /* The start of the soft registers in the SMC SRAM. */ + uint32_t mc_reg_table_start; /* The start of the mc register table in the SMC SRAM. */ + uint32_t fan_table_start; /* The start of the fan table in the SMC SRAM. */ + uint32_t arb_table_start; /* The start of the ARB setting table in the SMC SRAM. */ + SMU72_Discrete_DpmTable smc_state_table; /* The carbon copy of the SMC state table. */ + SMU72_Discrete_MCRegisters mc_reg_table; + SMU72_Discrete_Ulv ulv_setting; /* The carbon copy of ULV setting. 
*/ + /* -------------- Stuff originally coming from Evergreen --------------------*/ + phw_tonga_mc_reg_table tonga_mc_reg_table; + uint32_t vdd_ci_control; + pp_atomctrl_voltage_table vddc_voltage_table; + pp_atomctrl_voltage_table vddci_voltage_table; + pp_atomctrl_voltage_table vddgfx_voltage_table; + pp_atomctrl_voltage_table mvdd_voltage_table; + + uint32_t mgcg_cgtt_local2; + uint32_t mgcg_cgtt_local3; + uint32_t gpio_debug; + uint32_t mc_micro_code_feature; + uint32_t highest_mclk; + uint16_t acpi_vdd_ci; + uint8_t mvdd_high_index; + uint8_t mvdd_low_index; + bool dll_defaule_on; + bool performance_request_registered; + + /* ----------------- Low Power Features ---------------------*/ + phw_tonga_bacos bacos; + phw_tonga_ulv_parm ulv; + /* ----------------- CAC Stuff ---------------------*/ + uint32_t cac_table_start; + bool cac_configuration_required; /* TRUE if PP_CACConfigurationRequired == 1 */ + bool driver_calculate_cac_leakage; /* TRUE if PP_DriverCalculateCACLeakage == 1 */ + bool cac_enabled; + /* ----------------- DPM2 Parameters ---------------------*/ + uint32_t power_containment_features; + bool enable_bapm_feature; + bool enable_tdc_limit_feature; + bool enable_pkg_pwr_tracking_feature; + bool disable_uvd_power_tune_feature; + phw_tonga_pt_defaults *power_tune_defaults; + SMU72_Discrete_PmFuses power_tune_table; + uint32_t ul_dte_tj_offset; /* Fudge factor in DPM table to correct HW DTE errors */ + uint32_t fast_watemark_threshold; /* use fast watermark if clock is equal or above this. In percentage of the target high sclk. */ + + /* ----------------- Phase Shedding ---------------------*/ + bool vddc_phase_shed_control; + /* --------------------- DI/DT --------------------------*/ + phw_tonga_display_timing display_timing; + /* --------- ReadRegistry data for memory and engine clock margins ---- */ + uint32_t engine_clock_data; + uint32_t memory_clock_data; + /* -------- Thermal Temperature Setting --------------*/ + phw_tonga_dpmlevel_enable_mask dpm_level_enable_mask; + uint32_t need_update_smu7_dpm_table; + uint32_t sclk_dpm_key_disabled; + uint32_t mclk_dpm_key_disabled; + uint32_t pcie_dpm_key_disabled; + uint32_t min_engine_clocks; /* used to store the previous dal min sclock */ + phw_tonga_pcie_perf_range pcie_gen_performance; + phw_tonga_pcie_perf_range pcie_lane_performance; + phw_tonga_pcie_perf_range pcie_gen_power_saving; + phw_tonga_pcie_perf_range pcie_lane_power_saving; + bool use_pcie_performance_levels; + bool use_pcie_power_saving_levels; + uint32_t activity_target[SMU72_MAX_LEVELS_GRAPHICS]; /* percentage value from 0-100, default 50 */ + uint32_t mclk_activity_target; + uint32_t low_sclk_interrupt_threshold; + uint32_t last_mclk_dpm_enable_mask; + bool uvd_enabled; + uint32_t pcc_monitor_enabled; + + /* --------- Power Gating States ------------*/ + bool uvd_power_gated; /* 1: gated, 0:not gated */ + bool vce_power_gated; /* 1: gated, 0:not gated */ + bool samu_power_gated; /* 1: gated, 0:not gated */ + bool acp_power_gated; /* 1: gated, 0:not gated */ + bool pg_acp_init; + +}; + +typedef struct tonga_hwmgr tonga_hwmgr; + +#define TONGA_DPM2_NEAR_TDP_DEC 10 +#define TONGA_DPM2_ABOVE_SAFE_INC 5 +#define TONGA_DPM2_BELOW_SAFE_INC 20 + +#define TONGA_DPM2_LTA_WINDOW_SIZE 7 /* Log2 of the LTA window size (l2numWin_TDP). Eg. If LTA windows size is 128, then this value should be Log2(128) = 7. 
*/ + +#define TONGA_DPM2_LTS_TRUNCATE 0 + +#define TONGA_DPM2_TDP_SAFE_LIMIT_PERCENT 80 /* Maximum 100 */ + +#define TONGA_DPM2_MAXPS_PERCENT_H 90 /* Maximum 0xFF */ +#define TONGA_DPM2_MAXPS_PERCENT_M 90 /* Maximum 0xFF */ + +#define TONGA_DPM2_PWREFFICIENCYRATIO_MARGIN 50 + +#define TONGA_DPM2_SQ_RAMP_MAX_POWER 0x3FFF +#define TONGA_DPM2_SQ_RAMP_MIN_POWER 0x12 +#define TONGA_DPM2_SQ_RAMP_MAX_POWER_DELTA 0x15 +#define TONGA_DPM2_SQ_RAMP_SHORT_TERM_INTERVAL_SIZE 0x1E +#define TONGA_DPM2_SQ_RAMP_LONG_TERM_INTERVAL_RATIO 0xF + +#define TONGA_VOLTAGE_CONTROL_NONE 0x0 +#define TONGA_VOLTAGE_CONTROL_BY_GPIO 0x1 +#define TONGA_VOLTAGE_CONTROL_BY_SVID2 0x2 +#define TONGA_VOLTAGE_CONTROL_MERGED 0x3 + +#define TONGA_Q88_FORMAT_CONVERSION_UNIT 256 /*To convert to Q8.8 format for firmware */ + +#define TONGA_UNUSED_GPIO_PIN 0x7F + +#define PP_HOST_TO_SMC_UL(X) cpu_to_be32(X) +#define PP_SMC_TO_HOST_UL(X) be32_to_cpu(X) + +#define PP_HOST_TO_SMC_US(X) cpu_to_be16(X) +#define PP_SMC_TO_HOST_US(X) be16_to_cpu(X) + +#define CONVERT_FROM_HOST_TO_SMC_UL(X) ((X) = PP_HOST_TO_SMC_UL(X)) +#define CONVERT_FROM_SMC_TO_HOST_UL(X) ((X) = PP_SMC_TO_HOST_UL(X)) + +#define CONVERT_FROM_HOST_TO_SMC_US(X) ((X) = PP_HOST_TO_SMC_US(X)) + +int tonga_hwmgr_init(struct pp_hwmgr *hwmgr); +int tonga_update_vce_dpm(struct pp_hwmgr *hwmgr, const void *input); +int tonga_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate); +int tonga_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable); +int tonga_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable); +uint32_t tonga_get_xclk(struct pp_hwmgr *hwmgr); + +#endif + diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_powertune.h b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_powertune.h new file mode 100644 index 000000000000..8e6670b3cb67 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_powertune.h @@ -0,0 +1,66 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#ifndef TONGA_POWERTUNE_H +#define TONGA_POWERTUNE_H + +enum _phw_tonga_ptc_config_reg_type { + TONGA_CONFIGREG_MMR = 0, + TONGA_CONFIGREG_SMC_IND, + TONGA_CONFIGREG_DIDT_IND, + TONGA_CONFIGREG_CACHE, + + TONGA_CONFIGREG_MAX +}; +typedef enum _phw_tonga_ptc_config_reg_type phw_tonga_ptc_config_reg_type; + +/* PowerContainment Features */ +#define POWERCONTAINMENT_FEATURE_BAPM 0x00000001 +#define POWERCONTAINMENT_FEATURE_TDCLimit 0x00000002 +#define POWERCONTAINMENT_FEATURE_PkgPwrLimit 0x00000004 + +struct _phw_tonga_pt_config_reg { + uint32_t Offset; + uint32_t Mask; + uint32_t Shift; + uint32_t Value; + phw_tonga_ptc_config_reg_type Type; +}; +typedef struct _phw_tonga_pt_config_reg phw_tonga_pt_config_reg; + +struct _phw_tonga_pt_defaults { + uint8_t svi_load_line_en; + uint8_t svi_load_line_vddC; + uint8_t tdc_vddc_throttle_release_limit_perc; + uint8_t tdc_mawt; + uint8_t tdc_waterfall_ctl; + uint8_t dte_ambient_temp_base; + uint32_t display_cac; + uint32_t bamp_temp_gradient; + uint16_t bapmti_r[SMU72_DTE_ITERATIONS * SMU72_DTE_SOURCES * SMU72_DTE_SINKS]; + uint16_t bapmti_rc[SMU72_DTE_ITERATIONS * SMU72_DTE_SOURCES * SMU72_DTE_SINKS]; +}; +typedef struct _phw_tonga_pt_defaults phw_tonga_pt_defaults; + +#endif + diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_pptable.h b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_pptable.h new file mode 100644 index 000000000000..9a4456e6521b --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_pptable.h @@ -0,0 +1,406 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef TONGA_PPTABLE_H +#define TONGA_PPTABLE_H + +/** \file + * This is a PowerPlay table header file + */ +#pragma pack(push, 1) + +#include "hwmgr.h" + +#define ATOM_TONGA_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK 0x0f +#define ATOM_TONGA_PP_FANPARAMETERS_NOFAN 0x80 /* No fan is connected to this controller. */ + +#define ATOM_TONGA_PP_THERMALCONTROLLER_NONE 0 +#define ATOM_TONGA_PP_THERMALCONTROLLER_LM96163 17 +#define ATOM_TONGA_PP_THERMALCONTROLLER_TONGA 21 +#define ATOM_TONGA_PP_THERMALCONTROLLER_FIJI 22 + +/* + * Thermal controller 'combo type' to use an external controller for Fan control and an internal controller for thermal. + * We probably should reserve the bit 0x80 for this use. + * To keep the number of these types low we should also use the same code for all ASICs (i.e. do not distinguish RV6xx and RV7xx Internal here). 
+ * The driver can pick the correct internal controller based on the ASIC. + */ + +#define ATOM_TONGA_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL 0x89 /* ADT7473 Fan Control + Internal Thermal Controller */ +#define ATOM_TONGA_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL 0x8D /* EMC2103 Fan Control + Internal Thermal Controller */ + +/*/* ATOM_TONGA_POWERPLAYTABLE::ulPlatformCaps */ +#define ATOM_TONGA_PP_PLATFORM_CAP_VDDGFX_CONTROL 0x1 /* This cap indicates whether vddgfx will be a separated power rail. */ +#define ATOM_TONGA_PP_PLATFORM_CAP_POWERPLAY 0x2 /* This cap indicates whether this is a mobile part and CCC need to show Powerplay page. */ +#define ATOM_TONGA_PP_PLATFORM_CAP_SBIOSPOWERSOURCE 0x4 /* This cap indicates whether power source notificaiton is done by SBIOS directly. */ +#define ATOM_TONGA_PP_PLATFORM_CAP_DISABLE_VOLTAGE_ISLAND 0x8 /* Enable the option to overwrite voltage island feature to be disabled, regardless of VddGfx power rail support. */ +#define ____RETIRE16____ 0x10 +#define ATOM_TONGA_PP_PLATFORM_CAP_HARDWAREDC 0x20 /* This cap indicates whether power source notificaiton is done by GPIO directly. */ +#define ____RETIRE64____ 0x40 +#define ____RETIRE128____ 0x80 +#define ____RETIRE256____ 0x100 +#define ____RETIRE512____ 0x200 +#define ____RETIRE1024____ 0x400 +#define ____RETIRE2048____ 0x800 +#define ATOM_TONGA_PP_PLATFORM_CAP_MVDD_CONTROL 0x1000 /* This cap indicates dynamic MVDD is required. Uncheck to disable it. */ +#define ____RETIRE2000____ 0x2000 +#define ____RETIRE4000____ 0x4000 +#define ATOM_TONGA_PP_PLATFORM_CAP_VDDCI_CONTROL 0x8000 /* This cap indicates dynamic VDDCI is required. Uncheck to disable it. */ +#define ____RETIRE10000____ 0x10000 +#define ATOM_TONGA_PP_PLATFORM_CAP_BACO 0x20000 /* Enable to indicate the driver supports BACO state. */ + +#define ATOM_TONGA_PP_PLATFORM_CAP_OUTPUT_THERMAL2GPIO17 0x100000 /* Enable to indicate the driver supports thermal2GPIO17. */ +#define ATOM_TONGA_PP_PLATFORM_COMBINE_PCC_WITH_THERMAL_SIGNAL 0x1000000 /* Enable to indicate if thermal and PCC are sharing the same GPIO */ +#define ATOM_TONGA_PLATFORM_LOAD_POST_PRODUCTION_FIRMWARE 0x2000000 + +/* ATOM_PPLIB_NONCLOCK_INFO::usClassification */ +#define ATOM_PPLIB_CLASSIFICATION_UI_MASK 0x0007 +#define ATOM_PPLIB_CLASSIFICATION_UI_SHIFT 0 +#define ATOM_PPLIB_CLASSIFICATION_UI_NONE 0 +#define ATOM_PPLIB_CLASSIFICATION_UI_BATTERY 1 +#define ATOM_PPLIB_CLASSIFICATION_UI_BALANCED 3 +#define ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE 5 +/* 2, 4, 6, 7 are reserved */ + +#define ATOM_PPLIB_CLASSIFICATION_BOOT 0x0008 +#define ATOM_PPLIB_CLASSIFICATION_THERMAL 0x0010 +#define ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE 0x0020 +#define ATOM_PPLIB_CLASSIFICATION_REST 0x0040 +#define ATOM_PPLIB_CLASSIFICATION_FORCED 0x0080 +#define ATOM_PPLIB_CLASSIFICATION_ACPI 0x1000 + +/* ATOM_PPLIB_NONCLOCK_INFO::usClassification2 */ +#define ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2 0x0001 + +#define ATOM_Tonga_DISALLOW_ON_DC 0x00004000 +#define ATOM_Tonga_ENABLE_VARIBRIGHT 0x00008000 + +#define ATOM_Tonga_TABLE_REVISION_TONGA 7 + +typedef struct _ATOM_Tonga_POWERPLAYTABLE { + ATOM_COMMON_TABLE_HEADER sHeader; + + UCHAR ucTableRevision; + USHORT usTableSize; /*the size of header structure */ + + ULONG ulGoldenPPID; + ULONG ulGoldenRevision; + USHORT usFormatID; + + USHORT usVoltageTime; /*in microseconds */ + ULONG ulPlatformCaps; /*See ATOM_Tonga_CAPS_* */ + + ULONG ulMaxODEngineClock; /*For Overdrive. */ + ULONG ulMaxODMemoryClock; /*For Overdrive. 
*/ + + USHORT usPowerControlLimit; + USHORT usUlvVoltageOffset; /*in mv units */ + + USHORT usStateArrayOffset; /*points to ATOM_Tonga_State_Array */ + USHORT usFanTableOffset; /*points to ATOM_Tonga_Fan_Table */ + USHORT usThermalControllerOffset; /*points to ATOM_Tonga_Thermal_Controller */ + USHORT usReserv; /*CustomThermalPolicy removed for Tonga. Keep this filed as reserved. */ + + USHORT usMclkDependencyTableOffset; /*points to ATOM_Tonga_MCLK_Dependency_Table */ + USHORT usSclkDependencyTableOffset; /*points to ATOM_Tonga_SCLK_Dependency_Table */ + USHORT usVddcLookupTableOffset; /*points to ATOM_Tonga_Voltage_Lookup_Table */ + USHORT usVddgfxLookupTableOffset; /*points to ATOM_Tonga_Voltage_Lookup_Table */ + + USHORT usMMDependencyTableOffset; /*points to ATOM_Tonga_MM_Dependency_Table */ + + USHORT usVCEStateTableOffset; /*points to ATOM_Tonga_VCE_State_Table; */ + + USHORT usPPMTableOffset; /*points to ATOM_Tonga_PPM_Table */ + USHORT usPowerTuneTableOffset; /*points to ATOM_PowerTune_Table */ + + USHORT usHardLimitTableOffset; /*points to ATOM_Tonga_Hard_Limit_Table */ + + USHORT usPCIETableOffset; /*points to ATOM_Tonga_PCIE_Table */ + + USHORT usGPIOTableOffset; /*points to ATOM_Tonga_GPIO_Table */ + + USHORT usReserved[6]; /*TODO: modify reserved size to fit structure aligning */ +} ATOM_Tonga_POWERPLAYTABLE; + +typedef struct _ATOM_Tonga_State { + UCHAR ucEngineClockIndexHigh; + UCHAR ucEngineClockIndexLow; + + UCHAR ucMemoryClockIndexHigh; + UCHAR ucMemoryClockIndexLow; + + UCHAR ucPCIEGenLow; + UCHAR ucPCIEGenHigh; + + UCHAR ucPCIELaneLow; + UCHAR ucPCIELaneHigh; + + USHORT usClassification; + ULONG ulCapsAndSettings; + USHORT usClassification2; + UCHAR ucUnused[4]; +} ATOM_Tonga_State; + +typedef struct _ATOM_Tonga_State_Array { + UCHAR ucRevId; + UCHAR ucNumEntries; /* Number of entries. */ + ATOM_Tonga_State states[1]; /* Dynamically allocate entries. */ +} ATOM_Tonga_State_Array; + +typedef struct _ATOM_Tonga_MCLK_Dependency_Record { + UCHAR ucVddcInd; /* Vddc voltage */ + USHORT usVddci; + USHORT usVddgfxOffset; /* Offset relative to Vddc voltage */ + USHORT usMvdd; + ULONG ulMclk; + USHORT usReserved; +} ATOM_Tonga_MCLK_Dependency_Record; + +typedef struct _ATOM_Tonga_MCLK_Dependency_Table { + UCHAR ucRevId; + UCHAR ucNumEntries; /* Number of entries. */ + ATOM_Tonga_MCLK_Dependency_Record entries[1]; /* Dynamically allocate entries. */ +} ATOM_Tonga_MCLK_Dependency_Table; + +typedef struct _ATOM_Tonga_SCLK_Dependency_Record { + UCHAR ucVddInd; /* Base voltage */ + USHORT usVddcOffset; /* Offset relative to base voltage */ + ULONG ulSclk; + USHORT usEdcCurrent; + UCHAR ucReliabilityTemperature; + UCHAR ucCKSVOffsetandDisable; /* Bits 0~6: Voltage offset for CKS, Bit 7: Disable/enable for the SCLK level. */ +} ATOM_Tonga_SCLK_Dependency_Record; + +typedef struct _ATOM_Tonga_SCLK_Dependency_Table { + UCHAR ucRevId; + UCHAR ucNumEntries; /* Number of entries. */ + ATOM_Tonga_SCLK_Dependency_Record entries[1]; /* Dynamically allocate entries. */ +} ATOM_Tonga_SCLK_Dependency_Table; + +typedef struct _ATOM_Tonga_PCIE_Record { + UCHAR ucPCIEGenSpeed; + UCHAR usPCIELaneWidth; + UCHAR ucReserved[2]; +} ATOM_Tonga_PCIE_Record; + +typedef struct _ATOM_Tonga_PCIE_Table { + UCHAR ucRevId; + UCHAR ucNumEntries; /* Number of entries. */ + ATOM_Tonga_PCIE_Record entries[1]; /* Dynamically allocate entries. 
*/ +} ATOM_Tonga_PCIE_Table; + +typedef struct _ATOM_Tonga_MM_Dependency_Record { + UCHAR ucVddcInd; /* VDDC voltage */ + USHORT usVddgfxOffset; /* Offset relative to VDDC voltage */ + ULONG ulDClk; /* UVD D-clock */ + ULONG ulVClk; /* UVD V-clock */ + ULONG ulEClk; /* VCE clock */ + ULONG ulAClk; /* ACP clock */ + ULONG ulSAMUClk; /* SAMU clock */ +} ATOM_Tonga_MM_Dependency_Record; + +typedef struct _ATOM_Tonga_MM_Dependency_Table { + UCHAR ucRevId; + UCHAR ucNumEntries; /* Number of entries. */ + ATOM_Tonga_MM_Dependency_Record entries[1]; /* Dynamically allocate entries. */ +} ATOM_Tonga_MM_Dependency_Table; + +typedef struct _ATOM_Tonga_Voltage_Lookup_Record { + USHORT usVdd; /* Base voltage */ + USHORT usCACLow; + USHORT usCACMid; + USHORT usCACHigh; +} ATOM_Tonga_Voltage_Lookup_Record; + +typedef struct _ATOM_Tonga_Voltage_Lookup_Table { + UCHAR ucRevId; + UCHAR ucNumEntries; /* Number of entries. */ + ATOM_Tonga_Voltage_Lookup_Record entries[1]; /* Dynamically allocate entries. */ +} ATOM_Tonga_Voltage_Lookup_Table; + +typedef struct _ATOM_Tonga_Fan_Table { + UCHAR ucRevId; /* Change this if the table format changes or version changes so that the other fields are not the same. */ + UCHAR ucTHyst; /* Temperature hysteresis. Integer. */ + USHORT usTMin; /* The temperature, in 0.01 centigrades, below which we just run at a minimal PWM. */ + USHORT usTMed; /* The middle temperature where we change slopes. */ + USHORT usTHigh; /* The high point above TMed for adjusting the second slope. */ + USHORT usPWMMin; /* The minimum PWM value in percent (0.01% increments). */ + USHORT usPWMMed; /* The PWM value (in percent) at TMed. */ + USHORT usPWMHigh; /* The PWM value at THigh. */ + USHORT usTMax; /* The max temperature */ + UCHAR ucFanControlMode; /* Legacy or Fuzzy Fan mode */ + USHORT usFanPWMMax; /* Maximum allowed fan power in percent */ + USHORT usFanOutputSensitivity; /* Sensitivity of fan reaction to temepature changes */ + USHORT usFanRPMMax; /* The default value in RPM */ + ULONG ulMinFanSCLKAcousticLimit; /* Minimum Fan Controller SCLK Frequency Acoustic Limit. */ + UCHAR ucTargetTemperature; /* Advanced fan controller target temperature. */ + UCHAR ucMinimumPWMLimit; /* The minimum PWM that the advanced fan controller can set. This should be set to the highest PWM that will run the fan at its lowest RPM. */ + USHORT usReserved; +} ATOM_Tonga_Fan_Table; + +typedef struct _ATOM_Fiji_Fan_Table { + UCHAR ucRevId; /* Change this if the table format changes or version changes so that the other fields are not the same. */ + UCHAR ucTHyst; /* Temperature hysteresis. Integer. */ + USHORT usTMin; /* The temperature, in 0.01 centigrades, below which we just run at a minimal PWM. */ + USHORT usTMed; /* The middle temperature where we change slopes. */ + USHORT usTHigh; /* The high point above TMed for adjusting the second slope. */ + USHORT usPWMMin; /* The minimum PWM value in percent (0.01% increments). */ + USHORT usPWMMed; /* The PWM value (in percent) at TMed. */ + USHORT usPWMHigh; /* The PWM value at THigh. */ + USHORT usTMax; /* The max temperature */ + UCHAR ucFanControlMode; /* Legacy or Fuzzy Fan mode */ + USHORT usFanPWMMax; /* Maximum allowed fan power in percent */ + USHORT usFanOutputSensitivity; /* Sensitivity of fan reaction to temepature changes */ + USHORT usFanRPMMax; /* The default value in RPM */ + ULONG ulMinFanSCLKAcousticLimit; /* Minimum Fan Controller SCLK Frequency Acoustic Limit. */ + UCHAR ucTargetTemperature; /* Advanced fan controller target temperature. 
*/ + UCHAR ucMinimumPWMLimit; /* The minimum PWM that the advanced fan controller can set. This should be set to the highest PWM that will run the fan at its lowest RPM. */ + USHORT usFanGainEdge; + USHORT usFanGainHotspot; + USHORT usFanGainLiquid; + USHORT usFanGainVrVddc; + USHORT usFanGainVrMvdd; + USHORT usFanGainPlx; + USHORT usFanGainHbm; + USHORT usReserved; +} ATOM_Fiji_Fan_Table; + +typedef struct _ATOM_Tonga_Thermal_Controller { + UCHAR ucRevId; + UCHAR ucType; /* one of ATOM_TONGA_PP_THERMALCONTROLLER_* */ + UCHAR ucI2cLine; /* as interpreted by DAL I2C */ + UCHAR ucI2cAddress; + UCHAR ucFanParameters; /* Fan Control Parameters. */ + UCHAR ucFanMinRPM; /* Fan Minimum RPM (hundreds) -- for display purposes only. */ + UCHAR ucFanMaxRPM; /* Fan Maximum RPM (hundreds) -- for display purposes only. */ + UCHAR ucReserved; + UCHAR ucFlags; /* to be defined */ +} ATOM_Tonga_Thermal_Controller; + +typedef struct _ATOM_Tonga_VCE_State_Record { + UCHAR ucVCEClockIndex; /*index into usVCEDependencyTableOffset of 'ATOM_Tonga_MM_Dependency_Table' type */ + UCHAR ucFlag; /* 2 bits indicates memory p-states */ + UCHAR ucSCLKIndex; /*index into ATOM_Tonga_SCLK_Dependency_Table */ + UCHAR ucMCLKIndex; /*index into ATOM_Tonga_MCLK_Dependency_Table */ +} ATOM_Tonga_VCE_State_Record; + +typedef struct _ATOM_Tonga_VCE_State_Table { + UCHAR ucRevId; + UCHAR ucNumEntries; + ATOM_Tonga_VCE_State_Record entries[1]; +} ATOM_Tonga_VCE_State_Table; + +typedef struct _ATOM_Tonga_PowerTune_Table { + UCHAR ucRevId; + USHORT usTDP; + USHORT usConfigurableTDP; + USHORT usTDC; + USHORT usBatteryPowerLimit; + USHORT usSmallPowerLimit; + USHORT usLowCACLeakage; + USHORT usHighCACLeakage; + USHORT usMaximumPowerDeliveryLimit; + USHORT usTjMax; + USHORT usPowerTuneDataSetID; + USHORT usEDCLimit; + USHORT usSoftwareShutdownTemp; + USHORT usClockStretchAmount; + USHORT usReserve[2]; +} ATOM_Tonga_PowerTune_Table; + +typedef struct _ATOM_Fiji_PowerTune_Table { + UCHAR ucRevId; + USHORT usTDP; + USHORT usConfigurableTDP; + USHORT usTDC; + USHORT usBatteryPowerLimit; + USHORT usSmallPowerLimit; + USHORT usLowCACLeakage; + USHORT usHighCACLeakage; + USHORT usMaximumPowerDeliveryLimit; + USHORT usTjMax; /* For Fiji, this is also usTemperatureLimitEdge; */ + USHORT usPowerTuneDataSetID; + USHORT usEDCLimit; + USHORT usSoftwareShutdownTemp; + USHORT usClockStretchAmount; + USHORT usTemperatureLimitHotspot; /*The following are added for Fiji */ + USHORT usTemperatureLimitLiquid1; + USHORT usTemperatureLimitLiquid2; + USHORT usTemperatureLimitVrVddc; + USHORT usTemperatureLimitVrMvdd; + USHORT usTemperatureLimitPlx; + UCHAR ucLiquid1_I2C_address; /*Liquid */ + UCHAR ucLiquid2_I2C_address; + UCHAR ucLiquid_I2C_Line; + UCHAR ucVr_I2C_address; /*VR */ + UCHAR ucVr_I2C_Line; + UCHAR ucPlx_I2C_address; /*PLX */ + UCHAR ucPlx_I2C_Line; + USHORT usReserved; +} ATOM_Fiji_PowerTune_Table; + +#define ATOM_PPM_A_A 1 +#define ATOM_PPM_A_I 2 +typedef struct _ATOM_Tonga_PPM_Table { + UCHAR ucRevId; + UCHAR ucPpmDesign; /*A+I or A+A */ + USHORT usCpuCoreNumber; + ULONG ulPlatformTDP; + ULONG ulSmallACPlatformTDP; + ULONG ulPlatformTDC; + ULONG ulSmallACPlatformTDC; + ULONG ulApuTDP; + ULONG ulDGpuTDP; + ULONG ulDGpuUlvPower; + ULONG ulTjmax; +} ATOM_Tonga_PPM_Table; + +typedef struct _ATOM_Tonga_Hard_Limit_Record { + ULONG ulSCLKLimit; + ULONG ulMCLKLimit; + USHORT usVddcLimit; + USHORT usVddciLimit; + USHORT usVddgfxLimit; +} ATOM_Tonga_Hard_Limit_Record; + +typedef struct _ATOM_Tonga_Hard_Limit_Table { + UCHAR ucRevId; + UCHAR 
ucNumEntries; + ATOM_Tonga_Hard_Limit_Record entries[1]; +} ATOM_Tonga_Hard_Limit_Table; + +typedef struct _ATOM_Tonga_GPIO_Table { + UCHAR ucRevId; + UCHAR ucVRHotTriggeredSclkDpmIndex; /* If VRHot signal is triggered SCLK will be limited to this DPM level */ + UCHAR ucReserve[5]; +} ATOM_Tonga_GPIO_Table; + +typedef struct _PPTable_Generic_SubTable_Header { + UCHAR ucRevId; +} PPTable_Generic_SubTable_Header; + + +#pragma pack(pop) + + +#endif diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c new file mode 100644 index 000000000000..34f4bef3691f --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c @@ -0,0 +1,1142 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#include +#include +#include + +#include "tonga_processpptables.h" +#include "ppatomctrl.h" +#include "atombios.h" +#include "pp_debug.h" +#include "hwmgr.h" +#include "cgs_common.h" +#include "tonga_pptable.h" + +/** + * Private Function used during initialization. + * @param hwmgr Pointer to the hardware manager. + * @param setIt A flag indication if the capability should be set (TRUE) or reset (FALSE). + * @param cap Which capability to set/reset. + */ +static void set_hw_cap(struct pp_hwmgr *hwmgr, bool setIt, enum phm_platform_caps cap) +{ + if (setIt) + phm_cap_set(hwmgr->platform_descriptor.platformCaps, cap); + else + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, cap); +} + + +/** + * Private Function used during initialization. + * @param hwmgr Pointer to the hardware manager. + * @param powerplay_caps the bit array (from BIOS) of capability bits. + * @exception the current implementation always returns 1. 
+ */ +static int set_platform_caps(struct pp_hwmgr *hwmgr, uint32_t powerplay_caps) +{ + PP_ASSERT_WITH_CODE((~powerplay_caps & ____RETIRE16____), + "ATOM_PP_PLATFORM_CAP_ASPM_L1 is not supported!", continue); + PP_ASSERT_WITH_CODE((~powerplay_caps & ____RETIRE64____), + "ATOM_PP_PLATFORM_CAP_GEMINIPRIMARY is not supported!", continue); + PP_ASSERT_WITH_CODE((~powerplay_caps & ____RETIRE512____), + "ATOM_PP_PLATFORM_CAP_SIDEPORTCONTROL is not supported!", continue); + PP_ASSERT_WITH_CODE((~powerplay_caps & ____RETIRE1024____), + "ATOM_PP_PLATFORM_CAP_TURNOFFPLL_ASPML1 is not supported!", continue); + PP_ASSERT_WITH_CODE((~powerplay_caps & ____RETIRE2048____), + "ATOM_PP_PLATFORM_CAP_HTLINKCONTROL is not supported!", continue); + + set_hw_cap( + hwmgr, + 0 != (powerplay_caps & ATOM_TONGA_PP_PLATFORM_CAP_POWERPLAY), + PHM_PlatformCaps_PowerPlaySupport + ); + + set_hw_cap( + hwmgr, + 0 != (powerplay_caps & ATOM_TONGA_PP_PLATFORM_CAP_SBIOSPOWERSOURCE), + PHM_PlatformCaps_BiosPowerSourceControl + ); + + set_hw_cap( + hwmgr, + 0 != (powerplay_caps & ATOM_TONGA_PP_PLATFORM_CAP_HARDWAREDC), + PHM_PlatformCaps_AutomaticDCTransition + ); + + set_hw_cap( + hwmgr, + 0 != (powerplay_caps & ATOM_TONGA_PP_PLATFORM_CAP_MVDD_CONTROL), + PHM_PlatformCaps_EnableMVDDControl + ); + + set_hw_cap( + hwmgr, + 0 != (powerplay_caps & ATOM_TONGA_PP_PLATFORM_CAP_VDDCI_CONTROL), + PHM_PlatformCaps_ControlVDDCI + ); + + set_hw_cap( + hwmgr, + 0 != (powerplay_caps & ATOM_TONGA_PP_PLATFORM_CAP_VDDGFX_CONTROL), + PHM_PlatformCaps_ControlVDDGFX + ); + + set_hw_cap( + hwmgr, + 0 != (powerplay_caps & ATOM_TONGA_PP_PLATFORM_CAP_BACO), + PHM_PlatformCaps_BACO + ); + + set_hw_cap( + hwmgr, + 0 != (powerplay_caps & ATOM_TONGA_PP_PLATFORM_CAP_DISABLE_VOLTAGE_ISLAND), + PHM_PlatformCaps_DisableVoltageIsland + ); + + set_hw_cap( + hwmgr, + 0 != (powerplay_caps & ATOM_TONGA_PP_PLATFORM_COMBINE_PCC_WITH_THERMAL_SIGNAL), + PHM_PlatformCaps_CombinePCCWithThermalSignal + ); + + set_hw_cap( + hwmgr, + 0 != (powerplay_caps & ATOM_TONGA_PLATFORM_LOAD_POST_PRODUCTION_FIRMWARE), + PHM_PlatformCaps_LoadPostProductionFirmware + ); + + return 0; +} + +/** + * Private Function to get the PowerPlay Table Address. 
+ */ +const void *get_powerplay_table(struct pp_hwmgr *hwmgr) +{ + int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo); + + u16 size; + u8 frev, crev; + void *table_address; + + table_address = (ATOM_Tonga_POWERPLAYTABLE *) + cgs_atom_get_data_table(hwmgr->device, index, &size, &frev, &crev); + + hwmgr->soft_pp_table = table_address; /*Cache the result in RAM.*/ + + return table_address; +} + +static int get_vddc_lookup_table( + struct pp_hwmgr *hwmgr, + phm_ppt_v1_voltage_lookup_table **lookup_table, + const ATOM_Tonga_Voltage_Lookup_Table *vddc_lookup_pp_tables, + uint32_t max_levels + ) +{ + uint32_t table_size, i; + phm_ppt_v1_voltage_lookup_table *table; + + PP_ASSERT_WITH_CODE((0 != vddc_lookup_pp_tables->ucNumEntries), + "Invalid CAC Leakage PowerPlay Table!", return 1); + + table_size = sizeof(uint32_t) + + sizeof(phm_ppt_v1_voltage_lookup_record) * max_levels; + + table = (phm_ppt_v1_voltage_lookup_table *) + kzalloc(table_size, GFP_KERNEL); + + if (NULL == table) + return -ENOMEM; + + memset(table, 0x00, table_size); + + table->count = vddc_lookup_pp_tables->ucNumEntries; + + for (i = 0; i < vddc_lookup_pp_tables->ucNumEntries; i++) { + table->entries[i].us_calculated = 0; + table->entries[i].us_vdd = + vddc_lookup_pp_tables->entries[i].usVdd; + table->entries[i].us_cac_low = + vddc_lookup_pp_tables->entries[i].usCACLow; + table->entries[i].us_cac_mid = + vddc_lookup_pp_tables->entries[i].usCACMid; + table->entries[i].us_cac_high = + vddc_lookup_pp_tables->entries[i].usCACHigh; + } + + *lookup_table = table; + + return 0; +} + +/** + * Private Function used during initialization. + * Initialize Platform Power Management Parameter table + * @param hwmgr Pointer to the hardware manager. + * @param atom_ppm_table Pointer to PPM table in VBIOS + */ +static int get_platform_power_management_table( + struct pp_hwmgr *hwmgr, + ATOM_Tonga_PPM_Table *atom_ppm_table) +{ + struct phm_ppm_table *ptr = kzalloc(sizeof(ATOM_Tonga_PPM_Table), GFP_KERNEL); + struct phm_ppt_v1_information *pp_table_information = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + + if (NULL == ptr) + return -ENOMEM; + + ptr->ppm_design + = atom_ppm_table->ucPpmDesign; + ptr->cpu_core_number + = atom_ppm_table->usCpuCoreNumber; + ptr->platform_tdp + = atom_ppm_table->ulPlatformTDP; + ptr->small_ac_platform_tdp + = atom_ppm_table->ulSmallACPlatformTDP; + ptr->platform_tdc + = atom_ppm_table->ulPlatformTDC; + ptr->small_ac_platform_tdc + = atom_ppm_table->ulSmallACPlatformTDC; + ptr->apu_tdp + = atom_ppm_table->ulApuTDP; + ptr->dgpu_tdp + = atom_ppm_table->ulDGpuTDP; + ptr->dgpu_ulv_power + = atom_ppm_table->ulDGpuUlvPower; + ptr->tj_max + = atom_ppm_table->ulTjmax; + + pp_table_information->ppm_parameter_table = ptr; + + return 0; +} + +/** + * Private Function used during initialization. + * Initialize TDP limits for DPM2 + * @param hwmgr Pointer to the hardware manager. + * @param powerplay_table Pointer to the PowerPlay Table. 
+ */ +static int init_dpm_2_parameters( + struct pp_hwmgr *hwmgr, + const ATOM_Tonga_POWERPLAYTABLE *powerplay_table + ) +{ + int result = 0; + struct phm_ppt_v1_information *pp_table_information = (struct phm_ppt_v1_information *)(hwmgr->pptable); + ATOM_Tonga_PPM_Table *atom_ppm_table; + uint32_t disable_ppm = 0; + uint32_t disable_power_control = 0; + + pp_table_information->us_ulv_voltage_offset = + le16_to_cpu(powerplay_table->usUlvVoltageOffset); + + pp_table_information->ppm_parameter_table = NULL; + pp_table_information->vddc_lookup_table = NULL; + pp_table_information->vddgfx_lookup_table = NULL; + /* TDP limits */ + hwmgr->platform_descriptor.TDPODLimit = + le16_to_cpu(powerplay_table->usPowerControlLimit); + hwmgr->platform_descriptor.TDPAdjustment = 0; + hwmgr->platform_descriptor.VidAdjustment = 0; + hwmgr->platform_descriptor.VidAdjustmentPolarity = 0; + hwmgr->platform_descriptor.VidMinLimit = 0; + hwmgr->platform_descriptor.VidMaxLimit = 1500000; + hwmgr->platform_descriptor.VidStep = 6250; + + disable_power_control = 0; + if (0 == disable_power_control) { + /* enable TDP overdrive (PowerControl) feature as well if supported */ + if (hwmgr->platform_descriptor.TDPODLimit != 0) + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_PowerControl); + } + + if (0 != powerplay_table->usVddcLookupTableOffset) { + const ATOM_Tonga_Voltage_Lookup_Table *pVddcCACTable = + (ATOM_Tonga_Voltage_Lookup_Table *)(((unsigned long)powerplay_table) + + le16_to_cpu(powerplay_table->usVddcLookupTableOffset)); + + result = get_vddc_lookup_table(hwmgr, + &pp_table_information->vddc_lookup_table, pVddcCACTable, 16); + } + + if (0 != powerplay_table->usVddgfxLookupTableOffset) { + const ATOM_Tonga_Voltage_Lookup_Table *pVddgfxCACTable = + (ATOM_Tonga_Voltage_Lookup_Table *)(((unsigned long)powerplay_table) + + le16_to_cpu(powerplay_table->usVddgfxLookupTableOffset)); + + result = get_vddc_lookup_table(hwmgr, + &pp_table_information->vddgfx_lookup_table, pVddgfxCACTable, 16); + } + + disable_ppm = 0; + if (0 == disable_ppm) { + atom_ppm_table = (ATOM_Tonga_PPM_Table *) + (((unsigned long)powerplay_table) + le16_to_cpu(powerplay_table->usPPMTableOffset)); + + if (0 != powerplay_table->usPPMTableOffset) { + if (1 == get_platform_power_management_table(hwmgr, atom_ppm_table)) { + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_EnablePlatformPowerManagement); + } + } + } + + return result; +} + +static int get_valid_clk( + struct pp_hwmgr *hwmgr, + struct phm_clock_array **clk_table, + const phm_ppt_v1_clock_voltage_dependency_table * clk_volt_pp_table + ) +{ + uint32_t table_size, i; + struct phm_clock_array *table; + + PP_ASSERT_WITH_CODE((0 != clk_volt_pp_table->count), + "Invalid PowerPlay Table!", return -1); + + table_size = sizeof(uint32_t) + + sizeof(uint32_t) * clk_volt_pp_table->count; + + table = (struct phm_clock_array *)kzalloc(table_size, GFP_KERNEL); + + if (NULL == table) + return -ENOMEM; + + memset(table, 0x00, table_size); + + table->count = (uint32_t)clk_volt_pp_table->count; + + for (i = 0; i < table->count; i++) + table->values[i] = (uint32_t)clk_volt_pp_table->entries[i].clk; + + *clk_table = table; + + return 0; +} + +static int get_hard_limits( + struct pp_hwmgr *hwmgr, + struct phm_clock_and_voltage_limits *limits, + const ATOM_Tonga_Hard_Limit_Table * limitable + ) +{ + PP_ASSERT_WITH_CODE((0 != limitable->ucNumEntries), "Invalid PowerPlay Table!", return -1); + + /* currently we always take entries[0] parameters */ + limits->sclk = 
(uint32_t)limitable->entries[0].ulSCLKLimit; + limits->mclk = (uint32_t)limitable->entries[0].ulMCLKLimit; + limits->vddc = (uint16_t)limitable->entries[0].usVddcLimit; + limits->vddci = (uint16_t)limitable->entries[0].usVddciLimit; + limits->vddgfx = (uint16_t)limitable->entries[0].usVddgfxLimit; + + return 0; +} + +static int get_mclk_voltage_dependency_table( + struct pp_hwmgr *hwmgr, + phm_ppt_v1_clock_voltage_dependency_table **pp_tonga_mclk_dep_table, + const ATOM_Tonga_MCLK_Dependency_Table * mclk_dep_table + ) +{ + uint32_t table_size, i; + phm_ppt_v1_clock_voltage_dependency_table *mclk_table; + + PP_ASSERT_WITH_CODE((0 != mclk_dep_table->ucNumEntries), + "Invalid PowerPlay Table!", return -1); + + table_size = sizeof(uint32_t) + sizeof(phm_ppt_v1_clock_voltage_dependency_record) + * mclk_dep_table->ucNumEntries; + + mclk_table = (phm_ppt_v1_clock_voltage_dependency_table *) + kzalloc(table_size, GFP_KERNEL); + + if (NULL == mclk_table) + return -ENOMEM; + + memset(mclk_table, 0x00, table_size); + + mclk_table->count = (uint32_t)mclk_dep_table->ucNumEntries; + + for (i = 0; i < mclk_dep_table->ucNumEntries; i++) { + mclk_table->entries[i].vddInd = + mclk_dep_table->entries[i].ucVddcInd; + mclk_table->entries[i].vdd_offset = + mclk_dep_table->entries[i].usVddgfxOffset; + mclk_table->entries[i].vddci = + mclk_dep_table->entries[i].usVddci; + mclk_table->entries[i].mvdd = + mclk_dep_table->entries[i].usMvdd; + mclk_table->entries[i].clk = + mclk_dep_table->entries[i].ulMclk; + } + + *pp_tonga_mclk_dep_table = mclk_table; + + return 0; +} + +static int get_sclk_voltage_dependency_table( + struct pp_hwmgr *hwmgr, + phm_ppt_v1_clock_voltage_dependency_table **pp_tonga_sclk_dep_table, + const ATOM_Tonga_SCLK_Dependency_Table * sclk_dep_table + ) +{ + uint32_t table_size, i; + phm_ppt_v1_clock_voltage_dependency_table *sclk_table; + + PP_ASSERT_WITH_CODE((0 != sclk_dep_table->ucNumEntries), + "Invalid PowerPlay Table!", return -1); + + table_size = sizeof(uint32_t) + sizeof(phm_ppt_v1_clock_voltage_dependency_record) + * sclk_dep_table->ucNumEntries; + + sclk_table = (phm_ppt_v1_clock_voltage_dependency_table *) + kzalloc(table_size, GFP_KERNEL); + + if (NULL == sclk_table) + return -ENOMEM; + + memset(sclk_table, 0x00, table_size); + + sclk_table->count = (uint32_t)sclk_dep_table->ucNumEntries; + + for (i = 0; i < sclk_dep_table->ucNumEntries; i++) { + sclk_table->entries[i].vddInd = + sclk_dep_table->entries[i].ucVddInd; + sclk_table->entries[i].vdd_offset = + sclk_dep_table->entries[i].usVddcOffset; + sclk_table->entries[i].clk = + sclk_dep_table->entries[i].ulSclk; + sclk_table->entries[i].cks_enable = + (((sclk_dep_table->entries[i].ucCKSVOffsetandDisable & 0x80) >> 7) == 0) ? 
1 : 0; + sclk_table->entries[i].cks_voffset = + (sclk_dep_table->entries[i].ucCKSVOffsetandDisable & 0x7F); + } + + *pp_tonga_sclk_dep_table = sclk_table; + + return 0; +} + +static int get_pcie_table( + struct pp_hwmgr *hwmgr, + phm_ppt_v1_pcie_table **pp_tonga_pcie_table, + const ATOM_Tonga_PCIE_Table * atom_pcie_table + ) +{ + uint32_t table_size, i, pcie_count; + phm_ppt_v1_pcie_table *pcie_table; + struct phm_ppt_v1_information *pp_table_information = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + PP_ASSERT_WITH_CODE((0 != atom_pcie_table->ucNumEntries), + "Invalid PowerPlay Table!", return -1); + + table_size = sizeof(uint32_t) + + sizeof(phm_ppt_v1_pcie_record) * atom_pcie_table->ucNumEntries; + + pcie_table = (phm_ppt_v1_pcie_table *)kzalloc(table_size, GFP_KERNEL); + + if (NULL == pcie_table) + return -ENOMEM; + + memset(pcie_table, 0x00, table_size); + + /* + * Make sure the number of pcie entries are less than or equal to sclk dpm levels. + * Since first PCIE entry is for ULV, #pcie has to be <= SclkLevel + 1. + */ + pcie_count = (pp_table_information->vdd_dep_on_sclk->count) + 1; + if ((uint32_t)atom_pcie_table->ucNumEntries <= pcie_count) + pcie_count = (uint32_t)atom_pcie_table->ucNumEntries; + else + printk(KERN_ERR "[ powerplay ] Number of Pcie Entries exceed the number of SCLK Dpm Levels! \ + Disregarding the excess entries... \n"); + + pcie_table->count = pcie_count; + + for (i = 0; i < pcie_count; i++) { + pcie_table->entries[i].gen_speed = + atom_pcie_table->entries[i].ucPCIEGenSpeed; + pcie_table->entries[i].lane_width = + atom_pcie_table->entries[i].usPCIELaneWidth; + } + + *pp_tonga_pcie_table = pcie_table; + + return 0; +} + +static int get_cac_tdp_table( + struct pp_hwmgr *hwmgr, + struct phm_cac_tdp_table **cac_tdp_table, + const PPTable_Generic_SubTable_Header * table + ) +{ + uint32_t table_size; + struct phm_cac_tdp_table *tdp_table; + + table_size = sizeof(uint32_t) + sizeof(struct phm_cac_tdp_table); + tdp_table = kzalloc(table_size, GFP_KERNEL); + + if (NULL == tdp_table) + return -ENOMEM; + + memset(tdp_table, 0x00, table_size); + + hwmgr->dyn_state.cac_dtp_table = kzalloc(table_size, GFP_KERNEL); + + if (NULL == hwmgr->dyn_state.cac_dtp_table) + return -ENOMEM; + + memset(hwmgr->dyn_state.cac_dtp_table, 0x00, table_size); + + if (table->ucRevId < 3) { + const ATOM_Tonga_PowerTune_Table *tonga_table = + (ATOM_Tonga_PowerTune_Table *)table; + tdp_table->usTDP = tonga_table->usTDP; + tdp_table->usConfigurableTDP = + tonga_table->usConfigurableTDP; + tdp_table->usTDC = tonga_table->usTDC; + tdp_table->usBatteryPowerLimit = + tonga_table->usBatteryPowerLimit; + tdp_table->usSmallPowerLimit = + tonga_table->usSmallPowerLimit; + tdp_table->usLowCACLeakage = + tonga_table->usLowCACLeakage; + tdp_table->usHighCACLeakage = + tonga_table->usHighCACLeakage; + tdp_table->usMaximumPowerDeliveryLimit = + tonga_table->usMaximumPowerDeliveryLimit; + tdp_table->usDefaultTargetOperatingTemp = + tonga_table->usTjMax; + tdp_table->usTargetOperatingTemp = + tonga_table->usTjMax; /*Set the initial temp to the same as default */ + tdp_table->usPowerTuneDataSetID = + tonga_table->usPowerTuneDataSetID; + tdp_table->usSoftwareShutdownTemp = + tonga_table->usSoftwareShutdownTemp; + tdp_table->usClockStretchAmount = + tonga_table->usClockStretchAmount; + } else { /* Fiji and newer */ + const ATOM_Fiji_PowerTune_Table *fijitable = + (ATOM_Fiji_PowerTune_Table *)table; + tdp_table->usTDP = fijitable->usTDP; + tdp_table->usConfigurableTDP = fijitable->usConfigurableTDP; + 
tdp_table->usTDC = fijitable->usTDC; + tdp_table->usBatteryPowerLimit = fijitable->usBatteryPowerLimit; + tdp_table->usSmallPowerLimit = fijitable->usSmallPowerLimit; + tdp_table->usLowCACLeakage = fijitable->usLowCACLeakage; + tdp_table->usHighCACLeakage = fijitable->usHighCACLeakage; + tdp_table->usMaximumPowerDeliveryLimit = + fijitable->usMaximumPowerDeliveryLimit; + tdp_table->usDefaultTargetOperatingTemp = + fijitable->usTjMax; + tdp_table->usTargetOperatingTemp = + fijitable->usTjMax; /*Set the initial temp to the same as default */ + tdp_table->usPowerTuneDataSetID = + fijitable->usPowerTuneDataSetID; + tdp_table->usSoftwareShutdownTemp = + fijitable->usSoftwareShutdownTemp; + tdp_table->usClockStretchAmount = + fijitable->usClockStretchAmount; + tdp_table->usTemperatureLimitHotspot = + fijitable->usTemperatureLimitHotspot; + tdp_table->usTemperatureLimitLiquid1 = + fijitable->usTemperatureLimitLiquid1; + tdp_table->usTemperatureLimitLiquid2 = + fijitable->usTemperatureLimitLiquid2; + tdp_table->usTemperatureLimitVrVddc = + fijitable->usTemperatureLimitVrVddc; + tdp_table->usTemperatureLimitVrMvdd = + fijitable->usTemperatureLimitVrMvdd; + tdp_table->usTemperatureLimitPlx = + fijitable->usTemperatureLimitPlx; + tdp_table->ucLiquid1_I2C_address = + fijitable->ucLiquid1_I2C_address; + tdp_table->ucLiquid2_I2C_address = + fijitable->ucLiquid2_I2C_address; + tdp_table->ucLiquid_I2C_Line = + fijitable->ucLiquid_I2C_Line; + tdp_table->ucVr_I2C_address = fijitable->ucVr_I2C_address; + tdp_table->ucVr_I2C_Line = fijitable->ucVr_I2C_Line; + tdp_table->ucPlx_I2C_address = fijitable->ucPlx_I2C_address; + tdp_table->ucPlx_I2C_Line = fijitable->ucPlx_I2C_Line; + } + + *cac_tdp_table = tdp_table; + + return 0; +} + +static int get_mm_clock_voltage_table( + struct pp_hwmgr *hwmgr, + phm_ppt_v1_mm_clock_voltage_dependency_table **tonga_mm_table, + const ATOM_Tonga_MM_Dependency_Table * mm_dependency_table + ) +{ + uint32_t table_size, i; + const ATOM_Tonga_MM_Dependency_Record *mm_dependency_record; + phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table; + + PP_ASSERT_WITH_CODE((0 != mm_dependency_table->ucNumEntries), + "Invalid PowerPlay Table!", return -1); + table_size = sizeof(uint32_t) + + sizeof(phm_ppt_v1_mm_clock_voltage_dependency_record) + * mm_dependency_table->ucNumEntries; + mm_table = (phm_ppt_v1_mm_clock_voltage_dependency_table *) + kzalloc(table_size, GFP_KERNEL); + + if (NULL == mm_table) + return -ENOMEM; + + memset(mm_table, 0x00, table_size); + + mm_table->count = mm_dependency_table->ucNumEntries; + + for (i = 0; i < mm_dependency_table->ucNumEntries; i++) { + mm_dependency_record = &mm_dependency_table->entries[i]; + mm_table->entries[i].vddcInd = mm_dependency_record->ucVddcInd; + mm_table->entries[i].vddgfx_offset = mm_dependency_record->usVddgfxOffset; + mm_table->entries[i].aclk = mm_dependency_record->ulAClk; + mm_table->entries[i].samclock = mm_dependency_record->ulSAMUClk; + mm_table->entries[i].eclk = mm_dependency_record->ulEClk; + mm_table->entries[i].vclk = mm_dependency_record->ulVClk; + mm_table->entries[i].dclk = mm_dependency_record->ulDClk; + } + + *tonga_mm_table = mm_table; + + return 0; +} + +/** + * Private Function used during initialization. + * Initialize clock voltage dependency + * @param hwmgr Pointer to the hardware manager. + * @param powerplay_table Pointer to the PowerPlay Table. 
+ */ +static int init_clock_voltage_dependency( + struct pp_hwmgr *hwmgr, + const ATOM_Tonga_POWERPLAYTABLE *powerplay_table + ) +{ + int result = 0; + struct phm_ppt_v1_information *pp_table_information = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + + const ATOM_Tonga_MM_Dependency_Table *mm_dependency_table = + (const ATOM_Tonga_MM_Dependency_Table *)(((unsigned long) powerplay_table) + + le16_to_cpu(powerplay_table->usMMDependencyTableOffset)); + const PPTable_Generic_SubTable_Header *pPowerTuneTable = + (const PPTable_Generic_SubTable_Header *)(((unsigned long) powerplay_table) + + le16_to_cpu(powerplay_table->usPowerTuneTableOffset)); + const ATOM_Tonga_MCLK_Dependency_Table *mclk_dep_table = + (const ATOM_Tonga_MCLK_Dependency_Table *)(((unsigned long) powerplay_table) + + le16_to_cpu(powerplay_table->usMclkDependencyTableOffset)); + const ATOM_Tonga_SCLK_Dependency_Table *sclk_dep_table = + (const ATOM_Tonga_SCLK_Dependency_Table *)(((unsigned long) powerplay_table) + + le16_to_cpu(powerplay_table->usSclkDependencyTableOffset)); + const ATOM_Tonga_Hard_Limit_Table *pHardLimits = + (const ATOM_Tonga_Hard_Limit_Table *)(((unsigned long) powerplay_table) + + le16_to_cpu(powerplay_table->usHardLimitTableOffset)); + const ATOM_Tonga_PCIE_Table *pcie_table = + (const ATOM_Tonga_PCIE_Table *)(((unsigned long) powerplay_table) + + le16_to_cpu(powerplay_table->usPCIETableOffset)); + + pp_table_information->vdd_dep_on_sclk = NULL; + pp_table_information->vdd_dep_on_mclk = NULL; + pp_table_information->mm_dep_table = NULL; + pp_table_information->pcie_table = NULL; + + if (powerplay_table->usMMDependencyTableOffset != 0) + result = get_mm_clock_voltage_table(hwmgr, + &pp_table_information->mm_dep_table, mm_dependency_table); + + if (result == 0 && powerplay_table->usPowerTuneTableOffset != 0) + result = get_cac_tdp_table(hwmgr, + &pp_table_information->cac_dtp_table, pPowerTuneTable); + + if (result == 0 && powerplay_table->usSclkDependencyTableOffset != 0) + result = get_sclk_voltage_dependency_table(hwmgr, + &pp_table_information->vdd_dep_on_sclk, sclk_dep_table); + + if (result == 0 && powerplay_table->usMclkDependencyTableOffset != 0) + result = get_mclk_voltage_dependency_table(hwmgr, + &pp_table_information->vdd_dep_on_mclk, mclk_dep_table); + + if (result == 0 && powerplay_table->usPCIETableOffset != 0) + result = get_pcie_table(hwmgr, + &pp_table_information->pcie_table, pcie_table); + + if (result == 0 && powerplay_table->usHardLimitTableOffset != 0) + result = get_hard_limits(hwmgr, + &pp_table_information->max_clock_voltage_on_dc, pHardLimits); + + hwmgr->dyn_state.max_clock_voltage_on_dc.sclk = + pp_table_information->max_clock_voltage_on_dc.sclk; + hwmgr->dyn_state.max_clock_voltage_on_dc.mclk = + pp_table_information->max_clock_voltage_on_dc.mclk; + hwmgr->dyn_state.max_clock_voltage_on_dc.vddc = + pp_table_information->max_clock_voltage_on_dc.vddc; + hwmgr->dyn_state.max_clock_voltage_on_dc.vddci = + pp_table_information->max_clock_voltage_on_dc.vddci; + + if (result == 0 && (NULL != pp_table_information->vdd_dep_on_mclk) + && (0 != pp_table_information->vdd_dep_on_mclk->count)) + result = get_valid_clk(hwmgr, &pp_table_information->valid_mclk_values, + pp_table_information->vdd_dep_on_mclk); + + if (result == 0 && (NULL != pp_table_information->vdd_dep_on_sclk) + && (0 != pp_table_information->vdd_dep_on_sclk->count)) + result = get_valid_clk(hwmgr, &pp_table_information->valid_sclk_values, + pp_table_information->vdd_dep_on_sclk); + + return result; +} + +/** 
Retrieves the Overdrive limits from the PowerPlay table. + * The maximum overdrive engine clock and memory clock are read from the table and + * placed into the platform descriptor. + * + * @param hwmgr source of the PowerPlay table and owner of the platform descriptor to be updated. + * @param powerplay_table the address of the PowerPlay table. + * + * @return Always 0. + */ +static int init_over_drive_limits( + struct pp_hwmgr *hwmgr, + const ATOM_Tonga_POWERPLAYTABLE *powerplay_table) +{ + hwmgr->platform_descriptor.overdriveLimit.engineClock = + le32_to_cpu(powerplay_table->ulMaxODEngineClock); + hwmgr->platform_descriptor.overdriveLimit.memoryClock = + le32_to_cpu(powerplay_table->ulMaxODMemoryClock); + + hwmgr->platform_descriptor.minOverdriveVDDC = 0; + hwmgr->platform_descriptor.maxOverdriveVDDC = 0; + hwmgr->platform_descriptor.overdriveVDDCStep = 0; + + if (hwmgr->platform_descriptor.overdriveLimit.engineClock > 0 + && hwmgr->platform_descriptor.overdriveLimit.memoryClock > 0) { + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ACOverdriveSupport); + } + + return 0; +} + +/** + * Private Function used during initialization. + * Initialize the thermal controller and fan parameters from the PowerPlay table. + * @param hwmgr Pointer to the hardware manager. + * @param powerplay_table Pointer to the PowerPlay Table. + * @exception -1 if the thermal controller table is missing or the fan table format is unsupported. + */ +static int init_thermal_controller( + struct pp_hwmgr *hwmgr, + const ATOM_Tonga_POWERPLAYTABLE *powerplay_table + ) +{ + const PPTable_Generic_SubTable_Header *fan_table; + ATOM_Tonga_Thermal_Controller *thermal_controller; + + thermal_controller = (ATOM_Tonga_Thermal_Controller *) + (((unsigned long)powerplay_table) + + le16_to_cpu(powerplay_table->usThermalControllerOffset)); + PP_ASSERT_WITH_CODE((0 != powerplay_table->usThermalControllerOffset), + "Thermal controller table not set!", return -1); + + hwmgr->thermal_controller.ucType = thermal_controller->ucType; + hwmgr->thermal_controller.ucI2cLine = thermal_controller->ucI2cLine; + hwmgr->thermal_controller.ucI2cAddress = thermal_controller->ucI2cAddress; + + hwmgr->thermal_controller.fanInfo.bNoFan = + (0 != (thermal_controller->ucFanParameters & ATOM_TONGA_PP_FANPARAMETERS_NOFAN)); + + hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution = + thermal_controller->ucFanParameters & + ATOM_TONGA_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK; + + hwmgr->thermal_controller.fanInfo.ulMinRPM + = thermal_controller->ucFanMinRPM * 100UL; + hwmgr->thermal_controller.fanInfo.ulMaxRPM + = thermal_controller->ucFanMaxRPM * 100UL; + + set_hw_cap( + hwmgr, + ATOM_TONGA_PP_THERMALCONTROLLER_NONE != hwmgr->thermal_controller.ucType, + PHM_PlatformCaps_ThermalController + ); + + if (0 == powerplay_table->usFanTableOffset) + return 0; + + fan_table = (const PPTable_Generic_SubTable_Header *) + (((unsigned long)powerplay_table) + + le16_to_cpu(powerplay_table->usFanTableOffset)); + + PP_ASSERT_WITH_CODE((0 != powerplay_table->usFanTableOffset), + "Fan table not set!", return -1); + PP_ASSERT_WITH_CODE((0 < fan_table->ucRevId), + "Unsupported fan table format!", return -1); + + hwmgr->thermal_controller.advanceFanControlParameters.ulCycleDelay + = 100000; + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_MicrocodeFanControl); + + if (fan_table->ucRevId < 8) { + const ATOM_Tonga_Fan_Table *tonga_fan_table = + (ATOM_Tonga_Fan_Table 
*)fan_table; + hwmgr->thermal_controller.advanceFanControlParameters.ucTHyst + = tonga_fan_table->ucTHyst; + hwmgr->thermal_controller.advanceFanControlParameters.usTMin + = tonga_fan_table->usTMin; + hwmgr->thermal_controller.advanceFanControlParameters.usTMed + = tonga_fan_table->usTMed; + hwmgr->thermal_controller.advanceFanControlParameters.usTHigh + = tonga_fan_table->usTHigh; + hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin + = tonga_fan_table->usPWMMin; + hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed + = tonga_fan_table->usPWMMed; + hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh + = tonga_fan_table->usPWMHigh; + hwmgr->thermal_controller.advanceFanControlParameters.usTMax + = 10900; /* hard coded */ + hwmgr->thermal_controller.advanceFanControlParameters.usTMax + = tonga_fan_table->usTMax; + hwmgr->thermal_controller.advanceFanControlParameters.ucFanControlMode + = tonga_fan_table->ucFanControlMode; + hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanPWM + = tonga_fan_table->usFanPWMMax; + hwmgr->thermal_controller.advanceFanControlParameters.usDefaultFanOutputSensitivity + = 4836; + hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity + = tonga_fan_table->usFanOutputSensitivity; + hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanRPM + = tonga_fan_table->usFanRPMMax; + hwmgr->thermal_controller.advanceFanControlParameters.ulMinFanSCLKAcousticLimit + = (tonga_fan_table->ulMinFanSCLKAcousticLimit / 100); /* PPTable stores it in 10Khz unit for 2 decimal places. SMC wants MHz. */ + hwmgr->thermal_controller.advanceFanControlParameters.ucTargetTemperature + = tonga_fan_table->ucTargetTemperature; + hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit + = tonga_fan_table->ucMinimumPWMLimit; + } else { + const ATOM_Fiji_Fan_Table *fiji_fan_table = + (ATOM_Fiji_Fan_Table *)fan_table; + hwmgr->thermal_controller.advanceFanControlParameters.ucTHyst + = fiji_fan_table->ucTHyst; + hwmgr->thermal_controller.advanceFanControlParameters.usTMin + = fiji_fan_table->usTMin; + hwmgr->thermal_controller.advanceFanControlParameters.usTMed + = fiji_fan_table->usTMed; + hwmgr->thermal_controller.advanceFanControlParameters.usTHigh + = fiji_fan_table->usTHigh; + hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin + = fiji_fan_table->usPWMMin; + hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed + = fiji_fan_table->usPWMMed; + hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh + = fiji_fan_table->usPWMHigh; + hwmgr->thermal_controller.advanceFanControlParameters.usTMax + = fiji_fan_table->usTMax; + hwmgr->thermal_controller.advanceFanControlParameters.ucFanControlMode + = fiji_fan_table->ucFanControlMode; + hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanPWM + = fiji_fan_table->usFanPWMMax; + hwmgr->thermal_controller.advanceFanControlParameters.usDefaultFanOutputSensitivity + = 4836; + hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity + = fiji_fan_table->usFanOutputSensitivity; + hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanRPM + = fiji_fan_table->usFanRPMMax; + hwmgr->thermal_controller.advanceFanControlParameters.ulMinFanSCLKAcousticLimit + = (fiji_fan_table->ulMinFanSCLKAcousticLimit / 100); /* PPTable stores it in 10Khz unit for 2 decimal places. SMC wants MHz. 
*/ + hwmgr->thermal_controller.advanceFanControlParameters.ucTargetTemperature + = fiji_fan_table->ucTargetTemperature; + hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit + = fiji_fan_table->ucMinimumPWMLimit; + + hwmgr->thermal_controller.advanceFanControlParameters.usFanGainEdge + = fiji_fan_table->usFanGainEdge; + hwmgr->thermal_controller.advanceFanControlParameters.usFanGainHotspot + = fiji_fan_table->usFanGainHotspot; + hwmgr->thermal_controller.advanceFanControlParameters.usFanGainLiquid + = fiji_fan_table->usFanGainLiquid; + hwmgr->thermal_controller.advanceFanControlParameters.usFanGainVrVddc + = fiji_fan_table->usFanGainVrVddc; + hwmgr->thermal_controller.advanceFanControlParameters.usFanGainVrMvdd + = fiji_fan_table->usFanGainVrMvdd; + hwmgr->thermal_controller.advanceFanControlParameters.usFanGainPlx + = fiji_fan_table->usFanGainPlx; + hwmgr->thermal_controller.advanceFanControlParameters.usFanGainHbm + = fiji_fan_table->usFanGainHbm; + } + + return 0; +} + +/** + * Private Function used during initialization. + * Inspect the PowerPlay table for obvious signs of corruption. + * @param hwmgr Pointer to the hardware manager. + * @param powerplay_table Pointer to the PowerPlay Table. + * @exception 2 if the powerplay table is incorrect. + */ +static int check_powerplay_tables( + struct pp_hwmgr *hwmgr, + const ATOM_Tonga_POWERPLAYTABLE *powerplay_table + ) +{ + const ATOM_Tonga_State_Array *state_arrays; + + state_arrays = (ATOM_Tonga_State_Array *)(((unsigned long)powerplay_table) + + le16_to_cpu(powerplay_table->usStateArrayOffset)); + + PP_ASSERT_WITH_CODE((ATOM_Tonga_TABLE_REVISION_TONGA <= + powerplay_table->sHeader.ucTableFormatRevision), + "Unsupported PPTable format!", return -1); + PP_ASSERT_WITH_CODE((0 != powerplay_table->usStateArrayOffset), + "State table is not set!", return -1); + PP_ASSERT_WITH_CODE((0 < powerplay_table->sHeader.usStructureSize), + "Invalid PowerPlay Table!", return -1); + PP_ASSERT_WITH_CODE((0 < state_arrays->ucNumEntries), + "Invalid PowerPlay Table!", return -1); + + return 0; +} + +int tonga_pp_tables_initialize(struct pp_hwmgr *hwmgr) +{ + int result = 0; + const ATOM_Tonga_POWERPLAYTABLE *powerplay_table; + + hwmgr->pptable = kzalloc(sizeof(struct phm_ppt_v1_information), GFP_KERNEL); + + PP_ASSERT_WITH_CODE((NULL != hwmgr->pptable), + "Failed to allocate hwmgr->pptable!", return -ENOMEM); + + memset(hwmgr->pptable, 0x00, sizeof(struct phm_ppt_v1_information)); + + powerplay_table = get_powerplay_table(hwmgr); + + PP_ASSERT_WITH_CODE((NULL != powerplay_table), + "Missing PowerPlay Table!", return -1); + + result = check_powerplay_tables(hwmgr, powerplay_table); + + PP_ASSERT_WITH_CODE((result == 0), + "check_powerplay_tables failed", return result); + + result = set_platform_caps(hwmgr, + le32_to_cpu(powerplay_table->ulPlatformCaps)); + + PP_ASSERT_WITH_CODE((result == 0), + "set_platform_caps failed", return result); + + result = init_thermal_controller(hwmgr, powerplay_table); + + PP_ASSERT_WITH_CODE((result == 0), + "init_thermal_controller failed", return result); + + result = init_over_drive_limits(hwmgr, powerplay_table); + + PP_ASSERT_WITH_CODE((result == 0), + "init_over_drive_limits failed", return result); + + result = init_clock_voltage_dependency(hwmgr, powerplay_table); + + PP_ASSERT_WITH_CODE((result == 0), + "init_clock_voltage_dependency failed", return result); + + result = init_dpm_2_parameters(hwmgr, powerplay_table); + + PP_ASSERT_WITH_CODE((result == 0), + "init_dpm_2_parameters failed", return 
result); + + return result; +} + +int tonga_pp_tables_uninitialize(struct pp_hwmgr *hwmgr) +{ + int result = 0; + struct phm_ppt_v1_information *pp_table_information = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + + if (NULL != hwmgr->soft_pp_table) { + kfree(hwmgr->soft_pp_table); + hwmgr->soft_pp_table = NULL; + } + + if (NULL != pp_table_information->vdd_dep_on_sclk) + pp_table_information->vdd_dep_on_sclk = NULL; + + if (NULL != pp_table_information->vdd_dep_on_mclk) + pp_table_information->vdd_dep_on_mclk = NULL; + + if (NULL != pp_table_information->valid_mclk_values) + pp_table_information->valid_mclk_values = NULL; + + if (NULL != pp_table_information->valid_sclk_values) + pp_table_information->valid_sclk_values = NULL; + + if (NULL != pp_table_information->vddc_lookup_table) + pp_table_information->vddc_lookup_table = NULL; + + if (NULL != pp_table_information->vddgfx_lookup_table) + pp_table_information->vddgfx_lookup_table = NULL; + + if (NULL != pp_table_information->mm_dep_table) + pp_table_information->mm_dep_table = NULL; + + if (NULL != pp_table_information->cac_dtp_table) + pp_table_information->cac_dtp_table = NULL; + + if (NULL != hwmgr->dyn_state.cac_dtp_table) + hwmgr->dyn_state.cac_dtp_table = NULL; + + if (NULL != pp_table_information->ppm_parameter_table) + pp_table_information->ppm_parameter_table = NULL; + + if (NULL != pp_table_information->pcie_table) + pp_table_information->pcie_table = NULL; + + if (NULL != hwmgr->pptable) { + kfree(hwmgr->pptable); + hwmgr->pptable = NULL; + } + + return result; +} + +const struct pp_table_func tonga_pptable_funcs = { + .pptable_init = tonga_pp_tables_initialize, + .pptable_fini = tonga_pp_tables_uninitialize, +}; + +int tonga_get_number_of_powerplay_table_entries(struct pp_hwmgr *hwmgr) +{ + const ATOM_Tonga_State_Array * state_arrays; + const ATOM_Tonga_POWERPLAYTABLE *pp_table = get_powerplay_table(hwmgr); + + PP_ASSERT_WITH_CODE((NULL != pp_table), + "Missing PowerPlay Table!", return -1); + PP_ASSERT_WITH_CODE((pp_table->sHeader.ucTableFormatRevision >= + ATOM_Tonga_TABLE_REVISION_TONGA), + "Incorrect PowerPlay table revision!", return -1); + + state_arrays = (ATOM_Tonga_State_Array *)(((unsigned long)pp_table) + + le16_to_cpu(pp_table->usStateArrayOffset)); + + return (uint32_t)(state_arrays->ucNumEntries); +} + +/** +* Private function to convert flags stored in the BIOS to software flags in PowerPlay. +*/ +static uint32_t make_classification_flags(struct pp_hwmgr *hwmgr, + uint16_t classification, uint16_t classification2) +{ + uint32_t result = 0; + + if (classification & ATOM_PPLIB_CLASSIFICATION_BOOT) + result |= PP_StateClassificationFlag_Boot; + + if (classification & ATOM_PPLIB_CLASSIFICATION_THERMAL) + result |= PP_StateClassificationFlag_Thermal; + + if (classification & ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE) + result |= PP_StateClassificationFlag_LimitedPowerSource; + + if (classification & ATOM_PPLIB_CLASSIFICATION_REST) + result |= PP_StateClassificationFlag_Rest; + + if (classification & ATOM_PPLIB_CLASSIFICATION_FORCED) + result |= PP_StateClassificationFlag_Forced; + + if (classification & ATOM_PPLIB_CLASSIFICATION_ACPI) + result |= PP_StateClassificationFlag_ACPI; + + if (classification2 & ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2) + result |= PP_StateClassificationFlag_LimitedPowerSource_2; + + return result; +} + +/** +* Create a Power State out of an entry in the PowerPlay table. +* This function is called by the hardware back-end. 
+* @param hwmgr Pointer to the hardware manager. +* @param entry_index The index of the entry to be extracted from the table. +* @param power_state The address of the PowerState instance being created. +* @return -1 if the entry cannot be retrieved. +*/ +int tonga_get_powerplay_table_entry(struct pp_hwmgr *hwmgr, + uint32_t entry_index, struct pp_power_state *power_state, + int (*call_back_func)(struct pp_hwmgr *, void *, + struct pp_power_state *, void *, uint32_t)) +{ + int result = 0; + const ATOM_Tonga_State_Array * state_arrays; + const ATOM_Tonga_State *state_entry; + const ATOM_Tonga_POWERPLAYTABLE *pp_table = get_powerplay_table(hwmgr); + + PP_ASSERT_WITH_CODE((NULL != pp_table), "Missing PowerPlay Table!", return -1;); + power_state->classification.bios_index = entry_index; + + if (pp_table->sHeader.ucTableFormatRevision >= + ATOM_Tonga_TABLE_REVISION_TONGA) { + state_arrays = (ATOM_Tonga_State_Array *)(((unsigned long)pp_table) + + le16_to_cpu(pp_table->usStateArrayOffset)); + + PP_ASSERT_WITH_CODE((0 < pp_table->usStateArrayOffset), + "Invalid PowerPlay Table State Array Offset.", return -1); + PP_ASSERT_WITH_CODE((0 < state_arrays->ucNumEntries), + "Invalid PowerPlay Table State Array.", return -1); + PP_ASSERT_WITH_CODE((entry_index <= state_arrays->ucNumEntries), + "Invalid PowerPlay Table State Array Entry.", return -1); + + state_entry = &(state_arrays->states[entry_index]); + + result = call_back_func(hwmgr, (void *)state_entry, power_state, + (void *)pp_table, + make_classification_flags(hwmgr, + le16_to_cpu(state_entry->usClassification), + le16_to_cpu(state_entry->usClassification2))); + } + + if (!result && (power_state->classification.flags & + PP_StateClassificationFlag_Boot)) + result = hwmgr->hwmgr_func->patch_boot_state(hwmgr, &(power_state->hardware)); + + return result; +} diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.h b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.h new file mode 100644 index 000000000000..d24b8887f466 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.h @@ -0,0 +1,35 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#ifndef TONGA_PROCESSPPTABLES_H +#define TONGA_PROCESSPPTABLES_H + +#include "hwmgr.h" + +extern const struct pp_table_func tonga_pptable_funcs; +extern int tonga_get_number_of_powerplay_table_entries(struct pp_hwmgr *hwmgr); +extern int tonga_get_powerplay_table_entry(struct pp_hwmgr *hwmgr, uint32_t entry_index, + struct pp_power_state *power_state, int (*call_back_func)(struct pp_hwmgr *, void *, + struct pp_power_state *, void *, uint32_t)); + +#endif + diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_thermal.c new file mode 100644 index 000000000000..a188174747c9 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_thermal.c @@ -0,0 +1,590 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#include +#include "tonga_thermal.h" +#include "tonga_hwmgr.h" +#include "tonga_smumgr.h" +#include "tonga_ppsmc.h" +#include "smu/smu_7_1_2_d.h" +#include "smu/smu_7_1_2_sh_mask.h" + +/** +* Get Fan Speed Control Parameters. +* @param hwmgr the address of the powerplay hardware manager. +* @param pSpeed is the address of the structure where the result is to be placed. +* @exception Always succeeds except if we cannot zero out the output structure. +*/ +int tonga_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr, struct phm_fan_speed_info *fan_speed_info) +{ + + if (hwmgr->thermal_controller.fanInfo.bNoFan) + return 0; + + fan_speed_info->supports_percent_read = true; + fan_speed_info->supports_percent_write = true; + fan_speed_info->min_percent = 0; + fan_speed_info->max_percent = 100; + + if (0 != hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution) { + fan_speed_info->supports_rpm_read = true; + fan_speed_info->supports_rpm_write = true; + fan_speed_info->min_rpm = hwmgr->thermal_controller.fanInfo.ulMinRPM; + fan_speed_info->max_rpm = hwmgr->thermal_controller.fanInfo.ulMaxRPM; + } else { + fan_speed_info->min_rpm = 0; + fan_speed_info->max_rpm = 0; + } + + return 0; +} + +/** +* Get Fan Speed in percent. +* @param hwmgr the address of the powerplay hardware manager. +* @param pSpeed is the address of the structure where the result is to be placed. +* @exception Fails is the 100% setting appears to be 0. 
+*/ +int tonga_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t *speed) +{ + uint32_t duty100; + uint32_t duty; + uint64_t tmp64; + + if (hwmgr->thermal_controller.fanInfo.bNoFan) + return 0; + + duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL1, FMAX_DUTY100); + duty = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_STATUS, FDO_PWM_DUTY); + + if (0 == duty100) + return -EINVAL; + + + tmp64 = (uint64_t)duty * 100; + do_div(tmp64, duty100); + *speed = (uint32_t)tmp64; + + if (*speed > 100) + *speed = 100; + + return 0; +} + +/** +* Get Fan Speed in RPM. +* @param hwmgr the address of the powerplay hardware manager. +* @param speed is the address of the structure where the result is to be placed. +* @exception Returns not supported if no fan is found or if pulses per revolution are not set +*/ +int tonga_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed) +{ + return 0; +} + +/** +* Set Fan Speed Control to static mode, so that the user can decide what speed to use. +* @param hwmgr the address of the powerplay hardware manager. +* mode the fan control mode, 0 default, 1 by percent, 5, by RPM +* @exception Should always succeed. +*/ +int tonga_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode) +{ + + if (hwmgr->fan_ctrl_is_in_default_mode) { + hwmgr->fan_ctrl_default_mode = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, FDO_PWM_MODE); + hwmgr->tmin = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, TMIN); + hwmgr->fan_ctrl_is_in_default_mode = false; + } + + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, TMIN, 0); + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, FDO_PWM_MODE, mode); + + return 0; +} + +/** +* Reset Fan Speed Control to default mode. +* @param hwmgr the address of the powerplay hardware manager. +* @exception Should always succeed. +*/ +int tonga_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr) +{ + if (!hwmgr->fan_ctrl_is_in_default_mode) { + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, FDO_PWM_MODE, hwmgr->fan_ctrl_default_mode); + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, TMIN, hwmgr->tmin); + hwmgr->fan_ctrl_is_in_default_mode = true; + } + + return 0; +} + +int tonga_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr) +{ + int result; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_ODFuzzyFanControlSupport)) { + cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, FAN_CONTROL_FUZZY); + result = (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StartFanControl) == 0) ? 0 : -EINVAL; +/* + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_FanSpeedInTableIsRPM)) + hwmgr->set_max_fan_rpm_output(hwmgr, hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM); + else + hwmgr->set_max_fan_pwm_output(hwmgr, hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanPWM); +*/ + } else { + cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, FAN_CONTROL_TABLE); + result = (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StartFanControl) == 0) ? 0 : -EINVAL; + } +/* TO DO FOR SOME DEVICE ID 0X692b, send this msg return invalid command. 
+ if (result == 0 && hwmgr->thermal_controller.advanceFanControlParameters.ucTargetTemperature != 0) + result = (0 == smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_SetFanTemperatureTarget, \ + hwmgr->thermal_controller.advanceFanControlParameters.ucTargetTemperature) ? 0 : -EINVAL); +*/ + return result; +} + + +int tonga_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr) +{ + return (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StopFanControl) == 0) ? 0 : -EINVAL; +} + +/** +* Set Fan Speed in percent. +* @param hwmgr the address of the powerplay hardware manager. +* @param speed is the percentage value (0% - 100%) to be set. +* @exception Fails if the 100% duty cycle setting reads back as 0. +*/ +int tonga_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t speed) +{ + uint32_t duty100; + uint32_t duty; + uint64_t tmp64; + + if (hwmgr->thermal_controller.fanInfo.bNoFan) + return -EINVAL; + + if (speed > 100) + speed = 100; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl)) + tonga_fan_ctrl_stop_smc_fan_control(hwmgr); + + duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL1, FMAX_DUTY100); + + if (0 == duty100) + return -EINVAL; + + tmp64 = (uint64_t)speed * 100; + do_div(tmp64, duty100); + duty = (uint32_t)tmp64; + + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL0, FDO_STATIC_DUTY, duty); + + return tonga_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC); +} + +/** +* Reset Fan Speed to default. +* @param hwmgr the address of the powerplay hardware manager. +* @exception Always succeeds. +*/ +int tonga_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr) +{ + int result; + + if (hwmgr->thermal_controller.fanInfo.bNoFan) + return 0; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl)) { + result = tonga_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC); + if (0 == result) + result = tonga_fan_ctrl_start_smc_fan_control(hwmgr); + } else + result = tonga_fan_ctrl_set_default_mode(hwmgr); + + return result; +} + +/** +* Set Fan Speed in RPM. +* @param hwmgr the address of the powerplay hardware manager. +* @param speed is the fan speed in RPM (between the minimum and maximum supported RPM) to be set. +* @exception Fails if the speed does not lie between the minimum and maximum. +*/ +int tonga_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed) +{ + return 0; +} + +/** +* Reads the current temperature from the Tonga thermal controller. +* +* @param hwmgr The address of the hardware manager. +*/ +int tonga_thermal_get_temperature(struct pp_hwmgr *hwmgr) +{ + int temp; + + temp = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_MULT_THERMAL_STATUS, CTF_TEMP); + +/* Bit 9 means the reading is lower than the lowest usable value. */ + if (0 != (0x200 & temp)) + temp = TONGA_THERMAL_MAXIMUM_TEMP_READING; + else + temp = (temp & 0x1ff); + + temp = temp * PP_TEMPERATURE_UNITS_PER_CENTIGRADES; + + return temp; +} + +/** +* Set the requested temperature range for high and low alert signals. +* +* @param hwmgr The address of the hardware manager. +* @param range Temperature range to be programmed for high and low alert signals +* @exception PP_Result_BadInput if the input data is not valid. 
+*/ +static int tonga_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, uint32_t low_temp, uint32_t high_temp) +{ + uint32_t low = TONGA_THERMAL_MINIMUM_ALERT_TEMP * PP_TEMPERATURE_UNITS_PER_CENTIGRADES; + uint32_t high = TONGA_THERMAL_MAXIMUM_ALERT_TEMP * PP_TEMPERATURE_UNITS_PER_CENTIGRADES; + + if (low < low_temp) + low = low_temp; + if (high > high_temp) + high = high_temp; + + if (low > high) + return -EINVAL; + + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_INT, DIG_THERM_INTH, (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)); + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_INT, DIG_THERM_INTL, (low / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)); + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_CTRL, DIG_THERM_DPM, (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)); + + return 0; +} + +/** +* Programs thermal controller one-time setting registers +* +* @param hwmgr The address of the hardware manager. +*/ +static int tonga_thermal_initialize(struct pp_hwmgr *hwmgr) +{ + if (0 != hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution) + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + CG_TACH_CTRL, EDGE_PER_REV, + hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution - 1); + + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, TACH_PWM_RESP_RATE, 0x28); + + return 0; +} + +/** +* Enable thermal alerts on the RV770 thermal controller. +* +* @param hwmgr The address of the hardware manager. +*/ +static int tonga_thermal_enable_alert(struct pp_hwmgr *hwmgr) +{ + uint32_t alert; + + alert = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_INT, THERM_INT_MASK); + alert &= ~(TONGA_THERMAL_HIGH_ALERT_MASK | TONGA_THERMAL_LOW_ALERT_MASK); + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_INT, THERM_INT_MASK, alert); + + /* send message to SMU to enable internal thermal interrupts */ + return (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Thermal_Cntl_Enable) == 0) ? 0 : -1; +} + +/** +* Disable thermal alerts on the RV770 thermal controller. +* @param hwmgr The address of the hardware manager. +*/ +static int tonga_thermal_disable_alert(struct pp_hwmgr *hwmgr) +{ + uint32_t alert; + + alert = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_INT, THERM_INT_MASK); + alert |= (TONGA_THERMAL_HIGH_ALERT_MASK | TONGA_THERMAL_LOW_ALERT_MASK); + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_INT, THERM_INT_MASK, alert); + + /* send message to SMU to disable internal thermal interrupts */ + return (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Thermal_Cntl_Disable) == 0) ? 0 : -1; +} + +/** +* Uninitialize the thermal controller. +* Currently just disables alerts. +* @param hwmgr The address of the hardware manager. +*/ +int tonga_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr) +{ + int result = tonga_thermal_disable_alert(hwmgr); + + if (hwmgr->thermal_controller.fanInfo.bNoFan) + tonga_fan_ctrl_set_default_mode(hwmgr); + + return result; +} + +/** +* Set up the fan table to control the fan using the SMC. +* @param hwmgr the address of the powerplay hardware manager. 
+* @param pInput the pointer to input data +* @param pOutput the pointer to output data +* @param pStorage the pointer to temporary storage +* @param Result the last failure code +* @return result from set temperature range routine +*/ +int tf_tonga_thermal_setup_fan_table(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result) +{ + struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); + SMU72_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE }; + uint32_t duty100; + uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2; + uint16_t fdo_min, slope1, slope2; + uint32_t reference_clock; + int res; + uint64_t tmp64; + + if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl)) + return 0; + + if (0 == data->fan_table_start) { + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl); + return 0; + } + + duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL1, FMAX_DUTY100); + + if (0 == duty100) { + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl); + return 0; + } + + tmp64 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin * duty100; + do_div(tmp64, 10000); + fdo_min = (uint16_t)tmp64; + + t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed - hwmgr->thermal_controller.advanceFanControlParameters.usTMin; + t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh - hwmgr->thermal_controller.advanceFanControlParameters.usTMed; + + pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed - hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin; + pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh - hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed; + + slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100); + slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100); + + fan_table.TempMin = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMin) / 100); + fan_table.TempMed = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMed) / 100); + fan_table.TempMax = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMax) / 100); + + fan_table.Slope1 = cpu_to_be16(slope1); + fan_table.Slope2 = cpu_to_be16(slope2); + + fan_table.FdoMin = cpu_to_be16(fdo_min); + + fan_table.HystDown = cpu_to_be16(hwmgr->thermal_controller.advanceFanControlParameters.ucTHyst); + + fan_table.HystUp = cpu_to_be16(1); + + fan_table.HystSlope = cpu_to_be16(1); + + fan_table.TempRespLim = cpu_to_be16(5); + + reference_clock = tonga_get_xclk(hwmgr); + + fan_table.RefreshPeriod = cpu_to_be32((hwmgr->thermal_controller.advanceFanControlParameters.ulCycleDelay * reference_clock) / 1600); + + fan_table.FdoMax = cpu_to_be16((uint16_t)duty100); + + fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_MULT_THERMAL_CTRL, TEMP_SEL); + + fan_table.FanControl_GL_Flag = 1; + + res = tonga_copy_bytes_to_smc(hwmgr->smumgr, data->fan_table_start, (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table), data->sram_end); +/* TO DO FOR SOME DEVICE ID 0X692b, send this msg return invalid command. 
+ if (res == 0 && hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit != 0) + res = (0 == smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_SetFanMinPwm, \ + hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit) ? 0 : -1); + + if (res == 0 && hwmgr->thermal_controller.advanceFanControlParameters.ulMinFanSCLKAcousticLimit != 0) + res = (0 == smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_SetFanSclkTarget, \ + hwmgr->thermal_controller.advanceFanControlParameters.ulMinFanSCLKAcousticLimit) ? 0 : -1); + + if (0 != res) + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl); +*/ + return 0; +} + +/** +* Start the fan control on the SMC. +* @param hwmgr the address of the powerplay hardware manager. +* @param pInput the pointer to input data +* @param pOutput the pointer to output data +* @param pStorage the pointer to temporary storage +* @param Result the last failure code +* @return result from set temperature range routine +*/ +int tf_tonga_thermal_start_smc_fan_control(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result) +{ +/* If the fantable setup has failed we could have disabled PHM_PlatformCaps_MicrocodeFanControl even after this function was included in the table. + * Make sure that we still think controlling the fan is OK. +*/ + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl)) { + tonga_fan_ctrl_start_smc_fan_control(hwmgr); + tonga_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC); + } + + return 0; +} + +/** +* Set temperature range for high and low alerts +* @param hwmgr the address of the powerplay hardware manager. +* @param pInput the pointer to input data +* @param pOutput the pointer to output data +* @param pStorage the pointer to temporary storage +* @param Result the last failure code +* @return result from set temperature range routine +*/ +int tf_tonga_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result) +{ + struct PP_TemperatureRange *range = (struct PP_TemperatureRange *)input; + + if (range == NULL) + return -EINVAL; + + return tonga_thermal_set_temperature_range(hwmgr, range->min, range->max); +} + +/** +* Programs one-time setting registers +* @param hwmgr the address of the powerplay hardware manager. +* @param pInput the pointer to input data +* @param pOutput the pointer to output data +* @param pStorage the pointer to temporary storage +* @param Result the last failure code +* @return result from initialize thermal controller routine +*/ +int tf_tonga_thermal_initialize(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result) +{ + return tonga_thermal_initialize(hwmgr); +} + +/** +* Enable high and low alerts +* @param hwmgr the address of the powerplay hardware manager. +* @param pInput the pointer to input data +* @param pOutput the pointer to output data +* @param pStorage the pointer to temporary storage +* @param Result the last failure code +* @return result from enable alert routine +*/ +int tf_tonga_thermal_enable_alert(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result) +{ + return tonga_thermal_enable_alert(hwmgr); +} + +/** +* Disable high and low alerts +* @param hwmgr the address of the powerplay hardware manager. 
+* @param pInput the pointer to input data +* @param pOutput the pointer to output data +* @param pStorage the pointer to temporary storage +* @param Result the last failure code +* @return result from disable alert routine +*/ +static int tf_tonga_thermal_disable_alert(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result) +{ + return tonga_thermal_disable_alert(hwmgr); +} + +static struct phm_master_table_item tonga_thermal_start_thermal_controller_master_list[] = { + { NULL, tf_tonga_thermal_initialize }, + { NULL, tf_tonga_thermal_set_temperature_range }, + { NULL, tf_tonga_thermal_enable_alert }, +/* We should restrict performance levels to low before we halt the SMC. + * On the other hand we are still in boot state when we do this so it would be pointless. + * If this assumption changes we have to revisit this table. + */ + { NULL, tf_tonga_thermal_setup_fan_table}, + { NULL, tf_tonga_thermal_start_smc_fan_control}, + { NULL, NULL } +}; + +static struct phm_master_table_header tonga_thermal_start_thermal_controller_master = { + 0, + PHM_MasterTableFlag_None, + tonga_thermal_start_thermal_controller_master_list +}; + +static struct phm_master_table_item tonga_thermal_set_temperature_range_master_list[] = { + { NULL, tf_tonga_thermal_disable_alert}, + { NULL, tf_tonga_thermal_set_temperature_range}, + { NULL, tf_tonga_thermal_enable_alert}, + { NULL, NULL } +}; + +struct phm_master_table_header tonga_thermal_set_temperature_range_master = { + 0, + PHM_MasterTableFlag_None, + tonga_thermal_set_temperature_range_master_list +}; + +int tonga_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr) +{ + if (!hwmgr->thermal_controller.fanInfo.bNoFan) + tonga_fan_ctrl_set_default_mode(hwmgr); + return 0; +} + +/** +* Initializes the thermal controller related functions in the Hardware Manager structure. +* @param hwmgr The address of the hardware manager. +* @exception Any error code from the low-level communication. +*/ +int pp_tonga_thermal_initialize(struct pp_hwmgr *hwmgr) +{ + int result; + + result = phm_construct_table(hwmgr, &tonga_thermal_set_temperature_range_master, &(hwmgr->set_temperature_range)); + + if (0 == result) { + result = phm_construct_table(hwmgr, + &tonga_thermal_start_thermal_controller_master, + &(hwmgr->start_thermal_controller)); + if (0 != result) + phm_destroy_table(hwmgr, &(hwmgr->set_temperature_range)); + } + + if (0 == result) + hwmgr->fan_ctrl_is_in_default_mode = true; + return result; +} + diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_thermal.h b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_thermal.h new file mode 100644 index 000000000000..aa335f267e25 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_thermal.h @@ -0,0 +1,61 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef TONGA_THERMAL_H +#define TONGA_THERMAL_H + +#include "hwmgr.h" + +#define TONGA_THERMAL_HIGH_ALERT_MASK 0x1 +#define TONGA_THERMAL_LOW_ALERT_MASK 0x2 + +#define TONGA_THERMAL_MINIMUM_TEMP_READING -256 +#define TONGA_THERMAL_MAXIMUM_TEMP_READING 255 + +#define TONGA_THERMAL_MINIMUM_ALERT_TEMP 0 +#define TONGA_THERMAL_MAXIMUM_ALERT_TEMP 255 + +#define FDO_PWM_MODE_STATIC 1 +#define FDO_PWM_MODE_STATIC_RPM 5 + + +extern int tf_tonga_thermal_initialize(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result); +extern int tf_tonga_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result); +extern int tf_tonga_thermal_enable_alert(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result); + +extern int tonga_thermal_get_temperature(struct pp_hwmgr *hwmgr); +extern int tonga_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr); +extern int tonga_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr, struct phm_fan_speed_info *fan_speed_info); +extern int tonga_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t *speed); +extern int tonga_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr); +extern int tonga_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode); +extern int tonga_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t speed); +extern int tonga_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr); +extern int pp_tonga_thermal_initialize(struct pp_hwmgr *hwmgr); +extern int tonga_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr); +extern int tonga_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed); +extern int tonga_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed); +extern int tonga_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr); + +#endif + diff --git a/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h b/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h new file mode 100644 index 000000000000..e61a3e67852e --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h @@ -0,0 +1,299 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#ifndef _AMD_POWERPLAY_H_ +#define _AMD_POWERPLAY_H_ + +#include +#include +#include +#include "amd_shared.h" +#include "cgs_common.h" + +enum amd_pp_event { + AMD_PP_EVENT_INITIALIZE = 0, + AMD_PP_EVENT_UNINITIALIZE, + AMD_PP_EVENT_POWER_SOURCE_CHANGE, + AMD_PP_EVENT_SUSPEND, + AMD_PP_EVENT_RESUME, + AMD_PP_EVENT_ENTER_REST_STATE, + AMD_PP_EVENT_EXIT_REST_STATE, + AMD_PP_EVENT_DISPLAY_CONFIG_CHANGE, + AMD_PP_EVENT_THERMAL_NOTIFICATION, + AMD_PP_EVENT_VBIOS_NOTIFICATION, + AMD_PP_EVENT_ENTER_THERMAL_STATE, + AMD_PP_EVENT_EXIT_THERMAL_STATE, + AMD_PP_EVENT_ENTER_FORCED_STATE, + AMD_PP_EVENT_EXIT_FORCED_STATE, + AMD_PP_EVENT_ENTER_EXCLUSIVE_MODE, + AMD_PP_EVENT_EXIT_EXCLUSIVE_MODE, + AMD_PP_EVENT_ENTER_SCREEN_SAVER, + AMD_PP_EVENT_EXIT_SCREEN_SAVER, + AMD_PP_EVENT_VPU_RECOVERY_BEGIN, + AMD_PP_EVENT_VPU_RECOVERY_END, + AMD_PP_EVENT_ENABLE_POWER_PLAY, + AMD_PP_EVENT_DISABLE_POWER_PLAY, + AMD_PP_EVENT_CHANGE_POWER_SOURCE_UI_LABEL, + AMD_PP_EVENT_ENABLE_USER2D_PERFORMANCE, + AMD_PP_EVENT_DISABLE_USER2D_PERFORMANCE, + AMD_PP_EVENT_ENABLE_USER3D_PERFORMANCE, + AMD_PP_EVENT_DISABLE_USER3D_PERFORMANCE, + AMD_PP_EVENT_ENABLE_OVER_DRIVE_TEST, + AMD_PP_EVENT_DISABLE_OVER_DRIVE_TEST, + AMD_PP_EVENT_ENABLE_REDUCED_REFRESH_RATE, + AMD_PP_EVENT_DISABLE_REDUCED_REFRESH_RATE, + AMD_PP_EVENT_ENABLE_GFX_CLOCK_GATING, + AMD_PP_EVENT_DISABLE_GFX_CLOCK_GATING, + AMD_PP_EVENT_ENABLE_CGPG, + AMD_PP_EVENT_DISABLE_CGPG, + AMD_PP_EVENT_ENTER_TEXT_MODE, + AMD_PP_EVENT_EXIT_TEXT_MODE, + AMD_PP_EVENT_VIDEO_START, + AMD_PP_EVENT_VIDEO_STOP, + AMD_PP_EVENT_ENABLE_USER_STATE, + AMD_PP_EVENT_DISABLE_USER_STATE, + AMD_PP_EVENT_READJUST_POWER_STATE, + AMD_PP_EVENT_START_INACTIVITY, + AMD_PP_EVENT_STOP_INACTIVITY, + AMD_PP_EVENT_LINKED_ADAPTERS_READY, + AMD_PP_EVENT_ADAPTER_SAFE_TO_DISABLE, + AMD_PP_EVENT_COMPLETE_INIT, + AMD_PP_EVENT_CRITICAL_THERMAL_FAULT, + AMD_PP_EVENT_BACKLIGHT_CHANGED, + AMD_PP_EVENT_ENABLE_VARI_BRIGHT, + AMD_PP_EVENT_DISABLE_VARI_BRIGHT, + AMD_PP_EVENT_ENABLE_VARI_BRIGHT_ON_POWER_XPRESS, + AMD_PP_EVENT_DISABLE_VARI_BRIGHT_ON_POWER_XPRESS, + AMD_PP_EVENT_SET_VARI_BRIGHT_LEVEL, + AMD_PP_EVENT_VARI_BRIGHT_MONITOR_MEASUREMENT, + AMD_PP_EVENT_SCREEN_ON, + AMD_PP_EVENT_SCREEN_OFF, + AMD_PP_EVENT_PRE_DISPLAY_CONFIG_CHANGE, + AMD_PP_EVENT_ENTER_ULP_STATE, + AMD_PP_EVENT_EXIT_ULP_STATE, + AMD_PP_EVENT_REGISTER_IP_STATE, + AMD_PP_EVENT_UNREGISTER_IP_STATE, + AMD_PP_EVENT_ENTER_MGPU_MODE, + AMD_PP_EVENT_EXIT_MGPU_MODE, + AMD_PP_EVENT_ENTER_MULTI_GPU_MODE, + AMD_PP_EVENT_PRE_SUSPEND, + AMD_PP_EVENT_PRE_RESUME, + AMD_PP_EVENT_ENTER_BACOS, + AMD_PP_EVENT_EXIT_BACOS, + AMD_PP_EVENT_RESUME_BACO, + AMD_PP_EVENT_RESET_BACO, + AMD_PP_EVENT_PRE_DISPLAY_PHY_ACCESS, + AMD_PP_EVENT_POST_DISPLAY_PHY_CCESS, + AMD_PP_EVENT_START_COMPUTE_APPLICATION, + AMD_PP_EVENT_STOP_COMPUTE_APPLICATION, + AMD_PP_EVENT_REDUCE_POWER_LIMIT, + AMD_PP_EVENT_ENTER_FRAME_LOCK, + AMD_PP_EVENT_EXIT_FRAME_LOOCK, + AMD_PP_EVENT_LONG_IDLE_REQUEST_BACO, + AMD_PP_EVENT_LONG_IDLE_ENTER_BACO, + AMD_PP_EVENT_LONG_IDLE_EXIT_BACO, + AMD_PP_EVENT_HIBERNATE, + AMD_PP_EVENT_CONNECTED_STANDBY, + AMD_PP_EVENT_ENTER_SELF_REFRESH, + AMD_PP_EVENT_EXIT_SELF_REFRESH, + AMD_PP_EVENT_START_AVFS_BTC, + AMD_PP_EVENT_MAX +}; + +enum amd_dpm_forced_level { + AMD_DPM_FORCED_LEVEL_AUTO = 0, + 
AMD_DPM_FORCED_LEVEL_LOW = 1, + AMD_DPM_FORCED_LEVEL_HIGH = 2, +}; + +struct amd_pp_init { + struct cgs_device *device; + uint32_t chip_family; + uint32_t chip_id; + uint32_t rev_id; +}; +enum amd_pp_display_config_type{ + AMD_PP_DisplayConfigType_None = 0, + AMD_PP_DisplayConfigType_DP54 , + AMD_PP_DisplayConfigType_DP432 , + AMD_PP_DisplayConfigType_DP324 , + AMD_PP_DisplayConfigType_DP27, + AMD_PP_DisplayConfigType_DP243, + AMD_PP_DisplayConfigType_DP216, + AMD_PP_DisplayConfigType_DP162, + AMD_PP_DisplayConfigType_HDMI6G , + AMD_PP_DisplayConfigType_HDMI297 , + AMD_PP_DisplayConfigType_HDMI162, + AMD_PP_DisplayConfigType_LVDS, + AMD_PP_DisplayConfigType_DVI, + AMD_PP_DisplayConfigType_WIRELESS, + AMD_PP_DisplayConfigType_VGA +}; + +struct single_display_configuration +{ + uint32_t controller_index; + uint32_t controller_id; + uint32_t signal_type; + uint32_t display_state; + /* phy id for the primary internal transmitter */ + uint8_t primary_transmitter_phyi_d; + /* bitmap with the active lanes */ + uint8_t primary_transmitter_active_lanemap; + /* phy id for the secondary internal transmitter (for dual-link dvi) */ + uint8_t secondary_transmitter_phy_id; + /* bitmap with the active lanes */ + uint8_t secondary_transmitter_active_lanemap; + /* misc phy settings for SMU. */ + uint32_t config_flags; + uint32_t display_type; + uint32_t view_resolution_cx; + uint32_t view_resolution_cy; + enum amd_pp_display_config_type displayconfigtype; + uint32_t vertical_refresh; /* for active display */ +}; + +#define MAX_NUM_DISPLAY 32 + +struct amd_pp_display_configuration { + bool nb_pstate_switch_disable;/* controls NB PState switch */ + bool cpu_cc6_disable; /* controls CPU CState switch ( on or off) */ + bool cpu_pstate_disable; + uint32_t cpu_pstate_separation_time; + + uint32_t num_display; /* total number of display*/ + uint32_t num_path_including_non_display; + uint32_t crossfire_display_index; + uint32_t min_mem_set_clock; + uint32_t min_core_set_clock; + /* unit 10KHz x bit*/ + uint32_t min_bus_bandwidth; + /* minimum required stutter sclk, in 10khz uint32_t ulMinCoreSetClk;*/ + uint32_t min_core_set_clock_in_sr; + + struct single_display_configuration displays[MAX_NUM_DISPLAY]; + + uint32_t vrefresh; /* for active display*/ + + uint32_t min_vblank_time; /* for active display*/ + bool multi_monitor_in_sync; + /* Controller Index of primary display - used in MCLK SMC switching hang + * SW Workaround*/ + uint32_t crtc_index; + /* htotal*1000/pixelclk - used in MCLK SMC switching hang SW Workaround*/ + uint32_t line_time_in_us; + bool invalid_vblank_time; + + uint32_t display_clk; + /* + * for given display configuration if multimonitormnsync == false then + * Memory clock DPMS with this latency or below is allowed, DPMS with + * higher latency not allowed. 
+ */ + uint32_t dce_tolerable_mclk_in_active_latency; +}; + +struct amd_pp_dal_clock_info { + uint32_t engine_max_clock; + uint32_t memory_max_clock; + uint32_t level; +}; + +enum { + PP_GROUP_UNKNOWN = 0, + PP_GROUP_GFX = 1, + PP_GROUP_SYS, + PP_GROUP_MAX +}; + +#define PP_GROUP_MASK 0xF0000000 +#define PP_GROUP_SHIFT 28 + +#define PP_BLOCK_MASK 0x0FFFFF00 +#define PP_BLOCK_SHIFT 8 + +#define PP_BLOCK_GFX_CG 0x01 +#define PP_BLOCK_GFX_MG 0x02 +#define PP_BLOCK_SYS_BIF 0x01 +#define PP_BLOCK_SYS_MC 0x02 +#define PP_BLOCK_SYS_ROM 0x04 +#define PP_BLOCK_SYS_DRM 0x08 +#define PP_BLOCK_SYS_HDP 0x10 +#define PP_BLOCK_SYS_SDMA 0x20 + +#define PP_STATE_MASK 0x0000000F +#define PP_STATE_SHIFT 0 +#define PP_STATE_SUPPORT_MASK 0x000000F0 +#define PP_STATE_SUPPORT_SHIFT 0 + +#define PP_STATE_CG 0x01 +#define PP_STATE_LS 0x02 +#define PP_STATE_DS 0x04 +#define PP_STATE_SD 0x08 +#define PP_STATE_SUPPORT_CG 0x10 +#define PP_STATE_SUPPORT_LS 0x20 +#define PP_STATE_SUPPORT_DS 0x40 +#define PP_STATE_SUPPORT_SD 0x80 + +#define PP_CG_MSG_ID(group, block, support, state) (group << PP_GROUP_SHIFT |\ + block << PP_BLOCK_SHIFT |\ + support << PP_STATE_SUPPORT_SHIFT |\ + state << PP_STATE_SHIFT) + +struct amd_powerplay_funcs { + int (*get_temperature)(void *handle); + int (*load_firmware)(void *handle); + int (*wait_for_fw_loading_complete)(void *handle); + int (*force_performance_level)(void *handle, enum amd_dpm_forced_level level); + enum amd_dpm_forced_level (*get_performance_level)(void *handle); + enum amd_pm_state_type (*get_current_power_state)(void *handle); + int (*get_sclk)(void *handle, bool low); + int (*get_mclk)(void *handle, bool low); + int (*powergate_vce)(void *handle, bool gate); + int (*powergate_uvd)(void *handle, bool gate); + int (*dispatch_tasks)(void *handle, enum amd_pp_event event_id, + void *input, void *output); + void (*print_current_performance_level)(void *handle, + struct seq_file *m); + int (*set_fan_control_mode)(void *handle, uint32_t mode); + int (*get_fan_control_mode)(void *handle); + int (*set_fan_speed_percent)(void *handle, uint32_t percent); + int (*get_fan_speed_percent)(void *handle, uint32_t *speed); +}; + +struct amd_powerplay { + void *pp_handle; + const struct amd_ip_funcs *ip_funcs; + const struct amd_powerplay_funcs *pp_funcs; +}; + +int amd_powerplay_init(struct amd_pp_init *pp_init, + struct amd_powerplay *amd_pp); +int amd_powerplay_fini(void *handle); + +int amd_powerplay_display_configuration_change(void *handle, const void *input); + +int amd_powerplay_get_display_power_level(void *handle, + struct amd_pp_dal_clock_info *output); + + +#endif /* _AMD_POWERPLAY_H_ */ diff --git a/drivers/gpu/drm/amd/amdgpu/cz_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/cz_ppsmc.h similarity index 99% rename from drivers/gpu/drm/amd/amdgpu/cz_ppsmc.h rename to drivers/gpu/drm/amd/powerplay/inc/cz_ppsmc.h index 273616ab43db..9b698780aed8 100644 --- a/drivers/gpu/drm/amd/amdgpu/cz_ppsmc.h +++ b/drivers/gpu/drm/amd/powerplay/inc/cz_ppsmc.h @@ -164,6 +164,7 @@ enum DPM_ARRAY { #define PPSMC_MSG_SetLoggerAddressHigh ((uint16_t) 0x26C) #define PPSMC_MSG_SetLoggerAddressLow ((uint16_t) 0x26D) #define PPSMC_MSG_SetWatermarkFrequency ((uint16_t) 0x26E) +#define PPSMC_MSG_SetDisplaySizePowerParams ((uint16_t) 0x26F) /* REMOVE LATER*/ #define PPSMC_MSG_DPM_ForceState ((uint16_t) 0x104) diff --git a/drivers/gpu/drm/amd/powerplay/inc/eventmanager.h b/drivers/gpu/drm/amd/powerplay/inc/eventmanager.h new file mode 100644 index 000000000000..b9d84de8a44d --- /dev/null +++ 
b/drivers/gpu/drm/amd/powerplay/inc/eventmanager.h @@ -0,0 +1,109 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#ifndef _EVENT_MANAGER_H_ +#define _EVENT_MANAGER_H_ + +#include "power_state.h" +#include "pp_power_source.h" +#include "hardwaremanager.h" +#include "pp_asicblocks.h" + +struct pp_eventmgr; +enum amd_pp_event; + +enum PEM_EventDataValid { + PEM_EventDataValid_RequestedStateID = 0, + PEM_EventDataValid_RequestedUILabel, + PEM_EventDataValid_NewPowerState, + PEM_EventDataValid_RequestedPowerSource, + PEM_EventDataValid_RequestedClocks, + PEM_EventDataValid_CurrentTemperature, + PEM_EventDataValid_AsicBlocks, + PEM_EventDataValid_ODParameters, + PEM_EventDataValid_PXAdapterPrefs, + PEM_EventDataValid_PXUserPrefs, + PEM_EventDataValid_PXSwitchReason, + PEM_EventDataValid_PXSwitchPhase, + PEM_EventDataValid_HdVideo, + PEM_EventDataValid_BacklightLevel, + PEM_EventDatavalid_VariBrightParams, + PEM_EventDataValid_VariBrightLevel, + PEM_EventDataValid_VariBrightImmediateChange, + PEM_EventDataValid_PercentWhite, + PEM_EventDataValid_SdVideo, + PEM_EventDataValid_HTLinkChangeReason, + PEM_EventDataValid_HWBlocks, + PEM_EventDataValid_RequestedThermalState, + PEM_EventDataValid_MvcVideo, + PEM_EventDataValid_Max +}; + +typedef enum PEM_EventDataValid PEM_EventDataValid; + +/* Number of bits in ULONG variable */ +#define PEM_MAX_NUM_EVENTDATAVALID_BITS_PER_FIELD (sizeof(unsigned long)*8) + +/* Number of ULONG entries used by event data valid bits */ +#define PEM_MAX_NUM_EVENTDATAVALID_ULONG_ENTRIES \ + ((PEM_EventDataValid_Max + PEM_MAX_NUM_EVENTDATAVALID_BITS_PER_FIELD - 1) / \ + PEM_MAX_NUM_EVENTDATAVALID_BITS_PER_FIELD) + +static inline void pem_set_event_data_valid(unsigned long *fields, PEM_EventDataValid valid_field) +{ + fields[valid_field / PEM_MAX_NUM_EVENTDATAVALID_BITS_PER_FIELD] |= + (1UL << (valid_field % PEM_MAX_NUM_EVENTDATAVALID_BITS_PER_FIELD)); +} + +static inline void pem_unset_event_data_valid(unsigned long *fields, PEM_EventDataValid valid_field) +{ + fields[valid_field / PEM_MAX_NUM_EVENTDATAVALID_BITS_PER_FIELD] &= + ~(1UL << (valid_field % PEM_MAX_NUM_EVENTDATAVALID_BITS_PER_FIELD)); +} + +static inline unsigned long pem_is_event_data_valid(const unsigned long *fields, PEM_EventDataValid valid_field) +{ + return fields[valid_field / PEM_MAX_NUM_EVENTDATAVALID_BITS_PER_FIELD] & + (1UL << (valid_field % PEM_MAX_NUM_EVENTDATAVALID_BITS_PER_FIELD)); +} + +struct 
pem_event_data { + unsigned long valid_fields[100]; + unsigned long requested_state_id; + enum PP_StateUILabel requested_ui_label; + struct pp_power_state *pnew_power_state; + enum pp_power_source requested_power_source; + struct PP_Clocks requested_clocks; + bool skip_state_adjust_rules; + struct phm_asic_blocks asic_blocks; + /* to doPP_ThermalState requestedThermalState; + enum ThermalStateRequestSrc requestThermalStateSrc; + PP_Temperature currentTemperature;*/ + +}; + +int pem_handle_event(struct pp_eventmgr *eventmgr, enum amd_pp_event event, + struct pem_event_data *event_data); + +bool pem_is_hw_access_blocked(struct pp_eventmgr *eventmgr); + +#endif /* _EVENT_MANAGER_H_ */ diff --git a/drivers/gpu/drm/amd/powerplay/inc/eventmgr.h b/drivers/gpu/drm/amd/powerplay/inc/eventmgr.h new file mode 100644 index 000000000000..10437dcfd365 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/inc/eventmgr.h @@ -0,0 +1,125 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#ifndef _EVENTMGR_H_ +#define _EVENTMGR_H_ + +#include +#include "pp_instance.h" +#include "hardwaremanager.h" +#include "eventmanager.h" +#include "pp_feature.h" +#include "pp_power_source.h" +#include "power_state.h" + +typedef int (*pem_event_action)(struct pp_eventmgr *eventmgr, + struct pem_event_data *event_data); + +struct action_chain { + const char *description; /* action chain description for debugging purpose */ + const pem_event_action **action_chain; /* pointer to chain of event actions */ +}; + +struct pem_power_source_ui_state_info { + enum PP_StateUILabel current_ui_label; + enum PP_StateUILabel default_ui_lable; + unsigned long configurable_ui_mapping; +}; + +struct pp_clock_range { + uint32_t min_sclk_khz; + uint32_t max_sclk_khz; + + uint32_t min_mclk_khz; + uint32_t max_mclk_khz; + + uint32_t min_vclk_khz; + uint32_t max_vclk_khz; + + uint32_t min_dclk_khz; + uint32_t max_dclk_khz; + + uint32_t min_aclk_khz; + uint32_t max_aclk_khz; + + uint32_t min_eclk_khz; + uint32_t max_eclk_khz; +}; + +enum pp_state { + UNINITIALIZED, + INACTIVE, + ACTIVE +}; + +enum pp_ring_index { + PP_RING_TYPE_GFX_INDEX = 0, + PP_RING_TYPE_DMA_INDEX, + PP_RING_TYPE_DMA1_INDEX, + PP_RING_TYPE_UVD_INDEX, + PP_RING_TYPE_VCE0_INDEX, + PP_RING_TYPE_VCE1_INDEX, + PP_RING_TYPE_CP1_INDEX, + PP_RING_TYPE_CP2_INDEX, + PP_NUM_RINGS, +}; + +struct pp_request { + uint32_t flags; + uint32_t sclk; + uint32_t sclk_throttle; + uint32_t mclk; + uint32_t vclk; + uint32_t dclk; + uint32_t eclk; + uint32_t aclk; + uint32_t iclk; + uint32_t vp8clk; + uint32_t rsv[32]; +}; + +struct pp_eventmgr { + struct pp_hwmgr *hwmgr; + struct pp_smumgr *smumgr; + + struct pp_feature_info features[PP_Feature_Max]; + const struct action_chain *event_chain[AMD_PP_EVENT_MAX]; + struct phm_platform_descriptor *platform_descriptor; + struct pp_clock_range clock_range; + enum pp_power_source current_power_source; + struct pem_power_source_ui_state_info ui_state_info[PP_PowerSource_Max]; + enum pp_state states[PP_NUM_RINGS]; + struct pp_request hi_req; + struct list_head context_list; + struct mutex lock; + bool block_adjust_power_state; + bool enable_cg; + bool enable_gfx_cgpg; + int (*pp_eventmgr_init)(struct pp_eventmgr *eventmgr); + void (*pp_eventmgr_fini)(struct pp_eventmgr *eventmgr); +}; + +int eventmgr_init(struct pp_instance *handle); +int eventmgr_fini(struct pp_eventmgr *eventmgr); + +#endif /* _EVENTMGR_H_ */ diff --git a/drivers/gpu/drm/amd/powerplay/inc/fiji_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/fiji_ppsmc.h new file mode 100644 index 000000000000..7ae494569a60 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/inc/fiji_ppsmc.h @@ -0,0 +1,412 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + + +#ifndef _FIJI_PP_SMC_H_ +#define _FIJI_PP_SMC_H_ + +#pragma pack(push, 1) + +#define PPSMC_SWSTATE_FLAG_DC 0x01 +#define PPSMC_SWSTATE_FLAG_UVD 0x02 +#define PPSMC_SWSTATE_FLAG_VCE 0x04 + +#define PPSMC_THERMAL_PROTECT_TYPE_INTERNAL 0x00 +#define PPSMC_THERMAL_PROTECT_TYPE_EXTERNAL 0x01 +#define PPSMC_THERMAL_PROTECT_TYPE_NONE 0xff + +#define PPSMC_SYSTEMFLAG_GPIO_DC 0x01 +#define PPSMC_SYSTEMFLAG_STEPVDDC 0x02 +#define PPSMC_SYSTEMFLAG_GDDR5 0x04 + +#define PPSMC_SYSTEMFLAG_DISABLE_BABYSTEP 0x08 + +#define PPSMC_SYSTEMFLAG_REGULATOR_HOT 0x10 +#define PPSMC_SYSTEMFLAG_REGULATOR_HOT_ANALOG 0x20 + +#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_MASK 0x07 +#define PPSMC_EXTRAFLAGS_AC2DC_DONT_WAIT_FOR_VBLANK 0x08 + +#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_GOTODPMLOWSTATE 0x00 +#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_GOTOINITIALSTATE 0x01 + +/* Defines for DPM 2.0 */ +#define PPSMC_DPM2FLAGS_TDPCLMP 0x01 +#define PPSMC_DPM2FLAGS_PWRSHFT 0x02 +#define PPSMC_DPM2FLAGS_OCP 0x04 + +/* Defines for display watermark level */ +#define PPSMC_DISPLAY_WATERMARK_LOW 0 +#define PPSMC_DISPLAY_WATERMARK_HIGH 1 + +/* In the HW performance level's state flags: */ +#define PPSMC_STATEFLAG_AUTO_PULSE_SKIP 0x01 +#define PPSMC_STATEFLAG_POWERBOOST 0x02 +#define PPSMC_STATEFLAG_PSKIP_ON_TDP_FAULT 0x04 +#define PPSMC_STATEFLAG_POWERSHIFT 0x08 +#define PPSMC_STATEFLAG_SLOW_READ_MARGIN 0x10 +#define PPSMC_STATEFLAG_DEEPSLEEP_THROTTLE 0x20 +#define PPSMC_STATEFLAG_DEEPSLEEP_BYPASS 0x40 + +/* Fan control algorithm: */ +#define FDO_MODE_HARDWARE 0 +#define FDO_MODE_PIECE_WISE_LINEAR 1 + +enum FAN_CONTROL { + FAN_CONTROL_FUZZY, + FAN_CONTROL_TABLE +}; + +/* Gemini Modes*/ +#define PPSMC_GeminiModeNone 0 /*Single GPU board*/ +#define PPSMC_GeminiModeMaster 1 /*Master GPU on a Gemini board*/ +#define PPSMC_GeminiModeSlave 2 /*Slave GPU on a Gemini board*/ + + +/* Return codes for driver to SMC communication. 
*/ +#define PPSMC_Result_OK ((uint16_t)0x01) +#define PPSMC_Result_NoMore ((uint16_t)0x02) + +#define PPSMC_Result_NotNow ((uint16_t)0x03) + +#define PPSMC_Result_Failed ((uint16_t)0xFF) +#define PPSMC_Result_UnknownCmd ((uint16_t)0xFE) +#define PPSMC_Result_UnknownVT ((uint16_t)0xFD) + +#define PPSMC_isERROR(x) ((uint16_t)0x80 & (x)) + + +#define PPSMC_MSG_Halt ((uint16_t)0x10) +#define PPSMC_MSG_Resume ((uint16_t)0x11) +#define PPSMC_MSG_EnableDPMLevel ((uint16_t)0x12) +#define PPSMC_MSG_ZeroLevelsDisabled ((uint16_t)0x13) +#define PPSMC_MSG_OneLevelsDisabled ((uint16_t)0x14) +#define PPSMC_MSG_TwoLevelsDisabled ((uint16_t)0x15) +#define PPSMC_MSG_EnableThermalInterrupt ((uint16_t)0x16) +#define PPSMC_MSG_RunningOnAC ((uint16_t)0x17) +#define PPSMC_MSG_LevelUp ((uint16_t)0x18) +#define PPSMC_MSG_LevelDown ((uint16_t)0x19) +#define PPSMC_MSG_ResetDPMCounters ((uint16_t)0x1a) +#define PPSMC_MSG_SwitchToSwState ((uint16_t)0x20) + +#define PPSMC_MSG_SwitchToSwStateLast ((uint16_t)0x3f) +#define PPSMC_MSG_SwitchToInitialState ((uint16_t)0x40) +#define PPSMC_MSG_NoForcedLevel ((uint16_t)0x41) +#define PPSMC_MSG_ForceHigh ((uint16_t)0x42) +#define PPSMC_MSG_ForceMediumOrHigh ((uint16_t)0x43) + +#define PPSMC_MSG_SwitchToMinimumPower ((uint16_t)0x51) +#define PPSMC_MSG_ResumeFromMinimumPower ((uint16_t)0x52) +#define PPSMC_MSG_EnableCac ((uint16_t)0x53) +#define PPSMC_MSG_DisableCac ((uint16_t)0x54) +#define PPSMC_DPMStateHistoryStart ((uint16_t)0x55) +#define PPSMC_DPMStateHistoryStop ((uint16_t)0x56) +#define PPSMC_CACHistoryStart ((uint16_t)0x57) +#define PPSMC_CACHistoryStop ((uint16_t)0x58) +#define PPSMC_TDPClampingActive ((uint16_t)0x59) +#define PPSMC_TDPClampingInactive ((uint16_t)0x5A) +#define PPSMC_StartFanControl ((uint16_t)0x5B) +#define PPSMC_StopFanControl ((uint16_t)0x5C) +#define PPSMC_NoDisplay ((uint16_t)0x5D) +#define PPSMC_HasDisplay ((uint16_t)0x5E) +#define PPSMC_MSG_UVDPowerOFF ((uint16_t)0x60) +#define PPSMC_MSG_UVDPowerON ((uint16_t)0x61) +#define PPSMC_MSG_EnableULV ((uint16_t)0x62) +#define PPSMC_MSG_DisableULV ((uint16_t)0x63) +#define PPSMC_MSG_EnterULV ((uint16_t)0x64) +#define PPSMC_MSG_ExitULV ((uint16_t)0x65) +#define PPSMC_PowerShiftActive ((uint16_t)0x6A) +#define PPSMC_PowerShiftInactive ((uint16_t)0x6B) +#define PPSMC_OCPActive ((uint16_t)0x6C) +#define PPSMC_OCPInactive ((uint16_t)0x6D) +#define PPSMC_CACLongTermAvgEnable ((uint16_t)0x6E) +#define PPSMC_CACLongTermAvgDisable ((uint16_t)0x6F) +#define PPSMC_MSG_InferredStateSweep_Start ((uint16_t)0x70) +#define PPSMC_MSG_InferredStateSweep_Stop ((uint16_t)0x71) +#define PPSMC_MSG_SwitchToLowestInfState ((uint16_t)0x72) +#define PPSMC_MSG_SwitchToNonInfState ((uint16_t)0x73) +#define PPSMC_MSG_AllStateSweep_Start ((uint16_t)0x74) +#define PPSMC_MSG_AllStateSweep_Stop ((uint16_t)0x75) +#define PPSMC_MSG_SwitchNextLowerInfState ((uint16_t)0x76) +#define PPSMC_MSG_SwitchNextHigherInfState ((uint16_t)0x77) +#define PPSMC_MSG_MclkRetrainingTest ((uint16_t)0x78) +#define PPSMC_MSG_ForceTDPClamping ((uint16_t)0x79) +#define PPSMC_MSG_CollectCAC_PowerCorreln ((uint16_t)0x7A) +#define PPSMC_MSG_CollectCAC_WeightCalib ((uint16_t)0x7B) +#define PPSMC_MSG_CollectCAC_SQonly ((uint16_t)0x7C) +#define PPSMC_MSG_CollectCAC_TemperaturePwr ((uint16_t)0x7D) + +#define PPSMC_MSG_ExtremitiesTest_Start ((uint16_t)0x7E) +#define PPSMC_MSG_ExtremitiesTest_Stop ((uint16_t)0x7F) +#define PPSMC_FlushDataCache ((uint16_t)0x80) +#define PPSMC_FlushInstrCache ((uint16_t)0x81) + +#define PPSMC_MSG_SetEnabledLevels ((uint16_t)0x82) 
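/*
 * Illustrative sketch (not functional code in this header): the PPSMC_MSG_*
 * IDs above are handed to the SMU through the
 * smum_send_msg_to_smc_with_parameter() helper used by the Tonga thermal code
 * earlier in this series, which returns 0 when the firmware accepted the
 * message. A hypothetical wrapper, mirroring that code's 0/-1 mapping, would
 * look roughly like:
 *
 *	static int example_set_fan_min_pwm(struct pp_hwmgr *hwmgr, uint32_t pwm)
 *	{
 *		// 0 from the helper means the SMC accepted the message.
 *		return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
 *				PPSMC_MSG_SetFanMinPwm, pwm) == 0 ? 0 : -1;
 *	}
 *
 * struct pp_hwmgr comes from hwmgr.h; PPSMC_MSG_SetFanMinPwm is defined
 * further down in this header. example_set_fan_min_pwm() itself is made up
 * for illustration.
 */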
+#define PPSMC_MSG_SetForcedLevels ((uint16_t)0x83) + +#define PPSMC_MSG_ResetToDefaults ((uint16_t)0x84) + +#define PPSMC_MSG_SetForcedLevelsAndJump ((uint16_t)0x85) +#define PPSMC_MSG_SetCACHistoryMode ((uint16_t)0x86) +#define PPSMC_MSG_EnableDTE ((uint16_t)0x87) +#define PPSMC_MSG_DisableDTE ((uint16_t)0x88) + +#define PPSMC_MSG_SmcSpaceSetAddress ((uint16_t)0x89) + +#define PPSMC_MSG_BREAK ((uint16_t)0xF8) + +/* Trinity Specific Messages*/ +#define PPSMC_MSG_Test ((uint16_t) 0x100) +#define PPSMC_MSG_DPM_Voltage_Pwrmgt ((uint16_t) 0x101) +#define PPSMC_MSG_DPM_Config ((uint16_t) 0x102) +#define PPSMC_MSG_PM_Controller_Start ((uint16_t) 0x103) +#define PPSMC_MSG_DPM_ForceState ((uint16_t) 0x104) +#define PPSMC_MSG_PG_PowerDownSIMD ((uint16_t) 0x105) +#define PPSMC_MSG_PG_PowerUpSIMD ((uint16_t) 0x106) +#define PPSMC_MSG_PM_Controller_Stop ((uint16_t) 0x107) +#define PPSMC_MSG_PG_SIMD_Config ((uint16_t) 0x108) +#define PPSMC_MSG_Voltage_Cntl_Enable ((uint16_t) 0x109) +#define PPSMC_MSG_Thermal_Cntl_Enable ((uint16_t) 0x10a) +#define PPSMC_MSG_Reset_Service ((uint16_t) 0x10b) +#define PPSMC_MSG_VCEPowerOFF ((uint16_t) 0x10e) +#define PPSMC_MSG_VCEPowerON ((uint16_t) 0x10f) +#define PPSMC_MSG_DPM_Disable_VCE_HS ((uint16_t) 0x110) +#define PPSMC_MSG_DPM_Enable_VCE_HS ((uint16_t) 0x111) +#define PPSMC_MSG_DPM_N_LevelsDisabled ((uint16_t) 0x112) +#define PPSMC_MSG_DCEPowerOFF ((uint16_t) 0x113) +#define PPSMC_MSG_DCEPowerON ((uint16_t) 0x114) +#define PPSMC_MSG_PCIE_DDIPowerDown ((uint16_t) 0x117) +#define PPSMC_MSG_PCIE_DDIPowerUp ((uint16_t) 0x118) +#define PPSMC_MSG_PCIE_CascadePLLPowerDown ((uint16_t) 0x119) +#define PPSMC_MSG_PCIE_CascadePLLPowerUp ((uint16_t) 0x11a) +#define PPSMC_MSG_SYSPLLPowerOff ((uint16_t) 0x11b) +#define PPSMC_MSG_SYSPLLPowerOn ((uint16_t) 0x11c) +#define PPSMC_MSG_DCE_RemoveVoltageAdjustment ((uint16_t) 0x11d) +#define PPSMC_MSG_DCE_AllowVoltageAdjustment ((uint16_t) 0x11e) +#define PPSMC_MSG_DISPLAYPHYStatusNotify ((uint16_t) 0x11f) +#define PPSMC_MSG_EnableBAPM ((uint16_t) 0x120) +#define PPSMC_MSG_DisableBAPM ((uint16_t) 0x121) +#define PPSMC_MSG_Spmi_Enable ((uint16_t) 0x122) +#define PPSMC_MSG_Spmi_Timer ((uint16_t) 0x123) +#define PPSMC_MSG_LCLK_DPM_Config ((uint16_t) 0x124) +#define PPSMC_MSG_VddNB_Request ((uint16_t) 0x125) +#define PPSMC_MSG_PCIE_DDIPhyPowerDown ((uint32_t) 0x126) +#define PPSMC_MSG_PCIE_DDIPhyPowerUp ((uint32_t) 0x127) +#define PPSMC_MSG_MCLKDPM_Config ((uint16_t) 0x128) + +#define PPSMC_MSG_UVDDPM_Config ((uint16_t) 0x129) +#define PPSMC_MSG_VCEDPM_Config ((uint16_t) 0x12A) +#define PPSMC_MSG_ACPDPM_Config ((uint16_t) 0x12B) +#define PPSMC_MSG_SAMUDPM_Config ((uint16_t) 0x12C) +#define PPSMC_MSG_UVDDPM_SetEnabledMask ((uint16_t) 0x12D) +#define PPSMC_MSG_VCEDPM_SetEnabledMask ((uint16_t) 0x12E) +#define PPSMC_MSG_ACPDPM_SetEnabledMask ((uint16_t) 0x12F) +#define PPSMC_MSG_SAMUDPM_SetEnabledMask ((uint16_t) 0x130) +#define PPSMC_MSG_MCLKDPM_ForceState ((uint16_t) 0x131) +#define PPSMC_MSG_MCLKDPM_NoForcedLevel ((uint16_t) 0x132) +#define PPSMC_MSG_Thermal_Cntl_Disable ((uint16_t) 0x133) +#define PPSMC_MSG_SetTDPLimit ((uint16_t) 0x134) +#define PPSMC_MSG_Voltage_Cntl_Disable ((uint16_t) 0x135) +#define PPSMC_MSG_PCIeDPM_Enable ((uint16_t) 0x136) +#define PPSMC_MSG_ACPPowerOFF ((uint16_t) 0x137) +#define PPSMC_MSG_ACPPowerON ((uint16_t) 0x138) +#define PPSMC_MSG_SAMPowerOFF ((uint16_t) 0x139) +#define PPSMC_MSG_SAMPowerON ((uint16_t) 0x13a) +#define PPSMC_MSG_SDMAPowerOFF ((uint16_t) 0x13b) +#define PPSMC_MSG_SDMAPowerON ((uint16_t) 
0x13c) +#define PPSMC_MSG_PCIeDPM_Disable ((uint16_t) 0x13d) +#define PPSMC_MSG_IOMMUPowerOFF ((uint16_t) 0x13e) +#define PPSMC_MSG_IOMMUPowerON ((uint16_t) 0x13f) +#define PPSMC_MSG_NBDPM_Enable ((uint16_t) 0x140) +#define PPSMC_MSG_NBDPM_Disable ((uint16_t) 0x141) +#define PPSMC_MSG_NBDPM_ForceNominal ((uint16_t) 0x142) +#define PPSMC_MSG_NBDPM_ForcePerformance ((uint16_t) 0x143) +#define PPSMC_MSG_NBDPM_UnForce ((uint16_t) 0x144) +#define PPSMC_MSG_SCLKDPM_SetEnabledMask ((uint16_t) 0x145) +#define PPSMC_MSG_MCLKDPM_SetEnabledMask ((uint16_t) 0x146) +#define PPSMC_MSG_PCIeDPM_ForceLevel ((uint16_t) 0x147) +#define PPSMC_MSG_PCIeDPM_UnForceLevel ((uint16_t) 0x148) +#define PPSMC_MSG_EnableACDCGPIOInterrupt ((uint16_t) 0x149) +#define PPSMC_MSG_EnableVRHotGPIOInterrupt ((uint16_t) 0x14a) +#define PPSMC_MSG_SwitchToAC ((uint16_t) 0x14b) + +#define PPSMC_MSG_XDMAPowerOFF ((uint16_t) 0x14c) +#define PPSMC_MSG_XDMAPowerON ((uint16_t) 0x14d) + +#define PPSMC_MSG_DPM_Enable ((uint16_t) 0x14e) +#define PPSMC_MSG_DPM_Disable ((uint16_t) 0x14f) +#define PPSMC_MSG_MCLKDPM_Enable ((uint16_t) 0x150) +#define PPSMC_MSG_MCLKDPM_Disable ((uint16_t) 0x151) +#define PPSMC_MSG_LCLKDPM_Enable ((uint16_t) 0x152) +#define PPSMC_MSG_LCLKDPM_Disable ((uint16_t) 0x153) +#define PPSMC_MSG_UVDDPM_Enable ((uint16_t) 0x154) +#define PPSMC_MSG_UVDDPM_Disable ((uint16_t) 0x155) +#define PPSMC_MSG_SAMUDPM_Enable ((uint16_t) 0x156) +#define PPSMC_MSG_SAMUDPM_Disable ((uint16_t) 0x157) +#define PPSMC_MSG_ACPDPM_Enable ((uint16_t) 0x158) +#define PPSMC_MSG_ACPDPM_Disable ((uint16_t) 0x159) +#define PPSMC_MSG_VCEDPM_Enable ((uint16_t) 0x15a) +#define PPSMC_MSG_VCEDPM_Disable ((uint16_t) 0x15b) +#define PPSMC_MSG_LCLKDPM_SetEnabledMask ((uint16_t) 0x15c) +#define PPSMC_MSG_DPM_FPS_Mode ((uint16_t) 0x15d) +#define PPSMC_MSG_DPM_Activity_Mode ((uint16_t) 0x15e) +#define PPSMC_MSG_VddC_Request ((uint16_t) 0x15f) +#define PPSMC_MSG_MCLKDPM_GetEnabledMask ((uint16_t) 0x160) +#define PPSMC_MSG_LCLKDPM_GetEnabledMask ((uint16_t) 0x161) +#define PPSMC_MSG_SCLKDPM_GetEnabledMask ((uint16_t) 0x162) +#define PPSMC_MSG_UVDDPM_GetEnabledMask ((uint16_t) 0x163) +#define PPSMC_MSG_SAMUDPM_GetEnabledMask ((uint16_t) 0x164) +#define PPSMC_MSG_ACPDPM_GetEnabledMask ((uint16_t) 0x165) +#define PPSMC_MSG_VCEDPM_GetEnabledMask ((uint16_t) 0x166) +#define PPSMC_MSG_PCIeDPM_SetEnabledMask ((uint16_t) 0x167) +#define PPSMC_MSG_PCIeDPM_GetEnabledMask ((uint16_t) 0x168) +#define PPSMC_MSG_TDCLimitEnable ((uint16_t) 0x169) +#define PPSMC_MSG_TDCLimitDisable ((uint16_t) 0x16a) +#define PPSMC_MSG_DPM_AutoRotate_Mode ((uint16_t) 0x16b) +#define PPSMC_MSG_DISPCLK_FROM_FCH ((uint16_t) 0x16c) +#define PPSMC_MSG_DISPCLK_FROM_DFS ((uint16_t) 0x16d) +#define PPSMC_MSG_DPREFCLK_FROM_FCH ((uint16_t) 0x16e) +#define PPSMC_MSG_DPREFCLK_FROM_DFS ((uint16_t) 0x16f) +#define PPSMC_MSG_PmStatusLogStart ((uint16_t) 0x170) +#define PPSMC_MSG_PmStatusLogSample ((uint16_t) 0x171) +#define PPSMC_MSG_SCLK_AutoDPM_ON ((uint16_t) 0x172) +#define PPSMC_MSG_MCLK_AutoDPM_ON ((uint16_t) 0x173) +#define PPSMC_MSG_LCLK_AutoDPM_ON ((uint16_t) 0x174) +#define PPSMC_MSG_UVD_AutoDPM_ON ((uint16_t) 0x175) +#define PPSMC_MSG_SAMU_AutoDPM_ON ((uint16_t) 0x176) +#define PPSMC_MSG_ACP_AutoDPM_ON ((uint16_t) 0x177) +#define PPSMC_MSG_VCE_AutoDPM_ON ((uint16_t) 0x178) +#define PPSMC_MSG_PCIe_AutoDPM_ON ((uint16_t) 0x179) +#define PPSMC_MSG_MASTER_AutoDPM_ON ((uint16_t) 0x17a) +#define PPSMC_MSG_MASTER_AutoDPM_OFF ((uint16_t) 0x17b) +#define PPSMC_MSG_DYNAMICDISPPHYPOWER ((uint16_t) 0x17c) 
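/*
 * Illustrative sketch (not functional code in this header): the
 * *_SetEnabledMask messages above take their DPM level selection as the
 * message parameter. Assuming that parameter is a plain bitmask of levels
 * (an assumption based on the message names, not spelled out in this header),
 * enabling the lowest n levels would look roughly like:
 *
 *	uint32_t mask = (1U << n) - 1;	// levels 0..n-1, assumed encoding
 *
 *	smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
 *			PPSMC_MSG_SCLKDPM_SetEnabledMask, mask);
 */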
+#define PPSMC_MSG_CAC_COLLECTION_ON ((uint16_t) 0x17d) +#define PPSMC_MSG_CAC_COLLECTION_OFF ((uint16_t) 0x17e) +#define PPSMC_MSG_CAC_CORRELATION_ON ((uint16_t) 0x17f) +#define PPSMC_MSG_CAC_CORRELATION_OFF ((uint16_t) 0x180) +#define PPSMC_MSG_PM_STATUS_TO_DRAM_ON ((uint16_t) 0x181) +#define PPSMC_MSG_PM_STATUS_TO_DRAM_OFF ((uint16_t) 0x182) +#define PPSMC_MSG_ALLOW_LOWSCLK_INTERRUPT ((uint16_t) 0x184) +#define PPSMC_MSG_PkgPwrLimitEnable ((uint16_t) 0x185) +#define PPSMC_MSG_PkgPwrLimitDisable ((uint16_t) 0x186) +#define PPSMC_MSG_PkgPwrSetLimit ((uint16_t) 0x187) +#define PPSMC_MSG_OverDriveSetTargetTdp ((uint16_t) 0x188) +#define PPSMC_MSG_SCLKDPM_FreezeLevel ((uint16_t) 0x189) +#define PPSMC_MSG_SCLKDPM_UnfreezeLevel ((uint16_t) 0x18A) +#define PPSMC_MSG_MCLKDPM_FreezeLevel ((uint16_t) 0x18B) +#define PPSMC_MSG_MCLKDPM_UnfreezeLevel ((uint16_t) 0x18C) +#define PPSMC_MSG_START_DRAM_LOGGING ((uint16_t) 0x18D) +#define PPSMC_MSG_STOP_DRAM_LOGGING ((uint16_t) 0x18E) +#define PPSMC_MSG_MASTER_DeepSleep_ON ((uint16_t) 0x18F) +#define PPSMC_MSG_MASTER_DeepSleep_OFF ((uint16_t) 0x190) +#define PPSMC_MSG_Remove_DC_Clamp ((uint16_t) 0x191) +#define PPSMC_MSG_DisableACDCGPIOInterrupt ((uint16_t) 0x192) +#define PPSMC_MSG_OverrideVoltageControl_SetVddc ((uint16_t) 0x193) +#define PPSMC_MSG_OverrideVoltageControl_SetVddci ((uint16_t) 0x194) +#define PPSMC_MSG_SetVidOffset_1 ((uint16_t) 0x195) +#define PPSMC_MSG_SetVidOffset_2 ((uint16_t) 0x207) +#define PPSMC_MSG_GetVidOffset_1 ((uint16_t) 0x196) +#define PPSMC_MSG_GetVidOffset_2 ((uint16_t) 0x208) +#define PPSMC_MSG_THERMAL_OVERDRIVE_Enable ((uint16_t) 0x197) +#define PPSMC_MSG_THERMAL_OVERDRIVE_Disable ((uint16_t) 0x198) +#define PPSMC_MSG_SetTjMax ((uint16_t) 0x199) +#define PPSMC_MSG_SetFanPwmMax ((uint16_t) 0x19A) +#define PPSMC_MSG_WaitForMclkSwitchFinish ((uint16_t) 0x19B) +#define PPSMC_MSG_ENABLE_THERMAL_DPM ((uint16_t) 0x19C) +#define PPSMC_MSG_DISABLE_THERMAL_DPM ((uint16_t) 0x19D) + +#define PPSMC_MSG_API_GetSclkFrequency ((uint16_t) 0x200) +#define PPSMC_MSG_API_GetMclkFrequency ((uint16_t) 0x201) +#define PPSMC_MSG_API_GetSclkBusy ((uint16_t) 0x202) +#define PPSMC_MSG_API_GetMclkBusy ((uint16_t) 0x203) +#define PPSMC_MSG_API_GetAsicPower ((uint16_t) 0x204) +#define PPSMC_MSG_SetFanRpmMax ((uint16_t) 0x205) +#define PPSMC_MSG_SetFanSclkTarget ((uint16_t) 0x206) +#define PPSMC_MSG_SetFanMinPwm ((uint16_t) 0x209) +#define PPSMC_MSG_SetFanTemperatureTarget ((uint16_t) 0x20A) + +#define PPSMC_MSG_BACO_StartMonitor ((uint16_t) 0x240) +#define PPSMC_MSG_BACO_Cancel ((uint16_t) 0x241) +#define PPSMC_MSG_EnableVddGfx ((uint16_t) 0x242) +#define PPSMC_MSG_DisableVddGfx ((uint16_t) 0x243) +#define PPSMC_MSG_UcodeAddressLow ((uint16_t) 0x244) +#define PPSMC_MSG_UcodeAddressHigh ((uint16_t) 0x245) +#define PPSMC_MSG_UcodeLoadStatus ((uint16_t) 0x246) + +#define PPSMC_MSG_DRV_DRAM_ADDR_HI ((uint16_t) 0x250) +#define PPSMC_MSG_DRV_DRAM_ADDR_LO ((uint16_t) 0x251) +#define PPSMC_MSG_SMU_DRAM_ADDR_HI ((uint16_t) 0x252) +#define PPSMC_MSG_SMU_DRAM_ADDR_LO ((uint16_t) 0x253) +#define PPSMC_MSG_LoadUcodes ((uint16_t) 0x254) +#define PPSMC_MSG_PowerStateNotify ((uint16_t) 0x255) +#define PPSMC_MSG_COND_EXEC_DRAM_ADDR_HI ((uint16_t) 0x256) +#define PPSMC_MSG_COND_EXEC_DRAM_ADDR_LO ((uint16_t) 0x257) +#define PPSMC_MSG_VBIOS_DRAM_ADDR_HI ((uint16_t) 0x258) +#define PPSMC_MSG_VBIOS_DRAM_ADDR_LO ((uint16_t) 0x259) +#define PPSMC_MSG_LoadVBios ((uint16_t) 0x25A) +#define PPSMC_MSG_GetUcodeVersion ((uint16_t) 0x25B) +#define DMCUSMC_MSG_PSREntry ((uint16_t) 
0x25C) +#define DMCUSMC_MSG_PSRExit ((uint16_t) 0x25D) +#define PPSMC_MSG_EnableClockGatingFeature ((uint16_t) 0x260) +#define PPSMC_MSG_DisableClockGatingFeature ((uint16_t) 0x261) +#define PPSMC_MSG_IsDeviceRunning ((uint16_t) 0x262) +#define PPSMC_MSG_LoadMetaData ((uint16_t) 0x263) +#define PPSMC_MSG_TMON_AutoCaliberate_Enable ((uint16_t) 0x264) +#define PPSMC_MSG_TMON_AutoCaliberate_Disable ((uint16_t) 0x265) +#define PPSMC_MSG_GetTelemetry1Slope ((uint16_t) 0x266) +#define PPSMC_MSG_GetTelemetry1Offset ((uint16_t) 0x267) +#define PPSMC_MSG_GetTelemetry2Slope ((uint16_t) 0x268) +#define PPSMC_MSG_GetTelemetry2Offset ((uint16_t) 0x269) +#define PPSMC_MSG_EnableAvfs ((uint16_t) 0x26A) +#define PPSMC_MSG_DisableAvfs ((uint16_t) 0x26B) +#define PPSMC_MSG_PerformBtc ((uint16_t) 0x26C) +#define PPSMC_MSG_GetHbmCode ((uint16_t) 0x26D) +#define PPSMC_MSG_GetVrVddcTemperature ((uint16_t) 0x26E) +#define PPSMC_MSG_GetVrMvddTemperature ((uint16_t) 0x26F) +#define PPSMC_MSG_GetLiquidTemperature ((uint16_t) 0x270) +#define PPSMC_MSG_GetPlxTemperature ((uint16_t) 0x271) +#define PPSMC_MSG_RequestI2CControl ((uint16_t) 0x272) +#define PPSMC_MSG_ReleaseI2CControl ((uint16_t) 0x273) +#define PPSMC_MSG_LedConfig ((uint16_t) 0x274) +#define PPSMC_MSG_SetHbmFanCode ((uint16_t) 0x275) +#define PPSMC_MSG_SetHbmThrottleCode ((uint16_t) 0x276) + +#define PPSMC_MSG_GetEnabledPsm ((uint16_t) 0x400) +#define PPSMC_MSG_AgmStartPsm ((uint16_t) 0x401) +#define PPSMC_MSG_AgmReadPsm ((uint16_t) 0x402) +#define PPSMC_MSG_AgmResetPsm ((uint16_t) 0x403) +#define PPSMC_MSG_ReadVftCell ((uint16_t) 0x404) + +/* AVFS Only - Remove Later */ +#define PPSMC_MSG_VftTableIsValid ((uint16_t) 0x666) + +/* If the SMC firmware has an event status soft register this is what the individual bits mean.*/ +#define PPSMC_EVENT_STATUS_THERMAL 0x00000001 +#define PPSMC_EVENT_STATUS_REGULATORHOT 0x00000002 +#define PPSMC_EVENT_STATUS_DC 0x00000004 + +typedef uint16_t PPSMC_Msg; + +#pragma pack(pop) + +#endif diff --git a/drivers/gpu/drm/amd/powerplay/inc/fiji_pwrvirus.h b/drivers/gpu/drm/amd/powerplay/inc/fiji_pwrvirus.h new file mode 100644 index 000000000000..0262ad35502a --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/inc/fiji_pwrvirus.h @@ -0,0 +1,10299 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#ifndef _FIJI_PWRVIRUS_H_ +#define _FIJI_PWRVIRUS_H_ + +#define mmCP_HYP_MEC1_UCODE_ADDR 0xf81a +#define mmCP_HYP_MEC1_UCODE_DATA 0xf81b +#define mmCP_HYP_MEC2_UCODE_ADDR 0xf81c +#define mmCP_HYP_MEC2_UCODE_DATA 0xf81d + +enum PWR_Command +{ + PwrCmdNull = 0, + PwrCmdWrite, + PwrCmdEnd, + PwrCmdMax +}; +typedef enum PWR_Command PWR_Command; + +struct PWR_Command_Table +{ + PWR_Command command; + ULONG data; + ULONG reg; +}; +typedef struct PWR_Command_Table PWR_Command_Table; + +#define PWR_VIRUS_TABLE_SIZE 10243 +static PWR_Command_Table PwrVirusTable[PWR_VIRUS_TABLE_SIZE] = +{ + { PwrCmdWrite, 0x100100b6, mmPCIE_INDEX }, + { PwrCmdWrite, 0x00000000, mmPCIE_DATA }, + { PwrCmdWrite, 0x100100b6, mmPCIE_INDEX }, + { PwrCmdWrite, 0x0300078c, mmPCIE_DATA }, + { PwrCmdWrite, 0x00000000, mmBIF_CLK_CTRL }, + { PwrCmdWrite, 0x00000001, mmBIF_CLK_CTRL }, + { PwrCmdWrite, 0x00000000, mmBIF_CLK_CTRL }, + { PwrCmdWrite, 0x00000003, mmBIF_FB_EN }, + { PwrCmdWrite, 0x00000000, mmBIF_FB_EN }, + { PwrCmdWrite, 0x00000001, mmBIF_DOORBELL_APER_EN }, + { PwrCmdWrite, 0x00000000, mmBIF_DOORBELL_APER_EN }, + { PwrCmdWrite, 0x014000c0, mmPCIE_INDEX }, + { PwrCmdWrite, 0x00000000, mmPCIE_DATA }, + { PwrCmdWrite, 0x014000c0, mmPCIE_INDEX }, + { PwrCmdWrite, 0x22000000, mmPCIE_DATA }, + { PwrCmdWrite, 0x014000c0, mmPCIE_INDEX }, + { PwrCmdWrite, 0x00000000, mmPCIE_DATA }, + /* + { PwrCmdWrite, 0x009f0090, mmMC_VM_FB_LOCATION }, + { PwrCmdWrite, 0x00000000, mmMC_CITF_CNTL }, + { PwrCmdWrite, 0x00000000, mmMC_VM_FB_LOCATION }, + { PwrCmdWrite, 0x009f0090, mmMC_VM_FB_LOCATION }, + { PwrCmdWrite, 0x00000000, mmMC_VM_FB_LOCATION }, + { PwrCmdWrite, 0x009f0090, mmMC_VM_FB_LOCATION }, + { PwrCmdWrite, 0x00000000, mmMC_VM_FB_OFFSET },*/ + { PwrCmdWrite, 0x00000000, mmRLC_CSIB_ADDR_LO }, + { PwrCmdWrite, 0x00000000, mmRLC_CSIB_ADDR_HI }, + { PwrCmdWrite, 0x00000000, mmRLC_CSIB_LENGTH }, + /* + { PwrCmdWrite, 0x00000000, mmMC_VM_MX_L1_TLB_CNTL }, + { PwrCmdWrite, 0x00000001, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR }, + { PwrCmdWrite, 0x00000000, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR }, + { PwrCmdWrite, 0x00000000, mmMC_VM_FB_LOCATION }, + { PwrCmdWrite, 0x009f0090, mmMC_VM_FB_LOCATION },*/ + { PwrCmdWrite, 0x00000000, mmVM_CONTEXT0_CNTL }, + { PwrCmdWrite, 0x00000000, mmVM_CONTEXT1_CNTL }, + /* + { PwrCmdWrite, 0x00000000, mmMC_VM_AGP_BASE }, + { PwrCmdWrite, 0x00000002, mmMC_VM_AGP_BOT }, + { PwrCmdWrite, 0x00000000, mmMC_VM_AGP_TOP },*/ + { PwrCmdWrite, 0x04000000, mmATC_VM_APERTURE0_LOW_ADDR }, + { PwrCmdWrite, 0x0400ff20, mmATC_VM_APERTURE0_HIGH_ADDR }, + { PwrCmdWrite, 0x00000002, mmATC_VM_APERTURE0_CNTL }, + { PwrCmdWrite, 0x0000ffff, mmATC_VM_APERTURE0_CNTL2 }, + { PwrCmdWrite, 0x00000001, mmATC_VM_APERTURE1_LOW_ADDR }, + { PwrCmdWrite, 0x00000000, mmATC_VM_APERTURE1_HIGH_ADDR }, + { PwrCmdWrite, 0x00000000, mmATC_VM_APERTURE1_CNTL }, + { PwrCmdWrite, 0x00000000, mmATC_VM_APERTURE1_CNTL2 }, + //{ PwrCmdWrite, 0x00000000, mmMC_ARB_RAMCFG }, + { PwrCmdWrite, 0x12011003, mmGB_ADDR_CONFIG }, + { PwrCmdWrite, 0x00800010, mmGB_TILE_MODE0 }, + { PwrCmdWrite, 0x00800810, mmGB_TILE_MODE1 }, + { PwrCmdWrite, 0x00801010, mmGB_TILE_MODE2 }, + { PwrCmdWrite, 0x00801810, mmGB_TILE_MODE3 }, + { PwrCmdWrite, 0x00802810, mmGB_TILE_MODE4 }, + { PwrCmdWrite, 0x00802808, mmGB_TILE_MODE5 }, + { PwrCmdWrite, 0x00802814, mmGB_TILE_MODE6 }, + { PwrCmdWrite, 0x00000000, mmGB_TILE_MODE7 }, + { PwrCmdWrite, 0x00000004, mmGB_TILE_MODE8 }, + { PwrCmdWrite, 0x02000008, mmGB_TILE_MODE9 }, + { PwrCmdWrite, 0x02000010, mmGB_TILE_MODE10 }, + { 
PwrCmdWrite, 0x06000014, mmGB_TILE_MODE11 }, + { PwrCmdWrite, 0x00000000, mmGB_TILE_MODE12 }, + { PwrCmdWrite, 0x02400008, mmGB_TILE_MODE13 }, + { PwrCmdWrite, 0x02400010, mmGB_TILE_MODE14 }, + { PwrCmdWrite, 0x02400030, mmGB_TILE_MODE15 }, + { PwrCmdWrite, 0x06400014, mmGB_TILE_MODE16 }, + { PwrCmdWrite, 0x00000000, mmGB_TILE_MODE17 }, + { PwrCmdWrite, 0x0040000c, mmGB_TILE_MODE18 }, + { PwrCmdWrite, 0x0100000c, mmGB_TILE_MODE19 }, + { PwrCmdWrite, 0x0100001c, mmGB_TILE_MODE20 }, + { PwrCmdWrite, 0x01000034, mmGB_TILE_MODE21 }, + { PwrCmdWrite, 0x01000024, mmGB_TILE_MODE22 }, + { PwrCmdWrite, 0x00000000, mmGB_TILE_MODE23 }, + { PwrCmdWrite, 0x0040001c, mmGB_TILE_MODE24 }, + { PwrCmdWrite, 0x01000020, mmGB_TILE_MODE25 }, + { PwrCmdWrite, 0x01000038, mmGB_TILE_MODE26 }, + { PwrCmdWrite, 0x02c00008, mmGB_TILE_MODE27 }, + { PwrCmdWrite, 0x02c00010, mmGB_TILE_MODE28 }, + { PwrCmdWrite, 0x06c00014, mmGB_TILE_MODE29 }, + { PwrCmdWrite, 0x00000000, mmGB_TILE_MODE30 }, + { PwrCmdWrite, 0x00000000, mmGB_TILE_MODE31 }, + { PwrCmdWrite, 0x000000a8, mmGB_MACROTILE_MODE0 }, + { PwrCmdWrite, 0x000000a4, mmGB_MACROTILE_MODE1 }, + { PwrCmdWrite, 0x00000090, mmGB_MACROTILE_MODE2 }, + { PwrCmdWrite, 0x00000090, mmGB_MACROTILE_MODE3 }, + { PwrCmdWrite, 0x00000090, mmGB_MACROTILE_MODE4 }, + { PwrCmdWrite, 0x00000090, mmGB_MACROTILE_MODE5 }, + { PwrCmdWrite, 0x00000090, mmGB_MACROTILE_MODE6 }, + { PwrCmdWrite, 0x00000000, mmGB_MACROTILE_MODE7 }, + { PwrCmdWrite, 0x000000ee, mmGB_MACROTILE_MODE8 }, + { PwrCmdWrite, 0x000000ea, mmGB_MACROTILE_MODE9 }, + { PwrCmdWrite, 0x000000e9, mmGB_MACROTILE_MODE10 }, + { PwrCmdWrite, 0x000000e5, mmGB_MACROTILE_MODE11 }, + { PwrCmdWrite, 0x000000e4, mmGB_MACROTILE_MODE12 }, + { PwrCmdWrite, 0x000000e0, mmGB_MACROTILE_MODE13 }, + { PwrCmdWrite, 0x00000090, mmGB_MACROTILE_MODE14 }, + { PwrCmdWrite, 0x00000000, mmGB_MACROTILE_MODE15 }, + { PwrCmdWrite, 0x00900000, mmHDP_NONSURFACE_BASE }, + { PwrCmdWrite, 0x00008000, mmHDP_NONSURFACE_INFO }, + { PwrCmdWrite, 0x3fffffff, mmHDP_NONSURFACE_SIZE }, + { PwrCmdWrite, 0x00000003, mmBIF_FB_EN }, + //{ PwrCmdWrite, 0x00000000, mmMC_VM_FB_OFFSET }, + { PwrCmdWrite, 0x00000000, mmSRBM_CNTL }, + { PwrCmdWrite, 0x00020000, mmSRBM_CNTL }, + { PwrCmdWrite, 0x80000000, mmATC_VMID0_PASID_MAPPING }, + { PwrCmdWrite, 0x00000000, mmATC_VMID_PASID_MAPPING_UPDATE_STATUS }, + { PwrCmdWrite, 0x00000000, mmRLC_CNTL }, + { PwrCmdWrite, 0x00000000, mmRLC_CNTL }, + { PwrCmdWrite, 0x00000000, mmRLC_CNTL }, + { PwrCmdWrite, 0xe0000000, mmGRBM_GFX_INDEX }, + { PwrCmdWrite, 0x00000000, mmCGTS_TCC_DISABLE }, + { PwrCmdWrite, 0x00000000, mmTCP_ADDR_CONFIG }, + { PwrCmdWrite, 0x000000ff, mmTCP_ADDR_CONFIG }, + { PwrCmdWrite, 0x76543210, mmTCP_CHAN_STEER_LO }, + { PwrCmdWrite, 0xfedcba98, mmTCP_CHAN_STEER_HI }, + { PwrCmdWrite, 0x00000000, mmDB_DEBUG2 }, + { PwrCmdWrite, 0x00000000, mmDB_DEBUG }, + { PwrCmdWrite, 0x00002b16, mmCP_QUEUE_THRESHOLDS }, + { PwrCmdWrite, 0x00006030, mmCP_MEQ_THRESHOLDS }, + { PwrCmdWrite, 0x01000104, mmSPI_CONFIG_CNTL_1 }, + { PwrCmdWrite, 0x98184020, mmPA_SC_FIFO_SIZE }, + { PwrCmdWrite, 0x00000001, mmVGT_NUM_INSTANCES }, + { PwrCmdWrite, 0x00000000, mmCP_PERFMON_CNTL }, + { PwrCmdWrite, 0x01180000, mmSQ_CONFIG }, + { PwrCmdWrite, 0x00000000, mmVGT_CACHE_INVALIDATION }, + { PwrCmdWrite, 0x00000000, mmSQ_THREAD_TRACE_BASE }, + { PwrCmdWrite, 0x0000df80, mmSQ_THREAD_TRACE_MASK }, + { PwrCmdWrite, 0x02249249, mmSQ_THREAD_TRACE_MODE }, + { PwrCmdWrite, 0x00000000, mmPA_SC_LINE_STIPPLE_STATE }, + { PwrCmdWrite, 0x00000000, 
mmCB_PERFCOUNTER0_SELECT1 }, + { PwrCmdWrite, 0x06000100, mmCGTT_VGT_CLK_CTRL }, + { PwrCmdWrite, 0x00000007, mmPA_CL_ENHANCE }, + { PwrCmdWrite, 0x00000001, mmPA_SC_ENHANCE }, + { PwrCmdWrite, 0x00ffffff, mmPA_SC_FORCE_EOV_MAX_CNTS }, + { PwrCmdWrite, 0x00000000, mmSRBM_GFX_CNTL }, + { PwrCmdWrite, 0x00000320, mmSH_MEM_CONFIG }, + { PwrCmdWrite, 0x00000010, mmSRBM_GFX_CNTL }, + { PwrCmdWrite, 0x00000320, mmSH_MEM_CONFIG }, + { PwrCmdWrite, 0x00000020, mmSRBM_GFX_CNTL }, + { PwrCmdWrite, 0x00000320, mmSH_MEM_CONFIG }, + { PwrCmdWrite, 0x00000030, mmSRBM_GFX_CNTL }, + { PwrCmdWrite, 0x00000320, mmSH_MEM_CONFIG }, + { PwrCmdWrite, 0x00000040, mmSRBM_GFX_CNTL }, + { PwrCmdWrite, 0x00000320, mmSH_MEM_CONFIG }, + { PwrCmdWrite, 0x00000050, mmSRBM_GFX_CNTL }, + { PwrCmdWrite, 0x00000320, mmSH_MEM_CONFIG }, + { PwrCmdWrite, 0x00000060, mmSRBM_GFX_CNTL }, + { PwrCmdWrite, 0x00000320, mmSH_MEM_CONFIG }, + { PwrCmdWrite, 0x00000070, mmSRBM_GFX_CNTL }, + { PwrCmdWrite, 0x00000320, mmSH_MEM_CONFIG }, + { PwrCmdWrite, 0x00000080, mmSRBM_GFX_CNTL }, + { PwrCmdWrite, 0x00000320, mmSH_MEM_CONFIG }, + { PwrCmdWrite, 0x00000090, mmSRBM_GFX_CNTL }, + { PwrCmdWrite, 0x00000320, mmSH_MEM_CONFIG }, + { PwrCmdWrite, 0x000000a0, mmSRBM_GFX_CNTL }, + { PwrCmdWrite, 0x00000320, mmSH_MEM_CONFIG }, + { PwrCmdWrite, 0x000000b0, mmSRBM_GFX_CNTL }, + { PwrCmdWrite, 0x00000320, mmSH_MEM_CONFIG }, + { PwrCmdWrite, 0x000000c0, mmSRBM_GFX_CNTL }, + { PwrCmdWrite, 0x00000320, mmSH_MEM_CONFIG }, + { PwrCmdWrite, 0x000000d0, mmSRBM_GFX_CNTL }, + { PwrCmdWrite, 0x00000320, mmSH_MEM_CONFIG }, + { PwrCmdWrite, 0x000000e0, mmSRBM_GFX_CNTL }, + { PwrCmdWrite, 0x00000320, mmSH_MEM_CONFIG }, + { PwrCmdWrite, 0x000000f0, mmSRBM_GFX_CNTL }, + { PwrCmdWrite, 0x00000320, mmSH_MEM_CONFIG }, + { PwrCmdWrite, 0x00000000, mmSRBM_GFX_CNTL }, + { PwrCmdWrite, 0x00000000, mmGRBM_STATUS }, + { PwrCmdWrite, 0x00000000, mmGRBM_STATUS }, + { PwrCmdWrite, 0x00000000, mmGRBM_STATUS }, + { PwrCmdWrite, 0x00000000, mmGRBM_STATUS }, + { PwrCmdWrite, 0x00000000, mmGRBM_STATUS }, + { PwrCmdWrite, 0x00000000, mmGRBM_STATUS }, + { PwrCmdWrite, 0x00000000, mmGRBM_STATUS }, + { PwrCmdWrite, 0x00000000, mmGRBM_STATUS }, + { PwrCmdWrite, 0x00000000, mmGRBM_STATUS }, + { PwrCmdWrite, 0x00000000, mmGRBM_STATUS }, + { PwrCmdWrite, 0x00000000, mmGRBM_STATUS }, + { PwrCmdWrite, 0x00000000, mmGRBM_STATUS }, + { PwrCmdWrite, 0x00000000, mmGRBM_STATUS }, + { PwrCmdWrite, 0x00000000, mmGRBM_STATUS }, + { PwrCmdWrite, 0x00000000, mmGRBM_STATUS }, + { PwrCmdWrite, 0x00000000, mmGRBM_STATUS }, + { PwrCmdWrite, 0x00000000, mmGRBM_STATUS }, + { PwrCmdWrite, 0x00000000, mmGRBM_STATUS }, + { PwrCmdWrite, 0x00000000, mmGRBM_STATUS }, + { PwrCmdWrite, 0x00000000, mmGRBM_STATUS }, + { PwrCmdWrite, 0x00000000, mmGRBM_STATUS }, + { PwrCmdWrite, 0x00000000, mmGRBM_STATUS }, + { PwrCmdWrite, 0x00000000, mmGRBM_STATUS }, + { PwrCmdWrite, 0x00000000, mmGRBM_STATUS }, + { PwrCmdWrite, 0x00000000, mmGRBM_STATUS }, + { PwrCmdWrite, 0x00000000, mmGRBM_STATUS }, + { PwrCmdWrite, 0x00000000, mmGRBM_STATUS }, + { PwrCmdWrite, 0x00000000, mmGRBM_STATUS }, + { PwrCmdWrite, 0x00000000, mmGRBM_STATUS }, + { PwrCmdWrite, 0x00000000, mmRLC_PG_CNTL }, + { PwrCmdWrite, 0x00000000, mmGRBM_STATUS2 }, + { PwrCmdWrite, 0x15000000, mmCP_ME_CNTL }, + { PwrCmdWrite, 0x50000000, mmCP_MEC_CNTL }, + { PwrCmdWrite, 0x00000000, mmSRBM_GFX_CNTL }, + { PwrCmdWrite, 0x0000000e, mmSH_MEM_APE1_BASE }, + { PwrCmdWrite, 0x0000020d, mmSH_MEM_APE1_LIMIT }, + { PwrCmdWrite, 0x00000000, mmSRBM_GFX_CNTL }, + { PwrCmdWrite, 
0x00000000, mmSRBM_GFX_CNTL }, + { PwrCmdWrite, 0x00000000, mmSH_MEM_CONFIG }, + { PwrCmdWrite, 0x00000320, mmSH_MEM_CONFIG }, + { PwrCmdWrite, 0x00000000, mmSRBM_GFX_CNTL }, + { PwrCmdWrite, 0x00000000, mmCP_RB_VMID }, + { PwrCmdWrite, 0x00000000, mmGRBM_STATUS }, + { PwrCmdWrite, 0x00000000, mmRLC_CNTL }, + { PwrCmdWrite, 0x00000000, mmRLC_CNTL }, + { PwrCmdWrite, 0x00000000, mmRLC_SRM_CNTL }, + { PwrCmdWrite, 0x00000002, mmRLC_SRM_CNTL }, + { PwrCmdWrite, 0x00000000, mmCP_ME_CNTL }, + { PwrCmdWrite, 0x15000000, mmCP_ME_CNTL }, + { PwrCmdWrite, 0x00000000, mmCP_MEC_CNTL }, + { PwrCmdWrite, 0x50000000, mmCP_MEC_CNTL }, + { PwrCmdWrite, 0x80000004, mmCP_DFY_CNTL }, + { PwrCmdWrite, 0x0840800a, mmCP_RB0_CNTL }, + { PwrCmdWrite, 0xf30fff0f, mmTCC_CTRL }, + { PwrCmdWrite, 0x00000002, mmTCC_EXE_DISABLE }, + { PwrCmdWrite, 0x000000ff, mmTCP_ADDR_CONFIG }, + { PwrCmdWrite, 0x540ff000, mmCP_CPC_IC_BASE_LO }, + { PwrCmdWrite, 0x000000b4, mmCP_CPC_IC_BASE_HI }, + { PwrCmdWrite, 0x00010000, mmCP_HYP_MEC1_UCODE_ADDR }, + { PwrCmdWrite, 0x00041b75, mmCP_HYP_MEC1_UCODE_DATA }, + { PwrCmdWrite, 0x000710e8, mmCP_HYP_MEC1_UCODE_DATA }, + { PwrCmdWrite, 0x000910dd, mmCP_HYP_MEC1_UCODE_DATA }, + { PwrCmdWrite, 0x000a1081, mmCP_HYP_MEC1_UCODE_DATA }, + { PwrCmdWrite, 0x000b016f, mmCP_HYP_MEC1_UCODE_DATA }, + { PwrCmdWrite, 0x000c0e3c, mmCP_HYP_MEC1_UCODE_DATA }, + { PwrCmdWrite, 0x000d10ec, mmCP_HYP_MEC1_UCODE_DATA }, + { PwrCmdWrite, 0x000e0188, mmCP_HYP_MEC1_UCODE_DATA }, + { PwrCmdWrite, 0x00101b5d, mmCP_HYP_MEC1_UCODE_DATA }, + { PwrCmdWrite, 0x00150a6c, mmCP_HYP_MEC1_UCODE_DATA }, + { PwrCmdWrite, 0x00170c5e, mmCP_HYP_MEC1_UCODE_DATA }, + { PwrCmdWrite, 0x001d0c8c, mmCP_HYP_MEC1_UCODE_DATA }, + { PwrCmdWrite, 0x001e0cfe, mmCP_HYP_MEC1_UCODE_DATA }, + { PwrCmdWrite, 0x00221408, mmCP_HYP_MEC1_UCODE_DATA }, + { PwrCmdWrite, 0x00370d7b, mmCP_HYP_MEC1_UCODE_DATA }, + { PwrCmdWrite, 0x00390dcb, mmCP_HYP_MEC1_UCODE_DATA }, + { PwrCmdWrite, 0x003c142f, mmCP_HYP_MEC1_UCODE_DATA }, + { PwrCmdWrite, 0x003f0b27, mmCP_HYP_MEC1_UCODE_DATA }, + { PwrCmdWrite, 0x00400e63, mmCP_HYP_MEC1_UCODE_DATA }, + { PwrCmdWrite, 0x00500f62, mmCP_HYP_MEC1_UCODE_DATA }, + { PwrCmdWrite, 0x00460fa7, mmCP_HYP_MEC1_UCODE_DATA }, + { PwrCmdWrite, 0x00490fa7, mmCP_HYP_MEC1_UCODE_DATA }, + { PwrCmdWrite, 0x005811d4, mmCP_HYP_MEC1_UCODE_DATA }, + { PwrCmdWrite, 0x00680ad6, mmCP_HYP_MEC1_UCODE_DATA }, + { PwrCmdWrite, 0x00760b00, mmCP_HYP_MEC1_UCODE_DATA }, + { PwrCmdWrite, 0x00780b0c, mmCP_HYP_MEC1_UCODE_DATA }, + { PwrCmdWrite, 0x00790af7, mmCP_HYP_MEC1_UCODE_DATA }, + { PwrCmdWrite, 0x007d1aba, mmCP_HYP_MEC1_UCODE_DATA }, + { PwrCmdWrite, 0x007e1abe, mmCP_HYP_MEC1_UCODE_DATA }, + { PwrCmdWrite, 0x00591260, mmCP_HYP_MEC1_UCODE_DATA }, + { PwrCmdWrite, 0x005a12fb, mmCP_HYP_MEC1_UCODE_DATA }, + { PwrCmdWrite, 0x00861ac7, mmCP_HYP_MEC1_UCODE_DATA }, + { PwrCmdWrite, 0x008c1b01, mmCP_HYP_MEC1_UCODE_DATA }, + { PwrCmdWrite, 0x008d1b34, mmCP_HYP_MEC1_UCODE_DATA }, + { PwrCmdWrite, 0x00a014b9, mmCP_HYP_MEC1_UCODE_DATA }, + { PwrCmdWrite, 0x00a1152e, mmCP_HYP_MEC1_UCODE_DATA }, + { PwrCmdWrite, 0x00a216fb, mmCP_HYP_MEC1_UCODE_DATA }, + { PwrCmdWrite, 0x00a41890, mmCP_HYP_MEC1_UCODE_DATA }, + { PwrCmdWrite, 0x00a31906, mmCP_HYP_MEC1_UCODE_DATA }, + { PwrCmdWrite, 0x00a50b14, mmCP_HYP_MEC1_UCODE_DATA }, + { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, + { PwrCmdWrite, 0x00621387, mmCP_HYP_MEC1_UCODE_DATA }, + { PwrCmdWrite, 0x005c0b27, mmCP_HYP_MEC1_UCODE_DATA }, + { PwrCmdWrite, 0x00160a75, mmCP_HYP_MEC1_UCODE_DATA }, + { PwrCmdWrite, 
0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, + { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, + { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, + { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, + { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, + { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, + { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, + { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, + { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, + { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, + { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, + { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, + { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, + { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, + { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, + { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, + { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, + { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, + { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, + { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, + { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, + { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, + { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, + { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, + { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, + { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, + { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, + { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, + { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, + { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, + { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, + { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, + { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, + { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, + { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, + { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, + { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, + { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, + { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, + { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, + { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, + { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, + { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, + { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, + { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, + { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, + { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, + { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, + { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, + { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, + { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, + { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, + { PwrCmdWrite, 0x00010000, mmCP_HYP_MEC2_UCODE_ADDR }, + { PwrCmdWrite, 0x00041b75, mmCP_HYP_MEC2_UCODE_DATA }, + { PwrCmdWrite, 0x000710e8, mmCP_HYP_MEC2_UCODE_DATA }, + { PwrCmdWrite, 0x000910dd, mmCP_HYP_MEC2_UCODE_DATA }, + { PwrCmdWrite, 0x000a1081, mmCP_HYP_MEC2_UCODE_DATA }, + { PwrCmdWrite, 0x000b016f, mmCP_HYP_MEC2_UCODE_DATA }, + { PwrCmdWrite, 0x000c0e3c, mmCP_HYP_MEC2_UCODE_DATA }, + { PwrCmdWrite, 0x000d10ec, mmCP_HYP_MEC2_UCODE_DATA }, + { PwrCmdWrite, 0x000e0188, mmCP_HYP_MEC2_UCODE_DATA }, + { PwrCmdWrite, 0x00101b5d, mmCP_HYP_MEC2_UCODE_DATA }, + { PwrCmdWrite, 0x00150a6c, 
mmCP_HYP_MEC2_UCODE_DATA }, + { PwrCmdWrite, 0x00170c5e, mmCP_HYP_MEC2_UCODE_DATA }, + { PwrCmdWrite, 0x001d0c8c, mmCP_HYP_MEC2_UCODE_DATA }, + { PwrCmdWrite, 0x001e0cfe, mmCP_HYP_MEC2_UCODE_DATA }, + { PwrCmdWrite, 0x00221408, mmCP_HYP_MEC2_UCODE_DATA }, + { PwrCmdWrite, 0x00370d7b, mmCP_HYP_MEC2_UCODE_DATA }, + { PwrCmdWrite, 0x00390dcb, mmCP_HYP_MEC2_UCODE_DATA }, + { PwrCmdWrite, 0x003c142f, mmCP_HYP_MEC2_UCODE_DATA }, + { PwrCmdWrite, 0x003f0b27, mmCP_HYP_MEC2_UCODE_DATA }, + { PwrCmdWrite, 0x00400e63, mmCP_HYP_MEC2_UCODE_DATA }, + { PwrCmdWrite, 0x00500f62, mmCP_HYP_MEC2_UCODE_DATA }, + { PwrCmdWrite, 0x00460fa7, mmCP_HYP_MEC2_UCODE_DATA }, + { PwrCmdWrite, 0x00490fa7, mmCP_HYP_MEC2_UCODE_DATA }, + { PwrCmdWrite, 0x005811d4, mmCP_HYP_MEC2_UCODE_DATA }, + { PwrCmdWrite, 0x00680ad6, mmCP_HYP_MEC2_UCODE_DATA }, + { PwrCmdWrite, 0x00760b00, mmCP_HYP_MEC2_UCODE_DATA }, + { PwrCmdWrite, 0x00780b0c, mmCP_HYP_MEC2_UCODE_DATA }, + { PwrCmdWrite, 0x00790af7, mmCP_HYP_MEC2_UCODE_DATA }, + { PwrCmdWrite, 0x007d1aba, mmCP_HYP_MEC2_UCODE_DATA }, + { PwrCmdWrite, 0x007e1abe, mmCP_HYP_MEC2_UCODE_DATA }, + { PwrCmdWrite, 0x00591260, mmCP_HYP_MEC2_UCODE_DATA }, + { PwrCmdWrite, 0x005a12fb, mmCP_HYP_MEC2_UCODE_DATA }, + { PwrCmdWrite, 0x00861ac7, mmCP_HYP_MEC2_UCODE_DATA }, + { PwrCmdWrite, 0x008c1b01, mmCP_HYP_MEC2_UCODE_DATA }, + { PwrCmdWrite, 0x008d1b34, mmCP_HYP_MEC2_UCODE_DATA }, + { PwrCmdWrite, 0x00a014b9, mmCP_HYP_MEC2_UCODE_DATA }, + { PwrCmdWrite, 0x00a1152e, mmCP_HYP_MEC2_UCODE_DATA }, + { PwrCmdWrite, 0x00a216fb, mmCP_HYP_MEC2_UCODE_DATA }, + { PwrCmdWrite, 0x00a41890, mmCP_HYP_MEC2_UCODE_DATA }, + { PwrCmdWrite, 0x00a31906, mmCP_HYP_MEC2_UCODE_DATA }, + { PwrCmdWrite, 0x00a50b14, mmCP_HYP_MEC2_UCODE_DATA }, + { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, + { PwrCmdWrite, 0x00621387, mmCP_HYP_MEC2_UCODE_DATA }, + { PwrCmdWrite, 0x005c0b27, mmCP_HYP_MEC2_UCODE_DATA }, + { PwrCmdWrite, 0x00160a75, mmCP_HYP_MEC2_UCODE_DATA }, + { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, + { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, + { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, + { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, + { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, + { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, + { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, + { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, + { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, + { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, + { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, + { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, + { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, + { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, + { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, + { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, + { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, + { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, + { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, + { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, + { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, + { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, + { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, + { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, + { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, + { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, + { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, + { PwrCmdWrite, 0x000f016a, 
mmCP_HYP_MEC2_UCODE_DATA }, + { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, + { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, + { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, + { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, + { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, + { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, + { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, + { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, + { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, + { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, + { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, + { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, + { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, + { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, + { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, + { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, + { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, + { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, + { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, + { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, + { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, + { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, + { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, + { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, + { PwrCmdWrite, 0x80000004, mmCP_DFY_CNTL }, + { PwrCmdWrite, 0x000000b4, mmCP_DFY_ADDR_HI }, + { PwrCmdWrite, 0x540fe800, mmCP_DFY_ADDR_LO }, + { PwrCmdWrite, 0x7e000200, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7e020201, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7e040204, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7e060205, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 
}, + { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 }, 
+ { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 }, + 
{ PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xbf810000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000005, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x54106f00, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x000400b4, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00004000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00804fac, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000004, mmCP_DFY_CNTL }, + { PwrCmdWrite, 0x000000b4, mmCP_DFY_ADDR_HI }, + { PwrCmdWrite, 0x540fef00, mmCP_DFY_ADDR_LO }, + { PwrCmdWrite, 0xc0031502, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00001e00, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000004, mmCP_DFY_CNTL }, + { PwrCmdWrite, 0x000000b4, mmCP_DFY_ADDR_HI }, + { PwrCmdWrite, 0x540ff000, mmCP_DFY_ADDR_LO }, + { PwrCmdWrite, 0xc424000b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000145, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x94800001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x94c00001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x95000001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x95400001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x95800001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xdc810000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xdcc10000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xdd010000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xdd410000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xdd810000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4080061, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8000003, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc40c0001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x24ccffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x3cd08000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9500fffd, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1cd0ffcf, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7d018001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4140004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x050c0019, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x84c00000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000023, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000067, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8000006a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8000006d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000079, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000084, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8000008f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000099, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x800000a0, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x800000af, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400053, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4080007, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x388c0001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x08880002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04100003, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x94c00005, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x98800003, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04100004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8000002d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04100005, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c00003f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c000043, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x28cc0000, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0xccc00050, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c000055, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x28080001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcc000004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7d808001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd88130b8, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd400008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xdc180000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xdc140000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xdc100000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xdc0c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcc800005, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xdc080000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc40c000e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x28cc0008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xccc00013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd013278, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4113278, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x95000001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x24cc0700, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4113255, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd01324f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4113254, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1d10ffdf, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd013254, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x10cc0014, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1d10c017, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7d0d000a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd0130b7, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x14cc0010, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd9c00036, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8000005d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc00c4000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xccc130b5, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc40c000e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x28cc0008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xccc00013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc40c0021, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x14d00011, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9500fffe, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xdc030000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd800000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd800000d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc40c005e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x94c01b10, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc00e0080, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xccc130b5, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8000013b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc00e0800, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xccc130b5, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8000013b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400053, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04100006, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c00003f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c000043, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x28cc0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xccc00050, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c000055, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x280c0008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xccc00052, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8000021, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x28180039, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000034, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400053, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0x04100007, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c00003f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c000043, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x28cc0001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xccc00050, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c000055, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x280c0010, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xccc00052, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x28180039, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000034, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400053, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04100008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c00003f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c000043, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x28cc0003, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xccc00050, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c000055, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x280c0020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xccc00052, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x28180039, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000034, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xdc030000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8000069, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x28080001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc428000d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7ca88004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcc800079, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04280001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcc00006f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8000013b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000034, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04100010, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c00003f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c000043, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xccc00078, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c000055, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x28180080, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000034, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04100001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc40c000e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x28cc0008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xccc00013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd013278, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4113278, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x95000001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc00c4000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4113254, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1d10c017, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xccc130b5, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd0130b7, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8000013b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x95c00001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x96000001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x96400001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x96800001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x96c00001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x97000001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x97400001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x97800001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x97c00001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xdc810000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc40c000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd4c0380, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcdcc0388, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x55dc0020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcdcc038c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce0c0390, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x56200020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce0c0394, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce4c0398, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x56640020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce4c039c, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0xce8c03a0, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x56a80020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce8c03a4, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcecc03a8, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x56ec0020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcecc03ac, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf0c03b0, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x57300020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf0c03b4, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf4c03b8, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x57740020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf4c03bc, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf8c03c0, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x57b80020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf8c03c4, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcfcc03c8, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x57fc0020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcfcc03cc, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd9000033, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc41c0009, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x25dc0010, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x95c0fffe, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc41c000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x05dc002f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcdc12009, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc41d200a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcc012009, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd9000034, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x25e01c00, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x12200013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x25e40300, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x12640008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x25e800c0, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x12a80002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x25ec003f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7e25c00a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7eae400a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7de5c00a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xddc10000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc02ee000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcec1c200, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc40c005f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xccc00037, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x24d000ff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x31100006, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9500007b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c000190, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xdc1c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcdc1c200, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc40c000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4df0388, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4d7038c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7d5dc01a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4e30390, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4d70394, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7d62001a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4e70398, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4d7039c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7d66401a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4eb03a0, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4d703a4, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7d6a801a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4ef03a8, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4d703ac, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7d6ec01a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4f303b0, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0xc4d703b4, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7d73001a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4f703b8, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4d703bc, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7d77401a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4fb03c0, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4d703c4, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7d7b801a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4ff03c8, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4d703cc, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7d7fc01a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xdc080000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcc800013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4d70380, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4080001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1c88001c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd400008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc40c0083, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x94c00010, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xdc0e0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x94c0000e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc40c0082, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x24d00001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9900000b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x18cc01e3, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x3cd00004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x95000008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc40c0085, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x18cc006a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x98c00005, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc40c0082, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x18cc01e3, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x3cd00004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9900fffa, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xdc180000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xdc140000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xdc100000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xdc0c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcc800004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xdc080000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4080001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1c88001c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd400008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xdc180000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xdc140000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xdc100000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xdc0c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcc800004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xdc080000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400051, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc428000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04180018, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x32640002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9a80001f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9a40001e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd800013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4293265, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x040c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1aac0027, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2aa80080, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce813265, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9ac00017, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd80002f1, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04080002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x08880001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8080250, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8080258, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8080230, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0xd8080238, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8080240, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8080248, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8080268, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8080270, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8080278, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8080280, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8080228, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8000367, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9880fff3, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04080010, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x08880001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd80c0309, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd80c0319, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04cc0001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9880fffc, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc00e0100, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xccc130b5, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8000016e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4180032, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x29980008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd800013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x95800001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x18d0003f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x24d4001f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x24d80001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x155c0001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x05e80180, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9900000b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x202c003d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd800010, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcec1325b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc42d325b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x96c00001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000aa7, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000bfc, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x800012e9, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4200007, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a200001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce000010, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c000190, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc410001b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8000032, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8000031, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9900091a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x24d000ff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x05280196, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x18d4fe04, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x29540008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x800001b4, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8000032b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000350, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000352, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8000035f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000701, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8000047c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8000019f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000800, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc419325b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1d98001f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd81325b, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0x8c00003f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4140004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04100002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c000043, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x28cc0002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xccc00050, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc43c0044, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x27fc0003, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9bc00002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x97c00006, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc00c4000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xccc130b5, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c000055, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd88130b8, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd400008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7d40c001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400028, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd9400036, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4193256, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc41d3254, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x15540008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd400009, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd40005b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd40005e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd40005d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd840006d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc421325a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc42d3249, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x11540015, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x19a4003c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1998003f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1af0007d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x11dc000b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1264001f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x15dc000d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7d65400a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x13300018, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1a38003f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7dd5c00a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7df1c00a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd800045, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcdc00100, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc411326a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc415326b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc419326c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc41d326d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc425326e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4293279, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce800077, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd000056, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd400057, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd800058, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcdc00059, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4193265, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x259c8000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x99c00004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce40005a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x29988000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd813265, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4113248, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2510000f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd000073, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc418000d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc411326f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x17300019, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x97000009, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x25140fff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x95400007, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0xd800003a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c001b6d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4153279, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd400077, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd00005f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8000075, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x26f00001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x15100010, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7d190004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd000035, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x97000035, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1af07fe8, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8800013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400010, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf00000d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf00000a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c001427, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04340022, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x07740001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04300010, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xdf430000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c434001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd4412e01, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0434001e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xdf430000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd4400078, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xdf030000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd4412e40, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcc41c030, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcc41c031, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc43dc031, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xccc00013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04343000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4113246, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc41d3245, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf413267, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x51100020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7dd1c01a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4353267, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x45dc0160, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc810001f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1b4c0057, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1b700213, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1b740199, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7f4f400a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7f73400a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x55180020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2198003f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd1c00025, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf400024, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd000026, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd800026, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9bc00001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x248dfffe, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8800013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xccc12e00, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c434001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c434001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c00142b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc43c000e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1af4007d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2bfc0008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x33740003, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x26d80001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1ae8003e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9680000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4253277, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x26680001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x96800009, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0x2a640002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce413277, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4253348, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce413348, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4253348, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x96400001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9b400003, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x958000d8, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000315, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4253277, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04303000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x26680001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf013267, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4193246, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc41d3245, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4313267, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x96800041, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x51980020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1b342010, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7d9d801a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1714000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x25540800, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1b30c012, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x459801b0, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7d77400a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7f37000a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2b300000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf00001c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd180001e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04240010, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x199c01e2, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7e5e4002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x3e5c0004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x3e540002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc428000f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9a80ffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x95c00006, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc80c0011, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc8140011, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x54d00020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x55580020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000282, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x95400015, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc80c0011, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a640002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x041c0001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x45980008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x54d00020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x96400004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc8140011, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x45980004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x041c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf00001c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd180001e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc428000f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9a80ffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x99c00003, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc8180011, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000282, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc8140011, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x55580020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000282, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x45980004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc80c0011, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf00001c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd180001e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc428000f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9a80ffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc8100011, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0xc8140011, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x55580020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xccc1334e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd01334f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd413350, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd813351, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd881334d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4193273, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc41d3275, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc40d3271, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4113270, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4153274, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x50cc0020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7cd0c01a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7cdcc011, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x05900008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd00006a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcdc0006b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc41d3272, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7d594002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x54d00020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8800013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xccc12e23, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd012e24, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcdc12e25, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4193246, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc41d3245, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4313267, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x15540002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x51980020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7d9d801a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc81c001f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1b340057, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1b280213, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1b300199, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x45980198, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7f37000a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7f2b000a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x55e40020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf000024, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd1800025, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcdc00026, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce400026, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd40000d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd40000a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc40d3249, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x20cc003c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xccc13249, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4113274, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xdd430000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc01e0001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x29dc0002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04280000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8000036, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcc400078, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcc400078, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2d540002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x95400022, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x078c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x07d40000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c00120d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c001239, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c001232, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04f80000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x057c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcdc00013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc414000d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc41c0019, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7dd5c005, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x25dc0001, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0xd840007c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400074, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400069, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc40c005e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x94c018a6, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd4412e22, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd800007c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc40c005e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x94c018a2, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x95c00007, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc40c0019, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7cd4c005, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x24cc0001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x94c00008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9680fffc, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x800002e3, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc40c0057, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7cd0c002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x94c00003, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9680fffd, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x800002e3, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8000069, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd013273, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd013275, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8000074, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc414005e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9540188f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc40d3249, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc013cfff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7cd0c009, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xccc13249, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9680000b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc40c0077, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x38d00001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x99000006, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04cc0002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xdcc30000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc40c005e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x94c01882, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd4400078, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd800000d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000304, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c41c001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c41c001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd840002f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc41c0015, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x95c0ffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400030, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc41c0016, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x95c0ffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8000030, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc41c0016, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x99c0ffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd800002f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc41c0015, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x99c0ffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc81c001f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x49980198, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x55e40020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x459801a0, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf000024, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd1800025, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcdc00026, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce400026, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04302000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf013267, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4313267, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x96800004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x97000001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8000036, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0x80000329, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8800013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcc812e00, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04302000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf013267, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4313267, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x97000001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4193256, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc42d3249, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x16ec001f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8000028, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd800002b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1998003e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcec00031, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8000036, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8000010, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x97800004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400010, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce00000a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1a18003e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd800008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd88130b8, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04100000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7d43c001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4093249, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1888003e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x94800015, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400074, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c000671, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9a400006, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc419324c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x259c0001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1598001f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x95c0000d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9580000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x99000003, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400036, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04100001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc40c0021, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x14d80011, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x24dc00ff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x31e00002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x31dc0003, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9580fff0, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9a000003, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x99c00002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd9c00036, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x94800004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8000074, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc418005e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x95801827, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8800036, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8c00036, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc424000b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x32640002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9a400004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4180014, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9580ffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd840002f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc40c0021, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x14dc0011, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x95c0fffe, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xccc00037, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c000190, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd800006d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc41d3246, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4193245, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x51dc0020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7d9d801a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400028, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc420000b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x32200002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9a0000ad, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04200032, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd9000010, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xde030000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400033, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04080000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc43c0009, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x27fc0002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x97c0fffe, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc42c0015, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x96c0ffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd800002e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc42d3249, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1af4003e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9740004d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc428000d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4080060, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7ca88005, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x24880001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7f4b4009, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x97400046, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4313274, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4100057, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7d33400c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x97400009, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x28240100, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7e6a4004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce400079, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1eecffdd, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcec13249, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf013273, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf013275, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x800003c3, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc429326f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1aa80030, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x96800006, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x28240001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc428000d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x06a80008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7e6a8004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce800035, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc41d3272, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x25cc0001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x10cc0004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x19e80042, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x25dc0006, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x11dc0001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7e8e800a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7de9c00a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc40d3271, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4293270, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x50cc0020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7ce8c01a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7cd30011, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x11e80007, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2aa80000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce80001c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd300001e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc428000f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9a80ffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4300011, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1b30003f, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0x33300000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4240059, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1660001f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7e320009, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc0328000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7e72400a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0430000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9a000002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04300008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc02ac000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7d310002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x17300002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2aa87600, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7cd0c011, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcdc00024, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd0c00025, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce800026, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04280222, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce800026, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x96000002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce400026, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4280058, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x22ec003d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcec13249, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd013273, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce813275, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd800007b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc8380018, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x57b00020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04343108, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc429325d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x040c3000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x13740008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2374007e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x32a80003, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xccc13267, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc40d3267, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x18ec0057, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x18e40213, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x18cc0199, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7cecc00a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7ce4c00a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x94800003, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd4400078, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x800003e7, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04200022, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xde030000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xccc00024, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd1800025, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf400026, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd4400026, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04200010, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xde030000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xccc00024, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x45980104, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd1800025, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd4400026, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf800026, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf000026, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x49980104, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9a80000a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc81c001f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x45980168, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x55e00020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xccc00024, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd1800025, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcdc00026, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce000026, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x800003f2, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c000448, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x040c2000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xccc13267, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc40d3267, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x94c00001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc40d3249, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x18cc003e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400030, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc42c0016, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x96c0ffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8000030, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc42c0016, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9ac0ffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd800002f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc42c0015, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9ac0ffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400034, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4300025, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4340024, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4380081, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf813279, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf41326e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf01326d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x94c0000d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x254c0700, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc424001e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x10cc0010, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1a641fe8, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x28cc0726, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2a640200, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xccc1237b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2264003f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8813260, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce41325b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4240033, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4280034, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd9000036, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8000010, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c001427, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x96400006, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xde430000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce40000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc40c005e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x94c01755, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd4400078, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9680000a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce80000a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x06a80002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400010, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xde830000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce80000d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc40c005e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x94c0174c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd4400078, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8000010, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c00142b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4393265, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2bb80040, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400032, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf813265, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4200012, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9a00ffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4100044, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x19180024, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc8100072, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x551c003f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x99c00003, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x95800010, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8000043d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc00c8000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd840006c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x28200000, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0x8000043f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc00c4000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x282000f0, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4113255, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd01324f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd88130b8, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xccc130b5, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce000053, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x195c00e8, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4100004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2555fff0, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc0360001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x042c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x29540001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04240000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04280004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc420000b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x32200002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9a000009, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcec1c200, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc5e124dc, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0aa80001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7ef6c001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7e624001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x96000001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9a80fff9, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc02ee000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2555fff0, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcec1c200, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x29540008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc81c001f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x55e00020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc42d3255, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4353259, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8013260, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x45980158, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xccc00024, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd1800025, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcdc00026, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce000026, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x49980158, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x45980170, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4200012, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x16200010, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9a00fffe, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xccc00024, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd1800025, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc429324f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce400026, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce800026, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcec00026, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf400026, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd000008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc40d325b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7d43c001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x195400e8, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1154000a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x18dc00e8, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x05e80488, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x18d0006c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x18f807f0, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x18e40077, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x18ec0199, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0x7e6e400a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8000048e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000494, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x800004de, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000685, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000686, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x800006ac, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1ccc001f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xccc1325b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc411325d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x251001ef, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd01325d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4293254, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1264000a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4300004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7d79400a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7e7a400a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x52a8001e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x15180001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7d69401a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x202c007d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcec1325b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x95000008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x95800028, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc42d3267, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4193246, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc41d3245, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1aec0028, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc40d325c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x800004cc, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc42d3256, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc419324e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x26e8003f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1aec003e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x12f4000e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc41d324d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc40d324f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7d75401a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04100002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7d290004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7f8f4001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7f52800f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x51980020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7d9d801a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x50e00002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x51980008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9a800002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x800004d1, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7d0dc002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x6665fc00, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7e5e401a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcec00008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7da1c011, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd140000b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd1c00002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2a644000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce400002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7f534002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x6665fc00, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7e76401a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd1800002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce400002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x800004d7, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc42d325a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4193258, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1aec003e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc41d3257, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4213259, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x12f4000e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7d75401a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x51980020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x52200002, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0x7d9d801a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcec00008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7da1c011, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd140000b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd1c00002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2a644000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce400002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x202c003d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf000008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcec1325b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc42d325b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x96c00001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4193260, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x259c0007, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x15980004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x05e804e3, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x800004e7, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x800004f0, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000505, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8000016a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc435325d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd801325b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x277401ef, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf41325d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c000671, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9640fff4, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x17e00008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc418000d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce000009, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd84131db, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd800009, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc430001e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc42d325b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1b301ff8, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2b300400, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2330003f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x26edf000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7ef2c00a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8413260, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcec1325b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x05a80507, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8000050c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000528, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8000057d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x800005c2, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x800005f3, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c000671, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9a400012, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1bd400e8, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc42c004a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd40005e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc41c004d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcec0005e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x99c0000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4100019, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7d150005, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x25100001, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0x99000008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c00063b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4113277, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2511fffd, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd013277, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd801326f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000624, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04240012, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1be00fe4, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce413260, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce000066, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400068, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c000671, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9a400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1bd400e8, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc42c004a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd40005e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc41c004d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcec0005e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x99c0000d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4100019, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7d150005, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x25100001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x99000009, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400067, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c00063b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4113277, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2511fffd, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd013277, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd801326f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000624, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1bd400e8, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc42c0060, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7ed6c005, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x26ec0001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4113271, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4153270, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4193272, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc41d3273, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04280022, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x51100020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7d51401a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4113274, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4213275, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4253276, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4313248, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd1400061, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2730000f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x13300010, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7db1800a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd800060, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x96c00002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x05dc0008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcdc00062, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x042c3000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd000063, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce000064, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce400065, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcec13267, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc42d3246, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4313245, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4353267, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce813260, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x52ec0020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc820001f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1b700057, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0x1b680213, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1b740199, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x46ec0188, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7f73400a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7f6b400a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x56240020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf400024, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd2c00025, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce000026, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce400026, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x042c2000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc418000d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x17e00008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce000009, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcec13267, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc42d3267, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x26e01000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9a00fffe, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd9c131fc, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd800009, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x96c00001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4113277, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc41c000b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc420000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x11dc0002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7de1c001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x11dc0008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x29dc0001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x25140001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x191807e4, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x192007ec, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x95400004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcdc1334a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9580000e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x09980001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x041c0001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x95800005, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x09980001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x51dc0001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x69dc0001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9980fffd, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7de20014, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x561c0020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce013344, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcdc13345, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x95400022, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x042c3000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcec13267, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc42d3246, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4313245, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4353267, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc425334d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x26640001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9640fffe, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc419334e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc41d334f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4213350, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4253351, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x52ec0020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1b680057, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1b700213, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1b740199, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0x46ec01b0, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7f6b400a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7f73400a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf400024, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd2c00025, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd800026, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcdc00026, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce000026, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce400026, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x042c2000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcec13267, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc42d3267, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x96c00001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04280032, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce813260, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8800068, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2010007d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd01325b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc411325b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1910003e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9500fffe, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04100040, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd00001b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc410000f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9900ffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04100060, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd00001b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc410000f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9900ffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2010003d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd01325b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4113277, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x25140001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x191807e4, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9540000b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2511fffd, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd013277, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc41c000b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc420000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x11dc0002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7de1c001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x11dc0008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcdc1334a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x95800005, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8013344, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8013345, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4180050, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc41c0052, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04280042, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd813273, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcdc13275, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce813260, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd9000068, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400067, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x07d40000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c00120d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c00124f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c001232, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x057c0000, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0x042c3000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcec13267, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc42d3246, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4313245, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4353267, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x52ec0020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1b680057, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1b700213, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1b740199, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc820001f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x46ec0190, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7f6b400a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7f73400a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x56240020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf400024, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd2c00025, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce000026, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce400026, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x042c2000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcec13267, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4153249, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2154003d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc41c0019, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1bd800e8, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7dd9c005, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x25dc0001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc42c004a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd80005e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc420004d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcec0005e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x11dc0010, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7e1e000a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd413249, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce01326f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x28340001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x05980008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7f598004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd800035, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1be800e8, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc42c004a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce80005e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd801327a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd800005f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8000075, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd800007f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc424004c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce41326e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcec0005e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x28240100, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7e6a4004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce400079, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc435325d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x277401ef, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04240020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce41325e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd801325b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8013260, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf41325d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xda000068, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4113277, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc41c000b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc420000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x11dc0002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7de1c001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x11dc0008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x29dc0001, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0x25140001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9540002d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcdc1334a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x042c3000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcec13267, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc42d3246, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4313245, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4353267, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc425334d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x26640001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9640fffe, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc419334e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc41d334f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4213350, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4253351, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x52ec0020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1b680057, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1b700213, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1b740199, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x46ec01b0, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7f6b400a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7f73400a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf400024, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd2c00025, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd800026, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcdc00026, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce000026, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce400026, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x042c2000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcec13267, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc42d3267, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x96c00001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc41c000b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc420000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x11dc0002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7de1c001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x11dc0008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcdc1334a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc430000b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x33300002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04240000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9b000010, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1be000e8, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x042c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc0360001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04280004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcec1c200, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc63124dc, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0aa80001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7ef6c001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7e724001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x97000001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9a80fff9, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc02ee000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcec1c200, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4253260, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7fc14001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc40d3249, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x18cc003e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x98c00005, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x194c1c03, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0xccc0003b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc40c002d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000697, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc420004a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x194c00e8, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xccc0005e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc40c004c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc431326d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x27301fff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce00005e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7cf0c00d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x98c00003, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c0007e0, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x95c00008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc430001e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1b301ff8, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2b300400, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2330003f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf01325b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd801325b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc411325d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x251001ef, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd01325d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x25100007, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x31100005, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9900008e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc40c0007, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd9000010, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8000075e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x202c007d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcec1325b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4293265, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4353254, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x26a9feff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1374000b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc40c000d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8000009, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1774000d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc41d30b8, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x95c00008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc411325d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd801325b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xccc00009, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x251001ef, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd01325d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce813265, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf400100, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc00ac006, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc00e0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x28880700, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x28cc0014, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c0006de, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x14cc0010, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x30d4000f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04cc0001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x10cc0010, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x28cc0014, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x99400009, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc41530b8, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4193265, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x19980028, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x99400003, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x99800002, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0x800006c8, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc411325d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd801325b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x251001ef, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd01325d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x15600008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce000009, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc8380023, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4180081, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x11a00002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7fa38011, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4100026, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x05980008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7d1a0002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x282c2002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x3e280008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcec00013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4300027, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x042c0008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd3800025, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf000024, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x202400d0, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7ca48001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcc800026, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xccc00026, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x28240006, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcc000026, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a640001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9a40fffe, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9a800004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x32280000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9a800002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9a000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x24d8003f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd840003c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcec0003a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8800013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd81a2a4, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc41d325d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x25dc0007, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc40d3249, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x18cc003e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x94c0000a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc420004a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x194c00e8, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xccc0005e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc40c004c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc431326d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x27301fff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce00005e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7cf0c00d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000712, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x194c1c03, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xccc0003b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc40c002d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x05e80714, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8000071c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000720, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000747, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8000071d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x800007c4, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000732, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000745, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000744, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x98c00006, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8000072e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0x98c00003, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c0007e0, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x95c0000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4253265, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2a64008c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce413265, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc430001e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1b301fe8, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2b300400, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2330003f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8013260, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf01325b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc40c0007, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd9000010, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04240000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8000075e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x98c0fff1, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c0007e0, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x95c00002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000723, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc41f02f1, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x95c00004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8013247, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd801325d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000743, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8813247, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd801325d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4100004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd88130b8, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd000008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04100001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x98c0ffde, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8000072e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x98c00003, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c0007e0, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x95c00012, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4340004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x15600008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc418000d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce000009, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd84131db, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf400008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd800009, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc430001e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1b301ff8, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2b300400, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2330003f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8413260, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf01325b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc40c0007, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd9000010, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04240000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x041c3000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcdc13267, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc41d3267, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc41d3265, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x25dc8000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x95c00007, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc41c004a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x195800e8, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd80005e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc418004c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd81326e, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0xcdc0005e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc41d3265, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x25dd7fff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcdc13265, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc41d3246, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4193245, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc42d3267, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x51e00020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7e1a001a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x46200200, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04283247, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04300033, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1af80057, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1af40213, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x042c000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7f7b400a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7f6f400a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf400024, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd2000025, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd800026, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcdc00026, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc6990000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x329c325d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x99c00008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x329c3269, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x99c00006, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x329c3267, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x95c00005, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc01defff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7d9d8009, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8000078a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x25980000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0b300001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x06a80001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd800026, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9b00fff2, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc43c0012, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9bc0ffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd801325b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc431325a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc03e7ff0, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7f3f0009, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf01325a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4313249, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1f30001f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf013249, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc03e4000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcfc13254, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8013254, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc431325d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd801324f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8013255, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8013247, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd801325d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1b300028, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c00120d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c001219, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c001232, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9900000d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd88130b8, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9700000b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc43d30b5, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1bf0003a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9b000b80, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x203c003a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc430000e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x27300700, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x13300014, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0x2b300001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf0130b7, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcfc130b5, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x46200008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf400024, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd2000025, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8000026, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x043c2000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcfc13267, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc43d3267, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9bc00001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xccc00010, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4080007, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd9000010, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4193260, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x259c0003, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x31dc0003, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x95c00014, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x040c3000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xccc13267, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc40d3267, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x18ec0057, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x18e40213, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x18cc0199, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7cecc00a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7ce4c00a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4193246, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc41d3245, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x51980020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7d9d801a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c000448, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x040c2000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xccc13267, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc40d3267, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x94c00001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcc800010, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd801325d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc418000b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x31980002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x041c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9980001c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x19580066, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x15600008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x040c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc0120001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x11980003, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04240004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7da18001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4200007, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4340004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd9000010, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xccc1c200, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc41d24db, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7cd0c001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a640001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7dd9c005, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x25dc0001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x99c00002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9a40fff8, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc418005e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9580137b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc00ee000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xccc1c200, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce000010, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0xcf400008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd840004f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4113269, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x19080070, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x190c00e8, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2510003f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2518000f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd813268, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x05a80809, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8000080e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8000080f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000898, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000946, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x800009e1, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000a5a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04a80811, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000815, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000834, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8000085e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8000085e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04341001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc42d3045, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcec1c091, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x31300021, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9700000b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd84002f1, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc43130b8, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x27300001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4293059, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x56a8001f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7f2b000a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9b000241, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8000084a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc43130b6, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9b000003, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc02f0001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcec130b6, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4252087, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x5668001a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x26a80005, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9a80fffd, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd80130b6, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8000084a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04341001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc431ecaa, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x27300080, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9b000010, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc02e0001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcec130b6, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd80130b6, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x31300021, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9700000a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd84002f1, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc43130b8, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x27300001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4293059, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x56a8001f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7f2b000a, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9b00021d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xdd410000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x040c0005, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd84802e9, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c001a41, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc43b02f1, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9b800006, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd88130b8, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcec80278, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x56f00020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf080280, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c001608, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xdc140000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8813247, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd80802e9, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8000085e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x31100011, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x950001fa, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc02e0001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2aec0008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc01c0020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc0180001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc00c0007, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x11a40006, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7de6000a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x10e40008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7e26000a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7e2e000a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce000013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4113254, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1d10ffdf, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2110003e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd013254, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd801324f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8013255, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1d10ff9e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd013254, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8013247, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd801325d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd801325e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc0245301, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce413249, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd801325f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc425326c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc0121fff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x29108eff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7e524009, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce41326c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc425325a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc0127ff0, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7e524009, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce41325a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc425325b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc0131fff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7e524009, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce41325b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd801326d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd801326e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8013279, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x94c00003, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x08cc0001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000866, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc00c0007, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x95800003, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x09980001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000866, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc0100010, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0x7dd2400c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9a400004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc0180003, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7dd1c002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000866, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000a5a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04a8089a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8000089e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x800008fa, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000945, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000945, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x31300022, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x97000007, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc43130b8, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x27300001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04183000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd813267, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4113246, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4193245, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x51100020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7d91801a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x459801e0, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4313267, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2738000f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1b342010, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x172c000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x26ec0800, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1b30c012, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7ef7400a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7f37000a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2b300000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf00001c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd180001e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc42c000f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9ac0ffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc8300011, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x97000036, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x45980008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd180001e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc42c000f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9ac0ffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc8340011, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9740002f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc43c0004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x13b80001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc79d3300, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc7a13301, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x96000001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8393300, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc0260001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce793301, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc424005e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x964012a4, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c028009, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9740001c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x27580001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x99800004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x57740001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x06a80400, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x800008d2, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4180006, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9980ffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x29640001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce40001a, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0x242c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x06ec0400, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x57740001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x27580001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9980fffd, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc02620c0, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce41c078, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce81c080, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcc01c081, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf01c082, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x57240020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce41c083, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc0260400, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7e6e400a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce41c084, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7eae8001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7f2f0011, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x800008d2, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4180006, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9980ffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcdf93300, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce393301, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcfc00008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc43c0004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04182000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd813267, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcfc00008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000903, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x31240022, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x96400008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04100001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc43130b8, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x27300001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4af0280, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4b30278, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x52ec0020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7ec30011, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x32f80000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9b800011, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x043c0020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04280000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x67180001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0bfc0001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x57300001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x95800006, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c001628, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9a400003, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd981325d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000915, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd9c1325d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x06a80001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9bc0fff6, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7f818001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c001606, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7d838001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x94800010, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc41d3259, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc421325a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x16240014, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x12640014, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1a2801f0, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x12a80010, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2620ffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7e2a000a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7de1c001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7e5e400a, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0x9b800002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2264003f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce41325a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8013259, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc40c0007, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd9000010, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c00075e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4af0228, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x043c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x66d80001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x95800010, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04300002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1330000d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x13f40014, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7f73400a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04380040, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf80001b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc438000f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04380060, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf80001b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc438000f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x07fc0001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x56ec0001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x33e80010, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9680ffec, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000a5a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000a5a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04a80948, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8000094c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8000099b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x800009e0, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x800009e0, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc43c0004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04183000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd813267, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4113246, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4193245, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x51100020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7d91801a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x459801e0, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4313267, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2738000f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1b342010, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x172c000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x26ec0800, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1b30c012, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7ef7400a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7f37000a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2b300000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf00001c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd180001e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc42c000f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9ac0ffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc8300011, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x97000033, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x45980008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd180001e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc42c000f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9ac0ffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc8340011, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9740002c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x13b80001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc79d3300, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0xc7a13301, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x96000001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8393300, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc0260001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce793301, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc424005e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x964011fe, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c028009, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9740001c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x27580001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x99800004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x57740001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x06a80400, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000978, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4180006, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9980ffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x29640001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce40001a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x242c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x06ec0400, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x57740001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x27580001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9980fffd, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc0260010, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce41c078, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf01c080, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x57240020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce41c081, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce81c082, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcc01c083, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc0260800, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7e6e400a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce41c084, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7eae8001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7f2f0011, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000978, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4180006, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9980ffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcdf93300, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce393301, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04182000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd813267, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcfc00008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4193246, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc41d3245, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x51980020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7dda801a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7d41c001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7e838011, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd84802e9, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c001802, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x469c0390, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4313267, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04183000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd813267, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1b342010, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x172c000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x26ec0800, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1b30c012, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7ef7400a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7f37000a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2b300000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf00001c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x45dc0004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd1c0001e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc418000f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9980ffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4200011, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x45dc0004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd1c0001e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc418000f, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0x9980ffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4240011, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x45dc0004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd1c0001e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc418000f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9980ffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4280011, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x45dc0004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd1c0001e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc418000f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9980ffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc42c0011, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x45dc0004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd1c0001e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc418000f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9980ffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4300011, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x45dc0004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd1c0001e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc418000f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9980ffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4340011, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x45dc0004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd1c0001e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc418000f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9980ffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4380011, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04182000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd813267, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x043c0001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c0014df, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000a5a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000a5a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x31280014, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce8802ef, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9a800062, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x31280034, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9a800060, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04a809e8, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x800009ec, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000a45, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000a59, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000a59, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4113246, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4193245, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x51100020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7d91801a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x45980400, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4b30258, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4a70250, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x53300020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7e72401a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4313267, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1b342010, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x172c000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x26ec0800, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1b30c012, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7ef7400a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7f37000a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2b300000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf00001c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x042c0020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x66740001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x97400041, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04383000, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0xcf813267, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4393267, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9b800001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd180001e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc438000f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4300011, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1b38007e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x33b40003, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9b400003, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x4598001c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9740002f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x45980004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd180001e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc438000f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc40c0011, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x45980004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd180001e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc438000f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4100011, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x45980004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd180001e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc438000f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4340011, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf4002eb, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x45980004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd180001e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc438000f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4340011, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf4002ec, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x45980004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd180001e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc438000f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4340011, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf4002ed, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x45980004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd180001e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc438000f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4340011, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf4002ee, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x45980004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04382000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf813267, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd84802e9, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c001715, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04382000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf813267, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x56640001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0aec0001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9ac0ffbc, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04341001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x94800005, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc431ecaa, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x27300080, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x97000002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000a55, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0xc43130b6, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x233c0032, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcfc130b6, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf0130b6, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc49302ef, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x99000003, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8413247, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000a5a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000a5a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04180001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x5198001f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd813268, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4193269, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2598000f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9980fffe, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd80002f1, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8013268, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd800004f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04380001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x53b8001f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7db9801a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd813268, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000a5e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc40c005e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x94c01106, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8800013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcc412e01, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcc412e02, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcc412e03, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcc412e00, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000aa7, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc40c005e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x94c010fd, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x50640020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7ce4c01a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd0c00072, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc80c0072, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x58e801fc, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x12a80009, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2aa80000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd0c0001e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce80001c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc424000f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9a40ffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04240010, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x18dc01e2, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7e5e4002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x3e5c0003, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x3e540002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x95c00006, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc8180011, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc8100011, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc8100011, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x55140020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000aa2, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9540000a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc8180011, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x44cc0008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x55900020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd0c0001e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc424000f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9a40ffff, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0xc4140011, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000aa2, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x44cc0004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4180011, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd0c0001e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc424000f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9a40ffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc8100011, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x55140020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8800013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd812e01, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd012e02, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd412e03, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcc412e00, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc428000e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2aa80008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4253249, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2264003f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce413249, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4253249, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x96400001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc410001a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc40c0021, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4140028, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x95000005, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1e64001f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce413249, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x14d00010, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4180030, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc41c0007, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x99000004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x99400009, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9980000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000ab1, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xccc00037, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c000190, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc420001c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8000032, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9a0010ac, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000aa7, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd880003f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x95c00002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8c0003f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80001082, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8800040, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x95c00002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8c00040, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x800010de, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc010ffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x18d403f7, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7d0cc009, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc41b0367, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7d958004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7d85800a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xdc1e0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc424000b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x32640002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x18d001fc, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x05280adc, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000af1, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000adf, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000ae7, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c000ace, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8c00013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x96400002, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd8d2000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x99c00010, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x18d803f7, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc010ffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7d0cc009, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04140000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x11940014, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x29544001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9a400002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x29544003, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000af4, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8c00013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x96400002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd44d2000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc424000b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x32640002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8c00013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x96400002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd44dc000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x18d0003c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x95000006, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c000ace, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8800013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd8d2c00, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x99c00003, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000b0a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8800013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd44d2c00, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x28148004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x24d800ff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xccc00019, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd4593240, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc40c005e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x94c0105e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c410001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x50540020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c418001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2198003f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x199c0034, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc40c0007, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x95c00028, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc428000e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2aa80008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc42d324f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4313255, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7ef3400c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9b400021, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc40c0007, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x14e80001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9a8000af, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd9000010, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x041c0002, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0x042c01c8, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c000d61, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xccc00010, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc40c005e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x94c01043, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c410001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x50540020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c418001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x18a01fe8, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x3620005c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9a00000e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2464003f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc6290ce7, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x16ac001f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x96c00004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x26ac003f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7ee6c00d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x96c00005, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x06200001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2620000f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9a00fff8, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8000016a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce000367, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc424005e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9640102e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc428000e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x199c0037, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x19a00035, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2aa80008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x95c0005d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc42d3256, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc431325a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2330003f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x16f8001f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9780000d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4253248, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc035f0ff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7e764009, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x19b401f8, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x13740008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7e76400a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce413248, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf01325a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc431325a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x97000001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7d15001a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd1000072, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc8100072, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x55140020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x199c0034, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400010, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9b800004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1ae4003e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce400008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000b7c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4353254, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x16a80008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1aec003c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x19a4003f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x12a80015, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x12ec001f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1374000b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7eae800a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc02e4000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1774000d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7eae800a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce400008, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0x7f6b400a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x95c00005, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc43d3248, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1bfc01e8, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x13fc0018, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7dbd800a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1d98ff15, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x592c00fc, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd80000a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x12e00016, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7da1800a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x592c007e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x12e00015, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7da1800a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd1000001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd800001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x11a0000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1264001e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1620000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7e26000a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7e32000a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x12e4001b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7e26000a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x5924007e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x12640017, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7e26000a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x19a4003c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x12640018, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7e26000a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce01325a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd013257, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd413258, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc429325a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc40c005e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x94c00fdb, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x96800001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x95c00003, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c410001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9780f5ca, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf400100, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc40c0007, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd9000010, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c00120d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c001219, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c001232, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xccc00010, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c001b6d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc42d324e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc431324d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x52ec0020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc435324f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4293256, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x52ec0008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x07740003, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04240002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x269c003f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7e5e4004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7f67000f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x97000003, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7f674002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0b740001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x53740002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7ef6c011, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1ab42010, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1ab8c006, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x16a8000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x26a80800, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2b740000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7f7b400a, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0x7f6b400a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf40001c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd2c0001e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc438000f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4180011, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9a000003, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c000bec, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000b47, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc42c001d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4313256, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1b34060b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1b300077, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7f37000a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x13300017, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04340100, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x26ec00ff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc03a8004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7ef6c00a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7f3b000a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7ef2c00a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcec1325b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000c16, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc40c0032, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc410001d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x28cc0008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xccc00013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc415325b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c418001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c418001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x18580037, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x251000ff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc421325d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x262001ef, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce01325d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x99800004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7d15400a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd41325b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1d54001f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd41325b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc428000b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc42c000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x12a80001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x26a80004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7eae800a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc40c0021, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4340028, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x14f00010, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4380030, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc43c0007, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd280200, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd680208, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcda80210, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9b00000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9b400014, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9b800017, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc428000b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc42c000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x12a80001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x26a80004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7eae800a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc6930200, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc6970208, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc69b0210, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x17300001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9b000005, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xccc00037, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c000190, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8000032, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8000028, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd800002b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd900003f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x97c00002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd940003f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80001082, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd9000040, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x97c00002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd9400040, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x800010de, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc40c0021, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x14fc0011, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x24f800ff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x33b80001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x97c0fffc, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9b800007, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xccc00037, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c000190, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8000032, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8000028, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd800002b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd88130b8, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04100000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04140000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc418000e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x29980008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7d83c001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd800013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4093249, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1888003e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x94800020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400074, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c000671, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9a400009, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc418000e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x29980008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd800013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc419324c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x259c0001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1598001f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x95c00016, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x95800015, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x99000003, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400036, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04100001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc40c0021, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x14d80011, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x24e000ff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x321c0002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x32200001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9580ffee, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x99c00014, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x96000004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xccc00037, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04140001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000c30, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9480000a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8000074, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc418005e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x95800f29, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000c16, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x94800004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8000074, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc418005e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x95800f23, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd9c00036, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x99400002, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0xccc00037, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000c16, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x94800004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8000074, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc418005e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x95800f1a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xccc00037, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8800036, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x041c0003, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x042c01c8, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c000d61, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4200007, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc40c0077, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x94c00001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c418001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc428000e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9600f502, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a200001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x98c0f500, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2aa80008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce000010, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9a000f05, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc431325a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc42d3256, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1f30001f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x16e4001f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf01325a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc431325a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x97000001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9640f4f4, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc434000b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x33740002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9b40f4f1, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4353254, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x16a80008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1aec003c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x12a80015, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x12ec001f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1374000b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7eae800a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc02e4000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1774000d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7eae800a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7f6b400a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf400100, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x12780001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2bb80001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc00ac005, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc00e0002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x28cc8000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x28884900, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x28cc0014, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000ff3, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc43c0007, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x17fc0001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9bc00004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc424005e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x96400ee1, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcc41c40a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcc41c40c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcc41c40d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c414001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x24d0007f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x15580010, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x255400ff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd01c411, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd81c40f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd41c40e, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0xcc41c410, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c414001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c418001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04200000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x18e80033, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x18ec0034, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcc41c414, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcc41c415, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd81c413, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd41c412, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x18dc0032, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c030011, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c038011, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x95c00027, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x96c00002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc431c417, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc435c416, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x96800004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x96c00002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc439c419, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc43dc418, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc41c000e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x29dc0008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcdc00013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf413261, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x96c00002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf013262, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x96800004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcfc13263, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x96c00002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf813264, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x18dc0030, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc43c0007, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x95c00017, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x17fc0001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9ac00005, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7d77000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9bc00015, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9700000a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000cd6, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x51b80020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x53300020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7f97801a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7f37001a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7f3b000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9bc0000d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x97800002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000cd6, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9a000018, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x28200001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000ca7, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x18dc0031, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x95c00003, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc435c40b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9740fffd, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4280032, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2aa80008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc40d325b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x97000002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x800012c2, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc438001d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1bb81ff0, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7f8cc00a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xccc1325b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc411325d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x251001ef, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd01325d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc428000e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc43c0007, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0x2aa80008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc438001d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x13f4000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9bc00006, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc43d3256, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1bf0060b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1bfc0077, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7ff3c00a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000cf4, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc43d325a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1bfc0677, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x13fc0017, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04300100, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1bb81fe8, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7f73400a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc032800b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7fb7800a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7ff3c00a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7ffbc00a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcfc1325b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000c16, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc43c0007, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x18d42011, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x17fc0001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x18d001e8, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x24cc007f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7cd4c00a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9bc00004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc428005e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x96800e6c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c414001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x50580020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7d59401a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd1400072, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc8140072, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x596001fc, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x12200009, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7ce0c00a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c418001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x505c0020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7d9d801a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c41c001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x50600020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7de1c01a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c420001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xccc0001b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd140001d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd180001f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd1c00020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x95000010, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04300000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc428000f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9a80ffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc8240010, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7e5e800c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9bc00015, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9a80000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9b000024, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x28300001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x122c0004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x06ec0001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0aec0001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9ac0ffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000d1f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc428000f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9a80ffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc8240010, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x566c0020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc428000e, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0x2aa80008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce413261, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcec13262, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4340032, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2b740008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc40d325b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x96800005, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x566c0020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce413261, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcec13262, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x800012c2, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc438001d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1bb81fe8, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7f8cc00a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xccc1325b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc411325d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x251001ef, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd01325d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc43c0007, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc438001d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc428000e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2aa80008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x13f4000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9bc00006, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc43d3256, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1bf0060b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1bfc0077, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7ff3c00a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000d57, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc43d325a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1bfc0677, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x13fc0017, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04300100, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1bb81fe8, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7f73400a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc0328009, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7fb7800a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7ff3c00a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7ffbc00a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcfc1325b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000c16, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc43c000e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2bfc0008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4253246, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4113245, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04143000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd413267, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x52640020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7e51001a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4153267, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7d2d0011, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x19640057, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x19580213, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x19600199, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7da6400a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7e26400a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd1000025, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce400024, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcdc00026, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04142000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd413267, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4153267, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x99400001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0x18d001e8, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x18d40030, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x18d80034, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x05280d83, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c420001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c424001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000d8a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8000016a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000d95, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000db1, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8000016a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000d95, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000dbc, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x11540010, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7e010001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c00187c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7d75400a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd4610000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9580f3d8, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc439c040, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x97800001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8000016, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x526c0020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x18e80058, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7e2ec01a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd2c00072, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc82c0072, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x5ae0073a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7ea2800a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9940000a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce800024, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd2c00025, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd4400026, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9580f3c6, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4380012, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xdc3a0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0bb80001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce800024, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd2c00025, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcc400026, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9b80fffb, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9980fff5, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc02a0001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2aa80001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x16200002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce01c405, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd441c406, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9580f3b1, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc439c409, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x97800001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc424000b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x32640002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9a40000b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x11540010, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x29540002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd4610000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9580f3a5, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc439c040, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0x97800001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd4400078, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc40c005e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x94c00da7, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x50500020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7cd0c01a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd0c00072, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc8280072, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x5aac007e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x12d80017, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c41c001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7d9d800a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x56a00020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2620ffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7da1800a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x51980020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7e82400a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7e58c01a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x19d4003d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x28182002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x99400030, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c00104f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc430000d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4340035, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd800013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc8140023, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4180081, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x13300005, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc011000f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4240004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x11a00002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c908009, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x12640004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7d614011, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4100026, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x05980008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7ca4800a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7d1a0002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7cb0800a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x3e280008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x20880188, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x54ec0020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7cb4800a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4300027, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04380008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd1400025, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf000024, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x20240090, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7ca48001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcc800026, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xccc00026, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcec00026, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcec00026, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x28240004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcc000026, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a640001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9a40fffe, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9a800005, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x32280000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9a800002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9a000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c018001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8000016, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf80003a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd901a2a4, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80001037, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc418000e, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0x29980008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd800013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc421326c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1624001f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9a40fffe, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd841325f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8800033, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc43c0009, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x27fc0004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x97c0fffe, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8000039, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd0c00038, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc43c0022, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9bc0ffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8800034, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc429325f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x26ac0001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9ac0fffe, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x26ac0002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x96c00003, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc43c0007, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc430001e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8800033, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x13f4000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1b301ff0, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2b300300, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2330003f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7f37000a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9680000b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc43c0009, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x27fc0004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x97c0fffe, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400039, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd0c00038, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc43c0022, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9bc0ffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf01325b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8800034, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000c16, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8800034, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c0001a2, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcc80003b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x24b00008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc418000e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1330000a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x18ac0024, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2b304000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcec00008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x18a800e5, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1d980008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x12a80008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7da9800a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x29980008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd800013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4113249, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1910003e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x99000002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd840003d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c410001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd4400078, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x51100020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf01326c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7cd0c01a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc421326c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x12a80014, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2220003f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7e2a000a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd800013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce01326c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8800033, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0xc43c0009, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x27fc0004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x97c0fffe, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8000039, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd0c00038, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc43c0022, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9bc0ffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8800034, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80001190, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x18dc003d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x95c00004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x041c0001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x042c01c8, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c000d61, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x18d40030, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x18d001e8, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x18fc0034, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x24e8000f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x06a80e71, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c418001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c41c001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000edd, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000e91, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000e91, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000ea1, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000eaa, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000e7c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000e7f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000e7f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000e87, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000e8f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8000016a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x51dc0020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7d9e001a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000ee6, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc420000e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2a200008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce000013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4213262, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4253261, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x52200020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7e26001a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000ee6, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc420000e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2a200008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce000013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4213264, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4253263, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x52200020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7e26001a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000ee6, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc820001f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000ee6, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x18e82005, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x51e00020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2aa80000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7da1801a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd1800072, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc8180072, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x59a001fc, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x12200009, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7ea2800a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce80001c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd180001e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc428000f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9a80ffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc8200011, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000ee6, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x15980002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd81c400, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0xc421c401, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x95400041, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc425c401, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x52640020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7e26001a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000ee6, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x31ac2580, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9ac00011, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x31ac260c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9ac0000f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x31ac0800, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9ac0000d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x31ac0828, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9ac0000b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x31ac2440, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9ac00009, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x31ac2390, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9ac00007, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x31ac0093, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9ac00005, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x31ac31dc, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9ac00003, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x31ac31e6, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x96c00004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4340004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000ede, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x39ac7c06, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x3db07c00, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9ac00003, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x97000002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000ebc, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x39acc337, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x3db0c330, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9ac00003, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x97000002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000ebc, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x39acc335, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x3db0c336, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9ac00003, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x97000002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000ebc, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x39ac9002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x3db09001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9ac00003, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x97000002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000ebc, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x39ac9012, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x3db09011, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9ac00003, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x97000002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000ebc, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x39acec70, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x3db0ec6f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9ac00003, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x97000002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000ebc, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4340004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc5a10000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x95400005, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x05980001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc5a50000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x52640020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7e26001a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf400008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x05280eea, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c418001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c41c001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000ef1, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8000016a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000efe, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000f11, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0x80000f2e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000efe, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000f1f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4340004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce190000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x95400005, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x05980001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x56200020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce190000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf400008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x97c0f26f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc439c040, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x97800001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x51ec0020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x18e80058, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7daec01a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd2c00072, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc82c0072, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x5af8073a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7eba800a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd2c00025, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce800024, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce000026, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x95400003, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x56240020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce400026, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x97c0f25c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4380012, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc02a0001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2aa80001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x15980002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd81c405, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce01c406, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x95400003, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x56240020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce41c406, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x97c0f24e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc439c409, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x97800001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc424000b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x32640002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9a40f247, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8800013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce190000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x95400004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x05980001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x56200020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce190000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x97c0f240, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc439c040, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x97800001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x31ac2580, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9ac00011, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x31ac260c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9ac0000f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x31ac0800, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9ac0000d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x31ac0828, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9ac0000b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x31ac2440, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9ac00009, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0x31ac2390, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9ac00007, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x31ac0093, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9ac00005, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x31ac31dc, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9ac00003, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x31ac31e6, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x96c00004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4340004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000ef2, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x39ac7c06, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x3db07c00, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9ac00003, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x97000002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000f40, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x39acc337, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x3db0c330, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9ac00003, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x97000002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000f40, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x39acc335, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x3db0c336, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9ac00003, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x97000002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000f40, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x39acec70, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x3db0ec6f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9ac00003, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x97000002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000f40, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x39ac9002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x3db09002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9ac00003, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x97000002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000f40, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x39ac9012, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x3db09012, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9ac00003, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x97000002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000f40, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000ef1, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc40c0006, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x98c0ffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c410001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c414001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c418001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c41c001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c43c001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x95c00001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc434000e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2b740008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2b780001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8c1325e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf80001a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c034001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c038001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x18e0007d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x32240003, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9a400006, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x32240000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9a400004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd01c080, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd41c081, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000f88, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x51640020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7e52401a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd2400072, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc8280072, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce81c080, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x56ac0020, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0x26f0ffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf01c081, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1af000fc, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1334000a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x24e02000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7f63400a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x18e00074, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x32240003, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9a400006, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x32240000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9a400004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd81c082, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcdc1c083, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000f9d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x51e40020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7e5a401a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd2400072, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc8280072, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce81c082, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x56ac0020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x26f0ffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf01c083, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1af000fc, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x13380016, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x18e00039, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x12200019, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7fa3800a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7fb7800a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x18e0007d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1220001d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7fa3800a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x18e00074, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x12200014, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7fa3800a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf81c078, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcfc1c084, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000c16, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x18dc003d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x95c00004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x041c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x042c01c8, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c000d61, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x18d001e8, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x31140005, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x99400003, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x31140006, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x95400002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c00104f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x05280fb7, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x28140002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000fbe, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000fbe, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000fc2, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000fbe, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000fd1, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000ff2, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000ff2, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x24cc003f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xccc1a2a4, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c414001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x18e80039, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x52a8003b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x50580020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x24cc003f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7d59401a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd1400072, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc8140072, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7d69401a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc41c0017, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0x99c0ffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd140004b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xccc1a2a4, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc414000d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04180001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x24cc003f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7d958004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd800035, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xccc1a2a4, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc43c000e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2bfc0008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc43d3249, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1bfc003e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x97c00002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400074, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4100019, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7d150005, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x25100001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9500000b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x97c0fffc, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4180021, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x159c0011, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x259800ff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x31a00003, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x31a40001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7e25800a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x95c0fff5, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9580fff4, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000fef, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc411326f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1d100010, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd01326f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x97c00002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8000074, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04380000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc430000d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc8140023, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4180081, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x13300005, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc011000f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4240004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x33b40003, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x97400003, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc0340008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000ffe, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4340035, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x11a00002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c908009, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x12640004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7d614011, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4100026, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x05980008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7ca4800a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7d1a0002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7cb0800a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x282c2002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x208801a8, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x3e280008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7cb4800a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcec00013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4300027, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x042c0008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd1400025, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf000024, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x20240030, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7ca48001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcc800026, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xccc00026, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9b800013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcc400026, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0x7c414001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x28340000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x507c0020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7d7d401a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd1400072, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc8140072, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x557c0020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x28342002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd400026, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcfc00026, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd4400026, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9a80000e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x32280000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9a80000b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8000102f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcc000026, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcc000026, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcc000026, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcc000026, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcc000026, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9a800005, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x32280000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9a800002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9a000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c018001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcc000026, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1cccfe08, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8800013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcec0003a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xccc1a2a4, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc43c000e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2bfc0008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc43d3249, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1bfc003e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9bc00007, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc428000e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x16a80008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce800009, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc42c005e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x96c00b33, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd840003c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4200025, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7da2400f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7da28002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7e1ac002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0aec0001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x96400002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7d2ac002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x3ef40010, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9b40f11d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04380030, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf81325e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000c16, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xde410000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xdcc10000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xdd010000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xdd410000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xdd810000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xddc10000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xde010000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc40c000e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c024001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x28cc0008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xccc00013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc8100086, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x5510003f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc40d3249, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x18cc003e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x98c00003, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x99000011, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0x80001075, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9900000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc40c0026, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4100081, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4140025, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7d15800f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7d15c002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7d520002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a200001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x95800002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7cde0002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x3e20001a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9a000009, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x040c0030, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xccc1325e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80001071, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd9c00036, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc40c005e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x94c00b01, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04240001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xdc200000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xdc1c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xdc180000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xdc140000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xdc100000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xdc0c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x96400004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xdc240000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xdc0c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000c16, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xdc240000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcc40003f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8c00010, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4080029, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcc80003b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc418000e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x18a800e5, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1d980008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x12a80008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7da9800a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x29980008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd800013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x18a400e5, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x12500009, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x248c0008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x94c00006, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x200c006d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7cd0c00a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xccc1326c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc421326c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x96000001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd800013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x200c0228, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7cd0c00a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xccc1326c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc421326c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x96000001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc40c002a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc410002b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x18881fe8, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x18d4072c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x18cc00d1, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7cd4c00a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x3094000d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x38d80000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x311c0003, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x99400006, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x30940007, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1620001f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9940001d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9a000023, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x800010c4, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0x9580001a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x99c00019, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xccc00041, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x25140001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc418002c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9940000d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x259c007f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x95c00013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x19a00030, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcdc0001b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400022, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc430000f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x17300001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9b00fffe, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9a000012, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400023, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x800010cb, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x199c0fe8, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcdc0001b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400023, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc430000f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x17300001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9b00fffe, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x800010cb, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8c00010, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8000022, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8000023, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc430005e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x97000aac, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc43c000e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc434002e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2bfc0008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2020002c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce01326c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x17780001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x27740001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x07a810d8, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf400010, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc421326c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x96000001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000aa7, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000bfc, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x800012e9, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8000104c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcc400040, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8800010, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4180032, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x29980008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd800013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x200c007d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xccc1325b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc411325b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x95000001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x28240007, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xde430000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd4400078, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80001190, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcc80003b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x24b00008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc418000e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1330000a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x18a800e5, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1d980008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x12a80008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7da9800a, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0x29980008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd800013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc40d3249, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x18cc003e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x98c00002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd840003d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2b304000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf01326c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc431326c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c410001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c414001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x192400fd, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x50580020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7d59401a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c41c001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x06681110, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c420001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcc400078, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x18ac0024, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x19180070, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x19100078, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcec00008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x18f40058, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x5978073a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7f7b400a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x97000001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80001117, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80001118, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80001122, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8000112d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80001130, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80001133, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8000016a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8000117b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x24ec0f00, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x32ec0600, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x96c00003, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4300006, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9b00ffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd1400025, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf400024, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcdc00026, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8000117b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x24ec0f00, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x32ec0600, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x96c00003, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4300006, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9b00ffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd1400025, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf400024, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcdc00026, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce000026, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8000117b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc81c001f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x55e00020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80001122, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc81c0020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x55e00020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80001122, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c00116b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc02a0200, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7e8e8009, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x22a8003d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x22a80074, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2774001c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x13740014, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7eb6800a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x25ecffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x55700020, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0x15f40010, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x13740002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x275c001f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x95c00027, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c018001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7f41c001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x15dc0002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x39e00008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x25dc0007, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7dc1c01e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x05dc0001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x96000004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x05e40008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c00116e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80001168, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7dc2001e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x06200001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x05e40008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7e62000e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9a000004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7da58001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c00116e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80001165, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7dc2001e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x06200001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7e1a0001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x05cc0008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7e0d000e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x95000007, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7e02401e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x06640001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x06640008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x05d80008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c00116e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80001168, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7dc2401e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x06640001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7da58001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c00116e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x05e00008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7da2000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9600ffe6, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x17640002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c00116e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80001190, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4200006, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9a00ffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c00116b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc420000e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2a200001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce00001a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce81c078, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcec1c080, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcc01c081, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd41c082, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf01c083, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x12640002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x22640435, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce41c084, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0528117e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x312c0003, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80001190, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80001185, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80001182, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80001182, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4300012, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9b00ffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9ac0000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc03a0400, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4340004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc418000e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x15980008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1198001c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7d81c00a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcdc130b7, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf8130b5, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf400008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04240008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc418000e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc41c0049, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x19a000e8, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x29a80008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7de2c00c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc421325e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x26200010, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc415326d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9a000006, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc420007d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x96000004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x96c00003, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce40003e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x800011a3, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7d654001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd41326d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c020001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x96000005, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4100026, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4240081, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4140025, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x800011b6, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4253279, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc415326d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc431326c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2730003f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x3b380006, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x97800004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x3f38000b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9b800004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x800011b4, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04300006, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x800011b4, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0430000b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04380002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7fb10004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7e57000f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7e578002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7d67c002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0be40001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x97000002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7d3a4002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x202c002c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc421325e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04280020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcec1326c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x26200010, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x3e640010, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x96000003, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x96400002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce81325e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4300028, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc434002e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x17780001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x27740001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x07a811cf, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9b00feb8, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf400010, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc414005e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x954009a7, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000aa7, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000bfc, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0x800012e9, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c00120d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xccc1c07c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcc41c07d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcc41c08c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c410001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcc41c079, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd01c07e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c414001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x18f0012f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x18f40612, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x18cc00c1, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7f73400a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7cf7400a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x39600004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9a000002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc0140004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x11600001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x18fc003e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9740001c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf400041, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc425c07f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x97c00003, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x166c001f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x800011ee, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1a6c003e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x96c00006, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04200002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a200001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9a00ffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x800011e8, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc428002c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x96800010, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x26ac007f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcec0001b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1ab00030, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1aac0fe8, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc434000f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9b40ffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x97000008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcec0001b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc434000f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9b40ffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80001205, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a200001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9a00ffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc425c07f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x166c001f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x11600001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9ac0fffa, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c001232, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8000033, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc438000b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc43c0009, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x27fc0001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x97c0fffe, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd841c07f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc43dc07f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1bfc0078, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7ffbc00c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x97c0fffd, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc03a2800, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf81c07c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcc01c07d, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0xcc01c08c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcc01c079, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcc01c07e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04380040, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf80001b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc438000f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04380060, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf80001b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc438000f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04380002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0bb80001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc43dc07f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x17fc001f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04380010, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9bc0fffa, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd801c07f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc43dc07f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcfc00078, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8000034, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc03ae000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf81c200, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc03a0800, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf81c07c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcc01c07d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcc01c08c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcc01c079, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcc01c07e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04380040, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf80001b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc438000f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04380002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0bb80001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc43dc07f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x17fc001f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04380010, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9bc0fffa, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc03ae000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf81c200, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc03a4000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf81c07c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcc01c07d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcc01c08c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcc01c079, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcc01c07e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04380002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0bb80001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc43dc07f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x17fc001f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04380010, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9bc0fffa, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc40c0007, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x30d00002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x99000052, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc424005e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9640090f, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0x7c410001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc428000e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1514001f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x19180038, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2aa80008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x99400030, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x30dc0001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x99c0000a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc42d324e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc431324d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x52ec0020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc435324f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4293256, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1ab0c006, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x52ec0008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8000127f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc42d3258, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4313257, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x52ec0020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4353259, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc429325a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1ab0c012, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x07740001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04240002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x26a0003f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7e624004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7f67800f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x97800002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04340000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x53740002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7ef6c011, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1ab42010, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x16a8000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x26a80800, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2b740000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7f73400a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7f6b400a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf40001c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd2c0001e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc438000f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4100011, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1514001f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x99400006, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9980000a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c0012e1, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc40c0007, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04100000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80001267, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc424005e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x964008d7, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd9800036, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000c16, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc42c001d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x95c00005, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc431325a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1b300677, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x11dc000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x800012aa, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4313256, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1b34060b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1b300077, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7f37000a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x13300017, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04340100, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x26ec00ff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc03a8002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7ef6c00a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7edec00a, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0x7f3b000a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7ef2c00a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcec1325b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000c16, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4140032, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc410001d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x29540008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc40d325b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1858003f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x251000ff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x99800007, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7d0cc00a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xccc1325b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc411325d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x251001ef, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd01325d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x18d0006c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x18d407f0, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9900000e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04100002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4193256, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc41d324f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2598003f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7d190004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7d5d4001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7d52000f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9a000003, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd41324f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x800012d8, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7d514002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd41324f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x800012d8, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4193259, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc41d325a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7d958001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7dd5c002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd813259, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcdc1325a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc411325d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x251001ef, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd01325d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1ccc001e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xccc1325b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc40d325b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x94c00001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc40c0021, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4340028, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x14f00010, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4380030, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc43c0007, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9b000004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9b40000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9b80000f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x17300001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9b000005, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xccc00037, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c000190, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8000032, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8000028, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd800002b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd980003f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x97c00002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd9c0003f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80001082, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd9800040, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x97c00002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd9c00040, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0x800010de, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc43c0007, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x33f80003, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x97800051, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcc80003b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x24b00008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc418000e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1330000a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x18a800e5, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1d980008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x12a80008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7da9800a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x29980008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd800013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4353249, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1b74003e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9b400002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd840003d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2b304000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf01326c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc431326c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x97000001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c434001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1b4c00f8, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c410001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c414001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x50700020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04e81324, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x18ac0024, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c41c001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x50600020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcc400078, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x30e40004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9a400007, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7d71401a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x596401fc, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x12640009, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1b74008d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7e76400a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2a640000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcec00008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8000016a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8000016a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8000016a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8000016a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8000132c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8000133b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80001344, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8000016a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4340004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc42530b5, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1a68003a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9a80fffe, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2024003a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc418000e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x25980700, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x11980014, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7d19000a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd0130b7, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce4130b5, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf400008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80001190, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce40001c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd140001e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc428000f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9a80ffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4240011, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7de6800f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9a80ffea, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80001190, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0xce40001c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd140001e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc428000f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9a80ffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc8240011, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7de1c01a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7de6800f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9a80ffe0, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80001190, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c00104f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x28182002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc430000d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4340035, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd800013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc8140023, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4180081, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x13300005, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4240004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x11a00002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x12640004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7d614011, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4100026, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x05980008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7ca4800a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7d1a0002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7cb0800a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x3e280008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7cb4800a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4300027, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x042c0008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd1400025, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf000024, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x20240030, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7ca48001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcc800026, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c434001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1b4c00f8, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf400026, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcc400026, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x28340000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c414001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x507c0020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x30e40004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9a400005, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7d7d401a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd1400072, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc8140072, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x557c0020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x28342002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd400026, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcfc00026, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd4400026, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcc000026, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9a800005, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x32280000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9a800002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9a000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c018001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8800013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04380028, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcec0003a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf81a2a4, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80001037, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc40c005e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x94c007eb, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x50500020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7d0d001a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd1000072, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0xc8100072, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x591c01fc, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x11dc0009, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x45140210, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x595801fc, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x11980009, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x29dc0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcdc0001c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd140001e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc418000f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9980ffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4200011, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1624001f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x96400069, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc40c000e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x28cc0008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xccc00013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce013249, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1a307fe8, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf00000a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x23304076, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd1000001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf000001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc41d3254, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4253256, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x18cc00e8, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x10cc0015, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x4514020c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd140001e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc418000f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9980ffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4200011, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce013248, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1a2001e8, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x12200014, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2a204001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce000013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1a64003c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1264001f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x11dc0009, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x15dc000b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7dcdc00a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7e5dc00a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcdc00100, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8800013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400010, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf00000d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf00000a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c001427, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04340022, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x07740001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04300010, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xdf430000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c434001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd4412e01, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0434001e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xdf430000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd4400078, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xdf030000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd4412e40, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcc41c030, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcc41c031, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x248dfffe, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xccc12e00, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8800013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcc812e00, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c434001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c434001, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0x8c00142b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8000010, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc40c000e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x28cc0008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xccc00013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x45140248, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd140001e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc418000f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9980ffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc8200011, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce013257, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x56200020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce013258, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0434000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xdb000024, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd1400025, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8000026, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8000026, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x45540008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd140001e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc418000f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9980ffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc8200011, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce013259, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x56200020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc0337fff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7f220009, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce01325a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x55300020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7d01c001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x042c01d0, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c000d61, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x06ec0004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7f01c001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c000d61, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x041c0002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x042c01c8, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c000d61, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4380012, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000aa7, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x50500020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c001427, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7cd0c01a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4200007, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd0c00072, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc8240072, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd240001e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c414001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x19682011, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x5a6c01fc, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x12ec0009, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7eeac00a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2aec0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcec0001c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc430000f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9b00ffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4180011, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c438001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x99800007, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xdf830000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcfa0000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c00142b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd4400078, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c00142b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8000012, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc43c0008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9bc0ffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400012, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc43c0008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x97c0ffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4380007, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x17b80001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x18d40038, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c410001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9b800004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc414005e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9540073d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x18c80066, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c414001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x30880001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c418001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x94800008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c00187c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc42c0004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd910000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcec00008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7d410001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x043c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c41c001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c420001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04240001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x06200001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x4220000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a640001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcc000078, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9a40fffe, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x24e80007, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x24ec0010, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9ac00006, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc42c0004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc5310000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcec00008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80001465, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7d15001a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd1000072, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc82c0072, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd2c0001e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x18f02011, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x5aec01fc, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x12ec0009, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7ef2c00a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2aec0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcec0001c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc42c000f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9ac0ffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4300011, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x96800012, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x12a80001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0aa80001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x06a8146a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7f1f0009, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7f1b400f, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0x80001478, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7f1b400e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80001478, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7f1b400c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8000147a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7f1b400d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8000147a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7f1b400f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8000147a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7f1b400e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8000147a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7f334002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x97400014, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8000147b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9b400012, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9b800005, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9bc0001f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7e024001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x043c0001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8000144a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc40c0032, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc438001d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x28cc0008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xccc00013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc43d325b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1bb81ff0, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7fbfc00a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcfc1325b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc411325d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x251001ef, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd01325d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x94800007, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c00187c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc42c0004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd910000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcec00008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9b800003, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc40c0032, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x28cc0008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xccc00013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc40d325b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x800012c2, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc40c000e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc43c0007, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc438001d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x28cc0008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xccc00013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x13f4000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9bc00006, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc43d3256, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1bf0060b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1bfc0077, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7ff3c00a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x800014a9, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc43d325a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1bfc0677, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04300100, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1bb81ff0, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7f73400a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc0328007, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7fb7800a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x13fc0017, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7ff3c00a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7ffbc00a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcfc1325b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc03a0002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4340004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0xcf8130b5, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf400008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000c16, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x043c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc414000e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x29540008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4193246, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc41d3245, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x51980020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7dd9c01a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x45dc0390, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4313267, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04183000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd813267, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1b380057, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1b340213, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1b300199, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7f7b400a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7f73400a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf400024, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd1c00025, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcc800026, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c420001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce000026, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c424001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce400026, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c428001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce800026, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c42c001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcec00026, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c430001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf000026, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c434001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf400026, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c438001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf800026, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04182000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd813267, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd840004f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1a0800fd, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x109c000a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4193265, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7dd9c00a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcdc13265, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2620ffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce080228, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9880000e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce480250, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce880258, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8080230, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8080238, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8080240, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8080248, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8080268, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8080270, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8080278, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8080280, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd800004f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x97c0ec75, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x040c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x041c0010, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x26180001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x09dc0001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x16200001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x95800002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04cc0001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x99c0fffb, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xccc80230, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8080238, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8080240, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0xd8080248, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x040c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce480250, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce880258, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x52a80020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7e6a401a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x041c0020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x66580001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x09dc0001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x56640001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x95800002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04cc0001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x99c0fffb, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xccc80260, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8080268, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8080270, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8080278, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8080280, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x040c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcec80288, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf080290, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcec80298, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf0802a0, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x040c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x041c0010, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf4802a8, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x27580001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x09dc0001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x17740001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x95800002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04cc0001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x99c0fffb, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xccc802b0, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd80802b8, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x178c000b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x27b8003f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7cf8c001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf8802c0, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xccc802c8, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf8802d0, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf8802d8, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd800004f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x97c00002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc40c000e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x28cc0008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xccc00013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc43d3265, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1bc800ea, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c418001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x25b8ffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4930240, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc48f0238, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04cc0001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x24cc000f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7cd2800c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9a80000b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc5230309, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2620ffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7e3a400c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9a400004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x05100001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2510000f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80001539, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd08034b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd4400078, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc48f0230, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4930240, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x98c00004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd880353, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c00163f, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0xc49b0353, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4930238, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc48f0228, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x05100001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2510000f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7cd14005, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x25540001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x99400004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x05100001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2510000f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8000154f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc48f0230, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c41c001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd080238, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd08034b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x08cc0001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2598ffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x3d200008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xccc80230, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd900309, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8100319, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04340801, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2198003f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd910ce7, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4190ce6, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7d918005, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x25980001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9580fffd, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7d918004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd810ce6, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9a000003, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcdd1054f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8000156e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x090c0008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcdcd050e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x040c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x110c0014, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x28cc4001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xccc00013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcc41230a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcc41230b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcc41230c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcc41230d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcc480329, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcc48032a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcc4802e0, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8000055, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc48f02e0, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x24d8003f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x09940001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x44100001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9580002c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x95400005, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x09540001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x51100001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x69100001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8000157f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x24cc003f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4970290, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc49b0288, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7d59401a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc49b02a0, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc49f0298, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x51980020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7d9d801a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x041c0040, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04200000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7dcdc002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7d924019, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7d26400c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x09dc0001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9a400008, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0x51100001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x06200001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x99c0fffa, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc48f0230, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4930240, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c00163f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80001579, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7d010021, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7d914019, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4930238, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x55580020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd480298, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd8802a0, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x10d40010, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x12180016, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc51f0309, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7d95800a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7d62000a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7dd9c00a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcdd00309, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce113320, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc48f02e0, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc49b02b0, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x18dc01e8, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7dd9400e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc48f0230, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4930240, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x95c0001d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x95400003, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c00163f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x800015aa, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc48f0238, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4a302b8, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x12240004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7e5e400a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4ab02a8, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04100000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce4c0319, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7d9d8002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7ea14005, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x25540001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x99400004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x06200001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2620000f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x800015bc, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x09dc0001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04240001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7e624004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x06200001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7d25000a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2620000f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x99c0fff4, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd0d3330, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce0802b8, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd8802b0, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4ab02e0, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1aa807f0, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc48f02d0, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc49702d8, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc49b02c8, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc49f02c0, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x96800028, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7d4e000f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9600000b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7d964002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7e6a000f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x96000003, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7d694001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x800015e9, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7cde4002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7e6a000f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x96000008, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0x7de94001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x800015e9, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7cd64002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7e6a000e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x96000003, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7d694001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x800015e9, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc48f0230, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4930240, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c00163f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x800015cd, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4930238, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7d698002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd4802d8, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x129c0008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc50f0319, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x11a0000e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x11140001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4340004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7e1e000a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1198000a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd953300, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7e0e000a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x12a8000a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce953301, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce100319, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf400008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4b70280, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4b30278, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7f73800a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x536c0020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9780eb68, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c001608, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8080278, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8080280, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x043c0003, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80001609, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x043c0001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x30b40000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9b400011, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4b70258, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4b30250, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x53780020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7fb3801a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7faf8019, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04300020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04280000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x67b40001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0b300001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x57b80001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x97400002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x06a80001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9b00fffb, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4bb0260, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7fab8001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf880260, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04300020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04280000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x66f40001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0b300001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x56ec0001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x97400005, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c001628, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4353247, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7f7f4009, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9b40fffe, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x06a80001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9b00fff7, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0x269c0007, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x11dc0008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x29dc0008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x26a00018, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x12200003, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7de1c00a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x26a00060, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x06200020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x16200001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7de1c00a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcdc00013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x269c0018, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x26a00007, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x26a40060, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x11dc0006, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x12200006, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x16640001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x29dc0008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7de1c00a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7de5c00a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcdc00013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4b70228, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x05100001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04cc0001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2510000f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xccc80230, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7f514005, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x25540001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x99400004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x05100001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2510000f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80001644, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4b30248, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd080240, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7f130005, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x27300001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9b000002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c001688, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c00120d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c001219, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c001232, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04300001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04340801, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7f130004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf01051e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc42d051f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7ed2c005, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x26ec0001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x96c0fffd, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf01051f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8000055, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc5170309, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x195c07f0, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x196007f6, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04340000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x95c00008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x09dc0001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04340001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x95c00005, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x09dc0001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x53740001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x6b740001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80001665, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4a702a0, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4ab0298, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x52640020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7e6a401a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7f634014, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7e76401a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4300004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x56680020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8113320, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce480298, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce8802a0, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc5170319, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4b702b0, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x255c000f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7f5f4001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8113330, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf4802b0, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x11340001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x195c07e8, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x196007ee, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8353300, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7e1e4001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8353301, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce4802d0, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8100309, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8100319, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf000008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4970258, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc48f0250, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7cd4c01a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4af0280, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4b30278, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x52ec0020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04140020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04280000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x64d80001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x09540001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x54cc0001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x95800060, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c001628, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4193247, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x25980001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9580005c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7dc24001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc41d3248, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x25dc000f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7dd2000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x96000057, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc41d3255, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc435324f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7df5c00c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x99c00004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4193265, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x25980040, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9580fffe, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc439325b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1bb0003f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x97000049, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1bb000e8, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x33380003, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9b800046, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x33300002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9700000a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4393260, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1bb000e4, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x33300004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x97000040, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc431325d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x27300010, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9b00fffe, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x800016f1, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc033ffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2f3000ff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc439325b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7f3b0009, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf01325b, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0xc439325b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x27b800ff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9b80fffe, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8c00033, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4300009, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x27300008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9700fffe, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1a7003e6, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x27380003, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x13b80004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x27300003, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x13300003, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7fb38001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1a7000e8, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7fb38001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x13300001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7fb38001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x07b80002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1a700064, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x33300002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x97000009, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x17b00005, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x07300003, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf012082, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0b300003, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x800016df, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x17b00005, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf012082, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x13300005, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7fb30002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4392083, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7fb38005, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x27b80001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9b80ffdf, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8c00034, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc431325d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x27300010, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9b00fffe, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc439325b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x27b000ff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9b00ffca, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd841325d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2030007b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf01325b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x800016f2, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd841325d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04300001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7f2b0014, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x06a80001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9940ff9c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c001608, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8080278, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8080280, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd840004f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc414000e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x29540008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc43d3265, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1bc800ea, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd80802e9, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x18fc0064, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9bc00042, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4193246, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0xc41d3245, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x51980020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7dd9801a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x45980400, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4313267, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x043c3000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcfc13267, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc43d3267, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9bc00001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1b380057, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1b340213, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1b300199, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7f7b400a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7f73400a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf400024, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x14f4001d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4bf02e9, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9bc0001c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c410001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x192807fa, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4bf0258, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4a70250, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x53fc0020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7e7e401a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x042c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04300000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x667c0001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x56640001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x06ec0001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x97c0fffd, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x07300001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0aec0001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7eebc00c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x06ec0001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x97c0fff8, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0b300001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x43300007, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x53300002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7db30011, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd3000025, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc03ec005, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2bfca200, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcfc00026, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xccc00026, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd000026, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x192807fa, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc01f007f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7d1d0009, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2110007d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c001628, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x203c003f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcfc13256, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c0017f5, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd013254, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x18fc01e8, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcfc13248, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c00185b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8413247, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0b740001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9b40ffd5, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd800004f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4bf02e9, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x97c0ea24, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x14d4001d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4930260, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7d52400e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc49f0258, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4a30250, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x51dc0020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7de1801a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x96400017, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7d534002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4af0270, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0x7dae4005, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x26640001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x32e0001f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9a400006, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x06ec0001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x96000002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x042c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcec80270, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8000174f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0b740001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c00178a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x05100001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9b40fff3, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4af0280, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4b30278, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x52ec0020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c001608, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8080278, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8080280, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4ab0268, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7daa4005, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x26640001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x32a0001f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9a400005, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x06a80001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x96000002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x24280000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80001765, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c410001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc01f007f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x09540001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7d1d0009, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2110007d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c001628, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8013256, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c0017f2, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd013254, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4113248, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x15100004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x11100004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4b3034b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7f13000a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf013248, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4930260, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c001855, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x32a4001f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8413247, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd800004f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x09100001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x06a80001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x96400002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x24280000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd080260, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce880268, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9940ffc0, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7ec28001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c001628, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x32e0001f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4253247, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x26640001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9640005e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4293265, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4253255, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc431324f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7e72400c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x26a80040, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9a400002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9680fff7, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc429325b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1aa4003f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x96400049, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0x1aa400e8, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x32680003, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9a800046, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x32640002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9640000a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4293260, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1aa400e4, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x32640004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x96400040, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc425325d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x26640010, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9a40fffe, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x800017e2, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcdc00013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc027ffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2e6400ff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc429325b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7e6a4009, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce41325b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc429325b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x26a800ff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9a80fffe, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8c00033, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4240009, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x26640008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9640fffe, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x19e403e6, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x26680003, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x12a80004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x26640003, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x12640003, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7ea68001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x19e400e8, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7ea68001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x12640001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7ea68001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x06a80002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x19e40064, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x32640002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x96400009, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x16a40005, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x06640003, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce412082, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a640003, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x800017d0, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x16a40005, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce412082, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x12640005, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7ea64002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4292083, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7ea68005, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x26a80001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9a80ffdf, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8c00034, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcdc00013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc425325d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x26640010, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9a40fffe, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc429325b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x26a400ff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9a40ffca, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd841325d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2024007b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce41325b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x800017e3, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd841325d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4a70280, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0xc4ab0278, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x52640020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7e6a401a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04280001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7eae8014, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7e6a401a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x56680020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce480278, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce880280, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x06ec0001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x96000002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x042c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcec80270, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c438001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c420001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x800017fe, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4bf02e9, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9bc00006, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c438001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c420001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf800026, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce000026, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x800017fe, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc43b02eb, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc42302ec, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf813245, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce013246, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x52200020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7fa3801a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x47b8020c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x15e00008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1220000a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2a206032, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x513c001e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7e3e001a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4bf02e9, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9bc00005, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc43c000e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2bfc0008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8000180f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4313267, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1b3c0077, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1b300199, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7ff3000a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1330000a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2b300032, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x043c3000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcfc13267, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc43d3267, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd200000b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4200007, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd3800002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf000002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8000040, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x96000002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400040, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400018, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x043c2000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcfc13267, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8000018, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8800010, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcdc00013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7dc30001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xdc1e0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04380032, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf80000e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c001427, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcc413248, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc43d3269, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x27fc000f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x33fc0003, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0x97c00011, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x043c001f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xdfc30000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd4413249, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c43c001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c43c001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x043c0024, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0bfc0021, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xdfc30000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd441326a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x173c0008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1b300303, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7f3f0001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x043c0001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7ff3c004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcfc13084, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80001842, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x043c0024, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xdfc30000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd4413249, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c43c001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x23fc003f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcfc1326d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0bb80026, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xdf830000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd441326e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c438001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c438001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4393265, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1fb8ffc6, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xddc30000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf813265, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9a000003, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcdc0000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80001852, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcdc0000d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce000010, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c00142b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c41c001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c420001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcdc13252, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce013253, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c001628, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80001878, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc49f02e9, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x99c00018, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c41c001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c420001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcdc13252, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce013253, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc43c000e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2bfc0008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x043c3000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcfc13267, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc43d3267, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x97c0ffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcdc00026, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce000026, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc41c0012, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x99c0ffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc43c000e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2bfc0008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x043c2000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcfc13267, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c001628, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80001878, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc41f02ed, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc42302ee, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcdc13252, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce013253, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0x04200001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7e2a0004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce013084, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x28340001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x313c0bcc, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9bc00010, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x393c051f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9bc00004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x3d3c050e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9bc0000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x97c0000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x393c0560, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9bc00004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x3d3c054f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9bc00007, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x97c00007, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x393c1538, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9bc00005, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x3d3c1537, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9bc00002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x97c00002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2b740800, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc40c000e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x28cc0008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xccc00013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc43d3265, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1bc800ea, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x18e8007c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c42c001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x06a8189a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8000189e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x800018c5, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x800018f2, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8000016a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c414001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x18d0007e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x50580020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x09200001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7d59401a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd1400072, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc8140072, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x09240002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c418001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c41c001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x99000011, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4340004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc42130b5, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1a24002c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9a40fffe, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2020002c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc418000d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1198001c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x10cc0004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x14cc0004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7cd8c00a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xccc130b7, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce0130b5, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf400008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd1400025, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x5978073a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2bb80002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf800024, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd800026, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcdc00026, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9600e8a8, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4300012, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0x9b00ffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9640e8a5, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x800018a9, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04140000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc55b0309, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x3d5c0010, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x05540001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2598ffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x09780001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7dad800c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x99c0ffd2, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9580fff9, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4970258, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4930250, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7d15001a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04140020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04280000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x442c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x65180001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x09540001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x55100001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9580000b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c001628, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc41d3248, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04300001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7f2b0014, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x25dc000f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7df9c00c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x95c00004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8c13260, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd901325d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x06a80001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9940fff1, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04140020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04280000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x66d80001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x09540001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x56ec0001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x95800005, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c001628, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc421325d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x26240007, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9a40fffe, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x06a80001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9940fff7, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8000189e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04140020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04280000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x09540001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c001628, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc41d3254, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc023007f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x19e4003e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7de1c009, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7dee000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x96400008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x96000007, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8c13260, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd901325d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc421325d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x261c0007, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x99c0fffe, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8000189e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x06a80001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9940fff0, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8000189e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc40c000e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x28cc0008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xccc00013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc43d3265, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1bc800ea, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0x18e00064, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x06281911, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x14f4001d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x24cc0003, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80001915, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x800019af, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80001a2b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8000016a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcc48032b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcc480333, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcc48033b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcc480343, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x98800011, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4213246, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4253245, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x52200020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7e26401a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x46640400, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4313267, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04203000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce013267, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4213267, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9a000001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1b3c0057, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1b200213, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1b300199, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7e3e000a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7e32000a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce000024, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4970258, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4930250, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7d15001a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4af0280, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4b30278, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x52ec0020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04180000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04140020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04280000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7f438001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c001628, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc41d3247, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x25dc0001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x95c00068, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4213254, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1a1c003e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x95c00065, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc01f007f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7e1e0009, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x97800062, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0bb80001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x43bc0008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7fcbc001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc7df032b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7e1fc00c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x97c0fffa, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x043c0101, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x94c00002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x043c0102, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc439325b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1bb0003f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x97000049, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1bb000e8, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x33380003, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9b800046, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x33300002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x97000009, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4393260, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1bb000e4, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x33300004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x97000040, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc431325d, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0x27300010, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9b00fffe, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80001994, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c001628, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc033ffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2f3000ff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc439325b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7f3b0009, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf01325b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc439325b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x27b800ff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9b80fffe, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8c00033, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4300009, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x27300008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9700fffe, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x19f003e6, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x27380003, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x13b80004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x27300003, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x13300003, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7fb38001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x19f000e8, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7fb38001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x13300001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7fb38001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x07b80002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x19f00064, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x33300002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x97000009, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x17b00005, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x07300003, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf012082, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0b300003, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80001982, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x17b00005, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf012082, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x13300005, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7fb30002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4392083, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7fb38005, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x27b80001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9b80ffdf, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8c00034, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcdc00013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc431325d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x27300010, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9b00fffe, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc439325b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x27b000ff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9b00ffcb, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcfc1325d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2030007b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf01325b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80001995, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcfc1325d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04300001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7f2b0014, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x98800009, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x41bc0007, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x53fc0002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7e7fc011, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd3c00025, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8000026, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc43c0012, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0x9bc0ffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x653c0001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7dbd8001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x06a80001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x09540001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x55100001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9940ff8f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc43c000e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2bfc0008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x043c2000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcfc13267, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8080278, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8080280, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c410001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04140000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc55b0309, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x3d5c0010, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2598ffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x05540001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7d91800c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x95c00003, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd4400078, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9580fff8, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x09780001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4970258, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4930250, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7d15001a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4af0280, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4b30278, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x52ec0020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04140020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04280000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x65180001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x09540001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x55100001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9580005d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c001628, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4253247, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x26640001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04200101, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x96400058, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7dc24001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc41d3248, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x25dc000f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7df9c00c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x95c00053, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x94c00002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04200102, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7e41c001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc425325b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1a70003f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x97000049, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1a7000e8, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x33240003, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9a400046, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x33300002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9700000a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4253260, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1a7000e4, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x33300004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x97000040, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc431325d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x27300010, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9b00fffe, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80001a21, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcdc00013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc033ffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2f3000ff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc425325b, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0x7f270009, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf01325b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc425325b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x266400ff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9a40fffe, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8c00033, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4300009, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x27300008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9700fffe, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x19f003e6, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x27240003, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x12640004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x27300003, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x13300003, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7e724001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x19f000e8, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7e724001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x13300001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7e724001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x06640002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x19f00064, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x33300002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x97000009, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x16700005, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x07300003, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf012082, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0b300003, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80001a0f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x16700005, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf012082, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x13300005, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7e730002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4252083, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7e724005, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x26640001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9a40ffdf, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8c00034, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcdc00013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc431325d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x27300010, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9b00fffe, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc425325b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x267000ff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9b00ffca, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce01325d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2030007b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf01325b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80001a22, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce01325d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04300001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7f2b0014, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x06a80001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9940ff9f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd4400078, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8080278, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8080280, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c001a31, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd4400078, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8080278, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8080280, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4213246, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4253245, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x52200020, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0x7e26401a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x46640400, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4313267, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04203000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce013267, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4213267, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9a000001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1b180057, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1b200213, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1b300199, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7e1a000a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7e32000a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce000024, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4970258, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4930250, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7d15001a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4af0280, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4b30278, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x52ec0020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04140020, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04280000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x65180001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x95800060, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x8c001628, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4193247, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x25980001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04200101, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x94c00005, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x30f00005, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04200005, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9b000002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04200102, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x95800056, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc439325b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1bb0003f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x97000049, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1bb000e8, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x33380003, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9b800046, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x33300002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9700000a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4393260, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1bb000e4, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x33300004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x97000040, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc431325d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x27300010, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9b00fffe, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80001aa2, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcdc00013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc033ffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2f3000ff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc439325b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7f3b0009, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf01325b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc439325b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x27b800ff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9b80fffe, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8c00033, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4300009, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x27300008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9700fffe, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x19f003e6, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x27380003, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x13b80004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x27300003, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x13300003, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7fb38001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x19f000e8, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7fb38001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x13300001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7fb38001, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0x07b80002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x19f00064, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x33300002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x97000009, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x17b00005, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x07300003, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf012082, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0b300003, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80001a90, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x17b00005, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf012082, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x13300005, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7fb30002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4392083, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7fb38005, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x27b80001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9b80ffdf, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8c00034, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcdc00013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc431325d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x27300010, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9b00fffe, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc439325b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x27b000ff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9b00ffca, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce01325d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2030007b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf00325b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80001aa3, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce01325d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04300001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7f2b0014, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc49b02e9, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x99800005, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd2400025, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x4664001c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8000026, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x06a80001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x09540001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x55100001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9940ff9c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc49b02e9, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x99800008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc430000e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2b300008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf000013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04302000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcf013267, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc4313267, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x97000001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x244c00ff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcc4c0200, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc44f0200, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc410000b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc414000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7d158010, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x059cc000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xccdd0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc40c0037, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0x94c0ffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcc000049, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc40c003a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x94c0ffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x24d00001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9500e69a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x18d0003b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x18d40021, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x99400006, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd840004a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc40c003c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x94c0ffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x14cc0001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x94c00028, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8000033, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc438000b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc43c0009, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x27fc0001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x97c0fffe, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd841c07f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc43dc07f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1bfc0078, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7ffbc00c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x97c0fffd, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x99000004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc0120840, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x282c0040, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80001ae8, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc0121841, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x282c001a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd01c07c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcc01c07d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcc01c08c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcc01c079, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcc01c07e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04200004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcec0001b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a200001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9a00ffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc425c07f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x166c001f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04200004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9ac0fffb, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc434000f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9b40ffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd801c07f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc425c07f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce400078, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8000034, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9940e66b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd800004a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc40c0036, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x24d00001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9900fffe, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x18cc0021, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xccc00047, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcc000046, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc40c0039, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x94c0ffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc40c003d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x98c0ffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x24d003ff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x18d47fea, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x18d87ff4, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd00004c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd40004e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd80004d, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd41c405, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc02a0001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2aa80001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd01c406, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcc01c406, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcc01c406, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc40c0006, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x98c0ffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc414000e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x29540008, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x295c0001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8c1325e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcdc0001a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x11980002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x4110000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc0160800, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7d15000a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc0164010, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd41c078, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcc01c080, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcc01c081, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd81c082, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcc01c083, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd01c084, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc40c0006, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x98c0ffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400048, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc40c003b, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x94c0ffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000c16, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd801c40a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd901c40d, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd801c410, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd801c40e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd801c40f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc40c0040, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04140001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x09540001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9940ffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04140096, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xccc1c400, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc411c401, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9500fffa, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc424003e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04d00001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x11100002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd01c40c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc0180034, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd81c411, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd841c414, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0a540001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcd41c412, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x2468000f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc419c416, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x41980003, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc41c003f, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7dda0001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x12200002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x10cc0002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xccc1c40c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd901c411, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce41c412, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd8800013, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xce292e40, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcc412e01, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcc412e02, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcc412e03, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcc412e00, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0x80000aa7, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc43c0007, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xdc120000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x31144000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x95400005, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xdc030000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xcc3c000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x33f80003, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd4400078, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x9780e601, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x188cfff0, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x04e40002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80001190, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc424005e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x96400006, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc424005e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x96400003, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80001b74, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xbf810000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000004, mmCP_DFY_CNTL }, + { PwrCmdWrite, 0x000000b4, mmCP_DFY_ADDR_HI }, + { PwrCmdWrite, 0x54106500, mmCP_DFY_ADDR_LO }, + { PwrCmdWrite, 0x7e000200, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7e020204, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc00a0505, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xbf8c007f, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0xb8900904, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xb8911a04, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xb8920304, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xb8930b44, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x921c0d0c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x921c1c13, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x921d0c12, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x811c1d1c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x811c111c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x921cff1c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000400, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x921dff10, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000100, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x81181d1c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7e040218, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xe0701000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80050002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xe0701000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80050102, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xe0701000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80050002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xe0701000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80050102, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xe0701000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80050002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xe0701000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80050102, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xbf810000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000004, mmCP_DFY_CNTL }, + { PwrCmdWrite, 0x000000b4, mmCP_DFY_ADDR_HI }, + { PwrCmdWrite, 0x54106900, mmCP_DFY_ADDR_LO }, + { PwrCmdWrite, 0x7e080200, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x7e100204, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xbefc00ff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00010000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x24200087, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x262200ff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x000001f0, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x20222282, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x28182111, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0000040c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0000080c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0000040c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0000080c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0000040c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0000080c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0000040c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0000080c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0000040c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0000080c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0000040c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0000080c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0000040c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0000080c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0000040c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0x0000080c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0000040c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0000080c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0000040c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0000080c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xbf810000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x80000004, mmCP_DFY_CNTL }, + { PwrCmdWrite, 0x000000b4, mmCP_DFY_ADDR_HI }, + { PwrCmdWrite, 0x54116f00, mmCP_DFY_ADDR_LO }, + { PwrCmdWrite, 0xc0310800, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000040, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xb4540fe8, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000041, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0000000c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x07808000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x540fee40, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x000000b4, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000010, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x54116f00, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x000000b4, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00005301, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xb4540fef, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x540fee20, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x000000b4, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x08000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc0310800, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000040, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xb454105e, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x000000c0, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000010, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x07808000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x540fee40, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x000000b4, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000010, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x54117300, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x000000b4, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00005301, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xb4540fef, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x540fee20, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x000000b4, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x08000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc0310800, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000040, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xb4541065, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000500, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0000001c, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x07808000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x540fee40, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x000000b4, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000010, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x54117700, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x000000b4, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00005301, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xb4540fef, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x540fee20, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x000000b4, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x08000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xc0310800, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000040, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0xb4541069, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000444, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x0000008a, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x07808000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000002, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x540fee40, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x000000b4, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000010, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000004, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x54117b00, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x000000b4, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00005301, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0xb4540fef, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x540fee20, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x000000b4, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x08000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { 
PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 }, + { PwrCmdWrite, 0x00000000, mmCP_MEC_CNTL }, + { PwrCmdWrite, 0x00000000, mmCP_MEC_CNTL }, + { PwrCmdWrite, 0x00000004, mmSRBM_GFX_CNTL }, + { PwrCmdWrite, 0x54116f00, mmCP_MQD_BASE_ADDR }, + { PwrCmdWrite, 0x000000b4, mmCP_MQD_BASE_ADDR_HI }, + { PwrCmdWrite, 0xb4540fef, mmCP_HQD_PQ_BASE }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_BASE_HI }, + { PwrCmdWrite, 0x540fee20, mmCP_HQD_PQ_WPTR_POLL_ADDR }, + { PwrCmdWrite, 0x000000b4, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI }, + { PwrCmdWrite, 0x00005301, mmCP_HQD_PERSISTENT_STATE }, + { PwrCmdWrite, 0x00010000, mmCP_HQD_VMID }, + { PwrCmdWrite, 0xc8318509, mmCP_HQD_PQ_CONTROL }, + { PwrCmdWrite, 0x00000005, mmSRBM_GFX_CNTL }, + { PwrCmdWrite, 0x54117300, mmCP_MQD_BASE_ADDR }, + { PwrCmdWrite, 0x000000b4, mmCP_MQD_BASE_ADDR_HI }, + { PwrCmdWrite, 0xb4540fef, mmCP_HQD_PQ_BASE }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_BASE_HI }, + { PwrCmdWrite, 0x540fee20, mmCP_HQD_PQ_WPTR_POLL_ADDR }, + { PwrCmdWrite, 0x000000b4, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI }, + { PwrCmdWrite, 0x00005301, mmCP_HQD_PERSISTENT_STATE }, + { PwrCmdWrite, 0x00010000, mmCP_HQD_VMID }, + { PwrCmdWrite, 0xc8318509, mmCP_HQD_PQ_CONTROL }, + { PwrCmdWrite, 0x00000006, mmSRBM_GFX_CNTL }, + { PwrCmdWrite, 0x54117700, mmCP_MQD_BASE_ADDR }, + { PwrCmdWrite, 0x000000b4, mmCP_MQD_BASE_ADDR_HI }, + { PwrCmdWrite, 0xb4540fef, mmCP_HQD_PQ_BASE }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_BASE_HI }, + { PwrCmdWrite, 0x540fee20, mmCP_HQD_PQ_WPTR_POLL_ADDR }, + { PwrCmdWrite, 0x000000b4, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI }, + { PwrCmdWrite, 0x00005301, mmCP_HQD_PERSISTENT_STATE }, + { PwrCmdWrite, 0x00010000, mmCP_HQD_VMID }, + { PwrCmdWrite, 0xc8318509, mmCP_HQD_PQ_CONTROL }, + { PwrCmdWrite, 0x00000007, mmSRBM_GFX_CNTL }, + { PwrCmdWrite, 0x54117b00, mmCP_MQD_BASE_ADDR }, + { PwrCmdWrite, 0x000000b4, mmCP_MQD_BASE_ADDR_HI }, + { PwrCmdWrite, 0xb4540fef, mmCP_HQD_PQ_BASE }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_BASE_HI }, + { PwrCmdWrite, 0x540fee20, mmCP_HQD_PQ_WPTR_POLL_ADDR }, + { PwrCmdWrite, 0x000000b4, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI }, + { PwrCmdWrite, 0x00005301, mmCP_HQD_PERSISTENT_STATE }, + { PwrCmdWrite, 0x00010000, mmCP_HQD_VMID }, + { PwrCmdWrite, 0xc8318509, mmCP_HQD_PQ_CONTROL }, + { PwrCmdWrite, 0x00000004, mmSRBM_GFX_CNTL }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR }, + { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE }, + { PwrCmdWrite, 0x00000104, mmSRBM_GFX_CNTL }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE 
}, + { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR }, + { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE }, + { PwrCmdWrite, 0x00000204, mmSRBM_GFX_CNTL }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR }, + { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE }, + { PwrCmdWrite, 0x00000304, mmSRBM_GFX_CNTL }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR }, + { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE }, + { PwrCmdWrite, 0x00000404, mmSRBM_GFX_CNTL }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR }, + { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE }, + { PwrCmdWrite, 0x00000504, mmSRBM_GFX_CNTL }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR }, + { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE }, + { PwrCmdWrite, 0x00000604, mmSRBM_GFX_CNTL }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR }, + { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE }, + { PwrCmdWrite, 0x00000704, mmSRBM_GFX_CNTL }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR }, + { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE }, + { PwrCmdWrite, 0x00000005, mmSRBM_GFX_CNTL }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR }, + { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE }, + { PwrCmdWrite, 0x00000105, mmSRBM_GFX_CNTL }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR }, + { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE }, + { PwrCmdWrite, 0x00000205, mmSRBM_GFX_CNTL }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR }, + { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE }, + { PwrCmdWrite, 0x00000305, mmSRBM_GFX_CNTL }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR }, + { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE }, + { PwrCmdWrite, 0x00000405, mmSRBM_GFX_CNTL }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR }, + { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE }, + { PwrCmdWrite, 0x00000505, mmSRBM_GFX_CNTL }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR }, + { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE }, + { PwrCmdWrite, 0x00000605, mmSRBM_GFX_CNTL }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR }, + { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE }, + { PwrCmdWrite, 0x00000705, mmSRBM_GFX_CNTL }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR }, + { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE }, + { PwrCmdWrite, 
0x00000006, mmSRBM_GFX_CNTL }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR }, + { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE }, + { PwrCmdWrite, 0x00000106, mmSRBM_GFX_CNTL }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR }, + { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE }, + { PwrCmdWrite, 0x00000206, mmSRBM_GFX_CNTL }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR }, + { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE }, + { PwrCmdWrite, 0x00000306, mmSRBM_GFX_CNTL }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR }, + { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE }, + { PwrCmdWrite, 0x00000406, mmSRBM_GFX_CNTL }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR }, + { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE }, + { PwrCmdWrite, 0x00000506, mmSRBM_GFX_CNTL }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR }, + { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE }, + { PwrCmdWrite, 0x00000606, mmSRBM_GFX_CNTL }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR }, + { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE }, + { PwrCmdWrite, 0x00000706, mmSRBM_GFX_CNTL }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR }, + { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE }, + { PwrCmdWrite, 0x00000007, mmSRBM_GFX_CNTL }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR }, + { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE }, + { PwrCmdWrite, 0x00000107, mmSRBM_GFX_CNTL }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR }, + { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE }, + { PwrCmdWrite, 0x00000207, mmSRBM_GFX_CNTL }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR }, + { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE }, + { PwrCmdWrite, 0x00000307, mmSRBM_GFX_CNTL }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR }, + { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE }, + { PwrCmdWrite, 0x00000407, mmSRBM_GFX_CNTL }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR }, + { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE }, + { PwrCmdWrite, 0x00000507, mmSRBM_GFX_CNTL }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR }, + { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE }, + { PwrCmdWrite, 0x00000607, mmSRBM_GFX_CNTL }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR }, + { PwrCmdWrite, 0x00000000, 
mmCP_HQD_PQ_WPTR }, + { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE }, + { PwrCmdWrite, 0x00000707, mmSRBM_GFX_CNTL }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR }, + { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE }, + { PwrCmdWrite, 0x00000008, mmSRBM_GFX_CNTL }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR }, + { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE }, + { PwrCmdWrite, 0x00000108, mmSRBM_GFX_CNTL }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR }, + { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE }, + { PwrCmdWrite, 0x00000208, mmSRBM_GFX_CNTL }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR }, + { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE }, + { PwrCmdWrite, 0x00000308, mmSRBM_GFX_CNTL }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR }, + { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE }, + { PwrCmdWrite, 0x00000408, mmSRBM_GFX_CNTL }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR }, + { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE }, + { PwrCmdWrite, 0x00000508, mmSRBM_GFX_CNTL }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR }, + { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE }, + { PwrCmdWrite, 0x00000608, mmSRBM_GFX_CNTL }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR }, + { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE }, + { PwrCmdWrite, 0x00000708, mmSRBM_GFX_CNTL }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR }, + { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE }, + { PwrCmdWrite, 0x00000009, mmSRBM_GFX_CNTL }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR }, + { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE }, + { PwrCmdWrite, 0x00000109, mmSRBM_GFX_CNTL }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR }, + { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE }, + { PwrCmdWrite, 0x00000209, mmSRBM_GFX_CNTL }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR }, + { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE }, + { PwrCmdWrite, 0x00000309, mmSRBM_GFX_CNTL }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR }, + { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE }, + { PwrCmdWrite, 0x00000409, mmSRBM_GFX_CNTL }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR }, + { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE }, + { PwrCmdWrite, 0x00000509, mmSRBM_GFX_CNTL }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE }, + 
{ PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR }, + { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE }, + { PwrCmdWrite, 0x00000609, mmSRBM_GFX_CNTL }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR }, + { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE }, + { PwrCmdWrite, 0x00000709, mmSRBM_GFX_CNTL }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR }, + { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR }, + { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE }, + { PwrCmdWrite, 0x00000004, mmSRBM_GFX_CNTL }, + { PwrCmdWrite, 0x01010101, mmCP_PQ_WPTR_POLL_CNTL1 }, + { PwrCmdWrite, 0x00000000, mmGRBM_STATUS }, + { PwrCmdWrite, 0x00000000, mmGRBM_STATUS }, + { PwrCmdWrite, 0x00000000, mmGRBM_STATUS }, + { PwrCmdEnd, 0x00000000, 0x00000000 }, +}; + +#endif diff --git a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h new file mode 100644 index 000000000000..91795efe1336 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h @@ -0,0 +1,385 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#ifndef _HARDWARE_MANAGER_H_ +#define _HARDWARE_MANAGER_H_ + + + +struct pp_hwmgr; +struct pp_hw_power_state; +struct pp_power_state; +enum amd_dpm_forced_level; +struct PP_TemperatureRange; + +struct phm_fan_speed_info { + uint32_t min_percent; + uint32_t max_percent; + uint32_t min_rpm; + uint32_t max_rpm; + bool supports_percent_read; + bool supports_percent_write; + bool supports_rpm_read; + bool supports_rpm_write; +}; + +/* Automatic Power State Throttling */ +enum PHM_AutoThrottleSource +{ + PHM_AutoThrottleSource_Thermal, + PHM_AutoThrottleSource_External +}; + +typedef enum PHM_AutoThrottleSource PHM_AutoThrottleSource; + +enum phm_platform_caps { + PHM_PlatformCaps_AtomBiosPpV1 = 0, + PHM_PlatformCaps_PowerPlaySupport, + PHM_PlatformCaps_ACOverdriveSupport, + PHM_PlatformCaps_BacklightSupport, + PHM_PlatformCaps_ThermalController, + PHM_PlatformCaps_BiosPowerSourceControl, + PHM_PlatformCaps_DisableVoltageTransition, + PHM_PlatformCaps_DisableEngineTransition, + PHM_PlatformCaps_DisableMemoryTransition, + PHM_PlatformCaps_DynamicPowerManagement, + PHM_PlatformCaps_EnableASPML0s, + PHM_PlatformCaps_EnableASPML1, + PHM_PlatformCaps_OD5inACSupport, + PHM_PlatformCaps_OD5inDCSupport, + PHM_PlatformCaps_SoftStateOD5, + PHM_PlatformCaps_NoOD5Support, + PHM_PlatformCaps_ContinuousHardwarePerformanceRange, + PHM_PlatformCaps_ActivityReporting, + PHM_PlatformCaps_EnableBackbias, + PHM_PlatformCaps_OverdriveDisabledByPowerBudget, + PHM_PlatformCaps_ShowPowerBudgetWarning, + PHM_PlatformCaps_PowerBudgetWaiverAvailable, + PHM_PlatformCaps_GFXClockGatingSupport, + PHM_PlatformCaps_MMClockGatingSupport, + PHM_PlatformCaps_AutomaticDCTransition, + PHM_PlatformCaps_GeminiPrimary, + PHM_PlatformCaps_MemorySpreadSpectrumSupport, + PHM_PlatformCaps_EngineSpreadSpectrumSupport, + PHM_PlatformCaps_StepVddc, + PHM_PlatformCaps_DynamicPCIEGen2Support, + PHM_PlatformCaps_SMC, + PHM_PlatformCaps_FaultyInternalThermalReading, /* Internal thermal controller reports faulty temperature value when DAC2 is active */ + PHM_PlatformCaps_EnableVoltageControl, /* indicates voltage can be controlled */ + PHM_PlatformCaps_EnableSideportControl, /* indicates Sideport can be controlled */ + PHM_PlatformCaps_VideoPlaybackEEUNotification, /* indicates EEU notification of video start/stop is required */ + PHM_PlatformCaps_TurnOffPll_ASPML1, /* PCIE Turn Off PLL in ASPM L1 */ + PHM_PlatformCaps_EnableHTLinkControl, /* indicates HT Link can be controlled by ACPI or CLMC overrided/automated mode. */ + PHM_PlatformCaps_PerformanceStateOnly, /* indicates only performance power state to be used on current system. */ + PHM_PlatformCaps_ExclusiveModeAlwaysHigh, /* In Exclusive (3D) mode always stay in High state. */ + PHM_PlatformCaps_DisableMGClockGating, /* to disable Medium Grain Clock Gating or not */ + PHM_PlatformCaps_DisableMGCGTSSM, /* TO disable Medium Grain Clock Gating Shader Complex control */ + PHM_PlatformCaps_UVDAlwaysHigh, /* In UVD mode always stay in High state */ + PHM_PlatformCaps_DisablePowerGating, /* to disable power gating */ + PHM_PlatformCaps_CustomThermalPolicy, /* indicates only performance power state to be used on current system. */ + PHM_PlatformCaps_StayInBootState, /* Stay in Boot State, do not do clock/voltage or PCIe Lane and Gen switching (RV7xx and up). */ + PHM_PlatformCaps_SMCAllowSeparateSWThermalState, /* SMC use separate SW thermal state, instead of the default SMC thermal policy. */ + PHM_PlatformCaps_MultiUVDStateSupport, /* Powerplay state table supports multi UVD states. 
*/ + PHM_PlatformCaps_EnableSCLKDeepSleepForUVD, /* With HW ECOs, we don't need to disable SCLK Deep Sleep for UVD state. */ + PHM_PlatformCaps_EnableMCUHTLinkControl, /* Enable HT link control by MCU */ + PHM_PlatformCaps_ABM, /* ABM support.*/ + PHM_PlatformCaps_KongThermalPolicy, /* A thermal policy specific for Kong */ + PHM_PlatformCaps_SwitchVDDNB, /* if the users want to switch VDDNB */ + PHM_PlatformCaps_ULPS, /* support ULPS mode either through ACPI state or ULPS state */ + PHM_PlatformCaps_NativeULPS, /* hardware capable of ULPS state (other than through the ACPI state) */ + PHM_PlatformCaps_EnableMVDDControl, /* indicates that memory voltage can be controlled */ + PHM_PlatformCaps_ControlVDDCI, /* Control VDDCI separately from VDDC. */ + PHM_PlatformCaps_DisableDCODT, /* indicates if DC ODT apply or not */ + PHM_PlatformCaps_DynamicACTiming, /* if the SMC dynamically re-programs MC SEQ register values */ + PHM_PlatformCaps_EnableThermalIntByGPIO, /* enable throttle control through GPIO */ + PHM_PlatformCaps_BootStateOnAlert, /* Go to boot state on alerts, e.g. on an AC->DC transition. */ + PHM_PlatformCaps_DontWaitForVBlankOnAlert, /* Do NOT wait for VBLANK during an alert (e.g. AC->DC transition). */ + PHM_PlatformCaps_Force3DClockSupport, /* indicates if the platform supports force 3D clock. */ + PHM_PlatformCaps_MicrocodeFanControl, /* Fan is controlled by the SMC microcode. */ + PHM_PlatformCaps_AdjustUVDPriorityForSP, + PHM_PlatformCaps_DisableLightSleep, /* Light sleep for evergreen family. */ + PHM_PlatformCaps_DisableMCLS, /* MC Light sleep */ + PHM_PlatformCaps_RegulatorHot, /* Enable throttling on 'regulator hot' events. */ + PHM_PlatformCaps_BACO, /* Support Bus Alive Chip Off mode */ + PHM_PlatformCaps_DisableDPM, /* Disable DPM, supported from Llano */ + PHM_PlatformCaps_DynamicM3Arbiter, /* support dynamically change m3 arbitor parameters */ + PHM_PlatformCaps_SclkDeepSleep, /* support sclk deep sleep */ + PHM_PlatformCaps_DynamicPatchPowerState, /* this ASIC supports to patch power state dynamically */ + PHM_PlatformCaps_ThermalAutoThrottling, /* enabling auto thermal throttling, */ + PHM_PlatformCaps_SumoThermalPolicy, /* A thermal policy specific for Sumo */ + PHM_PlatformCaps_PCIEPerformanceRequest, /* support to change RC voltage */ + PHM_PlatformCaps_BLControlledByGPU, /* support varibright */ + PHM_PlatformCaps_PowerContainment, /* support DPM2 power containment (AKA TDP clamping) */ + PHM_PlatformCaps_SQRamping, /* support DPM2 SQ power throttle */ + PHM_PlatformCaps_CAC, /* support Capacitance * Activity power estimation */ + PHM_PlatformCaps_NIChipsets, /* Northern Island and beyond chipsets */ + PHM_PlatformCaps_TrinityChipsets, /* Trinity chipset */ + PHM_PlatformCaps_EvergreenChipsets, /* Evergreen family chipset */ + PHM_PlatformCaps_PowerControl, /* Cayman and beyond chipsets */ + PHM_PlatformCaps_DisableLSClockGating, /* to disable Light Sleep control for HDP memories */ + PHM_PlatformCaps_BoostState, /* this ASIC supports boost state */ + PHM_PlatformCaps_UserMaxClockForMultiDisplays, /* indicates if max memory clock is used for all status when multiple displays are connected */ + PHM_PlatformCaps_RegWriteDelay, /* indicates if back to back reg write delay is required */ + PHM_PlatformCaps_NonABMSupportInPPLib, /* ABM is not supported in PPLIB, (moved from PPLIB to DAL) */ + PHM_PlatformCaps_GFXDynamicMGPowerGating, /* Enable Dynamic MG PowerGating on Trinity */ + PHM_PlatformCaps_DisableSMUUVDHandshake, /* Disable SMU UVD Handshake */ + 
PHM_PlatformCaps_DTE, /* Support Digital Temperature Estimation */ + PHM_PlatformCaps_W5100Specifc_SmuSkipMsgDTE, /* This is for the feature requested by David B., and Tonny W.*/ + PHM_PlatformCaps_UVDPowerGating, /* enable UVD power gating, supported from Llano */ + PHM_PlatformCaps_UVDDynamicPowerGating, /* enable UVD Dynamic power gating, supported from UVD5 */ + PHM_PlatformCaps_VCEPowerGating, /* Enable VCE power gating, supported for TN and later ASICs */ + PHM_PlatformCaps_SamuPowerGating, /* Enable SAMU power gating, supported for KV and later ASICs */ + PHM_PlatformCaps_UVDDPM, /* UVD clock DPM */ + PHM_PlatformCaps_VCEDPM, /* VCE clock DPM */ + PHM_PlatformCaps_SamuDPM, /* SAMU clock DPM */ + PHM_PlatformCaps_AcpDPM, /* ACP clock DPM */ + PHM_PlatformCaps_SclkDeepSleepAboveLow, /* Enable SCLK Deep Sleep on all DPM states */ + PHM_PlatformCaps_DynamicUVDState, /* Dynamic UVD State */ + PHM_PlatformCaps_WantSAMClkWithDummyBackEnd, /* Set SAM Clk With Dummy Back End */ + PHM_PlatformCaps_WantUVDClkWithDummyBackEnd, /* Set UVD Clk With Dummy Back End */ + PHM_PlatformCaps_WantVCEClkWithDummyBackEnd, /* Set VCE Clk With Dummy Back End */ + PHM_PlatformCaps_WantACPClkWithDummyBackEnd, /* Set SAM Clk With Dummy Back End */ + PHM_PlatformCaps_OD6inACSupport, /* indicates that the ASIC/back end supports OD6 */ + PHM_PlatformCaps_OD6inDCSupport, /* indicates that the ASIC/back end supports OD6 in DC */ + PHM_PlatformCaps_EnablePlatformPowerManagement, /* indicates that Platform Power Management feature is supported */ + PHM_PlatformCaps_SurpriseRemoval, /* indicates that surprise removal feature is requested */ + PHM_PlatformCaps_NewCACVoltage, /* indicates new CAC voltage table support */ + PHM_PlatformCaps_DBRamping, /* for dI/dT feature */ + PHM_PlatformCaps_TDRamping, /* for dI/dT feature */ + PHM_PlatformCaps_TCPRamping, /* for dI/dT feature */ + PHM_PlatformCaps_EnableSMU7ThermalManagement, /* SMC will manage thermal events */ + PHM_PlatformCaps_FPS, /* FPS support */ + PHM_PlatformCaps_ACP, /* ACP support */ + PHM_PlatformCaps_SclkThrottleLowNotification, /* SCLK Throttle Low Notification */ + PHM_PlatformCaps_XDMAEnabled, /* XDMA engine is enabled */ + PHM_PlatformCaps_UseDummyBackEnd, /* use dummy back end */ + PHM_PlatformCaps_EnableDFSBypass, /* Enable DFS bypass */ + PHM_PlatformCaps_VddNBDirectRequest, + PHM_PlatformCaps_PauseMMSessions, + PHM_PlatformCaps_UnTabledHardwareInterface, /* Tableless/direct call hardware interface for CI and newer ASICs */ + PHM_PlatformCaps_SMU7, /* indicates that vpuRecoveryBegin without SMU shutdown */ + PHM_PlatformCaps_RevertGPIO5Polarity, /* indicates revert GPIO5 plarity table support */ + PHM_PlatformCaps_Thermal2GPIO17, /* indicates thermal2GPIO17 table support */ + PHM_PlatformCaps_ThermalOutGPIO, /* indicates ThermalOutGPIO support, pin number is assigned by VBIOS */ + PHM_PlatformCaps_DisableMclkSwitchingForFrameLock, /* Disable memory clock switch during Framelock */ + PHM_PlatformCaps_VRHotGPIOConfigurable, /* indicates VR_HOT GPIO configurable */ + PHM_PlatformCaps_TempInversion, /* enable Temp Inversion feature */ + PHM_PlatformCaps_IOIC3, + PHM_PlatformCaps_ConnectedStandby, + PHM_PlatformCaps_EVV, + PHM_PlatformCaps_EnableLongIdleBACOSupport, + PHM_PlatformCaps_CombinePCCWithThermalSignal, + PHM_PlatformCaps_DisableUsingActualTemperatureForPowerCalc, + PHM_PlatformCaps_StablePState, + PHM_PlatformCaps_OD6PlusinACSupport, + PHM_PlatformCaps_OD6PlusinDCSupport, + PHM_PlatformCaps_ODThermalLimitUnlock, + 
PHM_PlatformCaps_ReducePowerLimit, + PHM_PlatformCaps_ODFuzzyFanControlSupport, + PHM_PlatformCaps_GeminiRegulatorFanControlSupport, + PHM_PlatformCaps_ControlVDDGFX, + PHM_PlatformCaps_BBBSupported, + PHM_PlatformCaps_DisableVoltageIsland, + PHM_PlatformCaps_FanSpeedInTableIsRPM, + PHM_PlatformCaps_GFXClockGatingManagedInCAIL, + PHM_PlatformCaps_IcelandULPSSWWorkAround, + PHM_PlatformCaps_FPSEnhancement, + PHM_PlatformCaps_LoadPostProductionFirmware, + PHM_PlatformCaps_VpuRecoveryInProgress, + PHM_PlatformCaps_Falcon_QuickTransition, + PHM_PlatformCaps_AVFS, + PHM_PlatformCaps_ClockStretcher, + PHM_PlatformCaps_TablelessHardwareInterface, + PHM_PlatformCaps_EnableDriverEVV, + PHM_PlatformCaps_Max +}; + +#define PHM_MAX_NUM_CAPS_BITS_PER_FIELD (sizeof(uint32_t)*8) + +/* Number of uint32_t entries used by CAPS table */ +#define PHM_MAX_NUM_CAPS_ULONG_ENTRIES \ + ((PHM_PlatformCaps_Max + ((PHM_MAX_NUM_CAPS_BITS_PER_FIELD) - 1)) / (PHM_MAX_NUM_CAPS_BITS_PER_FIELD)) + +struct pp_hw_descriptor { + uint32_t hw_caps[PHM_MAX_NUM_CAPS_ULONG_ENTRIES]; +}; + +enum PHM_PerformanceLevelDesignation { + PHM_PerformanceLevelDesignation_Activity, + PHM_PerformanceLevelDesignation_PowerContainment +}; + +typedef enum PHM_PerformanceLevelDesignation PHM_PerformanceLevelDesignation; + +struct PHM_PerformanceLevel { + uint32_t coreClock; + uint32_t memory_clock; + uint32_t vddc; + uint32_t vddci; + uint32_t nonLocalMemoryFreq; + uint32_t nonLocalMemoryWidth; +}; + +typedef struct PHM_PerformanceLevel PHM_PerformanceLevel; + +/* Function for setting a platform cap */ +static inline void phm_cap_set(uint32_t *caps, + enum phm_platform_caps c) +{ + caps[c / PHM_MAX_NUM_CAPS_BITS_PER_FIELD] |= (1UL << + (c & (PHM_MAX_NUM_CAPS_BITS_PER_FIELD - 1))); +} + +static inline void phm_cap_unset(uint32_t *caps, + enum phm_platform_caps c) +{ + caps[c / PHM_MAX_NUM_CAPS_BITS_PER_FIELD] &= ~(1UL << (c & (PHM_MAX_NUM_CAPS_BITS_PER_FIELD - 1))); +} + +static inline bool phm_cap_enabled(const uint32_t *caps, enum phm_platform_caps c) +{ + return (0 != (caps[c / PHM_MAX_NUM_CAPS_BITS_PER_FIELD] & + (1UL << (c & (PHM_MAX_NUM_CAPS_BITS_PER_FIELD - 1))))); +} + +#define PP_PCIEGenInvalid 0xffff +enum PP_PCIEGen { + PP_PCIEGen1 = 0, /* PCIE 1.0 - Transfer rate of 2.5 GT/s */ + PP_PCIEGen2, /*PCIE 2.0 - Transfer rate of 5.0 GT/s */ + PP_PCIEGen3 /*PCIE 3.0 - Transfer rate of 8.0 GT/s */ +}; + +typedef enum PP_PCIEGen PP_PCIEGen; + +#define PP_Min_PCIEGen PP_PCIEGen1 +#define PP_Max_PCIEGen PP_PCIEGen3 +#define PP_Min_PCIELane 1 +#define PP_Max_PCIELane 32 + +enum phm_clock_Type { + PHM_DispClock = 1, + PHM_SClock, + PHM_MemClock +}; + +#define MAX_NUM_CLOCKS 16 + +struct PP_Clocks { + uint32_t engineClock; + uint32_t memoryClock; + uint32_t BusBandwidth; + uint32_t engineClockInSR; +}; + +struct phm_platform_descriptor { + uint32_t platformCaps[PHM_MAX_NUM_CAPS_ULONG_ENTRIES]; + uint32_t vbiosInterruptId; + struct PP_Clocks overdriveLimit; + struct PP_Clocks clockStep; + uint32_t hardwareActivityPerformanceLevels; + uint32_t minimumClocksReductionPercentage; + uint32_t minOverdriveVDDC; + uint32_t maxOverdriveVDDC; + uint32_t overdriveVDDCStep; + uint32_t hardwarePerformanceLevels; + uint16_t powerBudget; + uint32_t TDPLimit; + uint32_t nearTDPLimit; + uint32_t nearTDPLimitAdjusted; + uint32_t SQRampingThreshold; + uint32_t CACLeakage; + uint16_t TDPODLimit; + uint32_t TDPAdjustment; + bool TDPAdjustmentPolarity; + uint16_t LoadLineSlope; + uint32_t VidMinLimit; + uint32_t VidMaxLimit; + uint32_t VidStep; + uint32_t VidAdjustment; + 
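	/*
	 * Illustrative usage sketch (hypothetical caller, not taken from a
	 * real call site in this patch): every enum phm_platform_caps value
	 * is a bit index into a packed array of uint32_t words, either
	 * hw_caps[] in struct pp_hw_descriptor or the platformCaps[] member
	 * of this structure.  The phm_cap_set()/phm_cap_unset()/
	 * phm_cap_enabled() helpers above hide the word (c / 32) and
	 * bit (c % 32) arithmetic, with PHM_MAX_NUM_CAPS_BITS_PER_FIELD == 32:
	 *
	 *	struct pp_hw_descriptor desc = { { 0 } };
	 *
	 *	phm_cap_set(desc.hw_caps, PHM_PlatformCaps_SclkDeepSleep);
	 *	phm_cap_unset(desc.hw_caps, PHM_PlatformCaps_PowerContainment);
	 *
	 *	bool deep_sleep = phm_cap_enabled(desc.hw_caps,
	 *					  PHM_PlatformCaps_SclkDeepSleep);
	 *
	 * Hardware-manager code would typically query the caps stored in
	 * hwmgr->platform_descriptor.platformCaps the same way before
	 * enabling the corresponding feature.
	 */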
bool VidAdjustmentPolarity; +}; + +struct phm_clocks { + uint32_t num_of_entries; + uint32_t clock[MAX_NUM_CLOCKS]; +}; + +enum PP_DAL_POWERLEVEL { + PP_DAL_POWERLEVEL_INVALID = 0, + PP_DAL_POWERLEVEL_ULTRALOW, + PP_DAL_POWERLEVEL_LOW, + PP_DAL_POWERLEVEL_NOMINAL, + PP_DAL_POWERLEVEL_PERFORMANCE, + + PP_DAL_POWERLEVEL_0 = PP_DAL_POWERLEVEL_ULTRALOW, + PP_DAL_POWERLEVEL_1 = PP_DAL_POWERLEVEL_LOW, + PP_DAL_POWERLEVEL_2 = PP_DAL_POWERLEVEL_NOMINAL, + PP_DAL_POWERLEVEL_3 = PP_DAL_POWERLEVEL_PERFORMANCE, + PP_DAL_POWERLEVEL_4 = PP_DAL_POWERLEVEL_3+1, + PP_DAL_POWERLEVEL_5 = PP_DAL_POWERLEVEL_4+1, + PP_DAL_POWERLEVEL_6 = PP_DAL_POWERLEVEL_5+1, + PP_DAL_POWERLEVEL_7 = PP_DAL_POWERLEVEL_6+1, +}; + + +extern int phm_enable_clock_power_gatings(struct pp_hwmgr *hwmgr); +extern int phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool gate); +extern int phm_powergate_vce(struct pp_hwmgr *hwmgr, bool gate); +extern int phm_powerdown_uvd(struct pp_hwmgr *hwmgr); +extern int phm_setup_asic(struct pp_hwmgr *hwmgr); +extern int phm_enable_dynamic_state_management(struct pp_hwmgr *hwmgr); +extern void phm_init_dynamic_caps(struct pp_hwmgr *hwmgr); +extern bool phm_is_hw_access_blocked(struct pp_hwmgr *hwmgr); +extern int phm_block_hw_access(struct pp_hwmgr *hwmgr, bool block); +extern int phm_set_power_state(struct pp_hwmgr *hwmgr, + const struct pp_hw_power_state *pcurrent_state, + const struct pp_hw_power_state *pnew_power_state); + +extern int phm_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, + struct pp_power_state *adjusted_ps, + const struct pp_power_state *current_ps); + +extern int phm_force_dpm_levels(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level); +extern int phm_display_configuration_changed(struct pp_hwmgr *hwmgr); +extern int phm_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr); +extern int phm_register_thermal_interrupt(struct pp_hwmgr *hwmgr, const void *info); +extern int phm_start_thermal_controller(struct pp_hwmgr *hwmgr, struct PP_TemperatureRange *temperature_range); +extern int phm_stop_thermal_controller(struct pp_hwmgr *hwmgr); +extern bool phm_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr); + +extern int phm_check_states_equal(struct pp_hwmgr *hwmgr, + const struct pp_hw_power_state *pstate1, + const struct pp_hw_power_state *pstate2, + bool *equal); + +extern int phm_store_dal_configuration_data(struct pp_hwmgr *hwmgr, + const struct amd_pp_display_configuration *display_config); + +extern int phm_get_dal_power_level(struct pp_hwmgr *hwmgr, + struct amd_pp_dal_clock_info*info); + +extern int phm_set_cpu_power_state(struct pp_hwmgr *hwmgr); + +extern int phm_power_down_asic(struct pp_hwmgr *hwmgr); + +#endif /* _HARDWARE_MANAGER_H_ */ + diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h new file mode 100644 index 000000000000..aeaa3dbba525 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h @@ -0,0 +1,801 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#ifndef _HWMGR_H_ +#define _HWMGR_H_ + +#include +#include "amd_powerplay.h" +#include "pp_instance.h" +#include "hardwaremanager.h" +#include "pp_power_source.h" +#include "hwmgr_ppt.h" +#include "ppatomctrl.h" +#include "hwmgr_ppt.h" + +struct pp_instance; +struct pp_hwmgr; +struct pp_hw_power_state; +struct pp_power_state; +struct PP_VCEState; +struct phm_fan_speed_info; +struct pp_atomctrl_voltage_table; + + +enum DISPLAY_GAP { + DISPLAY_GAP_VBLANK_OR_WM = 0, /* Wait for vblank or MCHG watermark. */ + DISPLAY_GAP_VBLANK = 1, /* Wait for vblank. */ + DISPLAY_GAP_WATERMARK = 2, /* Wait for MCHG watermark. (Note that HW may deassert WM in VBI depending on DC_STUTTER_CNTL.) */ + DISPLAY_GAP_IGNORE = 3 /* Do not wait. 
*/ +}; +typedef enum DISPLAY_GAP DISPLAY_GAP; + + +struct vi_dpm_level { + bool enabled; + uint32_t value; + uint32_t param1; +}; + +struct vi_dpm_table { + uint32_t count; + struct vi_dpm_level dpm_level[1]; +}; + +enum PP_Result { + PP_Result_TableImmediateExit = 0x13, +}; + +#define PCIE_PERF_REQ_REMOVE_REGISTRY 0 +#define PCIE_PERF_REQ_FORCE_LOWPOWER 1 +#define PCIE_PERF_REQ_GEN1 2 +#define PCIE_PERF_REQ_GEN2 3 +#define PCIE_PERF_REQ_GEN3 4 + +enum PHM_BackEnd_Magic { + PHM_Dummy_Magic = 0xAA5555AA, + PHM_RV770_Magic = 0xDCBAABCD, + PHM_Kong_Magic = 0x239478DF, + PHM_NIslands_Magic = 0x736C494E, + PHM_Sumo_Magic = 0x8339FA11, + PHM_SIslands_Magic = 0x369431AC, + PHM_Trinity_Magic = 0x96751873, + PHM_CIslands_Magic = 0x38AC78B0, + PHM_Kv_Magic = 0xDCBBABC0, + PHM_VIslands_Magic = 0x20130307, + PHM_Cz_Magic = 0x67DCBA25 +}; + + +#define PHM_PCIE_POWERGATING_TARGET_GFX 0 +#define PHM_PCIE_POWERGATING_TARGET_DDI 1 +#define PHM_PCIE_POWERGATING_TARGET_PLLCASCADE 2 +#define PHM_PCIE_POWERGATING_TARGET_PHY 3 + +typedef int (*phm_table_function)(struct pp_hwmgr *hwmgr, void *input, + void *output, void *storage, int result); + +typedef bool (*phm_check_function)(struct pp_hwmgr *hwmgr); + +struct phm_set_power_state_input { + const struct pp_hw_power_state *pcurrent_state; + const struct pp_hw_power_state *pnew_state; +}; + +struct phm_acp_arbiter { + uint32_t acpclk; +}; + +struct phm_uvd_arbiter { + uint32_t vclk; + uint32_t dclk; + uint32_t vclk_ceiling; + uint32_t dclk_ceiling; +}; + +struct phm_vce_arbiter { + uint32_t evclk; + uint32_t ecclk; +}; + +struct phm_gfx_arbiter { + uint32_t sclk; + uint32_t mclk; + uint32_t sclk_over_drive; + uint32_t mclk_over_drive; + uint32_t sclk_threshold; + uint32_t num_cus; +}; + +/* Entries in the master tables */ +struct phm_master_table_item { + phm_check_function isFunctionNeededInRuntimeTable; + phm_table_function tableFunction; +}; + +enum phm_master_table_flag { + PHM_MasterTableFlag_None = 0, + PHM_MasterTableFlag_ExitOnError = 1, +}; + +/* The header of the master tables */ +struct phm_master_table_header { + uint32_t storage_size; + uint32_t flags; + struct phm_master_table_item *master_list; +}; + +struct phm_runtime_table_header { + uint32_t storage_size; + bool exit_error; + phm_table_function *function_list; +}; + +struct phm_clock_array { + uint32_t count; + uint32_t values[1]; +}; + +struct phm_clock_voltage_dependency_record { + uint32_t clk; + uint32_t v; +}; + +struct phm_vceclock_voltage_dependency_record { + uint32_t ecclk; + uint32_t evclk; + uint32_t v; +}; + +struct phm_uvdclock_voltage_dependency_record { + uint32_t vclk; + uint32_t dclk; + uint32_t v; +}; + +struct phm_samuclock_voltage_dependency_record { + uint32_t samclk; + uint32_t v; +}; + +struct phm_acpclock_voltage_dependency_record { + uint32_t acpclk; + uint32_t v; +}; + +struct phm_clock_voltage_dependency_table { + uint32_t count; /* Number of entries. */ + struct phm_clock_voltage_dependency_record entries[1]; /* Dynamically allocate count entries. 
*/ +}; + +struct phm_phase_shedding_limits_record { + uint32_t Voltage; + uint32_t Sclk; + uint32_t Mclk; +}; + + +extern int phm_dispatch_table(struct pp_hwmgr *hwmgr, + struct phm_runtime_table_header *rt_table, + void *input, void *output); + +extern int phm_construct_table(struct pp_hwmgr *hwmgr, + struct phm_master_table_header *master_table, + struct phm_runtime_table_header *rt_table); + +extern int phm_destroy_table(struct pp_hwmgr *hwmgr, + struct phm_runtime_table_header *rt_table); + + +struct phm_uvd_clock_voltage_dependency_record { + uint32_t vclk; + uint32_t dclk; + uint32_t v; +}; + +struct phm_uvd_clock_voltage_dependency_table { + uint8_t count; + struct phm_uvd_clock_voltage_dependency_record entries[1]; +}; + +struct phm_acp_clock_voltage_dependency_record { + uint32_t acpclk; + uint32_t v; +}; + +struct phm_acp_clock_voltage_dependency_table { + uint32_t count; + struct phm_acp_clock_voltage_dependency_record entries[1]; +}; + +struct phm_vce_clock_voltage_dependency_record { + uint32_t ecclk; + uint32_t evclk; + uint32_t v; +}; + +struct phm_phase_shedding_limits_table { + uint32_t count; + struct phm_phase_shedding_limits_record entries[1]; +}; + +struct phm_vceclock_voltage_dependency_table { + uint8_t count; /* Number of entries. */ + struct phm_vceclock_voltage_dependency_record entries[1]; /* Dynamically allocate count entries. */ +}; + +struct phm_uvdclock_voltage_dependency_table { + uint8_t count; /* Number of entries. */ + struct phm_uvdclock_voltage_dependency_record entries[1]; /* Dynamically allocate count entries. */ +}; + +struct phm_samuclock_voltage_dependency_table { + uint8_t count; /* Number of entries. */ + struct phm_samuclock_voltage_dependency_record entries[1]; /* Dynamically allocate count entries. */ +}; + +struct phm_acpclock_voltage_dependency_table { + uint32_t count; /* Number of entries. */ + struct phm_acpclock_voltage_dependency_record entries[1]; /* Dynamically allocate count entries. 
*/ +}; + +struct phm_vce_clock_voltage_dependency_table { + uint8_t count; + struct phm_vce_clock_voltage_dependency_record entries[1]; +}; + +struct pp_hwmgr_func { + int (*backend_init)(struct pp_hwmgr *hw_mgr); + int (*backend_fini)(struct pp_hwmgr *hw_mgr); + int (*asic_setup)(struct pp_hwmgr *hw_mgr); + int (*get_power_state_size)(struct pp_hwmgr *hw_mgr); + + int (*apply_state_adjust_rules)(struct pp_hwmgr *hwmgr, + struct pp_power_state *prequest_ps, + const struct pp_power_state *pcurrent_ps); + + int (*force_dpm_level)(struct pp_hwmgr *hw_mgr, + enum amd_dpm_forced_level level); + + int (*dynamic_state_management_enable)( + struct pp_hwmgr *hw_mgr); + + int (*patch_boot_state)(struct pp_hwmgr *hwmgr, + struct pp_hw_power_state *hw_ps); + + int (*get_pp_table_entry)(struct pp_hwmgr *hwmgr, + unsigned long, struct pp_power_state *); + int (*get_num_of_pp_table_entries)(struct pp_hwmgr *hwmgr); + int (*powerdown_uvd)(struct pp_hwmgr *hwmgr); + int (*powergate_vce)(struct pp_hwmgr *hwmgr, bool bgate); + int (*powergate_uvd)(struct pp_hwmgr *hwmgr, bool bgate); + int (*get_mclk)(struct pp_hwmgr *hwmgr, bool low); + int (*get_sclk)(struct pp_hwmgr *hwmgr, bool low); + int (*power_state_set)(struct pp_hwmgr *hwmgr, + const void *state); + void (*print_current_perforce_level)(struct pp_hwmgr *hwmgr, + struct seq_file *m); + int (*enable_clock_power_gating)(struct pp_hwmgr *hwmgr); + int (*notify_smc_display_config_after_ps_adjustment)(struct pp_hwmgr *hwmgr); + int (*display_config_changed)(struct pp_hwmgr *hwmgr); + int (*disable_clock_power_gating)(struct pp_hwmgr *hwmgr); + int (*update_clock_gatings)(struct pp_hwmgr *hwmgr, + const uint32_t *msg_id); + int (*set_max_fan_rpm_output)(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_pwm); + int (*set_max_fan_pwm_output)(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_pwm); + int (*get_temperature)(struct pp_hwmgr *hwmgr); + int (*stop_thermal_controller)(struct pp_hwmgr *hwmgr); + int (*get_fan_speed_info)(struct pp_hwmgr *hwmgr, struct phm_fan_speed_info *fan_speed_info); + int (*set_fan_control_mode)(struct pp_hwmgr *hwmgr, uint32_t mode); + int (*get_fan_control_mode)(struct pp_hwmgr *hwmgr); + int (*set_fan_speed_percent)(struct pp_hwmgr *hwmgr, uint32_t percent); + int (*get_fan_speed_percent)(struct pp_hwmgr *hwmgr, uint32_t *speed); + int (*set_fan_speed_rpm)(struct pp_hwmgr *hwmgr, uint32_t percent); + int (*get_fan_speed_rpm)(struct pp_hwmgr *hwmgr, uint32_t *speed); + int (*reset_fan_speed_to_default)(struct pp_hwmgr *hwmgr); + int (*uninitialize_thermal_controller)(struct pp_hwmgr *hwmgr); + int (*register_internal_thermal_interrupt)(struct pp_hwmgr *hwmgr, + const void *thermal_interrupt_info); + bool (*check_smc_update_required_for_display_configuration)(struct pp_hwmgr *hwmgr); + int (*check_states_equal)(struct pp_hwmgr *hwmgr, + const struct pp_hw_power_state *pstate1, + const struct pp_hw_power_state *pstate2, + bool *equal); + int (*set_cpu_power_state)(struct pp_hwmgr *hwmgr); + int (*store_cc6_data)(struct pp_hwmgr *hwmgr, uint32_t separation_time, + bool cc6_disable, bool pstate_disable, + bool pstate_switch_disable); + int (*get_dal_power_level)(struct pp_hwmgr *hwmgr, + struct amd_pp_dal_clock_info *info); + int (*power_off_asic)(struct pp_hwmgr *hwmgr); +}; + +struct pp_table_func { + int (*pptable_init)(struct pp_hwmgr *hw_mgr); + int (*pptable_fini)(struct pp_hwmgr *hw_mgr); + int (*pptable_get_number_of_vce_state_table_entries)(struct pp_hwmgr *hw_mgr); + int (*pptable_get_vce_state_table_entry)( + struct pp_hwmgr 
*hwmgr, + unsigned long i, + struct PP_VCEState *vce_state, + void **clock_info, + unsigned long *flag); +}; + +union phm_cac_leakage_record { + struct { + uint16_t Vddc; /* in CI, we use it for StdVoltageHiSidd */ + uint32_t Leakage; /* in CI, we use it for StdVoltageLoSidd */ + }; + struct { + uint16_t Vddc1; + uint16_t Vddc2; + uint16_t Vddc3; + }; +}; + +struct phm_cac_leakage_table { + uint32_t count; + union phm_cac_leakage_record entries[1]; +}; + +struct phm_samu_clock_voltage_dependency_record { + uint32_t samclk; + uint32_t v; +}; + + +struct phm_samu_clock_voltage_dependency_table { + uint8_t count; + struct phm_samu_clock_voltage_dependency_record entries[1]; +}; + +struct phm_cac_tdp_table { + uint16_t usTDP; + uint16_t usConfigurableTDP; + uint16_t usTDC; + uint16_t usBatteryPowerLimit; + uint16_t usSmallPowerLimit; + uint16_t usLowCACLeakage; + uint16_t usHighCACLeakage; + uint16_t usMaximumPowerDeliveryLimit; + uint16_t usOperatingTempMinLimit; + uint16_t usOperatingTempMaxLimit; + uint16_t usOperatingTempStep; + uint16_t usOperatingTempHyst; + uint16_t usDefaultTargetOperatingTemp; + uint16_t usTargetOperatingTemp; + uint16_t usPowerTuneDataSetID; + uint16_t usSoftwareShutdownTemp; + uint16_t usClockStretchAmount; + uint16_t usTemperatureLimitHotspot; + uint16_t usTemperatureLimitLiquid1; + uint16_t usTemperatureLimitLiquid2; + uint16_t usTemperatureLimitVrVddc; + uint16_t usTemperatureLimitVrMvdd; + uint16_t usTemperatureLimitPlx; + uint8_t ucLiquid1_I2C_address; + uint8_t ucLiquid2_I2C_address; + uint8_t ucLiquid_I2C_Line; + uint8_t ucVr_I2C_address; + uint8_t ucVr_I2C_Line; + uint8_t ucPlx_I2C_address; + uint8_t ucPlx_I2C_Line; +}; + +struct phm_ppm_table { + uint8_t ppm_design; + uint16_t cpu_core_number; + uint32_t platform_tdp; + uint32_t small_ac_platform_tdp; + uint32_t platform_tdc; + uint32_t small_ac_platform_tdc; + uint32_t apu_tdp; + uint32_t dgpu_tdp; + uint32_t dgpu_ulv_power; + uint32_t tj_max; +}; + +struct phm_vq_budgeting_record { + uint32_t ulCUs; + uint32_t ulSustainableSOCPowerLimitLow; + uint32_t ulSustainableSOCPowerLimitHigh; + uint32_t ulMinSclkLow; + uint32_t ulMinSclkHigh; + uint8_t ucDispConfig; + uint32_t ulDClk; + uint32_t ulEClk; + uint32_t ulSustainableSclk; + uint32_t ulSustainableCUs; +}; + +struct phm_vq_budgeting_table { + uint8_t numEntries; + struct phm_vq_budgeting_record entries[1]; +}; + +struct phm_clock_and_voltage_limits { + uint32_t sclk; + uint32_t mclk; + uint16_t vddc; + uint16_t vddci; + uint16_t vddgfx; +}; + +/* Structure to hold PPTable information */ + +struct phm_ppt_v1_information { + struct phm_ppt_v1_clock_voltage_dependency_table *vdd_dep_on_sclk; + struct phm_ppt_v1_clock_voltage_dependency_table *vdd_dep_on_mclk; + struct phm_clock_array *valid_sclk_values; + struct phm_clock_array *valid_mclk_values; + struct phm_clock_and_voltage_limits max_clock_voltage_on_dc; + struct phm_clock_and_voltage_limits max_clock_voltage_on_ac; + struct phm_clock_voltage_dependency_table *vddc_dep_on_dal_pwrl; + struct phm_ppm_table *ppm_parameter_table; + struct phm_cac_tdp_table *cac_dtp_table; + struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_dep_table; + struct phm_ppt_v1_voltage_lookup_table *vddc_lookup_table; + struct phm_ppt_v1_voltage_lookup_table *vddgfx_lookup_table; + struct phm_ppt_v1_pcie_table *pcie_table; + uint16_t us_ulv_voltage_offset; +}; + +struct phm_dynamic_state_info { + struct phm_clock_voltage_dependency_table *vddc_dependency_on_sclk; + struct phm_clock_voltage_dependency_table 
*vddci_dependency_on_mclk; + struct phm_clock_voltage_dependency_table *vddc_dependency_on_mclk; + struct phm_clock_voltage_dependency_table *mvdd_dependency_on_mclk; + struct phm_clock_voltage_dependency_table *vddc_dep_on_dal_pwrl; + struct phm_clock_array *valid_sclk_values; + struct phm_clock_array *valid_mclk_values; + struct phm_clock_and_voltage_limits max_clock_voltage_on_dc; + struct phm_clock_and_voltage_limits max_clock_voltage_on_ac; + uint32_t mclk_sclk_ratio; + uint32_t sclk_mclk_delta; + uint32_t vddc_vddci_delta; + uint32_t min_vddc_for_pcie_gen2; + struct phm_cac_leakage_table *cac_leakage_table; + struct phm_phase_shedding_limits_table *vddc_phase_shed_limits_table; + + struct phm_vce_clock_voltage_dependency_table + *vce_clock_voltage_dependency_table; + struct phm_uvd_clock_voltage_dependency_table + *uvd_clock_voltage_dependency_table; + struct phm_acp_clock_voltage_dependency_table + *acp_clock_voltage_dependency_table; + struct phm_samu_clock_voltage_dependency_table + *samu_clock_voltage_dependency_table; + + struct phm_ppm_table *ppm_parameter_table; + struct phm_cac_tdp_table *cac_dtp_table; + struct phm_clock_voltage_dependency_table *vdd_gfx_dependency_on_sclk; + struct phm_vq_budgeting_table *vq_budgeting_table; +}; + +struct pp_fan_info { + bool bNoFan; + uint8_t ucTachometerPulsesPerRevolution; + uint32_t ulMinRPM; + uint32_t ulMaxRPM; +}; + +struct pp_advance_fan_control_parameters { + uint16_t usTMin; /* The temperature, in 0.01 centigrades, below which we just run at a minimal PWM. */ + uint16_t usTMed; /* The middle temperature where we change slopes. */ + uint16_t usTHigh; /* The high temperature for setting the second slope. */ + uint16_t usPWMMin; /* The minimum PWM value in percent (0.01% increments). */ + uint16_t usPWMMed; /* The PWM value (in percent) at TMed. */ + uint16_t usPWMHigh; /* The PWM value at THigh. */ + uint8_t ucTHyst; /* Temperature hysteresis. Integer. */ + uint32_t ulCycleDelay; /* The time between two invocations of the fan control routine in microseconds. */ + uint16_t usTMax; /* The max temperature */ + uint8_t ucFanControlMode; + uint16_t usFanPWMMinLimit; + uint16_t usFanPWMMaxLimit; + uint16_t usFanPWMStep; + uint16_t usDefaultMaxFanPWM; + uint16_t usFanOutputSensitivity; + uint16_t usDefaultFanOutputSensitivity; + uint16_t usMaxFanPWM; /* The max Fan PWM value for Fuzzy Fan Control feature */ + uint16_t usFanRPMMinLimit; /* Minimum limit range in percentage, need to calculate based on minRPM/MaxRpm */ + uint16_t usFanRPMMaxLimit; /* Maximum limit range in percentage, usually set to 100% by default */ + uint16_t usFanRPMStep; /* Step increments/decerements, in percent */ + uint16_t usDefaultMaxFanRPM; /* The max Fan RPM value for Fuzzy Fan Control feature, default from PPTable */ + uint16_t usMaxFanRPM; /* The max Fan RPM value for Fuzzy Fan Control feature, user defined */ + uint16_t usFanCurrentLow; /* Low current */ + uint16_t usFanCurrentHigh; /* High current */ + uint16_t usFanRPMLow; /* Low RPM */ + uint16_t usFanRPMHigh; /* High RPM */ + uint32_t ulMinFanSCLKAcousticLimit; /* Minimum Fan Controller SCLK Frequency Acoustic Limit. */ + uint8_t ucTargetTemperature; /* Advanced fan controller target temperature. */ + uint8_t ucMinimumPWMLimit; /* The minimum PWM that the advanced fan controller can set. This should be set to the highest PWM that will run the fan at its lowest RPM. 
*/ + uint16_t usFanGainEdge; /* The following is added for Fiji */ + uint16_t usFanGainHotspot; + uint16_t usFanGainLiquid; + uint16_t usFanGainVrVddc; + uint16_t usFanGainVrMvdd; + uint16_t usFanGainPlx; + uint16_t usFanGainHbm; +}; + +struct pp_thermal_controller_info { + uint8_t ucType; + uint8_t ucI2cLine; + uint8_t ucI2cAddress; + struct pp_fan_info fanInfo; + struct pp_advance_fan_control_parameters advanceFanControlParameters; +}; + +struct phm_microcode_version_info { + uint32_t SMC; + uint32_t DMCU; + uint32_t MC; + uint32_t NB; +}; + +/** + * The main hardware manager structure. + */ +struct pp_hwmgr { + uint32_t chip_family; + uint32_t chip_id; + uint32_t hw_revision; + uint32_t sub_sys_id; + uint32_t sub_vendor_id; + + void *device; + struct pp_smumgr *smumgr; + const void *soft_pp_table; + bool need_pp_table_upload; + enum amd_dpm_forced_level dpm_level; + bool block_hw_access; + struct phm_gfx_arbiter gfx_arbiter; + struct phm_acp_arbiter acp_arbiter; + struct phm_uvd_arbiter uvd_arbiter; + struct phm_vce_arbiter vce_arbiter; + uint32_t usec_timeout; + void *pptable; + struct phm_platform_descriptor platform_descriptor; + void *backend; + enum PP_DAL_POWERLEVEL dal_power_level; + struct phm_dynamic_state_info dyn_state; + struct phm_runtime_table_header setup_asic; + struct phm_runtime_table_header power_down_asic; + struct phm_runtime_table_header disable_dynamic_state_management; + struct phm_runtime_table_header enable_dynamic_state_management; + struct phm_runtime_table_header set_power_state; + struct phm_runtime_table_header enable_clock_power_gatings; + struct phm_runtime_table_header display_configuration_changed; + struct phm_runtime_table_header start_thermal_controller; + struct phm_runtime_table_header set_temperature_range; + const struct pp_hwmgr_func *hwmgr_func; + const struct pp_table_func *pptable_func; + struct pp_power_state *ps; + enum pp_power_source power_source; + uint32_t num_ps; + struct pp_thermal_controller_info thermal_controller; + bool fan_ctrl_is_in_default_mode; + uint32_t fan_ctrl_default_mode; + uint32_t tmin; + struct phm_microcode_version_info microcode_version_info; + uint32_t ps_size; + struct pp_power_state *current_ps; + struct pp_power_state *request_ps; + struct pp_power_state *boot_ps; + struct pp_power_state *uvd_ps; + struct amd_pp_display_configuration display_config; +}; + + +extern int hwmgr_init(struct amd_pp_init *pp_init, + struct pp_instance *handle); + +extern int hwmgr_fini(struct pp_hwmgr *hwmgr); + +extern int hw_init_power_state_table(struct pp_hwmgr *hwmgr); + +extern int phm_wait_on_register(struct pp_hwmgr *hwmgr, uint32_t index, + uint32_t value, uint32_t mask); + +extern int phm_wait_for_register_unequal(struct pp_hwmgr *hwmgr, + uint32_t index, uint32_t value, uint32_t mask); + +extern uint32_t phm_read_indirect_register(struct pp_hwmgr *hwmgr, + uint32_t indirect_port, uint32_t index); + +extern void phm_write_indirect_register(struct pp_hwmgr *hwmgr, + uint32_t indirect_port, + uint32_t index, + uint32_t value); + +extern void phm_wait_on_indirect_register(struct pp_hwmgr *hwmgr, + uint32_t indirect_port, + uint32_t index, + uint32_t value, + uint32_t mask); + +extern void phm_wait_for_indirect_register_unequal( + struct pp_hwmgr *hwmgr, + uint32_t indirect_port, + uint32_t index, + uint32_t value, + uint32_t mask); + +extern bool phm_cf_want_uvd_power_gating(struct pp_hwmgr *hwmgr); +extern bool phm_cf_want_vce_power_gating(struct pp_hwmgr *hwmgr); +extern bool phm_cf_want_microcode_fan_ctrl(struct pp_hwmgr 
*hwmgr); + +extern int phm_trim_voltage_table(struct pp_atomctrl_voltage_table *vol_table); +extern int phm_get_svi2_mvdd_voltage_table(struct pp_atomctrl_voltage_table *vol_table, phm_ppt_v1_clock_voltage_dependency_table *dep_table); +extern int phm_get_svi2_vddci_voltage_table(struct pp_atomctrl_voltage_table *vol_table, phm_ppt_v1_clock_voltage_dependency_table *dep_table); +extern int phm_get_svi2_vdd_voltage_table(struct pp_atomctrl_voltage_table *vol_table, phm_ppt_v1_voltage_lookup_table *lookup_table); +extern void phm_trim_voltage_table_to_fit_state_table(uint32_t max_vol_steps, struct pp_atomctrl_voltage_table *vol_table); +extern int phm_reset_single_dpm_table(void *table, uint32_t count, int max); +extern void phm_setup_pcie_table_entry(void *table, uint32_t index, uint32_t pcie_gen, uint32_t pcie_lanes); +extern int32_t phm_get_dpm_level_enable_mask_value(void *table); +extern uint8_t phm_get_voltage_index(struct phm_ppt_v1_voltage_lookup_table *lookup_table, uint16_t voltage); +extern uint16_t phm_find_closest_vddci(struct pp_atomctrl_voltage_table *vddci_table, uint16_t vddci); +extern int phm_find_boot_level(void *table, uint32_t value, uint32_t *boot_level); +extern int phm_get_sclk_for_voltage_evv(struct pp_hwmgr *hwmgr, phm_ppt_v1_voltage_lookup_table *lookup_table, + uint16_t virtual_voltage_id, int32_t *sclk); +extern int phm_initializa_dynamic_state_adjustment_rule_settings(struct pp_hwmgr *hwmgr); +extern int phm_hwmgr_backend_fini(struct pp_hwmgr *hwmgr); +extern uint32_t phm_get_lowest_enabled_level(struct pp_hwmgr *hwmgr, uint32_t mask); + + +#define PHM_ENTIRE_REGISTER_MASK 0xFFFFFFFFU + +#define PHM_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT +#define PHM_FIELD_MASK(reg, field) reg##__##field##_MASK + +#define PHM_SET_FIELD(origval, reg, field, fieldval) \ + (((origval) & ~PHM_FIELD_MASK(reg, field)) | \ + (PHM_FIELD_MASK(reg, field) & ((fieldval) << PHM_FIELD_SHIFT(reg, field)))) + +#define PHM_GET_FIELD(value, reg, field) \ + (((value) & PHM_FIELD_MASK(reg, field)) >> \ + PHM_FIELD_SHIFT(reg, field)) + + +#define PHM_WAIT_REGISTER_GIVEN_INDEX(hwmgr, index, value, mask) \ + phm_wait_on_register(hwmgr, index, value, mask) + +#define PHM_WAIT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, index, value, mask) \ + phm_wait_for_register_unequal(hwmgr, index, value, mask) + +#define PHM_WAIT_INDIRECT_REGISTER_GIVEN_INDEX(hwmgr, port, index, value, mask) \ + phm_wait_on_indirect_register(hwmgr, mm##port##_INDEX, index, value, mask) + +#define PHM_WAIT_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, port, index, value, mask) \ + phm_wait_for_indirect_register_unequal(hwmgr, mm##port##_INDEX, index, value, mask) + +#define PHM_WAIT_VFPF_INDIRECT_REGISTER_GIVEN_INDEX(hwmgr, port, index, value, mask) \ + phm_wait_on_indirect_register(hwmgr, mm##port##_INDEX_0, index, value, mask) + +#define PHM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, port, index, value, mask) \ + phm_wait_for_indirect_register_unequal(hwmgr, mm##port##_INDEX_0, index, value, mask) + +/* Operations on named registers. 
*/ + +#define PHM_WAIT_REGISTER(hwmgr, reg, value, mask) \ + PHM_WAIT_REGISTER_GIVEN_INDEX(hwmgr, mm##reg, value, mask) + +#define PHM_WAIT_REGISTER_UNEQUAL(hwmgr, reg, value, mask) \ + PHM_WAIT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, mm##reg, value, mask) + +#define PHM_WAIT_INDIRECT_REGISTER(hwmgr, port, reg, value, mask) \ + PHM_WAIT_INDIRECT_REGISTER_GIVEN_INDEX(hwmgr, port, ix##reg, value, mask) + +#define PHM_WAIT_INDIRECT_REGISTER_UNEQUAL(hwmgr, port, reg, value, mask) \ + PHM_WAIT_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, port, ix##reg, value, mask) + +#define PHM_WAIT_VFPF_INDIRECT_REGISTER(hwmgr, port, reg, value, mask) \ + PHM_WAIT_VFPF_INDIRECT_REGISTER_GIVEN_INDEX(hwmgr, port, ix##reg, value, mask) + +#define PHM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL(hwmgr, port, reg, value, mask) \ + PHM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, port, ix##reg, value, mask) + +/* Operations on named fields. */ + +#define PHM_READ_FIELD(device, reg, field) \ + PHM_GET_FIELD(cgs_read_register(device, mm##reg), reg, field) + +#define PHM_READ_INDIRECT_FIELD(device, port, reg, field) \ + PHM_GET_FIELD(cgs_read_ind_register(device, port, ix##reg), \ + reg, field) + +#define PHM_READ_VFPF_INDIRECT_FIELD(device, port, reg, field) \ + PHM_GET_FIELD(cgs_read_ind_register(device, port, ix##reg), \ + reg, field) + +#define PHM_WRITE_FIELD(device, reg, field, fieldval) \ + cgs_write_register(device, mm##reg, PHM_SET_FIELD( \ + cgs_read_register(device, mm##reg), reg, field, fieldval)) + +#define PHM_WRITE_INDIRECT_FIELD(device, port, reg, field, fieldval) \ + cgs_write_ind_register(device, port, ix##reg, \ + PHM_SET_FIELD(cgs_read_ind_register(device, port, ix##reg), \ + reg, field, fieldval)) + +#define PHM_WRITE_VFPF_INDIRECT_FIELD(device, port, reg, field, fieldval) \ + cgs_write_ind_register(device, port, ix##reg, \ + PHM_SET_FIELD(cgs_read_ind_register(device, port, ix##reg), \ + reg, field, fieldval)) + +#define PHM_WAIT_FIELD(hwmgr, reg, field, fieldval) \ + PHM_WAIT_REGISTER(hwmgr, reg, (fieldval) \ + << PHM_FIELD_SHIFT(reg, field), PHM_FIELD_MASK(reg, field)) + +#define PHM_WAIT_INDIRECT_FIELD(hwmgr, port, reg, field, fieldval) \ + PHM_WAIT_INDIRECT_REGISTER(hwmgr, port, reg, (fieldval) \ + << PHM_FIELD_SHIFT(reg, field), PHM_FIELD_MASK(reg, field)) + +#define PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, port, reg, field, fieldval) \ + PHM_WAIT_VFPF_INDIRECT_REGISTER(hwmgr, port, reg, (fieldval) \ + << PHM_FIELD_SHIFT(reg, field), PHM_FIELD_MASK(reg, field)) + +#define PHM_WAIT_FIELD_UNEQUAL(hwmgr, reg, field, fieldval) \ + PHM_WAIT_REGISTER_UNEQUAL(hwmgr, reg, (fieldval) \ + << PHM_FIELD_SHIFT(reg, field), PHM_FIELD_MASK(reg, field)) + +#define PHM_WAIT_INDIRECT_FIELD_UNEQUAL(hwmgr, port, reg, field, fieldval) \ + PHM_WAIT_INDIRECT_REGISTER_UNEQUAL(hwmgr, port, reg, (fieldval) \ + << PHM_FIELD_SHIFT(reg, field), PHM_FIELD_MASK(reg, field)) + +#define PHM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(hwmgr, port, reg, field, fieldval) \ + PHM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL(hwmgr, port, reg, (fieldval) \ + << PHM_FIELD_SHIFT(reg, field), PHM_FIELD_MASK(reg, field)) + +/* Operations on arrays of registers & fields. 
*/ + +#define PHM_READ_ARRAY_REGISTER(device, reg, offset) \ + cgs_read_register(device, mm##reg + (offset)) + +#define PHM_WRITE_ARRAY_REGISTER(device, reg, offset, value) \ + cgs_write_register(device, mm##reg + (offset), value) + +#define PHM_WAIT_ARRAY_REGISTER(hwmgr, reg, offset, value, mask) \ + PHM_WAIT_REGISTER_GIVEN_INDEX(hwmgr, mm##reg + (offset), value, mask) + +#define PHM_WAIT_ARRAY_REGISTER_UNEQUAL(hwmgr, reg, offset, value, mask) \ + PHM_WAIT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, mm##reg + (offset), value, mask) + +#define PHM_READ_ARRAY_FIELD(hwmgr, reg, offset, field) \ + PHM_GET_FIELD(PHM_READ_ARRAY_REGISTER(hwmgr->device, reg, offset), reg, field) + +#define PHM_WRITE_ARRAY_FIELD(hwmgr, reg, offset, field, fieldvalue) \ + PHM_WRITE_ARRAY_REGISTER(hwmgr->device, reg, offset, \ + PHM_SET_FIELD(PHM_READ_ARRAY_REGISTER(hwmgr->device, reg, offset), \ + reg, field, fieldvalue)) + +#define PHM_WAIT_ARRAY_FIELD(hwmgr, reg, offset, field, fieldvalue) \ + PHM_WAIT_REGISTER_GIVEN_INDEX(hwmgr, mm##reg + (offset), \ + (fieldvalue) << PHM_FIELD_SHIFT(reg, field), \ + PHM_FIELD_MASK(reg, field)) + +#define PHM_WAIT_ARRAY_FIELD_UNEQUAL(hwmgr, reg, offset, field, fieldvalue) \ + PHM_WAIT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, mm##reg + (offset), \ + (fieldvalue) << PHM_FIELD_SHIFT(reg, field), \ + PHM_FIELD_MASK(reg, field)) + +#endif /* _HWMGR_H_ */ diff --git a/drivers/gpu/drm/amd/powerplay/inc/power_state.h b/drivers/gpu/drm/amd/powerplay/inc/power_state.h new file mode 100644 index 000000000000..a3f0ce4d5835 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/inc/power_state.h @@ -0,0 +1,200 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#ifndef PP_POWERSTATE_H +#define PP_POWERSTATE_H + +struct pp_hw_power_state { + unsigned int magic; +}; + +struct pp_power_state; + + +#define PP_INVALID_POWER_STATE_ID (0) + + +/* + * An item of a list containing Power States. 
+ */ + +struct PP_StateLinkedList { + struct pp_power_state *next; + struct pp_power_state *prev; +}; + + +enum PP_StateUILabel { + PP_StateUILabel_None, + PP_StateUILabel_Battery, + PP_StateUILabel_MiddleLow, + PP_StateUILabel_Balanced, + PP_StateUILabel_MiddleHigh, + PP_StateUILabel_Performance, + PP_StateUILabel_BACO +}; + +enum PP_StateClassificationFlag { + PP_StateClassificationFlag_Boot = 0x0001, + PP_StateClassificationFlag_Thermal = 0x0002, + PP_StateClassificationFlag_LimitedPowerSource = 0x0004, + PP_StateClassificationFlag_Rest = 0x0008, + PP_StateClassificationFlag_Forced = 0x0010, + PP_StateClassificationFlag_User3DPerformance = 0x0020, + PP_StateClassificationFlag_User2DPerformance = 0x0040, + PP_StateClassificationFlag_3DPerformance = 0x0080, + PP_StateClassificationFlag_ACOverdriveTemplate = 0x0100, + PP_StateClassificationFlag_Uvd = 0x0200, + PP_StateClassificationFlag_3DPerformanceLow = 0x0400, + PP_StateClassificationFlag_ACPI = 0x0800, + PP_StateClassificationFlag_HD2 = 0x1000, + PP_StateClassificationFlag_UvdHD = 0x2000, + PP_StateClassificationFlag_UvdSD = 0x4000, + PP_StateClassificationFlag_UserDCPerformance = 0x8000, + PP_StateClassificationFlag_DCOverdriveTemplate = 0x10000, + PP_StateClassificationFlag_BACO = 0x20000, + PP_StateClassificationFlag_LimitedPowerSource_2 = 0x40000, + PP_StateClassificationFlag_ULV = 0x80000, + PP_StateClassificationFlag_UvdMVC = 0x100000, +}; + +typedef unsigned int PP_StateClassificationFlags; + +struct PP_StateClassificationBlock { + enum PP_StateUILabel ui_label; + enum PP_StateClassificationFlag flags; + int bios_index; + bool temporary_state; + bool to_be_deleted; +}; + +struct PP_StatePcieBlock { + unsigned int lanes; +}; + +enum PP_RefreshrateSource { + PP_RefreshrateSource_EDID, + PP_RefreshrateSource_Explicit +}; + +struct PP_StateDisplayBlock { + bool disableFrameModulation; + bool limitRefreshrate; + enum PP_RefreshrateSource refreshrateSource; + int explicitRefreshrate; + int edidRefreshrateIndex; + bool enableVariBright; +}; + +struct PP_StateMemroyBlock { + bool dllOff; + uint8_t m3arb; + uint8_t unused[3]; +}; + +struct PP_StateSoftwareAlgorithmBlock { + bool disableLoadBalancing; + bool enableSleepForTimestamps; +}; + +#define PP_TEMPERATURE_UNITS_PER_CENTIGRADES 1000 + +/** + * Type to hold a temperature range. + */ +struct PP_TemperatureRange { + uint32_t min; + uint32_t max; +}; + +struct PP_StateValidationBlock { + bool singleDisplayOnly; + bool disallowOnDC; + uint8_t supportedPowerLevels; +}; + +struct PP_UVD_CLOCKS { + uint32_t VCLK; + uint32_t DCLK; +}; + +/** +* Structure to hold a PowerPlay Power State. 
+*/ +struct pp_power_state { + uint32_t id; + struct PP_StateLinkedList orderedList; + struct PP_StateLinkedList allStatesList; + + struct PP_StateClassificationBlock classification; + struct PP_StateValidationBlock validation; + struct PP_StatePcieBlock pcie; + struct PP_StateDisplayBlock display; + struct PP_StateMemroyBlock memory; + struct PP_TemperatureRange temperatures; + struct PP_StateSoftwareAlgorithmBlock software; + struct PP_UVD_CLOCKS uvd_clocks; + struct pp_hw_power_state hardware; +}; + + +/*Structure to hold a VCE state entry*/ +struct PP_VCEState { + uint32_t evclk; + uint32_t ecclk; + uint32_t sclk; + uint32_t mclk; +}; + +enum PP_MMProfilingState { + PP_MMProfilingState_NA = 0, + PP_MMProfilingState_Started, + PP_MMProfilingState_Stopped +}; + +struct PP_Clock_Engine_Request { + unsigned long clientType; + unsigned long ctxid; + uint64_t context_handle; + unsigned long sclk; + unsigned long sclkHardMin; + unsigned long mclk; + unsigned long iclk; + unsigned long evclk; + unsigned long ecclk; + unsigned long ecclkHardMin; + unsigned long vclk; + unsigned long dclk; + unsigned long samclk; + unsigned long acpclk; + unsigned long sclkOverdrive; + unsigned long mclkOverdrive; + unsigned long sclk_threshold; + unsigned long flag; + unsigned long vclk_ceiling; + unsigned long dclk_ceiling; + unsigned long num_cus; + unsigned long pmflag; + enum PP_MMProfilingState MMProfilingState; +}; + +#endif diff --git a/drivers/gpu/drm/amd/powerplay/inc/pp_acpi.h b/drivers/gpu/drm/amd/powerplay/inc/pp_acpi.h new file mode 100644 index 000000000000..3bd5e69b9045 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/inc/pp_acpi.h @@ -0,0 +1,28 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +extern bool acpi_atcs_functions_supported(void *device, + uint32_t index); +extern int acpi_pcie_perf_request(void *device, + uint8_t perf_req, + bool advertise); diff --git a/drivers/gpu/drm/amd/powerplay/inc/pp_asicblocks.h b/drivers/gpu/drm/amd/powerplay/inc/pp_asicblocks.h new file mode 100644 index 000000000000..0c1593e53654 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/inc/pp_asicblocks.h @@ -0,0 +1,47 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#ifndef PP_ASICBLOCKS_H +#define PP_ASICBLOCKS_H + + +enum PHM_AsicBlock { + PHM_AsicBlock_GFX, + PHM_AsicBlock_UVD_MVC, + PHM_AsicBlock_UVD, + PHM_AsicBlock_UVD_HD, + PHM_AsicBlock_UVD_SD, + PHM_AsicBlock_Count +}; + +enum PHM_ClockGateSetting { + PHM_ClockGateSetting_StaticOn, + PHM_ClockGateSetting_StaticOff, + PHM_ClockGateSetting_Dynamic +}; + +struct phm_asic_blocks { + bool gfx : 1; + bool uvd : 1; +}; + +#endif diff --git a/drivers/gpu/drm/amd/powerplay/inc/pp_debug.h b/drivers/gpu/drm/amd/powerplay/inc/pp_debug.h new file mode 100644 index 000000000000..d7d83b7c7f95 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/inc/pp_debug.h @@ -0,0 +1,47 @@ + +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#ifndef PP_DEBUG_H +#define PP_DEBUG_H + +#include +#include +#include + +#define PP_ASSERT_WITH_CODE(cond, msg, code) \ + do { \ + if (!(cond)) { \ + printk("%s\n", msg); \ + code; \ + } \ + } while (0) + + +#define PP_DBG_LOG(fmt, ...) 
\ + do { \ + if(0)printk(KERN_INFO "[ pp_dbg ] " fmt, ##__VA_ARGS__); \ + } while (0) + + +#endif /* PP_DEBUG_H */ + diff --git a/drivers/gpu/drm/amd/powerplay/inc/pp_feature.h b/drivers/gpu/drm/amd/powerplay/inc/pp_feature.h new file mode 100644 index 000000000000..0faf6a25c18b --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/inc/pp_feature.h @@ -0,0 +1,67 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef _PP_FEATURE_H_ +#define _PP_FEATURE_H_ + +/** + * PowerPlay feature ids. + */ +enum pp_feature { + PP_Feature_PowerPlay = 0, + PP_Feature_User2DPerformance, + PP_Feature_User3DPerformance, + PP_Feature_VariBright, + PP_Feature_VariBrightOnPowerXpress, + PP_Feature_ReducedRefreshRate, + PP_Feature_GFXClockGating, + PP_Feature_OverdriveTest, + PP_Feature_OverDrive, + PP_Feature_PowerBudgetWaiver, + PP_Feature_PowerControl, + PP_Feature_PowerControl_2, + PP_Feature_MultiUVDState, + PP_Feature_Force3DClock, + PP_Feature_BACO, + PP_Feature_PowerDown, + PP_Feature_DynamicUVDState, + PP_Feature_VCEDPM, + PP_Feature_PPM, + PP_Feature_ACP_POWERGATING, + PP_Feature_FFC, + PP_Feature_FPS, + PP_Feature_ViPG, + PP_Feature_Max +}; + +/** + * Struct for PowerPlay feature info. + */ +struct pp_feature_info { + bool supported; /* feature supported by PowerPlay */ + bool enabled; /* feature enabled in PowerPlay */ + bool enabled_default; /* default enable status of the feature */ + uint32_t version; /* feature version */ +}; + +#endif /* _PP_FEATURE_H_ */ diff --git a/drivers/gpu/drm/amd/powerplay/inc/pp_instance.h b/drivers/gpu/drm/amd/powerplay/inc/pp_instance.h new file mode 100644 index 000000000000..4d8ed1f33de4 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/inc/pp_instance.h @@ -0,0 +1,39 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#ifndef _PP_INSTANCE_H_ +#define _PP_INSTANCE_H_ + +#include "smumgr.h" +#include "hwmgr.h" +#include "eventmgr.h" + +#define PP_VALID 0x1F1F1F1F + +struct pp_instance { + uint32_t pp_valid; + struct pp_smumgr *smu_mgr; + struct pp_hwmgr *hwmgr; + struct pp_eventmgr *eventmgr; +}; + +#endif diff --git a/drivers/gpu/drm/amd/powerplay/inc/pp_power_source.h b/drivers/gpu/drm/amd/powerplay/inc/pp_power_source.h new file mode 100644 index 000000000000..b43315cc5d58 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/inc/pp_power_source.h @@ -0,0 +1,36 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef PP_POWERSOURCE_H +#define PP_POWERSOURCE_H + +enum pp_power_source { + PP_PowerSource_AC = 0, + PP_PowerSource_DC, + PP_PowerSource_LimitedPower, + PP_PowerSource_LimitedPower_2, + PP_PowerSource_Max +}; + + +#endif diff --git a/drivers/gpu/drm/amd/powerplay/inc/ppinterrupt.h b/drivers/gpu/drm/amd/powerplay/inc/ppinterrupt.h new file mode 100644 index 000000000000..c067e0925b6b --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/inc/ppinterrupt.h @@ -0,0 +1,46 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef _PP_INTERRUPT_H_ +#define _PP_INTERRUPT_H_ + +enum amd_thermal_irq { + AMD_THERMAL_IRQ_LOW_TO_HIGH = 0, + AMD_THERMAL_IRQ_HIGH_TO_LOW, + + AMD_THERMAL_IRQ_LAST +}; + +/* The type of the interrupt callback functions in PowerPlay */ +typedef int (*irq_handler_func_t)(void *private_data, + unsigned src_id, const uint32_t *iv_entry); + +/* Event Manager action chain list information */ +struct pp_interrupt_registration_info { + irq_handler_func_t call_back; /* Pointer to callback function */ + void *context; /* Pointer to callback function context */ + uint32_t src_id; /* Registered interrupt id */ + const uint32_t *iv_entry; +}; + +#endif /* _PP_INTERRUPT_H_ */ diff --git a/drivers/gpu/drm/amd/amdgpu/smu7.h b/drivers/gpu/drm/amd/powerplay/inc/smu7.h similarity index 100% rename from drivers/gpu/drm/amd/amdgpu/smu7.h rename to drivers/gpu/drm/amd/powerplay/inc/smu7.h diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu72.h b/drivers/gpu/drm/amd/powerplay/inc/smu72.h new file mode 100644 index 000000000000..b73d6b59ac32 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/inc/smu72.h @@ -0,0 +1,664 @@ +#ifndef SMU72_H +#define SMU72_H + +#if !defined(SMC_MICROCODE) +#pragma pack(push, 1) +#endif + +#define SMU__NUM_SCLK_DPM_STATE 8 +#define SMU__NUM_MCLK_DPM_LEVELS 4 +#define SMU__NUM_LCLK_DPM_LEVELS 8 +#define SMU__NUM_PCIE_DPM_LEVELS 8 + +enum SID_OPTION { + SID_OPTION_HI, + SID_OPTION_LO, + SID_OPTION_COUNT +}; + +enum Poly3rdOrderCoeff { + LEAKAGE_TEMPERATURE_SCALAR, + LEAKAGE_VOLTAGE_SCALAR, + DYNAMIC_VOLTAGE_SCALAR, + POLY_3RD_ORDER_COUNT +}; + +struct SMU7_Poly3rdOrder_Data { + int32_t a; + int32_t b; + int32_t c; + int32_t d; + uint8_t a_shift; + uint8_t b_shift; + uint8_t c_shift; + uint8_t x_shift; +}; + +typedef struct SMU7_Poly3rdOrder_Data SMU7_Poly3rdOrder_Data; + +struct Power_Calculator_Data { + uint16_t NoLoadVoltage; + uint16_t LoadVoltage; + uint16_t Resistance; + uint16_t Temperature; + uint16_t BaseLeakage; + uint16_t LkgTempScalar; + uint16_t LkgVoltScalar; + uint16_t LkgAreaScalar; + uint16_t LkgPower; + uint16_t DynVoltScalar; + uint32_t Cac; + uint32_t DynPower; + uint32_t TotalCurrent; + uint32_t TotalPower; +}; + +typedef struct Power_Calculator_Data PowerCalculatorData_t; + +struct Gc_Cac_Weight_Data { + uint8_t index; + uint32_t value; +}; + +typedef struct Gc_Cac_Weight_Data GcCacWeight_Data; + + +typedef struct { + uint32_t high; + uint32_t low; +} data_64_t; + +typedef struct { + data_64_t high; + data_64_t low; +} data_128_t; + +#define SMU7_CONTEXT_ID_SMC 1 +#define SMU7_CONTEXT_ID_VBIOS 2 + +#define SMU72_MAX_LEVELS_VDDC 16 +#define SMU72_MAX_LEVELS_VDDGFX 16 +#define SMU72_MAX_LEVELS_VDDCI 8 +#define SMU72_MAX_LEVELS_MVDD 4 + +#define SMU_MAX_SMIO_LEVELS 4 + +#define SMU72_MAX_LEVELS_GRAPHICS SMU__NUM_SCLK_DPM_STATE /* SCLK + SQ DPM + ULV */ +#define SMU72_MAX_LEVELS_MEMORY SMU__NUM_MCLK_DPM_LEVELS /* MCLK Levels DPM */ +#define SMU72_MAX_LEVELS_GIO SMU__NUM_LCLK_DPM_LEVELS /* LCLK Levels */ +#define SMU72_MAX_LEVELS_LINK SMU__NUM_PCIE_DPM_LEVELS /* PCIe 
speed and number of lanes. */ +#define SMU72_MAX_LEVELS_UVD 8 /* VCLK/DCLK levels for UVD. */ +#define SMU72_MAX_LEVELS_VCE 8 /* ECLK levels for VCE. */ +#define SMU72_MAX_LEVELS_ACP 8 /* ACLK levels for ACP. */ +#define SMU72_MAX_LEVELS_SAMU 8 /* SAMCLK levels for SAMU. */ +#define SMU72_MAX_ENTRIES_SMIO 32 /* Number of entries in SMIO table. */ + +#define DPM_NO_LIMIT 0 +#define DPM_NO_UP 1 +#define DPM_GO_DOWN 2 +#define DPM_GO_UP 3 + +#define SMU7_FIRST_DPM_GRAPHICS_LEVEL 0 +#define SMU7_FIRST_DPM_MEMORY_LEVEL 0 + +#define GPIO_CLAMP_MODE_VRHOT 1 +#define GPIO_CLAMP_MODE_THERM 2 +#define GPIO_CLAMP_MODE_DC 4 + +#define SCRATCH_B_TARG_PCIE_INDEX_SHIFT 0 +#define SCRATCH_B_TARG_PCIE_INDEX_MASK (0x7< +#include "pp_instance.h" +#include "amd_powerplay.h" + +struct pp_smumgr; +struct pp_instance; + +#define smu_lower_32_bits(n) ((uint32_t)(n)) +#define smu_upper_32_bits(n) ((uint32_t)(((n)>>16)>>16)) + +struct pp_smumgr_func { + int (*smu_init)(struct pp_smumgr *smumgr); + int (*smu_fini)(struct pp_smumgr *smumgr); + int (*start_smu)(struct pp_smumgr *smumgr); + int (*check_fw_load_finish)(struct pp_smumgr *smumgr, + uint32_t firmware); + int (*request_smu_load_fw)(struct pp_smumgr *smumgr); + int (*request_smu_load_specific_fw)(struct pp_smumgr *smumgr, + uint32_t firmware); + int (*get_argument)(struct pp_smumgr *smumgr); + int (*send_msg_to_smc)(struct pp_smumgr *smumgr, uint16_t msg); + int (*send_msg_to_smc_with_parameter)(struct pp_smumgr *smumgr, + uint16_t msg, uint32_t parameter); + int (*download_pptable_settings)(struct pp_smumgr *smumgr, + void **table); + int (*upload_pptable_settings)(struct pp_smumgr *smumgr); +}; + +struct pp_smumgr { + uint32_t chip_family; + uint32_t chip_id; + uint32_t hw_revision; + void *device; + void *backend; + uint32_t usec_timeout; + bool reload_fw; + const struct pp_smumgr_func *smumgr_funcs; +}; + + +extern int smum_init(struct amd_pp_init *pp_init, + struct pp_instance *handle); + +extern int smum_fini(struct pp_smumgr *smumgr); + +extern int smum_get_argument(struct pp_smumgr *smumgr); + +extern int smum_download_powerplay_table(struct pp_smumgr *smumgr, void **table); + +extern int smum_upload_powerplay_table(struct pp_smumgr *smumgr); + +extern int smum_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg); + +extern int smum_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr, + uint16_t msg, uint32_t parameter); + +extern int smum_wait_on_register(struct pp_smumgr *smumgr, + uint32_t index, uint32_t value, uint32_t mask); + +extern int smum_wait_for_register_unequal(struct pp_smumgr *smumgr, + uint32_t index, uint32_t value, uint32_t mask); + +extern int smum_wait_on_indirect_register(struct pp_smumgr *smumgr, + uint32_t indirect_port, uint32_t index, + uint32_t value, uint32_t mask); + + +extern void smum_wait_for_indirect_register_unequal( + struct pp_smumgr *smumgr, + uint32_t indirect_port, uint32_t index, + uint32_t value, uint32_t mask); + +extern int smu_allocate_memory(void *device, uint32_t size, + enum cgs_gpu_mem_type type, + uint32_t byte_align, uint64_t *mc_addr, + void **kptr, void *handle); + +extern int smu_free_memory(void *device, void *handle); + +#define SMUM_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT + +#define SMUM_FIELD_MASK(reg, field) reg##__##field##_MASK + +#define SMUM_WAIT_INDIRECT_REGISTER_GIVEN_INDEX(smumgr, \ + port, index, value, mask) \ + smum_wait_on_indirect_register(smumgr, \ + mm##port##_INDEX, index, value, mask) + + +#define SMUM_WAIT_REGISTER_UNEQUAL_GIVEN_INDEX(smumgr, \ + index, value, 
mask) \ + smum_wait_for_register_unequal(smumgr, \ + index, value, mask) + +#define SMUM_WAIT_REGISTER_UNEQUAL(smumgr, reg, value, mask) \ + SMUM_WAIT_REGISTER_UNEQUAL_GIVEN_INDEX(smumgr, \ + mm##reg, value, mask) + +#define SMUM_WAIT_FIELD_UNEQUAL(smumgr, reg, field, fieldval) \ + SMUM_WAIT_REGISTER_UNEQUAL(smumgr, reg, \ + (fieldval) << SMUM_FIELD_SHIFT(reg, field), \ + SMUM_FIELD_MASK(reg, field)) + +#define SMUM_GET_FIELD(value, reg, field) \ + (((value) & SMUM_FIELD_MASK(reg, field)) \ + >> SMUM_FIELD_SHIFT(reg, field)) + +#define SMUM_READ_FIELD(device, reg, field) \ + SMUM_GET_FIELD(cgs_read_register(device, mm##reg), reg, field) + +#define SMUM_SET_FIELD(value, reg, field, field_val) \ + (((value) & ~SMUM_FIELD_MASK(reg, field)) | \ + (SMUM_FIELD_MASK(reg, field) & ((field_val) << \ + SMUM_FIELD_SHIFT(reg, field)))) + +#define SMUM_WAIT_VFPF_INDIRECT_REGISTER_GIVEN_INDEX(smumgr, \ + port, index, value, mask) \ + smum_wait_on_indirect_register(smumgr, \ + mm##port##_INDEX_0, index, value, mask) + +#define SMUM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(smumgr, \ + port, index, value, mask) \ + smum_wait_for_indirect_register_unequal(smumgr, \ + mm##port##_INDEX_0, index, value, mask) + + +#define SMUM_WAIT_VFPF_INDIRECT_REGISTER(smumgr, port, reg, value, mask) \ + SMUM_WAIT_VFPF_INDIRECT_REGISTER_GIVEN_INDEX(smumgr, port, ix##reg, value, mask) + +#define SMUM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL(smumgr, port, reg, value, mask) \ + SMUM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(smumgr, port, ix##reg, value, mask) + + +/*Operations on named fields.*/ + +#define SMUM_READ_VFPF_INDIRECT_FIELD(device, port, reg, field) \ + SMUM_GET_FIELD(cgs_read_ind_register(device, port, ix##reg), \ + reg, field) + +#define SMUM_WRITE_FIELD(device, reg, field, fieldval) \ + cgs_write_register(device, mm##reg, \ + SMUM_SET_FIELD(cgs_read_register(device, mm##reg), reg, field, fieldval)) + +#define SMUM_WRITE_VFPF_INDIRECT_FIELD(device, port, reg, field, fieldval) \ + cgs_write_ind_register(device, port, ix##reg, \ + SMUM_SET_FIELD(cgs_read_ind_register(device, port, ix##reg), \ + reg, field, fieldval)) + +#define SMUM_WAIT_VFPF_INDIRECT_FIELD(smumgr, port, reg, field, fieldval) \ + SMUM_WAIT_VFPF_INDIRECT_REGISTER(smumgr, port, reg, \ + (fieldval) << SMUM_FIELD_SHIFT(reg, field), \ + SMUM_FIELD_MASK(reg, field)) + +#define SMUM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(smumgr, port, reg, field, fieldval) \ + SMUM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL(smumgr, port, reg, \ + (fieldval) << SMUM_FIELD_SHIFT(reg, field), \ + SMUM_FIELD_MASK(reg, field)) +#endif diff --git a/drivers/gpu/drm/amd/powerplay/inc/tonga_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/tonga_ppsmc.h new file mode 100644 index 000000000000..63631296d751 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/inc/tonga_ppsmc.h @@ -0,0 +1,420 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef TONGA_PP_SMC_H +#define TONGA_PP_SMC_H + +#pragma pack(push, 1) + +#define PPSMC_SWSTATE_FLAG_DC 0x01 +#define PPSMC_SWSTATE_FLAG_UVD 0x02 +#define PPSMC_SWSTATE_FLAG_VCE 0x04 +#define PPSMC_SWSTATE_FLAG_PCIE_X1 0x08 + +#define PPSMC_THERMAL_PROTECT_TYPE_INTERNAL 0x00 +#define PPSMC_THERMAL_PROTECT_TYPE_EXTERNAL 0x01 +#define PPSMC_THERMAL_PROTECT_TYPE_NONE 0xff + +#define PPSMC_SYSTEMFLAG_GPIO_DC 0x01 +#define PPSMC_SYSTEMFLAG_STEPVDDC 0x02 +#define PPSMC_SYSTEMFLAG_GDDR5 0x04 + +#define PPSMC_SYSTEMFLAG_DISABLE_BABYSTEP 0x08 + +#define PPSMC_SYSTEMFLAG_REGULATOR_HOT 0x10 +#define PPSMC_SYSTEMFLAG_REGULATOR_HOT_ANALOG 0x20 +#define PPSMC_SYSTEMFLAG_12CHANNEL 0x40 + + +#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_MASK 0x07 +#define PPSMC_EXTRAFLAGS_AC2DC_DONT_WAIT_FOR_VBLANK 0x08 + +#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_GOTODPMLOWSTATE 0x00 +#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_GOTOINITIALSTATE 0x01 + +#define PPSMC_EXTRAFLAGS_AC2DC_GPIO5_POLARITY_HIGH 0x10 +#define PPSMC_EXTRAFLAGS_DRIVER_TO_GPIO17 0x20 +#define PPSMC_EXTRAFLAGS_PCC_TO_GPIO17 0x40 + +/* Defines for DPM 2.0 */ +#define PPSMC_DPM2FLAGS_TDPCLMP 0x01 +#define PPSMC_DPM2FLAGS_PWRSHFT 0x02 +#define PPSMC_DPM2FLAGS_OCP 0x04 + +/* Defines for display watermark level */ + +#define PPSMC_DISPLAY_WATERMARK_LOW 0 +#define PPSMC_DISPLAY_WATERMARK_HIGH 1 + +/* In the HW performance level's state flags:*/ +#define PPSMC_STATEFLAG_AUTO_PULSE_SKIP 0x01 +#define PPSMC_STATEFLAG_POWERBOOST 0x02 +#define PPSMC_STATEFLAG_PSKIP_ON_TDP_FAULT 0x04 +#define PPSMC_STATEFLAG_POWERSHIFT 0x08 +#define PPSMC_STATEFLAG_SLOW_READ_MARGIN 0x10 +#define PPSMC_STATEFLAG_DEEPSLEEP_THROTTLE 0x20 +#define PPSMC_STATEFLAG_DEEPSLEEP_BYPASS 0x40 + +/* Fan control algorithm:*/ +#define FDO_MODE_HARDWARE 0 +#define FDO_MODE_PIECE_WISE_LINEAR 1 + +enum FAN_CONTROL { + FAN_CONTROL_FUZZY, + FAN_CONTROL_TABLE +}; + +/* Return codes for driver to SMC communication.*/ + +#define PPSMC_Result_OK ((uint16_t)0x01) +#define PPSMC_Result_NoMore ((uint16_t)0x02) +#define PPSMC_Result_NotNow ((uint16_t)0x03) + +#define PPSMC_Result_Failed ((uint16_t)0xFF) +#define PPSMC_Result_UnknownCmd ((uint16_t)0xFE) +#define PPSMC_Result_UnknownVT ((uint16_t)0xFD) + +typedef uint16_t PPSMC_Result; + +#define PPSMC_isERROR(x) ((uint16_t)0x80 & (x)) + + +#define PPSMC_MSG_Halt ((uint16_t)0x10) +#define PPSMC_MSG_Resume ((uint16_t)0x11) +#define PPSMC_MSG_EnableDPMLevel ((uint16_t)0x12) +#define PPSMC_MSG_ZeroLevelsDisabled ((uint16_t)0x13) +#define PPSMC_MSG_OneLevelsDisabled ((uint16_t)0x14) +#define PPSMC_MSG_TwoLevelsDisabled ((uint16_t)0x15) +#define PPSMC_MSG_EnableThermalInterrupt ((uint16_t)0x16) +#define PPSMC_MSG_RunningOnAC ((uint16_t)0x17) +#define PPSMC_MSG_LevelUp ((uint16_t)0x18) +#define PPSMC_MSG_LevelDown ((uint16_t)0x19) +#define PPSMC_MSG_ResetDPMCounters ((uint16_t)0x1a) +#define PPSMC_MSG_SwitchToSwState ((uint16_t)0x20) + +#define PPSMC_MSG_SwitchToSwStateLast ((uint16_t)0x3f) +#define PPSMC_MSG_SwitchToInitialState ((uint16_t)0x40) +#define PPSMC_MSG_NoForcedLevel 
((uint16_t)0x41) +#define PPSMC_MSG_ForceHigh ((uint16_t)0x42) +#define PPSMC_MSG_ForceMediumOrHigh ((uint16_t)0x43) + +#define PPSMC_MSG_SwitchToMinimumPower ((uint16_t)0x51) +#define PPSMC_MSG_ResumeFromMinimumPower ((uint16_t)0x52) +#define PPSMC_MSG_EnableCac ((uint16_t)0x53) +#define PPSMC_MSG_DisableCac ((uint16_t)0x54) +#define PPSMC_DPMStateHistoryStart ((uint16_t)0x55) +#define PPSMC_DPMStateHistoryStop ((uint16_t)0x56) +#define PPSMC_CACHistoryStart ((uint16_t)0x57) +#define PPSMC_CACHistoryStop ((uint16_t)0x58) +#define PPSMC_TDPClampingActive ((uint16_t)0x59) +#define PPSMC_TDPClampingInactive ((uint16_t)0x5A) +#define PPSMC_StartFanControl ((uint16_t)0x5B) +#define PPSMC_StopFanControl ((uint16_t)0x5C) +#define PPSMC_NoDisplay ((uint16_t)0x5D) +#define PPSMC_HasDisplay ((uint16_t)0x5E) +#define PPSMC_MSG_UVDPowerOFF ((uint16_t)0x60) +#define PPSMC_MSG_UVDPowerON ((uint16_t)0x61) +#define PPSMC_MSG_EnableULV ((uint16_t)0x62) +#define PPSMC_MSG_DisableULV ((uint16_t)0x63) +#define PPSMC_MSG_EnterULV ((uint16_t)0x64) +#define PPSMC_MSG_ExitULV ((uint16_t)0x65) +#define PPSMC_PowerShiftActive ((uint16_t)0x6A) +#define PPSMC_PowerShiftInactive ((uint16_t)0x6B) +#define PPSMC_OCPActive ((uint16_t)0x6C) +#define PPSMC_OCPInactive ((uint16_t)0x6D) +#define PPSMC_CACLongTermAvgEnable ((uint16_t)0x6E) +#define PPSMC_CACLongTermAvgDisable ((uint16_t)0x6F) +#define PPSMC_MSG_InferredStateSweep_Start ((uint16_t)0x70) +#define PPSMC_MSG_InferredStateSweep_Stop ((uint16_t)0x71) +#define PPSMC_MSG_SwitchToLowestInfState ((uint16_t)0x72) +#define PPSMC_MSG_SwitchToNonInfState ((uint16_t)0x73) +#define PPSMC_MSG_AllStateSweep_Start ((uint16_t)0x74) +#define PPSMC_MSG_AllStateSweep_Stop ((uint16_t)0x75) +#define PPSMC_MSG_SwitchNextLowerInfState ((uint16_t)0x76) +#define PPSMC_MSG_SwitchNextHigherInfState ((uint16_t)0x77) +#define PPSMC_MSG_MclkRetrainingTest ((uint16_t)0x78) +#define PPSMC_MSG_ForceTDPClamping ((uint16_t)0x79) +#define PPSMC_MSG_CollectCAC_PowerCorreln ((uint16_t)0x7A) +#define PPSMC_MSG_CollectCAC_WeightCalib ((uint16_t)0x7B) +#define PPSMC_MSG_CollectCAC_SQonly ((uint16_t)0x7C) +#define PPSMC_MSG_CollectCAC_TemperaturePwr ((uint16_t)0x7D) + +#define PPSMC_MSG_ExtremitiesTest_Start ((uint16_t)0x7E) +#define PPSMC_MSG_ExtremitiesTest_Stop ((uint16_t)0x7F) +#define PPSMC_FlushDataCache ((uint16_t)0x80) +#define PPSMC_FlushInstrCache ((uint16_t)0x81) + +#define PPSMC_MSG_SetEnabledLevels ((uint16_t)0x82) +#define PPSMC_MSG_SetForcedLevels ((uint16_t)0x83) + +#define PPSMC_MSG_ResetToDefaults ((uint16_t)0x84) + +#define PPSMC_MSG_SetForcedLevelsAndJump ((uint16_t)0x85) +#define PPSMC_MSG_SetCACHistoryMode ((uint16_t)0x86) +#define PPSMC_MSG_EnableDTE ((uint16_t)0x87) +#define PPSMC_MSG_DisableDTE ((uint16_t)0x88) + +#define PPSMC_MSG_SmcSpaceSetAddress ((uint16_t)0x89) +#define PPSMC_MSG_ChangeNearTDPLimit ((uint16_t)0x90) +#define PPSMC_MSG_ChangeSafePowerLimit ((uint16_t)0x91) + +#define PPSMC_MSG_DPMStateSweepStart ((uint16_t)0x92) +#define PPSMC_MSG_DPMStateSweepStop ((uint16_t)0x93) + +#define PPSMC_MSG_OVRDDisableSCLKDS ((uint16_t)0x94) +#define PPSMC_MSG_CancelDisableOVRDSCLKDS ((uint16_t)0x95) +#define PPSMC_MSG_ThrottleOVRDSCLKDS ((uint16_t)0x96) +#define PPSMC_MSG_CancelThrottleOVRDSCLKDS ((uint16_t)0x97) +#define PPSMC_MSG_GPIO17 ((uint16_t)0x98) + +#define PPSMC_MSG_API_SetSvi2Volt_Vddc ((uint16_t)0x99) +#define PPSMC_MSG_API_SetSvi2Volt_Vddci ((uint16_t)0x9A) +#define PPSMC_MSG_API_SetSvi2Volt_Mvdd ((uint16_t)0x9B) +#define PPSMC_MSG_API_GetSvi2Volt_Vddc 
((uint16_t)0x9C) +#define PPSMC_MSG_API_GetSvi2Volt_Vddci ((uint16_t)0x9D) +#define PPSMC_MSG_API_GetSvi2Volt_Mvdd ((uint16_t)0x9E) + +#define PPSMC_MSG_BREAK ((uint16_t)0xF8) + +/* Trinity Specific Messages*/ +#define PPSMC_MSG_Test ((uint16_t) 0x100) +#define PPSMC_MSG_DPM_Voltage_Pwrmgt ((uint16_t) 0x101) +#define PPSMC_MSG_DPM_Config ((uint16_t) 0x102) +#define PPSMC_MSG_PM_Controller_Start ((uint16_t) 0x103) +#define PPSMC_MSG_DPM_ForceState ((uint16_t) 0x104) +#define PPSMC_MSG_PG_PowerDownSIMD ((uint16_t) 0x105) +#define PPSMC_MSG_PG_PowerUpSIMD ((uint16_t) 0x106) +#define PPSMC_MSG_PM_Controller_Stop ((uint16_t) 0x107) +#define PPSMC_MSG_PG_SIMD_Config ((uint16_t) 0x108) +#define PPSMC_MSG_Voltage_Cntl_Enable ((uint16_t) 0x109) +#define PPSMC_MSG_Thermal_Cntl_Enable ((uint16_t) 0x10a) +#define PPSMC_MSG_Reset_Service ((uint16_t) 0x10b) +#define PPSMC_MSG_VCEPowerOFF ((uint16_t) 0x10e) +#define PPSMC_MSG_VCEPowerON ((uint16_t) 0x10f) +#define PPSMC_MSG_DPM_Disable_VCE_HS ((uint16_t) 0x110) +#define PPSMC_MSG_DPM_Enable_VCE_HS ((uint16_t) 0x111) +#define PPSMC_MSG_DPM_N_LevelsDisabled ((uint16_t) 0x112) +#define PPSMC_MSG_DCEPowerOFF ((uint16_t) 0x113) +#define PPSMC_MSG_DCEPowerON ((uint16_t) 0x114) +#define PPSMC_MSG_PCIE_DDIPowerDown ((uint16_t) 0x117) +#define PPSMC_MSG_PCIE_DDIPowerUp ((uint16_t) 0x118) +#define PPSMC_MSG_PCIE_CascadePLLPowerDown ((uint16_t) 0x119) +#define PPSMC_MSG_PCIE_CascadePLLPowerUp ((uint16_t) 0x11a) +#define PPSMC_MSG_SYSPLLPowerOff ((uint16_t) 0x11b) +#define PPSMC_MSG_SYSPLLPowerOn ((uint16_t) 0x11c) +#define PPSMC_MSG_DCE_RemoveVoltageAdjustment ((uint16_t) 0x11d) +#define PPSMC_MSG_DCE_AllowVoltageAdjustment ((uint16_t) 0x11e) +#define PPSMC_MSG_DISPLAYPHYStatusNotify ((uint16_t) 0x11f) +#define PPSMC_MSG_EnableBAPM ((uint16_t) 0x120) +#define PPSMC_MSG_DisableBAPM ((uint16_t) 0x121) +#define PPSMC_MSG_PCIE_PHYPowerDown ((uint16_t) 0x122) +#define PPSMC_MSG_PCIE_PHYPowerUp ((uint16_t) 0x123) +#define PPSMC_MSG_UVD_DPM_Config ((uint16_t) 0x124) +#define PPSMC_MSG_Spmi_Enable ((uint16_t) 0x122) +#define PPSMC_MSG_Spmi_Timer ((uint16_t) 0x123) +#define PPSMC_MSG_LCLK_DPM_Config ((uint16_t) 0x124) +#define PPSMC_MSG_NBDPM_Config ((uint16_t) 0x125) +#define PPSMC_MSG_PCIE_DDIPhyPowerDown ((uint16_t) 0x126) +#define PPSMC_MSG_PCIE_DDIPhyPowerUp ((uint16_t) 0x127) +#define PPSMC_MSG_MCLKDPM_Config ((uint16_t) 0x128) + +#define PPSMC_MSG_UVDDPM_Config ((uint16_t) 0x129) +#define PPSMC_MSG_VCEDPM_Config ((uint16_t) 0x12A) +#define PPSMC_MSG_ACPDPM_Config ((uint16_t) 0x12B) +#define PPSMC_MSG_SAMUDPM_Config ((uint16_t) 0x12C) +#define PPSMC_MSG_UVDDPM_SetEnabledMask ((uint16_t) 0x12D) +#define PPSMC_MSG_VCEDPM_SetEnabledMask ((uint16_t) 0x12E) +#define PPSMC_MSG_ACPDPM_SetEnabledMask ((uint16_t) 0x12F) +#define PPSMC_MSG_SAMUDPM_SetEnabledMask ((uint16_t) 0x130) +#define PPSMC_MSG_MCLKDPM_ForceState ((uint16_t) 0x131) +#define PPSMC_MSG_MCLKDPM_NoForcedLevel ((uint16_t) 0x132) +#define PPSMC_MSG_Thermal_Cntl_Disable ((uint16_t) 0x133) +#define PPSMC_MSG_SetTDPLimit ((uint16_t) 0x134) +#define PPSMC_MSG_Voltage_Cntl_Disable ((uint16_t) 0x135) +#define PPSMC_MSG_PCIeDPM_Enable ((uint16_t) 0x136) +#define PPSMC_MSG_ACPPowerOFF ((uint16_t) 0x137) +#define PPSMC_MSG_ACPPowerON ((uint16_t) 0x138) +#define PPSMC_MSG_SAMPowerOFF ((uint16_t) 0x139) +#define PPSMC_MSG_SAMPowerON ((uint16_t) 0x13a) +#define PPSMC_MSG_SDMAPowerOFF ((uint16_t) 0x13b) +#define PPSMC_MSG_SDMAPowerON ((uint16_t) 0x13c) +#define PPSMC_MSG_PCIeDPM_Disable ((uint16_t) 0x13d) +#define 
PPSMC_MSG_IOMMUPowerOFF ((uint16_t) 0x13e) +#define PPSMC_MSG_IOMMUPowerON ((uint16_t) 0x13f) +#define PPSMC_MSG_NBDPM_Enable ((uint16_t) 0x140) +#define PPSMC_MSG_NBDPM_Disable ((uint16_t) 0x141) +#define PPSMC_MSG_NBDPM_ForceNominal ((uint16_t) 0x142) +#define PPSMC_MSG_NBDPM_ForcePerformance ((uint16_t) 0x143) +#define PPSMC_MSG_NBDPM_UnForce ((uint16_t) 0x144) +#define PPSMC_MSG_SCLKDPM_SetEnabledMask ((uint16_t) 0x145) +#define PPSMC_MSG_MCLKDPM_SetEnabledMask ((uint16_t) 0x146) +#define PPSMC_MSG_PCIeDPM_ForceLevel ((uint16_t) 0x147) +#define PPSMC_MSG_PCIeDPM_UnForceLevel ((uint16_t) 0x148) +#define PPSMC_MSG_EnableACDCGPIOInterrupt ((uint16_t) 0x149) +#define PPSMC_MSG_EnableVRHotGPIOInterrupt ((uint16_t) 0x14a) +#define PPSMC_MSG_SwitchToAC ((uint16_t) 0x14b) + +#define PPSMC_MSG_XDMAPowerOFF ((uint16_t) 0x14c) +#define PPSMC_MSG_XDMAPowerON ((uint16_t) 0x14d) + +#define PPSMC_MSG_DPM_Enable ((uint16_t)0x14e) +#define PPSMC_MSG_DPM_Disable ((uint16_t)0x14f) +#define PPSMC_MSG_MCLKDPM_Enable ((uint16_t)0x150) +#define PPSMC_MSG_MCLKDPM_Disable ((uint16_t)0x151) +#define PPSMC_MSG_LCLKDPM_Enable ((uint16_t)0x152) +#define PPSMC_MSG_LCLKDPM_Disable ((uint16_t)0x153) +#define PPSMC_MSG_UVDDPM_Enable ((uint16_t)0x154) +#define PPSMC_MSG_UVDDPM_Disable ((uint16_t)0x155) +#define PPSMC_MSG_SAMUDPM_Enable ((uint16_t)0x156) +#define PPSMC_MSG_SAMUDPM_Disable ((uint16_t)0x157) +#define PPSMC_MSG_ACPDPM_Enable ((uint16_t)0x158) +#define PPSMC_MSG_ACPDPM_Disable ((uint16_t)0x159) +#define PPSMC_MSG_VCEDPM_Enable ((uint16_t)0x15a) +#define PPSMC_MSG_VCEDPM_Disable ((uint16_t)0x15b) +#define PPSMC_MSG_LCLKDPM_SetEnabledMask ((uint16_t)0x15c) + +#define PPSMC_MSG_DPM_FPS_Mode ((uint16_t) 0x15d) +#define PPSMC_MSG_DPM_Activity_Mode ((uint16_t) 0x15e) +#define PPSMC_MSG_VddC_Request ((uint16_t) 0x15f) +#define PPSMC_MSG_MCLKDPM_GetEnabledMask ((uint16_t) 0x160) +#define PPSMC_MSG_LCLKDPM_GetEnabledMask ((uint16_t) 0x161) +#define PPSMC_MSG_SCLKDPM_GetEnabledMask ((uint16_t) 0x162) +#define PPSMC_MSG_UVDDPM_GetEnabledMask ((uint16_t) 0x163) +#define PPSMC_MSG_SAMUDPM_GetEnabledMask ((uint16_t) 0x164) +#define PPSMC_MSG_ACPDPM_GetEnabledMask ((uint16_t) 0x165) +#define PPSMC_MSG_VCEDPM_GetEnabledMask ((uint16_t) 0x166) +#define PPSMC_MSG_PCIeDPM_SetEnabledMask ((uint16_t) 0x167) +#define PPSMC_MSG_PCIeDPM_GetEnabledMask ((uint16_t) 0x168) +#define PPSMC_MSG_TDCLimitEnable ((uint16_t) 0x169) +#define PPSMC_MSG_TDCLimitDisable ((uint16_t) 0x16a) +#define PPSMC_MSG_DPM_AutoRotate_Mode ((uint16_t) 0x16b) +#define PPSMC_MSG_DISPCLK_FROM_FCH ((uint16_t)0x16c) +#define PPSMC_MSG_DISPCLK_FROM_DFS ((uint16_t)0x16d) +#define PPSMC_MSG_DPREFCLK_FROM_FCH ((uint16_t)0x16e) +#define PPSMC_MSG_DPREFCLK_FROM_DFS ((uint16_t)0x16f) +#define PPSMC_MSG_PmStatusLogStart ((uint16_t)0x170) +#define PPSMC_MSG_PmStatusLogSample ((uint16_t)0x171) +#define PPSMC_MSG_SCLK_AutoDPM_ON ((uint16_t) 0x172) +#define PPSMC_MSG_MCLK_AutoDPM_ON ((uint16_t) 0x173) +#define PPSMC_MSG_LCLK_AutoDPM_ON ((uint16_t) 0x174) +#define PPSMC_MSG_UVD_AutoDPM_ON ((uint16_t) 0x175) +#define PPSMC_MSG_SAMU_AutoDPM_ON ((uint16_t) 0x176) +#define PPSMC_MSG_ACP_AutoDPM_ON ((uint16_t) 0x177) +#define PPSMC_MSG_VCE_AutoDPM_ON ((uint16_t) 0x178) +#define PPSMC_MSG_PCIe_AutoDPM_ON ((uint16_t) 0x179) +#define PPSMC_MSG_MASTER_AutoDPM_ON ((uint16_t) 0x17a) +#define PPSMC_MSG_MASTER_AutoDPM_OFF ((uint16_t) 0x17b) +#define PPSMC_MSG_DYNAMICDISPPHYPOWER ((uint16_t) 0x17c) +#define PPSMC_MSG_CAC_COLLECTION_ON ((uint16_t) 0x17d) +#define 
PPSMC_MSG_CAC_COLLECTION_OFF ((uint16_t) 0x17e) +#define PPSMC_MSG_CAC_CORRELATION_ON ((uint16_t) 0x17f) +#define PPSMC_MSG_CAC_CORRELATION_OFF ((uint16_t) 0x180) +#define PPSMC_MSG_PM_STATUS_TO_DRAM_ON ((uint16_t) 0x181) +#define PPSMC_MSG_PM_STATUS_TO_DRAM_OFF ((uint16_t) 0x182) +#define PPSMC_MSG_UVD_HANDSHAKE_OFF ((uint16_t) 0x183) +#define PPSMC_MSG_ALLOW_LOWSCLK_INTERRUPT ((uint16_t) 0x184) +#define PPSMC_MSG_PkgPwrLimitEnable ((uint16_t) 0x185) +#define PPSMC_MSG_PkgPwrLimitDisable ((uint16_t) 0x186) +#define PPSMC_MSG_PkgPwrSetLimit ((uint16_t) 0x187) +#define PPSMC_MSG_OverDriveSetTargetTdp ((uint16_t) 0x188) +#define PPSMC_MSG_SCLKDPM_FreezeLevel ((uint16_t) 0x189) +#define PPSMC_MSG_SCLKDPM_UnfreezeLevel ((uint16_t) 0x18A) +#define PPSMC_MSG_MCLKDPM_FreezeLevel ((uint16_t) 0x18B) +#define PPSMC_MSG_MCLKDPM_UnfreezeLevel ((uint16_t) 0x18C) +#define PPSMC_MSG_START_DRAM_LOGGING ((uint16_t) 0x18D) +#define PPSMC_MSG_STOP_DRAM_LOGGING ((uint16_t) 0x18E) +#define PPSMC_MSG_MASTER_DeepSleep_ON ((uint16_t) 0x18F) +#define PPSMC_MSG_MASTER_DeepSleep_OFF ((uint16_t) 0x190) +#define PPSMC_MSG_Remove_DC_Clamp ((uint16_t) 0x191) +#define PPSMC_MSG_DisableACDCGPIOInterrupt ((uint16_t) 0x192) +#define PPSMC_MSG_OverrideVoltageControl_SetVddc ((uint16_t) 0x193) +#define PPSMC_MSG_OverrideVoltageControl_SetVddci ((uint16_t) 0x194) +#define PPSMC_MSG_SetVidOffset_1 ((uint16_t) 0x195) +#define PPSMC_MSG_SetVidOffset_2 ((uint16_t) 0x207) +#define PPSMC_MSG_GetVidOffset_1 ((uint16_t) 0x196) +#define PPSMC_MSG_GetVidOffset_2 ((uint16_t) 0x208) +#define PPSMC_MSG_THERMAL_OVERDRIVE_Enable ((uint16_t) 0x197) +#define PPSMC_MSG_THERMAL_OVERDRIVE_Disable ((uint16_t) 0x198) +#define PPSMC_MSG_SetTjMax ((uint16_t) 0x199) +#define PPSMC_MSG_SetFanPwmMax ((uint16_t) 0x19A) + +#define PPSMC_MSG_WaitForMclkSwitchFinish ((uint16_t) 0x19B) +#define PPSMC_MSG_ENABLE_THERMAL_DPM ((uint16_t) 0x19C) +#define PPSMC_MSG_DISABLE_THERMAL_DPM ((uint16_t) 0x19D) +#define PPSMC_MSG_Enable_PCC ((uint16_t) 0x19E) +#define PPSMC_MSG_Disable_PCC ((uint16_t) 0x19F) + +#define PPSMC_MSG_API_GetSclkFrequency ((uint16_t) 0x200) +#define PPSMC_MSG_API_GetMclkFrequency ((uint16_t) 0x201) +#define PPSMC_MSG_API_GetSclkBusy ((uint16_t) 0x202) +#define PPSMC_MSG_API_GetMclkBusy ((uint16_t) 0x203) +#define PPSMC_MSG_API_GetAsicPower ((uint16_t) 0x204) +#define PPSMC_MSG_SetFanRpmMax ((uint16_t) 0x205) +#define PPSMC_MSG_SetFanSclkTarget ((uint16_t) 0x206) +#define PPSMC_MSG_SetFanMinPwm ((uint16_t) 0x209) +#define PPSMC_MSG_SetFanTemperatureTarget ((uint16_t) 0x20A) + +#define PPSMC_MSG_BACO_StartMonitor ((uint16_t) 0x240) +#define PPSMC_MSG_BACO_Cancel ((uint16_t) 0x241) +#define PPSMC_MSG_EnableVddGfx ((uint16_t) 0x242) +#define PPSMC_MSG_DisableVddGfx ((uint16_t) 0x243) +#define PPSMC_MSG_UcodeAddressLow ((uint16_t) 0x244) +#define PPSMC_MSG_UcodeAddressHigh ((uint16_t) 0x245) +#define PPSMC_MSG_UcodeLoadStatus ((uint16_t) 0x246) + +#define PPSMC_MSG_DRV_DRAM_ADDR_HI ((uint16_t) 0x250) +#define PPSMC_MSG_DRV_DRAM_ADDR_LO ((uint16_t) 0x251) +#define PPSMC_MSG_SMU_DRAM_ADDR_HI ((uint16_t) 0x252) +#define PPSMC_MSG_SMU_DRAM_ADDR_LO ((uint16_t) 0x253) +#define PPSMC_MSG_LoadUcodes ((uint16_t) 0x254) +#define PPSMC_MSG_PowerStateNotify ((uint16_t) 0x255) +#define PPSMC_MSG_COND_EXEC_DRAM_ADDR_HI ((uint16_t) 0x256) +#define PPSMC_MSG_COND_EXEC_DRAM_ADDR_LO ((uint16_t) 0x257) +#define PPSMC_MSG_VBIOS_DRAM_ADDR_HI ((uint16_t) 0x258) +#define PPSMC_MSG_VBIOS_DRAM_ADDR_LO ((uint16_t) 0x259) +#define PPSMC_MSG_LoadVBios ((uint16_t) 0x25A) 
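+/* Ucode status, PSR, clock gating feature and telemetry query messages. */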
+#define PPSMC_MSG_GetUcodeVersion ((uint16_t) 0x25B) +#define DMCUSMC_MSG_PSREntry ((uint16_t) 0x25C) +#define DMCUSMC_MSG_PSRExit ((uint16_t) 0x25D) +#define PPSMC_MSG_EnableClockGatingFeature ((uint16_t) 0x260) +#define PPSMC_MSG_DisableClockGatingFeature ((uint16_t) 0x261) +#define PPSMC_MSG_IsDeviceRunning ((uint16_t) 0x262) +#define PPSMC_MSG_LoadMetaData ((uint16_t) 0x263) +#define PPSMC_MSG_TMON_AutoCaliberate_Enable ((uint16_t) 0x264) +#define PPSMC_MSG_TMON_AutoCaliberate_Disable ((uint16_t) 0x265) +#define PPSMC_MSG_GetTelemetry1Slope ((uint16_t) 0x266) +#define PPSMC_MSG_GetTelemetry1Offset ((uint16_t) 0x267) +#define PPSMC_MSG_GetTelemetry2Slope ((uint16_t) 0x268) +#define PPSMC_MSG_GetTelemetry2Offset ((uint16_t) 0x269) + +typedef uint16_t PPSMC_Msg; + +/* If the SMC firmware has an event status soft register this is what the individual bits mean.*/ +#define PPSMC_EVENT_STATUS_THERMAL 0x00000001 +#define PPSMC_EVENT_STATUS_REGULATORHOT 0x00000002 +#define PPSMC_EVENT_STATUS_DC 0x00000004 +#define PPSMC_EVENT_STATUS_GPIO17 0x00000008 + + +#pragma pack(pop) +#endif diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/Makefile b/drivers/gpu/drm/amd/powerplay/smumgr/Makefile new file mode 100644 index 000000000000..6c4ef135cf01 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/smumgr/Makefile @@ -0,0 +1,9 @@ +# +# Makefile for the 'smu manager' sub-component of powerplay. +# It provides the smu management services for the driver. + +SMU_MGR = smumgr.o cz_smumgr.o tonga_smumgr.o fiji_smumgr.o + +AMD_PP_SMUMGR = $(addprefix $(AMD_PP_PATH)/smumgr/,$(SMU_MGR)) + +AMD_POWERPLAY_FILES += $(AMD_PP_SMUMGR) diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c new file mode 100644 index 000000000000..873a8d264d5c --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c @@ -0,0 +1,858 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#include +#include +#include +#include +#include "linux/delay.h" +#include "cgs_common.h" +#include "smu/smu_8_0_d.h" +#include "smu/smu_8_0_sh_mask.h" +#include "smu8.h" +#include "smu8_fusion.h" +#include "cz_smumgr.h" +#include "cz_ppsmc.h" +#include "smu_ucode_xfer_cz.h" +#include "gca/gfx_8_0_d.h" +#include "gca/gfx_8_0_sh_mask.h" +#include "smumgr.h" + +#define SIZE_ALIGN_32(x) (((x) + 31) / 32 * 32) + +static enum cz_scratch_entry firmware_list[] = { + CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0, + CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1, + CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE, + CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP, + CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME, + CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, + CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, + CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G, +}; + +static int cz_smum_get_argument(struct pp_smumgr *smumgr) +{ + if (smumgr == NULL || smumgr->device == NULL) + return -EINVAL; + + return cgs_read_register(smumgr->device, + mmSMU_MP1_SRBM2P_ARG_0); +} + +static int cz_send_msg_to_smc_async(struct pp_smumgr *smumgr, + uint16_t msg) +{ + int result = 0; + + if (smumgr == NULL || smumgr->device == NULL) + return -EINVAL; + + result = SMUM_WAIT_FIELD_UNEQUAL(smumgr, + SMU_MP1_SRBM2P_RESP_0, CONTENT, 0); + if (result != 0) { + printk(KERN_ERR "[ powerplay ] cz_send_msg_to_smc_async failed\n"); + return result; + } + + cgs_write_register(smumgr->device, mmSMU_MP1_SRBM2P_RESP_0, 0); + cgs_write_register(smumgr->device, mmSMU_MP1_SRBM2P_MSG_0, msg); + + return 0; +} + +/* Send a message to the SMC, and wait for its response.*/ +static int cz_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg) +{ + int result = 0; + + result = cz_send_msg_to_smc_async(smumgr, msg); + if (result != 0) + return result; + + result = SMUM_WAIT_FIELD_UNEQUAL(smumgr, + SMU_MP1_SRBM2P_RESP_0, CONTENT, 0); + + if (result != 0) + return result; + + return 0; +} + +static int cz_set_smc_sram_address(struct pp_smumgr *smumgr, + uint32_t smc_address, uint32_t limit) +{ + if (smumgr == NULL || smumgr->device == NULL) + return -EINVAL; + + if (0 != (3 & smc_address)) { + printk(KERN_ERR "[ powerplay ] SMC address must be 4 byte aligned\n"); + return -1; + } + + if (limit <= (smc_address + 3)) { + printk(KERN_ERR "[ powerplay ] SMC address beyond the SMC RAM area\n"); + return -1; + } + + cgs_write_register(smumgr->device, mmMP0PUB_IND_INDEX_0, + SMN_MP1_SRAM_START_ADDR + smc_address); + + return 0; +} + +static int cz_write_smc_sram_dword(struct pp_smumgr *smumgr, + uint32_t smc_address, uint32_t value, uint32_t limit) +{ + int result; + + if (smumgr == NULL || smumgr->device == NULL) + return -EINVAL; + + result = cz_set_smc_sram_address(smumgr, smc_address, limit); + cgs_write_register(smumgr->device, mmMP0PUB_IND_DATA_0, value); + + return 0; +} + +static int cz_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr, + uint16_t msg, uint32_t parameter) +{ + if (smumgr == NULL || smumgr->device == NULL) + return -EINVAL; + + cgs_write_register(smumgr->device, mmSMU_MP1_SRBM2P_ARG_0, parameter); + + return cz_send_msg_to_smc(smumgr, msg); +} + +static int cz_request_smu_load_fw(struct pp_smumgr *smumgr) +{ + struct cz_smumgr *cz_smu = (struct cz_smumgr *)(smumgr->backend); + int result = 0; + uint32_t smc_address; + + if (!smumgr->reload_fw) { + printk(KERN_INFO "[ powerplay ] skip reloading...\n"); + return 0; + } + + smc_address = SMU8_FIRMWARE_HEADER_LOCATION + + offsetof(struct SMU8_Firmware_Header, UcodeLoadStatus); + + cz_write_smc_sram_dword(smumgr, smc_address, 0, smc_address+4); + + 
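	/* Hand the SMU the TOC buffer address, then execute the load jobs prepared by cz_smu_construct_toc(). */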
+	cz_send_msg_to_smc_with_parameter(smumgr,
+			PPSMC_MSG_DriverDramAddrHi,
+			cz_smu->toc_buffer.mc_addr_high);
+
+	cz_send_msg_to_smc_with_parameter(smumgr,
+			PPSMC_MSG_DriverDramAddrLo,
+			cz_smu->toc_buffer.mc_addr_low);
+
+	cz_send_msg_to_smc(smumgr, PPSMC_MSG_InitJobs);
+
+	cz_send_msg_to_smc_with_parameter(smumgr,
+			PPSMC_MSG_ExecuteJob,
+			cz_smu->toc_entry_aram);
+	cz_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_ExecuteJob,
+			cz_smu->toc_entry_power_profiling_index);
+
+	result = cz_send_msg_to_smc_with_parameter(smumgr,
+			PPSMC_MSG_ExecuteJob,
+			cz_smu->toc_entry_initialize_index);
+
+	return result;
+}
+
+static int cz_check_fw_load_finish(struct pp_smumgr *smumgr,
+			uint32_t firmware)
+{
+	int i;
+	uint32_t index = SMN_MP1_SRAM_START_ADDR +
+			SMU8_FIRMWARE_HEADER_LOCATION +
+			offsetof(struct SMU8_Firmware_Header, UcodeLoadStatus);
+
+	if (smumgr == NULL || smumgr->device == NULL)
+		return -EINVAL;
+
+	cgs_write_register(smumgr->device, mmMP0PUB_IND_INDEX, index);
+
+	for (i = 0; i < smumgr->usec_timeout; i++) {
+		if (firmware ==
+			(cgs_read_register(smumgr->device, mmMP0PUB_IND_DATA) & firmware))
+			break;
+		udelay(1);
+	}
+
+	if (i >= smumgr->usec_timeout) {
+		printk(KERN_ERR "[ powerplay ] SMU check loaded firmware failed.\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int cz_load_mec_firmware(struct pp_smumgr *smumgr)
+{
+	uint32_t reg_data;
+	uint32_t tmp;
+	int ret = 0;
+	struct cgs_firmware_info info = {0};
+	struct cz_smumgr *cz_smu;
+
+	if (smumgr == NULL || smumgr->device == NULL)
+		return -EINVAL;
+
+	cz_smu = (struct cz_smumgr *)smumgr->backend;
+	ret = cgs_get_firmware_info(smumgr->device,
+			CGS_UCODE_ID_CP_MEC, &info);
+
+	if (ret)
+		return -EINVAL;
+
+	/* Disable MEC parsing/prefetching */
+	tmp = cgs_read_register(smumgr->device,
+			mmCP_MEC_CNTL);
+	tmp = SMUM_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME1_HALT, 1);
+	tmp = SMUM_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME2_HALT, 1);
+	cgs_write_register(smumgr->device, mmCP_MEC_CNTL, tmp);
+
+	tmp = cgs_read_register(smumgr->device,
+			mmCP_CPC_IC_BASE_CNTL);
+
+	tmp = SMUM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
+	tmp = SMUM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, ATC, 0);
+	tmp = SMUM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
+	tmp = SMUM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, MTYPE, 1);
+	cgs_write_register(smumgr->device, mmCP_CPC_IC_BASE_CNTL, tmp);
+
+	reg_data = smu_lower_32_bits(info.mc_addr) &
+			SMUM_FIELD_MASK(CP_CPC_IC_BASE_LO, IC_BASE_LO);
+	cgs_write_register(smumgr->device, mmCP_CPC_IC_BASE_LO, reg_data);
+
+	reg_data = smu_upper_32_bits(info.mc_addr) &
+			SMUM_FIELD_MASK(CP_CPC_IC_BASE_HI, IC_BASE_HI);
+	cgs_write_register(smumgr->device, mmCP_CPC_IC_BASE_HI, reg_data);
+
+	return 0;
+}
+
+static int cz_start_smu(struct pp_smumgr *smumgr)
+{
+	int ret = 0;
+	uint32_t fw_to_check = UCODE_ID_RLC_G_MASK |
+				UCODE_ID_SDMA0_MASK |
+				UCODE_ID_SDMA1_MASK |
+				UCODE_ID_CP_CE_MASK |
+				UCODE_ID_CP_ME_MASK |
+				UCODE_ID_CP_PFP_MASK |
+				UCODE_ID_CP_MEC_JT1_MASK |
+				UCODE_ID_CP_MEC_JT2_MASK;
+
+	cz_request_smu_load_fw(smumgr);
+	cz_check_fw_load_finish(smumgr, fw_to_check);
+
+	ret = cz_load_mec_firmware(smumgr);
+	if (ret)
+		printk(KERN_ERR "[ powerplay ] Mec Firmware load failed\n");
+
+	return ret;
+}
+
+static uint8_t cz_translate_firmware_enum_to_arg(
+			enum cz_scratch_entry firmware_enum)
+{
+	uint8_t ret = 0;
+
+	switch (firmware_enum) {
+	case CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0:
+		ret = UCODE_ID_SDMA0;
+		break;
+	case
CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1: + ret = UCODE_ID_SDMA1; + break; + case CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE: + ret = UCODE_ID_CP_CE; + break; + case CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP: + ret = UCODE_ID_CP_PFP; + break; + case CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME: + ret = UCODE_ID_CP_ME; + break; + case CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1: + ret = UCODE_ID_CP_MEC_JT1; + break; + case CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2: + ret = UCODE_ID_CP_MEC_JT2; + break; + case CZ_SCRATCH_ENTRY_UCODE_ID_GMCON_RENG: + ret = UCODE_ID_GMCON_RENG; + break; + case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G: + ret = UCODE_ID_RLC_G; + break; + case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH: + ret = UCODE_ID_RLC_SCRATCH; + break; + case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM: + ret = UCODE_ID_RLC_SRM_ARAM; + break; + case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM: + ret = UCODE_ID_RLC_SRM_DRAM; + break; + case CZ_SCRATCH_ENTRY_UCODE_ID_DMCU_ERAM: + ret = UCODE_ID_DMCU_ERAM; + break; + case CZ_SCRATCH_ENTRY_UCODE_ID_DMCU_IRAM: + ret = UCODE_ID_DMCU_IRAM; + break; + case CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING: + ret = TASK_ARG_INIT_MM_PWR_LOG; + break; + case CZ_SCRATCH_ENTRY_DATA_ID_SDMA_HALT: + case CZ_SCRATCH_ENTRY_DATA_ID_SYS_CLOCKGATING: + case CZ_SCRATCH_ENTRY_DATA_ID_SDMA_RING_REGS: + case CZ_SCRATCH_ENTRY_DATA_ID_NONGFX_REINIT: + case CZ_SCRATCH_ENTRY_DATA_ID_SDMA_START: + case CZ_SCRATCH_ENTRY_DATA_ID_IH_REGISTERS: + ret = TASK_ARG_REG_MMIO; + break; + case CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE: + ret = TASK_ARG_INIT_CLK_TABLE; + break; + } + + return ret; +} + +static enum cgs_ucode_id cz_convert_fw_type_to_cgs(uint32_t fw_type) +{ + enum cgs_ucode_id result = CGS_UCODE_ID_MAXIMUM; + + switch (fw_type) { + case UCODE_ID_SDMA0: + result = CGS_UCODE_ID_SDMA0; + break; + case UCODE_ID_SDMA1: + result = CGS_UCODE_ID_SDMA1; + break; + case UCODE_ID_CP_CE: + result = CGS_UCODE_ID_CP_CE; + break; + case UCODE_ID_CP_PFP: + result = CGS_UCODE_ID_CP_PFP; + break; + case UCODE_ID_CP_ME: + result = CGS_UCODE_ID_CP_ME; + break; + case UCODE_ID_CP_MEC_JT1: + result = CGS_UCODE_ID_CP_MEC_JT1; + break; + case UCODE_ID_CP_MEC_JT2: + result = CGS_UCODE_ID_CP_MEC_JT2; + break; + case UCODE_ID_RLC_G: + result = CGS_UCODE_ID_RLC_G; + break; + default: + break; + } + + return result; +} + +static int cz_smu_populate_single_scratch_task( + struct pp_smumgr *smumgr, + enum cz_scratch_entry fw_enum, + uint8_t type, bool is_last) +{ + uint8_t i; + struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend; + struct TOC *toc = (struct TOC *)cz_smu->toc_buffer.kaddr; + struct SMU_Task *task = &toc->tasks[cz_smu->toc_entry_used_count++]; + + task->type = type; + task->arg = cz_translate_firmware_enum_to_arg(fw_enum); + task->next = is_last ? 
END_OF_TASK_LIST : cz_smu->toc_entry_used_count; + + for (i = 0; i < cz_smu->scratch_buffer_length; i++) + if (cz_smu->scratch_buffer[i].firmware_ID == fw_enum) + break; + + if (i >= cz_smu->scratch_buffer_length) { + printk(KERN_ERR "[ powerplay ] Invalid Firmware Type\n"); + return -EINVAL; + } + + task->addr.low = cz_smu->scratch_buffer[i].mc_addr_low; + task->addr.high = cz_smu->scratch_buffer[i].mc_addr_high; + task->size_bytes = cz_smu->scratch_buffer[i].data_size; + + if (CZ_SCRATCH_ENTRY_DATA_ID_IH_REGISTERS == fw_enum) { + struct cz_ih_meta_data *pIHReg_restore = + (struct cz_ih_meta_data *)cz_smu->scratch_buffer[i].kaddr; + pIHReg_restore->command = + METADATA_CMD_MODE0 | METADATA_PERFORM_ON_LOAD; + } + + return 0; +} + +static int cz_smu_populate_single_ucode_load_task( + struct pp_smumgr *smumgr, + enum cz_scratch_entry fw_enum, + bool is_last) +{ + uint8_t i; + struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend; + struct TOC *toc = (struct TOC *)cz_smu->toc_buffer.kaddr; + struct SMU_Task *task = &toc->tasks[cz_smu->toc_entry_used_count++]; + + task->type = TASK_TYPE_UCODE_LOAD; + task->arg = cz_translate_firmware_enum_to_arg(fw_enum); + task->next = is_last ? END_OF_TASK_LIST : cz_smu->toc_entry_used_count; + + for (i = 0; i < cz_smu->driver_buffer_length; i++) + if (cz_smu->driver_buffer[i].firmware_ID == fw_enum) + break; + + if (i >= cz_smu->driver_buffer_length) { + printk(KERN_ERR "[ powerplay ] Invalid Firmware Type\n"); + return -EINVAL; + } + + task->addr.low = cz_smu->driver_buffer[i].mc_addr_low; + task->addr.high = cz_smu->driver_buffer[i].mc_addr_high; + task->size_bytes = cz_smu->driver_buffer[i].data_size; + + return 0; +} + +static int cz_smu_construct_toc_for_rlc_aram_save(struct pp_smumgr *smumgr) +{ + struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend; + + cz_smu->toc_entry_aram = cz_smu->toc_entry_used_count; + cz_smu_populate_single_scratch_task(smumgr, + CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM, + TASK_TYPE_UCODE_SAVE, true); + + return 0; +} + +static int cz_smu_initialize_toc_empty_job_list(struct pp_smumgr *smumgr) +{ + int i; + struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend; + struct TOC *toc = (struct TOC *)cz_smu->toc_buffer.kaddr; + + for (i = 0; i < NUM_JOBLIST_ENTRIES; i++) + toc->JobList[i] = (uint8_t)IGNORE_JOB; + + return 0; +} + +static int cz_smu_construct_toc_for_vddgfx_enter(struct pp_smumgr *smumgr) +{ + struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend; + struct TOC *toc = (struct TOC *)cz_smu->toc_buffer.kaddr; + + toc->JobList[JOB_GFX_SAVE] = (uint8_t)cz_smu->toc_entry_used_count; + cz_smu_populate_single_scratch_task(smumgr, + CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH, + TASK_TYPE_UCODE_SAVE, false); + + cz_smu_populate_single_scratch_task(smumgr, + CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM, + TASK_TYPE_UCODE_SAVE, true); + + return 0; +} + + +static int cz_smu_construct_toc_for_vddgfx_exit(struct pp_smumgr *smumgr) +{ + struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend; + struct TOC *toc = (struct TOC *)cz_smu->toc_buffer.kaddr; + + toc->JobList[JOB_GFX_RESTORE] = (uint8_t)cz_smu->toc_entry_used_count; + + cz_smu_populate_single_ucode_load_task(smumgr, + CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE, false); + cz_smu_populate_single_ucode_load_task(smumgr, + CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP, false); + cz_smu_populate_single_ucode_load_task(smumgr, + CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME, false); + cz_smu_populate_single_ucode_load_task(smumgr, + CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false); + 
cz_smu_populate_single_ucode_load_task(smumgr, + CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false); + cz_smu_populate_single_ucode_load_task(smumgr, + CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G, false); + + /* populate scratch */ + cz_smu_populate_single_scratch_task(smumgr, + CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH, + TASK_TYPE_UCODE_LOAD, false); + + cz_smu_populate_single_scratch_task(smumgr, + CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM, + TASK_TYPE_UCODE_LOAD, false); + + cz_smu_populate_single_scratch_task(smumgr, + CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM, + TASK_TYPE_UCODE_LOAD, true); + + return 0; +} + +static int cz_smu_construct_toc_for_power_profiling( + struct pp_smumgr *smumgr) +{ + struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend; + + cz_smu->toc_entry_power_profiling_index = cz_smu->toc_entry_used_count; + + cz_smu_populate_single_scratch_task(smumgr, + CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING, + TASK_TYPE_INITIALIZE, true); + return 0; +} + +static int cz_smu_construct_toc_for_bootup(struct pp_smumgr *smumgr) +{ + struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend; + + cz_smu->toc_entry_initialize_index = cz_smu->toc_entry_used_count; + + cz_smu_populate_single_ucode_load_task(smumgr, + CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0, false); + cz_smu_populate_single_ucode_load_task(smumgr, + CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1, false); + cz_smu_populate_single_ucode_load_task(smumgr, + CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE, false); + cz_smu_populate_single_ucode_load_task(smumgr, + CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP, false); + cz_smu_populate_single_ucode_load_task(smumgr, + CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME, false); + cz_smu_populate_single_ucode_load_task(smumgr, + CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false); + cz_smu_populate_single_ucode_load_task(smumgr, + CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false); + cz_smu_populate_single_ucode_load_task(smumgr, + CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G, true); + + return 0; +} + +static int cz_smu_construct_toc_for_clock_table(struct pp_smumgr *smumgr) +{ + struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend; + + cz_smu->toc_entry_clock_table = cz_smu->toc_entry_used_count; + + cz_smu_populate_single_scratch_task(smumgr, + CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE, + TASK_TYPE_INITIALIZE, true); + + return 0; +} + +static int cz_smu_construct_toc(struct pp_smumgr *smumgr) +{ + struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend; + + cz_smu->toc_entry_used_count = 0; + + cz_smu_initialize_toc_empty_job_list(smumgr); + + cz_smu_construct_toc_for_rlc_aram_save(smumgr); + + cz_smu_construct_toc_for_vddgfx_enter(smumgr); + + cz_smu_construct_toc_for_vddgfx_exit(smumgr); + + cz_smu_construct_toc_for_power_profiling(smumgr); + + cz_smu_construct_toc_for_bootup(smumgr); + + cz_smu_construct_toc_for_clock_table(smumgr); + + return 0; +} + +static int cz_smu_populate_firmware_entries(struct pp_smumgr *smumgr) +{ + struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend; + uint32_t firmware_type; + uint32_t i; + int ret; + enum cgs_ucode_id ucode_id; + struct cgs_firmware_info info = {0}; + + cz_smu->driver_buffer_length = 0; + + for (i = 0; i < sizeof(firmware_list)/sizeof(*firmware_list); i++) { + + firmware_type = cz_translate_firmware_enum_to_arg( + firmware_list[i]); + + ucode_id = cz_convert_fw_type_to_cgs(firmware_type); + + ret = cgs_get_firmware_info(smumgr->device, + ucode_id, &info); + + if (ret == 0) { + cz_smu->driver_buffer[i].mc_addr_high = + smu_upper_32_bits(info.mc_addr); + + cz_smu->driver_buffer[i].mc_addr_low = + 
smu_lower_32_bits(info.mc_addr); + + cz_smu->driver_buffer[i].data_size = info.image_size; + + cz_smu->driver_buffer[i].firmware_ID = firmware_list[i]; + cz_smu->driver_buffer_length++; + } + } + + return 0; +} + +static int cz_smu_populate_single_scratch_entry( + struct pp_smumgr *smumgr, + enum cz_scratch_entry scratch_type, + uint32_t ulsize_byte, + struct cz_buffer_entry *entry) +{ + struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend; + long long mc_addr = + ((long long)(cz_smu->smu_buffer.mc_addr_high) << 32) + | cz_smu->smu_buffer.mc_addr_low; + + uint32_t ulsize_aligned = SIZE_ALIGN_32(ulsize_byte); + + mc_addr += cz_smu->smu_buffer_used_bytes; + + entry->data_size = ulsize_byte; + entry->kaddr = (char *) cz_smu->smu_buffer.kaddr + + cz_smu->smu_buffer_used_bytes; + entry->mc_addr_low = smu_lower_32_bits(mc_addr); + entry->mc_addr_high = smu_upper_32_bits(mc_addr); + entry->firmware_ID = scratch_type; + + cz_smu->smu_buffer_used_bytes += ulsize_aligned; + + return 0; +} + +static int cz_download_pptable_settings(struct pp_smumgr *smumgr, void **table) +{ + struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend; + unsigned long i; + + for (i = 0; i < cz_smu->scratch_buffer_length; i++) { + if (cz_smu->scratch_buffer[i].firmware_ID + == CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE) + break; + } + + *table = (struct SMU8_Fusion_ClkTable *)cz_smu->scratch_buffer[i].kaddr; + + cz_send_msg_to_smc_with_parameter(smumgr, + PPSMC_MSG_SetClkTableAddrHi, + cz_smu->scratch_buffer[i].mc_addr_high); + + cz_send_msg_to_smc_with_parameter(smumgr, + PPSMC_MSG_SetClkTableAddrLo, + cz_smu->scratch_buffer[i].mc_addr_low); + + cz_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_ExecuteJob, + cz_smu->toc_entry_clock_table); + + cz_send_msg_to_smc(smumgr, PPSMC_MSG_ClkTableXferToDram); + + return 0; +} + +static int cz_upload_pptable_settings(struct pp_smumgr *smumgr) +{ + struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend; + unsigned long i; + + for (i = 0; i < cz_smu->scratch_buffer_length; i++) { + if (cz_smu->scratch_buffer[i].firmware_ID + == CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE) + break; + } + + cz_send_msg_to_smc_with_parameter(smumgr, + PPSMC_MSG_SetClkTableAddrHi, + cz_smu->scratch_buffer[i].mc_addr_high); + + cz_send_msg_to_smc_with_parameter(smumgr, + PPSMC_MSG_SetClkTableAddrLo, + cz_smu->scratch_buffer[i].mc_addr_low); + + cz_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_ExecuteJob, + cz_smu->toc_entry_clock_table); + + cz_send_msg_to_smc(smumgr, PPSMC_MSG_ClkTableXferToSmu); + + return 0; +} + +static int cz_smu_init(struct pp_smumgr *smumgr) +{ + struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend; + uint64_t mc_addr = 0; + int ret = 0; + + cz_smu->toc_buffer.data_size = 4096; + cz_smu->smu_buffer.data_size = + ALIGN(UCODE_ID_RLC_SCRATCH_SIZE_BYTE, 32) + + ALIGN(UCODE_ID_RLC_SRM_ARAM_SIZE_BYTE, 32) + + ALIGN(UCODE_ID_RLC_SRM_DRAM_SIZE_BYTE, 32) + + ALIGN(sizeof(struct SMU8_MultimediaPowerLogData), 32) + + ALIGN(sizeof(struct SMU8_Fusion_ClkTable), 32); + + ret = smu_allocate_memory(smumgr->device, + cz_smu->toc_buffer.data_size, + CGS_GPU_MEM_TYPE__GART_CACHEABLE, + PAGE_SIZE, + &mc_addr, + &cz_smu->toc_buffer.kaddr, + &cz_smu->toc_buffer.handle); + if (ret != 0) + return -1; + + cz_smu->toc_buffer.mc_addr_high = smu_upper_32_bits(mc_addr); + cz_smu->toc_buffer.mc_addr_low = smu_lower_32_bits(mc_addr); + + ret = smu_allocate_memory(smumgr->device, + cz_smu->smu_buffer.data_size, + CGS_GPU_MEM_TYPE__GART_CACHEABLE, + PAGE_SIZE, + &mc_addr, + 
&cz_smu->smu_buffer.kaddr, + &cz_smu->smu_buffer.handle); + if (ret != 0) + return -1; + + cz_smu->smu_buffer.mc_addr_high = smu_upper_32_bits(mc_addr); + cz_smu->smu_buffer.mc_addr_low = smu_lower_32_bits(mc_addr); + + cz_smu_populate_firmware_entries(smumgr); + if (0 != cz_smu_populate_single_scratch_entry(smumgr, + CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH, + UCODE_ID_RLC_SCRATCH_SIZE_BYTE, + &cz_smu->scratch_buffer[cz_smu->scratch_buffer_length++])) { + printk(KERN_ERR "[ powerplay ] Error when Populate Firmware Entry.\n"); + return -1; + } + + if (0 != cz_smu_populate_single_scratch_entry(smumgr, + CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM, + UCODE_ID_RLC_SRM_ARAM_SIZE_BYTE, + &cz_smu->scratch_buffer[cz_smu->scratch_buffer_length++])) { + printk(KERN_ERR "[ powerplay ] Error when Populate Firmware Entry.\n"); + return -1; + } + if (0 != cz_smu_populate_single_scratch_entry(smumgr, + CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM, + UCODE_ID_RLC_SRM_DRAM_SIZE_BYTE, + &cz_smu->scratch_buffer[cz_smu->scratch_buffer_length++])) { + printk(KERN_ERR "[ powerplay ] Error when Populate Firmware Entry.\n"); + return -1; + } + + if (0 != cz_smu_populate_single_scratch_entry(smumgr, + CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING, + sizeof(struct SMU8_MultimediaPowerLogData), + &cz_smu->scratch_buffer[cz_smu->scratch_buffer_length++])) { + printk(KERN_ERR "[ powerplay ] Error when Populate Firmware Entry.\n"); + return -1; + } + + if (0 != cz_smu_populate_single_scratch_entry(smumgr, + CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE, + sizeof(struct SMU8_Fusion_ClkTable), + &cz_smu->scratch_buffer[cz_smu->scratch_buffer_length++])) { + printk(KERN_ERR "[ powerplay ] Error when Populate Firmware Entry.\n"); + return -1; + } + cz_smu_construct_toc(smumgr); + + return 0; +} + +static int cz_smu_fini(struct pp_smumgr *smumgr) +{ + struct cz_smumgr *cz_smu; + + if (smumgr == NULL || smumgr->device == NULL) + return -EINVAL; + + cz_smu = (struct cz_smumgr *)smumgr->backend; + if (cz_smu) { + cgs_free_gpu_mem(smumgr->device, + cz_smu->toc_buffer.handle); + cgs_free_gpu_mem(smumgr->device, + cz_smu->smu_buffer.handle); + kfree(cz_smu); + kfree(smumgr); + } + + return 0; +} + +static const struct pp_smumgr_func cz_smu_funcs = { + .smu_init = cz_smu_init, + .smu_fini = cz_smu_fini, + .start_smu = cz_start_smu, + .check_fw_load_finish = cz_check_fw_load_finish, + .request_smu_load_fw = NULL, + .request_smu_load_specific_fw = NULL, + .get_argument = cz_smum_get_argument, + .send_msg_to_smc = cz_send_msg_to_smc, + .send_msg_to_smc_with_parameter = cz_send_msg_to_smc_with_parameter, + .download_pptable_settings = cz_download_pptable_settings, + .upload_pptable_settings = cz_upload_pptable_settings, +}; + +int cz_smum_init(struct pp_smumgr *smumgr) +{ + struct cz_smumgr *cz_smu; + + cz_smu = kzalloc(sizeof(struct cz_smumgr), GFP_KERNEL); + if (cz_smu == NULL) + return -ENOMEM; + + smumgr->backend = cz_smu; + smumgr->smumgr_funcs = &cz_smu_funcs; + return 0; +} diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.h new file mode 100644 index 000000000000..883818039248 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.h @@ -0,0 +1,102 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#ifndef _CZ_SMUMGR_H_ +#define _CZ_SMUMGR_H_ + + +#define MAX_NUM_FIRMWARE 8 +#define MAX_NUM_SCRATCH 11 +#define CZ_SCRATCH_SIZE_NONGFX_CLOCKGATING 1024 +#define CZ_SCRATCH_SIZE_NONGFX_GOLDENSETTING 2048 +#define CZ_SCRATCH_SIZE_SDMA_METADATA 1024 +#define CZ_SCRATCH_SIZE_IH ((2*256+1)*4) + +enum cz_scratch_entry { + CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0 = 0, + CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1, + CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE, + CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP, + CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME, + CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, + CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, + CZ_SCRATCH_ENTRY_UCODE_ID_GMCON_RENG, + CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G, + CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH, + CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM, + CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM, + CZ_SCRATCH_ENTRY_UCODE_ID_DMCU_ERAM, + CZ_SCRATCH_ENTRY_UCODE_ID_DMCU_IRAM, + CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING, + CZ_SCRATCH_ENTRY_DATA_ID_SDMA_HALT, + CZ_SCRATCH_ENTRY_DATA_ID_SYS_CLOCKGATING, + CZ_SCRATCH_ENTRY_DATA_ID_SDMA_RING_REGS, + CZ_SCRATCH_ENTRY_DATA_ID_NONGFX_REINIT, + CZ_SCRATCH_ENTRY_DATA_ID_SDMA_START, + CZ_SCRATCH_ENTRY_DATA_ID_IH_REGISTERS, + CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE +}; + +struct cz_buffer_entry { + uint32_t data_size; + uint32_t mc_addr_low; + uint32_t mc_addr_high; + void *kaddr; + enum cz_scratch_entry firmware_ID; + unsigned long handle; /* as bo handle used when release bo */ +}; + +struct cz_register_index_data_pair { + uint32_t offset; + uint32_t value; +}; + +struct cz_ih_meta_data { + uint32_t command; + struct cz_register_index_data_pair register_index_value_pair[1]; +}; + +struct cz_smumgr { + uint8_t driver_buffer_length; + uint8_t scratch_buffer_length; + uint16_t toc_entry_used_count; + uint16_t toc_entry_initialize_index; + uint16_t toc_entry_power_profiling_index; + uint16_t toc_entry_aram; + uint16_t toc_entry_ih_register_restore_task_index; + uint16_t toc_entry_clock_table; + uint16_t ih_register_restore_task_size; + uint16_t smu_buffer_used_bytes; + + struct cz_buffer_entry toc_buffer; + struct cz_buffer_entry smu_buffer; + struct cz_buffer_entry firmware_buffer; + struct cz_buffer_entry driver_buffer[MAX_NUM_FIRMWARE]; + struct cz_buffer_entry meta_data_buffer[MAX_NUM_FIRMWARE]; + struct cz_buffer_entry scratch_buffer[MAX_NUM_SCRATCH]; +}; + +struct pp_smumgr; + +extern int cz_smum_init(struct pp_smumgr *smumgr); + +#endif diff --git 
a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c new file mode 100644 index 000000000000..cdbb9f89bf36 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c @@ -0,0 +1,1042 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include "smumgr.h" +#include "smu73.h" +#include "smu_ucode_xfer_vi.h" +#include "fiji_smumgr.h" +#include "fiji_ppsmc.h" +#include "smu73_discrete.h" +#include "ppatomctrl.h" +#include "smu/smu_7_1_3_d.h" +#include "smu/smu_7_1_3_sh_mask.h" +#include "gmc/gmc_8_1_d.h" +#include "gmc/gmc_8_1_sh_mask.h" +#include "oss/oss_3_0_d.h" +#include "gca/gfx_8_0_d.h" +#include "bif/bif_5_0_d.h" +#include "bif/bif_5_0_sh_mask.h" +#include "pp_debug.h" +#include "fiji_pwrvirus.h" + +#define AVFS_EN_MSB 1568 +#define AVFS_EN_LSB 1568 + +#define FIJI_SMC_SIZE 0x20000 + +struct SMU73_Discrete_GraphicsLevel avfs_graphics_level[8] = { + /* Min Sclk pcie DeepSleep Activity CgSpll CgSpll spllSpread SpllSpread CcPwr CcPwr Sclk Display Enabled Enabled Voltage Power */ + /* Voltage, Frequency, DpmLevel, DivId, Level, FuncCntl3, FuncCntl4, Spectrum, Spectrum2, DynRm, DynRm1 Did, Watermark, ForActivity, ForThrottle, UpHyst, DownHyst, DownHyst, Throttle */ + { 0x3c0fd047, 0x30750000, 0x00, 0x03, 0x1e00, 0x00200410, 0x87020000, 0x21680000, 0x0c000000, 0, 0, 0x16, 0x00, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00 }, + { 0xa00fd047, 0x409c0000, 0x01, 0x04, 0x1e00, 0x00800510, 0x87020000, 0x21680000, 0x11000000, 0, 0, 0x16, 0x00, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00 }, + { 0x0410d047, 0x50c30000, 0x01, 0x00, 0x1e00, 0x00600410, 0x87020000, 0x21680000, 0x0d000000, 0, 0, 0x0e, 0x00, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00 }, + { 0x6810d047, 0x60ea0000, 0x01, 0x00, 0x1e00, 0x00800410, 0x87020000, 0x21680000, 0x0e000000, 0, 0, 0x0c, 0x00, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00 }, + { 0xcc10d047, 0xe8fd0000, 0x01, 0x00, 0x1e00, 0x00e00410, 0x87020000, 0x21680000, 0x0f000000, 0, 0, 0x0c, 0x00, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00 }, + { 0x3011d047, 0x70110100, 0x01, 0x00, 0x1e00, 0x00400510, 0x87020000, 0x21680000, 0x10000000, 0, 0, 0x0c, 0x00, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00 }, + { 0x9411d047, 0xf8240100, 0x01, 0x00, 0x1e00, 0x00a00510, 0x87020000, 0x21680000, 0x11000000, 0, 0, 0x0c, 0x00, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00 }, + { 0xf811d047, 0x80380100, 0x01, 0x00, 0x1e00, 0x00000610, 0x87020000, 0x21680000, 0x12000000, 0, 0, 0x0c, 0x01, 0x01, 0x01, 0x00, 
0x00, 0x00, 0x00 } +}; + +static enum cgs_ucode_id fiji_convert_fw_type_to_cgs(uint32_t fw_type) +{ + enum cgs_ucode_id result = CGS_UCODE_ID_MAXIMUM; + + switch (fw_type) { + case UCODE_ID_SMU: + result = CGS_UCODE_ID_SMU; + break; + case UCODE_ID_SDMA0: + result = CGS_UCODE_ID_SDMA0; + break; + case UCODE_ID_SDMA1: + result = CGS_UCODE_ID_SDMA1; + break; + case UCODE_ID_CP_CE: + result = CGS_UCODE_ID_CP_CE; + break; + case UCODE_ID_CP_PFP: + result = CGS_UCODE_ID_CP_PFP; + break; + case UCODE_ID_CP_ME: + result = CGS_UCODE_ID_CP_ME; + break; + case UCODE_ID_CP_MEC: + result = CGS_UCODE_ID_CP_MEC; + break; + case UCODE_ID_CP_MEC_JT1: + result = CGS_UCODE_ID_CP_MEC_JT1; + break; + case UCODE_ID_CP_MEC_JT2: + result = CGS_UCODE_ID_CP_MEC_JT2; + break; + case UCODE_ID_RLC_G: + result = CGS_UCODE_ID_RLC_G; + break; + default: + break; + } + + return result; +} +/** +* Set the address for reading/writing the SMC SRAM space. +* @param smumgr the address of the powerplay hardware manager. +* @param smc_addr the address in the SMC RAM to access. +*/ +static int fiji_set_smc_sram_address(struct pp_smumgr *smumgr, + uint32_t smc_addr, uint32_t limit) +{ + PP_ASSERT_WITH_CODE((0 == (3 & smc_addr)), + "SMC address must be 4 byte aligned.", return -EINVAL;); + PP_ASSERT_WITH_CODE((limit > (smc_addr + 3)), + "SMC address is beyond the SMC RAM area.", return -EINVAL;); + + cgs_write_register(smumgr->device, mmSMC_IND_INDEX_0, smc_addr); + SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0); + + return 0; +} + +/** +* Copy bytes from an array into the SMC RAM space. +* +* @param smumgr the address of the powerplay SMU manager. +* @param smcStartAddress the start address in the SMC RAM to copy bytes to. +* @param src the byte array to copy the bytes from. +* @param byteCount the number of bytes to copy. +*/ +int fiji_copy_bytes_to_smc(struct pp_smumgr *smumgr, + uint32_t smcStartAddress, const uint8_t *src, + uint32_t byteCount, uint32_t limit) +{ + int result; + uint32_t data, originalData; + uint32_t addr, extraShift; + + PP_ASSERT_WITH_CODE((0 == (3 & smcStartAddress)), + "SMC address must be 4 byte aligned.", return -EINVAL;); + PP_ASSERT_WITH_CODE((limit > (smcStartAddress + byteCount)), + "SMC address is beyond the SMC RAM area.", return -EINVAL;); + + addr = smcStartAddress; + + while (byteCount >= 4) { + /* Bytes are written into the SMC addres space with the MSB first. */ + data = src[0] * 0x1000000 + src[1] * 0x10000 + src[2] * 0x100 + src[3]; + + result = fiji_set_smc_sram_address(smumgr, addr, limit); + if (result) + return result; + + cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, data); + + src += 4; + byteCount -= 4; + addr += 4; + } + + if (byteCount) { + /* Now write the odd bytes left. + * Do a read modify write cycle. + */ + data = 0; + + result = fiji_set_smc_sram_address(smumgr, addr, limit); + if (result) + return result; + + originalData = cgs_read_register(smumgr->device, mmSMC_IND_DATA_0); + extraShift = 8 * (4 - byteCount); + + while (byteCount > 0) { + /* Bytes are written into the SMC addres + * space with the MSB first. 
+		 */
+			data = (0x100 * data) + *src++;
+			byteCount--;
+		}
+		data <<= extraShift;
+		data |= (originalData & ~((~0UL) << extraShift));
+
+		result = fiji_set_smc_sram_address(smumgr, addr, limit);
+		if (result)
+			return result;
+
+		cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, data);
+	}
+	return 0;
+}
+
+int fiji_program_jump_on_start(struct pp_smumgr *smumgr)
+{
+	static unsigned char data[] = { 0xE0, 0x00, 0x80, 0x40 };
+
+	fiji_copy_bytes_to_smc(smumgr, 0x0, data, 4, sizeof(data) + 1);
+
+	return 0;
+}
+
+/**
+* Return if the SMC is currently running.
+*
+* @param smumgr the address of the powerplay hardware manager.
+*/
+bool fiji_is_smc_ram_running(struct pp_smumgr *smumgr)
+{
+	return ((0 == SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device,
+			CGS_IND_REG__SMC,
+			SMC_SYSCON_CLOCK_CNTL_0, ck_disable))
+			&& (0x20100 <= cgs_read_ind_register(smumgr->device,
+					CGS_IND_REG__SMC, ixSMC_PC_C)));
+}
+
+/**
+* Send a message to the SMC, and wait for its response.
+*
+* @param smumgr the address of the powerplay hardware manager.
+* @param msg the message to send.
+* @return 0 on success, -1 if the SMC is not running.
+*/
+int fiji_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg)
+{
+	if (!fiji_is_smc_ram_running(smumgr))
+		return -1;
+
+	if (1 != SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP)) {
+		printk(KERN_ERR "Failed to send Previous Message.");
+		SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
+	}
+
+	cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg);
+	SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
+
+	return 0;
+}
+
+/**
+ * Send a message to the SMC with a parameter
+ * @param smumgr: the address of the powerplay hardware manager.
+ * @param msg: the message to send.
+ * @param parameter: the parameter to send
+ * @return 0 on success, -1 if the SMC is not running.
+ */
+int fiji_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr,
+		uint16_t msg, uint32_t parameter)
+{
+	if (!fiji_is_smc_ram_running(smumgr))
+		return -1;
+
+	if (1 != SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP)) {
+		printk(KERN_ERR "Failed to send Previous Message.");
+		SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
+	}
+
+	cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, parameter);
+	cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg);
+	SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
+
+	return 0;
+}
+
+
+/**
+* Send a message to the SMC with a parameter, do not wait for the response
+*
+* @param smumgr: the address of the powerplay hardware manager.
+* @param msg: the message to send.
+* @param parameter: the parameter to send
+* @return Always 0; the SMC response is not waited for.
+*/
+int fiji_send_msg_to_smc_with_parameter_without_waiting(
+		struct pp_smumgr *smumgr, uint16_t msg, uint32_t parameter)
+{
+	if (1 != SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP)) {
+		printk(KERN_ERR "Failed to send Previous Message.");
+		SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
+	}
+	cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, parameter);
+	cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg);
+
+	return 0;
+}
+
+/**
+* Uploads the SMU firmware image into SMC memory.
+*
+* @param smumgr the address of the powerplay SMU manager.
+* @return 0 on success, -EINVAL on failure.
+*/ + +static int fiji_upload_smu_firmware_image(struct pp_smumgr *smumgr) +{ + const uint8_t *src; + uint32_t byte_count; + uint32_t *data; + struct cgs_firmware_info info = {0}; + + cgs_get_firmware_info(smumgr->device, + fiji_convert_fw_type_to_cgs(UCODE_ID_SMU), &info); + + if (info.image_size & 3) { + printk(KERN_ERR "SMC ucode is not 4 bytes aligned\n"); + return -EINVAL; + } + + if (info.image_size > FIJI_SMC_SIZE) { + printk(KERN_ERR "SMC address is beyond the SMC RAM area\n"); + return -EINVAL; + } + + cgs_write_register(smumgr->device, mmSMC_IND_INDEX_0, 0x20000); + SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 1); + + byte_count = info.image_size; + src = (const uint8_t *)info.kptr; + + data = (uint32_t *)src; + for (; byte_count >= 4; data++, byte_count -= 4) + cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, data[0]); + + SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0); + return 0; +} + +/** +* Read a 32bit value from the SMC SRAM space. +* ALL PARAMETERS ARE IN HOST BYTE ORDER. +* @param smumgr the address of the powerplay hardware manager. +* @param smc_addr the address in the SMC RAM to access. +* @param value and output parameter for the data read from the SMC SRAM. +*/ +int fiji_read_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr, + uint32_t *value, uint32_t limit) +{ + int result = fiji_set_smc_sram_address(smumgr, smc_addr, limit); + + if (result) + return result; + + *value = cgs_read_register(smumgr->device, mmSMC_IND_DATA_0); + return 0; +} + +/** +* Write a 32bit value to the SMC SRAM space. +* ALL PARAMETERS ARE IN HOST BYTE ORDER. +* @param smumgr the address of the powerplay hardware manager. +* @param smc_addr the address in the SMC RAM to access. +* @param value to write to the SMC SRAM. 
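+* @param limit upper bound of the SMC SRAM region; the call fails if
+* @smc_addr is not 4 byte aligned or does not fit below this limit.
+* @return 0 on success, -EINVAL on an invalid address.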
+*/ +int fiji_write_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr, + uint32_t value, uint32_t limit) +{ + int result; + + result = fiji_set_smc_sram_address(smumgr, smc_addr, limit); + + if (result) + return result; + + cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, value); + return 0; +} + +static uint32_t fiji_get_mask_for_firmware_type(uint32_t fw_type) +{ + uint32_t result = 0; + + switch (fw_type) { + case UCODE_ID_SDMA0: + result = UCODE_ID_SDMA0_MASK; + break; + case UCODE_ID_SDMA1: + result = UCODE_ID_SDMA1_MASK; + break; + case UCODE_ID_CP_CE: + result = UCODE_ID_CP_CE_MASK; + break; + case UCODE_ID_CP_PFP: + result = UCODE_ID_CP_PFP_MASK; + break; + case UCODE_ID_CP_ME: + result = UCODE_ID_CP_ME_MASK; + break; + case UCODE_ID_CP_MEC_JT1: + result = UCODE_ID_CP_MEC_MASK | UCODE_ID_CP_MEC_JT1_MASK; + break; + case UCODE_ID_CP_MEC_JT2: + result = UCODE_ID_CP_MEC_MASK | UCODE_ID_CP_MEC_JT2_MASK; + break; + case UCODE_ID_RLC_G: + result = UCODE_ID_RLC_G_MASK; + break; + default: + printk(KERN_ERR "UCode type is out of range!"); + result = 0; + } + + return result; +} + +/* Populate one firmware image to the data structure */ +static int fiji_populate_single_firmware_entry(struct pp_smumgr *smumgr, + uint32_t fw_type, struct SMU_Entry *entry) +{ + int result; + struct cgs_firmware_info info = {0}; + + result = cgs_get_firmware_info( + smumgr->device, + fiji_convert_fw_type_to_cgs(fw_type), + &info); + + if (!result) { + entry->version = 0; + entry->id = (uint16_t)fw_type; + entry->image_addr_high = smu_upper_32_bits(info.mc_addr); + entry->image_addr_low = smu_lower_32_bits(info.mc_addr); + entry->meta_data_addr_high = 0; + entry->meta_data_addr_low = 0; + entry->data_size_byte = info.image_size; + entry->num_register_entries = 0; + + if (fw_type == UCODE_ID_RLC_G) + entry->flags = 1; + else + entry->flags = 0; + } + + return result; +} + +static int fiji_request_smu_load_fw(struct pp_smumgr *smumgr) +{ + struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend); + uint32_t fw_to_load; + struct SMU_DRAMData_TOC *toc; + + if (priv->soft_regs_start) + cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC, + priv->soft_regs_start + + offsetof(SMU73_SoftRegisters, UcodeLoadStatus), + 0x0); + + toc = (struct SMU_DRAMData_TOC *)priv->header; + toc->num_entries = 0; + toc->structure_version = 1; + + PP_ASSERT_WITH_CODE( + 0 == fiji_populate_single_firmware_entry(smumgr, + UCODE_ID_RLC_G, &toc->entry[toc->num_entries++]), + "Failed to Get Firmware Entry.\n" , return -1 ); + PP_ASSERT_WITH_CODE( + 0 == fiji_populate_single_firmware_entry(smumgr, + UCODE_ID_CP_CE, &toc->entry[toc->num_entries++]), + "Failed to Get Firmware Entry.\n" , return -1 ); + PP_ASSERT_WITH_CODE( + 0 == fiji_populate_single_firmware_entry(smumgr, + UCODE_ID_CP_PFP, &toc->entry[toc->num_entries++]), + "Failed to Get Firmware Entry.\n" , return -1 ); + PP_ASSERT_WITH_CODE( + 0 == fiji_populate_single_firmware_entry(smumgr, + UCODE_ID_CP_ME, &toc->entry[toc->num_entries++]), + "Failed to Get Firmware Entry.\n" , return -1 ); + PP_ASSERT_WITH_CODE( + 0 == fiji_populate_single_firmware_entry(smumgr, + UCODE_ID_CP_MEC, &toc->entry[toc->num_entries++]), + "Failed to Get Firmware Entry.\n" , return -1 ); + PP_ASSERT_WITH_CODE( + 0 == fiji_populate_single_firmware_entry(smumgr, + UCODE_ID_CP_MEC_JT1, &toc->entry[toc->num_entries++]), + "Failed to Get Firmware Entry.\n" , return -1 ); + PP_ASSERT_WITH_CODE( + 0 == fiji_populate_single_firmware_entry(smumgr, + UCODE_ID_CP_MEC_JT2, 
&toc->entry[toc->num_entries++]), + "Failed to Get Firmware Entry.\n" , return -1 ); + PP_ASSERT_WITH_CODE( + 0 == fiji_populate_single_firmware_entry(smumgr, + UCODE_ID_SDMA0, &toc->entry[toc->num_entries++]), + "Failed to Get Firmware Entry.\n" , return -1 ); + PP_ASSERT_WITH_CODE( + 0 == fiji_populate_single_firmware_entry(smumgr, + UCODE_ID_SDMA1, &toc->entry[toc->num_entries++]), + "Failed to Get Firmware Entry.\n" , return -1 ); + + fiji_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_DRV_DRAM_ADDR_HI, + priv->header_buffer.mc_addr_high); + fiji_send_msg_to_smc_with_parameter(smumgr,PPSMC_MSG_DRV_DRAM_ADDR_LO, + priv->header_buffer.mc_addr_low); + + fw_to_load = UCODE_ID_RLC_G_MASK + + UCODE_ID_SDMA0_MASK + + UCODE_ID_SDMA1_MASK + + UCODE_ID_CP_CE_MASK + + UCODE_ID_CP_ME_MASK + + UCODE_ID_CP_PFP_MASK + + UCODE_ID_CP_MEC_MASK + + UCODE_ID_CP_MEC_JT1_MASK + + UCODE_ID_CP_MEC_JT2_MASK; + + if (fiji_send_msg_to_smc_with_parameter(smumgr, + PPSMC_MSG_LoadUcodes, fw_to_load)) + printk(KERN_ERR "Fail to Request SMU Load uCode"); + + return 0; +} + + +/* Check if the FW has been loaded, SMU will not return + * if loading has not finished. + */ +static int fiji_check_fw_load_finish(struct pp_smumgr *smumgr, + uint32_t fw_type) +{ + struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend); + uint32_t mask = fiji_get_mask_for_firmware_type(fw_type); + + /* Check SOFT_REGISTERS_TABLE_28.UcodeLoadStatus */ + if (smum_wait_on_indirect_register(smumgr, mmSMC_IND_INDEX, + priv->soft_regs_start + + offsetof(SMU73_SoftRegisters, UcodeLoadStatus), + mask, mask)) { + printk(KERN_ERR "check firmware loading failed\n"); + return -EINVAL; + } + return 0; +} + + +static int fiji_reload_firmware(struct pp_smumgr *smumgr) +{ + return smumgr->smumgr_funcs->start_smu(smumgr); +} + +static bool fiji_is_hw_virtualization_enabled(struct pp_smumgr *smumgr) +{ + uint32_t value; + + value = cgs_read_register(smumgr->device, mmBIF_IOV_FUNC_IDENTIFIER); + if (value & BIF_IOV_FUNC_IDENTIFIER__IOV_ENABLE_MASK) { + /* driver reads on SR-IOV enabled PF: 0x80000000 + * driver reads on SR-IOV enabled VF: 0x80000001 + * driver reads on SR-IOV disabled: 0x00000000 + */ + return true; + } + return false; +} + +static int fiji_request_smu_specific_fw_load(struct pp_smumgr *smumgr, uint32_t fw_type) +{ + if (fiji_is_hw_virtualization_enabled(smumgr)) { + uint32_t masks = fiji_get_mask_for_firmware_type(fw_type); + if (fiji_send_msg_to_smc_with_parameter_without_waiting(smumgr, + PPSMC_MSG_LoadUcodes, masks)) + printk(KERN_ERR "Fail to Request SMU Load uCode"); + } + /* For non-virtualization cases, + * SMU loads all FWs at once in fiji_request_smu_load_fw. 
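+	 * so there is nothing to do here and success is reported.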
+ */ + return 0; +} + +static int fiji_start_smu_in_protection_mode(struct pp_smumgr *smumgr) +{ + int result = 0; + + /* Wait for smc boot up */ + /* SMUM_WAIT_INDIRECT_FIELD_UNEQUAL(smumgr, SMC_IND, + RCU_UC_EVENTS, boot_seq_done, 0); */ + + SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, + SMC_SYSCON_RESET_CNTL, rst_reg, 1); + + result = fiji_upload_smu_firmware_image(smumgr); + if (result) + return result; + + /* Clear status */ + cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC, + ixSMU_STATUS, 0); + + SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, + SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0); + + /* De-assert reset */ + SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, + SMC_SYSCON_RESET_CNTL, rst_reg, 0); + + /* Wait for ROM firmware to initialize interrupt hendler */ + /*SMUM_WAIT_VFPF_INDIRECT_REGISTER(smumgr, SMC_IND, + SMC_INTR_CNTL_MASK_0, 0x10040, 0xFFFFFFFF); */ + + /* Set SMU Auto Start */ + SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, + SMU_INPUT_DATA, AUTO_START, 1); + + /* Clear firmware interrupt enable flag */ + cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC, + ixFIRMWARE_FLAGS, 0); + + SMUM_WAIT_VFPF_INDIRECT_FIELD(smumgr, SMC_IND, RCU_UC_EVENTS, + INTERRUPTS_ENABLED, 1); + + cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, 0x20000); + cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, PPSMC_MSG_Test); + SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0); + + /* Wait for done bit to be set */ + SMUM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(smumgr, SMC_IND, + SMU_STATUS, SMU_DONE, 0); + + /* Check pass/failed indicator */ + if (1 != SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, + SMU_STATUS, SMU_PASS)) { + PP_ASSERT_WITH_CODE(false, + "SMU Firmware start failed!", return -1); + } + + /* Wait for firmware to initialize */ + SMUM_WAIT_VFPF_INDIRECT_FIELD(smumgr, SMC_IND, + FIRMWARE_FLAGS, INTERRUPTS_ENABLED, 1); + + return result; +} + +static int fiji_start_smu_in_non_protection_mode(struct pp_smumgr *smumgr) +{ + int result = 0; + + /* wait for smc boot up */ + SMUM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(smumgr, SMC_IND, + RCU_UC_EVENTS, boot_seq_done, 0); + + /* Clear firmware interrupt enable flag */ + cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC, + ixFIRMWARE_FLAGS, 0); + + /* Assert reset */ + SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, + SMC_SYSCON_RESET_CNTL, rst_reg, 1); + + result = fiji_upload_smu_firmware_image(smumgr); + if (result) + return result; + + /* Set smc instruct start point at 0x0 */ + fiji_program_jump_on_start(smumgr); + + /* Enable clock */ + SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, + SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0); + + /* De-assert reset */ + SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, + SMC_SYSCON_RESET_CNTL, rst_reg, 0); + + /* Wait for firmware to initialize */ + SMUM_WAIT_VFPF_INDIRECT_FIELD(smumgr, SMC_IND, + FIRMWARE_FLAGS, INTERRUPTS_ENABLED, 1); + + return result; +} + +int fiji_setup_pwr_virus(struct pp_smumgr *smumgr) +{ + int i, result = -1; + uint32_t reg, data; + PWR_Command_Table *virus = PwrVirusTable; + struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend); + + priv->avfs.AvfsBtcStatus = AVFS_LOAD_VIRUS; + for (i = 0; (i < PWR_VIRUS_TABLE_SIZE); i++) { + switch (virus->command) { + case PwrCmdWrite: + reg = virus->reg; + data = virus->data; + cgs_write_register(smumgr->device, reg, data); + break; + case PwrCmdEnd: + 
priv->avfs.AvfsBtcStatus = AVFS_BTC_VIRUS_LOADED; + result = 0; + break; + default: + printk(KERN_ERR "Table Exit with Invalid Command!"); + priv->avfs.AvfsBtcStatus = AVFS_BTC_VIRUS_FAIL; + result = -1; + break; + } + virus++; + } + return result; +} + +static int fiji_start_avfs_btc(struct pp_smumgr *smumgr) +{ + int result = 0; + struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend); + + priv->avfs.AvfsBtcStatus = AVFS_BTC_STARTED; + if (priv->avfs.AvfsBtcParam) { + if (!fiji_send_msg_to_smc_with_parameter(smumgr, + PPSMC_MSG_PerformBtc, priv->avfs.AvfsBtcParam)) { + if (!fiji_send_msg_to_smc(smumgr, PPSMC_MSG_EnableAvfs)) { + priv->avfs.AvfsBtcStatus = AVFS_BTC_COMPLETED_UNSAVED; + result = 0; + } else { + printk(KERN_ERR "[AVFS][fiji_start_avfs_btc] Attempt" + " to Enable AVFS Failed!"); + fiji_send_msg_to_smc(smumgr, PPSMC_MSG_DisableAvfs); + result = -1; + } + } else { + printk(KERN_ERR "[AVFS][fiji_start_avfs_btc] " + "PerformBTC SMU msg failed"); + result = -1; + } + } + /* Soft-Reset to reset the engine before loading uCode */ + /* halt */ + cgs_write_register(smumgr->device, mmCP_MEC_CNTL, 0x50000000); + /* reset everything */ + cgs_write_register(smumgr->device, mmGRBM_SOFT_RESET, 0xffffffff); + /* clear reset */ + cgs_write_register(smumgr->device, mmGRBM_SOFT_RESET, 0); + + return result; +} + +int fiji_setup_pm_fuse_for_avfs(struct pp_smumgr *smumgr) +{ + int result = 0; + uint32_t table_start; + uint32_t charz_freq_addr, inversion_voltage_addr, charz_freq; + uint16_t inversion_voltage; + + charz_freq = 0x30750000; /* In 10KHz units 0x00007530 Actual value */ + inversion_voltage = 0x1A04; /* mV Q14.2 0x41A Actual value */ + + PP_ASSERT_WITH_CODE(0 == fiji_read_smc_sram_dword(smumgr, + SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU73_Firmware_Header, + PmFuseTable), &table_start, 0x40000), + "[AVFS][Fiji_SetupGfxLvlStruct] SMU could not communicate " + "starting address of PmFuse structure", + return -1;); + + charz_freq_addr = table_start + + offsetof(struct SMU73_Discrete_PmFuses, PsmCharzFreq); + inversion_voltage_addr = table_start + + offsetof(struct SMU73_Discrete_PmFuses, InversionVoltage); + + result = fiji_copy_bytes_to_smc(smumgr, charz_freq_addr, + (uint8_t *)(&charz_freq), sizeof(charz_freq), 0x40000); + PP_ASSERT_WITH_CODE(0 == result, + "[AVFS][fiji_setup_pm_fuse_for_avfs] charz_freq could not " + "be populated.", return -1;); + + result = fiji_copy_bytes_to_smc(smumgr, inversion_voltage_addr, + (uint8_t *)(&inversion_voltage), sizeof(inversion_voltage), 0x40000); + PP_ASSERT_WITH_CODE(0 == result, "[AVFS][fiji_setup_pm_fuse_for_avfs] " + "charz_freq could not be populated.", return -1;); + + return result; +} + +int fiji_setup_graphics_level_structure(struct pp_smumgr *smumgr) +{ + int32_t vr_config; + uint32_t table_start; + uint32_t level_addr, vr_config_addr; + uint32_t level_size = sizeof(avfs_graphics_level); + + PP_ASSERT_WITH_CODE(0 == fiji_read_smc_sram_dword(smumgr, + SMU7_FIRMWARE_HEADER_LOCATION + + offsetof(SMU73_Firmware_Header, DpmTable), + &table_start, 0x40000), + "[AVFS][Fiji_SetupGfxLvlStruct] SMU could not " + "communicate starting address of DPM table", + return -1;); + + /* Default value for vr_config = + * VR_MERGED_WITH_VDDC + VR_STATIC_VOLTAGE(VDDCI) */ + vr_config = 0x01000500; /* Real value:0x50001 */ + + vr_config_addr = table_start + + offsetof(SMU73_Discrete_DpmTable, VRConfig); + + PP_ASSERT_WITH_CODE(0 == fiji_copy_bytes_to_smc(smumgr, vr_config_addr, + (uint8_t *)&vr_config, sizeof(int32_t), 0x40000), + 
"[AVFS][Fiji_SetupGfxLvlStruct] Problems copying " + "vr_config value over to SMC", + return -1;); + + level_addr = table_start + offsetof(SMU73_Discrete_DpmTable, GraphicsLevel); + + PP_ASSERT_WITH_CODE(0 == fiji_copy_bytes_to_smc(smumgr, level_addr, + (uint8_t *)(&avfs_graphics_level), level_size, 0x40000), + "[AVFS][Fiji_SetupGfxLvlStruct] Copying of DPM table failed!", + return -1;); + + return 0; +} + +/* Work in Progress */ +int fiji_restore_vft_table(struct pp_smumgr *smumgr) +{ + struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend); + + if (AVFS_BTC_COMPLETED_SAVED == priv->avfs.AvfsBtcStatus) { + priv->avfs.AvfsBtcStatus = AVFS_BTC_COMPLETED_RESTORED; + return 0; + } else + return -EINVAL; +} + +/* Work in Progress */ +int fiji_save_vft_table(struct pp_smumgr *smumgr) +{ + struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend); + + if (AVFS_BTC_COMPLETED_SAVED == priv->avfs.AvfsBtcStatus) { + priv->avfs.AvfsBtcStatus = AVFS_BTC_COMPLETED_RESTORED; + return 0; + } else + return -EINVAL; +} + +int fiji_avfs_event_mgr(struct pp_smumgr *smumgr, bool smu_started) +{ + struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend); + + switch (priv->avfs.AvfsBtcStatus) { + case AVFS_BTC_COMPLETED_SAVED: /*S3 State - Pre SMU Start */ + priv->avfs.AvfsBtcStatus = AVFS_BTC_RESTOREVFT_FAILED; + PP_ASSERT_WITH_CODE(0 == fiji_restore_vft_table(smumgr), + "[AVFS][fiji_avfs_event_mgr] Could not Copy Graphics " + "Level table over to SMU", + return -1;); + priv->avfs.AvfsBtcStatus = AVFS_BTC_COMPLETED_RESTORED; + break; + case AVFS_BTC_COMPLETED_RESTORED: /*S3 State - Post SMU Start*/ + priv->avfs.AvfsBtcStatus = AVFS_BTC_SMUMSG_ERROR; + PP_ASSERT_WITH_CODE(0 == fiji_send_msg_to_smc(smumgr, + PPSMC_MSG_VftTableIsValid), + "[AVFS][fiji_avfs_event_mgr] SMU did not respond " + "correctly to VftTableIsValid Msg", + return -1;); + priv->avfs.AvfsBtcStatus = AVFS_BTC_SMUMSG_ERROR; + PP_ASSERT_WITH_CODE(0 == fiji_send_msg_to_smc(smumgr, + PPSMC_MSG_EnableAvfs), + "[AVFS][fiji_avfs_event_mgr] SMU did not respond " + "correctly to EnableAvfs Message Msg", + return -1;); + priv->avfs.AvfsBtcStatus = AVFS_BTC_COMPLETED_SAVED; + break; + case AVFS_BTC_BOOT: /*Cold Boot State - Post SMU Start*/ + if (!smu_started) + break; + priv->avfs.AvfsBtcStatus = AVFS_BTC_FAILED; + PP_ASSERT_WITH_CODE(0 == fiji_setup_pm_fuse_for_avfs(smumgr), + "[AVFS][fiji_avfs_event_mgr] Failure at " + "fiji_setup_pm_fuse_for_avfs", + return -1;); + priv->avfs.AvfsBtcStatus = AVFS_BTC_DPMTABLESETUP_FAILED; + PP_ASSERT_WITH_CODE(0 == fiji_setup_graphics_level_structure(smumgr), + "[AVFS][fiji_avfs_event_mgr] Could not Copy Graphics Level" + " table over to SMU", + return -1;); + priv->avfs.AvfsBtcStatus = AVFS_BTC_VIRUS_FAIL; + PP_ASSERT_WITH_CODE(0 == fiji_setup_pwr_virus(smumgr), + "[AVFS][fiji_avfs_event_mgr] Could not setup " + "Pwr Virus for AVFS ", + return -1;); + priv->avfs.AvfsBtcStatus = AVFS_BTC_FAILED; + PP_ASSERT_WITH_CODE(0 == fiji_start_avfs_btc(smumgr), + "[AVFS][fiji_avfs_event_mgr] Failure at " + "fiji_start_avfs_btc. AVFS Disabled", + return -1;); + priv->avfs.AvfsBtcStatus = AVFS_BTC_SAVEVFT_FAILED; + PP_ASSERT_WITH_CODE(0 == fiji_save_vft_table(smumgr), + "[AVFS][fiji_avfs_event_mgr] Could not save VFT Table", + return -1;); + priv->avfs.AvfsBtcStatus = AVFS_BTC_COMPLETED_SAVED; + break; + case AVFS_BTC_DISABLED: /* Do nothing */ + break; + case AVFS_BTC_NOTSUPPORTED: /* Do nothing */ + break; + default: + printk(KERN_ERR "[AVFS] Something is broken. 
See log!"); + break; + } + return 0; +} + +static int fiji_start_smu(struct pp_smumgr *smumgr) +{ + int result = 0; + struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend); + + /* Only start SMC if SMC RAM is not running */ + if (!fiji_is_smc_ram_running(smumgr)) { + fiji_avfs_event_mgr(smumgr, false); + + /* Check if SMU is running in protected mode */ + if (0 == SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, + CGS_IND_REG__SMC, + SMU_FIRMWARE, SMU_MODE)) { + result = fiji_start_smu_in_non_protection_mode(smumgr); + if (result) + return result; + } else { + result = fiji_start_smu_in_protection_mode(smumgr); + if (result) + return result; + } + fiji_avfs_event_mgr(smumgr, true); + } + + /* To initialize all clock gating before RLC loaded and running.*/ + cgs_set_clockgating_state(smumgr->device, + AMD_IP_BLOCK_TYPE_GFX, AMD_CG_STATE_GATE); + cgs_set_clockgating_state(smumgr->device, + AMD_IP_BLOCK_TYPE_GMC, AMD_CG_STATE_GATE); + cgs_set_clockgating_state(smumgr->device, + AMD_IP_BLOCK_TYPE_SDMA, AMD_CG_STATE_GATE); + cgs_set_clockgating_state(smumgr->device, + AMD_IP_BLOCK_TYPE_COMMON, AMD_CG_STATE_GATE); + + /* Setup SoftRegsStart here for register lookup in case + * DummyBackEnd is used and ProcessFirmwareHeader is not executed + */ + fiji_read_smc_sram_dword(smumgr, + SMU7_FIRMWARE_HEADER_LOCATION + + offsetof(SMU73_Firmware_Header, SoftRegisters), + &(priv->soft_regs_start), 0x40000); + + result = fiji_request_smu_load_fw(smumgr); + + return result; +} + +static bool fiji_is_hw_avfs_present(struct pp_smumgr *smumgr) +{ + + uint32_t efuse = 0; + uint32_t mask = (1 << ((AVFS_EN_MSB - AVFS_EN_LSB) + 1)) - 1; + + if (!atomctrl_read_efuse(smumgr->device, AVFS_EN_LSB, AVFS_EN_MSB, + mask, &efuse)) { + if (efuse) + return true; + } + return false; +} + +/** +* Write a 32bit value to the SMC SRAM space. +* ALL PARAMETERS ARE IN HOST BYTE ORDER. +* @param smumgr the address of the powerplay hardware manager. +* @param smc_addr the address in the SMC RAM to access. +* @param value to write to the SMC SRAM. 
+*/ +static int fiji_smu_init(struct pp_smumgr *smumgr) +{ + struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend); + uint64_t mc_addr; + + priv->header_buffer.data_size = + ((sizeof(struct SMU_DRAMData_TOC) / 4096) + 1) * 4096; + smu_allocate_memory(smumgr->device, + priv->header_buffer.data_size, + CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB, + PAGE_SIZE, + &mc_addr, + &priv->header_buffer.kaddr, + &priv->header_buffer.handle); + + priv->header = priv->header_buffer.kaddr; + priv->header_buffer.mc_addr_high = smu_upper_32_bits(mc_addr); + priv->header_buffer.mc_addr_low = smu_lower_32_bits(mc_addr); + + PP_ASSERT_WITH_CODE((NULL != priv->header), + "Out of memory.", + kfree(smumgr->backend); + cgs_free_gpu_mem(smumgr->device, + (cgs_handle_t)priv->header_buffer.handle); + return -1); + + priv->avfs.AvfsBtcStatus = AVFS_BTC_BOOT; + if (fiji_is_hw_avfs_present(smumgr)) + /* AVFS Parameter + * 0 - BTC DC disabled, BTC AC disabled + * 1 - BTC DC enabled, BTC AC disabled + * 2 - BTC DC disabled, BTC AC enabled + * 3 - BTC DC enabled, BTC AC enabled + * Default is 0 - BTC DC disabled, BTC AC disabled + */ + priv->avfs.AvfsBtcParam = 0; + else + priv->avfs.AvfsBtcStatus = AVFS_BTC_NOTSUPPORTED; + + priv->acpi_optimization = 1; + + return 0; +} + +static int fiji_smu_fini(struct pp_smumgr *smumgr) +{ + if (smumgr->backend) { + kfree(smumgr->backend); + smumgr->backend = NULL; + } + return 0; +} + +static const struct pp_smumgr_func fiji_smu_funcs = { + .smu_init = &fiji_smu_init, + .smu_fini = &fiji_smu_fini, + .start_smu = &fiji_start_smu, + .check_fw_load_finish = &fiji_check_fw_load_finish, + .request_smu_load_fw = &fiji_reload_firmware, + .request_smu_load_specific_fw = &fiji_request_smu_specific_fw_load, + .send_msg_to_smc = &fiji_send_msg_to_smc, + .send_msg_to_smc_with_parameter = &fiji_send_msg_to_smc_with_parameter, + .download_pptable_settings = NULL, + .upload_pptable_settings = NULL, +}; + +int fiji_smum_init(struct pp_smumgr *smumgr) +{ + struct fiji_smumgr *fiji_smu = NULL; + + fiji_smu = kzalloc(sizeof(struct fiji_smumgr), GFP_KERNEL); + + if (fiji_smu == NULL) + return -ENOMEM; + + smumgr->backend = fiji_smu; + smumgr->smumgr_funcs = &fiji_smu_funcs; + + return 0; +} diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.h new file mode 100644 index 000000000000..8cd22d9c9140 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.h @@ -0,0 +1,77 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#ifndef _FIJI_SMUMANAGER_H_ +#define _FIJI_SMUMANAGER_H_ + +enum AVFS_BTC_STATUS { + AVFS_BTC_BOOT = 0, + AVFS_BTC_BOOT_STARTEDSMU, + AVFS_LOAD_VIRUS, + AVFS_BTC_VIRUS_LOADED, + AVFS_BTC_VIRUS_FAIL, + AVFS_BTC_STARTED, + AVFS_BTC_FAILED, + AVFS_BTC_RESTOREVFT_FAILED, + AVFS_BTC_SAVEVFT_FAILED, + AVFS_BTC_DPMTABLESETUP_FAILED, + AVFS_BTC_COMPLETED_UNSAVED, + AVFS_BTC_COMPLETED_SAVED, + AVFS_BTC_COMPLETED_RESTORED, + AVFS_BTC_DISABLED, + AVFS_BTC_NOTSUPPORTED, + AVFS_BTC_SMUMSG_ERROR +}; + +struct fiji_smu_avfs { + enum AVFS_BTC_STATUS AvfsBtcStatus; + uint32_t AvfsBtcParam; +}; + +struct fiji_buffer_entry { + uint32_t data_size; + uint32_t mc_addr_low; + uint32_t mc_addr_high; + void *kaddr; + unsigned long handle; +}; + +struct fiji_smumgr { + uint8_t *header; + uint8_t *mec_image; + uint32_t soft_regs_start; + struct fiji_smu_avfs avfs; + uint32_t acpi_optimization; + + struct fiji_buffer_entry header_buffer; +}; + +int fiji_smum_init(struct pp_smumgr *smumgr); +int fiji_read_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smcAddress, + uint32_t *value, uint32_t limit); +int fiji_write_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr, + uint32_t value, uint32_t limit); +int fiji_copy_bytes_to_smc(struct pp_smumgr *smumgr, uint32_t smcStartAddress, + const uint8_t *src, uint32_t byteCount, uint32_t limit); + +#endif + diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c new file mode 100644 index 000000000000..063ae71c9830 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c @@ -0,0 +1,263 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ *
+ */
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include "pp_instance.h"
+#include "smumgr.h"
+#include "cgs_common.h"
+#include "linux/delay.h"
+#include "cz_smumgr.h"
+#include "tonga_smumgr.h"
+#include "fiji_smumgr.h"
+
+int smum_init(struct amd_pp_init *pp_init, struct pp_instance *handle)
+{
+	struct pp_smumgr *smumgr;
+
+	if ((handle == NULL) || (pp_init == NULL))
+		return -EINVAL;
+
+	smumgr = kzalloc(sizeof(struct pp_smumgr), GFP_KERNEL);
+	if (smumgr == NULL)
+		return -ENOMEM;
+
+	smumgr->device = pp_init->device;
+	smumgr->chip_family = pp_init->chip_family;
+	smumgr->chip_id = pp_init->chip_id;
+	smumgr->hw_revision = pp_init->rev_id;
+	smumgr->usec_timeout = AMD_MAX_USEC_TIMEOUT;
+	smumgr->reload_fw = 1;
+	handle->smu_mgr = smumgr;
+
+	switch (smumgr->chip_family) {
+	case AMD_FAMILY_CZ:
+		cz_smum_init(smumgr);
+		break;
+	case AMD_FAMILY_VI:
+		switch (smumgr->chip_id) {
+		case CHIP_TONGA:
+			tonga_smum_init(smumgr);
+			break;
+		case CHIP_FIJI:
+			fiji_smum_init(smumgr);
+			break;
+		default:
+			kfree(smumgr);
+			return -EINVAL;
+		}
+		break;
+	default:
+		kfree(smumgr);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+int smum_fini(struct pp_smumgr *smumgr)
+{
+	kfree(smumgr);
+	return 0;
+}
+
+int smum_get_argument(struct pp_smumgr *smumgr)
+{
+	if (NULL != smumgr->smumgr_funcs->get_argument)
+		return smumgr->smumgr_funcs->get_argument(smumgr);
+
+	return 0;
+}
+
+int smum_download_powerplay_table(struct pp_smumgr *smumgr,
+		void **table)
+{
+	if (NULL != smumgr->smumgr_funcs->download_pptable_settings)
+		return smumgr->smumgr_funcs->download_pptable_settings(smumgr,
+				table);
+
+	return 0;
+}
+
+int smum_upload_powerplay_table(struct pp_smumgr *smumgr)
+{
+	if (NULL != smumgr->smumgr_funcs->upload_pptable_settings)
+		return smumgr->smumgr_funcs->upload_pptable_settings(smumgr);
+
+	return 0;
+}
+
+int smum_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg)
+{
+	if (smumgr == NULL || smumgr->smumgr_funcs->send_msg_to_smc == NULL)
+		return -EINVAL;
+
+	return smumgr->smumgr_funcs->send_msg_to_smc(smumgr, msg);
+}
+
+int smum_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr,
+		uint16_t msg, uint32_t parameter)
+{
+	if (smumgr == NULL ||
+		smumgr->smumgr_funcs->send_msg_to_smc_with_parameter == NULL)
+		return -EINVAL;
+	return smumgr->smumgr_funcs->send_msg_to_smc_with_parameter(
+			smumgr, msg, parameter);
+}
+
+/*
+ * Returns once the part of the register indicated by the mask has
+ * reached the given value.
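+ * Polls the register once per microsecond for up to smumgr->usec_timeout
+ * iterations and returns -1 if the expected value is never seen.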
+ */ +int smum_wait_on_register(struct pp_smumgr *smumgr, + uint32_t index, + uint32_t value, uint32_t mask) +{ + uint32_t i; + uint32_t cur_value; + + if (smumgr == NULL || smumgr->device == NULL) + return -EINVAL; + + for (i = 0; i < smumgr->usec_timeout; i++) { + cur_value = cgs_read_register(smumgr->device, index); + if ((cur_value & mask) == (value & mask)) + break; + udelay(1); + } + + /* timeout means wrong logic*/ + if (i == smumgr->usec_timeout) + return -1; + + return 0; +} + +int smum_wait_for_register_unequal(struct pp_smumgr *smumgr, + uint32_t index, + uint32_t value, uint32_t mask) +{ + uint32_t i; + uint32_t cur_value; + + if (smumgr == NULL) + return -EINVAL; + + for (i = 0; i < smumgr->usec_timeout; i++) { + cur_value = cgs_read_register(smumgr->device, + index); + if ((cur_value & mask) != (value & mask)) + break; + udelay(1); + } + + /* timeout means wrong logic */ + if (i == smumgr->usec_timeout) + return -1; + + return 0; +} + + +/* + * Returns once the part of the register indicated by the mask + * has reached the given value.The indirect space is described by + * giving the memory-mapped index of the indirect index register. + */ +int smum_wait_on_indirect_register(struct pp_smumgr *smumgr, + uint32_t indirect_port, + uint32_t index, + uint32_t value, + uint32_t mask) +{ + if (smumgr == NULL || smumgr->device == NULL) + return -EINVAL; + + cgs_write_register(smumgr->device, indirect_port, index); + return smum_wait_on_register(smumgr, indirect_port + 1, + mask, value); +} + +void smum_wait_for_indirect_register_unequal( + struct pp_smumgr *smumgr, + uint32_t indirect_port, + uint32_t index, + uint32_t value, + uint32_t mask) +{ + if (smumgr == NULL || smumgr->device == NULL) + return; + cgs_write_register(smumgr->device, indirect_port, index); + smum_wait_for_register_unequal(smumgr, indirect_port + 1, + value, mask); +} + +int smu_allocate_memory(void *device, uint32_t size, + enum cgs_gpu_mem_type type, + uint32_t byte_align, uint64_t *mc_addr, + void **kptr, void *handle) +{ + int ret = 0; + cgs_handle_t cgs_handle; + + if (device == NULL || handle == NULL || + mc_addr == NULL || kptr == NULL) + return -EINVAL; + + ret = cgs_alloc_gpu_mem(device, type, size, byte_align, + 0, 0, (cgs_handle_t *)handle); + if (ret) + return -ENOMEM; + + cgs_handle = *(cgs_handle_t *)handle; + + ret = cgs_gmap_gpu_mem(device, cgs_handle, mc_addr); + if (ret) + goto error_gmap; + + ret = cgs_kmap_gpu_mem(device, cgs_handle, kptr); + if (ret) + goto error_kmap; + + return 0; + +error_kmap: + cgs_gunmap_gpu_mem(device, cgs_handle); + +error_gmap: + cgs_free_gpu_mem(device, cgs_handle); + return ret; +} + +int smu_free_memory(void *device, void *handle) +{ + cgs_handle_t cgs_handle = (cgs_handle_t)handle; + + if (device == NULL || handle == NULL) + return -EINVAL; + + cgs_kunmap_gpu_mem(device, cgs_handle); + cgs_gunmap_gpu_mem(device, cgs_handle); + cgs_free_gpu_mem(device, cgs_handle); + + return 0; +} diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c new file mode 100644 index 000000000000..ebdb43a8daef --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c @@ -0,0 +1,819 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#include +#include +#include +#include + +#include "smumgr.h" +#include "tonga_smumgr.h" +#include "pp_debug.h" +#include "smu_ucode_xfer_vi.h" +#include "tonga_ppsmc.h" +#include "smu/smu_7_1_2_d.h" +#include "smu/smu_7_1_2_sh_mask.h" +#include "cgs_common.h" + +#define TONGA_SMC_SIZE 0x20000 +#define BUFFER_SIZE 80000 +#define MAX_STRING_SIZE 15 +#define BUFFER_SIZETWO 131072 /*128 *1024*/ + +/** +* Set the address for reading/writing the SMC SRAM space. +* @param smumgr the address of the powerplay hardware manager. +* @param smcAddress the address in the SMC RAM to access. +*/ +static int tonga_set_smc_sram_address(struct pp_smumgr *smumgr, + uint32_t smcAddress, uint32_t limit) +{ + if (smumgr == NULL || smumgr->device == NULL) + return -EINVAL; + PP_ASSERT_WITH_CODE((0 == (3 & smcAddress)), + "SMC address must be 4 byte aligned.", + return -1;); + + PP_ASSERT_WITH_CODE((limit > (smcAddress + 3)), + "SMC address is beyond the SMC RAM area.", + return -1;); + + cgs_write_register(smumgr->device, mmSMC_IND_INDEX_0, smcAddress); + SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 0); + + return 0; +} + +/** +* Copy bytes from an array into the SMC RAM space. +* +* @param smumgr the address of the powerplay SMU manager. +* @param smcStartAddress the start address in the SMC RAM to copy bytes to. +* @param src the byte array to copy the bytes from. +* @param byteCount the number of bytes to copy. 
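+* @param limit upper bound of the SMC address range; the copy is refused
+* if it would run past this limit.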
+*/ +int tonga_copy_bytes_to_smc(struct pp_smumgr *smumgr, + uint32_t smcStartAddress, const uint8_t *src, + uint32_t byteCount, uint32_t limit) +{ + uint32_t addr; + uint32_t data, orig_data; + int result = 0; + uint32_t extra_shift; + + if (smumgr == NULL || smumgr->device == NULL) + return -EINVAL; + PP_ASSERT_WITH_CODE((0 == (3 & smcStartAddress)), + "SMC address must be 4 byte aligned.", + return 0;); + + PP_ASSERT_WITH_CODE((limit > (smcStartAddress + byteCount)), + "SMC address is beyond the SMC RAM area.", + return 0;); + + addr = smcStartAddress; + + while (byteCount >= 4) { + /* + * Bytes are written into the + * SMC address space with the MSB first + */ + data = (src[0] << 24) + (src[1] << 16) + (src[2] << 8) + src[3]; + + result = tonga_set_smc_sram_address(smumgr, addr, limit); + + if (result) + goto out; + + cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, data); + + src += 4; + byteCount -= 4; + addr += 4; + } + + if (0 != byteCount) { + /* Now write odd bytes left, do a read modify write cycle */ + data = 0; + + result = tonga_set_smc_sram_address(smumgr, addr, limit); + if (result) + goto out; + + orig_data = cgs_read_register(smumgr->device, + mmSMC_IND_DATA_0); + extra_shift = 8 * (4 - byteCount); + + while (byteCount > 0) { + data = (data << 8) + *src++; + byteCount--; + } + + data <<= extra_shift; + data |= (orig_data & ~((~0UL) << extra_shift)); + + result = tonga_set_smc_sram_address(smumgr, addr, limit); + if (result) + goto out; + + cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, data); + } + +out: + return result; +} + + +int tonga_program_jump_on_start(struct pp_smumgr *smumgr) +{ + static unsigned char pData[] = { 0xE0, 0x00, 0x80, 0x40 }; + + tonga_copy_bytes_to_smc(smumgr, 0x0, pData, 4, sizeof(pData)+1); + + return 0; +} + +/** +* Return if the SMC is currently running. +* +* @param smumgr the address of the powerplay hardware manager. +*/ +static int tonga_is_smc_ram_running(struct pp_smumgr *smumgr) +{ + return ((0 == SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, + SMC_SYSCON_CLOCK_CNTL_0, ck_disable)) + && (0x20100 <= cgs_read_ind_register(smumgr->device, + CGS_IND_REG__SMC, ixSMC_PC_C))); +} + +static int tonga_send_msg_to_smc_offset(struct pp_smumgr *smumgr) +{ + if (smumgr == NULL || smumgr->device == NULL) + return -EINVAL; + + SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0); + + cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, 0x20000); + cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, PPSMC_MSG_Test); + + SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0); + + return 0; +} + +/** +* Send a message to the SMC, and wait for its response. +* +* @param smumgr the address of the powerplay hardware manager. +* @param msg the message to send. +* @return The response that came from the SMC. +*/ +static int tonga_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg) +{ + if (smumgr == NULL || smumgr->device == NULL) + return -EINVAL; + + if (!tonga_is_smc_ram_running(smumgr)) + return -1; + + SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0); + PP_ASSERT_WITH_CODE( + 1 == SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP), + "Failed to send Previous Message.", + ); + + cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg); + + SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0); + PP_ASSERT_WITH_CODE( + 1 == SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP), + "Failed to send Message.", + ); + + return 0; +} + +/* +* Send a message to the SMC, and do not wait for its response. 
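+* The caller must check SMC_RESP_0 itself if it needs to know when the
+* message has been accepted.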
+* +* @param smumgr the address of the powerplay hardware manager. +* @param msg the message to send. +* @return The response that came from the SMC. +*/ +static int tonga_send_msg_to_smc_without_waiting + (struct pp_smumgr *smumgr, uint16_t msg) +{ + if (smumgr == NULL || smumgr->device == NULL) + return -EINVAL; + + SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0); + PP_ASSERT_WITH_CODE( + 1 == SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP), + "Failed to send Previous Message.", + ); + cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg); + + return 0; +} + +/* +* Send a message to the SMC with parameter +* +* @param smumgr: the address of the powerplay hardware manager. +* @param msg: the message to send. +* @param parameter: the parameter to send +* @return The response that came from the SMC. +*/ +static int tonga_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr, + uint16_t msg, uint32_t parameter) +{ + if (smumgr == NULL || smumgr->device == NULL) + return -EINVAL; + + if (!tonga_is_smc_ram_running(smumgr)) + return PPSMC_Result_Failed; + + SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0); + cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, parameter); + + return tonga_send_msg_to_smc(smumgr, msg); +} + +/* +* Send a message to the SMC with parameter, do not wait for response +* +* @param smumgr: the address of the powerplay hardware manager. +* @param msg: the message to send. +* @param parameter: the parameter to send +* @return The response that came from the SMC. +*/ +static int tonga_send_msg_to_smc_with_parameter_without_waiting( + struct pp_smumgr *smumgr, + uint16_t msg, uint32_t parameter) +{ + if (smumgr == NULL || smumgr->device == NULL) + return -EINVAL; + + SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0); + + cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, parameter); + + return tonga_send_msg_to_smc_without_waiting(smumgr, msg); +} + +/* + * Read a 32bit value from the SMC SRAM space. + * ALL PARAMETERS ARE IN HOST BYTE ORDER. + * @param smumgr the address of the powerplay hardware manager. + * @param smcAddress the address in the SMC RAM to access. + * @param value and output parameter for the data read from the SMC SRAM. + */ +int tonga_read_smc_sram_dword(struct pp_smumgr *smumgr, + uint32_t smcAddress, uint32_t *value, + uint32_t limit) +{ + int result; + + result = tonga_set_smc_sram_address(smumgr, smcAddress, limit); + + if (0 != result) + return result; + + *value = cgs_read_register(smumgr->device, mmSMC_IND_DATA_0); + + return 0; +} + +/* + * Write a 32bit value to the SMC SRAM space. + * ALL PARAMETERS ARE IN HOST BYTE ORDER. + * @param smumgr the address of the powerplay hardware manager. + * @param smcAddress the address in the SMC RAM to access. + * @param value to write to the SMC SRAM. 
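+ * @param limit upper bound of the SMC SRAM region that may be written.
+ * @return 0 on success, or a negative value if the address is rejected.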
+ */ +int tonga_write_smc_sram_dword(struct pp_smumgr *smumgr, + uint32_t smcAddress, uint32_t value, + uint32_t limit) +{ + int result; + + result = tonga_set_smc_sram_address(smumgr, smcAddress, limit); + + if (0 != result) + return result; + + cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, value); + + return 0; +} + +static int tonga_smu_fini(struct pp_smumgr *smumgr) +{ + if (smumgr->backend != NULL) { + kfree(smumgr->backend); + smumgr->backend = NULL; + } + return 0; +} + +static enum cgs_ucode_id tonga_convert_fw_type_to_cgs(uint32_t fw_type) +{ + enum cgs_ucode_id result = CGS_UCODE_ID_MAXIMUM; + + switch (fw_type) { + case UCODE_ID_SMU: + result = CGS_UCODE_ID_SMU; + break; + case UCODE_ID_SDMA0: + result = CGS_UCODE_ID_SDMA0; + break; + case UCODE_ID_SDMA1: + result = CGS_UCODE_ID_SDMA1; + break; + case UCODE_ID_CP_CE: + result = CGS_UCODE_ID_CP_CE; + break; + case UCODE_ID_CP_PFP: + result = CGS_UCODE_ID_CP_PFP; + break; + case UCODE_ID_CP_ME: + result = CGS_UCODE_ID_CP_ME; + break; + case UCODE_ID_CP_MEC: + result = CGS_UCODE_ID_CP_MEC; + break; + case UCODE_ID_CP_MEC_JT1: + result = CGS_UCODE_ID_CP_MEC_JT1; + break; + case UCODE_ID_CP_MEC_JT2: + result = CGS_UCODE_ID_CP_MEC_JT2; + break; + case UCODE_ID_RLC_G: + result = CGS_UCODE_ID_RLC_G; + break; + default: + break; + } + + return result; +} + +/** + * Convert the PPIRI firmware type to SMU type mask. + * For MEC, we need to check all MEC related type +*/ +static uint16_t tonga_get_mask_for_firmware_type(uint16_t firmwareType) +{ + uint16_t result = 0; + + switch (firmwareType) { + case UCODE_ID_SDMA0: + result = UCODE_ID_SDMA0_MASK; + break; + case UCODE_ID_SDMA1: + result = UCODE_ID_SDMA1_MASK; + break; + case UCODE_ID_CP_CE: + result = UCODE_ID_CP_CE_MASK; + break; + case UCODE_ID_CP_PFP: + result = UCODE_ID_CP_PFP_MASK; + break; + case UCODE_ID_CP_ME: + result = UCODE_ID_CP_ME_MASK; + break; + case UCODE_ID_CP_MEC: + case UCODE_ID_CP_MEC_JT1: + case UCODE_ID_CP_MEC_JT2: + result = UCODE_ID_CP_MEC_MASK; + break; + case UCODE_ID_RLC_G: + result = UCODE_ID_RLC_G_MASK; + break; + default: + break; + } + + return result; +} + +/** + * Check if the FW has been loaded, + * SMU will not return if loading has not finished. 
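+ * @return 0 once the UcodeLoadStatus bits for @fwType are set, -EINVAL if
+ * they are not observed within the timeout.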
+*/ +static int tonga_check_fw_load_finish(struct pp_smumgr *smumgr, uint32_t fwType) +{ + uint16_t fwMask = tonga_get_mask_for_firmware_type(fwType); + + if (0 != SMUM_WAIT_VFPF_INDIRECT_REGISTER(smumgr, SMC_IND, + SOFT_REGISTERS_TABLE_28, fwMask, fwMask)) { + printk(KERN_ERR "[ powerplay ] check firmware loading failed\n"); + return -EINVAL; + } + + return 0; +} + +/* Populate one firmware image to the data structure */ +static int tonga_populate_single_firmware_entry(struct pp_smumgr *smumgr, + uint16_t firmware_type, + struct SMU_Entry *pentry) +{ + int result; + struct cgs_firmware_info info = {0}; + + result = cgs_get_firmware_info( + smumgr->device, + tonga_convert_fw_type_to_cgs(firmware_type), + &info); + + if (result == 0) { + pentry->version = 0; + pentry->id = (uint16_t)firmware_type; + pentry->image_addr_high = smu_upper_32_bits(info.mc_addr); + pentry->image_addr_low = smu_lower_32_bits(info.mc_addr); + pentry->meta_data_addr_high = 0; + pentry->meta_data_addr_low = 0; + pentry->data_size_byte = info.image_size; + pentry->num_register_entries = 0; + + if (firmware_type == UCODE_ID_RLC_G) + pentry->flags = 1; + else + pentry->flags = 0; + } else { + return result; + } + + return result; +} + +static int tonga_request_smu_reload_fw(struct pp_smumgr *smumgr) +{ + struct tonga_smumgr *tonga_smu = + (struct tonga_smumgr *)(smumgr->backend); + uint16_t fw_to_load; + int result = 0; + struct SMU_DRAMData_TOC *toc; + /** + * First time this gets called during SmuMgr init, + * we haven't processed SMU header file yet, + * so Soft Register Start offset is unknown. + * However, for this case, UcodeLoadStatus is already 0, + * so we can skip this if the Soft Registers Start offset is 0. + */ + cgs_write_ind_register(smumgr->device, + CGS_IND_REG__SMC, ixSOFT_REGISTERS_TABLE_28, 0); + + tonga_send_msg_to_smc_with_parameter(smumgr, + PPSMC_MSG_SMU_DRAM_ADDR_HI, + tonga_smu->smu_buffer.mc_addr_high); + tonga_send_msg_to_smc_with_parameter(smumgr, + PPSMC_MSG_SMU_DRAM_ADDR_LO, + tonga_smu->smu_buffer.mc_addr_low); + + toc = (struct SMU_DRAMData_TOC *)tonga_smu->pHeader; + toc->num_entries = 0; + toc->structure_version = 1; + + PP_ASSERT_WITH_CODE( + 0 == tonga_populate_single_firmware_entry(smumgr, + UCODE_ID_RLC_G, + &toc->entry[toc->num_entries++]), + "Failed to Get Firmware Entry.\n", + return -1); + PP_ASSERT_WITH_CODE( + 0 == tonga_populate_single_firmware_entry(smumgr, + UCODE_ID_CP_CE, + &toc->entry[toc->num_entries++]), + "Failed to Get Firmware Entry.\n", + return -1); + PP_ASSERT_WITH_CODE( + 0 == tonga_populate_single_firmware_entry + (smumgr, UCODE_ID_CP_PFP, &toc->entry[toc->num_entries++]), + "Failed to Get Firmware Entry.\n", return -1); + PP_ASSERT_WITH_CODE( + 0 == tonga_populate_single_firmware_entry + (smumgr, UCODE_ID_CP_ME, &toc->entry[toc->num_entries++]), + "Failed to Get Firmware Entry.\n", return -1); + PP_ASSERT_WITH_CODE( + 0 == tonga_populate_single_firmware_entry + (smumgr, UCODE_ID_CP_MEC, &toc->entry[toc->num_entries++]), + "Failed to Get Firmware Entry.\n", return -1); + PP_ASSERT_WITH_CODE( + 0 == tonga_populate_single_firmware_entry + (smumgr, UCODE_ID_CP_MEC_JT1, &toc->entry[toc->num_entries++]), + "Failed to Get Firmware Entry.\n", return -1); + PP_ASSERT_WITH_CODE( + 0 == tonga_populate_single_firmware_entry + (smumgr, UCODE_ID_CP_MEC_JT2, &toc->entry[toc->num_entries++]), + "Failed to Get Firmware Entry.\n", return -1); + PP_ASSERT_WITH_CODE( + 0 == tonga_populate_single_firmware_entry + (smumgr, UCODE_ID_SDMA0, &toc->entry[toc->num_entries++]), + 
"Failed to Get Firmware Entry.\n", return -1); + PP_ASSERT_WITH_CODE( + 0 == tonga_populate_single_firmware_entry + (smumgr, UCODE_ID_SDMA1, &toc->entry[toc->num_entries++]), + "Failed to Get Firmware Entry.\n", return -1); + + tonga_send_msg_to_smc_with_parameter(smumgr, + PPSMC_MSG_DRV_DRAM_ADDR_HI, + tonga_smu->header_buffer.mc_addr_high); + tonga_send_msg_to_smc_with_parameter(smumgr, + PPSMC_MSG_DRV_DRAM_ADDR_LO, + tonga_smu->header_buffer.mc_addr_low); + + fw_to_load = UCODE_ID_RLC_G_MASK + + UCODE_ID_SDMA0_MASK + + UCODE_ID_SDMA1_MASK + + UCODE_ID_CP_CE_MASK + + UCODE_ID_CP_ME_MASK + + UCODE_ID_CP_PFP_MASK + + UCODE_ID_CP_MEC_MASK; + + PP_ASSERT_WITH_CODE( + 0 == tonga_send_msg_to_smc_with_parameter_without_waiting( + smumgr, PPSMC_MSG_LoadUcodes, fw_to_load), + "Fail to Request SMU Load uCode", return 0); + + return result; +} + +static int tonga_request_smu_load_specific_fw(struct pp_smumgr *smumgr, + uint32_t firmwareType) +{ + return 0; +} + +/** + * Upload the SMC firmware to the SMC microcontroller. + * + * @param smumgr the address of the powerplay hardware manager. + * @param pFirmware the data structure containing the various sections of the firmware. + */ +static int tonga_smu_upload_firmware_image(struct pp_smumgr *smumgr) +{ + const uint8_t *src; + uint32_t byte_count; + uint32_t *data; + struct cgs_firmware_info info = {0}; + + if (smumgr == NULL || smumgr->device == NULL) + return -EINVAL; + + cgs_get_firmware_info(smumgr->device, + tonga_convert_fw_type_to_cgs(UCODE_ID_SMU), &info); + + if (info.image_size & 3) { + printk(KERN_ERR "[ powerplay ] SMC ucode is not 4 bytes aligned\n"); + return -EINVAL; + } + + if (info.image_size > TONGA_SMC_SIZE) { + printk(KERN_ERR "[ powerplay ] SMC address is beyond the SMC RAM area\n"); + return -EINVAL; + } + + cgs_write_register(smumgr->device, mmSMC_IND_INDEX_0, 0x20000); + SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 1); + + byte_count = info.image_size; + src = (const uint8_t *)info.kptr; + + data = (uint32_t *)src; + for (; byte_count >= 4; data++, byte_count -= 4) + cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, data[0]); + + SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0); + + return 0; +} + +static int tonga_start_in_protection_mode(struct pp_smumgr *smumgr) +{ + int result; + + /* Assert reset */ + SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, + SMC_SYSCON_RESET_CNTL, rst_reg, 1); + + result = tonga_smu_upload_firmware_image(smumgr); + if (result) + return result; + + /* Clear status */ + cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC, + ixSMU_STATUS, 0); + + /* Enable clock */ + SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, + SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0); + + /* De-assert reset */ + SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, + SMC_SYSCON_RESET_CNTL, rst_reg, 0); + + /* Set SMU Auto Start */ + SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, + SMU_INPUT_DATA, AUTO_START, 1); + + /* Clear firmware interrupt enable flag */ + cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC, + ixFIRMWARE_FLAGS, 0); + + SMUM_WAIT_VFPF_INDIRECT_FIELD(smumgr, SMC_IND, + RCU_UC_EVENTS, INTERRUPTS_ENABLED, 1); + + /** + * Call Test SMU message with 0x20000 offset to trigger SMU start + */ + tonga_send_msg_to_smc_offset(smumgr); + + /* Wait for done bit to be set */ + SMUM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(smumgr, SMC_IND, + SMU_STATUS, SMU_DONE, 0); + + /* Check pass/failed 
indicator */ + if (1 != SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, + CGS_IND_REG__SMC, SMU_STATUS, SMU_PASS)) { + printk(KERN_ERR "[ powerplay ] SMU Firmware start failed\n"); + return -EINVAL; + } + + /* Wait for firmware to initialize */ + SMUM_WAIT_VFPF_INDIRECT_FIELD(smumgr, SMC_IND, + FIRMWARE_FLAGS, INTERRUPTS_ENABLED, 1); + + return 0; +} + + +static int tonga_start_in_non_protection_mode(struct pp_smumgr *smumgr) +{ + int result = 0; + + /* wait for smc boot up */ + SMUM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(smumgr, SMC_IND, + RCU_UC_EVENTS, boot_seq_done, 0); + + /*Clear firmware interrupt enable flag*/ + cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC, + ixFIRMWARE_FLAGS, 0); + + + SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, + SMC_SYSCON_RESET_CNTL, rst_reg, 1); + + result = tonga_smu_upload_firmware_image(smumgr); + + if (result != 0) + return result; + + /* Set smc instruct start point at 0x0 */ + tonga_program_jump_on_start(smumgr); + + + SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, + SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0); + + /*De-assert reset*/ + SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, + SMC_SYSCON_RESET_CNTL, rst_reg, 0); + + /* Wait for firmware to initialize */ + SMUM_WAIT_VFPF_INDIRECT_FIELD(smumgr, SMC_IND, + FIRMWARE_FLAGS, INTERRUPTS_ENABLED, 1); + + return result; +} + +static int tonga_start_smu(struct pp_smumgr *smumgr) +{ + int result; + + /* Only start SMC if SMC RAM is not running */ + if (!tonga_is_smc_ram_running(smumgr)) { + /*Check if SMU is running in protected mode*/ + if (0 == SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, + SMU_FIRMWARE, SMU_MODE)) { + result = tonga_start_in_non_protection_mode(smumgr); + if (result) + return result; + } else { + result = tonga_start_in_protection_mode(smumgr); + if (result) + return result; + } + } + + result = tonga_request_smu_reload_fw(smumgr); + + return result; +} + +/** + * Write a 32bit value to the SMC SRAM space. + * ALL PARAMETERS ARE IN HOST BYTE ORDER. + * @param smumgr the address of the powerplay hardware manager. + * @param smcAddress the address in the SMC RAM to access. + * @param value to write to the SMC SRAM. 
+ */ +static int tonga_smu_init(struct pp_smumgr *smumgr) +{ + struct tonga_smumgr *tonga_smu; + uint8_t *internal_buf; + uint64_t mc_addr = 0; + /* Allocate memory for backend private data */ + tonga_smu = (struct tonga_smumgr *)(smumgr->backend); + tonga_smu->header_buffer.data_size = + ((sizeof(struct SMU_DRAMData_TOC) / 4096) + 1) * 4096; + tonga_smu->smu_buffer.data_size = 200*4096; + + smu_allocate_memory(smumgr->device, + tonga_smu->header_buffer.data_size, + CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB, + PAGE_SIZE, + &mc_addr, + &tonga_smu->header_buffer.kaddr, + &tonga_smu->header_buffer.handle); + + tonga_smu->pHeader = tonga_smu->header_buffer.kaddr; + tonga_smu->header_buffer.mc_addr_high = smu_upper_32_bits(mc_addr); + tonga_smu->header_buffer.mc_addr_low = smu_lower_32_bits(mc_addr); + + PP_ASSERT_WITH_CODE((NULL != tonga_smu->pHeader), + "Out of memory.", + kfree(smumgr->backend); + cgs_free_gpu_mem(smumgr->device, + (cgs_handle_t)tonga_smu->header_buffer.handle); + return -1); + + smu_allocate_memory(smumgr->device, + tonga_smu->smu_buffer.data_size, + CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB, + PAGE_SIZE, + &mc_addr, + &tonga_smu->smu_buffer.kaddr, + &tonga_smu->smu_buffer.handle); + + internal_buf = tonga_smu->smu_buffer.kaddr; + tonga_smu->smu_buffer.mc_addr_high = smu_upper_32_bits(mc_addr); + tonga_smu->smu_buffer.mc_addr_low = smu_lower_32_bits(mc_addr); + + PP_ASSERT_WITH_CODE((NULL != internal_buf), + "Out of memory.", + kfree(smumgr->backend); + cgs_free_gpu_mem(smumgr->device, + (cgs_handle_t)tonga_smu->smu_buffer.handle); + return -1;); + + return 0; +} + +static const struct pp_smumgr_func tonga_smu_funcs = { + .smu_init = &tonga_smu_init, + .smu_fini = &tonga_smu_fini, + .start_smu = &tonga_start_smu, + .check_fw_load_finish = &tonga_check_fw_load_finish, + .request_smu_load_fw = &tonga_request_smu_reload_fw, + .request_smu_load_specific_fw = &tonga_request_smu_load_specific_fw, + .send_msg_to_smc = &tonga_send_msg_to_smc, + .send_msg_to_smc_with_parameter = &tonga_send_msg_to_smc_with_parameter, + .download_pptable_settings = NULL, + .upload_pptable_settings = NULL, +}; + +int tonga_smum_init(struct pp_smumgr *smumgr) +{ + struct tonga_smumgr *tonga_smu = NULL; + + tonga_smu = kzalloc(sizeof(struct tonga_smumgr), GFP_KERNEL); + + if (tonga_smu == NULL) + return -ENOMEM; + + smumgr->backend = tonga_smu; + smumgr->smumgr_funcs = &tonga_smu_funcs; + + return 0; +} diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.h new file mode 100644 index 000000000000..33c788d7f05c --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.h @@ -0,0 +1,53 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
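The backend is consumed through the pp_smumgr_func table installed by tonga_smum_init() above. As a rough sketch of the expected call flow (the caller below is hypothetical; the real powerplay core drives these pointers through its own entry points), bringing the SMU up amounts to starting it and then waiting for the microcode load to be reported:

/* Hypothetical caller, not part of the patch. */
static int example_bring_up_smu(struct pp_smumgr *smumgr)
{
	int result;

	result = smumgr->smumgr_funcs->start_smu(smumgr);
	if (result)
		return result;

	/*
	 * tonga_start_smu() already asked the SMU to load the microcode;
	 * wait until at least the RLC image is reported as loaded.
	 */
	return smumgr->smumgr_funcs->check_fw_load_finish(smumgr, UCODE_ID_RLC_G);
}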
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef _TONGA_SMUMGR_H_ +#define _TONGA_SMUMGR_H_ + +struct tonga_buffer_entry { + uint32_t data_size; + uint32_t mc_addr_low; + uint32_t mc_addr_high; + void *kaddr; + unsigned long handle; +}; + +struct tonga_smumgr { + uint8_t *pHeader; + uint8_t *pMecImage; + uint32_t ulSoftRegsStart; + + struct tonga_buffer_entry header_buffer; + struct tonga_buffer_entry smu_buffer; +}; + +extern int tonga_smum_init(struct pp_smumgr *smumgr); +extern int tonga_copy_bytes_to_smc(struct pp_smumgr *smumgr, + uint32_t smcStartAddress, const uint8_t *src, + uint32_t byteCount, uint32_t limit); +extern int tonga_read_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smcAddress, + uint32_t *value, uint32_t limit); +extern int tonga_write_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smcAddress, + uint32_t value, uint32_t limit); + +#endif diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c index 3a4820e863ec..8b2becd1aa07 100644 --- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c +++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c @@ -47,6 +47,8 @@ static void amd_sched_rq_init(struct amd_sched_rq *rq) static void amd_sched_rq_add_entity(struct amd_sched_rq *rq, struct amd_sched_entity *entity) { + if (!list_empty(&entity->list)) + return; spin_lock(&rq->lock); list_add_tail(&entity->list, &rq->entities); spin_unlock(&rq->lock); @@ -55,6 +57,8 @@ static void amd_sched_rq_add_entity(struct amd_sched_rq *rq, static void amd_sched_rq_remove_entity(struct amd_sched_rq *rq, struct amd_sched_entity *entity) { + if (list_empty(&entity->list)) + return; spin_lock(&rq->lock); list_del_init(&entity->list); if (rq->current_entity == entity) @@ -138,9 +142,6 @@ int amd_sched_entity_init(struct amd_gpu_scheduler *sched, atomic_set(&entity->fence_seq, 0); entity->fence_context = fence_context_alloc(1); - /* Add the entity to the run queue */ - amd_sched_rq_add_entity(rq, entity); - return 0; } @@ -302,9 +303,11 @@ static bool amd_sched_entity_in(struct amd_sched_job *sched_job) spin_unlock(&entity->queue_lock); /* first job wakes up scheduler */ - if (first) + if (first) { + /* Add the entity to the run queue */ + amd_sched_rq_add_entity(entity->rq, entity); amd_sched_wakeup(sched); - + } return added; } @@ -349,14 +352,17 @@ static struct amd_sched_entity * amd_sched_select_entity(struct amd_gpu_scheduler *sched) { struct amd_sched_entity *entity; + int i; if (!amd_sched_ready(sched)) return NULL; /* Kernel run queue has higher priority than normal run queue*/ - entity = amd_sched_rq_select_entity(&sched->kernel_rq); - if (entity == NULL) - entity = amd_sched_rq_select_entity(&sched->sched_rq); + for (i = 0; i < AMD_SCHED_MAX_PRIORITY; i++) { + entity = amd_sched_rq_select_entity(&sched->sched_rq[i]); + if (entity) + break; + } return entity; } @@ -478,12 +484,13 @@ int amd_sched_init(struct amd_gpu_scheduler *sched, struct amd_sched_backend_ops *ops, unsigned hw_submission, long timeout, const char *name) { + int i; sched->ops = ops; sched->hw_submission_limit = hw_submission; sched->name = name; sched->timeout = timeout; - amd_sched_rq_init(&sched->sched_rq); - amd_sched_rq_init(&sched->kernel_rq); + for (i = 0; i < AMD_SCHED_MAX_PRIORITY; i++) + 
amd_sched_rq_init(&sched->sched_rq[i]); init_waitqueue_head(&sched->wake_up_worker); init_waitqueue_head(&sched->job_scheduled); diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h index a0f0ae53aacd..9403145d7bee 100644 --- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h +++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h @@ -104,6 +104,12 @@ struct amd_sched_backend_ops { struct fence *(*run_job)(struct amd_sched_job *sched_job); }; +enum amd_sched_priority { + AMD_SCHED_PRIORITY_KERNEL = 0, + AMD_SCHED_PRIORITY_NORMAL, + AMD_SCHED_MAX_PRIORITY +}; + /** * One scheduler is implemented for each hardware ring */ @@ -112,8 +118,7 @@ struct amd_gpu_scheduler { uint32_t hw_submission_limit; long timeout; const char *name; - struct amd_sched_rq sched_rq; - struct amd_sched_rq kernel_rq; + struct amd_sched_rq sched_rq[AMD_SCHED_MAX_PRIORITY]; wait_queue_head_t wake_up_worker; wait_queue_head_t job_scheduled; atomic_t hw_rq_count; diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c index cebcab560626..0293eb74d777 100644 --- a/drivers/gpu/drm/armada/armada_crtc.c +++ b/drivers/gpu/drm/armada/armada_crtc.c @@ -928,11 +928,10 @@ static int armada_drm_crtc_cursor_set(struct drm_crtc *crtc, } } - mutex_lock(&dev->struct_mutex); if (dcrtc->cursor_obj) { dcrtc->cursor_obj->update = NULL; dcrtc->cursor_obj->update_data = NULL; - drm_gem_object_unreference(&dcrtc->cursor_obj->obj); + drm_gem_object_unreference_unlocked(&dcrtc->cursor_obj->obj); } dcrtc->cursor_obj = obj; dcrtc->cursor_w = w; @@ -942,14 +941,12 @@ static int armada_drm_crtc_cursor_set(struct drm_crtc *crtc, obj->update_data = dcrtc; obj->update = cursor_update; } - mutex_unlock(&dev->struct_mutex); return ret; } static int armada_drm_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) { - struct drm_device *dev = crtc->dev; struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc); int ret; @@ -957,11 +954,9 @@ static int armada_drm_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) if (!dcrtc->variant->has_spu_adv_reg) return -EFAULT; - mutex_lock(&dev->struct_mutex); dcrtc->cursor_x = x; dcrtc->cursor_y = y; ret = armada_drm_crtc_cursor_update(dcrtc, false); - mutex_unlock(&dev->struct_mutex); return ret; } @@ -972,7 +967,7 @@ static void armada_drm_crtc_destroy(struct drm_crtc *crtc) struct armada_private *priv = crtc->dev->dev_private; if (dcrtc->cursor_obj) - drm_gem_object_unreference(&dcrtc->cursor_obj->obj); + drm_gem_object_unreference_unlocked(&dcrtc->cursor_obj->obj); priv->dcrtc[dcrtc->num] = NULL; drm_crtc_cleanup(&dcrtc->crtc); @@ -1074,7 +1069,7 @@ armada_drm_crtc_set_property(struct drm_crtc *crtc, return 0; } -static struct drm_crtc_funcs armada_crtc_funcs = { +static const struct drm_crtc_funcs armada_crtc_funcs = { .cursor_set = armada_drm_crtc_cursor_set, .cursor_move = armada_drm_crtc_cursor_move, .destroy = armada_drm_crtc_destroy, @@ -1216,14 +1211,14 @@ static int armada_drm_crtc_create(struct drm_device *drm, struct device *dev, &armada_primary_plane_funcs, armada_primary_formats, ARRAY_SIZE(armada_primary_formats), - DRM_PLANE_TYPE_PRIMARY); + DRM_PLANE_TYPE_PRIMARY, NULL); if (ret) { kfree(primary); return ret; } ret = drm_crtc_init_with_planes(drm, &dcrtc->crtc, &primary->base, NULL, - &armada_crtc_funcs); + &armada_crtc_funcs, NULL); if (ret) goto err_crtc_init; diff --git a/drivers/gpu/drm/armada/armada_debugfs.c b/drivers/gpu/drm/armada/armada_debugfs.c index 471e45627f1e..d4f7ab0a30d4 100644 --- 
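With the kernel and normal run queues folded into a priority-indexed array, and with entities now joining their run queue only once their first job is pushed, a driver selects the queue by priority when it initializes an entity. A sketch, assuming the existing amd_sched_entity_init(sched, entity, rq, jobs) signature; the function name and the job count are placeholders:

static int example_entity_setup(struct amd_gpu_scheduler *sched,
				struct amd_sched_entity *entity,
				bool kernel_queue)
{
	enum amd_sched_priority prio = kernel_queue ?
		AMD_SCHED_PRIORITY_KERNEL : AMD_SCHED_PRIORITY_NORMAL;

	/*
	 * The entity is only added to sched_rq[prio] once its first job is
	 * submitted (see amd_sched_entity_in() above); 32 is an example
	 * queue depth.
	 */
	return amd_sched_entity_init(sched, entity, &sched->sched_rq[prio], 32);
}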
a/drivers/gpu/drm/armada/armada_debugfs.c +++ b/drivers/gpu/drm/armada/armada_debugfs.c @@ -21,9 +21,9 @@ static int armada_debugfs_gem_linear_show(struct seq_file *m, void *data) struct armada_private *priv = dev->dev_private; int ret; - mutex_lock(&dev->struct_mutex); + mutex_lock(&priv->linear_lock); ret = drm_mm_dump_table(m, &priv->linear); - mutex_unlock(&dev->struct_mutex); + mutex_unlock(&priv->linear_lock); return ret; } diff --git a/drivers/gpu/drm/armada/armada_drm.h b/drivers/gpu/drm/armada/armada_drm.h index 4df6f2af2b21..3b2bb6128d40 100644 --- a/drivers/gpu/drm/armada/armada_drm.h +++ b/drivers/gpu/drm/armada/armada_drm.h @@ -57,7 +57,8 @@ struct armada_private { DECLARE_KFIFO(fb_unref, struct drm_framebuffer *, 8); struct drm_fb_helper *fbdev; struct armada_crtc *dcrtc[2]; - struct drm_mm linear; + struct drm_mm linear; /* protected by linear_lock */ + struct mutex linear_lock; struct drm_property *csc_yuv_prop; struct drm_property *csc_rgb_prop; struct drm_property *colorkey_prop; diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c index 77ab93d60125..3bd7e1cde99e 100644 --- a/drivers/gpu/drm/armada/armada_drv.c +++ b/drivers/gpu/drm/armada/armada_drv.c @@ -102,6 +102,7 @@ static int armada_drm_load(struct drm_device *dev, unsigned long flags) dev->mode_config.preferred_depth = 24; dev->mode_config.funcs = &armada_drm_mode_config_funcs; drm_mm_init(&priv->linear, mem->start, resource_size(mem)); + mutex_init(&priv->linear_lock); ret = component_bind_all(dev->dev, dev); if (ret) diff --git a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c index 60a688ef81c7..6e731db31aa4 100644 --- a/drivers/gpu/drm/armada/armada_gem.c +++ b/drivers/gpu/drm/armada/armada_gem.c @@ -46,22 +46,26 @@ static size_t roundup_gem_size(size_t size) return roundup(size, PAGE_SIZE); } -/* dev->struct_mutex is held here */ void armada_gem_free_object(struct drm_gem_object *obj) { struct armada_gem_object *dobj = drm_to_armada_gem(obj); + struct armada_private *priv = obj->dev->dev_private; DRM_DEBUG_DRIVER("release obj %p\n", dobj); drm_gem_free_mmap_offset(&dobj->obj); + might_lock(&priv->linear_lock); + if (dobj->page) { /* page backed memory */ unsigned int order = get_order(dobj->obj.size); __free_pages(dobj->page, order); } else if (dobj->linear) { /* linear backed memory */ + mutex_lock(&priv->linear_lock); drm_mm_remove_node(dobj->linear); + mutex_unlock(&priv->linear_lock); kfree(dobj->linear); if (dobj->addr) iounmap(dobj->addr); @@ -144,10 +148,10 @@ armada_gem_linear_back(struct drm_device *dev, struct armada_gem_object *obj) if (!node) return -ENOSPC; - mutex_lock(&dev->struct_mutex); + mutex_lock(&priv->linear_lock); ret = drm_mm_insert_node(&priv->linear, node, size, align, DRM_MM_SEARCH_DEFAULT); - mutex_unlock(&dev->struct_mutex); + mutex_unlock(&priv->linear_lock); if (ret) { kfree(node); return ret; @@ -158,9 +162,9 @@ armada_gem_linear_back(struct drm_device *dev, struct armada_gem_object *obj) /* Ensure that the memory we're returning is cleared. 
*/ ptr = ioremap_wc(obj->linear->start, size); if (!ptr) { - mutex_lock(&dev->struct_mutex); + mutex_lock(&priv->linear_lock); drm_mm_remove_node(obj->linear); - mutex_unlock(&dev->struct_mutex); + mutex_unlock(&priv->linear_lock); kfree(obj->linear); obj->linear = NULL; return -ENOMEM; @@ -274,18 +278,16 @@ int armada_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev, struct armada_gem_object *obj; int ret = 0; - mutex_lock(&dev->struct_mutex); obj = armada_gem_object_lookup(dev, file, handle); if (!obj) { DRM_ERROR("failed to lookup gem object\n"); - ret = -EINVAL; - goto err_unlock; + return -EINVAL; } /* Don't allow imported objects to be mapped */ if (obj->obj.import_attach) { ret = -EINVAL; - goto err_unlock; + goto err_unref; } ret = drm_gem_create_mmap_offset(&obj->obj); @@ -294,9 +296,8 @@ int armada_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev, DRM_DEBUG_DRIVER("handle %#x offset %llx\n", handle, *offset); } - drm_gem_object_unreference(&obj->obj); - err_unlock: - mutex_unlock(&dev->struct_mutex); + err_unref: + drm_gem_object_unreference_unlocked(&obj->obj); return ret; } @@ -352,13 +353,13 @@ int armada_gem_mmap_ioctl(struct drm_device *dev, void *data, return -ENOENT; if (!dobj->obj.filp) { - drm_gem_object_unreference(&dobj->obj); + drm_gem_object_unreference_unlocked(&dobj->obj); return -EINVAL; } addr = vm_mmap(dobj->obj.filp, 0, args->size, PROT_READ | PROT_WRITE, MAP_SHARED, args->offset); - drm_gem_object_unreference(&dobj->obj); + drm_gem_object_unreference_unlocked(&dobj->obj); if (IS_ERR_VALUE(addr)) return addr; diff --git a/drivers/gpu/drm/armada/armada_overlay.c b/drivers/gpu/drm/armada/armada_overlay.c index 5c22b380f8f3..148e8a42b2c6 100644 --- a/drivers/gpu/drm/armada/armada_overlay.c +++ b/drivers/gpu/drm/armada/armada_overlay.c @@ -460,7 +460,7 @@ int armada_overlay_plane_create(struct drm_device *dev, unsigned long crtcs) &armada_ovl_plane_funcs, armada_ovl_formats, ARRAY_SIZE(armada_ovl_formats), - DRM_PLANE_TYPE_OVERLAY); + DRM_PLANE_TYPE_OVERLAY, NULL); if (ret) { kfree(dplane); return ret; diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c index 69d19f3304a5..0123458cbd83 100644 --- a/drivers/gpu/drm/ast/ast_mode.c +++ b/drivers/gpu/drm/ast/ast_mode.c @@ -751,7 +751,7 @@ static int ast_encoder_init(struct drm_device *dev) return -ENOMEM; drm_encoder_init(dev, &ast_encoder->base, &ast_enc_funcs, - DRM_MODE_ENCODER_DAC); + DRM_MODE_ENCODER_DAC, NULL); drm_encoder_helper_add(&ast_encoder->base, &ast_enc_helper_funcs); ast_encoder->base.possible_crtcs = 1; diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c index 9f6e234e7029..468a14f266a7 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c @@ -344,7 +344,7 @@ int atmel_hlcdc_crtc_create(struct drm_device *dev) ret = drm_crtc_init_with_planes(dev, &crtc->base, &planes->primary->base, planes->cursor ? 
&planes->cursor->base : NULL, - &atmel_hlcdc_crtc_funcs); + &atmel_hlcdc_crtc_funcs, NULL); if (ret < 0) goto fail; diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c index 816895447155..a45b32ba029e 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c @@ -332,6 +332,10 @@ static const struct of_device_id atmel_hlcdc_of_match[] = { .compatible = "atmel,at91sam9x5-hlcdc", .data = &atmel_hlcdc_dc_at91sam9x5, }, + { + .compatible = "atmel,sama5d2-hlcdc", + .data = &atmel_hlcdc_dc_sama5d4, + }, { .compatible = "atmel,sama5d3-hlcdc", .data = &atmel_hlcdc_dc_sama5d3, @@ -342,6 +346,7 @@ static const struct of_device_id atmel_hlcdc_of_match[] = { }, { /* sentinel */ }, }; +MODULE_DEVICE_TABLE(of, atmel_hlcdc_of_match); int atmel_hlcdc_dc_mode_valid(struct atmel_hlcdc_dc *dc, struct drm_display_mode *mode) @@ -733,10 +738,6 @@ static int atmel_hlcdc_dc_drm_probe(struct platform_device *pdev) if (!ddev) return -ENOMEM; - ret = drm_dev_set_unique(ddev, dev_name(ddev->dev)); - if (ret) - goto err_unref; - ret = atmel_hlcdc_dc_load(ddev); if (ret) goto err_unref; diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c index 067e4c144bd6..0f7ec016e7a9 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c @@ -146,7 +146,7 @@ atmel_hlcdc_rgb_encoder_mode_set(struct drm_encoder *encoder, cfg); } -static struct drm_encoder_helper_funcs atmel_hlcdc_panel_encoder_helper_funcs = { +static const struct drm_encoder_helper_funcs atmel_hlcdc_panel_encoder_helper_funcs = { .mode_fixup = atmel_hlcdc_panel_encoder_mode_fixup, .mode_set = atmel_hlcdc_rgb_encoder_mode_set, .disable = atmel_hlcdc_panel_encoder_disable, @@ -192,7 +192,7 @@ atmel_hlcdc_rgb_best_encoder(struct drm_connector *connector) return &rgb->encoder; } -static struct drm_connector_helper_funcs atmel_hlcdc_panel_connector_helper_funcs = { +static const struct drm_connector_helper_funcs atmel_hlcdc_panel_connector_helper_funcs = { .get_modes = atmel_hlcdc_panel_get_modes, .mode_valid = atmel_hlcdc_rgb_mode_valid, .best_encoder = atmel_hlcdc_rgb_best_encoder, @@ -256,7 +256,7 @@ static int atmel_hlcdc_create_panel_output(struct drm_device *dev, &atmel_hlcdc_panel_encoder_helper_funcs); ret = drm_encoder_init(dev, &panel->base.encoder, &atmel_hlcdc_panel_encoder_funcs, - DRM_MODE_ENCODER_LVDS); + DRM_MODE_ENCODER_LVDS, NULL); if (ret) return ret; diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c index d0299aed517e..1ffe9c329c46 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c @@ -941,7 +941,7 @@ atmel_hlcdc_plane_create(struct drm_device *dev, ret = drm_universal_plane_init(dev, &plane->base, 0, &layer_plane_funcs, desc->formats->formats, - desc->formats->nformats, type); + desc->formats->nformats, type, NULL); if (ret) return ERR_PTR(ret); diff --git a/drivers/gpu/drm/bochs/bochs_kms.c b/drivers/gpu/drm/bochs/bochs_kms.c index 26bcd03a8cb6..2849f1b95eec 100644 --- a/drivers/gpu/drm/bochs/bochs_kms.c +++ b/drivers/gpu/drm/bochs/bochs_kms.c @@ -119,7 +119,7 @@ static int bochs_crtc_page_flip(struct drm_crtc *crtc, bochs_crtc_mode_set_base(crtc, 0, 0, old_fb); if (event) { spin_lock_irqsave(&bochs->dev->event_lock, irqflags); - drm_send_vblank_event(bochs->dev, -1, event); + 
drm_crtc_send_vblank_event(crtc, event); spin_unlock_irqrestore(&bochs->dev->event_lock, irqflags); } return 0; @@ -196,7 +196,7 @@ static void bochs_encoder_init(struct drm_device *dev) encoder->possible_crtcs = 0x1; drm_encoder_init(dev, encoder, &bochs_encoder_encoder_funcs, - DRM_MODE_ENCODER_DAC); + DRM_MODE_ENCODER_DAC, NULL); drm_encoder_helper_add(encoder, &bochs_encoder_helper_funcs); } @@ -245,13 +245,13 @@ static enum drm_connector_status bochs_connector_detect(struct drm_connector return connector_status_connected; } -struct drm_connector_helper_funcs bochs_connector_connector_helper_funcs = { +static const struct drm_connector_helper_funcs bochs_connector_connector_helper_funcs = { .get_modes = bochs_connector_get_modes, .mode_valid = bochs_connector_mode_valid, .best_encoder = bochs_connector_best_encoder, }; -struct drm_connector_funcs bochs_connector_connector_funcs = { +static const struct drm_connector_funcs bochs_connector_connector_funcs = { .dpms = drm_helper_connector_dpms, .detect = bochs_connector_detect, .fill_modes = drm_helper_probe_single_connector_modes, @@ -283,7 +283,7 @@ int bochs_kms_init(struct bochs_device *bochs) bochs->dev->mode_config.preferred_depth = 24; bochs->dev->mode_config.prefer_shadow = 0; - bochs->dev->mode_config.funcs = (void *)&bochs_mode_funcs; + bochs->dev->mode_config.funcs = &bochs_mode_funcs; bochs_crtc_init(bochs->dev); bochs_encoder_init(bochs->dev); diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig index 6dddd392aa42..27e2022de89d 100644 --- a/drivers/gpu/drm/bridge/Kconfig +++ b/drivers/gpu/drm/bridge/Kconfig @@ -22,7 +22,6 @@ config DRM_DW_HDMI_AHB_AUDIO Designware HDMI block. This is used in conjunction with the i.MX6 HDMI driver. - config DRM_NXP_PTN3460 tristate "NXP PTN3460 DP/LVDS bridge" depends on OF diff --git a/drivers/gpu/drm/bridge/Makefile b/drivers/gpu/drm/bridge/Makefile index d4e28beec30e..f13c33d67c03 100644 --- a/drivers/gpu/drm/bridge/Makefile +++ b/drivers/gpu/drm/bridge/Makefile @@ -1,6 +1,6 @@ ccflags-y := -Iinclude/drm -obj-$(CONFIG_DRM_DW_HDMI) += dw_hdmi.o -obj-$(CONFIG_DRM_DW_HDMI_AHB_AUDIO) += dw_hdmi-ahb-audio.o +obj-$(CONFIG_DRM_DW_HDMI) += dw-hdmi.o +obj-$(CONFIG_DRM_DW_HDMI_AHB_AUDIO) += dw-hdmi-ahb-audio.o obj-$(CONFIG_DRM_NXP_PTN3460) += nxp-ptn3460.o obj-$(CONFIG_DRM_PARADE_PS8622) += parade-ps8622.o diff --git a/drivers/gpu/drm/bridge/dw_hdmi-ahb-audio.c b/drivers/gpu/drm/bridge/dw-hdmi-ahb-audio.c similarity index 99% rename from drivers/gpu/drm/bridge/dw_hdmi-ahb-audio.c rename to drivers/gpu/drm/bridge/dw-hdmi-ahb-audio.c index 59f630f1c61a..122bb015f4a9 100644 --- a/drivers/gpu/drm/bridge/dw_hdmi-ahb-audio.c +++ b/drivers/gpu/drm/bridge/dw-hdmi-ahb-audio.c @@ -21,7 +21,7 @@ #include #include -#include "dw_hdmi-audio.h" +#include "dw-hdmi-audio.h" #define DRIVER_NAME "dw-hdmi-ahb-audio" diff --git a/drivers/gpu/drm/bridge/dw_hdmi-audio.h b/drivers/gpu/drm/bridge/dw-hdmi-audio.h similarity index 100% rename from drivers/gpu/drm/bridge/dw_hdmi-audio.h rename to drivers/gpu/drm/bridge/dw-hdmi-audio.h diff --git a/drivers/gpu/drm/bridge/dw_hdmi.c b/drivers/gpu/drm/bridge/dw-hdmi.c similarity index 98% rename from drivers/gpu/drm/bridge/dw_hdmi.c rename to drivers/gpu/drm/bridge/dw-hdmi.c index 56de9f1c95fc..b0aac4733020 100644 --- a/drivers/gpu/drm/bridge/dw_hdmi.c +++ b/drivers/gpu/drm/bridge/dw-hdmi.c @@ -22,13 +22,14 @@ #include #include +#include #include #include #include #include -#include "dw_hdmi.h" -#include "dw_hdmi-audio.h" +#include "dw-hdmi.h" +#include 
"dw-hdmi-audio.h" #define HDMI_EDID_LEN 512 @@ -1514,7 +1515,7 @@ static void dw_hdmi_connector_force(struct drm_connector *connector) mutex_unlock(&hdmi->mutex); } -static struct drm_connector_funcs dw_hdmi_connector_funcs = { +static const struct drm_connector_funcs dw_hdmi_connector_funcs = { .dpms = drm_helper_connector_dpms, .fill_modes = drm_helper_probe_single_connector_modes, .detect = dw_hdmi_connector_detect, @@ -1522,13 +1523,24 @@ static struct drm_connector_funcs dw_hdmi_connector_funcs = { .force = dw_hdmi_connector_force, }; -static struct drm_connector_helper_funcs dw_hdmi_connector_helper_funcs = { +static const struct drm_connector_funcs dw_hdmi_atomic_connector_funcs = { + .dpms = drm_atomic_helper_connector_dpms, + .fill_modes = drm_helper_probe_single_connector_modes, + .detect = dw_hdmi_connector_detect, + .destroy = dw_hdmi_connector_destroy, + .force = dw_hdmi_connector_force, + .reset = drm_atomic_helper_connector_reset, + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, +}; + +static const struct drm_connector_helper_funcs dw_hdmi_connector_helper_funcs = { .get_modes = dw_hdmi_connector_get_modes, .mode_valid = dw_hdmi_connector_mode_valid, .best_encoder = dw_hdmi_connector_best_encoder, }; -static struct drm_bridge_funcs dw_hdmi_bridge_funcs = { +static const struct drm_bridge_funcs dw_hdmi_bridge_funcs = { .enable = dw_hdmi_bridge_enable, .disable = dw_hdmi_bridge_disable, .pre_enable = dw_hdmi_bridge_nop, @@ -1645,10 +1657,15 @@ static int dw_hdmi_register(struct drm_device *drm, struct dw_hdmi *hdmi) drm_connector_helper_add(&hdmi->connector, &dw_hdmi_connector_helper_funcs); - drm_connector_init(drm, &hdmi->connector, &dw_hdmi_connector_funcs, - DRM_MODE_CONNECTOR_HDMIA); - hdmi->connector.encoder = encoder; + if (drm_core_check_feature(drm, DRIVER_ATOMIC)) + drm_connector_init(drm, &hdmi->connector, + &dw_hdmi_atomic_connector_funcs, + DRM_MODE_CONNECTOR_HDMIA); + else + drm_connector_init(drm, &hdmi->connector, + &dw_hdmi_connector_funcs, + DRM_MODE_CONNECTOR_HDMIA); drm_mode_connector_attach_encoder(&hdmi->connector, encoder); diff --git a/drivers/gpu/drm/bridge/dw_hdmi.h b/drivers/gpu/drm/bridge/dw-hdmi.h similarity index 100% rename from drivers/gpu/drm/bridge/dw_hdmi.h rename to drivers/gpu/drm/bridge/dw-hdmi.h diff --git a/drivers/gpu/drm/bridge/nxp-ptn3460.c b/drivers/gpu/drm/bridge/nxp-ptn3460.c index 0ffa3a6a206a..7ecd59f70b8e 100644 --- a/drivers/gpu/drm/bridge/nxp-ptn3460.c +++ b/drivers/gpu/drm/bridge/nxp-ptn3460.c @@ -242,7 +242,7 @@ static struct drm_encoder *ptn3460_best_encoder(struct drm_connector *connector) return ptn_bridge->bridge.encoder; } -static struct drm_connector_helper_funcs ptn3460_connector_helper_funcs = { +static const struct drm_connector_helper_funcs ptn3460_connector_helper_funcs = { .get_modes = ptn3460_get_modes, .best_encoder = ptn3460_best_encoder, }; @@ -258,7 +258,7 @@ static void ptn3460_connector_destroy(struct drm_connector *connector) drm_connector_cleanup(connector); } -static struct drm_connector_funcs ptn3460_connector_funcs = { +static const struct drm_connector_funcs ptn3460_connector_funcs = { .dpms = drm_atomic_helper_connector_dpms, .fill_modes = drm_helper_probe_single_connector_modes, .detect = ptn3460_detect, @@ -299,7 +299,7 @@ static int ptn3460_bridge_attach(struct drm_bridge *bridge) return ret; } -static struct drm_bridge_funcs ptn3460_bridge_funcs = { +static const struct drm_bridge_funcs 
ptn3460_bridge_funcs = { .pre_enable = ptn3460_pre_enable, .enable = ptn3460_enable, .disable = ptn3460_disable, diff --git a/drivers/gpu/drm/cirrus/cirrus_mode.c b/drivers/gpu/drm/cirrus/cirrus_mode.c index 61385f2298bf..4a02854a6963 100644 --- a/drivers/gpu/drm/cirrus/cirrus_mode.c +++ b/drivers/gpu/drm/cirrus/cirrus_mode.c @@ -489,7 +489,7 @@ static struct drm_encoder *cirrus_encoder_init(struct drm_device *dev) encoder->possible_crtcs = 0x1; drm_encoder_init(dev, encoder, &cirrus_encoder_encoder_funcs, - DRM_MODE_ENCODER_DAC); + DRM_MODE_ENCODER_DAC, NULL); drm_encoder_helper_add(encoder, &cirrus_encoder_helper_funcs); return encoder; @@ -533,12 +533,12 @@ static void cirrus_connector_destroy(struct drm_connector *connector) kfree(connector); } -struct drm_connector_helper_funcs cirrus_vga_connector_helper_funcs = { +static const struct drm_connector_helper_funcs cirrus_vga_connector_helper_funcs = { .get_modes = cirrus_vga_get_modes, .best_encoder = cirrus_connector_best_encoder, }; -struct drm_connector_funcs cirrus_vga_connector_funcs = { +static const struct drm_connector_funcs cirrus_vga_connector_funcs = { .dpms = drm_helper_connector_dpms, .detect = cirrus_vga_detect, .fill_modes = drm_helper_probe_single_connector_modes, diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c index ef5f7663a718..3f74193885f1 100644 --- a/drivers/gpu/drm/drm_atomic.c +++ b/drivers/gpu/drm/drm_atomic.c @@ -288,8 +288,8 @@ drm_atomic_get_crtc_state(struct drm_atomic_state *state, state->crtcs[index] = crtc; crtc_state->state = state; - DRM_DEBUG_ATOMIC("Added [CRTC:%d] %p state to %p\n", - crtc->base.id, crtc_state, state); + DRM_DEBUG_ATOMIC("Added [CRTC:%d:%s] %p state to %p\n", + crtc->base.id, crtc->name, crtc_state, state); return crtc_state; } @@ -429,11 +429,20 @@ int drm_atomic_crtc_set_property(struct drm_crtc *crtc, } EXPORT_SYMBOL(drm_atomic_crtc_set_property); -/* +/** + * drm_atomic_crtc_get_property - get property value from CRTC state + * @crtc: the drm CRTC to set a property on + * @state: the state object to get the property value from + * @property: the property to set + * @val: return location for the property value + * * This function handles generic/core properties and calls out to * driver's ->atomic_get_property() for driver properties. To ensure * consistent behavior you must call this function rather than the * driver hook directly. + * + * RETURNS: + * Zero on success, error code on failure */ static int drm_atomic_crtc_get_property(struct drm_crtc *crtc, @@ -477,8 +486,8 @@ static int drm_atomic_crtc_check(struct drm_crtc *crtc, */ if (state->active && !state->enable) { - DRM_DEBUG_ATOMIC("[CRTC:%d] active without enabled\n", - crtc->base.id); + DRM_DEBUG_ATOMIC("[CRTC:%d:%s] active without enabled\n", + crtc->base.id, crtc->name); return -EINVAL; } @@ -487,14 +496,30 @@ static int drm_atomic_crtc_check(struct drm_crtc *crtc, * be able to trigger. 
*/ if (drm_core_check_feature(crtc->dev, DRIVER_ATOMIC) && WARN_ON(state->enable && !state->mode_blob)) { - DRM_DEBUG_ATOMIC("[CRTC:%d] enabled without mode blob\n", - crtc->base.id); + DRM_DEBUG_ATOMIC("[CRTC:%d:%s] enabled without mode blob\n", + crtc->base.id, crtc->name); return -EINVAL; } if (drm_core_check_feature(crtc->dev, DRIVER_ATOMIC) && WARN_ON(!state->enable && state->mode_blob)) { - DRM_DEBUG_ATOMIC("[CRTC:%d] disabled with mode blob\n", + DRM_DEBUG_ATOMIC("[CRTC:%d:%s] disabled with mode blob\n", + crtc->base.id, crtc->name); + return -EINVAL; + } + + /* + * Reject event generation for when a CRTC is off and stays off. + * It wouldn't be hard to implement this, but userspace has a track + * record of happily burning through 100% cpu (or worse, crash) when the + * display pipe is suspended. To avoid all that fun just reject updates + * that ask for events since likely that indicates a bug in the + * compositor's drawing loop. This is consistent with the vblank IOCTL + * and legacy page_flip IOCTL which also reject service on a disabled + * pipe. + */ + if (state->event && !state->active && !crtc->state->active) { + DRM_DEBUG_ATOMIC("[CRTC:%d] requesting event but off\n", crtc->base.id); return -EINVAL; } @@ -540,8 +565,8 @@ drm_atomic_get_plane_state(struct drm_atomic_state *state, state->planes[index] = plane; plane_state->state = state; - DRM_DEBUG_ATOMIC("Added [PLANE:%d] %p state to %p\n", - plane->base.id, plane_state, state); + DRM_DEBUG_ATOMIC("Added [PLANE:%d:%s] %p state to %p\n", + plane->base.id, plane->name, plane_state, state); if (plane_state->crtc) { struct drm_crtc_state *crtc_state; @@ -616,11 +641,20 @@ int drm_atomic_plane_set_property(struct drm_plane *plane, } EXPORT_SYMBOL(drm_atomic_plane_set_property); -/* +/** + * drm_atomic_plane_get_property - get property value from plane state + * @plane: the drm plane to set a property on + * @state: the state object to get the property value from + * @property: the property to set + * @val: return location for the property value + * * This function handles generic/core properties and calls out to * driver's ->atomic_get_property() for driver properties. To ensure * consistent behavior you must call this function rather than the * driver hook directly. + * + * RETURNS: + * Zero on success, error code on failure */ static int drm_atomic_plane_get_property(struct drm_plane *plane, @@ -752,8 +786,8 @@ static int drm_atomic_plane_check(struct drm_plane *plane, } if (plane_switching_crtc(state->state, plane, state)) { - DRM_DEBUG_ATOMIC("[PLANE:%d] switching CRTC directly\n", - plane->base.id); + DRM_DEBUG_ATOMIC("[PLANE:%d:%s] switching CRTC directly\n", + plane->base.id, plane->name); return -EINVAL; } @@ -872,11 +906,20 @@ int drm_atomic_connector_set_property(struct drm_connector *connector, } EXPORT_SYMBOL(drm_atomic_connector_set_property); -/* +/** + * drm_atomic_connector_get_property - get property value from connector state + * @connector: the drm connector to set a property on + * @state: the state object to get the property value from + * @property: the property to set + * @val: return location for the property value + * * This function handles generic/core properties and calls out to * driver's ->atomic_get_property() for driver properties. To ensure * consistent behavior you must call this function rather than the * driver hook directly. 
+ * + * RETURNS: + * Zero on success, error code on failure */ static int drm_atomic_connector_get_property(struct drm_connector *connector, @@ -977,8 +1020,8 @@ drm_atomic_set_crtc_for_plane(struct drm_plane_state *plane_state, } if (crtc) - DRM_DEBUG_ATOMIC("Link plane state %p to [CRTC:%d]\n", - plane_state, crtc->base.id); + DRM_DEBUG_ATOMIC("Link plane state %p to [CRTC:%d:%s]\n", + plane_state, crtc->base.id, crtc->name); else DRM_DEBUG_ATOMIC("Link plane state %p to [NOCRTC]\n", plane_state); @@ -1036,17 +1079,28 @@ drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state, { struct drm_crtc_state *crtc_state; + if (conn_state->crtc && conn_state->crtc != crtc) { + crtc_state = drm_atomic_get_existing_crtc_state(conn_state->state, + conn_state->crtc); + + crtc_state->connector_mask &= + ~(1 << drm_connector_index(conn_state->connector)); + } + if (crtc) { crtc_state = drm_atomic_get_crtc_state(conn_state->state, crtc); if (IS_ERR(crtc_state)) return PTR_ERR(crtc_state); + + crtc_state->connector_mask |= + 1 << drm_connector_index(conn_state->connector); } conn_state->crtc = crtc; if (crtc) - DRM_DEBUG_ATOMIC("Link connector state %p to [CRTC:%d]\n", - conn_state, crtc->base.id); + DRM_DEBUG_ATOMIC("Link connector state %p to [CRTC:%d:%s]\n", + conn_state, crtc->base.id, crtc->name); else DRM_DEBUG_ATOMIC("Link connector state %p to [NOCRTC]\n", conn_state); @@ -1085,8 +1139,8 @@ drm_atomic_add_affected_connectors(struct drm_atomic_state *state, if (ret) return ret; - DRM_DEBUG_ATOMIC("Adding all current connectors for [CRTC:%d] to %p\n", - crtc->base.id, state); + DRM_DEBUG_ATOMIC("Adding all current connectors for [CRTC:%d:%s] to %p\n", + crtc->base.id, crtc->name, state); /* * Changed connectors are already in @state, so only need to look at the @@ -1144,35 +1198,6 @@ drm_atomic_add_affected_planes(struct drm_atomic_state *state, } EXPORT_SYMBOL(drm_atomic_add_affected_planes); -/** - * drm_atomic_connectors_for_crtc - count number of connected outputs - * @state: atomic state - * @crtc: DRM crtc - * - * This function counts all connectors which will be connected to @crtc - * according to @state. Useful to recompute the enable state for @crtc. 
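The new connector_mask field in the CRTC state replaces drm_atomic_connectors_for_crtc() (removed just below) and lets helpers and drivers answer the common questions without walking all connector states. Two small sketches; the function names are placeholders:

static bool example_crtc_has_connectors(const struct drm_crtc_state *crtc_state)
{
	return crtc_state->connector_mask != 0;
}

static bool example_connector_on_crtc(const struct drm_crtc_state *crtc_state,
				      struct drm_connector *connector)
{
	return crtc_state->connector_mask &
	       (1 << drm_connector_index(connector));
}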
- */ -int -drm_atomic_connectors_for_crtc(struct drm_atomic_state *state, - struct drm_crtc *crtc) -{ - struct drm_connector *connector; - struct drm_connector_state *conn_state; - - int i, num_connected_connectors = 0; - - for_each_connector_in_state(state, connector, conn_state, i) { - if (conn_state->crtc == crtc) - num_connected_connectors++; - } - - DRM_DEBUG_ATOMIC("State %p has %i connectors for [CRTC:%d]\n", - state, num_connected_connectors, crtc->base.id); - - return num_connected_connectors; -} -EXPORT_SYMBOL(drm_atomic_connectors_for_crtc); - /** * drm_atomic_legacy_backoff - locking backoff for legacy ioctls * @state: atomic state @@ -1220,8 +1245,8 @@ int drm_atomic_check_only(struct drm_atomic_state *state) for_each_plane_in_state(state, plane, plane_state, i) { ret = drm_atomic_plane_check(plane, plane_state); if (ret) { - DRM_DEBUG_ATOMIC("[PLANE:%d] atomic core check failed\n", - plane->base.id); + DRM_DEBUG_ATOMIC("[PLANE:%d:%s] atomic core check failed\n", + plane->base.id, plane->name); return ret; } } @@ -1229,8 +1254,8 @@ int drm_atomic_check_only(struct drm_atomic_state *state) for_each_crtc_in_state(state, crtc, crtc_state, i) { ret = drm_atomic_crtc_check(crtc, crtc_state); if (ret) { - DRM_DEBUG_ATOMIC("[CRTC:%d] atomic core check failed\n", - crtc->base.id); + DRM_DEBUG_ATOMIC("[CRTC:%d:%s] atomic core check failed\n", + crtc->base.id, crtc->name); return ret; } } @@ -1241,8 +1266,8 @@ int drm_atomic_check_only(struct drm_atomic_state *state) if (!state->allow_modeset) { for_each_crtc_in_state(state, crtc, crtc_state, i) { if (drm_atomic_crtc_needs_modeset(crtc_state)) { - DRM_DEBUG_ATOMIC("[CRTC:%d] requires full modeset\n", - crtc->base.id); + DRM_DEBUG_ATOMIC("[CRTC:%d:%s] requires full modeset\n", + crtc->base.id, crtc->name); return -EINVAL; } } diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index 74a5fc4deef6..57cccd68ca52 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -52,6 +52,12 @@ * drm_atomic_helper_disable_plane(), drm_atomic_helper_disable_plane() and the * various functions to implement set_property callbacks. New drivers must not * implement these functions themselves but must use the provided helpers. + * + * The atomic helper uses the same function table structures as all other + * modesetting helpers. See the documentation for struct &drm_crtc_helper_funcs, + * struct &drm_encoder_helper_funcs and struct &drm_connector_helper_funcs. It + * also shares the struct &drm_plane_helper_funcs function table with the plane + * helpers. 
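Because the atomic helpers reuse the per-object helper vtables of the other modesetting helpers, the driver-side wiring stays the same whether the legacy or the atomic commit machinery runs behind it. A sketch; the example_* vtables are placeholders a real driver would populate with its hooks:

/* Placeholder vtables; a real driver fills these with its hooks. */
static const struct drm_crtc_helper_funcs example_crtc_helpers;
static const struct drm_encoder_helper_funcs example_encoder_helpers;
static const struct drm_connector_helper_funcs example_connector_helpers;
static const struct drm_plane_helper_funcs example_plane_helpers;

static void example_add_helper_vtables(struct drm_crtc *crtc,
				       struct drm_encoder *encoder,
				       struct drm_connector *connector,
				       struct drm_plane *plane)
{
	/* The same vtables serve the legacy CRTC helpers and the atomic helpers. */
	drm_crtc_helper_add(crtc, &example_crtc_helpers);
	drm_encoder_helper_add(encoder, &example_encoder_helpers);
	drm_connector_helper_add(connector, &example_connector_helpers);
	drm_plane_helper_add(plane, &example_plane_helpers);
}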
*/ static void drm_atomic_helper_plane_changed(struct drm_atomic_state *state, @@ -82,8 +88,7 @@ drm_atomic_helper_plane_changed(struct drm_atomic_state *state, static bool check_pending_encoder_assignment(struct drm_atomic_state *state, - struct drm_encoder *new_encoder, - struct drm_connector *new_connector) + struct drm_encoder *new_encoder) { struct drm_connector *connector; struct drm_connector_state *conn_state; @@ -137,9 +142,9 @@ steal_encoder(struct drm_atomic_state *state, */ WARN_ON(!drm_modeset_is_locked(&config->connection_mutex)); - DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] in use on [CRTC:%d], stealing it\n", + DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] in use on [CRTC:%d:%s], stealing it\n", encoder->base.id, encoder->name, - encoder_crtc->base.id); + encoder_crtc->base.id, encoder_crtc->name); crtc_state = drm_atomic_get_crtc_state(state, encoder_crtc); if (IS_ERR(crtc_state)) @@ -240,17 +245,18 @@ update_connector_routing(struct drm_atomic_state *state, int conn_idx) } if (new_encoder == connector_state->best_encoder) { - DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] keeps [ENCODER:%d:%s], now on [CRTC:%d]\n", + DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] keeps [ENCODER:%d:%s], now on [CRTC:%d:%s]\n", connector->base.id, connector->name, new_encoder->base.id, new_encoder->name, - connector_state->crtc->base.id); + connector_state->crtc->base.id, + connector_state->crtc->name); return 0; } - if (!check_pending_encoder_assignment(state, new_encoder, connector)) { + if (!check_pending_encoder_assignment(state, new_encoder)) { DRM_DEBUG_ATOMIC("Encoder for [CONNECTOR:%d:%s] already assigned\n", connector->base.id, connector->name); @@ -279,12 +285,13 @@ update_connector_routing(struct drm_atomic_state *state, int conn_idx) crtc_state = state->crtc_states[idx]; crtc_state->connectors_changed = true; - DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] using [ENCODER:%d:%s] on [CRTC:%d]\n", + DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] using [ENCODER:%d:%s] on [CRTC:%d:%s]\n", connector->base.id, connector->name, new_encoder->base.id, new_encoder->name, - connector_state->crtc->base.id); + connector_state->crtc->base.id, + connector_state->crtc->name); return 0; } @@ -368,8 +375,8 @@ mode_fixup(struct drm_atomic_state *state) ret = funcs->mode_fixup(crtc, &crtc_state->mode, &crtc_state->adjusted_mode); if (!ret) { - DRM_DEBUG_ATOMIC("[CRTC:%d] fixup failed\n", - crtc->base.id); + DRM_DEBUG_ATOMIC("[CRTC:%d:%s] fixup failed\n", + crtc->base.id, crtc->name); return -EINVAL; } } @@ -416,14 +423,14 @@ drm_atomic_helper_check_modeset(struct drm_device *dev, for_each_crtc_in_state(state, crtc, crtc_state, i) { if (!drm_mode_equal(&crtc->state->mode, &crtc_state->mode)) { - DRM_DEBUG_ATOMIC("[CRTC:%d] mode changed\n", - crtc->base.id); + DRM_DEBUG_ATOMIC("[CRTC:%d:%s] mode changed\n", + crtc->base.id, crtc->name); crtc_state->mode_changed = true; } if (crtc->state->enable != crtc_state->enable) { - DRM_DEBUG_ATOMIC("[CRTC:%d] enable changed\n", - crtc->base.id); + DRM_DEBUG_ATOMIC("[CRTC:%d:%s] enable changed\n", + crtc->base.id, crtc->name); /* * For clarity this assignment is done here, but @@ -456,7 +463,8 @@ drm_atomic_helper_check_modeset(struct drm_device *dev, * crtc only changed its mode but has the same set of connectors. 
*/ for_each_crtc_in_state(state, crtc, crtc_state, i) { - int num_connectors; + bool has_connectors = + !!crtc_state->connector_mask; /* * We must set ->active_changed after walking connectors for @@ -464,18 +472,18 @@ drm_atomic_helper_check_modeset(struct drm_device *dev, * a full modeset because update_connector_routing force that. */ if (crtc->state->active != crtc_state->active) { - DRM_DEBUG_ATOMIC("[CRTC:%d] active changed\n", - crtc->base.id); + DRM_DEBUG_ATOMIC("[CRTC:%d:%s] active changed\n", + crtc->base.id, crtc->name); crtc_state->active_changed = true; } if (!drm_atomic_crtc_needs_modeset(crtc_state)) continue; - DRM_DEBUG_ATOMIC("[CRTC:%d] needs all connectors, enable: %c, active: %c\n", - crtc->base.id, + DRM_DEBUG_ATOMIC("[CRTC:%d:%s] needs all connectors, enable: %c, active: %c\n", + crtc->base.id, crtc->name, crtc_state->enable ? 'y' : 'n', - crtc_state->active ? 'y' : 'n'); + crtc_state->active ? 'y' : 'n'); ret = drm_atomic_add_affected_connectors(state, crtc); if (ret != 0) @@ -485,12 +493,9 @@ drm_atomic_helper_check_modeset(struct drm_device *dev, if (ret != 0) return ret; - num_connectors = drm_atomic_connectors_for_crtc(state, - crtc); - - if (crtc_state->enable != !!num_connectors) { - DRM_DEBUG_ATOMIC("[CRTC:%d] enabled/connectors mismatch\n", - crtc->base.id); + if (crtc_state->enable != has_connectors) { + DRM_DEBUG_ATOMIC("[CRTC:%d:%s] enabled/connectors mismatch\n", + crtc->base.id, crtc->name); return -EINVAL; } @@ -537,8 +542,8 @@ drm_atomic_helper_check_planes(struct drm_device *dev, ret = funcs->atomic_check(plane, plane_state); if (ret) { - DRM_DEBUG_ATOMIC("[PLANE:%d] atomic driver check failed\n", - plane->base.id); + DRM_DEBUG_ATOMIC("[PLANE:%d:%s] atomic driver check failed\n", + plane->base.id, plane->name); return ret; } } @@ -553,8 +558,8 @@ drm_atomic_helper_check_planes(struct drm_device *dev, ret = funcs->atomic_check(crtc, state->crtc_states[i]); if (ret) { - DRM_DEBUG_ATOMIC("[CRTC:%d] atomic driver check failed\n", - crtc->base.id); + DRM_DEBUG_ATOMIC("[CRTC:%d:%s] atomic driver check failed\n", + crtc->base.id, crtc->name); return ret; } } @@ -667,8 +672,8 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state) funcs = crtc->helper_private; - DRM_DEBUG_ATOMIC("disabling [CRTC:%d]\n", - crtc->base.id); + DRM_DEBUG_ATOMIC("disabling [CRTC:%d:%s]\n", + crtc->base.id, crtc->name); /* Right function depends upon target state. */ @@ -779,8 +784,8 @@ crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *old_state) funcs = crtc->helper_private; if (crtc->state->enable && funcs->mode_set_nofb) { - DRM_DEBUG_ATOMIC("modeset on [CRTC:%d]\n", - crtc->base.id); + DRM_DEBUG_ATOMIC("modeset on [CRTC:%d:%s]\n", + crtc->base.id, crtc->name); funcs->mode_set_nofb(crtc); } @@ -879,8 +884,8 @@ void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev, funcs = crtc->helper_private; if (crtc->state->enable) { - DRM_DEBUG_ATOMIC("enabling [CRTC:%d]\n", - crtc->base.id); + DRM_DEBUG_ATOMIC("enabling [CRTC:%d:%s]\n", + crtc->base.id, crtc->name); if (funcs->enable) funcs->enable(crtc); @@ -1747,7 +1752,7 @@ static int update_output_state(struct drm_atomic_state *state, if (crtc == set->crtc) continue; - if (!drm_atomic_connectors_for_crtc(state, crtc)) { + if (!crtc_state->connector_mask) { ret = drm_atomic_set_mode_prop_for_crtc(crtc_state, NULL); if (ret < 0) @@ -2277,6 +2282,15 @@ retry: goto fail; drm_atomic_set_fb_for_plane(plane_state, fb); + /* Make sure we don't accidentally do a full modeset. 
*/ + state->allow_modeset = false; + if (!crtc_state->active) { + DRM_DEBUG_ATOMIC("[CRTC:%d] disabled, rejecting legacy flip\n", + crtc->base.id); + ret = -EINVAL; + goto fail; + } + ret = drm_atomic_async_commit(state); if (ret != 0) goto fail; @@ -2399,6 +2413,12 @@ EXPORT_SYMBOL(drm_atomic_helper_connector_dpms); * The simpler solution is to just reset the software state to everything off, * which is easiest to do by calling drm_mode_config_reset(). To facilitate this * the atomic helpers provide default reset implementations for all hooks. + * + * On the upside the precise state tracking of atomic simplifies system suspend + * and resume a lot. For drivers using drm_mode_config_reset() a complete recipe + * is implemented in drm_atomic_helper_suspend() and drm_atomic_helper_resume(). + * For other drivers the building blocks are split out, see the documentation + * for these functions. */ /** @@ -2592,6 +2612,28 @@ void drm_atomic_helper_plane_destroy_state(struct drm_plane *plane, } EXPORT_SYMBOL(drm_atomic_helper_plane_destroy_state); +/** + * __drm_atomic_helper_connector_reset - reset state on connector + * @connector: drm connector + * @conn_state: connector state to assign + * + * Initializes the newly allocated @conn_state and assigns it to + * #connector ->state, usually required when initializing the drivers + * or when called from the ->reset hook. + * + * This is useful for drivers that subclass the connector state. + */ +void +__drm_atomic_helper_connector_reset(struct drm_connector *connector, + struct drm_connector_state *conn_state) +{ + if (conn_state) + conn_state->connector = connector; + + connector->state = conn_state; +} +EXPORT_SYMBOL(__drm_atomic_helper_connector_reset); + /** * drm_atomic_helper_connector_reset - default ->reset hook for connectors * @connector: drm connector @@ -2602,11 +2644,11 @@ EXPORT_SYMBOL(drm_atomic_helper_plane_destroy_state); */ void drm_atomic_helper_connector_reset(struct drm_connector *connector) { - kfree(connector->state); - connector->state = kzalloc(sizeof(*connector->state), GFP_KERNEL); + struct drm_connector_state *conn_state = + kzalloc(sizeof(*conn_state), GFP_KERNEL); - if (connector->state) - connector->state->connector = connector; + kfree(connector->state); + __drm_atomic_helper_connector_reset(connector, conn_state); } EXPORT_SYMBOL(drm_atomic_helper_connector_reset); diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c index 6b8f7211e543..bd93453afa61 100644 --- a/drivers/gpu/drm/drm_bridge.c +++ b/drivers/gpu/drm/drm_bridge.c @@ -31,14 +31,14 @@ /** * DOC: overview * - * drm_bridge represents a device that hangs on to an encoder. These are handy - * when a regular drm_encoder entity isn't enough to represent the entire + * struct &drm_bridge represents a device that hangs on to an encoder. These are + * handy when a regular &drm_encoder entity isn't enough to represent the entire * encoder chain. * - * A bridge is always associated to a single drm_encoder at a time, but can be + * A bridge is always attached to a single &drm_encoder at a time, but can be * either connected to it directly, or through an intermediate bridge: * - * encoder ---> bridge B ---> bridge A + * encoder ---> bridge B ---> bridge A * * Here, the output of the encoder feeds to bridge B, and that furthers feeds to * bridge A. @@ -46,11 +46,16 @@ * The driver using the bridge is responsible to make the associations between * the encoder and bridges. 
Once these links are made, the bridges will * participate along with encoder functions to perform mode_set/enable/disable - * through the ops provided in drm_bridge_funcs. + * through the ops provided in &drm_bridge_funcs. * * drm_bridge, like drm_panel, aren't drm_mode_object entities like planes, - * crtcs, encoders or connectors. They just provide additional hooks to get the - * desired output at the end of the encoder chain. + * CRTCs, encoders or connectors and hence are not visible to userspace. They + * just provide additional hooks to get the desired output at the end of the + * encoder chain. + * + * Bridges can also be chained up using the next pointer in struct &drm_bridge. + * + * Both legacy CRTC helpers and the new atomic modeset helpers support bridges. */ static DEFINE_MUTEX(bridge_lock); @@ -122,34 +127,12 @@ EXPORT_SYMBOL(drm_bridge_attach); /** * DOC: bridge callbacks * - * The drm_bridge_funcs ops are populated by the bridge driver. The drm - * internals(atomic and crtc helpers) use the helpers defined in drm_bridge.c - * These helpers call a specific drm_bridge_funcs op for all the bridges + * The &drm_bridge_funcs ops are populated by the bridge driver. The DRM + * internals (atomic and CRTC helpers) use the helpers defined in drm_bridge.c + * These helpers call a specific &drm_bridge_funcs op for all the bridges * during encoder configuration. * - * When creating a bridge driver, one can implement drm_bridge_funcs op with - * the help of these rough rules: - * - * pre_enable: this contains things needed to be done for the bridge before - * its clock and timings are enabled by its source. For a bridge, its source - * is generally the encoder or bridge just before it in the encoder chain. - * - * enable: this contains things needed to be done for the bridge once its - * source is enabled. In other words, enable is called once the source is - * ready with clock and timing needed by the bridge. - * - * disable: this contains things needed to be done for the bridge assuming - * that its source is still enabled, i.e. clock and timings are still on. - * - * post_disable: this contains things needed to be done for the bridge once - * its source is disabled, i.e. once clocks and timings are off. - * - * mode_fixup: this should fixup the given mode for the bridge. It is called - * after the encoder's mode fixup. mode_fixup can also reject a mode completely - * if it's unsuitable for the hardware. - * - * mode_set: this sets up the mode for the bridge. It assumes that its source - * (an encoder or a bridge) has set the mode too. + * For detailed specification of the bridge callbacks see &drm_bridge_funcs. */ /** @@ -159,7 +142,7 @@ EXPORT_SYMBOL(drm_bridge_attach); * @mode: desired mode to be set for the bridge * @adjusted_mode: updated mode that works for this bridge * - * Calls 'mode_fixup' drm_bridge_funcs op for all the bridges in the + * Calls ->mode_fixup() &drm_bridge_funcs op for all the bridges in the * encoder chain, starting from the first bridge to the last. * * Note: the bridge passed should be the one closest to the encoder @@ -186,11 +169,11 @@ bool drm_bridge_mode_fixup(struct drm_bridge *bridge, EXPORT_SYMBOL(drm_bridge_mode_fixup); /** - * drm_bridge_disable - calls 'disable' drm_bridge_funcs op for all + * drm_bridge_disable - calls ->disable() &drm_bridge_funcs op for all * bridges in the encoder chain. 
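For a driver that does not use the CRTC or atomic helpers, the ordering of these bridge entry points around its own encoder code looks roughly like the sketch below. The example_* handlers are placeholders, and the helpers already implement exactly this sequence for drivers that use them.

static void example_encoder_disable(struct drm_encoder *encoder)
{
	/* Disable the bridge chain before the encoder itself shuts down. */
	drm_bridge_disable(encoder->bridge);
	example_hw_encoder_off(encoder);		/* placeholder */
	/* Then run post_disable once clocks and timings are off. */
	drm_bridge_post_disable(encoder->bridge);
}

static void example_encoder_enable(struct drm_encoder *encoder)
{
	/* pre_enable runs before the encoder provides clock and timings. */
	drm_bridge_pre_enable(encoder->bridge);
	example_hw_encoder_on(encoder);			/* placeholder */
	/* enable runs once the source is up and running. */
	drm_bridge_enable(encoder->bridge);
}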
* @bridge: bridge control structure * - * Calls 'disable' drm_bridge_funcs op for all the bridges in the encoder + * Calls ->disable() &drm_bridge_funcs op for all the bridges in the encoder * chain, starting from the last bridge to the first. These are called before * calling the encoder's prepare op. * @@ -208,11 +191,11 @@ void drm_bridge_disable(struct drm_bridge *bridge) EXPORT_SYMBOL(drm_bridge_disable); /** - * drm_bridge_post_disable - calls 'post_disable' drm_bridge_funcs op for + * drm_bridge_post_disable - calls ->post_disable() &drm_bridge_funcs op for * all bridges in the encoder chain. * @bridge: bridge control structure * - * Calls 'post_disable' drm_bridge_funcs op for all the bridges in the + * Calls ->post_disable() &drm_bridge_funcs op for all the bridges in the * encoder chain, starting from the first bridge to the last. These are called * after completing the encoder's prepare op. * @@ -236,7 +219,7 @@ EXPORT_SYMBOL(drm_bridge_post_disable); * @mode: desired mode to be set for the bridge * @adjusted_mode: updated mode that works for this bridge * - * Calls 'mode_set' drm_bridge_funcs op for all the bridges in the + * Calls ->mode_set() &drm_bridge_funcs op for all the bridges in the * encoder chain, starting from the first bridge to the last. * * Note: the bridge passed should be the one closest to the encoder @@ -256,11 +239,11 @@ void drm_bridge_mode_set(struct drm_bridge *bridge, EXPORT_SYMBOL(drm_bridge_mode_set); /** - * drm_bridge_pre_enable - calls 'pre_enable' drm_bridge_funcs op for all + * drm_bridge_pre_enable - calls ->pre_enable() &drm_bridge_funcs op for all * bridges in the encoder chain. * @bridge: bridge control structure * - * Calls 'pre_enable' drm_bridge_funcs op for all the bridges in the encoder + * Calls ->pre_enable() &drm_bridge_funcs op for all the bridges in the encoder * chain, starting from the last bridge to the first. These are called * before calling the encoder's commit op. * @@ -278,11 +261,11 @@ void drm_bridge_pre_enable(struct drm_bridge *bridge) EXPORT_SYMBOL(drm_bridge_pre_enable); /** - * drm_bridge_enable - calls 'enable' drm_bridge_funcs op for all bridges + * drm_bridge_enable - calls ->enable() &drm_bridge_funcs op for all bridges * in the encoder chain. * @bridge: bridge control structure * - * Calls 'enable' drm_bridge_funcs op for all the bridges in the encoder + * Calls ->enable() &drm_bridge_funcs op for all the bridges in the encoder * chain, starting from the first bridge to the last. These are called * after completing the encoder's commit op. * diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index 32dd134700bd..d40bab29747e 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -649,6 +649,18 @@ EXPORT_SYMBOL(drm_framebuffer_remove); DEFINE_WW_CLASS(crtc_ww_class); +static unsigned int drm_num_crtcs(struct drm_device *dev) +{ + unsigned int num = 0; + struct drm_crtc *tmp; + + drm_for_each_crtc(tmp, dev) { + num++; + } + + return num; +} + /** * drm_crtc_init_with_planes - Initialise a new CRTC object with * specified primary and cursor planes. @@ -657,6 +669,7 @@ DEFINE_WW_CLASS(crtc_ww_class); * @primary: Primary plane for CRTC * @cursor: Cursor plane for CRTC * @funcs: callbacks for the new CRTC + * @name: printf style format string for the CRTC name, or NULL for default name * * Inits a new object created as base part of a driver crtc object. 
* @@ -666,7 +679,8 @@ DEFINE_WW_CLASS(crtc_ww_class); int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc, struct drm_plane *primary, struct drm_plane *cursor, - const struct drm_crtc_funcs *funcs) + const struct drm_crtc_funcs *funcs, + const char *name, ...) { struct drm_mode_config *config = &dev->mode_config; int ret; @@ -682,6 +696,21 @@ int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc, if (ret) return ret; + if (name) { + va_list ap; + + va_start(ap, name); + crtc->name = kvasprintf(GFP_KERNEL, name, ap); + va_end(ap); + } else { + crtc->name = kasprintf(GFP_KERNEL, "crtc-%d", + drm_num_crtcs(dev)); + } + if (!crtc->name) { + drm_mode_object_put(dev, &crtc->base); + return -ENOMEM; + } + crtc->base.properties = &crtc->properties; list_add_tail(&crtc->head, &config->crtc_list); @@ -728,6 +757,8 @@ void drm_crtc_cleanup(struct drm_crtc *crtc) if (crtc->state && crtc->funcs->atomic_destroy_state) crtc->funcs->atomic_destroy_state(crtc, crtc->state); + kfree(crtc->name); + memset(crtc, 0, sizeof(*crtc)); } EXPORT_SYMBOL(drm_crtc_cleanup); @@ -1075,6 +1106,7 @@ EXPORT_SYMBOL(drm_connector_unplug_all); * @encoder: the encoder to init * @funcs: callbacks for this encoder * @encoder_type: user visible type of the encoder + * @name: printf style format string for the encoder name, or NULL for default name * * Initialises a preallocated encoder. Encoder should be * subclassed as part of driver encoder objects. @@ -1085,7 +1117,7 @@ EXPORT_SYMBOL(drm_connector_unplug_all); int drm_encoder_init(struct drm_device *dev, struct drm_encoder *encoder, const struct drm_encoder_funcs *funcs, - int encoder_type) + int encoder_type, const char *name, ...) { int ret; @@ -1098,9 +1130,17 @@ int drm_encoder_init(struct drm_device *dev, encoder->dev = dev; encoder->encoder_type = encoder_type; encoder->funcs = funcs; - encoder->name = kasprintf(GFP_KERNEL, "%s-%d", - drm_encoder_enum_list[encoder_type].name, - encoder->base.id); + if (name) { + va_list ap; + + va_start(ap, name); + encoder->name = kvasprintf(GFP_KERNEL, name, ap); + va_end(ap); + } else { + encoder->name = kasprintf(GFP_KERNEL, "%s-%d", + drm_encoder_enum_list[encoder_type].name, + encoder->base.id); + } if (!encoder->name) { ret = -ENOMEM; goto out_put; @@ -1141,6 +1181,18 @@ void drm_encoder_cleanup(struct drm_encoder *encoder) } EXPORT_SYMBOL(drm_encoder_cleanup); +static unsigned int drm_num_planes(struct drm_device *dev) +{ + unsigned int num = 0; + struct drm_plane *tmp; + + drm_for_each_plane(tmp, dev) { + num++; + } + + return num; +} + /** * drm_universal_plane_init - Initialize a new universal plane object * @dev: DRM device @@ -1150,6 +1202,7 @@ EXPORT_SYMBOL(drm_encoder_cleanup); * @formats: array of supported formats (%DRM_FORMAT_*) * @format_count: number of elements in @formats * @type: type of plane (overlay, primary, cursor) + * @name: printf style format string for the plane name, or NULL for default name * * Initializes a plane object of type @type. * @@ -1160,7 +1213,8 @@ int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane, unsigned long possible_crtcs, const struct drm_plane_funcs *funcs, const uint32_t *formats, unsigned int format_count, - enum drm_plane_type type) + enum drm_plane_type type, + const char *name, ...) 
{ struct drm_mode_config *config = &dev->mode_config; int ret; @@ -1182,6 +1236,22 @@ int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane, return -ENOMEM; } + if (name) { + va_list ap; + + va_start(ap, name); + plane->name = kvasprintf(GFP_KERNEL, name, ap); + va_end(ap); + } else { + plane->name = kasprintf(GFP_KERNEL, "plane-%d", + drm_num_planes(dev)); + } + if (!plane->name) { + kfree(plane->format_types); + drm_mode_object_put(dev, &plane->base); + return -ENOMEM; + } + memcpy(plane->format_types, formats, format_count * sizeof(uint32_t)); plane->format_count = format_count; plane->possible_crtcs = possible_crtcs; @@ -1240,7 +1310,7 @@ int drm_plane_init(struct drm_device *dev, struct drm_plane *plane, type = is_primary ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY; return drm_universal_plane_init(dev, plane, possible_crtcs, funcs, - formats, format_count, type); + formats, format_count, type, NULL); } EXPORT_SYMBOL(drm_plane_init); @@ -1272,6 +1342,8 @@ void drm_plane_cleanup(struct drm_plane *plane) if (plane->state && plane->funcs->atomic_destroy_state) plane->funcs->atomic_destroy_state(plane, plane->state); + kfree(plane->name); + memset(plane, 0, sizeof(*plane)); } EXPORT_SYMBOL(drm_plane_cleanup); @@ -1801,7 +1873,8 @@ int drm_mode_getresources(struct drm_device *dev, void *data, copied = 0; crtc_id = (uint32_t __user *)(unsigned long)card_res->crtc_id_ptr; drm_for_each_crtc(crtc, dev) { - DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id); + DRM_DEBUG_KMS("[CRTC:%d:%s]\n", + crtc->base.id, crtc->name); if (put_user(crtc->base.id, crtc_id + copied)) { ret = -EFAULT; goto out; @@ -2646,7 +2719,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data, ret = -ENOENT; goto out; } - DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id); + DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name); if (crtc_req->mode_valid) { /* If we have a mode we need a framebuffer. */ @@ -4785,9 +4858,7 @@ static int drm_mode_connector_set_obj_prop(struct drm_mode_object *obj, /* Do DPMS ourselves */ if (property == connector->dev->mode_config.dpms_property) { - ret = 0; - if (connector->funcs->dpms) - ret = (*connector->funcs->dpms)(connector, (int)value); + ret = (*connector->funcs->dpms)(connector, (int)value); } else if (connector->funcs->set_property) ret = connector->funcs->set_property(connector, property, value); @@ -4983,6 +5054,20 @@ int drm_mode_connector_attach_encoder(struct drm_connector *connector, { int i; + /* + * In the past, drivers have attempted to model the static association + * of connector to encoder in simple connector/encoder devices using a + * direct assignment of connector->encoder = encoder. This connection + * is a logical one and the responsibility of the core, so drivers are + * expected not to mess with this. + * + * Note that the error return should've been enough here, but a large + * majority of drivers ignores the return value, so add in a big WARN + * to get people's attention. + */ + if (WARN_ON(connector->encoder)) + return -EINVAL; + for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { if (connector->encoder_ids[i] == 0) { connector->encoder_ids[i] = encoder->base.id; diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c index 10d0989db273..a02a7f9a6a9d 100644 --- a/drivers/gpu/drm/drm_crtc_helper.c +++ b/drivers/gpu/drm/drm_crtc_helper.c @@ -51,6 +51,11 @@ * the same callbacks which drivers can use to e.g. restore the modeset * configuration on resume with drm_helper_resume_force_mode(). 
* + * Note that this helper library doesn't track the current power state of CRTCs + * and encoders. It can call callbacks like ->dpms() even though the hardware is + * already in the desired state. This deficiency has been fixed in the atomic + * helpers. + * * The driver callbacks are mostly compatible with the atomic modeset helpers, * except for the handling of the primary plane: Atomic helpers require that the * primary plane is implemented as a real standalone plane and not directly tied @@ -62,6 +67,11 @@ * converting to the plane helpers). New drivers must not use these functions * but need to implement the atomic interface instead, potentially using the * atomic helpers for that. + * + * These legacy modeset helpers use the same function table structures as + * all other modesetting helpers. See the documentation for struct + * &drm_crtc_helper_funcs, struct &drm_encoder_helper_funcs and struct + * &drm_connector_helper_funcs. */ MODULE_AUTHOR("David Airlie, Jesse Barnes"); MODULE_DESCRIPTION("DRM KMS helper"); @@ -206,8 +216,8 @@ static void __drm_helper_disable_unused_functions(struct drm_device *dev) * @dev: DRM device * * This function walks through the entire mode setting configuration of @dev. It - * will remove any crtc links of unused encoders and encoder links of - * disconnected connectors. Then it will disable all unused encoders and crtcs + * will remove any CRTC links of unused encoders and encoder links of + * disconnected connectors. Then it will disable all unused encoders and CRTCs * either by calling their disable callback if available or by calling their * dpms callback with DRM_MODE_DPMS_OFF. */ @@ -329,7 +339,7 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc, DRM_DEBUG_KMS("CRTC fixup failed\n"); goto done; } - DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id); + DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name); crtc->hwmode = *adjusted_mode; @@ -445,11 +455,36 @@ drm_crtc_helper_disable(struct drm_crtc *crtc) * drm_crtc_helper_set_config - set a new config from userspace * @set: mode set configuration * - * Setup a new configuration, provided by the upper layers (either an ioctl call - * from userspace or internally e.g. from the fbdev support code) in @set, and - * enable it. This is the main helper functions for drivers that implement - * kernel mode setting with the crtc helper functions and the assorted - * ->prepare(), ->modeset() and ->commit() helper callbacks. + * The drm_crtc_helper_set_config() helper function implements the set_config + * callback of struct &drm_crtc_funcs for drivers using the legacy CRTC helpers. + * + * It first tries to locate the best encoder for each connector by calling the + * connector ->best_encoder() (struct &drm_connector_helper_funcs) helper + * operation. + * + * After locating the appropriate encoders, the helper function will call the + * mode_fixup encoder and CRTC helper operations to adjust the requested mode, + * or reject it completely in which case an error will be returned to the + * application. If the new configuration after mode adjustment is identical to + * the current configuration the helper function will return without performing + * any other operation. + * + * If the adjusted mode is identical to the current mode but changes to the + * frame buffer need to be applied, the drm_crtc_helper_set_config() function + * will call the CRTC ->mode_set_base() (struct &drm_crtc_helper_funcs) helper + * operation. 
+ * + * If the adjusted mode differs from the current mode, or if the + * ->mode_set_base() helper operation is not provided, the helper function + * performs a full mode set sequence by calling the ->prepare(), ->mode_set() + * and ->commit() CRTC and encoder helper operations, in that order. + * Alternatively it can also use the dpms and disable helper operations. For + * details see struct &drm_crtc_helper_funcs and struct + * &drm_encoder_helper_funcs. + * + * This function is deprecated. New drivers must implement atomic modeset + * support, for which this function is unsuitable. Instead drivers should use + * drm_atomic_helper_set_config(). * * Returns: * Returns 0 on success, negative errno numbers on failure. @@ -484,11 +519,13 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set) set->fb = NULL; if (set->fb) { - DRM_DEBUG_KMS("[CRTC:%d] [FB:%d] #connectors=%d (x y) (%i %i)\n", - set->crtc->base.id, set->fb->base.id, - (int)set->num_connectors, set->x, set->y); + DRM_DEBUG_KMS("[CRTC:%d:%s] [FB:%d] #connectors=%d (x y) (%i %i)\n", + set->crtc->base.id, set->crtc->name, + set->fb->base.id, + (int)set->num_connectors, set->x, set->y); } else { - DRM_DEBUG_KMS("[CRTC:%d] [NOFB]\n", set->crtc->base.id); + DRM_DEBUG_KMS("[CRTC:%d:%s] [NOFB]\n", + set->crtc->base.id, set->crtc->name); drm_crtc_helper_disable(set->crtc); return 0; } @@ -628,12 +665,12 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set) connector->encoder->crtc = new_crtc; } if (new_crtc) { - DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [CRTC:%d]\n", - connector->base.id, connector->name, - new_crtc->base.id); + DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [CRTC:%d:%s]\n", + connector->base.id, connector->name, + new_crtc->base.id, new_crtc->name); } else { DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [NOCRTC]\n", - connector->base.id, connector->name); + connector->base.id, connector->name); } } @@ -650,8 +687,8 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set) if (!drm_crtc_helper_set_mode(set->crtc, set->mode, set->x, set->y, save_set.fb)) { - DRM_ERROR("failed to set mode on [CRTC:%d]\n", - set->crtc->base.id); + DRM_ERROR("failed to set mode on [CRTC:%d:%s]\n", + set->crtc->base.id, set->crtc->name); set->crtc->primary->fb = save_set.fb; ret = -EINVAL; goto fail; @@ -758,10 +795,18 @@ static int drm_helper_choose_crtc_dpms(struct drm_crtc *crtc) * @connector: affected connector * @mode: DPMS mode * - * This is the main helper function provided by the crtc helper framework for + * The drm_helper_connector_dpms() helper function implements the ->dpms() + * callback of struct &drm_connector_funcs for drivers using the legacy CRTC helpers. + * + * This is the main helper function provided by the CRTC helper framework for * implementing the DPMS connector attribute. It computes the new desired DPMS - * state for all encoders and crtcs in the output mesh and calls the ->dpms() - * callback provided by the driver appropriately. + * state for all encoders and CRTCs in the output mesh and calls the ->dpms() + * callbacks provided by the driver in struct &drm_crtc_helper_funcs and struct + * &drm_encoder_helper_funcs appropriately. + * + * This function is deprecated. New drivers must implement atomic modeset + * support, for which this function is unsuitable. Instead drivers should use + * drm_atomic_helper_connector_dpms(). * * Returns: * Always returns 0. 
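As an aside for readers of the two kerneldoc comments updated above: in a legacy (non-atomic) driver these helpers are plugged straight into the core function tables. The sketch below is purely illustrative, all foo_* names are hypothetical, and it assumes the CRTC and connector objects are embedded in longer-lived driver structures so that the plain cleanup functions are safe to use as ->destroy():

#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>

/* Hypothetical legacy-KMS wiring of the helpers documented above. */
static const struct drm_crtc_funcs foo_crtc_funcs = {
	.set_config	= drm_crtc_helper_set_config,	/* legacy modeset entry point */
	.destroy	= drm_crtc_cleanup,
};

static enum drm_connector_status
foo_connector_detect(struct drm_connector *connector, bool force)
{
	return connector_status_connected;	/* stub, real drivers probe the hardware */
}

static const struct drm_connector_funcs foo_connector_funcs = {
	.dpms		= drm_helper_connector_dpms,	/* legacy DPMS handling */
	.detect		= foo_connector_detect,
	.fill_modes	= drm_helper_probe_single_connector_modes,
	.destroy	= drm_connector_cleanup,
};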
@@ -919,9 +964,9 @@ EXPORT_SYMBOL(drm_helper_resume_force_mode); * @old_fb: previous framebuffer * * This function implements a callback useable as the ->mode_set callback - * required by the crtc helpers. Besides the atomic plane helper functions for + * required by the CRTC helpers. Besides the atomic plane helper functions for * the primary plane the driver must also provide the ->mode_set_nofb callback - * to set up the crtc. + * to set up the CRTC. * * This is a transitional helper useful for converting drivers to the atomic * interfaces. @@ -985,7 +1030,7 @@ EXPORT_SYMBOL(drm_helper_crtc_mode_set); * @old_fb: previous framebuffer * * This function implements a callback useable as the ->mode_set_base used - * required by the crtc helpers. The driver must provide the atomic plane helper + * required by the CRTC helpers. The driver must provide the atomic plane helper * functions for the primary plane. * * This is a transitional helper useful for converting drivers to the atomic diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c index 809959d56d78..6ed90a2437e5 100644 --- a/drivers/gpu/drm/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/drm_dp_mst_topology.c @@ -666,7 +666,9 @@ static int build_enum_path_resources(struct drm_dp_sideband_msg_tx *msg, int por } static int build_allocate_payload(struct drm_dp_sideband_msg_tx *msg, int port_num, - u8 vcpi, uint16_t pbn) + u8 vcpi, uint16_t pbn, + u8 number_sdp_streams, + u8 *sdp_stream_sink) { struct drm_dp_sideband_msg_req_body req; memset(&req, 0, sizeof(req)); @@ -674,6 +676,9 @@ static int build_allocate_payload(struct drm_dp_sideband_msg_tx *msg, int port_n req.u.allocate_payload.port_number = port_num; req.u.allocate_payload.vcpi = vcpi; req.u.allocate_payload.pbn = pbn; + req.u.allocate_payload.number_sdp_streams = number_sdp_streams; + memcpy(req.u.allocate_payload.sdp_stream_sink, sdp_stream_sink, + number_sdp_streams); drm_dp_encode_sideband_req(&req, msg); msg->path_msg = true; return 0; @@ -973,17 +978,17 @@ static struct drm_dp_mst_port *drm_dp_get_port(struct drm_dp_mst_branch *mstb, u static u8 drm_dp_calculate_rad(struct drm_dp_mst_port *port, u8 *rad) { - int lct = port->parent->lct; + int parent_lct = port->parent->lct; int shift = 4; - int idx = lct / 2; - if (lct > 1) { - memcpy(rad, port->parent->rad, idx); - shift = (lct % 2) ? 4 : 0; + int idx = (parent_lct - 1) / 2; + if (parent_lct > 1) { + memcpy(rad, port->parent->rad, idx + 1); + shift = (parent_lct % 2) ? 4 : 0; } else rad[0] = 0; rad[idx] |= port->port_num << shift; - return lct + 1; + return parent_lct + 1; } /* @@ -1039,7 +1044,7 @@ static void build_mst_prop_path(const struct drm_dp_mst_branch *mstb, snprintf(proppath, proppath_size, "mst:%d", mstb->mgr->conn_base_id); for (i = 0; i < (mstb->lct - 1); i++) { int shift = (i % 2) ? 0 : 4; - int port_num = mstb->rad[i / 2] >> shift; + int port_num = (mstb->rad[i / 2] >> shift) & 0xf; snprintf(temp, sizeof(temp), "-%d", port_num); strlcat(proppath, temp, proppath_size); } @@ -1190,7 +1195,7 @@ static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_ for (i = 0; i < lct - 1; i++) { int shift = (i % 2) ? 
0 : 4; - int port_num = rad[i / 2] >> shift; + int port_num = (rad[i / 2] >> shift) & 0xf; list_for_each_entry(port, &mstb->ports, next) { if (port->port_num == port_num) { @@ -1210,6 +1215,50 @@ out: return mstb; } +static struct drm_dp_mst_branch *get_mst_branch_device_by_guid_helper( + struct drm_dp_mst_branch *mstb, + uint8_t *guid) +{ + struct drm_dp_mst_branch *found_mstb; + struct drm_dp_mst_port *port; + + list_for_each_entry(port, &mstb->ports, next) { + if (!port->mstb) + continue; + + if (port->guid_valid && memcmp(port->guid, guid, 16) == 0) + return port->mstb; + + found_mstb = get_mst_branch_device_by_guid_helper(port->mstb, guid); + + if (found_mstb) + return found_mstb; + } + + return NULL; +} + +static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device_by_guid( + struct drm_dp_mst_topology_mgr *mgr, + uint8_t *guid) +{ + struct drm_dp_mst_branch *mstb; + + /* find the port by iterating down */ + mutex_lock(&mgr->lock); + + if (mgr->guid_valid && memcmp(mgr->guid, guid, 16) == 0) + mstb = mgr->mst_primary; + else + mstb = get_mst_branch_device_by_guid_helper(mgr->mst_primary, guid); + + if (mstb) + kref_get(&mstb->kref); + + mutex_unlock(&mgr->lock); + return mstb; +} + static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_branch *mstb) { @@ -1320,6 +1369,7 @@ static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr, struct drm_dp_sideband_msg_tx *txmsg) { struct drm_dp_mst_branch *mstb = txmsg->dst; + u8 req_type; /* both msg slots are full */ if (txmsg->seqno == -1) { @@ -1336,7 +1386,13 @@ static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr, txmsg->seqno = 1; mstb->tx_slots[txmsg->seqno] = txmsg; } - hdr->broadcast = 0; + + req_type = txmsg->msg[0] & 0x7f; + if (req_type == DP_CONNECTION_STATUS_NOTIFY || + req_type == DP_RESOURCE_STATUS_NOTIFY) + hdr->broadcast = 1; + else + hdr->broadcast = 0; hdr->path_msg = txmsg->path_msg; hdr->lct = mstb->lct; hdr->lcr = mstb->lct - 1; @@ -1438,26 +1494,18 @@ static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr) } /* called holding qlock */ -static void process_single_up_tx_qlock(struct drm_dp_mst_topology_mgr *mgr) +static void process_single_up_tx_qlock(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_sideband_msg_tx *txmsg) { - struct drm_dp_sideband_msg_tx *txmsg; int ret; /* construct a chunk from the first msg in the tx_msg queue */ - if (list_empty(&mgr->tx_msg_upq)) { - mgr->tx_up_in_progress = false; - return; - } - - txmsg = list_first_entry(&mgr->tx_msg_upq, struct drm_dp_sideband_msg_tx, next); ret = process_single_tx_qlock(mgr, txmsg, true); - if (ret == 1) { - /* up txmsgs aren't put in slots - so free after we send it */ - list_del(&txmsg->next); - kfree(txmsg); - } else if (ret) + + if (ret != 1) DRM_DEBUG_KMS("failed to send msg in q %d\n", ret); - mgr->tx_up_in_progress = true; + + txmsg->dst->tx_slots[txmsg->seqno] = NULL; } static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr, @@ -1562,6 +1610,8 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_sideband_msg_tx *txmsg; struct drm_dp_mst_branch *mstb; int len, ret; + u8 sinks[DRM_DP_MAX_SDP_STREAMS]; + int i; mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent); if (!mstb) @@ -1573,10 +1623,13 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr, goto fail_put; } + for (i = 0; i < port->num_sdp_streams; i++) + sinks[i] = i; + txmsg->dst = mstb; len = 
build_allocate_payload(txmsg, port->port_num, id, - pbn); + pbn, port->num_sdp_streams, sinks); drm_dp_queue_down_tx(mgr, txmsg); @@ -1673,6 +1726,7 @@ int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr) if (mgr->proposed_vcpis[i]) { port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi); req_payload.num_slots = mgr->proposed_vcpis[i]->num_slots; + req_payload.vcpi = mgr->proposed_vcpis[i]->vcpi; } else { port = NULL; req_payload.num_slots = 0; @@ -1688,6 +1742,7 @@ int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr) if (req_payload.num_slots) { drm_dp_create_payload_step1(mgr, mgr->proposed_vcpis[i]->vcpi, &req_payload); mgr->payloads[i].num_slots = req_payload.num_slots; + mgr->payloads[i].vcpi = req_payload.vcpi; } else if (mgr->payloads[i].num_slots) { mgr->payloads[i].num_slots = 0; drm_dp_destroy_payload_step1(mgr, port, port->vcpi.vcpi, &mgr->payloads[i]); @@ -1823,7 +1878,7 @@ static int drm_dp_encode_up_ack_reply(struct drm_dp_sideband_msg_tx *msg, u8 req { struct drm_dp_sideband_msg_reply_body reply; - reply.reply_type = 1; + reply.reply_type = 0; reply.req_type = req_type; drm_dp_encode_sideband_reply(&reply, msg); return 0; @@ -1844,11 +1899,12 @@ static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr, drm_dp_encode_up_ack_reply(txmsg, req_type); mutex_lock(&mgr->qlock); - list_add_tail(&txmsg->next, &mgr->tx_msg_upq); - if (!mgr->tx_up_in_progress) { - process_single_up_tx_qlock(mgr); - } + + process_single_up_tx_qlock(mgr, txmsg); + mutex_unlock(&mgr->qlock); + + kfree(txmsg); return 0; } @@ -2145,28 +2201,50 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr) if (mgr->up_req_recv.have_eomt) { struct drm_dp_sideband_msg_req_body msg; - struct drm_dp_mst_branch *mstb; + struct drm_dp_mst_branch *mstb = NULL; bool seqno; - mstb = drm_dp_get_mst_branch_device(mgr, - mgr->up_req_recv.initial_hdr.lct, - mgr->up_req_recv.initial_hdr.rad); - if (!mstb) { - DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct); - memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx)); - return 0; + + if (!mgr->up_req_recv.initial_hdr.broadcast) { + mstb = drm_dp_get_mst_branch_device(mgr, + mgr->up_req_recv.initial_hdr.lct, + mgr->up_req_recv.initial_hdr.rad); + if (!mstb) { + DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct); + memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx)); + return 0; + } } seqno = mgr->up_req_recv.initial_hdr.seqno; drm_dp_sideband_parse_req(&mgr->up_req_recv, &msg); if (msg.req_type == DP_CONNECTION_STATUS_NOTIFY) { - drm_dp_send_up_ack_reply(mgr, mstb, msg.req_type, seqno, false); + drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, false); + + if (!mstb) + mstb = drm_dp_get_mst_branch_device_by_guid(mgr, msg.u.conn_stat.guid); + + if (!mstb) { + DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct); + memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx)); + return 0; + } + drm_dp_update_port(mstb, &msg.u.conn_stat); DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n", msg.u.conn_stat.port_number, msg.u.conn_stat.legacy_device_plug_status, msg.u.conn_stat.displayport_device_plug_status, msg.u.conn_stat.message_capability_status, msg.u.conn_stat.input_port, msg.u.conn_stat.peer_device_type); (*mgr->cbs->hotplug)(mgr); } else if (msg.req_type == DP_RESOURCE_STATUS_NOTIFY) { - 
drm_dp_send_up_ack_reply(mgr, mstb, msg.req_type, seqno, false); + drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, false); + if (!mstb) + mstb = drm_dp_get_mst_branch_device_by_guid(mgr, msg.u.resource_stat.guid); + + if (!mstb) { + DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct); + memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx)); + return 0; + } + DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n", msg.u.resource_stat.port_number, msg.u.resource_stat.available_pbn); } @@ -2258,6 +2336,27 @@ out: } EXPORT_SYMBOL(drm_dp_mst_detect_port); +/** + * drm_dp_mst_port_has_audio() - Check whether port has audio capability or not + * @mgr: manager for this port + * @port: unverified pointer to a port. + * + * This returns whether the port supports audio or not. + */ +bool drm_dp_mst_port_has_audio(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_port *port) +{ + bool ret = false; + + port = drm_dp_get_validated_port_ref(mgr, port); + if (!port) + return ret; + ret = port->has_audio; + drm_dp_put_port(port); + return ret; +} +EXPORT_SYMBOL(drm_dp_mst_port_has_audio); + /** * drm_dp_mst_get_edid() - get EDID for an MST port * @connector: toplevel connector to get EDID for @@ -2283,6 +2382,7 @@ struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_ edid = drm_get_edid(connector, &port->aux.ddc); drm_mode_connector_set_tile_property(connector); } + port->has_audio = drm_detect_monitor_audio(edid); drm_dp_put_port(port); return edid; } @@ -2566,7 +2666,7 @@ static void drm_dp_mst_dump_mstb(struct seq_file *m, seq_printf(m, "%smst: %p, %d\n", prefix, mstb, mstb->num_ports); list_for_each_entry(port, &mstb->ports, next) { - seq_printf(m, "%sport: %d: ddps: %d ldps: %d, %p, conn: %p\n", prefix, port->port_num, port->ddps, port->ldps, port, port->connector); + seq_printf(m, "%sport: %d: ddps: %d ldps: %d, sdp: %d/%d, %p, conn: %p\n", prefix, port->port_num, port->ddps, port->ldps, port->num_sdp_streams, port->num_sdp_stream_sinks, port, port->connector); if (port->mstb) drm_dp_mst_dump_mstb(m, port->mstb); } @@ -2736,7 +2836,6 @@ int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr, mutex_init(&mgr->qlock); mutex_init(&mgr->payload_lock); mutex_init(&mgr->destroy_connector_lock); - INIT_LIST_HEAD(&mgr->tx_msg_upq); INIT_LIST_HEAD(&mgr->tx_msg_downq); INIT_LIST_HEAD(&mgr->destroy_connector_list); INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work); diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index 7dd6728dd092..167c8d3d4a31 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c @@ -44,10 +44,6 @@ MODULE_AUTHOR(CORE_AUTHOR); MODULE_DESCRIPTION(CORE_DESC); MODULE_LICENSE("GPL and additional rights"); MODULE_PARM_DESC(debug, "Enable debug output"); -MODULE_PARM_DESC(vblankoffdelay, "Delay until vblank irq auto-disable [msecs] (0: never disable, <0: disable immediately)"); -MODULE_PARM_DESC(timestamp_precision_usec, "Max. 
error on timestamps [usecs]"); -MODULE_PARM_DESC(timestamp_monotonic, "Use monotonic timestamps"); - module_param_named(debug, drm_debug, int, 0600); static DEFINE_SPINLOCK(drm_minor_lock); @@ -633,8 +629,17 @@ struct drm_device *drm_dev_alloc(struct drm_driver *driver, } } + if (parent) { + ret = drm_dev_set_unique(dev, dev_name(parent)); + if (ret) + goto err_setunique; + } + return dev; +err_setunique: + if (drm_core_check_feature(dev, DRIVER_GEM)) + drm_gem_destroy(dev); err_ctxbitmap: drm_legacy_ctxbitmap_cleanup(dev); drm_ht_remove(&dev->map_hash); @@ -797,23 +802,18 @@ EXPORT_SYMBOL(drm_dev_unregister); /** * drm_dev_set_unique - Set the unique name of a DRM device * @dev: device of which to set the unique name - * @fmt: format string for unique name + * @name: unique name * - * Sets the unique name of a DRM device using the specified format string and - * a variable list of arguments. Drivers can use this at driver probe time if - * the unique name of the devices they drive is static. + * Sets the unique name of a DRM device using the specified string. Drivers + * can use this at driver probe time if the unique name of the devices they + * drive is static. * * Return: 0 on success or a negative error code on failure. */ -int drm_dev_set_unique(struct drm_device *dev, const char *fmt, ...) +int drm_dev_set_unique(struct drm_device *dev, const char *name) { - va_list ap; - kfree(dev->unique); - - va_start(ap, fmt); - dev->unique = kvasprintf(GFP_KERNEL, fmt, ap); - va_end(ap); + dev->unique = kstrdup(name, GFP_KERNEL); return dev->unique ? 0 : -ENOMEM; } diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index c214f1246cb4..04cb4877fabd 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -637,8 +637,12 @@ static const struct minimode extra_modes[] = { /* * Probably taken from CEA-861 spec. * This table is converted from xorg's hw/xfree86/modes/xf86EdidModes.c. + * + * Index using the VIC. */ static const struct drm_display_mode edid_cea_modes[] = { + /* 0 - dummy, VICs start at 1 */ + { }, /* 1 - 640x480@60Hz */ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656, 752, 800, 0, 480, 490, 492, 525, 0, @@ -987,9 +991,11 @@ static const struct drm_display_mode edid_cea_modes[] = { }; /* - * HDMI 1.4 4k modes. + * HDMI 1.4 4k modes. Index using the VIC. 
*/ static const struct drm_display_mode edid_4k_modes[] = { + /* 0 - dummy, VICs start at 1 */ + { }, /* 1 - 3840x2160@30Hz */ { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 4016, 4104, 4400, 0, @@ -2548,13 +2554,13 @@ cea_mode_alternate_clock(const struct drm_display_mode *cea_mode) static u8 drm_match_cea_mode_clock_tolerance(const struct drm_display_mode *to_match, unsigned int clock_tolerance) { - u8 mode; + u8 vic; if (!to_match->clock) return 0; - for (mode = 0; mode < ARRAY_SIZE(edid_cea_modes); mode++) { - const struct drm_display_mode *cea_mode = &edid_cea_modes[mode]; + for (vic = 1; vic < ARRAY_SIZE(edid_cea_modes); vic++) { + const struct drm_display_mode *cea_mode = &edid_cea_modes[vic]; unsigned int clock1, clock2; /* Check both 60Hz and 59.94Hz */ @@ -2566,7 +2572,7 @@ static u8 drm_match_cea_mode_clock_tolerance(const struct drm_display_mode *to_m continue; if (drm_mode_equal_no_clocks(to_match, cea_mode)) - return mode + 1; + return vic; } return 0; @@ -2581,13 +2587,13 @@ static u8 drm_match_cea_mode_clock_tolerance(const struct drm_display_mode *to_m */ u8 drm_match_cea_mode(const struct drm_display_mode *to_match) { - u8 mode; + u8 vic; if (!to_match->clock) return 0; - for (mode = 0; mode < ARRAY_SIZE(edid_cea_modes); mode++) { - const struct drm_display_mode *cea_mode = &edid_cea_modes[mode]; + for (vic = 1; vic < ARRAY_SIZE(edid_cea_modes); vic++) { + const struct drm_display_mode *cea_mode = &edid_cea_modes[vic]; unsigned int clock1, clock2; /* Check both 60Hz and 59.94Hz */ @@ -2597,12 +2603,17 @@ u8 drm_match_cea_mode(const struct drm_display_mode *to_match) if ((KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock1) || KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock2)) && drm_mode_equal_no_clocks_no_stereo(to_match, cea_mode)) - return mode + 1; + return vic; } return 0; } EXPORT_SYMBOL(drm_match_cea_mode); +static bool drm_valid_cea_vic(u8 vic) +{ + return vic > 0 && vic < ARRAY_SIZE(edid_cea_modes); +} + /** * drm_get_cea_aspect_ratio - get the picture aspect ratio corresponding to * the input VIC from the CEA mode list @@ -2612,10 +2623,7 @@ EXPORT_SYMBOL(drm_match_cea_mode); */ enum hdmi_picture_aspect drm_get_cea_aspect_ratio(const u8 video_code) { - /* return picture aspect ratio for video_code - 1 to access the - * right array element - */ - return edid_cea_modes[video_code-1].picture_aspect_ratio; + return edid_cea_modes[video_code].picture_aspect_ratio; } EXPORT_SYMBOL(drm_get_cea_aspect_ratio); @@ -2639,13 +2647,13 @@ hdmi_mode_alternate_clock(const struct drm_display_mode *hdmi_mode) static u8 drm_match_hdmi_mode_clock_tolerance(const struct drm_display_mode *to_match, unsigned int clock_tolerance) { - u8 mode; + u8 vic; if (!to_match->clock) return 0; - for (mode = 0; mode < ARRAY_SIZE(edid_4k_modes); mode++) { - const struct drm_display_mode *hdmi_mode = &edid_4k_modes[mode]; + for (vic = 1; vic < ARRAY_SIZE(edid_4k_modes); vic++) { + const struct drm_display_mode *hdmi_mode = &edid_4k_modes[vic]; unsigned int clock1, clock2; /* Make sure to also match alternate clocks */ @@ -2657,7 +2665,7 @@ static u8 drm_match_hdmi_mode_clock_tolerance(const struct drm_display_mode *to_ continue; if (drm_mode_equal_no_clocks(to_match, hdmi_mode)) - return mode + 1; + return vic; } return 0; @@ -2673,13 +2681,13 @@ static u8 drm_match_hdmi_mode_clock_tolerance(const struct drm_display_mode *to_ */ static u8 drm_match_hdmi_mode(const struct drm_display_mode *to_match) { - u8 mode; + u8 vic; if (!to_match->clock) return 0; - for (mode = 0; mode < 
ARRAY_SIZE(edid_4k_modes); mode++) { - const struct drm_display_mode *hdmi_mode = &edid_4k_modes[mode]; + for (vic = 1; vic < ARRAY_SIZE(edid_4k_modes); vic++) { + const struct drm_display_mode *hdmi_mode = &edid_4k_modes[vic]; unsigned int clock1, clock2; /* Make sure to also match alternate clocks */ @@ -2689,11 +2697,16 @@ static u8 drm_match_hdmi_mode(const struct drm_display_mode *to_match) if ((KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock1) || KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock2)) && drm_mode_equal_no_clocks_no_stereo(to_match, hdmi_mode)) - return mode + 1; + return vic; } return 0; } +static bool drm_valid_hdmi_vic(u8 vic) +{ + return vic > 0 && vic < ARRAY_SIZE(edid_4k_modes); +} + static int add_alternate_cea_modes(struct drm_connector *connector, struct edid *edid) { @@ -2713,16 +2726,16 @@ add_alternate_cea_modes(struct drm_connector *connector, struct edid *edid) list_for_each_entry(mode, &connector->probed_modes, head) { const struct drm_display_mode *cea_mode = NULL; struct drm_display_mode *newmode; - u8 mode_idx = drm_match_cea_mode(mode) - 1; + u8 vic = drm_match_cea_mode(mode); unsigned int clock1, clock2; - if (mode_idx < ARRAY_SIZE(edid_cea_modes)) { - cea_mode = &edid_cea_modes[mode_idx]; + if (drm_valid_cea_vic(vic)) { + cea_mode = &edid_cea_modes[vic]; clock2 = cea_mode_alternate_clock(cea_mode); } else { - mode_idx = drm_match_hdmi_mode(mode) - 1; - if (mode_idx < ARRAY_SIZE(edid_4k_modes)) { - cea_mode = &edid_4k_modes[mode_idx]; + vic = drm_match_hdmi_mode(mode); + if (drm_valid_hdmi_vic(vic)) { + cea_mode = &edid_4k_modes[vic]; clock2 = hdmi_mode_alternate_clock(cea_mode); } } @@ -2773,17 +2786,17 @@ drm_display_mode_from_vic_index(struct drm_connector *connector, { struct drm_device *dev = connector->dev; struct drm_display_mode *newmode; - u8 cea_mode; + u8 vic; if (video_db == NULL || video_index >= video_len) return NULL; /* CEA modes are numbered 1..127 */ - cea_mode = (video_db[video_index] & 127) - 1; - if (cea_mode >= ARRAY_SIZE(edid_cea_modes)) + vic = (video_db[video_index] & 127); + if (!drm_valid_cea_vic(vic)) return NULL; - newmode = drm_mode_duplicate(dev, &edid_cea_modes[cea_mode]); + newmode = drm_mode_duplicate(dev, &edid_cea_modes[vic]); if (!newmode) return NULL; @@ -2878,8 +2891,7 @@ static int add_hdmi_mode(struct drm_connector *connector, u8 vic) struct drm_device *dev = connector->dev; struct drm_display_mode *newmode; - vic--; /* VICs start at 1 */ - if (vic >= ARRAY_SIZE(edid_4k_modes)) { + if (!drm_valid_hdmi_vic(vic)) { DRM_ERROR("Unknown HDMI VIC: %d\n", vic); return 0; } @@ -3170,24 +3182,24 @@ static void fixup_detailed_cea_mode_clock(struct drm_display_mode *mode) { const struct drm_display_mode *cea_mode; int clock1, clock2, clock; - u8 mode_idx; + u8 vic; const char *type; /* * allow 5kHz clock difference either way to account for * the 10kHz clock resolution limit of detailed timings. 
*/ - mode_idx = drm_match_cea_mode_clock_tolerance(mode, 5) - 1; - if (mode_idx < ARRAY_SIZE(edid_cea_modes)) { + vic = drm_match_cea_mode_clock_tolerance(mode, 5); + if (drm_valid_cea_vic(vic)) { type = "CEA"; - cea_mode = &edid_cea_modes[mode_idx]; + cea_mode = &edid_cea_modes[vic]; clock1 = cea_mode->clock; clock2 = cea_mode_alternate_clock(cea_mode); } else { - mode_idx = drm_match_hdmi_mode_clock_tolerance(mode, 5) - 1; - if (mode_idx < ARRAY_SIZE(edid_4k_modes)) { + vic = drm_match_hdmi_mode_clock_tolerance(mode, 5); + if (drm_valid_hdmi_vic(vic)) { type = "HDMI"; - cea_mode = &edid_4k_modes[mode_idx]; + cea_mode = &edid_4k_modes[vic]; clock1 = cea_mode->clock; clock2 = hdmi_mode_alternate_clock(cea_mode); } else { @@ -3205,7 +3217,7 @@ static void fixup_detailed_cea_mode_clock(struct drm_display_mode *mode) return; DRM_DEBUG("detailed mode matches %s VIC %d, adjusting clock %d -> %d\n", - type, mode_idx + 1, mode->clock, clock); + type, vic, mode->clock, clock); mode->clock = clock; } diff --git a/drivers/gpu/drm/drm_encoder_slave.c b/drivers/gpu/drm/drm_encoder_slave.c index d18b88b755c3..e8629076de32 100644 --- a/drivers/gpu/drm/drm_encoder_slave.c +++ b/drivers/gpu/drm/drm_encoder_slave.c @@ -124,7 +124,7 @@ EXPORT_SYMBOL(drm_i2c_encoder_destroy); * Wrapper fxns which can be plugged in to drm_encoder_helper_funcs: */ -static inline struct drm_encoder_slave_funcs * +static inline const struct drm_encoder_slave_funcs * get_slave_funcs(struct drm_encoder *enc) { return to_encoder_slave(enc)->slave_funcs; diff --git a/drivers/gpu/drm/drm_fb_cma_helper.c b/drivers/gpu/drm/drm_fb_cma_helper.c index b7d5b848d2f8..c895b6fddbd8 100644 --- a/drivers/gpu/drm/drm_fb_cma_helper.c +++ b/drivers/gpu/drm/drm_fb_cma_helper.c @@ -266,7 +266,7 @@ static int drm_fbdev_cma_create(struct drm_fb_helper *helper, fbi = drm_fb_helper_alloc_fbi(helper); if (IS_ERR(fbi)) { ret = PTR_ERR(fbi); - goto err_drm_gem_cma_free_object; + goto err_gem_free_object; } fbdev_cma->fb = drm_fb_cma_alloc(dev, &mode_cmd, &obj, 1); @@ -299,8 +299,8 @@ static int drm_fbdev_cma_create(struct drm_fb_helper *helper, err_fb_info_destroy: drm_fb_helper_release_fbi(helper); -err_drm_gem_cma_free_object: - drm_gem_cma_free_object(&obj->base); +err_gem_free_object: + dev->driver->gem_free_object(&obj->base); return ret; } @@ -348,9 +348,6 @@ struct drm_fbdev_cma *drm_fbdev_cma_init(struct drm_device *dev, } - /* disable all the possible outputs/crtcs before entering KMS mode */ - drm_helper_disable_unused_functions(dev); - ret = drm_fb_helper_initial_config(helper, preferred_bpp); if (ret < 0) { dev_err(dev->dev, "Failed to set initial hw configuration.\n"); diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 69cbab5e5c81..1e103c4c6ee0 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -1251,7 +1251,7 @@ retry: goto fail; plane = mode_set->crtc->primary; - plane_mask |= drm_plane_index(plane); + plane_mask |= (1 << drm_plane_index(plane)); plane->old_fb = plane->fb; } diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index 2e10bba4468b..2e8c77e71e1f 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -220,6 +220,9 @@ static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj) static void drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj) { + struct drm_device *dev = obj->dev; + bool final = false; + if (WARN_ON(obj->handle_count == 0)) return; @@ -229,14 +232,39 @@ 
drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj) * checked for a name */ - mutex_lock(&obj->dev->object_name_lock); + mutex_lock(&dev->object_name_lock); if (--obj->handle_count == 0) { drm_gem_object_handle_free(obj); drm_gem_object_exported_dma_buf_free(obj); + final = true; } - mutex_unlock(&obj->dev->object_name_lock); + mutex_unlock(&dev->object_name_lock); - drm_gem_object_unreference_unlocked(obj); + if (final) + drm_gem_object_unreference_unlocked(obj); +} + +/* + * Called at device or object close to release the file's + * handle references on objects. + */ +static int +drm_gem_object_release_handle(int id, void *ptr, void *data) +{ + struct drm_file *file_priv = data; + struct drm_gem_object *obj = ptr; + struct drm_device *dev = obj->dev; + + if (drm_core_check_feature(dev, DRIVER_PRIME)) + drm_gem_remove_prime_handles(obj, file_priv); + drm_vma_node_revoke(&obj->vma_node, file_priv->filp); + + if (dev->driver->gem_close_object) + dev->driver->gem_close_object(obj, file_priv); + + drm_gem_object_handle_unreference_unlocked(obj); + + return 0; } /** @@ -277,14 +305,7 @@ drm_gem_handle_delete(struct drm_file *filp, u32 handle) idr_remove(&filp->object_idr, handle); spin_unlock(&filp->table_lock); - if (drm_core_check_feature(dev, DRIVER_PRIME)) - drm_gem_remove_prime_handles(obj, filp); - drm_vma_node_revoke(&obj->vma_node, filp->filp); - - if (dev->driver->gem_close_object) - dev->driver->gem_close_object(obj, filp); - drm_gem_object_handle_unreference_unlocked(obj); - + drm_gem_object_release_handle(handle, obj, filp); return 0; } EXPORT_SYMBOL(drm_gem_handle_delete); @@ -326,9 +347,12 @@ drm_gem_handle_create_tail(struct drm_file *file_priv, u32 *handlep) { struct drm_device *dev = obj->dev; + u32 handle; int ret; WARN_ON(!mutex_is_locked(&dev->object_name_lock)); + if (obj->handle_count++ == 0) + drm_gem_object_reference(obj); /* * Get the user-visible handle using idr. Preload and perform @@ -338,32 +362,38 @@ drm_gem_handle_create_tail(struct drm_file *file_priv, spin_lock(&file_priv->table_lock); ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT); - drm_gem_object_reference(obj); - obj->handle_count++; + spin_unlock(&file_priv->table_lock); idr_preload_end(); + mutex_unlock(&dev->object_name_lock); - if (ret < 0) { - drm_gem_object_handle_unreference_unlocked(obj); - return ret; - } - *handlep = ret; + if (ret < 0) + goto err_unref; + + handle = ret; ret = drm_vma_node_allow(&obj->vma_node, file_priv->filp); - if (ret) { - drm_gem_handle_delete(file_priv, *handlep); - return ret; - } + if (ret) + goto err_remove; if (dev->driver->gem_open_object) { ret = dev->driver->gem_open_object(obj, file_priv); - if (ret) { - drm_gem_handle_delete(file_priv, *handlep); - return ret; - } + if (ret) + goto err_revoke; } + *handlep = handle; return 0; + +err_revoke: + drm_vma_node_revoke(&obj->vma_node, file_priv->filp); +err_remove: + spin_lock(&file_priv->table_lock); + idr_remove(&file_priv->object_idr, handle); + spin_unlock(&file_priv->table_lock); +err_unref: + drm_gem_object_handle_unreference_unlocked(obj); + return ret; } /** @@ -630,7 +660,6 @@ drm_gem_flink_ioctl(struct drm_device *dev, void *data, return -ENOENT; mutex_lock(&dev->object_name_lock); - idr_preload(GFP_KERNEL); /* prevent races with concurrent gem_close. 
*/ if (obj->handle_count == 0) { ret = -ENOENT; @@ -638,7 +667,7 @@ drm_gem_flink_ioctl(struct drm_device *dev, void *data, } if (!obj->name) { - ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_NOWAIT); + ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_KERNEL); if (ret < 0) goto err; @@ -649,7 +678,6 @@ drm_gem_flink_ioctl(struct drm_device *dev, void *data, ret = 0; err: - idr_preload_end(); mutex_unlock(&dev->object_name_lock); drm_gem_object_unreference_unlocked(obj); return ret; @@ -714,29 +742,6 @@ drm_gem_open(struct drm_device *dev, struct drm_file *file_private) spin_lock_init(&file_private->table_lock); } -/* - * Called at device close to release the file's - * handle references on objects. - */ -static int -drm_gem_object_release_handle(int id, void *ptr, void *data) -{ - struct drm_file *file_priv = data; - struct drm_gem_object *obj = ptr; - struct drm_device *dev = obj->dev; - - if (drm_core_check_feature(dev, DRIVER_PRIME)) - drm_gem_remove_prime_handles(obj, file_priv); - drm_vma_node_revoke(&obj->vma_node, file_priv->filp); - - if (dev->driver->gem_close_object) - dev->driver->gem_close_object(obj, file_priv); - - drm_gem_object_handle_unreference_unlocked(obj); - - return 0; -} - /** * drm_gem_release - release file-private GEM resources * @dev: drm_device which is being closed by userspace diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c index e109b49cd25d..e5df53b6e229 100644 --- a/drivers/gpu/drm/drm_gem_cma_helper.c +++ b/drivers/gpu/drm/drm_gem_cma_helper.c @@ -59,11 +59,13 @@ __drm_gem_cma_create(struct drm_device *drm, size_t size) struct drm_gem_object *gem_obj; int ret; - cma_obj = kzalloc(sizeof(*cma_obj), GFP_KERNEL); - if (!cma_obj) + if (drm->driver->gem_create_object) + gem_obj = drm->driver->gem_create_object(drm, size); + else + gem_obj = kzalloc(sizeof(*cma_obj), GFP_KERNEL); + if (!gem_obj) return ERR_PTR(-ENOMEM); - - gem_obj = &cma_obj->base; + cma_obj = container_of(gem_obj, struct drm_gem_cma_object, base); ret = drm_gem_object_init(drm, gem_obj, size); if (ret) @@ -119,7 +121,7 @@ struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm, return cma_obj; error: - drm_gem_cma_free_object(&cma_obj->base); + drm->driver->gem_free_object(&cma_obj->base); return ERR_PTR(ret); } EXPORT_SYMBOL_GPL(drm_gem_cma_create); @@ -169,7 +171,7 @@ drm_gem_cma_create_with_handle(struct drm_file *file_priv, return cma_obj; err_handle_create: - drm_gem_cma_free_object(gem_obj); + drm->driver->gem_free_object(gem_obj); return ERR_PTR(ret); } diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c index 607f493ae801..d12a4efa651b 100644 --- a/drivers/gpu/drm/drm_irq.c +++ b/drivers/gpu/drm/drm_irq.c @@ -73,6 +73,9 @@ static int drm_vblank_offdelay = 5000; /* Default to 5000 msecs. */ module_param_named(vblankoffdelay, drm_vblank_offdelay, int, 0600); module_param_named(timestamp_precision_usec, drm_timestamp_precision, int, 0600); module_param_named(timestamp_monotonic, drm_timestamp_monotonic, int, 0600); +MODULE_PARM_DESC(vblankoffdelay, "Delay until vblank irq auto-disable [msecs] (0: never disable, <0: disable immediately)"); +MODULE_PARM_DESC(timestamp_precision_usec, "Max. 
error on timestamps [usecs]"); +MODULE_PARM_DESC(timestamp_monotonic, "Use monotonic timestamps"); static void store_vblank(struct drm_device *dev, unsigned int pipe, u32 vblank_count_inc, diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c index 2d5ca8eec13a..6e6a9c58d404 100644 --- a/drivers/gpu/drm/drm_mipi_dsi.c +++ b/drivers/gpu/drm/drm_mipi_dsi.c @@ -365,6 +365,44 @@ int mipi_dsi_create_packet(struct mipi_dsi_packet *packet, } EXPORT_SYMBOL(mipi_dsi_create_packet); +/** + * mipi_dsi_shutdown_peripheral() - sends a Shutdown Peripheral command + * @dsi: DSI peripheral device + * + * Return: 0 on success or a negative error code on failure. + */ +int mipi_dsi_shutdown_peripheral(struct mipi_dsi_device *dsi) +{ + struct mipi_dsi_msg msg = { + .channel = dsi->channel, + .type = MIPI_DSI_SHUTDOWN_PERIPHERAL, + .tx_buf = (u8 [2]) { 0, 0 }, + .tx_len = 2, + }; + + return mipi_dsi_device_transfer(dsi, &msg); +} +EXPORT_SYMBOL(mipi_dsi_shutdown_peripheral); + +/** + * mipi_dsi_turn_on_peripheral() - sends a Turn On Peripheral command + * @dsi: DSI peripheral device + * + * Return: 0 on success or a negative error code on failure. + */ +int mipi_dsi_turn_on_peripheral(struct mipi_dsi_device *dsi) +{ + struct mipi_dsi_msg msg = { + .channel = dsi->channel, + .type = MIPI_DSI_TURN_ON_PERIPHERAL, + .tx_buf = (u8 [2]) { 0, 0 }, + .tx_len = 2, + }; + + return mipi_dsi_device_transfer(dsi, &msg); +} +EXPORT_SYMBOL(mipi_dsi_turn_on_peripheral); + /* * mipi_dsi_set_maximum_return_packet_size() - specify the maximum size of the * the payload in a long packet transmitted from the peripheral back to the diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c index ef6bd3656548..20775c05235a 100644 --- a/drivers/gpu/drm/drm_modes.c +++ b/drivers/gpu/drm/drm_modes.c @@ -553,10 +553,10 @@ EXPORT_SYMBOL(drm_gtf_mode_complex); * drivers/video/fbmon.c * * Standard GTF parameters: - * M = 600 - * C = 40 - * K = 128 - * J = 20 + * M = 600 + * C = 40 + * K = 128 + * J = 20 * * Returns: * The modeline based on the GTF algorithm stored in a drm_display_mode object. @@ -708,7 +708,8 @@ void drm_mode_set_name(struct drm_display_mode *mode) } EXPORT_SYMBOL(drm_mode_set_name); -/** drm_mode_hsync - get the hsync of a mode +/** + * drm_mode_hsync - get the hsync of a mode * @mode: mode * * Returns: @@ -1073,7 +1074,7 @@ static const char * const drm_mode_status_names[] = { MODE_STATUS(ONE_SIZE), MODE_STATUS(NO_REDUCED), MODE_STATUS(NO_STEREO), - MODE_STATUS(UNVERIFIED), + MODE_STATUS(STALE), MODE_STATUS(BAD), MODE_STATUS(ERROR), }; @@ -1171,7 +1172,6 @@ EXPORT_SYMBOL(drm_mode_sort); /** * drm_mode_connector_list_update - update the mode list for the connector * @connector: the connector to update - * @merge_type_bits: whether to merge or overwrite type bits * * This moves the modes from the @connector probed_modes list * to the actual mode list. It compares the probed mode against the current @@ -1180,33 +1180,48 @@ EXPORT_SYMBOL(drm_mode_sort); * This is just a helper functions doesn't validate any modes itself and also * doesn't prune any invalid modes. Callers need to do that themselves. 
*/ -void drm_mode_connector_list_update(struct drm_connector *connector, - bool merge_type_bits) +void drm_mode_connector_list_update(struct drm_connector *connector) { - struct drm_display_mode *mode; struct drm_display_mode *pmode, *pt; - int found_it; WARN_ON(!mutex_is_locked(&connector->dev->mode_config.mutex)); - list_for_each_entry_safe(pmode, pt, &connector->probed_modes, - head) { - found_it = 0; + list_for_each_entry_safe(pmode, pt, &connector->probed_modes, head) { + struct drm_display_mode *mode; + bool found_it = false; + /* go through current modes checking for the new probed mode */ list_for_each_entry(mode, &connector->modes, head) { - if (drm_mode_equal(pmode, mode)) { - found_it = 1; - /* if equal delete the probed mode */ - mode->status = pmode->status; - /* Merge type bits together */ - if (merge_type_bits) - mode->type |= pmode->type; - else - mode->type = pmode->type; - list_del(&pmode->head); - drm_mode_destroy(connector->dev, pmode); - break; + if (!drm_mode_equal(pmode, mode)) + continue; + + found_it = true; + + /* + * If the old matching mode is stale (ie. left over + * from a previous probe) just replace it outright. + * Otherwise just merge the type bits between all + * equal probed modes. + * + * If two probed modes are considered equal, pick the + * actual timings from the one that's marked as + * preferred (in case the match isn't 100%). If + * multiple or zero preferred modes are present, favor + * the mode added to the probed_modes list first. + */ + if (mode->status == MODE_STALE) { + drm_mode_copy(mode, pmode); + } else if ((mode->type & DRM_MODE_TYPE_PREFERRED) == 0 && + (pmode->type & DRM_MODE_TYPE_PREFERRED) != 0) { + pmode->type |= mode->type; + drm_mode_copy(mode, pmode); + } else { + mode->type |= pmode->type; } + + list_del(&pmode->head); + drm_mode_destroy(connector->dev, pmode); + break; } if (!found_it) { @@ -1229,7 +1244,7 @@ EXPORT_SYMBOL(drm_mode_connector_list_update); * This uses the same parameters as the fb modedb.c, except for an extra * force-enable, force-enable-digital and force-disable bit at the end: * - * x[M][R][-][@][i][m][eDd] + * x[M][R][-][@][i][m][eDd] * * The intermediate drm_cmdline_mode structure is required to store additional * options from the command line modline like the force-enable/disable flag. 
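To make the option string above concrete, here are a few hypothetical video= lines and how this parser would interpret them, following the usual fbdev modedb conventions (connector names are just examples):

    video=HDMI-A-1:1920x1080@60e     1920x1080 at 60 Hz, connector force-enabled
    video=DVI-I-1:1280x720M-24m      CVT-computed 1280x720, 24 bpp, margins enabled
    video=VGA-1:d                    connector force-disabled

The trailing e/D/d letters map to force-enable, force-enable-digital and force-disable respectively; i selects an interlaced mode and m enables margins.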
@@ -1247,7 +1262,7 @@ bool drm_mode_parse_command_line_for_connector(const char *mode_option, unsigned int xres = 0, yres = 0, bpp = 32, refresh = 0; bool yres_specified = false, cvt = false, rb = false; bool interlace = false, margins = false, was_digit = false; - int i, err; + int i; enum drm_connector_force force = DRM_FORCE_UNSPECIFIED; #ifdef CONFIG_FB @@ -1267,9 +1282,7 @@ bool drm_mode_parse_command_line_for_connector(const char *mode_option, case '@': if (!refresh_specified && !bpp_specified && !yres_specified && !cvt && !rb && was_digit) { - err = kstrtouint(&name[i + 1], 10, &refresh); - if (err) - return false; + refresh = simple_strtol(&name[i+1], NULL, 10); refresh_specified = true; was_digit = false; } else @@ -1278,9 +1291,7 @@ bool drm_mode_parse_command_line_for_connector(const char *mode_option, case '-': if (!bpp_specified && !yres_specified && !cvt && !rb && was_digit) { - err = kstrtouint(&name[i + 1], 10, &bpp); - if (err) - return false; + bpp = simple_strtol(&name[i+1], NULL, 10); bpp_specified = true; was_digit = false; } else @@ -1288,9 +1299,7 @@ bool drm_mode_parse_command_line_for_connector(const char *mode_option, break; case 'x': if (!yres_specified && was_digit) { - err = kstrtouint(&name[i + 1], 10, &yres); - if (err) - return false; + yres = simple_strtol(&name[i+1], NULL, 10); yres_specified = true; was_digit = false; } else diff --git a/drivers/gpu/drm/drm_modeset_lock.c b/drivers/gpu/drm/drm_modeset_lock.c index c2f5971146ba..e3a4adf03e7b 100644 --- a/drivers/gpu/drm/drm_modeset_lock.c +++ b/drivers/gpu/drm/drm_modeset_lock.c @@ -40,17 +40,15 @@ * The basic usage pattern is to: * * drm_modeset_acquire_init(&ctx) - * retry: + * retry: * foreach (lock in random_ordered_set_of_locks) { - * ret = drm_modeset_lock(lock, &ctx) - * if (ret == -EDEADLK) { - * drm_modeset_backoff(&ctx); - * goto retry; - * } + * ret = drm_modeset_lock(lock, &ctx) + * if (ret == -EDEADLK) { + * drm_modeset_backoff(&ctx); + * goto retry; + * } * } - * * ... do stuff ... - * * drm_modeset_drop_locks(&ctx); * drm_modeset_acquire_fini(&ctx); */ diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c index fcd2a86acd2c..a1fff1179a97 100644 --- a/drivers/gpu/drm/drm_pci.c +++ b/drivers/gpu/drm/drm_pci.c @@ -410,6 +410,26 @@ int drm_pcie_get_speed_cap_mask(struct drm_device *dev, u32 *mask) } EXPORT_SYMBOL(drm_pcie_get_speed_cap_mask); +int drm_pcie_get_max_link_width(struct drm_device *dev, u32 *mlw) +{ + struct pci_dev *root; + u32 lnkcap; + + *mlw = 0; + if (!dev->pdev) + return -EINVAL; + + root = dev->pdev->bus->self; + + pcie_capability_read_dword(root, PCI_EXP_LNKCAP, &lnkcap); + + *mlw = (lnkcap & PCI_EXP_LNKCAP_MLW) >> 4; + + DRM_INFO("probing mlw for device %x:%x = %x\n", root->vendor, root->device, lnkcap); + return 0; +} +EXPORT_SYMBOL(drm_pcie_get_max_link_width); + #else int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver) diff --git a/drivers/gpu/drm/drm_plane_helper.c b/drivers/gpu/drm/drm_plane_helper.c index a6983d41920d..369d2898ff9e 100644 --- a/drivers/gpu/drm/drm_plane_helper.c +++ b/drivers/gpu/drm/drm_plane_helper.c @@ -57,6 +57,10 @@ * by the atomic helpers. * * Again drivers are strongly urged to switch to the new interfaces. + * + * The plane helpers share the function table structures with other helpers, + * specifically also the atomic helpers. See struct &drm_plane_helper_funcs for + * the details. 
*/ /* @@ -371,7 +375,7 @@ static struct drm_plane *create_primary_plane(struct drm_device *dev) &drm_primary_helper_funcs, safe_modeset_formats, ARRAY_SIZE(safe_modeset_formats), - DRM_PLANE_TYPE_PRIMARY); + DRM_PLANE_TYPE_PRIMARY, NULL); if (ret) { kfree(primary); primary = NULL; @@ -398,7 +402,8 @@ int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc, struct drm_plane *primary; primary = create_primary_plane(dev); - return drm_crtc_init_with_planes(dev, crtc, primary, NULL, funcs); + return drm_crtc_init_with_planes(dev, crtc, primary, NULL, funcs, + NULL); } EXPORT_SYMBOL(drm_crtc_init); diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c index 9f935f55d74c..27aa7183b20b 100644 --- a/drivers/gpu/drm/drm_prime.c +++ b/drivers/gpu/drm/drm_prime.c @@ -313,19 +313,15 @@ static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = { * * Export callbacks: * - * - @gem_prime_pin (optional): prepare a GEM object for exporting - * - * - @gem_prime_get_sg_table: provide a scatter/gather table of pinned pages - * - * - @gem_prime_vmap: vmap a buffer exported by your driver - * - * - @gem_prime_vunmap: vunmap a buffer exported by your driver - * - * - @gem_prime_mmap (optional): mmap a buffer exported by your driver + * * @gem_prime_pin (optional): prepare a GEM object for exporting + * * @gem_prime_get_sg_table: provide a scatter/gather table of pinned pages + * * @gem_prime_vmap: vmap a buffer exported by your driver + * * @gem_prime_vunmap: vunmap a buffer exported by your driver + * * @gem_prime_mmap (optional): mmap a buffer exported by your driver * * Import callback: * - * - @gem_prime_import_sg_table (import): produce a GEM object from another + * * @gem_prime_import_sg_table (import): produce a GEM object from another * driver's scatter/gather table */ diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c index bfdf5bb223b9..e714b5a7955f 100644 --- a/drivers/gpu/drm/drm_probe_helper.c +++ b/drivers/gpu/drm/drm_probe_helper.c @@ -53,6 +53,9 @@ * This helper library can be used independently of the modeset helper library. * Drivers can also overwrite different parts e.g. use their own hotplug * handling code to avoid probing unrelated outputs. + * + * The probe helpers share the function table structures with other display + * helper libraries. See struct &drm_connector_helper_funcs for the details. */ static bool drm_kms_helper_poll = true; @@ -126,9 +129,64 @@ void drm_kms_helper_poll_enable_locked(struct drm_device *dev) } EXPORT_SYMBOL(drm_kms_helper_poll_enable_locked); - -static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connector *connector, - uint32_t maxX, uint32_t maxY, bool merge_type_bits) +/** + * drm_helper_probe_single_connector_modes - get complete set of display modes + * @connector: connector to probe + * @maxX: max width for modes + * @maxY: max height for modes + * + * Based on the helper callbacks implemented by @connector in struct + * &drm_connector_helper_funcs try to detect all valid modes. Modes will first + * be added to the connector's probed_modes list, then culled (based on validity + * and the @maxX, @maxY parameters) and put into the normal modes list. + * + * Intended to be used as a generic implementation of the ->fill_modes() + * @connector vfunc for drivers that use the CRTC helpers for output mode + * filtering and detection. + * + * The basic procedure is as follows + * + * 1. All modes currently on the connector's modes list are marked as stale + * + * 2. 
New modes are added to the connector's probed_modes list with + * drm_mode_probed_add(). New modes start their life with status as OK. + * Modes are added from a single source using the following priority order. + * + * - debugfs 'override_edid' (used for testing only) + * - firmware EDID (drm_load_edid_firmware()) + * - connector helper ->get_modes() vfunc + * - if the connector status is connector_status_connected, standard + * VESA DMT modes up to 1024x768 are automatically added + * (drm_add_modes_noedid()) + * + * Finally modes specified via the kernel command line (video=...) are + * added in addition to what the earlier probes produced + * (drm_helper_probe_add_cmdline_mode()). These modes are generated + * using the VESA GTF/CVT formulas. + * + * 3. Modes are moved from the probed_modes list to the modes list. Potential + * duplicates are merged together (see drm_mode_connector_list_update()). + * After this step the probed_modes list will be empty again. + * + * 4. Any non-stale mode on the modes list then undergoes validation + * + * - drm_mode_validate_basic() performs basic sanity checks + * - drm_mode_validate_size() filters out modes larger than @maxX and @maxY + * (if specified) + * - drm_mode_validate_flag() checks the modes againt basic connector + * capabilites (interlace_allowed,doublescan_allowed,stereo_allowed) + * - the optional connector ->mode_valid() helper can perform driver and/or + * hardware specific checks + * + * 5. Any mode whose status is not OK is pruned from the connector's modes list, + * accompanied by a debug message indicating the reason for the mode's + * rejection (see drm_mode_prune_invalid()). + * + * Returns: + * The number of modes found on @connector. + */ +int drm_helper_probe_single_connector_modes(struct drm_connector *connector, + uint32_t maxX, uint32_t maxY) { struct drm_device *dev = connector->dev; struct drm_display_mode *mode; @@ -143,9 +201,9 @@ static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connect DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id, connector->name); - /* set all modes to the unverified state */ + /* set all old modes to the stale state */ list_for_each_entry(mode, &connector->modes, head) - mode->status = MODE_UNVERIFIED; + mode->status = MODE_STALE; old_status = connector->status; @@ -200,17 +258,16 @@ static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connect goto prune; } -#ifdef CONFIG_DRM_LOAD_EDID_FIRMWARE - count = drm_load_edid_firmware(connector); - if (count == 0) -#endif - { - if (connector->override_edid) { - struct edid *edid = (struct edid *) connector->edid_blob_ptr->data; + if (connector->override_edid) { + struct edid *edid = (struct edid *) connector->edid_blob_ptr->data; - count = drm_add_edid_modes(connector, edid); - drm_edid_to_eld(connector, edid); - } else + count = drm_add_edid_modes(connector, edid); + drm_edid_to_eld(connector, edid); + } else { +#ifdef CONFIG_DRM_LOAD_EDID_FIRMWARE + count = drm_load_edid_firmware(connector); + if (count == 0) +#endif count = (*connector_funcs->get_modes)(connector); } @@ -220,7 +277,7 @@ static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connect if (count == 0) goto prune; - drm_mode_connector_list_update(connector, merge_type_bits); + drm_mode_connector_list_update(connector); if (connector->interlace_allowed) mode_flags |= DRM_MODE_FLAG_INTERLACE; @@ -264,48 +321,8 @@ prune: return count; } - -/** - * drm_helper_probe_single_connector_modes - get complete set of display modes - 
* @connector: connector to probe - * @maxX: max width for modes - * @maxY: max height for modes - * - * Based on the helper callbacks implemented by @connector try to detect all - * valid modes. Modes will first be added to the connector's probed_modes list, - * then culled (based on validity and the @maxX, @maxY parameters) and put into - * the normal modes list. - * - * Intended to be use as a generic implementation of the ->fill_modes() - * @connector vfunc for drivers that use the crtc helpers for output mode - * filtering and detection. - * - * Returns: - * The number of modes found on @connector. - */ -int drm_helper_probe_single_connector_modes(struct drm_connector *connector, - uint32_t maxX, uint32_t maxY) -{ - return drm_helper_probe_single_connector_modes_merge_bits(connector, maxX, maxY, true); -} EXPORT_SYMBOL(drm_helper_probe_single_connector_modes); -/** - * drm_helper_probe_single_connector_modes_nomerge - get complete set of display modes - * @connector: connector to probe - * @maxX: max width for modes - * @maxY: max height for modes - * - * This operates like drm_hehlper_probe_single_connector_modes except it - * replaces the mode bits instead of merging them for preferred modes. - */ -int drm_helper_probe_single_connector_modes_nomerge(struct drm_connector *connector, - uint32_t maxX, uint32_t maxY) -{ - return drm_helper_probe_single_connector_modes_merge_bits(connector, maxX, maxY, false); -} -EXPORT_SYMBOL(drm_helper_probe_single_connector_modes_nomerge); - /** * drm_kms_helper_hotplug_event - fire off KMS hotplug events * @dev: drm_device whose connector state changed diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c index 0ca64106a97b..d503f8e8c2d1 100644 --- a/drivers/gpu/drm/drm_sysfs.c +++ b/drivers/gpu/drm/drm_sysfs.c @@ -240,7 +240,7 @@ static ssize_t edid_show(struct file *filp, struct kobject *kobj, struct bin_attribute *attr, char *buf, loff_t off, size_t count) { - struct device *connector_dev = container_of(kobj, struct device, kobj); + struct device *connector_dev = kobj_to_dev(kobj); struct drm_connector *connector = to_drm_connector(connector_dev); unsigned char *edid; size_t size; diff --git a/drivers/gpu/drm/etnaviv/Kconfig b/drivers/gpu/drm/etnaviv/Kconfig new file mode 100644 index 000000000000..2cde7a5442fb --- /dev/null +++ b/drivers/gpu/drm/etnaviv/Kconfig @@ -0,0 +1,20 @@ + +config DRM_ETNAVIV + tristate "ETNAVIV (DRM support for Vivante GPU IP cores)" + depends on DRM + depends on ARCH_MXC || ARCH_DOVE + select SHMEM + select TMPFS + select IOMMU_API + select IOMMU_SUPPORT + select WANT_DEV_COREDUMP + help + DRM driver for Vivante GPUs. + +config DRM_ETNAVIV_REGISTER_LOGGING + bool "enable ETNAVIV register logging" + depends on DRM_ETNAVIV + help + Compile in support for logging register reads/writes in a format + that can be parsed by envytools demsm tool. If enabled, register + logging can be switched on via etnaviv.reglog=y module param. 
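The DRM_ETNAVIV_REGISTER_LOGGING option above only compiles the tracing support in; the reglog module parameter and the MMIO wrappers it gates are added to etnaviv_drv.c later in this patch. In outline (condensed from that file):

	#ifdef CONFIG_DRM_ETNAVIV_REGISTER_LOGGING
	static bool reglog;		/* switched on with etnaviv.reglog=y */
	module_param(reglog, bool, 0600);
	#else
	#define reglog 0
	#endif

	void etnaviv_writel(u32 data, void __iomem *addr)
	{
		if (reglog)
			printk(KERN_DEBUG "IO:W %p %08x\n", addr, data);

		writel(data, addr);
	}

The read side (etnaviv_readl) mirrors this with an "IO:R" message, so the resulting log can be fed to the envytools demsm tool mentioned in the help text.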
diff --git a/drivers/gpu/drm/etnaviv/Makefile b/drivers/gpu/drm/etnaviv/Makefile new file mode 100644 index 000000000000..1086e9876f91 --- /dev/null +++ b/drivers/gpu/drm/etnaviv/Makefile @@ -0,0 +1,14 @@ +etnaviv-y := \ + etnaviv_buffer.o \ + etnaviv_cmd_parser.o \ + etnaviv_drv.o \ + etnaviv_dump.o \ + etnaviv_gem_prime.o \ + etnaviv_gem_submit.o \ + etnaviv_gem.o \ + etnaviv_gpu.o \ + etnaviv_iommu_v2.o \ + etnaviv_iommu.o \ + etnaviv_mmu.o + +obj-$(CONFIG_DRM_ETNAVIV) += etnaviv.o diff --git a/drivers/gpu/drm/etnaviv/cmdstream.xml.h b/drivers/gpu/drm/etnaviv/cmdstream.xml.h new file mode 100644 index 000000000000..8c44ba9a694e --- /dev/null +++ b/drivers/gpu/drm/etnaviv/cmdstream.xml.h @@ -0,0 +1,218 @@ +#ifndef CMDSTREAM_XML +#define CMDSTREAM_XML + +/* Autogenerated file, DO NOT EDIT manually! + +This file was generated by the rules-ng-ng headergen tool in this git repository: +http://0x04.net/cgit/index.cgi/rules-ng-ng +git clone git://0x04.net/rules-ng-ng + +The rules-ng-ng source files this header was generated from are: +- cmdstream.xml ( 12589 bytes, from 2014-02-17 14:57:56) +- common.xml ( 18437 bytes, from 2015-03-25 11:27:41) + +Copyright (C) 2014 +*/ + + +#define FE_OPCODE_LOAD_STATE 0x00000001 +#define FE_OPCODE_END 0x00000002 +#define FE_OPCODE_NOP 0x00000003 +#define FE_OPCODE_DRAW_2D 0x00000004 +#define FE_OPCODE_DRAW_PRIMITIVES 0x00000005 +#define FE_OPCODE_DRAW_INDEXED_PRIMITIVES 0x00000006 +#define FE_OPCODE_WAIT 0x00000007 +#define FE_OPCODE_LINK 0x00000008 +#define FE_OPCODE_STALL 0x00000009 +#define FE_OPCODE_CALL 0x0000000a +#define FE_OPCODE_RETURN 0x0000000b +#define FE_OPCODE_CHIP_SELECT 0x0000000d +#define PRIMITIVE_TYPE_POINTS 0x00000001 +#define PRIMITIVE_TYPE_LINES 0x00000002 +#define PRIMITIVE_TYPE_LINE_STRIP 0x00000003 +#define PRIMITIVE_TYPE_TRIANGLES 0x00000004 +#define PRIMITIVE_TYPE_TRIANGLE_STRIP 0x00000005 +#define PRIMITIVE_TYPE_TRIANGLE_FAN 0x00000006 +#define PRIMITIVE_TYPE_LINE_LOOP 0x00000007 +#define PRIMITIVE_TYPE_QUADS 0x00000008 +#define VIV_FE_LOAD_STATE 0x00000000 + +#define VIV_FE_LOAD_STATE_HEADER 0x00000000 +#define VIV_FE_LOAD_STATE_HEADER_OP__MASK 0xf8000000 +#define VIV_FE_LOAD_STATE_HEADER_OP__SHIFT 27 +#define VIV_FE_LOAD_STATE_HEADER_OP_LOAD_STATE 0x08000000 +#define VIV_FE_LOAD_STATE_HEADER_FIXP 0x04000000 +#define VIV_FE_LOAD_STATE_HEADER_COUNT__MASK 0x03ff0000 +#define VIV_FE_LOAD_STATE_HEADER_COUNT__SHIFT 16 +#define VIV_FE_LOAD_STATE_HEADER_COUNT(x) (((x) << VIV_FE_LOAD_STATE_HEADER_COUNT__SHIFT) & VIV_FE_LOAD_STATE_HEADER_COUNT__MASK) +#define VIV_FE_LOAD_STATE_HEADER_OFFSET__MASK 0x0000ffff +#define VIV_FE_LOAD_STATE_HEADER_OFFSET__SHIFT 0 +#define VIV_FE_LOAD_STATE_HEADER_OFFSET(x) (((x) << VIV_FE_LOAD_STATE_HEADER_OFFSET__SHIFT) & VIV_FE_LOAD_STATE_HEADER_OFFSET__MASK) +#define VIV_FE_LOAD_STATE_HEADER_OFFSET__SHR 2 + +#define VIV_FE_END 0x00000000 + +#define VIV_FE_END_HEADER 0x00000000 +#define VIV_FE_END_HEADER_EVENT_ID__MASK 0x0000001f +#define VIV_FE_END_HEADER_EVENT_ID__SHIFT 0 +#define VIV_FE_END_HEADER_EVENT_ID(x) (((x) << VIV_FE_END_HEADER_EVENT_ID__SHIFT) & VIV_FE_END_HEADER_EVENT_ID__MASK) +#define VIV_FE_END_HEADER_EVENT_ENABLE 0x00000100 +#define VIV_FE_END_HEADER_OP__MASK 0xf8000000 +#define VIV_FE_END_HEADER_OP__SHIFT 27 +#define VIV_FE_END_HEADER_OP_END 0x10000000 + +#define VIV_FE_NOP 0x00000000 + +#define VIV_FE_NOP_HEADER 0x00000000 +#define VIV_FE_NOP_HEADER_OP__MASK 0xf8000000 +#define VIV_FE_NOP_HEADER_OP__SHIFT 27 +#define VIV_FE_NOP_HEADER_OP_NOP 0x18000000 + +#define VIV_FE_DRAW_2D 0x00000000 + 
+#define VIV_FE_DRAW_2D_HEADER 0x00000000 +#define VIV_FE_DRAW_2D_HEADER_COUNT__MASK 0x0000ff00 +#define VIV_FE_DRAW_2D_HEADER_COUNT__SHIFT 8 +#define VIV_FE_DRAW_2D_HEADER_COUNT(x) (((x) << VIV_FE_DRAW_2D_HEADER_COUNT__SHIFT) & VIV_FE_DRAW_2D_HEADER_COUNT__MASK) +#define VIV_FE_DRAW_2D_HEADER_DATA_COUNT__MASK 0x07ff0000 +#define VIV_FE_DRAW_2D_HEADER_DATA_COUNT__SHIFT 16 +#define VIV_FE_DRAW_2D_HEADER_DATA_COUNT(x) (((x) << VIV_FE_DRAW_2D_HEADER_DATA_COUNT__SHIFT) & VIV_FE_DRAW_2D_HEADER_DATA_COUNT__MASK) +#define VIV_FE_DRAW_2D_HEADER_OP__MASK 0xf8000000 +#define VIV_FE_DRAW_2D_HEADER_OP__SHIFT 27 +#define VIV_FE_DRAW_2D_HEADER_OP_DRAW_2D 0x20000000 + +#define VIV_FE_DRAW_2D_TOP_LEFT 0x00000008 +#define VIV_FE_DRAW_2D_TOP_LEFT_X__MASK 0x0000ffff +#define VIV_FE_DRAW_2D_TOP_LEFT_X__SHIFT 0 +#define VIV_FE_DRAW_2D_TOP_LEFT_X(x) (((x) << VIV_FE_DRAW_2D_TOP_LEFT_X__SHIFT) & VIV_FE_DRAW_2D_TOP_LEFT_X__MASK) +#define VIV_FE_DRAW_2D_TOP_LEFT_Y__MASK 0xffff0000 +#define VIV_FE_DRAW_2D_TOP_LEFT_Y__SHIFT 16 +#define VIV_FE_DRAW_2D_TOP_LEFT_Y(x) (((x) << VIV_FE_DRAW_2D_TOP_LEFT_Y__SHIFT) & VIV_FE_DRAW_2D_TOP_LEFT_Y__MASK) + +#define VIV_FE_DRAW_2D_BOTTOM_RIGHT 0x0000000c +#define VIV_FE_DRAW_2D_BOTTOM_RIGHT_X__MASK 0x0000ffff +#define VIV_FE_DRAW_2D_BOTTOM_RIGHT_X__SHIFT 0 +#define VIV_FE_DRAW_2D_BOTTOM_RIGHT_X(x) (((x) << VIV_FE_DRAW_2D_BOTTOM_RIGHT_X__SHIFT) & VIV_FE_DRAW_2D_BOTTOM_RIGHT_X__MASK) +#define VIV_FE_DRAW_2D_BOTTOM_RIGHT_Y__MASK 0xffff0000 +#define VIV_FE_DRAW_2D_BOTTOM_RIGHT_Y__SHIFT 16 +#define VIV_FE_DRAW_2D_BOTTOM_RIGHT_Y(x) (((x) << VIV_FE_DRAW_2D_BOTTOM_RIGHT_Y__SHIFT) & VIV_FE_DRAW_2D_BOTTOM_RIGHT_Y__MASK) + +#define VIV_FE_DRAW_PRIMITIVES 0x00000000 + +#define VIV_FE_DRAW_PRIMITIVES_HEADER 0x00000000 +#define VIV_FE_DRAW_PRIMITIVES_HEADER_OP__MASK 0xf8000000 +#define VIV_FE_DRAW_PRIMITIVES_HEADER_OP__SHIFT 27 +#define VIV_FE_DRAW_PRIMITIVES_HEADER_OP_DRAW_PRIMITIVES 0x28000000 + +#define VIV_FE_DRAW_PRIMITIVES_COMMAND 0x00000004 +#define VIV_FE_DRAW_PRIMITIVES_COMMAND_TYPE__MASK 0x000000ff +#define VIV_FE_DRAW_PRIMITIVES_COMMAND_TYPE__SHIFT 0 +#define VIV_FE_DRAW_PRIMITIVES_COMMAND_TYPE(x) (((x) << VIV_FE_DRAW_PRIMITIVES_COMMAND_TYPE__SHIFT) & VIV_FE_DRAW_PRIMITIVES_COMMAND_TYPE__MASK) + +#define VIV_FE_DRAW_PRIMITIVES_START 0x00000008 + +#define VIV_FE_DRAW_PRIMITIVES_COUNT 0x0000000c + +#define VIV_FE_DRAW_INDEXED_PRIMITIVES 0x00000000 + +#define VIV_FE_DRAW_INDEXED_PRIMITIVES_HEADER 0x00000000 +#define VIV_FE_DRAW_INDEXED_PRIMITIVES_HEADER_OP__MASK 0xf8000000 +#define VIV_FE_DRAW_INDEXED_PRIMITIVES_HEADER_OP__SHIFT 27 +#define VIV_FE_DRAW_INDEXED_PRIMITIVES_HEADER_OP_DRAW_INDEXED_PRIMITIVES 0x30000000 + +#define VIV_FE_DRAW_INDEXED_PRIMITIVES_COMMAND 0x00000004 +#define VIV_FE_DRAW_INDEXED_PRIMITIVES_COMMAND_TYPE__MASK 0x000000ff +#define VIV_FE_DRAW_INDEXED_PRIMITIVES_COMMAND_TYPE__SHIFT 0 +#define VIV_FE_DRAW_INDEXED_PRIMITIVES_COMMAND_TYPE(x) (((x) << VIV_FE_DRAW_INDEXED_PRIMITIVES_COMMAND_TYPE__SHIFT) & VIV_FE_DRAW_INDEXED_PRIMITIVES_COMMAND_TYPE__MASK) + +#define VIV_FE_DRAW_INDEXED_PRIMITIVES_START 0x00000008 + +#define VIV_FE_DRAW_INDEXED_PRIMITIVES_COUNT 0x0000000c + +#define VIV_FE_DRAW_INDEXED_PRIMITIVES_OFFSET 0x00000010 + +#define VIV_FE_WAIT 0x00000000 + +#define VIV_FE_WAIT_HEADER 0x00000000 +#define VIV_FE_WAIT_HEADER_DELAY__MASK 0x0000ffff +#define VIV_FE_WAIT_HEADER_DELAY__SHIFT 0 +#define VIV_FE_WAIT_HEADER_DELAY(x) (((x) << VIV_FE_WAIT_HEADER_DELAY__SHIFT) & VIV_FE_WAIT_HEADER_DELAY__MASK) +#define VIV_FE_WAIT_HEADER_OP__MASK 0xf8000000 +#define 
VIV_FE_WAIT_HEADER_OP__SHIFT 27 +#define VIV_FE_WAIT_HEADER_OP_WAIT 0x38000000 + +#define VIV_FE_LINK 0x00000000 + +#define VIV_FE_LINK_HEADER 0x00000000 +#define VIV_FE_LINK_HEADER_PREFETCH__MASK 0x0000ffff +#define VIV_FE_LINK_HEADER_PREFETCH__SHIFT 0 +#define VIV_FE_LINK_HEADER_PREFETCH(x) (((x) << VIV_FE_LINK_HEADER_PREFETCH__SHIFT) & VIV_FE_LINK_HEADER_PREFETCH__MASK) +#define VIV_FE_LINK_HEADER_OP__MASK 0xf8000000 +#define VIV_FE_LINK_HEADER_OP__SHIFT 27 +#define VIV_FE_LINK_HEADER_OP_LINK 0x40000000 + +#define VIV_FE_LINK_ADDRESS 0x00000004 + +#define VIV_FE_STALL 0x00000000 + +#define VIV_FE_STALL_HEADER 0x00000000 +#define VIV_FE_STALL_HEADER_OP__MASK 0xf8000000 +#define VIV_FE_STALL_HEADER_OP__SHIFT 27 +#define VIV_FE_STALL_HEADER_OP_STALL 0x48000000 + +#define VIV_FE_STALL_TOKEN 0x00000004 +#define VIV_FE_STALL_TOKEN_FROM__MASK 0x0000001f +#define VIV_FE_STALL_TOKEN_FROM__SHIFT 0 +#define VIV_FE_STALL_TOKEN_FROM(x) (((x) << VIV_FE_STALL_TOKEN_FROM__SHIFT) & VIV_FE_STALL_TOKEN_FROM__MASK) +#define VIV_FE_STALL_TOKEN_TO__MASK 0x00001f00 +#define VIV_FE_STALL_TOKEN_TO__SHIFT 8 +#define VIV_FE_STALL_TOKEN_TO(x) (((x) << VIV_FE_STALL_TOKEN_TO__SHIFT) & VIV_FE_STALL_TOKEN_TO__MASK) + +#define VIV_FE_CALL 0x00000000 + +#define VIV_FE_CALL_HEADER 0x00000000 +#define VIV_FE_CALL_HEADER_PREFETCH__MASK 0x0000ffff +#define VIV_FE_CALL_HEADER_PREFETCH__SHIFT 0 +#define VIV_FE_CALL_HEADER_PREFETCH(x) (((x) << VIV_FE_CALL_HEADER_PREFETCH__SHIFT) & VIV_FE_CALL_HEADER_PREFETCH__MASK) +#define VIV_FE_CALL_HEADER_OP__MASK 0xf8000000 +#define VIV_FE_CALL_HEADER_OP__SHIFT 27 +#define VIV_FE_CALL_HEADER_OP_CALL 0x50000000 + +#define VIV_FE_CALL_ADDRESS 0x00000004 + +#define VIV_FE_CALL_RETURN_PREFETCH 0x00000008 + +#define VIV_FE_CALL_RETURN_ADDRESS 0x0000000c + +#define VIV_FE_RETURN 0x00000000 + +#define VIV_FE_RETURN_HEADER 0x00000000 +#define VIV_FE_RETURN_HEADER_OP__MASK 0xf8000000 +#define VIV_FE_RETURN_HEADER_OP__SHIFT 27 +#define VIV_FE_RETURN_HEADER_OP_RETURN 0x58000000 + +#define VIV_FE_CHIP_SELECT 0x00000000 + +#define VIV_FE_CHIP_SELECT_HEADER 0x00000000 +#define VIV_FE_CHIP_SELECT_HEADER_OP__MASK 0xf8000000 +#define VIV_FE_CHIP_SELECT_HEADER_OP__SHIFT 27 +#define VIV_FE_CHIP_SELECT_HEADER_OP_CHIP_SELECT 0x68000000 +#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP15 0x00008000 +#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP14 0x00004000 +#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP13 0x00002000 +#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP12 0x00001000 +#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP11 0x00000800 +#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP10 0x00000400 +#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP9 0x00000200 +#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP8 0x00000100 +#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP7 0x00000080 +#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP6 0x00000040 +#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP5 0x00000020 +#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP4 0x00000010 +#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP3 0x00000008 +#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP2 0x00000004 +#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP1 0x00000002 +#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP0 0x00000001 + + +#endif /* CMDSTREAM_XML */ diff --git a/drivers/gpu/drm/etnaviv/common.xml.h b/drivers/gpu/drm/etnaviv/common.xml.h new file mode 100644 index 000000000000..9e585d51fb78 --- /dev/null +++ b/drivers/gpu/drm/etnaviv/common.xml.h @@ -0,0 +1,249 @@ +#ifndef COMMON_XML +#define COMMON_XML + +/* Autogenerated file, DO NOT EDIT manually! 
+ +This file was generated by the rules-ng-ng headergen tool in this git repository: +http://0x04.net/cgit/index.cgi/rules-ng-ng +git clone git://0x04.net/rules-ng-ng + +The rules-ng-ng source files this header was generated from are: +- state_vg.xml ( 5973 bytes, from 2015-03-25 11:26:01) +- common.xml ( 18437 bytes, from 2015-03-25 11:27:41) + +Copyright (C) 2015 +*/ + + +#define PIPE_ID_PIPE_3D 0x00000000 +#define PIPE_ID_PIPE_2D 0x00000001 +#define SYNC_RECIPIENT_FE 0x00000001 +#define SYNC_RECIPIENT_RA 0x00000005 +#define SYNC_RECIPIENT_PE 0x00000007 +#define SYNC_RECIPIENT_DE 0x0000000b +#define SYNC_RECIPIENT_VG 0x0000000f +#define SYNC_RECIPIENT_TESSELATOR 0x00000010 +#define SYNC_RECIPIENT_VG2 0x00000011 +#define SYNC_RECIPIENT_TESSELATOR2 0x00000012 +#define SYNC_RECIPIENT_VG3 0x00000013 +#define SYNC_RECIPIENT_TESSELATOR3 0x00000014 +#define ENDIAN_MODE_NO_SWAP 0x00000000 +#define ENDIAN_MODE_SWAP_16 0x00000001 +#define ENDIAN_MODE_SWAP_32 0x00000002 +#define chipModel_GC300 0x00000300 +#define chipModel_GC320 0x00000320 +#define chipModel_GC350 0x00000350 +#define chipModel_GC355 0x00000355 +#define chipModel_GC400 0x00000400 +#define chipModel_GC410 0x00000410 +#define chipModel_GC420 0x00000420 +#define chipModel_GC450 0x00000450 +#define chipModel_GC500 0x00000500 +#define chipModel_GC530 0x00000530 +#define chipModel_GC600 0x00000600 +#define chipModel_GC700 0x00000700 +#define chipModel_GC800 0x00000800 +#define chipModel_GC860 0x00000860 +#define chipModel_GC880 0x00000880 +#define chipModel_GC1000 0x00001000 +#define chipModel_GC2000 0x00002000 +#define chipModel_GC2100 0x00002100 +#define chipModel_GC4000 0x00004000 +#define RGBA_BITS_R 0x00000001 +#define RGBA_BITS_G 0x00000002 +#define RGBA_BITS_B 0x00000004 +#define RGBA_BITS_A 0x00000008 +#define chipFeatures_FAST_CLEAR 0x00000001 +#define chipFeatures_SPECIAL_ANTI_ALIASING 0x00000002 +#define chipFeatures_PIPE_3D 0x00000004 +#define chipFeatures_DXT_TEXTURE_COMPRESSION 0x00000008 +#define chipFeatures_DEBUG_MODE 0x00000010 +#define chipFeatures_Z_COMPRESSION 0x00000020 +#define chipFeatures_YUV420_SCALER 0x00000040 +#define chipFeatures_MSAA 0x00000080 +#define chipFeatures_DC 0x00000100 +#define chipFeatures_PIPE_2D 0x00000200 +#define chipFeatures_ETC1_TEXTURE_COMPRESSION 0x00000400 +#define chipFeatures_FAST_SCALER 0x00000800 +#define chipFeatures_HIGH_DYNAMIC_RANGE 0x00001000 +#define chipFeatures_YUV420_TILER 0x00002000 +#define chipFeatures_MODULE_CG 0x00004000 +#define chipFeatures_MIN_AREA 0x00008000 +#define chipFeatures_NO_EARLY_Z 0x00010000 +#define chipFeatures_NO_422_TEXTURE 0x00020000 +#define chipFeatures_BUFFER_INTERLEAVING 0x00040000 +#define chipFeatures_BYTE_WRITE_2D 0x00080000 +#define chipFeatures_NO_SCALER 0x00100000 +#define chipFeatures_YUY2_AVERAGING 0x00200000 +#define chipFeatures_HALF_PE_CACHE 0x00400000 +#define chipFeatures_HALF_TX_CACHE 0x00800000 +#define chipFeatures_YUY2_RENDER_TARGET 0x01000000 +#define chipFeatures_MEM32 0x02000000 +#define chipFeatures_PIPE_VG 0x04000000 +#define chipFeatures_VGTS 0x08000000 +#define chipFeatures_FE20 0x10000000 +#define chipFeatures_BYTE_WRITE_3D 0x20000000 +#define chipFeatures_RS_YUV_TARGET 0x40000000 +#define chipFeatures_32_BIT_INDICES 0x80000000 +#define chipMinorFeatures0_FLIP_Y 0x00000001 +#define chipMinorFeatures0_DUAL_RETURN_BUS 0x00000002 +#define chipMinorFeatures0_ENDIANNESS_CONFIG 0x00000004 +#define chipMinorFeatures0_TEXTURE_8K 0x00000008 +#define chipMinorFeatures0_CORRECT_TEXTURE_CONVERTER 0x00000010 +#define 
chipMinorFeatures0_SPECIAL_MSAA_LOD 0x00000020 +#define chipMinorFeatures0_FAST_CLEAR_FLUSH 0x00000040 +#define chipMinorFeatures0_2DPE20 0x00000080 +#define chipMinorFeatures0_CORRECT_AUTO_DISABLE 0x00000100 +#define chipMinorFeatures0_RENDERTARGET_8K 0x00000200 +#define chipMinorFeatures0_2BITPERTILE 0x00000400 +#define chipMinorFeatures0_SEPARATE_TILE_STATUS_WHEN_INTERLEAVED 0x00000800 +#define chipMinorFeatures0_SUPER_TILED 0x00001000 +#define chipMinorFeatures0_VG_20 0x00002000 +#define chipMinorFeatures0_TS_EXTENDED_COMMANDS 0x00004000 +#define chipMinorFeatures0_COMPRESSION_FIFO_FIXED 0x00008000 +#define chipMinorFeatures0_HAS_SIGN_FLOOR_CEIL 0x00010000 +#define chipMinorFeatures0_VG_FILTER 0x00020000 +#define chipMinorFeatures0_VG_21 0x00040000 +#define chipMinorFeatures0_SHADER_HAS_W 0x00080000 +#define chipMinorFeatures0_HAS_SQRT_TRIG 0x00100000 +#define chipMinorFeatures0_MORE_MINOR_FEATURES 0x00200000 +#define chipMinorFeatures0_MC20 0x00400000 +#define chipMinorFeatures0_MSAA_SIDEBAND 0x00800000 +#define chipMinorFeatures0_BUG_FIXES0 0x01000000 +#define chipMinorFeatures0_VAA 0x02000000 +#define chipMinorFeatures0_BYPASS_IN_MSAA 0x04000000 +#define chipMinorFeatures0_HZ 0x08000000 +#define chipMinorFeatures0_NEW_TEXTURE 0x10000000 +#define chipMinorFeatures0_2D_A8_TARGET 0x20000000 +#define chipMinorFeatures0_CORRECT_STENCIL 0x40000000 +#define chipMinorFeatures0_ENHANCE_VR 0x80000000 +#define chipMinorFeatures1_RSUV_SWIZZLE 0x00000001 +#define chipMinorFeatures1_V2_COMPRESSION 0x00000002 +#define chipMinorFeatures1_VG_DOUBLE_BUFFER 0x00000004 +#define chipMinorFeatures1_EXTRA_EVENT_STATES 0x00000008 +#define chipMinorFeatures1_NO_STRIPING_NEEDED 0x00000010 +#define chipMinorFeatures1_TEXTURE_STRIDE 0x00000020 +#define chipMinorFeatures1_BUG_FIXES3 0x00000040 +#define chipMinorFeatures1_AUTO_DISABLE 0x00000080 +#define chipMinorFeatures1_AUTO_RESTART_TS 0x00000100 +#define chipMinorFeatures1_DISABLE_PE_GATING 0x00000200 +#define chipMinorFeatures1_L2_WINDOWING 0x00000400 +#define chipMinorFeatures1_HALF_FLOAT 0x00000800 +#define chipMinorFeatures1_PIXEL_DITHER 0x00001000 +#define chipMinorFeatures1_TWO_STENCIL_REFERENCE 0x00002000 +#define chipMinorFeatures1_EXTENDED_PIXEL_FORMAT 0x00004000 +#define chipMinorFeatures1_CORRECT_MIN_MAX_DEPTH 0x00008000 +#define chipMinorFeatures1_2D_DITHER 0x00010000 +#define chipMinorFeatures1_BUG_FIXES5 0x00020000 +#define chipMinorFeatures1_NEW_2D 0x00040000 +#define chipMinorFeatures1_NEW_FP 0x00080000 +#define chipMinorFeatures1_TEXTURE_HALIGN 0x00100000 +#define chipMinorFeatures1_NON_POWER_OF_TWO 0x00200000 +#define chipMinorFeatures1_LINEAR_TEXTURE_SUPPORT 0x00400000 +#define chipMinorFeatures1_HALTI0 0x00800000 +#define chipMinorFeatures1_CORRECT_OVERFLOW_VG 0x01000000 +#define chipMinorFeatures1_NEGATIVE_LOG_FIX 0x02000000 +#define chipMinorFeatures1_RESOLVE_OFFSET 0x04000000 +#define chipMinorFeatures1_OK_TO_GATE_AXI_CLOCK 0x08000000 +#define chipMinorFeatures1_MMU_VERSION 0x10000000 +#define chipMinorFeatures1_WIDE_LINE 0x20000000 +#define chipMinorFeatures1_BUG_FIXES6 0x40000000 +#define chipMinorFeatures1_FC_FLUSH_STALL 0x80000000 +#define chipMinorFeatures2_LINE_LOOP 0x00000001 +#define chipMinorFeatures2_LOGIC_OP 0x00000002 +#define chipMinorFeatures2_UNK2 0x00000004 +#define chipMinorFeatures2_SUPERTILED_TEXTURE 0x00000008 +#define chipMinorFeatures2_UNK4 0x00000010 +#define chipMinorFeatures2_RECT_PRIMITIVE 0x00000020 +#define chipMinorFeatures2_COMPOSITION 0x00000040 +#define chipMinorFeatures2_CORRECT_AUTO_DISABLE_COUNT 
0x00000080 +#define chipMinorFeatures2_UNK8 0x00000100 +#define chipMinorFeatures2_UNK9 0x00000200 +#define chipMinorFeatures2_UNK10 0x00000400 +#define chipMinorFeatures2_SAMPLERBASE_16 0x00000800 +#define chipMinorFeatures2_UNK12 0x00001000 +#define chipMinorFeatures2_UNK13 0x00002000 +#define chipMinorFeatures2_UNK14 0x00004000 +#define chipMinorFeatures2_EXTRA_TEXTURE_STATE 0x00008000 +#define chipMinorFeatures2_FULL_DIRECTFB 0x00010000 +#define chipMinorFeatures2_2D_TILING 0x00020000 +#define chipMinorFeatures2_THREAD_WALKER_IN_PS 0x00040000 +#define chipMinorFeatures2_TILE_FILLER 0x00080000 +#define chipMinorFeatures2_UNK20 0x00100000 +#define chipMinorFeatures2_2D_MULTI_SOURCE_BLIT 0x00200000 +#define chipMinorFeatures2_UNK22 0x00400000 +#define chipMinorFeatures2_UNK23 0x00800000 +#define chipMinorFeatures2_UNK24 0x01000000 +#define chipMinorFeatures2_MIXED_STREAMS 0x02000000 +#define chipMinorFeatures2_2D_420_L2CACHE 0x04000000 +#define chipMinorFeatures2_UNK27 0x08000000 +#define chipMinorFeatures2_2D_NO_INDEX8_BRUSH 0x10000000 +#define chipMinorFeatures2_TEXTURE_TILED_READ 0x20000000 +#define chipMinorFeatures2_UNK30 0x40000000 +#define chipMinorFeatures2_UNK31 0x80000000 +#define chipMinorFeatures3_ROTATION_STALL_FIX 0x00000001 +#define chipMinorFeatures3_UNK1 0x00000002 +#define chipMinorFeatures3_2D_MULTI_SOURCE_BLT_EX 0x00000004 +#define chipMinorFeatures3_UNK3 0x00000008 +#define chipMinorFeatures3_UNK4 0x00000010 +#define chipMinorFeatures3_UNK5 0x00000020 +#define chipMinorFeatures3_UNK6 0x00000040 +#define chipMinorFeatures3_UNK7 0x00000080 +#define chipMinorFeatures3_UNK8 0x00000100 +#define chipMinorFeatures3_UNK9 0x00000200 +#define chipMinorFeatures3_BUG_FIXES10 0x00000400 +#define chipMinorFeatures3_UNK11 0x00000800 +#define chipMinorFeatures3_BUG_FIXES11 0x00001000 +#define chipMinorFeatures3_UNK13 0x00002000 +#define chipMinorFeatures3_UNK14 0x00004000 +#define chipMinorFeatures3_UNK15 0x00008000 +#define chipMinorFeatures3_UNK16 0x00010000 +#define chipMinorFeatures3_UNK17 0x00020000 +#define chipMinorFeatures3_UNK18 0x00040000 +#define chipMinorFeatures3_UNK19 0x00080000 +#define chipMinorFeatures3_UNK20 0x00100000 +#define chipMinorFeatures3_UNK21 0x00200000 +#define chipMinorFeatures3_UNK22 0x00400000 +#define chipMinorFeatures3_UNK23 0x00800000 +#define chipMinorFeatures3_UNK24 0x01000000 +#define chipMinorFeatures3_UNK25 0x02000000 +#define chipMinorFeatures3_UNK26 0x04000000 +#define chipMinorFeatures3_UNK27 0x08000000 +#define chipMinorFeatures3_UNK28 0x10000000 +#define chipMinorFeatures3_UNK29 0x20000000 +#define chipMinorFeatures3_UNK30 0x40000000 +#define chipMinorFeatures3_UNK31 0x80000000 +#define chipMinorFeatures4_UNK0 0x00000001 +#define chipMinorFeatures4_UNK1 0x00000002 +#define chipMinorFeatures4_UNK2 0x00000004 +#define chipMinorFeatures4_UNK3 0x00000008 +#define chipMinorFeatures4_UNK4 0x00000010 +#define chipMinorFeatures4_UNK5 0x00000020 +#define chipMinorFeatures4_UNK6 0x00000040 +#define chipMinorFeatures4_UNK7 0x00000080 +#define chipMinorFeatures4_UNK8 0x00000100 +#define chipMinorFeatures4_UNK9 0x00000200 +#define chipMinorFeatures4_UNK10 0x00000400 +#define chipMinorFeatures4_UNK11 0x00000800 +#define chipMinorFeatures4_UNK12 0x00001000 +#define chipMinorFeatures4_UNK13 0x00002000 +#define chipMinorFeatures4_UNK14 0x00004000 +#define chipMinorFeatures4_UNK15 0x00008000 +#define chipMinorFeatures4_UNK16 0x00010000 +#define chipMinorFeatures4_UNK17 0x00020000 +#define chipMinorFeatures4_UNK18 0x00040000 +#define chipMinorFeatures4_UNK19 
0x00080000 +#define chipMinorFeatures4_UNK20 0x00100000 +#define chipMinorFeatures4_UNK21 0x00200000 +#define chipMinorFeatures4_UNK22 0x00400000 +#define chipMinorFeatures4_UNK23 0x00800000 +#define chipMinorFeatures4_UNK24 0x01000000 +#define chipMinorFeatures4_UNK25 0x02000000 +#define chipMinorFeatures4_UNK26 0x04000000 +#define chipMinorFeatures4_UNK27 0x08000000 +#define chipMinorFeatures4_UNK28 0x10000000 +#define chipMinorFeatures4_UNK29 0x20000000 +#define chipMinorFeatures4_UNK30 0x40000000 +#define chipMinorFeatures4_UNK31 0x80000000 + +#endif /* COMMON_XML */ diff --git a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c new file mode 100644 index 000000000000..332c55ebba6d --- /dev/null +++ b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c @@ -0,0 +1,268 @@ +/* + * Copyright (C) 2014 Etnaviv Project + * Author: Christian Gmeiner + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + */ + +#include "etnaviv_gpu.h" +#include "etnaviv_gem.h" +#include "etnaviv_mmu.h" + +#include "common.xml.h" +#include "state.xml.h" +#include "cmdstream.xml.h" + +/* + * Command Buffer helper: + */ + + +static inline void OUT(struct etnaviv_cmdbuf *buffer, u32 data) +{ + u32 *vaddr = (u32 *)buffer->vaddr; + + BUG_ON(buffer->user_size >= buffer->size); + + vaddr[buffer->user_size / 4] = data; + buffer->user_size += 4; +} + +static inline void CMD_LOAD_STATE(struct etnaviv_cmdbuf *buffer, + u32 reg, u32 value) +{ + u32 index = reg >> VIV_FE_LOAD_STATE_HEADER_OFFSET__SHR; + + buffer->user_size = ALIGN(buffer->user_size, 8); + + /* write a register via cmd stream */ + OUT(buffer, VIV_FE_LOAD_STATE_HEADER_OP_LOAD_STATE | + VIV_FE_LOAD_STATE_HEADER_COUNT(1) | + VIV_FE_LOAD_STATE_HEADER_OFFSET(index)); + OUT(buffer, value); +} + +static inline void CMD_END(struct etnaviv_cmdbuf *buffer) +{ + buffer->user_size = ALIGN(buffer->user_size, 8); + + OUT(buffer, VIV_FE_END_HEADER_OP_END); +} + +static inline void CMD_WAIT(struct etnaviv_cmdbuf *buffer) +{ + buffer->user_size = ALIGN(buffer->user_size, 8); + + OUT(buffer, VIV_FE_WAIT_HEADER_OP_WAIT | 200); +} + +static inline void CMD_LINK(struct etnaviv_cmdbuf *buffer, + u16 prefetch, u32 address) +{ + buffer->user_size = ALIGN(buffer->user_size, 8); + + OUT(buffer, VIV_FE_LINK_HEADER_OP_LINK | + VIV_FE_LINK_HEADER_PREFETCH(prefetch)); + OUT(buffer, address); +} + +static inline void CMD_STALL(struct etnaviv_cmdbuf *buffer, + u32 from, u32 to) +{ + buffer->user_size = ALIGN(buffer->user_size, 8); + + OUT(buffer, VIV_FE_STALL_HEADER_OP_STALL); + OUT(buffer, VIV_FE_STALL_TOKEN_FROM(from) | VIV_FE_STALL_TOKEN_TO(to)); +} + +static void etnaviv_cmd_select_pipe(struct etnaviv_cmdbuf *buffer, u8 pipe) +{ + u32 flush; + u32 stall; + + /* + * This assumes that if we're switching to 2D, we're switching + * away from 3D, and vice versa. Hence, if we're switching to + * the 2D core, we need to flush the 3D depth and color caches, + * otherwise we need to flush the 2D pixel engine cache. 
+ */ + if (pipe == ETNA_PIPE_2D) + flush = VIVS_GL_FLUSH_CACHE_DEPTH | VIVS_GL_FLUSH_CACHE_COLOR; + else + flush = VIVS_GL_FLUSH_CACHE_PE2D; + + stall = VIVS_GL_SEMAPHORE_TOKEN_FROM(SYNC_RECIPIENT_FE) | + VIVS_GL_SEMAPHORE_TOKEN_TO(SYNC_RECIPIENT_PE); + + CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE, flush); + CMD_LOAD_STATE(buffer, VIVS_GL_SEMAPHORE_TOKEN, stall); + + CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE); + + CMD_LOAD_STATE(buffer, VIVS_GL_PIPE_SELECT, + VIVS_GL_PIPE_SELECT_PIPE(pipe)); +} + +static u32 gpu_va(struct etnaviv_gpu *gpu, struct etnaviv_cmdbuf *buf) +{ + return buf->paddr - gpu->memory_base; +} + +static void etnaviv_buffer_dump(struct etnaviv_gpu *gpu, + struct etnaviv_cmdbuf *buf, u32 off, u32 len) +{ + u32 size = buf->size; + u32 *ptr = buf->vaddr + off; + + dev_info(gpu->dev, "virt %p phys 0x%08x free 0x%08x\n", + ptr, gpu_va(gpu, buf) + off, size - len * 4 - off); + + print_hex_dump(KERN_INFO, "cmd ", DUMP_PREFIX_OFFSET, 16, 4, + ptr, len * 4, 0); +} + +u16 etnaviv_buffer_init(struct etnaviv_gpu *gpu) +{ + struct etnaviv_cmdbuf *buffer = gpu->buffer; + + /* initialize buffer */ + buffer->user_size = 0; + + CMD_WAIT(buffer); + CMD_LINK(buffer, 2, gpu_va(gpu, buffer) + buffer->user_size - 4); + + return buffer->user_size / 8; +} + +void etnaviv_buffer_end(struct etnaviv_gpu *gpu) +{ + struct etnaviv_cmdbuf *buffer = gpu->buffer; + + /* Replace the last WAIT with an END */ + buffer->user_size -= 16; + + CMD_END(buffer); + mb(); +} + +void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event, + struct etnaviv_cmdbuf *cmdbuf) +{ + struct etnaviv_cmdbuf *buffer = gpu->buffer; + u32 *lw = buffer->vaddr + buffer->user_size - 16; + u32 back, link_target, link_size, reserve_size, extra_size = 0; + + if (drm_debug & DRM_UT_DRIVER) + etnaviv_buffer_dump(gpu, buffer, 0, 0x50); + + /* + * If we need to flush the MMU prior to submitting this buffer, we + * will need to append a mmu flush load state, followed by a new + * link to this buffer - a total of four additional words. + */ + if (gpu->mmu->need_flush || gpu->switch_context) { + /* link command */ + extra_size += 2; + /* flush command */ + if (gpu->mmu->need_flush) + extra_size += 2; + /* pipe switch commands */ + if (gpu->switch_context) + extra_size += 8; + } + + reserve_size = (6 + extra_size) * 4; + + /* + * if we are going to completely overflow the buffer, we need to wrap. 
+ */ + if (buffer->user_size + reserve_size > buffer->size) + buffer->user_size = 0; + + /* save offset back into main buffer */ + back = buffer->user_size + reserve_size - 6 * 4; + link_target = gpu_va(gpu, buffer) + buffer->user_size; + link_size = 6; + + /* Skip over any extra instructions */ + link_target += extra_size * sizeof(u32); + + if (drm_debug & DRM_UT_DRIVER) + pr_info("stream link to 0x%08x @ 0x%08x %p\n", + link_target, gpu_va(gpu, cmdbuf), cmdbuf->vaddr); + + /* jump back from cmd to main buffer */ + CMD_LINK(cmdbuf, link_size, link_target); + + link_target = gpu_va(gpu, cmdbuf); + link_size = cmdbuf->size / 8; + + + + if (drm_debug & DRM_UT_DRIVER) { + print_hex_dump(KERN_INFO, "cmd ", DUMP_PREFIX_OFFSET, 16, 4, + cmdbuf->vaddr, cmdbuf->size, 0); + + pr_info("link op: %p\n", lw); + pr_info("link addr: %p\n", lw + 1); + pr_info("addr: 0x%08x\n", link_target); + pr_info("back: 0x%08x\n", gpu_va(gpu, buffer) + back); + pr_info("event: %d\n", event); + } + + if (gpu->mmu->need_flush || gpu->switch_context) { + u32 new_target = gpu_va(gpu, buffer) + buffer->user_size; + + if (gpu->mmu->need_flush) { + /* Add the MMU flush */ + CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_MMU, + VIVS_GL_FLUSH_MMU_FLUSH_FEMMU | + VIVS_GL_FLUSH_MMU_FLUSH_UNK1 | + VIVS_GL_FLUSH_MMU_FLUSH_UNK2 | + VIVS_GL_FLUSH_MMU_FLUSH_PEMMU | + VIVS_GL_FLUSH_MMU_FLUSH_UNK4); + + gpu->mmu->need_flush = false; + } + + if (gpu->switch_context) { + etnaviv_cmd_select_pipe(buffer, cmdbuf->exec_state); + gpu->switch_context = false; + } + + /* And the link to the first buffer */ + CMD_LINK(buffer, link_size, link_target); + + /* Update the link target to point to above instructions */ + link_target = new_target; + link_size = extra_size; + } + + /* trigger event */ + CMD_LOAD_STATE(buffer, VIVS_GL_EVENT, VIVS_GL_EVENT_EVENT_ID(event) | + VIVS_GL_EVENT_FROM_PE); + + /* append WAIT/LINK to main buffer */ + CMD_WAIT(buffer); + CMD_LINK(buffer, 2, gpu_va(gpu, buffer) + (buffer->user_size - 4)); + + /* Change WAIT into a LINK command; write the address first. */ + *(lw + 1) = link_target; + mb(); + *(lw) = VIV_FE_LINK_HEADER_OP_LINK | + VIV_FE_LINK_HEADER_PREFETCH(link_size); + mb(); + + if (drm_debug & DRM_UT_DRIVER) + etnaviv_buffer_dump(gpu, buffer, 0, 0x50); +} diff --git a/drivers/gpu/drm/etnaviv/etnaviv_cmd_parser.c b/drivers/gpu/drm/etnaviv/etnaviv_cmd_parser.c new file mode 100644 index 000000000000..dcfd565c88d1 --- /dev/null +++ b/drivers/gpu/drm/etnaviv/etnaviv_cmd_parser.c @@ -0,0 +1,209 @@ +/* + * Copyright (C) 2015 Etnaviv Project + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . 
+ */ + +#include + +#include "etnaviv_gem.h" +#include "etnaviv_gpu.h" + +#include "cmdstream.xml.h" + +#define EXTRACT(val, field) (((val) & field##__MASK) >> field##__SHIFT) + +struct etna_validation_state { + struct etnaviv_gpu *gpu; + const struct drm_etnaviv_gem_submit_reloc *relocs; + unsigned int num_relocs; + u32 *start; +}; + +static const struct { + u16 offset; + u16 size; +} etnaviv_sensitive_states[] __initconst = { +#define ST(start, num) { (start) >> 2, (num) } + /* 2D */ + ST(0x1200, 1), + ST(0x1228, 1), + ST(0x1238, 1), + ST(0x1284, 1), + ST(0x128c, 1), + ST(0x1304, 1), + ST(0x1310, 1), + ST(0x1318, 1), + ST(0x12800, 4), + ST(0x128a0, 4), + ST(0x128c0, 4), + ST(0x12970, 4), + ST(0x12a00, 8), + ST(0x12b40, 8), + ST(0x12b80, 8), + ST(0x12ce0, 8), + /* 3D */ + ST(0x0644, 1), + ST(0x064c, 1), + ST(0x0680, 8), + ST(0x1410, 1), + ST(0x1430, 1), + ST(0x1458, 1), + ST(0x1460, 8), + ST(0x1480, 8), + ST(0x1500, 8), + ST(0x1520, 8), + ST(0x1608, 1), + ST(0x1610, 1), + ST(0x1658, 1), + ST(0x165c, 1), + ST(0x1664, 1), + ST(0x1668, 1), + ST(0x16a4, 1), + ST(0x16c0, 8), + ST(0x16e0, 8), + ST(0x1740, 8), + ST(0x2400, 14 * 16), + ST(0x10800, 32 * 16), +#undef ST +}; + +#define ETNAVIV_STATES_SIZE (VIV_FE_LOAD_STATE_HEADER_OFFSET__MASK + 1u) +static DECLARE_BITMAP(etnaviv_states, ETNAVIV_STATES_SIZE); + +void __init etnaviv_validate_init(void) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(etnaviv_sensitive_states); i++) + bitmap_set(etnaviv_states, etnaviv_sensitive_states[i].offset, + etnaviv_sensitive_states[i].size); +} + +static void etnaviv_warn_if_non_sensitive(struct etna_validation_state *state, + unsigned int buf_offset, unsigned int state_addr) +{ + if (state->num_relocs && state->relocs->submit_offset < buf_offset) { + dev_warn_once(state->gpu->dev, + "%s: relocation for non-sensitive state 0x%x at offset %u\n", + __func__, state_addr, + state->relocs->submit_offset); + while (state->num_relocs && + state->relocs->submit_offset < buf_offset) { + state->relocs++; + state->num_relocs--; + } + } +} + +static bool etnaviv_validate_load_state(struct etna_validation_state *state, + u32 *ptr, unsigned int state_offset, unsigned int num) +{ + unsigned int size = min(ETNAVIV_STATES_SIZE, state_offset + num); + unsigned int st_offset = state_offset, buf_offset; + + for_each_set_bit_from(st_offset, etnaviv_states, size) { + buf_offset = (ptr - state->start + + st_offset - state_offset) * 4; + + etnaviv_warn_if_non_sensitive(state, buf_offset, st_offset * 4); + if (state->num_relocs && + state->relocs->submit_offset == buf_offset) { + state->relocs++; + state->num_relocs--; + continue; + } + + dev_warn_ratelimited(state->gpu->dev, + "%s: load state touches restricted state 0x%x at offset %u\n", + __func__, st_offset * 4, buf_offset); + return false; + } + + if (state->num_relocs) { + buf_offset = (ptr - state->start + num) * 4; + etnaviv_warn_if_non_sensitive(state, buf_offset, st_offset * 4 + + state->relocs->submit_offset - + buf_offset); + } + + return true; +} + +static uint8_t cmd_length[32] = { + [FE_OPCODE_DRAW_PRIMITIVES] = 4, + [FE_OPCODE_DRAW_INDEXED_PRIMITIVES] = 6, + [FE_OPCODE_NOP] = 2, + [FE_OPCODE_STALL] = 2, +}; + +bool etnaviv_cmd_validate_one(struct etnaviv_gpu *gpu, u32 *stream, + unsigned int size, + struct drm_etnaviv_gem_submit_reloc *relocs, + unsigned int reloc_size) +{ + struct etna_validation_state state; + u32 *buf = stream; + u32 *end = buf + size; + + state.gpu = gpu; + state.relocs = relocs; + state.num_relocs = reloc_size; + state.start = stream; + + while 
(buf < end) { + u32 cmd = *buf; + unsigned int len, n, off; + unsigned int op = cmd >> 27; + + switch (op) { + case FE_OPCODE_LOAD_STATE: + n = EXTRACT(cmd, VIV_FE_LOAD_STATE_HEADER_COUNT); + len = ALIGN(1 + n, 2); + if (buf + len > end) + break; + + off = EXTRACT(cmd, VIV_FE_LOAD_STATE_HEADER_OFFSET); + if (!etnaviv_validate_load_state(&state, buf + 1, + off, n)) + return false; + break; + + case FE_OPCODE_DRAW_2D: + n = EXTRACT(cmd, VIV_FE_DRAW_2D_HEADER_COUNT); + if (n == 0) + n = 256; + len = 2 + n * 2; + break; + + default: + len = cmd_length[op]; + if (len == 0) { + dev_err(gpu->dev, "%s: op %u not permitted at offset %tu\n", + __func__, op, buf - state.start); + return false; + } + break; + } + + buf += len; + } + + if (buf > end) { + dev_err(gpu->dev, "%s: commands overflow end of buffer: %tu > %u\n", + __func__, buf - state.start, size); + return false; + } + + return true; +} diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c new file mode 100644 index 000000000000..5c89ebb52fd2 --- /dev/null +++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c @@ -0,0 +1,707 @@ +/* + * Copyright (C) 2015 Etnaviv Project + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + */ + +#include +#include + +#include "etnaviv_drv.h" +#include "etnaviv_gpu.h" +#include "etnaviv_gem.h" +#include "etnaviv_mmu.h" +#include "etnaviv_gem.h" + +#ifdef CONFIG_DRM_ETNAVIV_REGISTER_LOGGING +static bool reglog; +MODULE_PARM_DESC(reglog, "Enable register read/write logging"); +module_param(reglog, bool, 0600); +#else +#define reglog 0 +#endif + +void __iomem *etnaviv_ioremap(struct platform_device *pdev, const char *name, + const char *dbgname) +{ + struct resource *res; + void __iomem *ptr; + + if (name) + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name); + else + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + + ptr = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(ptr)) { + dev_err(&pdev->dev, "failed to ioremap %s: %ld\n", name, + PTR_ERR(ptr)); + return ptr; + } + + if (reglog) + dev_printk(KERN_DEBUG, &pdev->dev, "IO:region %s 0x%p %08zx\n", + dbgname, ptr, (size_t)resource_size(res)); + + return ptr; +} + +void etnaviv_writel(u32 data, void __iomem *addr) +{ + if (reglog) + printk(KERN_DEBUG "IO:W %p %08x\n", addr, data); + + writel(data, addr); +} + +u32 etnaviv_readl(const void __iomem *addr) +{ + u32 val = readl(addr); + + if (reglog) + printk(KERN_DEBUG "IO:R %p %08x\n", addr, val); + + return val; +} + +/* + * DRM operations: + */ + + +static void load_gpu(struct drm_device *dev) +{ + struct etnaviv_drm_private *priv = dev->dev_private; + unsigned int i; + + for (i = 0; i < ETNA_MAX_PIPES; i++) { + struct etnaviv_gpu *g = priv->gpu[i]; + + if (g) { + int ret; + + ret = etnaviv_gpu_init(g); + if (ret) { + dev_err(g->dev, "hw init failed: %d\n", ret); + priv->gpu[i] = NULL; + } + } + } +} + +static int etnaviv_open(struct drm_device *dev, struct drm_file *file) +{ + struct etnaviv_file_private *ctx; + + ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); + 
if (!ctx) + return -ENOMEM; + + file->driver_priv = ctx; + + return 0; +} + +static void etnaviv_preclose(struct drm_device *dev, struct drm_file *file) +{ + struct etnaviv_drm_private *priv = dev->dev_private; + struct etnaviv_file_private *ctx = file->driver_priv; + unsigned int i; + + for (i = 0; i < ETNA_MAX_PIPES; i++) { + struct etnaviv_gpu *gpu = priv->gpu[i]; + + if (gpu) { + mutex_lock(&gpu->lock); + if (gpu->lastctx == ctx) + gpu->lastctx = NULL; + mutex_unlock(&gpu->lock); + } + } + + kfree(ctx); +} + +/* + * DRM debugfs: + */ + +#ifdef CONFIG_DEBUG_FS +static int etnaviv_gem_show(struct drm_device *dev, struct seq_file *m) +{ + struct etnaviv_drm_private *priv = dev->dev_private; + + etnaviv_gem_describe_objects(priv, m); + + return 0; +} + +static int etnaviv_mm_show(struct drm_device *dev, struct seq_file *m) +{ + int ret; + + read_lock(&dev->vma_offset_manager->vm_lock); + ret = drm_mm_dump_table(m, &dev->vma_offset_manager->vm_addr_space_mm); + read_unlock(&dev->vma_offset_manager->vm_lock); + + return ret; +} + +static int etnaviv_mmu_show(struct etnaviv_gpu *gpu, struct seq_file *m) +{ + seq_printf(m, "Active Objects (%s):\n", dev_name(gpu->dev)); + + mutex_lock(&gpu->mmu->lock); + drm_mm_dump_table(m, &gpu->mmu->mm); + mutex_unlock(&gpu->mmu->lock); + + return 0; +} + +static void etnaviv_buffer_dump(struct etnaviv_gpu *gpu, struct seq_file *m) +{ + struct etnaviv_cmdbuf *buf = gpu->buffer; + u32 size = buf->size; + u32 *ptr = buf->vaddr; + u32 i; + + seq_printf(m, "virt %p - phys 0x%llx - free 0x%08x\n", + buf->vaddr, (u64)buf->paddr, size - buf->user_size); + + for (i = 0; i < size / 4; i++) { + if (i && !(i % 4)) + seq_puts(m, "\n"); + if (i % 4 == 0) + seq_printf(m, "\t0x%p: ", ptr + i); + seq_printf(m, "%08x ", *(ptr + i)); + } + seq_puts(m, "\n"); +} + +static int etnaviv_ring_show(struct etnaviv_gpu *gpu, struct seq_file *m) +{ + seq_printf(m, "Ring Buffer (%s): ", dev_name(gpu->dev)); + + mutex_lock(&gpu->lock); + etnaviv_buffer_dump(gpu, m); + mutex_unlock(&gpu->lock); + + return 0; +} + +static int show_unlocked(struct seq_file *m, void *arg) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + int (*show)(struct drm_device *dev, struct seq_file *m) = + node->info_ent->data; + + return show(dev, m); +} + +static int show_each_gpu(struct seq_file *m, void *arg) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + struct etnaviv_drm_private *priv = dev->dev_private; + struct etnaviv_gpu *gpu; + int (*show)(struct etnaviv_gpu *gpu, struct seq_file *m) = + node->info_ent->data; + unsigned int i; + int ret = 0; + + for (i = 0; i < ETNA_MAX_PIPES; i++) { + gpu = priv->gpu[i]; + if (!gpu) + continue; + + ret = show(gpu, m); + if (ret < 0) + break; + } + + return ret; +} + +static struct drm_info_list etnaviv_debugfs_list[] = { + {"gpu", show_each_gpu, 0, etnaviv_gpu_debugfs}, + {"gem", show_unlocked, 0, etnaviv_gem_show}, + { "mm", show_unlocked, 0, etnaviv_mm_show }, + {"mmu", show_each_gpu, 0, etnaviv_mmu_show}, + {"ring", show_each_gpu, 0, etnaviv_ring_show}, +}; + +static int etnaviv_debugfs_init(struct drm_minor *minor) +{ + struct drm_device *dev = minor->dev; + int ret; + + ret = drm_debugfs_create_files(etnaviv_debugfs_list, + ARRAY_SIZE(etnaviv_debugfs_list), + minor->debugfs_root, minor); + + if (ret) { + dev_err(dev->dev, "could not install etnaviv_debugfs_list\n"); + return ret; + } + + return ret; +} + +static void 
etnaviv_debugfs_cleanup(struct drm_minor *minor) +{ + drm_debugfs_remove_files(etnaviv_debugfs_list, + ARRAY_SIZE(etnaviv_debugfs_list), minor); +} +#endif + +/* + * DRM ioctls: + */ + +static int etnaviv_ioctl_get_param(struct drm_device *dev, void *data, + struct drm_file *file) +{ + struct etnaviv_drm_private *priv = dev->dev_private; + struct drm_etnaviv_param *args = data; + struct etnaviv_gpu *gpu; + + if (args->pipe >= ETNA_MAX_PIPES) + return -EINVAL; + + gpu = priv->gpu[args->pipe]; + if (!gpu) + return -ENXIO; + + return etnaviv_gpu_get_param(gpu, args->param, &args->value); +} + +static int etnaviv_ioctl_gem_new(struct drm_device *dev, void *data, + struct drm_file *file) +{ + struct drm_etnaviv_gem_new *args = data; + + if (args->flags & ~(ETNA_BO_CACHED | ETNA_BO_WC | ETNA_BO_UNCACHED | + ETNA_BO_FORCE_MMU)) + return -EINVAL; + + return etnaviv_gem_new_handle(dev, file, args->size, + args->flags, &args->handle); +} + +#define TS(t) ((struct timespec){ \ + .tv_sec = (t).tv_sec, \ + .tv_nsec = (t).tv_nsec \ +}) + +static int etnaviv_ioctl_gem_cpu_prep(struct drm_device *dev, void *data, + struct drm_file *file) +{ + struct drm_etnaviv_gem_cpu_prep *args = data; + struct drm_gem_object *obj; + int ret; + + if (args->op & ~(ETNA_PREP_READ | ETNA_PREP_WRITE | ETNA_PREP_NOSYNC)) + return -EINVAL; + + obj = drm_gem_object_lookup(dev, file, args->handle); + if (!obj) + return -ENOENT; + + ret = etnaviv_gem_cpu_prep(obj, args->op, &TS(args->timeout)); + + drm_gem_object_unreference_unlocked(obj); + + return ret; +} + +static int etnaviv_ioctl_gem_cpu_fini(struct drm_device *dev, void *data, + struct drm_file *file) +{ + struct drm_etnaviv_gem_cpu_fini *args = data; + struct drm_gem_object *obj; + int ret; + + if (args->flags) + return -EINVAL; + + obj = drm_gem_object_lookup(dev, file, args->handle); + if (!obj) + return -ENOENT; + + ret = etnaviv_gem_cpu_fini(obj); + + drm_gem_object_unreference_unlocked(obj); + + return ret; +} + +static int etnaviv_ioctl_gem_info(struct drm_device *dev, void *data, + struct drm_file *file) +{ + struct drm_etnaviv_gem_info *args = data; + struct drm_gem_object *obj; + int ret; + + if (args->pad) + return -EINVAL; + + obj = drm_gem_object_lookup(dev, file, args->handle); + if (!obj) + return -ENOENT; + + ret = etnaviv_gem_mmap_offset(obj, &args->offset); + drm_gem_object_unreference_unlocked(obj); + + return ret; +} + +static int etnaviv_ioctl_wait_fence(struct drm_device *dev, void *data, + struct drm_file *file) +{ + struct drm_etnaviv_wait_fence *args = data; + struct etnaviv_drm_private *priv = dev->dev_private; + struct timespec *timeout = &TS(args->timeout); + struct etnaviv_gpu *gpu; + + if (args->flags & ~(ETNA_WAIT_NONBLOCK)) + return -EINVAL; + + if (args->pipe >= ETNA_MAX_PIPES) + return -EINVAL; + + gpu = priv->gpu[args->pipe]; + if (!gpu) + return -ENXIO; + + if (args->flags & ETNA_WAIT_NONBLOCK) + timeout = NULL; + + return etnaviv_gpu_wait_fence_interruptible(gpu, args->fence, + timeout); +} + +static int etnaviv_ioctl_gem_userptr(struct drm_device *dev, void *data, + struct drm_file *file) +{ + struct drm_etnaviv_gem_userptr *args = data; + int access; + + if (args->flags & ~(ETNA_USERPTR_READ|ETNA_USERPTR_WRITE) || + args->flags == 0) + return -EINVAL; + + if (offset_in_page(args->user_ptr | args->user_size) || + (uintptr_t)args->user_ptr != args->user_ptr || + (u32)args->user_size != args->user_size || + args->user_ptr & ~PAGE_MASK) + return -EINVAL; + + if (args->flags & ETNA_USERPTR_WRITE) + access = VERIFY_WRITE; + else + access = 
VERIFY_READ; + + if (!access_ok(access, (void __user *)(unsigned long)args->user_ptr, + args->user_size)) + return -EFAULT; + + return etnaviv_gem_new_userptr(dev, file, args->user_ptr, + args->user_size, args->flags, + &args->handle); +} + +static int etnaviv_ioctl_gem_wait(struct drm_device *dev, void *data, + struct drm_file *file) +{ + struct etnaviv_drm_private *priv = dev->dev_private; + struct drm_etnaviv_gem_wait *args = data; + struct timespec *timeout = &TS(args->timeout); + struct drm_gem_object *obj; + struct etnaviv_gpu *gpu; + int ret; + + if (args->flags & ~(ETNA_WAIT_NONBLOCK)) + return -EINVAL; + + if (args->pipe >= ETNA_MAX_PIPES) + return -EINVAL; + + gpu = priv->gpu[args->pipe]; + if (!gpu) + return -ENXIO; + + obj = drm_gem_object_lookup(dev, file, args->handle); + if (!obj) + return -ENOENT; + + if (args->flags & ETNA_WAIT_NONBLOCK) + timeout = NULL; + + ret = etnaviv_gem_wait_bo(gpu, obj, timeout); + + drm_gem_object_unreference_unlocked(obj); + + return ret; +} + +static const struct drm_ioctl_desc etnaviv_ioctls[] = { +#define ETNA_IOCTL(n, func, flags) \ + DRM_IOCTL_DEF_DRV(ETNAVIV_##n, etnaviv_ioctl_##func, flags) + ETNA_IOCTL(GET_PARAM, get_param, DRM_AUTH|DRM_RENDER_ALLOW), + ETNA_IOCTL(GEM_NEW, gem_new, DRM_AUTH|DRM_RENDER_ALLOW), + ETNA_IOCTL(GEM_INFO, gem_info, DRM_AUTH|DRM_RENDER_ALLOW), + ETNA_IOCTL(GEM_CPU_PREP, gem_cpu_prep, DRM_AUTH|DRM_RENDER_ALLOW), + ETNA_IOCTL(GEM_CPU_FINI, gem_cpu_fini, DRM_AUTH|DRM_RENDER_ALLOW), + ETNA_IOCTL(GEM_SUBMIT, gem_submit, DRM_AUTH|DRM_RENDER_ALLOW), + ETNA_IOCTL(WAIT_FENCE, wait_fence, DRM_AUTH|DRM_RENDER_ALLOW), + ETNA_IOCTL(GEM_USERPTR, gem_userptr, DRM_AUTH|DRM_RENDER_ALLOW), + ETNA_IOCTL(GEM_WAIT, gem_wait, DRM_AUTH|DRM_RENDER_ALLOW), +}; + +static const struct vm_operations_struct vm_ops = { + .fault = etnaviv_gem_fault, + .open = drm_gem_vm_open, + .close = drm_gem_vm_close, +}; + +static const struct file_operations fops = { + .owner = THIS_MODULE, + .open = drm_open, + .release = drm_release, + .unlocked_ioctl = drm_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = drm_compat_ioctl, +#endif + .poll = drm_poll, + .read = drm_read, + .llseek = no_llseek, + .mmap = etnaviv_gem_mmap, +}; + +static struct drm_driver etnaviv_drm_driver = { + .driver_features = DRIVER_HAVE_IRQ | + DRIVER_GEM | + DRIVER_PRIME | + DRIVER_RENDER, + .open = etnaviv_open, + .preclose = etnaviv_preclose, + .set_busid = drm_platform_set_busid, + .gem_free_object = etnaviv_gem_free_object, + .gem_vm_ops = &vm_ops, + .prime_handle_to_fd = drm_gem_prime_handle_to_fd, + .prime_fd_to_handle = drm_gem_prime_fd_to_handle, + .gem_prime_export = drm_gem_prime_export, + .gem_prime_import = drm_gem_prime_import, + .gem_prime_pin = etnaviv_gem_prime_pin, + .gem_prime_unpin = etnaviv_gem_prime_unpin, + .gem_prime_get_sg_table = etnaviv_gem_prime_get_sg_table, + .gem_prime_import_sg_table = etnaviv_gem_prime_import_sg_table, + .gem_prime_vmap = etnaviv_gem_prime_vmap, + .gem_prime_vunmap = etnaviv_gem_prime_vunmap, +#ifdef CONFIG_DEBUG_FS + .debugfs_init = etnaviv_debugfs_init, + .debugfs_cleanup = etnaviv_debugfs_cleanup, +#endif + .ioctls = etnaviv_ioctls, + .num_ioctls = DRM_ETNAVIV_NUM_IOCTLS, + .fops = &fops, + .name = "etnaviv", + .desc = "etnaviv DRM", + .date = "20151214", + .major = 1, + .minor = 0, +}; + +/* + * Platform driver: + */ +static int etnaviv_bind(struct device *dev) +{ + struct etnaviv_drm_private *priv; + struct drm_device *drm; + int ret; + + drm = drm_dev_alloc(&etnaviv_drm_driver, dev); + if (!drm) + return -ENOMEM; + + 
drm->platformdev = to_platform_device(dev); + + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) { + dev_err(dev, "failed to allocate private data\n"); + ret = -ENOMEM; + goto out_unref; + } + drm->dev_private = priv; + + priv->wq = alloc_ordered_workqueue("etnaviv", 0); + if (!priv->wq) { + ret = -ENOMEM; + goto out_wq; + } + + mutex_init(&priv->gem_lock); + INIT_LIST_HEAD(&priv->gem_list); + priv->num_gpus = 0; + + dev_set_drvdata(dev, drm); + + ret = component_bind_all(dev, drm); + if (ret < 0) + goto out_bind; + + load_gpu(drm); + + ret = drm_dev_register(drm, 0); + if (ret) + goto out_register; + + return 0; + +out_register: + component_unbind_all(dev, drm); +out_bind: + flush_workqueue(priv->wq); + destroy_workqueue(priv->wq); +out_wq: + kfree(priv); +out_unref: + drm_dev_unref(drm); + + return ret; +} + +static void etnaviv_unbind(struct device *dev) +{ + struct drm_device *drm = dev_get_drvdata(dev); + struct etnaviv_drm_private *priv = drm->dev_private; + + drm_dev_unregister(drm); + + flush_workqueue(priv->wq); + destroy_workqueue(priv->wq); + + component_unbind_all(dev, drm); + + drm->dev_private = NULL; + kfree(priv); + + drm_put_dev(drm); +} + +static const struct component_master_ops etnaviv_master_ops = { + .bind = etnaviv_bind, + .unbind = etnaviv_unbind, +}; + +static int compare_of(struct device *dev, void *data) +{ + struct device_node *np = data; + + return dev->of_node == np; +} + +static int compare_str(struct device *dev, void *data) +{ + return !strcmp(dev_name(dev), data); +} + +static int etnaviv_pdev_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *node = dev->of_node; + struct component_match *match = NULL; + + dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); + + if (node) { + struct device_node *core_node; + int i; + + for (i = 0; ; i++) { + core_node = of_parse_phandle(node, "cores", i); + if (!core_node) + break; + + component_match_add(&pdev->dev, &match, compare_of, + core_node); + of_node_put(core_node); + } + } else if (dev->platform_data) { + char **names = dev->platform_data; + unsigned i; + + for (i = 0; names[i]; i++) + component_match_add(dev, &match, compare_str, names[i]); + } + + return component_master_add_with_match(dev, &etnaviv_master_ops, match); +} + +static int etnaviv_pdev_remove(struct platform_device *pdev) +{ + component_master_del(&pdev->dev, &etnaviv_master_ops); + + return 0; +} + +static const struct of_device_id dt_match[] = { + { .compatible = "fsl,imx-gpu-subsystem" }, + { .compatible = "marvell,dove-gpu-subsystem" }, + {} +}; +MODULE_DEVICE_TABLE(of, dt_match); + +static struct platform_driver etnaviv_platform_driver = { + .probe = etnaviv_pdev_probe, + .remove = etnaviv_pdev_remove, + .driver = { + .owner = THIS_MODULE, + .name = "etnaviv", + .of_match_table = dt_match, + }, +}; + +static int __init etnaviv_init(void) +{ + int ret; + + etnaviv_validate_init(); + + ret = platform_driver_register(&etnaviv_gpu_driver); + if (ret != 0) + return ret; + + ret = platform_driver_register(&etnaviv_platform_driver); + if (ret != 0) + platform_driver_unregister(&etnaviv_gpu_driver); + + return ret; +} +module_init(etnaviv_init); + +static void __exit etnaviv_exit(void) +{ + platform_driver_unregister(&etnaviv_gpu_driver); + platform_driver_unregister(&etnaviv_platform_driver); +} +module_exit(etnaviv_exit); + +MODULE_AUTHOR("Christian Gmeiner "); +MODULE_AUTHOR("Russell King "); +MODULE_AUTHOR("Lucas Stach "); +MODULE_DESCRIPTION("etnaviv DRM Driver"); +MODULE_LICENSE("GPL v2"); 
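
Several of the ioctl handlers above convert the userspace timeout into a struct timespec with the TS() compound-literal macro and treat ETNA_WAIT_NONBLOCK as "pass a NULL timeout" to the wait helpers. A minimal standalone sketch of that convention; the struct and flag names here are illustrative stand-ins, not the real uapi definitions:

#include <stdio.h>
#include <time.h>

/* Hypothetical stand-in for the uapi timeout type; field names assumed. */
struct etna_timeout {
        long long tv_sec;
        long long tv_nsec;
};

/* Same idea as the driver's TS() macro: build a struct timespec on the fly. */
#define TS(t) ((struct timespec){ .tv_sec = (t).tv_sec, .tv_nsec = (t).tv_nsec })

#define WAIT_NONBLOCK 0x1

static int do_wait(const struct timespec *timeout)
{
        if (!timeout) {
                printf("non-blocking wait: poll once and return\n");
                return 0;
        }
        printf("blocking wait for up to %lld.%09lld s\n",
               (long long)timeout->tv_sec, (long long)timeout->tv_nsec);
        return 0;
}

int main(void)
{
        struct etna_timeout arg = { .tv_sec = 2, .tv_nsec = 500000000 };
        struct timespec ts = TS(arg);
        unsigned int flags = 0;

        /* Blocking path: pass the converted timeout. */
        do_wait(&ts);

        /* ETNA_WAIT_NONBLOCK-style path: drop the timeout entirely. */
        flags |= WAIT_NONBLOCK;
        do_wait((flags & WAIT_NONBLOCK) ? NULL : &ts);

        return 0;
}
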
+MODULE_ALIAS("platform:etnaviv"); diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.h b/drivers/gpu/drm/etnaviv/etnaviv_drv.h new file mode 100644 index 000000000000..d6bd438bd5be --- /dev/null +++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.h @@ -0,0 +1,161 @@ +/* + * Copyright (C) 2015 Etnaviv Project + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + */ + +#ifndef __ETNAVIV_DRV_H__ +#define __ETNAVIV_DRV_H__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +struct etnaviv_cmdbuf; +struct etnaviv_gpu; +struct etnaviv_mmu; +struct etnaviv_gem_object; +struct etnaviv_gem_submit; + +struct etnaviv_file_private { + /* currently we don't do anything useful with this.. but when + * per-context address spaces are supported we'd keep track of + * the context's page-tables here. + */ + int dummy; +}; + +struct etnaviv_drm_private { + int num_gpus; + struct etnaviv_gpu *gpu[ETNA_MAX_PIPES]; + + /* list of GEM objects: */ + struct mutex gem_lock; + struct list_head gem_list; + + struct workqueue_struct *wq; +}; + +static inline void etnaviv_queue_work(struct drm_device *dev, + struct work_struct *w) +{ + struct etnaviv_drm_private *priv = dev->dev_private; + + queue_work(priv->wq, w); +} + +int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data, + struct drm_file *file); + +int etnaviv_gem_mmap(struct file *filp, struct vm_area_struct *vma); +int etnaviv_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); +int etnaviv_gem_mmap_offset(struct drm_gem_object *obj, u64 *offset); +int etnaviv_gem_get_iova(struct etnaviv_gpu *gpu, + struct drm_gem_object *obj, u32 *iova); +void etnaviv_gem_put_iova(struct etnaviv_gpu *gpu, struct drm_gem_object *obj); +struct sg_table *etnaviv_gem_prime_get_sg_table(struct drm_gem_object *obj); +void *etnaviv_gem_prime_vmap(struct drm_gem_object *obj); +void etnaviv_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr); +struct drm_gem_object *etnaviv_gem_prime_import_sg_table(struct drm_device *dev, + struct dma_buf_attachment *attach, struct sg_table *sg); +int etnaviv_gem_prime_pin(struct drm_gem_object *obj); +void etnaviv_gem_prime_unpin(struct drm_gem_object *obj); +void *etnaviv_gem_vaddr(struct drm_gem_object *obj); +int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op, + struct timespec *timeout); +int etnaviv_gem_cpu_fini(struct drm_gem_object *obj); +void etnaviv_gem_free_object(struct drm_gem_object *obj); +int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file, + u32 size, u32 flags, u32 *handle); +struct drm_gem_object *etnaviv_gem_new_locked(struct drm_device *dev, + u32 size, u32 flags); +struct drm_gem_object *etnaviv_gem_new(struct drm_device *dev, + u32 size, u32 flags); +int etnaviv_gem_new_userptr(struct drm_device *dev, struct drm_file *file, + uintptr_t ptr, u32 size, u32 flags, u32 *handle); +u16 etnaviv_buffer_init(struct etnaviv_gpu *gpu); +void etnaviv_buffer_end(struct 
etnaviv_gpu *gpu); +void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event, + struct etnaviv_cmdbuf *cmdbuf); +void etnaviv_validate_init(void); +bool etnaviv_cmd_validate_one(struct etnaviv_gpu *gpu, + u32 *stream, unsigned int size, + struct drm_etnaviv_gem_submit_reloc *relocs, unsigned int reloc_size); + +#ifdef CONFIG_DEBUG_FS +void etnaviv_gem_describe_objects(struct etnaviv_drm_private *priv, + struct seq_file *m); +#endif + +void __iomem *etnaviv_ioremap(struct platform_device *pdev, const char *name, + const char *dbgname); +void etnaviv_writel(u32 data, void __iomem *addr); +u32 etnaviv_readl(const void __iomem *addr); + +#define DBG(fmt, ...) DRM_DEBUG(fmt"\n", ##__VA_ARGS__) +#define VERB(fmt, ...) if (0) DRM_DEBUG(fmt"\n", ##__VA_ARGS__) + +/* + * Return the storage size of a structure with a variable length array. + * The array is nelem elements of elem_size, where the base structure + * is defined by base. If the size overflows size_t, return zero. + */ +static inline size_t size_vstruct(size_t nelem, size_t elem_size, size_t base) +{ + if (elem_size && nelem > (SIZE_MAX - base) / elem_size) + return 0; + return base + nelem * elem_size; +} + +/* returns true if fence a comes after fence b */ +static inline bool fence_after(u32 a, u32 b) +{ + return (s32)(a - b) > 0; +} + +static inline bool fence_after_eq(u32 a, u32 b) +{ + return (s32)(a - b) >= 0; +} + +static inline unsigned long etnaviv_timeout_to_jiffies( + const struct timespec *timeout) +{ + unsigned long timeout_jiffies = timespec_to_jiffies(timeout); + unsigned long start_jiffies = jiffies; + unsigned long remaining_jiffies; + + if (time_after(start_jiffies, timeout_jiffies)) + remaining_jiffies = 0; + else + remaining_jiffies = timeout_jiffies - start_jiffies; + + return remaining_jiffies; +} + +#endif /* __ETNAVIV_DRV_H__ */ diff --git a/drivers/gpu/drm/etnaviv/etnaviv_dump.c b/drivers/gpu/drm/etnaviv/etnaviv_dump.c new file mode 100644 index 000000000000..bf8fa859e8be --- /dev/null +++ b/drivers/gpu/drm/etnaviv/etnaviv_dump.c @@ -0,0 +1,227 @@ +/* + * Copyright (C) 2015 Etnaviv Project + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . 
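
fence_after() and fence_after_eq() above compare 32-bit fence sequence numbers through a signed difference, so the result stays correct across wrap-around as long as the two values are less than 2^31 apart. A small standalone illustration of why that works:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

/* Same trick as the driver's fence_after(): wrap-safe "a is newer than b". */
static bool fence_after(uint32_t a, uint32_t b)
{
        /* Only meaningful while the two fences are less than 2^31 apart. */
        return (int32_t)(a - b) > 0;
}

int main(void)
{
        /* Plain case: fence 5 was emitted after fence 3. */
        printf("%d\n", fence_after(5, 3));              /* prints 1 */

        /* Wrap-around case: fence 2 was emitted after fence 0xfffffffe. */
        printf("%d\n", fence_after(2, 0xfffffffeu));    /* prints 1 */

        /* A naive unsigned a > b comparison gets the wrapped case wrong. */
        printf("%d\n", 2 > 0xfffffffeu);                /* prints 0 */

        return 0;
}
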
+ */ + +#include +#include "etnaviv_dump.h" +#include "etnaviv_gem.h" +#include "etnaviv_gpu.h" +#include "etnaviv_mmu.h" +#include "state.xml.h" +#include "state_hi.xml.h" + +struct core_dump_iterator { + void *start; + struct etnaviv_dump_object_header *hdr; + void *data; +}; + +static const unsigned short etnaviv_dump_registers[] = { + VIVS_HI_AXI_STATUS, + VIVS_HI_CLOCK_CONTROL, + VIVS_HI_IDLE_STATE, + VIVS_HI_AXI_CONFIG, + VIVS_HI_INTR_ENBL, + VIVS_HI_CHIP_IDENTITY, + VIVS_HI_CHIP_FEATURE, + VIVS_HI_CHIP_MODEL, + VIVS_HI_CHIP_REV, + VIVS_HI_CHIP_DATE, + VIVS_HI_CHIP_TIME, + VIVS_HI_CHIP_MINOR_FEATURE_0, + VIVS_HI_CACHE_CONTROL, + VIVS_HI_AXI_CONTROL, + VIVS_PM_POWER_CONTROLS, + VIVS_PM_MODULE_CONTROLS, + VIVS_PM_MODULE_STATUS, + VIVS_PM_PULSE_EATER, + VIVS_MC_MMU_FE_PAGE_TABLE, + VIVS_MC_MMU_TX_PAGE_TABLE, + VIVS_MC_MMU_PE_PAGE_TABLE, + VIVS_MC_MMU_PEZ_PAGE_TABLE, + VIVS_MC_MMU_RA_PAGE_TABLE, + VIVS_MC_DEBUG_MEMORY, + VIVS_MC_MEMORY_BASE_ADDR_RA, + VIVS_MC_MEMORY_BASE_ADDR_FE, + VIVS_MC_MEMORY_BASE_ADDR_TX, + VIVS_MC_MEMORY_BASE_ADDR_PEZ, + VIVS_MC_MEMORY_BASE_ADDR_PE, + VIVS_MC_MEMORY_TIMING_CONTROL, + VIVS_MC_BUS_CONFIG, + VIVS_FE_DMA_STATUS, + VIVS_FE_DMA_DEBUG_STATE, + VIVS_FE_DMA_ADDRESS, + VIVS_FE_DMA_LOW, + VIVS_FE_DMA_HIGH, + VIVS_FE_AUTO_FLUSH, +}; + +static void etnaviv_core_dump_header(struct core_dump_iterator *iter, + u32 type, void *data_end) +{ + struct etnaviv_dump_object_header *hdr = iter->hdr; + + hdr->magic = cpu_to_le32(ETDUMP_MAGIC); + hdr->type = cpu_to_le32(type); + hdr->file_offset = cpu_to_le32(iter->data - iter->start); + hdr->file_size = cpu_to_le32(data_end - iter->data); + + iter->hdr++; + iter->data += hdr->file_size; +} + +static void etnaviv_core_dump_registers(struct core_dump_iterator *iter, + struct etnaviv_gpu *gpu) +{ + struct etnaviv_dump_registers *reg = iter->data; + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(etnaviv_dump_registers); i++, reg++) { + reg->reg = etnaviv_dump_registers[i]; + reg->value = gpu_read(gpu, etnaviv_dump_registers[i]); + } + + etnaviv_core_dump_header(iter, ETDUMP_BUF_REG, reg); +} + +static void etnaviv_core_dump_mmu(struct core_dump_iterator *iter, + struct etnaviv_gpu *gpu, size_t mmu_size) +{ + etnaviv_iommu_dump(gpu->mmu, iter->data); + + etnaviv_core_dump_header(iter, ETDUMP_BUF_MMU, iter->data + mmu_size); +} + +static void etnaviv_core_dump_mem(struct core_dump_iterator *iter, u32 type, + void *ptr, size_t size, u64 iova) +{ + memcpy(iter->data, ptr, size); + + iter->hdr->iova = cpu_to_le64(iova); + + etnaviv_core_dump_header(iter, type, iter->data + size); +} + +void etnaviv_core_dump(struct etnaviv_gpu *gpu) +{ + struct core_dump_iterator iter; + struct etnaviv_vram_mapping *vram; + struct etnaviv_gem_object *obj; + struct etnaviv_cmdbuf *cmd; + unsigned int n_obj, n_bomap_pages; + size_t file_size, mmu_size; + __le64 *bomap, *bomap_start; + + mmu_size = etnaviv_iommu_dump_size(gpu->mmu); + + /* We always dump registers, mmu, ring and end marker */ + n_obj = 4; + n_bomap_pages = 0; + file_size = ARRAY_SIZE(etnaviv_dump_registers) * + sizeof(struct etnaviv_dump_registers) + + mmu_size + gpu->buffer->size; + + /* Add in the active command buffers */ + list_for_each_entry(cmd, &gpu->active_cmd_list, node) { + file_size += cmd->size; + n_obj++; + } + + /* Add in the active buffer objects */ + list_for_each_entry(vram, &gpu->mmu->mappings, mmu_node) { + if (!vram->use) + continue; + + obj = vram->object; + file_size += obj->base.size; + n_bomap_pages += obj->base.size >> PAGE_SHIFT; + n_obj++; + } + + /* If we 
have any buffer objects, add a bomap object */ + if (n_bomap_pages) { + file_size += n_bomap_pages * sizeof(__le64); + n_obj++; + } + + /* Add the size of the headers */ + file_size += sizeof(*iter.hdr) * n_obj; + + /* Allocate the file in vmalloc memory, it's likely to be big */ + iter.start = vmalloc(file_size); + if (!iter.start) { + dev_warn(gpu->dev, "failed to allocate devcoredump file\n"); + return; + } + + /* Point the data member after the headers */ + iter.hdr = iter.start; + iter.data = &iter.hdr[n_obj]; + + memset(iter.hdr, 0, iter.data - iter.start); + + etnaviv_core_dump_registers(&iter, gpu); + etnaviv_core_dump_mmu(&iter, gpu, mmu_size); + etnaviv_core_dump_mem(&iter, ETDUMP_BUF_RING, gpu->buffer->vaddr, + gpu->buffer->size, gpu->buffer->paddr); + + list_for_each_entry(cmd, &gpu->active_cmd_list, node) + etnaviv_core_dump_mem(&iter, ETDUMP_BUF_CMD, cmd->vaddr, + cmd->size, cmd->paddr); + + /* Reserve space for the bomap */ + if (n_bomap_pages) { + bomap_start = bomap = iter.data; + memset(bomap, 0, sizeof(*bomap) * n_bomap_pages); + etnaviv_core_dump_header(&iter, ETDUMP_BUF_BOMAP, + bomap + n_bomap_pages); + } else { + /* Silence warning */ + bomap_start = bomap = NULL; + } + + list_for_each_entry(vram, &gpu->mmu->mappings, mmu_node) { + struct page **pages; + void *vaddr; + + if (vram->use == 0) + continue; + + obj = vram->object; + + pages = etnaviv_gem_get_pages(obj); + if (pages) { + int j; + + iter.hdr->data[0] = bomap - bomap_start; + + for (j = 0; j < obj->base.size >> PAGE_SHIFT; j++) + *bomap++ = cpu_to_le64(page_to_phys(*pages++)); + } + + iter.hdr->iova = cpu_to_le64(vram->iova); + + vaddr = etnaviv_gem_vaddr(&obj->base); + if (vaddr && !IS_ERR(vaddr)) + memcpy(iter.data, vaddr, obj->base.size); + + etnaviv_core_dump_header(&iter, ETDUMP_BUF_BO, iter.data + + obj->base.size); + } + + etnaviv_core_dump_header(&iter, ETDUMP_BUF_END, iter.data); + + dev_coredumpv(gpu->dev, iter.start, iter.data - iter.start, GFP_KERNEL); +} diff --git a/drivers/gpu/drm/etnaviv/etnaviv_dump.h b/drivers/gpu/drm/etnaviv/etnaviv_dump.h new file mode 100644 index 000000000000..97f2f8db9133 --- /dev/null +++ b/drivers/gpu/drm/etnaviv/etnaviv_dump.h @@ -0,0 +1,54 @@ +/* + * Copyright (C) 2015 Etnaviv Project + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . 
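
etnaviv_core_dump() above sizes the devcoredump file up front, places all object headers at the start and appends each payload behind them, with etnaviv_core_dump_header() recording the payload's offset and size as it advances both cursors. A simplified userspace sketch of that iterator, using a cut-down header with assumed fields:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>

/* Simplified stand-in for the dump object header; fields assumed. */
struct dump_hdr {
        uint32_t type;
        uint32_t file_offset;
        uint32_t file_size;
};

struct dump_iter {
        void *start;
        struct dump_hdr *hdr;
        void *data;
};

/* Mirrors etnaviv_core_dump_header(): finish one object, advance cursors. */
static void dump_header(struct dump_iter *iter, uint32_t type, void *data_end)
{
        iter->hdr->type = type;
        iter->hdr->file_offset = (uint32_t)((char *)iter->data - (char *)iter->start);
        iter->hdr->file_size = (uint32_t)((char *)data_end - (char *)iter->data);
        iter->data = (char *)iter->data + iter->hdr->file_size;
        iter->hdr++;
}

int main(void)
{
        const char *blobs[] = { "registers", "mmu tables", "ring buffer" };
        unsigned int i, n_obj = 3;
        size_t file_size = n_obj * sizeof(struct dump_hdr);
        struct dump_iter iter;

        for (i = 0; i < n_obj; i++)
                file_size += strlen(blobs[i]);

        iter.start = calloc(1, file_size);
        if (!iter.start)
                return 1;

        /* Headers come first, payload data starts right after them. */
        iter.hdr = iter.start;
        iter.data = &iter.hdr[n_obj];

        for (i = 0; i < n_obj; i++) {
                size_t len = strlen(blobs[i]);

                memcpy(iter.data, blobs[i], len);
                dump_header(&iter, i, (char *)iter.data + len);
        }

        for (i = 0; i < n_obj; i++) {
                struct dump_hdr *h = (struct dump_hdr *)iter.start + i;

                printf("obj %u: offset %u, size %u\n", i,
                       (unsigned int)h->file_offset, (unsigned int)h->file_size);
        }

        free(iter.start);
        return 0;
}
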
+ * + * Etnaviv devcoredump file definitions + */ +#ifndef ETNAVIV_DUMP_H +#define ETNAVIV_DUMP_H + +#include + +enum { + ETDUMP_MAGIC = 0x414e5445, + ETDUMP_BUF_REG = 0, + ETDUMP_BUF_MMU, + ETDUMP_BUF_RING, + ETDUMP_BUF_CMD, + ETDUMP_BUF_BOMAP, + ETDUMP_BUF_BO, + ETDUMP_BUF_END, +}; + +struct etnaviv_dump_object_header { + __le32 magic; + __le32 type; + __le32 file_offset; + __le32 file_size; + __le64 iova; + __le32 data[2]; +}; + +/* Registers object, an array of these */ +struct etnaviv_dump_registers { + __le32 reg; + __le32 value; +}; + +#ifdef __KERNEL__ +struct etnaviv_gpu; +void etnaviv_core_dump(struct etnaviv_gpu *gpu); +#endif + +#endif diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c new file mode 100644 index 000000000000..9f77c3b94cc6 --- /dev/null +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c @@ -0,0 +1,899 @@ +/* + * Copyright (C) 2015 Etnaviv Project + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + */ + +#include +#include + +#include "etnaviv_drv.h" +#include "etnaviv_gem.h" +#include "etnaviv_gpu.h" +#include "etnaviv_mmu.h" + +static void etnaviv_gem_scatter_map(struct etnaviv_gem_object *etnaviv_obj) +{ + struct drm_device *dev = etnaviv_obj->base.dev; + struct sg_table *sgt = etnaviv_obj->sgt; + + /* + * For non-cached buffers, ensure the new pages are clean + * because display controller, GPU, etc. are not coherent. + */ + if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK) + dma_map_sg(dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL); +} + +static void etnaviv_gem_scatterlist_unmap(struct etnaviv_gem_object *etnaviv_obj) +{ + struct drm_device *dev = etnaviv_obj->base.dev; + struct sg_table *sgt = etnaviv_obj->sgt; + + /* + * For non-cached buffers, ensure the new pages are clean + * because display controller, GPU, etc. are not coherent: + * + * WARNING: The DMA API does not support concurrent CPU + * and device access to the memory area. With BIDIRECTIONAL, + * we will clean the cache lines which overlap the region, + * and invalidate all cache lines (partially) contained in + * the region. + * + * If you have dirty data in the overlapping cache lines, + * that will corrupt the GPU-written data. If you have + * written into the remainder of the region, this can + * discard those writes. 
+ */ + if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK) + dma_unmap_sg(dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL); +} + +/* called with etnaviv_obj->lock held */ +static int etnaviv_gem_shmem_get_pages(struct etnaviv_gem_object *etnaviv_obj) +{ + struct drm_device *dev = etnaviv_obj->base.dev; + struct page **p = drm_gem_get_pages(&etnaviv_obj->base); + + if (IS_ERR(p)) { + dev_err(dev->dev, "could not get pages: %ld\n", PTR_ERR(p)); + return PTR_ERR(p); + } + + etnaviv_obj->pages = p; + + return 0; +} + +static void put_pages(struct etnaviv_gem_object *etnaviv_obj) +{ + if (etnaviv_obj->sgt) { + etnaviv_gem_scatterlist_unmap(etnaviv_obj); + sg_free_table(etnaviv_obj->sgt); + kfree(etnaviv_obj->sgt); + etnaviv_obj->sgt = NULL; + } + if (etnaviv_obj->pages) { + drm_gem_put_pages(&etnaviv_obj->base, etnaviv_obj->pages, + true, false); + + etnaviv_obj->pages = NULL; + } +} + +struct page **etnaviv_gem_get_pages(struct etnaviv_gem_object *etnaviv_obj) +{ + int ret; + + lockdep_assert_held(&etnaviv_obj->lock); + + if (!etnaviv_obj->pages) { + ret = etnaviv_obj->ops->get_pages(etnaviv_obj); + if (ret < 0) + return ERR_PTR(ret); + } + + if (!etnaviv_obj->sgt) { + struct drm_device *dev = etnaviv_obj->base.dev; + int npages = etnaviv_obj->base.size >> PAGE_SHIFT; + struct sg_table *sgt; + + sgt = drm_prime_pages_to_sg(etnaviv_obj->pages, npages); + if (IS_ERR(sgt)) { + dev_err(dev->dev, "failed to allocate sgt: %ld\n", + PTR_ERR(sgt)); + return ERR_CAST(sgt); + } + + etnaviv_obj->sgt = sgt; + + etnaviv_gem_scatter_map(etnaviv_obj); + } + + return etnaviv_obj->pages; +} + +void etnaviv_gem_put_pages(struct etnaviv_gem_object *etnaviv_obj) +{ + lockdep_assert_held(&etnaviv_obj->lock); + /* when we start tracking the pin count, then do something here */ +} + +static int etnaviv_gem_mmap_obj(struct drm_gem_object *obj, + struct vm_area_struct *vma) +{ + struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj); + pgprot_t vm_page_prot; + + vma->vm_flags &= ~VM_PFNMAP; + vma->vm_flags |= VM_MIXEDMAP; + + vm_page_prot = vm_get_page_prot(vma->vm_flags); + + if (etnaviv_obj->flags & ETNA_BO_WC) { + vma->vm_page_prot = pgprot_writecombine(vm_page_prot); + } else if (etnaviv_obj->flags & ETNA_BO_UNCACHED) { + vma->vm_page_prot = pgprot_noncached(vm_page_prot); + } else { + /* + * Shunt off cached objs to shmem file so they have their own + * address_space (so unmap_mapping_range does what we want, + * in particular in the case of mmap'd dmabufs) + */ + fput(vma->vm_file); + get_file(obj->filp); + vma->vm_pgoff = 0; + vma->vm_file = obj->filp; + + vma->vm_page_prot = vm_page_prot; + } + + return 0; +} + +int etnaviv_gem_mmap(struct file *filp, struct vm_area_struct *vma) +{ + struct etnaviv_gem_object *obj; + int ret; + + ret = drm_gem_mmap(filp, vma); + if (ret) { + DBG("mmap failed: %d", ret); + return ret; + } + + obj = to_etnaviv_bo(vma->vm_private_data); + return etnaviv_gem_mmap_obj(vma->vm_private_data, vma); +} + +int etnaviv_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) +{ + struct drm_gem_object *obj = vma->vm_private_data; + struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj); + struct page **pages, *page; + pgoff_t pgoff; + int ret; + + /* + * Make sure we don't parallel update on a fault, nor move or remove + * something from beneath our feet. Note that vm_insert_page() is + * specifically coded to take care of this, so we don't have to. 
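
etnaviv_gem_get_pages() above fills in the page array and scatterlist lazily, on first use, with the caller holding the object mutex (enforced via lockdep_assert_held()). A stripped-down standalone sketch of the same lazy-initialisation pattern, with a pthread mutex standing in for the object lock:

#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>

/* Toy object: the "pages" array is only allocated on first use. */
struct toy_obj {
        pthread_mutex_t lock;
        int *pages;
        size_t npages;
};

/* Caller must hold obj->lock, like etnaviv_gem_get_pages(). */
static int *toy_get_pages(struct toy_obj *obj)
{
        if (!obj->pages) {
                obj->pages = calloc(obj->npages, sizeof(*obj->pages));
                if (!obj->pages)
                        return NULL;
                printf("allocated %zu pages lazily\n", obj->npages);
        }
        return obj->pages;
}

int main(void)
{
        struct toy_obj obj = {
                .lock = PTHREAD_MUTEX_INITIALIZER,
                .npages = 16,
        };
        int *pages;

        pthread_mutex_lock(&obj.lock);
        pages = toy_get_pages(&obj);    /* first call allocates */
        pages = toy_get_pages(&obj);    /* second call reuses the array */
        pthread_mutex_unlock(&obj.lock);

        free(obj.pages);
        return pages ? 0 : 1;
}
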
+ */ + ret = mutex_lock_interruptible(&etnaviv_obj->lock); + if (ret) + goto out; + + /* make sure we have pages attached now */ + pages = etnaviv_gem_get_pages(etnaviv_obj); + mutex_unlock(&etnaviv_obj->lock); + + if (IS_ERR(pages)) { + ret = PTR_ERR(pages); + goto out; + } + + /* We don't use vmf->pgoff since that has the fake offset: */ + pgoff = ((unsigned long)vmf->virtual_address - + vma->vm_start) >> PAGE_SHIFT; + + page = pages[pgoff]; + + VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address, + page_to_pfn(page), page_to_pfn(page) << PAGE_SHIFT); + + ret = vm_insert_page(vma, (unsigned long)vmf->virtual_address, page); + +out: + switch (ret) { + case -EAGAIN: + case 0: + case -ERESTARTSYS: + case -EINTR: + case -EBUSY: + /* + * EBUSY is ok: this just means that another thread + * already did the job. + */ + return VM_FAULT_NOPAGE; + case -ENOMEM: + return VM_FAULT_OOM; + default: + return VM_FAULT_SIGBUS; + } +} + +int etnaviv_gem_mmap_offset(struct drm_gem_object *obj, u64 *offset) +{ + int ret; + + /* Make it mmapable */ + ret = drm_gem_create_mmap_offset(obj); + if (ret) + dev_err(obj->dev->dev, "could not allocate mmap offset\n"); + else + *offset = drm_vma_node_offset_addr(&obj->vma_node); + + return ret; +} + +static struct etnaviv_vram_mapping * +etnaviv_gem_get_vram_mapping(struct etnaviv_gem_object *obj, + struct etnaviv_iommu *mmu) +{ + struct etnaviv_vram_mapping *mapping; + + list_for_each_entry(mapping, &obj->vram_list, obj_node) { + if (mapping->mmu == mmu) + return mapping; + } + + return NULL; +} + +int etnaviv_gem_get_iova(struct etnaviv_gpu *gpu, + struct drm_gem_object *obj, u32 *iova) +{ + struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj); + struct etnaviv_vram_mapping *mapping; + struct page **pages; + int ret = 0; + + mutex_lock(&etnaviv_obj->lock); + mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, gpu->mmu); + if (mapping) { + /* + * Holding the object lock prevents the use count changing + * beneath us. If the use count is zero, the MMU might be + * reaping this object, so take the lock and re-check that + * the MMU owns this mapping to close this race. + */ + if (mapping->use == 0) { + mutex_lock(&gpu->mmu->lock); + if (mapping->mmu == gpu->mmu) + mapping->use += 1; + else + mapping = NULL; + mutex_unlock(&gpu->mmu->lock); + if (mapping) + goto out; + } else { + mapping->use += 1; + goto out; + } + } + + pages = etnaviv_gem_get_pages(etnaviv_obj); + if (IS_ERR(pages)) { + ret = PTR_ERR(pages); + goto out; + } + + /* + * See if we have a reaped vram mapping we can re-use before + * allocating a fresh mapping. 
+ */ + mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, NULL); + if (!mapping) { + mapping = kzalloc(sizeof(*mapping), GFP_KERNEL); + if (!mapping) { + ret = -ENOMEM; + goto out; + } + + INIT_LIST_HEAD(&mapping->scan_node); + mapping->object = etnaviv_obj; + } else { + list_del(&mapping->obj_node); + } + + mapping->mmu = gpu->mmu; + mapping->use = 1; + + ret = etnaviv_iommu_map_gem(gpu->mmu, etnaviv_obj, gpu->memory_base, + mapping); + if (ret < 0) + kfree(mapping); + else + list_add_tail(&mapping->obj_node, &etnaviv_obj->vram_list); + +out: + mutex_unlock(&etnaviv_obj->lock); + + if (!ret) { + /* Take a reference on the object */ + drm_gem_object_reference(obj); + *iova = mapping->iova; + } + + return ret; +} + +void etnaviv_gem_put_iova(struct etnaviv_gpu *gpu, struct drm_gem_object *obj) +{ + struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj); + struct etnaviv_vram_mapping *mapping; + + mutex_lock(&etnaviv_obj->lock); + mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, gpu->mmu); + + WARN_ON(mapping->use == 0); + mapping->use -= 1; + mutex_unlock(&etnaviv_obj->lock); + + drm_gem_object_unreference_unlocked(obj); +} + +void *etnaviv_gem_vaddr(struct drm_gem_object *obj) +{ + struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj); + + mutex_lock(&etnaviv_obj->lock); + if (!etnaviv_obj->vaddr) { + struct page **pages = etnaviv_gem_get_pages(etnaviv_obj); + + if (IS_ERR(pages)) + return ERR_CAST(pages); + + etnaviv_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT, + VM_MAP, pgprot_writecombine(PAGE_KERNEL)); + } + mutex_unlock(&etnaviv_obj->lock); + + return etnaviv_obj->vaddr; +} + +static inline enum dma_data_direction etnaviv_op_to_dma_dir(u32 op) +{ + if (op & ETNA_PREP_READ) + return DMA_FROM_DEVICE; + else if (op & ETNA_PREP_WRITE) + return DMA_TO_DEVICE; + else + return DMA_BIDIRECTIONAL; +} + +int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op, + struct timespec *timeout) +{ + struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj); + struct drm_device *dev = obj->dev; + bool write = !!(op & ETNA_PREP_WRITE); + int ret; + + if (op & ETNA_PREP_NOSYNC) { + if (!reservation_object_test_signaled_rcu(etnaviv_obj->resv, + write)) + return -EBUSY; + } else { + unsigned long remain = etnaviv_timeout_to_jiffies(timeout); + + ret = reservation_object_wait_timeout_rcu(etnaviv_obj->resv, + write, true, remain); + if (ret <= 0) + return ret == 0 ? 
-ETIMEDOUT : ret; + } + + if (etnaviv_obj->flags & ETNA_BO_CACHED) { + if (!etnaviv_obj->sgt) { + void *ret; + + mutex_lock(&etnaviv_obj->lock); + ret = etnaviv_gem_get_pages(etnaviv_obj); + mutex_unlock(&etnaviv_obj->lock); + if (IS_ERR(ret)) + return PTR_ERR(ret); + } + + dma_sync_sg_for_cpu(dev->dev, etnaviv_obj->sgt->sgl, + etnaviv_obj->sgt->nents, + etnaviv_op_to_dma_dir(op)); + etnaviv_obj->last_cpu_prep_op = op; + } + + return 0; +} + +int etnaviv_gem_cpu_fini(struct drm_gem_object *obj) +{ + struct drm_device *dev = obj->dev; + struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj); + + if (etnaviv_obj->flags & ETNA_BO_CACHED) { + /* fini without a prep is almost certainly a userspace error */ + WARN_ON(etnaviv_obj->last_cpu_prep_op == 0); + dma_sync_sg_for_device(dev->dev, etnaviv_obj->sgt->sgl, + etnaviv_obj->sgt->nents, + etnaviv_op_to_dma_dir(etnaviv_obj->last_cpu_prep_op)); + etnaviv_obj->last_cpu_prep_op = 0; + } + + return 0; +} + +int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj, + struct timespec *timeout) +{ + struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj); + + return etnaviv_gpu_wait_obj_inactive(gpu, etnaviv_obj, timeout); +} + +#ifdef CONFIG_DEBUG_FS +static void etnaviv_gem_describe_fence(struct fence *fence, + const char *type, struct seq_file *m) +{ + if (!test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) + seq_printf(m, "\t%9s: %s %s seq %u\n", + type, + fence->ops->get_driver_name(fence), + fence->ops->get_timeline_name(fence), + fence->seqno); +} + +static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m) +{ + struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj); + struct reservation_object *robj = etnaviv_obj->resv; + struct reservation_object_list *fobj; + struct fence *fence; + unsigned long off = drm_vma_node_start(&obj->vma_node); + + seq_printf(m, "%08x: %c %2d (%2d) %08lx %p %zd\n", + etnaviv_obj->flags, is_active(etnaviv_obj) ? 
'A' : 'I', + obj->name, obj->refcount.refcount.counter, + off, etnaviv_obj->vaddr, obj->size); + + rcu_read_lock(); + fobj = rcu_dereference(robj->fence); + if (fobj) { + unsigned int i, shared_count = fobj->shared_count; + + for (i = 0; i < shared_count; i++) { + fence = rcu_dereference(fobj->shared[i]); + etnaviv_gem_describe_fence(fence, "Shared", m); + } + } + + fence = rcu_dereference(robj->fence_excl); + if (fence) + etnaviv_gem_describe_fence(fence, "Exclusive", m); + rcu_read_unlock(); +} + +void etnaviv_gem_describe_objects(struct etnaviv_drm_private *priv, + struct seq_file *m) +{ + struct etnaviv_gem_object *etnaviv_obj; + int count = 0; + size_t size = 0; + + mutex_lock(&priv->gem_lock); + list_for_each_entry(etnaviv_obj, &priv->gem_list, gem_node) { + struct drm_gem_object *obj = &etnaviv_obj->base; + + seq_puts(m, " "); + etnaviv_gem_describe(obj, m); + count++; + size += obj->size; + } + mutex_unlock(&priv->gem_lock); + + seq_printf(m, "Total %d objects, %zu bytes\n", count, size); +} +#endif + +static void etnaviv_gem_shmem_release(struct etnaviv_gem_object *etnaviv_obj) +{ + if (etnaviv_obj->vaddr) + vunmap(etnaviv_obj->vaddr); + put_pages(etnaviv_obj); +} + +static const struct etnaviv_gem_ops etnaviv_gem_shmem_ops = { + .get_pages = etnaviv_gem_shmem_get_pages, + .release = etnaviv_gem_shmem_release, +}; + +void etnaviv_gem_free_object(struct drm_gem_object *obj) +{ + struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj); + struct etnaviv_vram_mapping *mapping, *tmp; + + /* object should not be active */ + WARN_ON(is_active(etnaviv_obj)); + + list_del(&etnaviv_obj->gem_node); + + list_for_each_entry_safe(mapping, tmp, &etnaviv_obj->vram_list, + obj_node) { + struct etnaviv_iommu *mmu = mapping->mmu; + + WARN_ON(mapping->use); + + if (mmu) + etnaviv_iommu_unmap_gem(mmu, mapping); + + list_del(&mapping->obj_node); + kfree(mapping); + } + + drm_gem_free_mmap_offset(obj); + etnaviv_obj->ops->release(etnaviv_obj); + if (etnaviv_obj->resv == &etnaviv_obj->_resv) + reservation_object_fini(&etnaviv_obj->_resv); + drm_gem_object_release(obj); + + kfree(etnaviv_obj); +} + +int etnaviv_gem_obj_add(struct drm_device *dev, struct drm_gem_object *obj) +{ + struct etnaviv_drm_private *priv = dev->dev_private; + struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj); + + mutex_lock(&priv->gem_lock); + list_add_tail(&etnaviv_obj->gem_node, &priv->gem_list); + mutex_unlock(&priv->gem_lock); + + return 0; +} + +static int etnaviv_gem_new_impl(struct drm_device *dev, u32 size, u32 flags, + struct reservation_object *robj, const struct etnaviv_gem_ops *ops, + struct drm_gem_object **obj) +{ + struct etnaviv_gem_object *etnaviv_obj; + unsigned sz = sizeof(*etnaviv_obj); + bool valid = true; + + /* validate flags */ + switch (flags & ETNA_BO_CACHE_MASK) { + case ETNA_BO_UNCACHED: + case ETNA_BO_CACHED: + case ETNA_BO_WC: + break; + default: + valid = false; + } + + if (!valid) { + dev_err(dev->dev, "invalid cache flag: %x\n", + (flags & ETNA_BO_CACHE_MASK)); + return -EINVAL; + } + + etnaviv_obj = kzalloc(sz, GFP_KERNEL); + if (!etnaviv_obj) + return -ENOMEM; + + etnaviv_obj->flags = flags; + etnaviv_obj->ops = ops; + if (robj) { + etnaviv_obj->resv = robj; + } else { + etnaviv_obj->resv = &etnaviv_obj->_resv; + reservation_object_init(&etnaviv_obj->_resv); + } + + mutex_init(&etnaviv_obj->lock); + INIT_LIST_HEAD(&etnaviv_obj->vram_list); + + *obj = &etnaviv_obj->base; + + return 0; +} + +static struct drm_gem_object *__etnaviv_gem_new(struct drm_device *dev, + u32 size, u32 flags) 
+{ + struct drm_gem_object *obj = NULL; + int ret; + + size = PAGE_ALIGN(size); + + ret = etnaviv_gem_new_impl(dev, size, flags, NULL, + &etnaviv_gem_shmem_ops, &obj); + if (ret) + goto fail; + + ret = drm_gem_object_init(dev, obj, size); + if (ret == 0) { + struct address_space *mapping; + + /* + * Our buffers are kept pinned, so allocating them + * from the MOVABLE zone is a really bad idea, and + * conflicts with CMA. See coments above new_inode() + * why this is required _and_ expected if you're + * going to pin these pages. + */ + mapping = file_inode(obj->filp)->i_mapping; + mapping_set_gfp_mask(mapping, GFP_HIGHUSER); + } + + if (ret) + goto fail; + + return obj; + +fail: + if (obj) + drm_gem_object_unreference_unlocked(obj); + + return ERR_PTR(ret); +} + +/* convenience method to construct a GEM buffer object, and userspace handle */ +int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file, + u32 size, u32 flags, u32 *handle) +{ + struct drm_gem_object *obj; + int ret; + + obj = __etnaviv_gem_new(dev, size, flags); + if (IS_ERR(obj)) + return PTR_ERR(obj); + + ret = etnaviv_gem_obj_add(dev, obj); + if (ret < 0) { + drm_gem_object_unreference_unlocked(obj); + return ret; + } + + ret = drm_gem_handle_create(file, obj, handle); + + /* drop reference from allocate - handle holds it now */ + drm_gem_object_unreference_unlocked(obj); + + return ret; +} + +struct drm_gem_object *etnaviv_gem_new(struct drm_device *dev, + u32 size, u32 flags) +{ + struct drm_gem_object *obj; + int ret; + + obj = __etnaviv_gem_new(dev, size, flags); + if (IS_ERR(obj)) + return obj; + + ret = etnaviv_gem_obj_add(dev, obj); + if (ret < 0) { + drm_gem_object_unreference_unlocked(obj); + return ERR_PTR(ret); + } + + return obj; +} + +int etnaviv_gem_new_private(struct drm_device *dev, size_t size, u32 flags, + struct reservation_object *robj, const struct etnaviv_gem_ops *ops, + struct etnaviv_gem_object **res) +{ + struct drm_gem_object *obj; + int ret; + + ret = etnaviv_gem_new_impl(dev, size, flags, robj, ops, &obj); + if (ret) + return ret; + + drm_gem_private_object_init(dev, obj, size); + + *res = to_etnaviv_bo(obj); + + return 0; +} + +struct get_pages_work { + struct work_struct work; + struct mm_struct *mm; + struct task_struct *task; + struct etnaviv_gem_object *etnaviv_obj; +}; + +static struct page **etnaviv_gem_userptr_do_get_pages( + struct etnaviv_gem_object *etnaviv_obj, struct mm_struct *mm, struct task_struct *task) +{ + int ret = 0, pinned, npages = etnaviv_obj->base.size >> PAGE_SHIFT; + struct page **pvec; + uintptr_t ptr; + + pvec = drm_malloc_ab(npages, sizeof(struct page *)); + if (!pvec) + return ERR_PTR(-ENOMEM); + + pinned = 0; + ptr = etnaviv_obj->userptr.ptr; + + down_read(&mm->mmap_sem); + while (pinned < npages) { + ret = get_user_pages(task, mm, ptr, npages - pinned, + !etnaviv_obj->userptr.ro, 0, + pvec + pinned, NULL); + if (ret < 0) + break; + + ptr += ret * PAGE_SIZE; + pinned += ret; + } + up_read(&mm->mmap_sem); + + if (ret < 0) { + release_pages(pvec, pinned, 0); + drm_free_large(pvec); + return ERR_PTR(ret); + } + + return pvec; +} + +static void __etnaviv_gem_userptr_get_pages(struct work_struct *_work) +{ + struct get_pages_work *work = container_of(_work, typeof(*work), work); + struct etnaviv_gem_object *etnaviv_obj = work->etnaviv_obj; + struct page **pvec; + + pvec = etnaviv_gem_userptr_do_get_pages(etnaviv_obj, work->mm, work->task); + + mutex_lock(&etnaviv_obj->lock); + if (IS_ERR(pvec)) { + etnaviv_obj->userptr.work = ERR_CAST(pvec); + } else { + 
etnaviv_obj->userptr.work = NULL; + etnaviv_obj->pages = pvec; + } + + mutex_unlock(&etnaviv_obj->lock); + drm_gem_object_unreference_unlocked(&etnaviv_obj->base); + + mmput(work->mm); + put_task_struct(work->task); + kfree(work); +} + +static int etnaviv_gem_userptr_get_pages(struct etnaviv_gem_object *etnaviv_obj) +{ + struct page **pvec = NULL; + struct get_pages_work *work; + struct mm_struct *mm; + int ret, pinned, npages = etnaviv_obj->base.size >> PAGE_SHIFT; + + if (etnaviv_obj->userptr.work) { + if (IS_ERR(etnaviv_obj->userptr.work)) { + ret = PTR_ERR(etnaviv_obj->userptr.work); + etnaviv_obj->userptr.work = NULL; + } else { + ret = -EAGAIN; + } + return ret; + } + + mm = get_task_mm(etnaviv_obj->userptr.task); + pinned = 0; + if (mm == current->mm) { + pvec = drm_malloc_ab(npages, sizeof(struct page *)); + if (!pvec) { + mmput(mm); + return -ENOMEM; + } + + pinned = __get_user_pages_fast(etnaviv_obj->userptr.ptr, npages, + !etnaviv_obj->userptr.ro, pvec); + if (pinned < 0) { + drm_free_large(pvec); + mmput(mm); + return pinned; + } + + if (pinned == npages) { + etnaviv_obj->pages = pvec; + mmput(mm); + return 0; + } + } + + release_pages(pvec, pinned, 0); + drm_free_large(pvec); + + work = kmalloc(sizeof(*work), GFP_KERNEL); + if (!work) { + mmput(mm); + return -ENOMEM; + } + + get_task_struct(current); + drm_gem_object_reference(&etnaviv_obj->base); + + work->mm = mm; + work->task = current; + work->etnaviv_obj = etnaviv_obj; + + etnaviv_obj->userptr.work = &work->work; + INIT_WORK(&work->work, __etnaviv_gem_userptr_get_pages); + + etnaviv_queue_work(etnaviv_obj->base.dev, &work->work); + + return -EAGAIN; +} + +static void etnaviv_gem_userptr_release(struct etnaviv_gem_object *etnaviv_obj) +{ + if (etnaviv_obj->sgt) { + etnaviv_gem_scatterlist_unmap(etnaviv_obj); + sg_free_table(etnaviv_obj->sgt); + kfree(etnaviv_obj->sgt); + } + if (etnaviv_obj->pages) { + int npages = etnaviv_obj->base.size >> PAGE_SHIFT; + + release_pages(etnaviv_obj->pages, npages, 0); + drm_free_large(etnaviv_obj->pages); + } + put_task_struct(etnaviv_obj->userptr.task); +} + +static const struct etnaviv_gem_ops etnaviv_gem_userptr_ops = { + .get_pages = etnaviv_gem_userptr_get_pages, + .release = etnaviv_gem_userptr_release, +}; + +int etnaviv_gem_new_userptr(struct drm_device *dev, struct drm_file *file, + uintptr_t ptr, u32 size, u32 flags, u32 *handle) +{ + struct etnaviv_gem_object *etnaviv_obj; + int ret; + + ret = etnaviv_gem_new_private(dev, size, ETNA_BO_CACHED, NULL, + &etnaviv_gem_userptr_ops, &etnaviv_obj); + if (ret) + return ret; + + etnaviv_obj->userptr.ptr = ptr; + etnaviv_obj->userptr.task = current; + etnaviv_obj->userptr.ro = !(flags & ETNA_USERPTR_WRITE); + get_task_struct(current); + + ret = etnaviv_gem_obj_add(dev, &etnaviv_obj->base); + if (ret) { + drm_gem_object_unreference_unlocked(&etnaviv_obj->base); + return ret; + } + + ret = drm_gem_handle_create(file, &etnaviv_obj->base, handle); + + /* drop reference from allocate - handle holds it now */ + drm_gem_object_unreference_unlocked(&etnaviv_obj->base); + + return ret; +} diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.h b/drivers/gpu/drm/etnaviv/etnaviv_gem.h new file mode 100644 index 000000000000..a300b4b3d545 --- /dev/null +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.h @@ -0,0 +1,117 @@ +/* + * Copyright (C) 2015 Etnaviv Project + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. 
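
The GEM_USERPTR ioctl in etnaviv_drv.c requires both the user pointer and the size to be page aligned, and checks both at once by OR-ing them into a single offset_in_page() test before the userptr paths above ever pin pages. A tiny standalone version of that check (page size assumed to be 4 KiB):

#include <stdio.h>

#define PAGE_SIZE 4096ul

/* Non-zero if 'v' is not page aligned. */
static unsigned long offset_in_page(unsigned long v)
{
        return v & (PAGE_SIZE - 1);
}

/* One combined test: the OR is misaligned if either input is. */
static int userptr_args_ok(unsigned long ptr, unsigned long size)
{
        return offset_in_page(ptr | size) == 0;
}

int main(void)
{
        printf("%d\n", userptr_args_ok(0x10000000, 8192));     /* 1: both aligned */
        printf("%d\n", userptr_args_ok(0x10000100, 8192));     /* 0: pointer unaligned */
        printf("%d\n", userptr_args_ok(0x10000000, 100));      /* 0: size unaligned */
        return 0;
}
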
+ * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + */ + +#ifndef __ETNAVIV_GEM_H__ +#define __ETNAVIV_GEM_H__ + +#include +#include "etnaviv_drv.h" + +struct etnaviv_gem_ops; +struct etnaviv_gem_object; + +struct etnaviv_gem_userptr { + uintptr_t ptr; + struct task_struct *task; + struct work_struct *work; + bool ro; +}; + +struct etnaviv_vram_mapping { + struct list_head obj_node; + struct list_head scan_node; + struct list_head mmu_node; + struct etnaviv_gem_object *object; + struct etnaviv_iommu *mmu; + struct drm_mm_node vram_node; + unsigned int use; + u32 iova; +}; + +struct etnaviv_gem_object { + struct drm_gem_object base; + const struct etnaviv_gem_ops *ops; + struct mutex lock; + + u32 flags; + + struct list_head gem_node; + struct etnaviv_gpu *gpu; /* non-null if active */ + atomic_t gpu_active; + u32 access; + + struct page **pages; + struct sg_table *sgt; + void *vaddr; + + /* normally (resv == &_resv) except for imported bo's */ + struct reservation_object *resv; + struct reservation_object _resv; + + struct list_head vram_list; + + /* cache maintenance */ + u32 last_cpu_prep_op; + + struct etnaviv_gem_userptr userptr; +}; + +static inline +struct etnaviv_gem_object *to_etnaviv_bo(struct drm_gem_object *obj) +{ + return container_of(obj, struct etnaviv_gem_object, base); +} + +struct etnaviv_gem_ops { + int (*get_pages)(struct etnaviv_gem_object *); + void (*release)(struct etnaviv_gem_object *); +}; + +static inline bool is_active(struct etnaviv_gem_object *etnaviv_obj) +{ + return atomic_read(&etnaviv_obj->gpu_active) != 0; +} + +#define MAX_CMDS 4 + +/* Created per submit-ioctl, to track bo's and cmdstream bufs, etc, + * associated with the cmdstream submission for synchronization (and + * make it easier to unwind when things go wrong, etc). This only + * lasts for the duration of the submit-ioctl. + */ +struct etnaviv_gem_submit { + struct drm_device *dev; + struct etnaviv_gpu *gpu; + struct ww_acquire_ctx ticket; + u32 fence; + unsigned int nr_bos; + struct { + u32 flags; + struct etnaviv_gem_object *obj; + u32 iova; + } bos[0]; +}; + +int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj, + struct timespec *timeout); +int etnaviv_gem_new_private(struct drm_device *dev, size_t size, u32 flags, + struct reservation_object *robj, const struct etnaviv_gem_ops *ops, + struct etnaviv_gem_object **res); +int etnaviv_gem_obj_add(struct drm_device *dev, struct drm_gem_object *obj); +struct page **etnaviv_gem_get_pages(struct etnaviv_gem_object *obj); +void etnaviv_gem_put_pages(struct etnaviv_gem_object *obj); + +#endif /* __ETNAVIV_GEM_H__ */ diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c new file mode 100644 index 000000000000..e94db4f95770 --- /dev/null +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c @@ -0,0 +1,122 @@ +/* + * Copyright (C) 2013 Red Hat + * Author: Rob Clark + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. 
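
struct etnaviv_gem_submit above ends in a variable-length bos[] array; the submit path sizes its allocation with size_vstruct() from etnaviv_drv.h so that a huge userspace-supplied element count makes the size computation fail instead of silently wrapping. A standalone sketch of the same pattern, using a C99 flexible array member and illustrative names:

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

struct toy_bo {
        uint32_t flags;
        uint32_t handle;
};

struct toy_submit {
        unsigned int nr_bos;
        struct toy_bo bos[];    /* flexible array member */
};

/* Same idea as size_vstruct(): return 0 if base + nelem * elem_size overflows. */
static size_t size_vstruct(size_t nelem, size_t elem_size, size_t base)
{
        if (elem_size && nelem > (SIZE_MAX - base) / elem_size)
                return 0;
        return base + nelem * elem_size;
}

static struct toy_submit *submit_create(size_t nr)
{
        size_t sz = size_vstruct(nr, sizeof(struct toy_bo),
                                 sizeof(struct toy_submit));
        struct toy_submit *submit;

        if (!sz)
                return NULL;    /* nr was large enough to overflow size_t */

        submit = calloc(1, sz);
        if (submit)
                submit->nr_bos = (unsigned int)nr;
        return submit;
}

int main(void)
{
        struct toy_submit *ok = submit_create(4);
        struct toy_submit *bad = submit_create(SIZE_MAX / 2);

        printf("4 bos: %s\n", ok ? "allocated" : "rejected");
        printf("huge bo count: %s\n", bad ? "allocated" : "rejected");

        free(ok);
        free(bad);
        return 0;
}
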
+ * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + */ + +#include +#include "etnaviv_drv.h" +#include "etnaviv_gem.h" + + +struct sg_table *etnaviv_gem_prime_get_sg_table(struct drm_gem_object *obj) +{ + struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj); + + BUG_ON(!etnaviv_obj->sgt); /* should have already pinned! */ + + return etnaviv_obj->sgt; +} + +void *etnaviv_gem_prime_vmap(struct drm_gem_object *obj) +{ + return etnaviv_gem_vaddr(obj); +} + +void etnaviv_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr) +{ + /* TODO msm_gem_vunmap() */ +} + +int etnaviv_gem_prime_pin(struct drm_gem_object *obj) +{ + if (!obj->import_attach) { + struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj); + + mutex_lock(&etnaviv_obj->lock); + etnaviv_gem_get_pages(etnaviv_obj); + mutex_unlock(&etnaviv_obj->lock); + } + return 0; +} + +void etnaviv_gem_prime_unpin(struct drm_gem_object *obj) +{ + if (!obj->import_attach) { + struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj); + + mutex_lock(&etnaviv_obj->lock); + etnaviv_gem_put_pages(to_etnaviv_bo(obj)); + mutex_unlock(&etnaviv_obj->lock); + } +} + +static void etnaviv_gem_prime_release(struct etnaviv_gem_object *etnaviv_obj) +{ + if (etnaviv_obj->vaddr) + dma_buf_vunmap(etnaviv_obj->base.import_attach->dmabuf, + etnaviv_obj->vaddr); + + /* Don't drop the pages for imported dmabuf, as they are not + * ours, just free the array we allocated: + */ + if (etnaviv_obj->pages) + drm_free_large(etnaviv_obj->pages); + + drm_prime_gem_destroy(&etnaviv_obj->base, etnaviv_obj->sgt); +} + +static const struct etnaviv_gem_ops etnaviv_gem_prime_ops = { + /* .get_pages should never be called */ + .release = etnaviv_gem_prime_release, +}; + +struct drm_gem_object *etnaviv_gem_prime_import_sg_table(struct drm_device *dev, + struct dma_buf_attachment *attach, struct sg_table *sgt) +{ + struct etnaviv_gem_object *etnaviv_obj; + size_t size = PAGE_ALIGN(attach->dmabuf->size); + int ret, npages; + + ret = etnaviv_gem_new_private(dev, size, ETNA_BO_WC, + attach->dmabuf->resv, + &etnaviv_gem_prime_ops, &etnaviv_obj); + if (ret < 0) + return ERR_PTR(ret); + + npages = size / PAGE_SIZE; + + etnaviv_obj->sgt = sgt; + etnaviv_obj->pages = drm_malloc_ab(npages, sizeof(struct page *)); + if (!etnaviv_obj->pages) { + ret = -ENOMEM; + goto fail; + } + + ret = drm_prime_sg_to_page_addr_arrays(sgt, etnaviv_obj->pages, + NULL, npages); + if (ret) + goto fail; + + ret = etnaviv_gem_obj_add(dev, &etnaviv_obj->base); + if (ret) + goto fail; + + return &etnaviv_obj->base; + +fail: + drm_gem_object_unreference_unlocked(&etnaviv_obj->base); + + return ERR_PTR(ret); +} diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c new file mode 100644 index 000000000000..1aba01a999df --- /dev/null +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c @@ -0,0 +1,443 @@ +/* + * Copyright (C) 2015 Etnaviv Project + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + */ + +#include +#include "etnaviv_drv.h" +#include "etnaviv_gpu.h" +#include "etnaviv_gem.h" + +/* + * Cmdstream submission: + */ + +#define BO_INVALID_FLAGS ~(ETNA_SUBMIT_BO_READ | ETNA_SUBMIT_BO_WRITE) +/* make sure these don't conflict w/ ETNAVIV_SUBMIT_BO_x */ +#define BO_LOCKED 0x4000 +#define BO_PINNED 0x2000 + +static inline void __user *to_user_ptr(u64 address) +{ + return (void __user *)(uintptr_t)address; +} + +static struct etnaviv_gem_submit *submit_create(struct drm_device *dev, + struct etnaviv_gpu *gpu, size_t nr) +{ + struct etnaviv_gem_submit *submit; + size_t sz = size_vstruct(nr, sizeof(submit->bos[0]), sizeof(*submit)); + + submit = kmalloc(sz, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY); + if (submit) { + submit->dev = dev; + submit->gpu = gpu; + + /* initially, until copy_from_user() and bo lookup succeeds: */ + submit->nr_bos = 0; + + ww_acquire_init(&submit->ticket, &reservation_ww_class); + } + + return submit; +} + +static int submit_lookup_objects(struct etnaviv_gem_submit *submit, + struct drm_file *file, struct drm_etnaviv_gem_submit_bo *submit_bos, + unsigned nr_bos) +{ + struct drm_etnaviv_gem_submit_bo *bo; + unsigned i; + int ret = 0; + + spin_lock(&file->table_lock); + + for (i = 0, bo = submit_bos; i < nr_bos; i++, bo++) { + struct drm_gem_object *obj; + + if (bo->flags & BO_INVALID_FLAGS) { + DRM_ERROR("invalid flags: %x\n", bo->flags); + ret = -EINVAL; + goto out_unlock; + } + + submit->bos[i].flags = bo->flags; + + /* normally use drm_gem_object_lookup(), but for bulk lookup + * all under single table_lock just hit object_idr directly: + */ + obj = idr_find(&file->object_idr, bo->handle); + if (!obj) { + DRM_ERROR("invalid handle %u at index %u\n", + bo->handle, i); + ret = -EINVAL; + goto out_unlock; + } + + /* + * Take a refcount on the object. The file table lock + * prevents the object_idr's refcount on this being dropped. 
+ */ + drm_gem_object_reference(obj); + + submit->bos[i].obj = to_etnaviv_bo(obj); + } + +out_unlock: + submit->nr_bos = i; + spin_unlock(&file->table_lock); + + return ret; +} + +static void submit_unlock_object(struct etnaviv_gem_submit *submit, int i) +{ + if (submit->bos[i].flags & BO_LOCKED) { + struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj; + + ww_mutex_unlock(&etnaviv_obj->resv->lock); + submit->bos[i].flags &= ~BO_LOCKED; + } +} + +static int submit_lock_objects(struct etnaviv_gem_submit *submit) +{ + int contended, slow_locked = -1, i, ret = 0; + +retry: + for (i = 0; i < submit->nr_bos; i++) { + struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj; + + if (slow_locked == i) + slow_locked = -1; + + contended = i; + + if (!(submit->bos[i].flags & BO_LOCKED)) { + ret = ww_mutex_lock_interruptible(&etnaviv_obj->resv->lock, + &submit->ticket); + if (ret == -EALREADY) + DRM_ERROR("BO at index %u already on submit list\n", + i); + if (ret) + goto fail; + submit->bos[i].flags |= BO_LOCKED; + } + } + + ww_acquire_done(&submit->ticket); + + return 0; + +fail: + for (; i >= 0; i--) + submit_unlock_object(submit, i); + + if (slow_locked > 0) + submit_unlock_object(submit, slow_locked); + + if (ret == -EDEADLK) { + struct etnaviv_gem_object *etnaviv_obj; + + etnaviv_obj = submit->bos[contended].obj; + + /* we lost out in a seqno race, lock and retry.. */ + ret = ww_mutex_lock_slow_interruptible(&etnaviv_obj->resv->lock, + &submit->ticket); + if (!ret) { + submit->bos[contended].flags |= BO_LOCKED; + slow_locked = contended; + goto retry; + } + } + + return ret; +} + +static int submit_fence_sync(const struct etnaviv_gem_submit *submit) +{ + unsigned int context = submit->gpu->fence_context; + int i, ret = 0; + + for (i = 0; i < submit->nr_bos; i++) { + struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj; + bool write = submit->bos[i].flags & ETNA_SUBMIT_BO_WRITE; + + ret = etnaviv_gpu_fence_sync_obj(etnaviv_obj, context, write); + if (ret) + break; + } + + return ret; +} + +static void submit_unpin_objects(struct etnaviv_gem_submit *submit) +{ + int i; + + for (i = 0; i < submit->nr_bos; i++) { + struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj; + + if (submit->bos[i].flags & BO_PINNED) + etnaviv_gem_put_iova(submit->gpu, &etnaviv_obj->base); + + submit->bos[i].iova = 0; + submit->bos[i].flags &= ~BO_PINNED; + } +} + +static int submit_pin_objects(struct etnaviv_gem_submit *submit) +{ + int i, ret = 0; + + for (i = 0; i < submit->nr_bos; i++) { + struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj; + u32 iova; + + ret = etnaviv_gem_get_iova(submit->gpu, &etnaviv_obj->base, + &iova); + if (ret) + break; + + submit->bos[i].flags |= BO_PINNED; + submit->bos[i].iova = iova; + } + + return ret; +} + +static int submit_bo(struct etnaviv_gem_submit *submit, u32 idx, + struct etnaviv_gem_object **obj, u32 *iova) +{ + if (idx >= submit->nr_bos) { + DRM_ERROR("invalid buffer index: %u (out of %u)\n", + idx, submit->nr_bos); + return -EINVAL; + } + + if (obj) + *obj = submit->bos[idx].obj; + if (iova) + *iova = submit->bos[idx].iova; + + return 0; +} + +/* process the reloc's and patch up the cmdstream as needed: */ +static int submit_reloc(struct etnaviv_gem_submit *submit, void *stream, + u32 size, const struct drm_etnaviv_gem_submit_reloc *relocs, + u32 nr_relocs) +{ + u32 i, last_offset = 0; + u32 *ptr = stream; + int ret; + + for (i = 0; i < nr_relocs; i++) { + const struct drm_etnaviv_gem_submit_reloc *r = relocs + i; + struct etnaviv_gem_object 
*bobj; + u32 iova, off; + + if (unlikely(r->flags)) { + DRM_ERROR("invalid reloc flags\n"); + return -EINVAL; + } + + if (r->submit_offset % 4) { + DRM_ERROR("non-aligned reloc offset: %u\n", + r->submit_offset); + return -EINVAL; + } + + /* offset in dwords: */ + off = r->submit_offset / 4; + + if ((off >= size ) || + (off < last_offset)) { + DRM_ERROR("invalid offset %u at reloc %u\n", off, i); + return -EINVAL; + } + + ret = submit_bo(submit, r->reloc_idx, &bobj, &iova); + if (ret) + return ret; + + if (r->reloc_offset >= + bobj->base.size - sizeof(*ptr)) { + DRM_ERROR("relocation %u outside object", i); + return -EINVAL; + } + + ptr[off] = iova + r->reloc_offset; + + last_offset = off; + } + + return 0; +} + +static void submit_cleanup(struct etnaviv_gem_submit *submit) +{ + unsigned i; + + for (i = 0; i < submit->nr_bos; i++) { + struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj; + + submit_unlock_object(submit, i); + drm_gem_object_unreference_unlocked(&etnaviv_obj->base); + } + + ww_acquire_fini(&submit->ticket); + kfree(submit); +} + +int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data, + struct drm_file *file) +{ + struct etnaviv_drm_private *priv = dev->dev_private; + struct drm_etnaviv_gem_submit *args = data; + struct drm_etnaviv_gem_submit_reloc *relocs; + struct drm_etnaviv_gem_submit_bo *bos; + struct etnaviv_gem_submit *submit; + struct etnaviv_cmdbuf *cmdbuf; + struct etnaviv_gpu *gpu; + void *stream; + int ret; + + if (args->pipe >= ETNA_MAX_PIPES) + return -EINVAL; + + gpu = priv->gpu[args->pipe]; + if (!gpu) + return -ENXIO; + + if (args->stream_size % 4) { + DRM_ERROR("non-aligned cmdstream buffer size: %u\n", + args->stream_size); + return -EINVAL; + } + + if (args->exec_state != ETNA_PIPE_3D && + args->exec_state != ETNA_PIPE_2D && + args->exec_state != ETNA_PIPE_VG) { + DRM_ERROR("invalid exec_state: 0x%x\n", args->exec_state); + return -EINVAL; + } + + /* + * Copy the command submission and bo array to kernel space in + * one go, and do this outside of any locks. 
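
submit_reloc() above patches the copied command stream in place: each relocation entry names a dword offset in the stream plus a buffer object and offset within it, and the checks reject unaligned, out-of-range or backwards stream offsets. A simplified standalone version of that patching loop, with made-up addresses and without the buffer-size check of the real code:

#include <stdio.h>
#include <stdint.h>

struct reloc {
        uint32_t submit_offset; /* byte offset into the command stream */
        uint32_t bo_iova;       /* resolved GPU address of the referenced BO */
        uint32_t reloc_offset;  /* byte offset inside that BO */
};

/* Patch 'stream' (size in 32-bit words), mirroring submit_reloc()'s checks. */
static int apply_relocs(uint32_t *stream, uint32_t size_dwords,
                        const struct reloc *relocs, unsigned int nr)
{
        uint32_t last_offset = 0;
        unsigned int i;

        for (i = 0; i < nr; i++) {
                uint32_t off;

                if (relocs[i].submit_offset % 4)
                        return -1;      /* must be 32-bit aligned */

                off = relocs[i].submit_offset / 4;
                if (off >= size_dwords || off < last_offset)
                        return -1;      /* out of range or going backwards */

                stream[off] = relocs[i].bo_iova + relocs[i].reloc_offset;
                last_offset = off;
        }

        return 0;
}

int main(void)
{
        uint32_t stream[8] = { 0 };
        /* Hypothetical relocations: patch words 1 and 5 of the stream. */
        struct reloc relocs[] = {
                { .submit_offset = 4,  .bo_iova = 0x10000000, .reloc_offset = 0x40 },
                { .submit_offset = 20, .bo_iova = 0x20000000, .reloc_offset = 0x00 },
        };

        if (apply_relocs(stream, 8, relocs, 2))
                return 1;

        printf("stream[1] = 0x%08x, stream[5] = 0x%08x\n", stream[1], stream[5]);
        return 0;
}
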
+ */ + bos = drm_malloc_ab(args->nr_bos, sizeof(*bos)); + relocs = drm_malloc_ab(args->nr_relocs, sizeof(*relocs)); + stream = drm_malloc_ab(1, args->stream_size); + cmdbuf = etnaviv_gpu_cmdbuf_new(gpu, ALIGN(args->stream_size, 8) + 8, + args->nr_bos); + if (!bos || !relocs || !stream || !cmdbuf) { + ret = -ENOMEM; + goto err_submit_cmds; + } + + cmdbuf->exec_state = args->exec_state; + cmdbuf->ctx = file->driver_priv; + + ret = copy_from_user(bos, to_user_ptr(args->bos), + args->nr_bos * sizeof(*bos)); + if (ret) { + ret = -EFAULT; + goto err_submit_cmds; + } + + ret = copy_from_user(relocs, to_user_ptr(args->relocs), + args->nr_relocs * sizeof(*relocs)); + if (ret) { + ret = -EFAULT; + goto err_submit_cmds; + } + + ret = copy_from_user(stream, to_user_ptr(args->stream), + args->stream_size); + if (ret) { + ret = -EFAULT; + goto err_submit_cmds; + } + + submit = submit_create(dev, gpu, args->nr_bos); + if (!submit) { + ret = -ENOMEM; + goto err_submit_cmds; + } + + ret = submit_lookup_objects(submit, file, bos, args->nr_bos); + if (ret) + goto err_submit_objects; + + ret = submit_lock_objects(submit); + if (ret) + goto err_submit_objects; + + if (!etnaviv_cmd_validate_one(gpu, stream, args->stream_size / 4, + relocs, args->nr_relocs)) { + ret = -EINVAL; + goto err_submit_objects; + } + + ret = submit_fence_sync(submit); + if (ret) + goto err_submit_objects; + + ret = submit_pin_objects(submit); + if (ret) + goto out; + + ret = submit_reloc(submit, stream, args->stream_size / 4, + relocs, args->nr_relocs); + if (ret) + goto out; + + memcpy(cmdbuf->vaddr, stream, args->stream_size); + cmdbuf->user_size = ALIGN(args->stream_size, 8); + + ret = etnaviv_gpu_submit(gpu, submit, cmdbuf); + if (ret == 0) + cmdbuf = NULL; + + args->fence = submit->fence; + +out: + submit_unpin_objects(submit); + + /* + * If we're returning -EAGAIN, it may be due to the userptr code + * wanting to run its workqueue outside of any locks. Flush our + * workqueue to ensure that it is run in a timely manner. + */ + if (ret == -EAGAIN) + flush_workqueue(priv->wq); + +err_submit_objects: + submit_cleanup(submit); + +err_submit_cmds: + /* if we still own the cmdbuf */ + if (cmdbuf) + etnaviv_gpu_cmdbuf_free(cmdbuf); + if (stream) + drm_free_large(stream); + if (bos) + drm_free_large(bos); + if (relocs) + drm_free_large(relocs); + + return ret; +} diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c new file mode 100644 index 000000000000..056a72e6ed26 --- /dev/null +++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c @@ -0,0 +1,1647 @@ +/* + * Copyright (C) 2015 Etnaviv Project + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . 
+ */ + +#include +#include +#include +#include +#include "etnaviv_dump.h" +#include "etnaviv_gpu.h" +#include "etnaviv_gem.h" +#include "etnaviv_mmu.h" +#include "etnaviv_iommu.h" +#include "etnaviv_iommu_v2.h" +#include "common.xml.h" +#include "state.xml.h" +#include "state_hi.xml.h" +#include "cmdstream.xml.h" + +static const struct platform_device_id gpu_ids[] = { + { .name = "etnaviv-gpu,2d" }, + { }, +}; + +static bool etnaviv_dump_core = true; +module_param_named(dump_core, etnaviv_dump_core, bool, 0600); + +/* + * Driver functions: + */ + +int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value) +{ + switch (param) { + case ETNAVIV_PARAM_GPU_MODEL: + *value = gpu->identity.model; + break; + + case ETNAVIV_PARAM_GPU_REVISION: + *value = gpu->identity.revision; + break; + + case ETNAVIV_PARAM_GPU_FEATURES_0: + *value = gpu->identity.features; + break; + + case ETNAVIV_PARAM_GPU_FEATURES_1: + *value = gpu->identity.minor_features0; + break; + + case ETNAVIV_PARAM_GPU_FEATURES_2: + *value = gpu->identity.minor_features1; + break; + + case ETNAVIV_PARAM_GPU_FEATURES_3: + *value = gpu->identity.minor_features2; + break; + + case ETNAVIV_PARAM_GPU_FEATURES_4: + *value = gpu->identity.minor_features3; + break; + + case ETNAVIV_PARAM_GPU_STREAM_COUNT: + *value = gpu->identity.stream_count; + break; + + case ETNAVIV_PARAM_GPU_REGISTER_MAX: + *value = gpu->identity.register_max; + break; + + case ETNAVIV_PARAM_GPU_THREAD_COUNT: + *value = gpu->identity.thread_count; + break; + + case ETNAVIV_PARAM_GPU_VERTEX_CACHE_SIZE: + *value = gpu->identity.vertex_cache_size; + break; + + case ETNAVIV_PARAM_GPU_SHADER_CORE_COUNT: + *value = gpu->identity.shader_core_count; + break; + + case ETNAVIV_PARAM_GPU_PIXEL_PIPES: + *value = gpu->identity.pixel_pipes; + break; + + case ETNAVIV_PARAM_GPU_VERTEX_OUTPUT_BUFFER_SIZE: + *value = gpu->identity.vertex_output_buffer_size; + break; + + case ETNAVIV_PARAM_GPU_BUFFER_SIZE: + *value = gpu->identity.buffer_size; + break; + + case ETNAVIV_PARAM_GPU_INSTRUCTION_COUNT: + *value = gpu->identity.instruction_count; + break; + + case ETNAVIV_PARAM_GPU_NUM_CONSTANTS: + *value = gpu->identity.num_constants; + break; + + default: + DBG("%s: invalid param: %u", dev_name(gpu->dev), param); + return -EINVAL; + } + + return 0; +} + +static void etnaviv_hw_specs(struct etnaviv_gpu *gpu) +{ + if (gpu->identity.minor_features0 & + chipMinorFeatures0_MORE_MINOR_FEATURES) { + u32 specs[2]; + + specs[0] = gpu_read(gpu, VIVS_HI_CHIP_SPECS); + specs[1] = gpu_read(gpu, VIVS_HI_CHIP_SPECS_2); + + gpu->identity.stream_count = + (specs[0] & VIVS_HI_CHIP_SPECS_STREAM_COUNT__MASK) + >> VIVS_HI_CHIP_SPECS_STREAM_COUNT__SHIFT; + gpu->identity.register_max = + (specs[0] & VIVS_HI_CHIP_SPECS_REGISTER_MAX__MASK) + >> VIVS_HI_CHIP_SPECS_REGISTER_MAX__SHIFT; + gpu->identity.thread_count = + (specs[0] & VIVS_HI_CHIP_SPECS_THREAD_COUNT__MASK) + >> VIVS_HI_CHIP_SPECS_THREAD_COUNT__SHIFT; + gpu->identity.vertex_cache_size = + (specs[0] & VIVS_HI_CHIP_SPECS_VERTEX_CACHE_SIZE__MASK) + >> VIVS_HI_CHIP_SPECS_VERTEX_CACHE_SIZE__SHIFT; + gpu->identity.shader_core_count = + (specs[0] & VIVS_HI_CHIP_SPECS_SHADER_CORE_COUNT__MASK) + >> VIVS_HI_CHIP_SPECS_SHADER_CORE_COUNT__SHIFT; + gpu->identity.pixel_pipes = + (specs[0] & VIVS_HI_CHIP_SPECS_PIXEL_PIPES__MASK) + >> VIVS_HI_CHIP_SPECS_PIXEL_PIPES__SHIFT; + gpu->identity.vertex_output_buffer_size = + (specs[0] & VIVS_HI_CHIP_SPECS_VERTEX_OUTPUT_BUFFER_SIZE__MASK) + >> VIVS_HI_CHIP_SPECS_VERTEX_OUTPUT_BUFFER_SIZE__SHIFT; + + 
gpu->identity.buffer_size = + (specs[1] & VIVS_HI_CHIP_SPECS_2_BUFFER_SIZE__MASK) + >> VIVS_HI_CHIP_SPECS_2_BUFFER_SIZE__SHIFT; + gpu->identity.instruction_count = + (specs[1] & VIVS_HI_CHIP_SPECS_2_INSTRUCTION_COUNT__MASK) + >> VIVS_HI_CHIP_SPECS_2_INSTRUCTION_COUNT__SHIFT; + gpu->identity.num_constants = + (specs[1] & VIVS_HI_CHIP_SPECS_2_NUM_CONSTANTS__MASK) + >> VIVS_HI_CHIP_SPECS_2_NUM_CONSTANTS__SHIFT; + } + + /* Fill in the stream count if not specified */ + if (gpu->identity.stream_count == 0) { + if (gpu->identity.model >= 0x1000) + gpu->identity.stream_count = 4; + else + gpu->identity.stream_count = 1; + } + + /* Convert the register max value */ + if (gpu->identity.register_max) + gpu->identity.register_max = 1 << gpu->identity.register_max; + else if (gpu->identity.model == 0x0400) + gpu->identity.register_max = 32; + else + gpu->identity.register_max = 64; + + /* Convert thread count */ + if (gpu->identity.thread_count) + gpu->identity.thread_count = 1 << gpu->identity.thread_count; + else if (gpu->identity.model == 0x0400) + gpu->identity.thread_count = 64; + else if (gpu->identity.model == 0x0500 || + gpu->identity.model == 0x0530) + gpu->identity.thread_count = 128; + else + gpu->identity.thread_count = 256; + + if (gpu->identity.vertex_cache_size == 0) + gpu->identity.vertex_cache_size = 8; + + if (gpu->identity.shader_core_count == 0) { + if (gpu->identity.model >= 0x1000) + gpu->identity.shader_core_count = 2; + else + gpu->identity.shader_core_count = 1; + } + + if (gpu->identity.pixel_pipes == 0) + gpu->identity.pixel_pipes = 1; + + /* Convert virtex buffer size */ + if (gpu->identity.vertex_output_buffer_size) { + gpu->identity.vertex_output_buffer_size = + 1 << gpu->identity.vertex_output_buffer_size; + } else if (gpu->identity.model == 0x0400) { + if (gpu->identity.revision < 0x4000) + gpu->identity.vertex_output_buffer_size = 512; + else if (gpu->identity.revision < 0x4200) + gpu->identity.vertex_output_buffer_size = 256; + else + gpu->identity.vertex_output_buffer_size = 128; + } else { + gpu->identity.vertex_output_buffer_size = 512; + } + + switch (gpu->identity.instruction_count) { + case 0: + if ((gpu->identity.model == 0x2000 && + gpu->identity.revision == 0x5108) || + gpu->identity.model == 0x880) + gpu->identity.instruction_count = 512; + else + gpu->identity.instruction_count = 256; + break; + + case 1: + gpu->identity.instruction_count = 1024; + break; + + case 2: + gpu->identity.instruction_count = 2048; + break; + + default: + gpu->identity.instruction_count = 256; + break; + } + + if (gpu->identity.num_constants == 0) + gpu->identity.num_constants = 168; +} + +static void etnaviv_hw_identify(struct etnaviv_gpu *gpu) +{ + u32 chipIdentity; + + chipIdentity = gpu_read(gpu, VIVS_HI_CHIP_IDENTITY); + + /* Special case for older graphic cores. */ + if (((chipIdentity & VIVS_HI_CHIP_IDENTITY_FAMILY__MASK) + >> VIVS_HI_CHIP_IDENTITY_FAMILY__SHIFT) == 0x01) { + gpu->identity.model = 0x500; /* gc500 */ + gpu->identity.revision = + (chipIdentity & VIVS_HI_CHIP_IDENTITY_REVISION__MASK) + >> VIVS_HI_CHIP_IDENTITY_REVISION__SHIFT; + } else { + + gpu->identity.model = gpu_read(gpu, VIVS_HI_CHIP_MODEL); + gpu->identity.revision = gpu_read(gpu, VIVS_HI_CHIP_REV); + + /* + * !!!! HACK ALERT !!!! + * Because people change device IDs without letting software + * know about it - here is the hack to make it all look the + * same. Only for GC400 family. 
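A side note on the specs decoding in etnaviv_hw_specs() further up (not part of the diff): the VIVS_HI_CHIP_SPECS words pack several small fields, each pulled out with a MASK/SHIFT pair from the generated state_hi.xml.h headers, and most raw values are encodings rather than counts. The tiny helper below only illustrates that extract-then-expand pattern.

/* Illustration only, mirroring the decode pattern above. */
static inline u32 spec_field(u32 specs, u32 mask, unsigned int shift)
{
	return (specs & mask) >> shift;
}

/*
 * The raw fields are encodings, not counts: register_max, thread_count and
 * vertex_output_buffer_size are log2 values (a raw 5 becomes 1 << 5 = 32),
 * instruction_count is a table index (1 -> 1024, 2 -> 2048, 0 -> per-model
 * default), and a raw zero elsewhere means "not reported, use the default".
 */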
+ */ + if ((gpu->identity.model & 0xff00) == 0x0400 && + gpu->identity.model != 0x0420) { + gpu->identity.model = gpu->identity.model & 0x0400; + } + + /* Another special case */ + if (gpu->identity.model == 0x300 && + gpu->identity.revision == 0x2201) { + u32 chipDate = gpu_read(gpu, VIVS_HI_CHIP_DATE); + u32 chipTime = gpu_read(gpu, VIVS_HI_CHIP_TIME); + + if (chipDate == 0x20080814 && chipTime == 0x12051100) { + /* + * This IP has an ECO; put the correct + * revision in it. + */ + gpu->identity.revision = 0x1051; + } + } + } + + dev_info(gpu->dev, "model: GC%x, revision: %x\n", + gpu->identity.model, gpu->identity.revision); + + gpu->identity.features = gpu_read(gpu, VIVS_HI_CHIP_FEATURE); + + /* Disable fast clear on GC700. */ + if (gpu->identity.model == 0x700) + gpu->identity.features &= ~chipFeatures_FAST_CLEAR; + + if ((gpu->identity.model == 0x500 && gpu->identity.revision < 2) || + (gpu->identity.model == 0x300 && gpu->identity.revision < 0x2000)) { + + /* + * GC500 rev 1.x and GC300 rev < 2.0 doesn't have these + * registers. + */ + gpu->identity.minor_features0 = 0; + gpu->identity.minor_features1 = 0; + gpu->identity.minor_features2 = 0; + gpu->identity.minor_features3 = 0; + } else + gpu->identity.minor_features0 = + gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_0); + + if (gpu->identity.minor_features0 & + chipMinorFeatures0_MORE_MINOR_FEATURES) { + gpu->identity.minor_features1 = + gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_1); + gpu->identity.minor_features2 = + gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_2); + gpu->identity.minor_features3 = + gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_3); + } + + /* GC600 idle register reports zero bits where modules aren't present */ + if (gpu->identity.model == chipModel_GC600) { + gpu->idle_mask = VIVS_HI_IDLE_STATE_TX | + VIVS_HI_IDLE_STATE_RA | + VIVS_HI_IDLE_STATE_SE | + VIVS_HI_IDLE_STATE_PA | + VIVS_HI_IDLE_STATE_SH | + VIVS_HI_IDLE_STATE_PE | + VIVS_HI_IDLE_STATE_DE | + VIVS_HI_IDLE_STATE_FE; + } else { + gpu->idle_mask = ~VIVS_HI_IDLE_STATE_AXI_LP; + } + + etnaviv_hw_specs(gpu); +} + +static void etnaviv_gpu_load_clock(struct etnaviv_gpu *gpu, u32 clock) +{ + gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, clock | + VIVS_HI_CLOCK_CONTROL_FSCALE_CMD_LOAD); + gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, clock); +} + +static int etnaviv_hw_reset(struct etnaviv_gpu *gpu) +{ + u32 control, idle; + unsigned long timeout; + bool failed = true; + + /* TODO + * + * - clock gating + * - puls eater + * - what about VG? + */ + + /* We hope that the GPU resets in under one second */ + timeout = jiffies + msecs_to_jiffies(1000); + + while (time_is_after_jiffies(timeout)) { + control = VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS | + VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(0x40); + + /* enable clock */ + etnaviv_gpu_load_clock(gpu, control); + + /* Wait for stable clock. Vivante's code waited for 1ms */ + usleep_range(1000, 10000); + + /* isolate the GPU. */ + control |= VIVS_HI_CLOCK_CONTROL_ISOLATE_GPU; + gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control); + + /* set soft reset. */ + control |= VIVS_HI_CLOCK_CONTROL_SOFT_RESET; + gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control); + + /* wait for reset. */ + msleep(1); + + /* reset soft reset bit. */ + control &= ~VIVS_HI_CLOCK_CONTROL_SOFT_RESET; + gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control); + + /* reset GPU isolation. */ + control &= ~VIVS_HI_CLOCK_CONTROL_ISOLATE_GPU; + gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control); + + /* read idle register. 
*/ + idle = gpu_read(gpu, VIVS_HI_IDLE_STATE); + + /* try reseting again if FE it not idle */ + if ((idle & VIVS_HI_IDLE_STATE_FE) == 0) { + dev_dbg(gpu->dev, "FE is not idle\n"); + continue; + } + + /* read reset register. */ + control = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL); + + /* is the GPU idle? */ + if (((control & VIVS_HI_CLOCK_CONTROL_IDLE_3D) == 0) || + ((control & VIVS_HI_CLOCK_CONTROL_IDLE_2D) == 0)) { + dev_dbg(gpu->dev, "GPU is not idle\n"); + continue; + } + + failed = false; + break; + } + + if (failed) { + idle = gpu_read(gpu, VIVS_HI_IDLE_STATE); + control = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL); + + dev_err(gpu->dev, "GPU failed to reset: FE %sidle, 3D %sidle, 2D %sidle\n", + idle & VIVS_HI_IDLE_STATE_FE ? "" : "not ", + control & VIVS_HI_CLOCK_CONTROL_IDLE_3D ? "" : "not ", + control & VIVS_HI_CLOCK_CONTROL_IDLE_2D ? "" : "not "); + + return -EBUSY; + } + + /* We rely on the GPU running, so program the clock */ + control = VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS | + VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(0x40); + + /* enable clock */ + etnaviv_gpu_load_clock(gpu, control); + + return 0; +} + +static void etnaviv_gpu_hw_init(struct etnaviv_gpu *gpu) +{ + u16 prefetch; + + if (gpu->identity.model == chipModel_GC320 && + gpu_read(gpu, VIVS_HI_CHIP_TIME) != 0x2062400 && + (gpu->identity.revision == 0x5007 || + gpu->identity.revision == 0x5220)) { + u32 mc_memory_debug; + + mc_memory_debug = gpu_read(gpu, VIVS_MC_DEBUG_MEMORY) & ~0xff; + + if (gpu->identity.revision == 0x5007) + mc_memory_debug |= 0x0c; + else + mc_memory_debug |= 0x08; + + gpu_write(gpu, VIVS_MC_DEBUG_MEMORY, mc_memory_debug); + } + + /* + * Update GPU AXI cache atttribute to "cacheable, no allocate". + * This is necessary to prevent the iMX6 SoC locking up. + */ + gpu_write(gpu, VIVS_HI_AXI_CONFIG, + VIVS_HI_AXI_CONFIG_AWCACHE(2) | + VIVS_HI_AXI_CONFIG_ARCACHE(2)); + + /* GC2000 rev 5108 needs a special bus config */ + if (gpu->identity.model == 0x2000 && gpu->identity.revision == 0x5108) { + u32 bus_config = gpu_read(gpu, VIVS_MC_BUS_CONFIG); + bus_config &= ~(VIVS_MC_BUS_CONFIG_FE_BUS_CONFIG__MASK | + VIVS_MC_BUS_CONFIG_TX_BUS_CONFIG__MASK); + bus_config |= VIVS_MC_BUS_CONFIG_FE_BUS_CONFIG(1) | + VIVS_MC_BUS_CONFIG_TX_BUS_CONFIG(0); + gpu_write(gpu, VIVS_MC_BUS_CONFIG, bus_config); + } + + /* set base addresses */ + gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_RA, gpu->memory_base); + gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_FE, gpu->memory_base); + gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_TX, gpu->memory_base); + gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_PEZ, gpu->memory_base); + gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_PE, gpu->memory_base); + + /* setup the MMU page table pointers */ + etnaviv_iommu_domain_restore(gpu, gpu->mmu->domain); + + /* Start command processor */ + prefetch = etnaviv_buffer_init(gpu); + + gpu_write(gpu, VIVS_HI_INTR_ENBL, ~0U); + gpu_write(gpu, VIVS_FE_COMMAND_ADDRESS, + gpu->buffer->paddr - gpu->memory_base); + gpu_write(gpu, VIVS_FE_COMMAND_CONTROL, + VIVS_FE_COMMAND_CONTROL_ENABLE | + VIVS_FE_COMMAND_CONTROL_PREFETCH(prefetch)); +} + +int etnaviv_gpu_init(struct etnaviv_gpu *gpu) +{ + int ret, i; + struct iommu_domain *iommu; + enum etnaviv_iommu_version version; + bool mmuv2; + + ret = pm_runtime_get_sync(gpu->dev); + if (ret < 0) + return ret; + + etnaviv_hw_identify(gpu); + + if (gpu->identity.model == 0) { + dev_err(gpu->dev, "Unknown GPU model\n"); + pm_runtime_put_autosuspend(gpu->dev); + return -ENXIO; + } + + ret = etnaviv_hw_reset(gpu); + if (ret) + goto fail; + + /* Setup 
IOMMU.. eventually we will (I think) do this once per context + * and have separate page tables per context. For now, to keep things + * simple and to get something working, just use a single address space: + */ + mmuv2 = gpu->identity.minor_features1 & chipMinorFeatures1_MMU_VERSION; + dev_dbg(gpu->dev, "mmuv2: %d\n", mmuv2); + + if (!mmuv2) { + iommu = etnaviv_iommu_domain_alloc(gpu); + version = ETNAVIV_IOMMU_V1; + } else { + iommu = etnaviv_iommu_v2_domain_alloc(gpu); + version = ETNAVIV_IOMMU_V2; + } + + if (!iommu) { + ret = -ENOMEM; + goto fail; + } + + /* TODO: we will leak here memory - fix it! */ + + gpu->mmu = etnaviv_iommu_new(gpu, iommu, version); + if (!gpu->mmu) { + ret = -ENOMEM; + goto fail; + } + + /* Create buffer: */ + gpu->buffer = etnaviv_gpu_cmdbuf_new(gpu, PAGE_SIZE, 0); + if (!gpu->buffer) { + ret = -ENOMEM; + dev_err(gpu->dev, "could not create command buffer\n"); + goto fail; + } + if (gpu->buffer->paddr - gpu->memory_base > 0x80000000) { + ret = -EINVAL; + dev_err(gpu->dev, + "command buffer outside valid memory window\n"); + goto free_buffer; + } + + /* Setup event management */ + spin_lock_init(&gpu->event_spinlock); + init_completion(&gpu->event_free); + for (i = 0; i < ARRAY_SIZE(gpu->event); i++) { + gpu->event[i].used = false; + complete(&gpu->event_free); + } + + /* Now program the hardware */ + mutex_lock(&gpu->lock); + etnaviv_gpu_hw_init(gpu); + mutex_unlock(&gpu->lock); + + pm_runtime_mark_last_busy(gpu->dev); + pm_runtime_put_autosuspend(gpu->dev); + + return 0; + +free_buffer: + etnaviv_gpu_cmdbuf_free(gpu->buffer); + gpu->buffer = NULL; +fail: + pm_runtime_mark_last_busy(gpu->dev); + pm_runtime_put_autosuspend(gpu->dev); + + return ret; +} + +#ifdef CONFIG_DEBUG_FS +struct dma_debug { + u32 address[2]; + u32 state[2]; +}; + +static void verify_dma(struct etnaviv_gpu *gpu, struct dma_debug *debug) +{ + u32 i; + + debug->address[0] = gpu_read(gpu, VIVS_FE_DMA_ADDRESS); + debug->state[0] = gpu_read(gpu, VIVS_FE_DMA_DEBUG_STATE); + + for (i = 0; i < 500; i++) { + debug->address[1] = gpu_read(gpu, VIVS_FE_DMA_ADDRESS); + debug->state[1] = gpu_read(gpu, VIVS_FE_DMA_DEBUG_STATE); + + if (debug->address[0] != debug->address[1]) + break; + + if (debug->state[0] != debug->state[1]) + break; + } +} + +int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m) +{ + struct dma_debug debug; + u32 dma_lo, dma_hi, axi, idle; + int ret; + + seq_printf(m, "%s Status:\n", dev_name(gpu->dev)); + + ret = pm_runtime_get_sync(gpu->dev); + if (ret < 0) + return ret; + + dma_lo = gpu_read(gpu, VIVS_FE_DMA_LOW); + dma_hi = gpu_read(gpu, VIVS_FE_DMA_HIGH); + axi = gpu_read(gpu, VIVS_HI_AXI_STATUS); + idle = gpu_read(gpu, VIVS_HI_IDLE_STATE); + + verify_dma(gpu, &debug); + + seq_puts(m, "\tfeatures\n"); + seq_printf(m, "\t minor_features0: 0x%08x\n", + gpu->identity.minor_features0); + seq_printf(m, "\t minor_features1: 0x%08x\n", + gpu->identity.minor_features1); + seq_printf(m, "\t minor_features2: 0x%08x\n", + gpu->identity.minor_features2); + seq_printf(m, "\t minor_features3: 0x%08x\n", + gpu->identity.minor_features3); + + seq_puts(m, "\tspecs\n"); + seq_printf(m, "\t stream_count: %d\n", + gpu->identity.stream_count); + seq_printf(m, "\t register_max: %d\n", + gpu->identity.register_max); + seq_printf(m, "\t thread_count: %d\n", + gpu->identity.thread_count); + seq_printf(m, "\t vertex_cache_size: %d\n", + gpu->identity.vertex_cache_size); + seq_printf(m, "\t shader_core_count: %d\n", + gpu->identity.shader_core_count); + seq_printf(m, "\t pixel_pipes: %d\n", 
+ gpu->identity.pixel_pipes); + seq_printf(m, "\t vertex_output_buffer_size: %d\n", + gpu->identity.vertex_output_buffer_size); + seq_printf(m, "\t buffer_size: %d\n", + gpu->identity.buffer_size); + seq_printf(m, "\t instruction_count: %d\n", + gpu->identity.instruction_count); + seq_printf(m, "\t num_constants: %d\n", + gpu->identity.num_constants); + + seq_printf(m, "\taxi: 0x%08x\n", axi); + seq_printf(m, "\tidle: 0x%08x\n", idle); + idle |= ~gpu->idle_mask & ~VIVS_HI_IDLE_STATE_AXI_LP; + if ((idle & VIVS_HI_IDLE_STATE_FE) == 0) + seq_puts(m, "\t FE is not idle\n"); + if ((idle & VIVS_HI_IDLE_STATE_DE) == 0) + seq_puts(m, "\t DE is not idle\n"); + if ((idle & VIVS_HI_IDLE_STATE_PE) == 0) + seq_puts(m, "\t PE is not idle\n"); + if ((idle & VIVS_HI_IDLE_STATE_SH) == 0) + seq_puts(m, "\t SH is not idle\n"); + if ((idle & VIVS_HI_IDLE_STATE_PA) == 0) + seq_puts(m, "\t PA is not idle\n"); + if ((idle & VIVS_HI_IDLE_STATE_SE) == 0) + seq_puts(m, "\t SE is not idle\n"); + if ((idle & VIVS_HI_IDLE_STATE_RA) == 0) + seq_puts(m, "\t RA is not idle\n"); + if ((idle & VIVS_HI_IDLE_STATE_TX) == 0) + seq_puts(m, "\t TX is not idle\n"); + if ((idle & VIVS_HI_IDLE_STATE_VG) == 0) + seq_puts(m, "\t VG is not idle\n"); + if ((idle & VIVS_HI_IDLE_STATE_IM) == 0) + seq_puts(m, "\t IM is not idle\n"); + if ((idle & VIVS_HI_IDLE_STATE_FP) == 0) + seq_puts(m, "\t FP is not idle\n"); + if ((idle & VIVS_HI_IDLE_STATE_TS) == 0) + seq_puts(m, "\t TS is not idle\n"); + if (idle & VIVS_HI_IDLE_STATE_AXI_LP) + seq_puts(m, "\t AXI low power mode\n"); + + if (gpu->identity.features & chipFeatures_DEBUG_MODE) { + u32 read0 = gpu_read(gpu, VIVS_MC_DEBUG_READ0); + u32 read1 = gpu_read(gpu, VIVS_MC_DEBUG_READ1); + u32 write = gpu_read(gpu, VIVS_MC_DEBUG_WRITE); + + seq_puts(m, "\tMC\n"); + seq_printf(m, "\t read0: 0x%08x\n", read0); + seq_printf(m, "\t read1: 0x%08x\n", read1); + seq_printf(m, "\t write: 0x%08x\n", write); + } + + seq_puts(m, "\tDMA "); + + if (debug.address[0] == debug.address[1] && + debug.state[0] == debug.state[1]) { + seq_puts(m, "seems to be stuck\n"); + } else if (debug.address[0] == debug.address[1]) { + seq_puts(m, "adress is constant\n"); + } else { + seq_puts(m, "is runing\n"); + } + + seq_printf(m, "\t address 0: 0x%08x\n", debug.address[0]); + seq_printf(m, "\t address 1: 0x%08x\n", debug.address[1]); + seq_printf(m, "\t state 0: 0x%08x\n", debug.state[0]); + seq_printf(m, "\t state 1: 0x%08x\n", debug.state[1]); + seq_printf(m, "\t last fetch 64 bit word: 0x%08x 0x%08x\n", + dma_lo, dma_hi); + + ret = 0; + + pm_runtime_mark_last_busy(gpu->dev); + pm_runtime_put_autosuspend(gpu->dev); + + return ret; +} +#endif + +/* + * Power Management: + */ +static int enable_clk(struct etnaviv_gpu *gpu) +{ + if (gpu->clk_core) + clk_prepare_enable(gpu->clk_core); + if (gpu->clk_shader) + clk_prepare_enable(gpu->clk_shader); + + return 0; +} + +static int disable_clk(struct etnaviv_gpu *gpu) +{ + if (gpu->clk_core) + clk_disable_unprepare(gpu->clk_core); + if (gpu->clk_shader) + clk_disable_unprepare(gpu->clk_shader); + + return 0; +} + +static int enable_axi(struct etnaviv_gpu *gpu) +{ + if (gpu->clk_bus) + clk_prepare_enable(gpu->clk_bus); + + return 0; +} + +static int disable_axi(struct etnaviv_gpu *gpu) +{ + if (gpu->clk_bus) + clk_disable_unprepare(gpu->clk_bus); + + return 0; +} + +/* + * Hangcheck detection for locked gpu: + */ +static void recover_worker(struct work_struct *work) +{ + struct etnaviv_gpu *gpu = container_of(work, struct etnaviv_gpu, + recover_work); + unsigned long flags; + 
unsigned int i; + + dev_err(gpu->dev, "hangcheck recover!\n"); + + if (pm_runtime_get_sync(gpu->dev) < 0) + return; + + mutex_lock(&gpu->lock); + + /* Only catch the first event, or when manually re-armed */ + if (etnaviv_dump_core) { + etnaviv_core_dump(gpu); + etnaviv_dump_core = false; + } + + etnaviv_hw_reset(gpu); + + /* complete all events, the GPU won't do it after the reset */ + spin_lock_irqsave(&gpu->event_spinlock, flags); + for (i = 0; i < ARRAY_SIZE(gpu->event); i++) { + if (!gpu->event[i].used) + continue; + fence_signal(gpu->event[i].fence); + gpu->event[i].fence = NULL; + gpu->event[i].used = false; + complete(&gpu->event_free); + /* + * Decrement the PM count for each stuck event. This is safe + * even in atomic context as we use ASYNC RPM here. + */ + pm_runtime_put_autosuspend(gpu->dev); + } + spin_unlock_irqrestore(&gpu->event_spinlock, flags); + gpu->completed_fence = gpu->active_fence; + + etnaviv_gpu_hw_init(gpu); + gpu->switch_context = true; + + mutex_unlock(&gpu->lock); + pm_runtime_mark_last_busy(gpu->dev); + pm_runtime_put_autosuspend(gpu->dev); + + /* Retire the buffer objects in a work */ + etnaviv_queue_work(gpu->drm, &gpu->retire_work); +} + +static void hangcheck_timer_reset(struct etnaviv_gpu *gpu) +{ + DBG("%s", dev_name(gpu->dev)); + mod_timer(&gpu->hangcheck_timer, + round_jiffies_up(jiffies + DRM_ETNAVIV_HANGCHECK_JIFFIES)); +} + +static void hangcheck_handler(unsigned long data) +{ + struct etnaviv_gpu *gpu = (struct etnaviv_gpu *)data; + u32 fence = gpu->completed_fence; + bool progress = false; + + if (fence != gpu->hangcheck_fence) { + gpu->hangcheck_fence = fence; + progress = true; + } + + if (!progress) { + u32 dma_addr = gpu_read(gpu, VIVS_FE_DMA_ADDRESS); + int change = dma_addr - gpu->hangcheck_dma_addr; + + if (change < 0 || change > 16) { + gpu->hangcheck_dma_addr = dma_addr; + progress = true; + } + } + + if (!progress && fence_after(gpu->active_fence, fence)) { + dev_err(gpu->dev, "hangcheck detected gpu lockup!\n"); + dev_err(gpu->dev, " completed fence: %u\n", fence); + dev_err(gpu->dev, " active fence: %u\n", + gpu->active_fence); + etnaviv_queue_work(gpu->drm, &gpu->recover_work); + } + + /* if still more pending work, reset the hangcheck timer: */ + if (fence_after(gpu->active_fence, gpu->hangcheck_fence)) + hangcheck_timer_reset(gpu); +} + +static void hangcheck_disable(struct etnaviv_gpu *gpu) +{ + del_timer_sync(&gpu->hangcheck_timer); + cancel_work_sync(&gpu->recover_work); +} + +/* fence object management */ +struct etnaviv_fence { + struct etnaviv_gpu *gpu; + struct fence base; +}; + +static inline struct etnaviv_fence *to_etnaviv_fence(struct fence *fence) +{ + return container_of(fence, struct etnaviv_fence, base); +} + +static const char *etnaviv_fence_get_driver_name(struct fence *fence) +{ + return "etnaviv"; +} + +static const char *etnaviv_fence_get_timeline_name(struct fence *fence) +{ + struct etnaviv_fence *f = to_etnaviv_fence(fence); + + return dev_name(f->gpu->dev); +} + +static bool etnaviv_fence_enable_signaling(struct fence *fence) +{ + return true; +} + +static bool etnaviv_fence_signaled(struct fence *fence) +{ + struct etnaviv_fence *f = to_etnaviv_fence(fence); + + return fence_completed(f->gpu, f->base.seqno); +} + +static void etnaviv_fence_release(struct fence *fence) +{ + struct etnaviv_fence *f = to_etnaviv_fence(fence); + + kfree_rcu(f, base.rcu); +} + +static const struct fence_ops etnaviv_fence_ops = { + .get_driver_name = etnaviv_fence_get_driver_name, + .get_timeline_name = 
etnaviv_fence_get_timeline_name, + .enable_signaling = etnaviv_fence_enable_signaling, + .signaled = etnaviv_fence_signaled, + .wait = fence_default_wait, + .release = etnaviv_fence_release, +}; + +static struct fence *etnaviv_gpu_fence_alloc(struct etnaviv_gpu *gpu) +{ + struct etnaviv_fence *f; + + f = kzalloc(sizeof(*f), GFP_KERNEL); + if (!f) + return NULL; + + f->gpu = gpu; + + fence_init(&f->base, &etnaviv_fence_ops, &gpu->fence_spinlock, + gpu->fence_context, ++gpu->next_fence); + + return &f->base; +} + +int etnaviv_gpu_fence_sync_obj(struct etnaviv_gem_object *etnaviv_obj, + unsigned int context, bool exclusive) +{ + struct reservation_object *robj = etnaviv_obj->resv; + struct reservation_object_list *fobj; + struct fence *fence; + int i, ret; + + if (!exclusive) { + ret = reservation_object_reserve_shared(robj); + if (ret) + return ret; + } + + /* + * If we have any shared fences, then the exclusive fence + * should be ignored as it will already have been signalled. + */ + fobj = reservation_object_get_list(robj); + if (!fobj || fobj->shared_count == 0) { + /* Wait on any existing exclusive fence which isn't our own */ + fence = reservation_object_get_excl(robj); + if (fence && fence->context != context) { + ret = fence_wait(fence, true); + if (ret) + return ret; + } + } + + if (!exclusive || !fobj) + return 0; + + for (i = 0; i < fobj->shared_count; i++) { + fence = rcu_dereference_protected(fobj->shared[i], + reservation_object_held(robj)); + if (fence->context != context) { + ret = fence_wait(fence, true); + if (ret) + return ret; + } + } + + return 0; +} + +/* + * event management: + */ + +static unsigned int event_alloc(struct etnaviv_gpu *gpu) +{ + unsigned long ret, flags; + unsigned int i, event = ~0U; + + ret = wait_for_completion_timeout(&gpu->event_free, + msecs_to_jiffies(10 * 10000)); + if (!ret) + dev_err(gpu->dev, "wait_for_completion_timeout failed"); + + spin_lock_irqsave(&gpu->event_spinlock, flags); + + /* find first free event */ + for (i = 0; i < ARRAY_SIZE(gpu->event); i++) { + if (gpu->event[i].used == false) { + gpu->event[i].used = true; + event = i; + break; + } + } + + spin_unlock_irqrestore(&gpu->event_spinlock, flags); + + return event; +} + +static void event_free(struct etnaviv_gpu *gpu, unsigned int event) +{ + unsigned long flags; + + spin_lock_irqsave(&gpu->event_spinlock, flags); + + if (gpu->event[event].used == false) { + dev_warn(gpu->dev, "event %u is already marked as free", + event); + spin_unlock_irqrestore(&gpu->event_spinlock, flags); + } else { + gpu->event[event].used = false; + spin_unlock_irqrestore(&gpu->event_spinlock, flags); + + complete(&gpu->event_free); + } +} + +/* + * Cmdstream submission/retirement: + */ + +struct etnaviv_cmdbuf *etnaviv_gpu_cmdbuf_new(struct etnaviv_gpu *gpu, u32 size, + size_t nr_bos) +{ + struct etnaviv_cmdbuf *cmdbuf; + size_t sz = size_vstruct(nr_bos, sizeof(cmdbuf->bo[0]), + sizeof(*cmdbuf)); + + cmdbuf = kzalloc(sz, GFP_KERNEL); + if (!cmdbuf) + return NULL; + + cmdbuf->vaddr = dma_alloc_writecombine(gpu->dev, size, &cmdbuf->paddr, + GFP_KERNEL); + if (!cmdbuf->vaddr) { + kfree(cmdbuf); + return NULL; + } + + cmdbuf->gpu = gpu; + cmdbuf->size = size; + + return cmdbuf; +} + +void etnaviv_gpu_cmdbuf_free(struct etnaviv_cmdbuf *cmdbuf) +{ + dma_free_writecombine(cmdbuf->gpu->dev, cmdbuf->size, + cmdbuf->vaddr, cmdbuf->paddr); + kfree(cmdbuf); +} + +static void retire_worker(struct work_struct *work) +{ + struct etnaviv_gpu *gpu = container_of(work, struct etnaviv_gpu, + retire_work); + u32 fence = 
gpu->completed_fence; + struct etnaviv_cmdbuf *cmdbuf, *tmp; + unsigned int i; + + mutex_lock(&gpu->lock); + list_for_each_entry_safe(cmdbuf, tmp, &gpu->active_cmd_list, node) { + if (!fence_is_signaled(cmdbuf->fence)) + break; + + list_del(&cmdbuf->node); + fence_put(cmdbuf->fence); + + for (i = 0; i < cmdbuf->nr_bos; i++) { + struct etnaviv_gem_object *etnaviv_obj = cmdbuf->bo[i]; + + atomic_dec(&etnaviv_obj->gpu_active); + /* drop the refcount taken in etnaviv_gpu_submit */ + etnaviv_gem_put_iova(gpu, &etnaviv_obj->base); + } + + etnaviv_gpu_cmdbuf_free(cmdbuf); + } + + gpu->retired_fence = fence; + + mutex_unlock(&gpu->lock); + + wake_up_all(&gpu->fence_event); +} + +int etnaviv_gpu_wait_fence_interruptible(struct etnaviv_gpu *gpu, + u32 fence, struct timespec *timeout) +{ + int ret; + + if (fence_after(fence, gpu->next_fence)) { + DRM_ERROR("waiting on invalid fence: %u (of %u)\n", + fence, gpu->next_fence); + return -EINVAL; + } + + if (!timeout) { + /* No timeout was requested: just test for completion */ + ret = fence_completed(gpu, fence) ? 0 : -EBUSY; + } else { + unsigned long remaining = etnaviv_timeout_to_jiffies(timeout); + + ret = wait_event_interruptible_timeout(gpu->fence_event, + fence_completed(gpu, fence), + remaining); + if (ret == 0) { + DBG("timeout waiting for fence: %u (retired: %u completed: %u)", + fence, gpu->retired_fence, + gpu->completed_fence); + ret = -ETIMEDOUT; + } else if (ret != -ERESTARTSYS) { + ret = 0; + } + } + + return ret; +} + +/* + * Wait for an object to become inactive. This, on it's own, is not race + * free: the object is moved by the retire worker off the active list, and + * then the iova is put. Moreover, the object could be re-submitted just + * after we notice that it's become inactive. + * + * Although the retirement happens under the gpu lock, we don't want to hold + * that lock in this function while waiting. + */ +int etnaviv_gpu_wait_obj_inactive(struct etnaviv_gpu *gpu, + struct etnaviv_gem_object *etnaviv_obj, struct timespec *timeout) +{ + unsigned long remaining; + long ret; + + if (!timeout) + return !is_active(etnaviv_obj) ? 
0 : -EBUSY; + + remaining = etnaviv_timeout_to_jiffies(timeout); + + ret = wait_event_interruptible_timeout(gpu->fence_event, + !is_active(etnaviv_obj), + remaining); + if (ret > 0) { + struct etnaviv_drm_private *priv = gpu->drm->dev_private; + + /* Synchronise with the retire worker */ + flush_workqueue(priv->wq); + return 0; + } else if (ret == -ERESTARTSYS) { + return -ERESTARTSYS; + } else { + return -ETIMEDOUT; + } +} + +int etnaviv_gpu_pm_get_sync(struct etnaviv_gpu *gpu) +{ + return pm_runtime_get_sync(gpu->dev); +} + +void etnaviv_gpu_pm_put(struct etnaviv_gpu *gpu) +{ + pm_runtime_mark_last_busy(gpu->dev); + pm_runtime_put_autosuspend(gpu->dev); +} + +/* add bo's to gpu's ring, and kick gpu: */ +int etnaviv_gpu_submit(struct etnaviv_gpu *gpu, + struct etnaviv_gem_submit *submit, struct etnaviv_cmdbuf *cmdbuf) +{ + struct fence *fence; + unsigned int event, i; + int ret; + + ret = etnaviv_gpu_pm_get_sync(gpu); + if (ret < 0) + return ret; + + mutex_lock(&gpu->lock); + + /* + * TODO + * + * - flush + * - data endian + * - prefetch + * + */ + + event = event_alloc(gpu); + if (unlikely(event == ~0U)) { + DRM_ERROR("no free event\n"); + ret = -EBUSY; + goto out_unlock; + } + + fence = etnaviv_gpu_fence_alloc(gpu); + if (!fence) { + event_free(gpu, event); + ret = -ENOMEM; + goto out_unlock; + } + + gpu->event[event].fence = fence; + submit->fence = fence->seqno; + gpu->active_fence = submit->fence; + + if (gpu->lastctx != cmdbuf->ctx) { + gpu->mmu->need_flush = true; + gpu->switch_context = true; + gpu->lastctx = cmdbuf->ctx; + } + + etnaviv_buffer_queue(gpu, event, cmdbuf); + + cmdbuf->fence = fence; + list_add_tail(&cmdbuf->node, &gpu->active_cmd_list); + + /* We're committed to adding this command buffer, hold a PM reference */ + pm_runtime_get_noresume(gpu->dev); + + for (i = 0; i < submit->nr_bos; i++) { + struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj; + u32 iova; + + /* Each cmdbuf takes a refcount on the iova */ + etnaviv_gem_get_iova(gpu, &etnaviv_obj->base, &iova); + cmdbuf->bo[i] = etnaviv_obj; + atomic_inc(&etnaviv_obj->gpu_active); + + if (submit->bos[i].flags & ETNA_SUBMIT_BO_WRITE) + reservation_object_add_excl_fence(etnaviv_obj->resv, + fence); + else + reservation_object_add_shared_fence(etnaviv_obj->resv, + fence); + } + cmdbuf->nr_bos = submit->nr_bos; + hangcheck_timer_reset(gpu); + ret = 0; + +out_unlock: + mutex_unlock(&gpu->lock); + + etnaviv_gpu_pm_put(gpu); + + return ret; +} + +/* + * Init/Cleanup: + */ +static irqreturn_t irq_handler(int irq, void *data) +{ + struct etnaviv_gpu *gpu = data; + irqreturn_t ret = IRQ_NONE; + + u32 intr = gpu_read(gpu, VIVS_HI_INTR_ACKNOWLEDGE); + + if (intr != 0) { + int event; + + pm_runtime_mark_last_busy(gpu->dev); + + dev_dbg(gpu->dev, "intr 0x%08x\n", intr); + + if (intr & VIVS_HI_INTR_ACKNOWLEDGE_AXI_BUS_ERROR) { + dev_err(gpu->dev, "AXI bus error\n"); + intr &= ~VIVS_HI_INTR_ACKNOWLEDGE_AXI_BUS_ERROR; + } + + while ((event = ffs(intr)) != 0) { + struct fence *fence; + + event -= 1; + + intr &= ~(1 << event); + + dev_dbg(gpu->dev, "event %u\n", event); + + fence = gpu->event[event].fence; + gpu->event[event].fence = NULL; + fence_signal(fence); + + /* + * Events can be processed out of order. Eg, + * - allocate and queue event 0 + * - allocate event 1 + * - event 0 completes, we process it + * - allocate and queue event 0 + * - event 1 and event 0 complete + * we can end up processing event 0 first, then 1. 
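One detail worth spelling out about the interrupt path above (an annotation, not part of the diff): because events can complete out of order, completed_fence is only advanced when fence_after() says the signalled seqno is newer. fence_after()/fence_after_eq() come from elsewhere in the series (etnaviv_gpu.h below pulls in etnaviv_drv.h); the sketch below shows the usual wraparound-safe idiom for 32-bit sequence numbers and is an assumption about their likely shape, not a quote of the actual definitions.

/* Sketch of a wraparound-safe seqno comparison (assumed, see above). */
static inline bool fence_after(u32 a, u32 b)
{
	/* signed difference stays correct when the counter wraps */
	return (s32)(a - b) > 0;
}

static inline bool fence_after_eq(u32 a, u32 b)
{
	return (s32)(a - b) >= 0;
}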
+ */ + if (fence_after(fence->seqno, gpu->completed_fence)) + gpu->completed_fence = fence->seqno; + + event_free(gpu, event); + + /* + * We need to balance the runtime PM count caused by + * each submission. Upon submission, we increment + * the runtime PM counter, and allocate one event. + * So here, we put the runtime PM count for each + * completed event. + */ + pm_runtime_put_autosuspend(gpu->dev); + } + + /* Retire the buffer objects in a work */ + etnaviv_queue_work(gpu->drm, &gpu->retire_work); + + ret = IRQ_HANDLED; + } + + return ret; +} + +static int etnaviv_gpu_clk_enable(struct etnaviv_gpu *gpu) +{ + int ret; + + ret = enable_clk(gpu); + if (ret) + return ret; + + ret = enable_axi(gpu); + if (ret) { + disable_clk(gpu); + return ret; + } + + return 0; +} + +static int etnaviv_gpu_clk_disable(struct etnaviv_gpu *gpu) +{ + int ret; + + ret = disable_axi(gpu); + if (ret) + return ret; + + ret = disable_clk(gpu); + if (ret) + return ret; + + return 0; +} + +static int etnaviv_gpu_hw_suspend(struct etnaviv_gpu *gpu) +{ + if (gpu->buffer) { + unsigned long timeout; + + /* Replace the last WAIT with END */ + etnaviv_buffer_end(gpu); + + /* + * We know that only the FE is busy here, this should + * happen quickly (as the WAIT is only 200 cycles). If + * we fail, just warn and continue. + */ + timeout = jiffies + msecs_to_jiffies(100); + do { + u32 idle = gpu_read(gpu, VIVS_HI_IDLE_STATE); + + if ((idle & gpu->idle_mask) == gpu->idle_mask) + break; + + if (time_is_before_jiffies(timeout)) { + dev_warn(gpu->dev, + "timed out waiting for idle: idle=0x%x\n", + idle); + break; + } + + udelay(5); + } while (1); + } + + return etnaviv_gpu_clk_disable(gpu); +} + +#ifdef CONFIG_PM +static int etnaviv_gpu_hw_resume(struct etnaviv_gpu *gpu) +{ + u32 clock; + int ret; + + ret = mutex_lock_killable(&gpu->lock); + if (ret) + return ret; + + clock = VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS | + VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(0x40); + + etnaviv_gpu_load_clock(gpu, clock); + etnaviv_gpu_hw_init(gpu); + + gpu->switch_context = true; + + mutex_unlock(&gpu->lock); + + return 0; +} +#endif + +static int etnaviv_gpu_bind(struct device *dev, struct device *master, + void *data) +{ + struct drm_device *drm = data; + struct etnaviv_drm_private *priv = drm->dev_private; + struct etnaviv_gpu *gpu = dev_get_drvdata(dev); + int ret; + +#ifdef CONFIG_PM + ret = pm_runtime_get_sync(gpu->dev); +#else + ret = etnaviv_gpu_clk_enable(gpu); +#endif + if (ret < 0) + return ret; + + gpu->drm = drm; + gpu->fence_context = fence_context_alloc(1); + spin_lock_init(&gpu->fence_spinlock); + + INIT_LIST_HEAD(&gpu->active_cmd_list); + INIT_WORK(&gpu->retire_work, retire_worker); + INIT_WORK(&gpu->recover_work, recover_worker); + init_waitqueue_head(&gpu->fence_event); + + setup_timer(&gpu->hangcheck_timer, hangcheck_handler, + (unsigned long)gpu); + + priv->gpu[priv->num_gpus++] = gpu; + + pm_runtime_mark_last_busy(gpu->dev); + pm_runtime_put_autosuspend(gpu->dev); + + return 0; +} + +static void etnaviv_gpu_unbind(struct device *dev, struct device *master, + void *data) +{ + struct etnaviv_gpu *gpu = dev_get_drvdata(dev); + + DBG("%s", dev_name(gpu->dev)); + + hangcheck_disable(gpu); + +#ifdef CONFIG_PM + pm_runtime_get_sync(gpu->dev); + pm_runtime_put_sync_suspend(gpu->dev); +#else + etnaviv_gpu_hw_suspend(gpu); +#endif + + if (gpu->buffer) { + etnaviv_gpu_cmdbuf_free(gpu->buffer); + gpu->buffer = NULL; + } + + if (gpu->mmu) { + etnaviv_iommu_destroy(gpu->mmu); + gpu->mmu = NULL; + } + + gpu->drm = NULL; +} + +static 
const struct component_ops gpu_ops = { + .bind = etnaviv_gpu_bind, + .unbind = etnaviv_gpu_unbind, +}; + +static const struct of_device_id etnaviv_gpu_match[] = { + { + .compatible = "vivante,gc" + }, + { /* sentinel */ } +}; + +static int etnaviv_gpu_platform_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct etnaviv_gpu *gpu; + int err = 0; + + gpu = devm_kzalloc(dev, sizeof(*gpu), GFP_KERNEL); + if (!gpu) + return -ENOMEM; + + gpu->dev = &pdev->dev; + mutex_init(&gpu->lock); + + /* + * Set the GPU base address to the start of physical memory. This + * ensures that if we have up to 2GB, the v1 MMU can address the + * highest memory. This is important as command buffers may be + * allocated outside of this limit. + */ + gpu->memory_base = PHYS_OFFSET; + + /* Map registers: */ + gpu->mmio = etnaviv_ioremap(pdev, NULL, dev_name(gpu->dev)); + if (IS_ERR(gpu->mmio)) + return PTR_ERR(gpu->mmio); + + /* Get Interrupt: */ + gpu->irq = platform_get_irq(pdev, 0); + if (gpu->irq < 0) { + err = gpu->irq; + dev_err(dev, "failed to get irq: %d\n", err); + goto fail; + } + + err = devm_request_irq(&pdev->dev, gpu->irq, irq_handler, 0, + dev_name(gpu->dev), gpu); + if (err) { + dev_err(dev, "failed to request IRQ%u: %d\n", gpu->irq, err); + goto fail; + } + + /* Get Clocks: */ + gpu->clk_bus = devm_clk_get(&pdev->dev, "bus"); + DBG("clk_bus: %p", gpu->clk_bus); + if (IS_ERR(gpu->clk_bus)) + gpu->clk_bus = NULL; + + gpu->clk_core = devm_clk_get(&pdev->dev, "core"); + DBG("clk_core: %p", gpu->clk_core); + if (IS_ERR(gpu->clk_core)) + gpu->clk_core = NULL; + + gpu->clk_shader = devm_clk_get(&pdev->dev, "shader"); + DBG("clk_shader: %p", gpu->clk_shader); + if (IS_ERR(gpu->clk_shader)) + gpu->clk_shader = NULL; + + /* TODO: figure out max mapped size */ + dev_set_drvdata(dev, gpu); + + /* + * We treat the device as initially suspended. The runtime PM + * autosuspend delay is rather arbitary: no measurements have + * yet been performed to determine an appropriate value. 
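On power management (an annotation, not part of the diff): the probe path right below enables runtime-PM autosuspend with a 200 ms delay, and every path in this file that touches registers brackets the access with a runtime-PM reference — pm_runtime_get_sync() before, pm_runtime_mark_last_busy() plus pm_runtime_put_autosuspend() after — while each submission additionally holds a no-resume reference that the interrupt handler drops per completed event. A condensed sketch of that bracket, for illustration only:

/* Illustration of the runtime-PM bracket used throughout this file. */
static int etnaviv_touch_registers(struct etnaviv_gpu *gpu)
{
	int ret = pm_runtime_get_sync(gpu->dev);	/* resume if suspended */

	if (ret < 0)
		return ret;

	/* ... MMIO access via gpu_read()/gpu_write() goes here ... */

	pm_runtime_mark_last_busy(gpu->dev);	/* re-arm the autosuspend timer */
	pm_runtime_put_autosuspend(gpu->dev);	/* may suspend after the delay */
	return 0;
}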
+ */ + pm_runtime_use_autosuspend(gpu->dev); + pm_runtime_set_autosuspend_delay(gpu->dev, 200); + pm_runtime_enable(gpu->dev); + + err = component_add(&pdev->dev, &gpu_ops); + if (err < 0) { + dev_err(&pdev->dev, "failed to register component: %d\n", err); + goto fail; + } + + return 0; + +fail: + return err; +} + +static int etnaviv_gpu_platform_remove(struct platform_device *pdev) +{ + component_del(&pdev->dev, &gpu_ops); + pm_runtime_disable(&pdev->dev); + return 0; +} + +#ifdef CONFIG_PM +static int etnaviv_gpu_rpm_suspend(struct device *dev) +{ + struct etnaviv_gpu *gpu = dev_get_drvdata(dev); + u32 idle, mask; + + /* If we have outstanding fences, we're not idle */ + if (gpu->completed_fence != gpu->active_fence) + return -EBUSY; + + /* Check whether the hardware (except FE) is idle */ + mask = gpu->idle_mask & ~VIVS_HI_IDLE_STATE_FE; + idle = gpu_read(gpu, VIVS_HI_IDLE_STATE) & mask; + if (idle != mask) + return -EBUSY; + + return etnaviv_gpu_hw_suspend(gpu); +} + +static int etnaviv_gpu_rpm_resume(struct device *dev) +{ + struct etnaviv_gpu *gpu = dev_get_drvdata(dev); + int ret; + + ret = etnaviv_gpu_clk_enable(gpu); + if (ret) + return ret; + + /* Re-initialise the basic hardware state */ + if (gpu->drm && gpu->buffer) { + ret = etnaviv_gpu_hw_resume(gpu); + if (ret) { + etnaviv_gpu_clk_disable(gpu); + return ret; + } + } + + return 0; +} +#endif + +static const struct dev_pm_ops etnaviv_gpu_pm_ops = { + SET_RUNTIME_PM_OPS(etnaviv_gpu_rpm_suspend, etnaviv_gpu_rpm_resume, + NULL) +}; + +struct platform_driver etnaviv_gpu_driver = { + .driver = { + .name = "etnaviv-gpu", + .owner = THIS_MODULE, + .pm = &etnaviv_gpu_pm_ops, + .of_match_table = etnaviv_gpu_match, + }, + .probe = etnaviv_gpu_platform_probe, + .remove = etnaviv_gpu_platform_remove, + .id_table = gpu_ids, +}; diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h new file mode 100644 index 000000000000..c75d50359ab0 --- /dev/null +++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h @@ -0,0 +1,209 @@ +/* + * Copyright (C) 2015 Etnaviv Project + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + */ + +#ifndef __ETNAVIV_GPU_H__ +#define __ETNAVIV_GPU_H__ + +#include +#include + +#include "etnaviv_drv.h" + +struct etnaviv_gem_submit; + +struct etnaviv_chip_identity { + /* Chip model. */ + u32 model; + + /* Revision value.*/ + u32 revision; + + /* Supported feature fields. */ + u32 features; + + /* Supported minor feature fields. */ + u32 minor_features0; + + /* Supported minor feature 1 fields. */ + u32 minor_features1; + + /* Supported minor feature 2 fields. */ + u32 minor_features2; + + /* Supported minor feature 3 fields. */ + u32 minor_features3; + + /* Number of streams supported. */ + u32 stream_count; + + /* Total number of temporary registers per thread. */ + u32 register_max; + + /* Maximum number of threads. */ + u32 thread_count; + + /* Number of shader cores. */ + u32 shader_core_count; + + /* Size of the vertex cache. 
*/ + u32 vertex_cache_size; + + /* Number of entries in the vertex output buffer. */ + u32 vertex_output_buffer_size; + + /* Number of pixel pipes. */ + u32 pixel_pipes; + + /* Number of instructions. */ + u32 instruction_count; + + /* Number of constants. */ + u32 num_constants; + + /* Buffer size */ + u32 buffer_size; +}; + +struct etnaviv_event { + bool used; + struct fence *fence; +}; + +struct etnaviv_cmdbuf; + +struct etnaviv_gpu { + struct drm_device *drm; + struct device *dev; + struct mutex lock; + struct etnaviv_chip_identity identity; + struct etnaviv_file_private *lastctx; + bool switch_context; + + /* 'ring'-buffer: */ + struct etnaviv_cmdbuf *buffer; + + /* bus base address of memory */ + u32 memory_base; + + /* event management: */ + struct etnaviv_event event[30]; + struct completion event_free; + spinlock_t event_spinlock; + + /* list of currently in-flight command buffers */ + struct list_head active_cmd_list; + + u32 idle_mask; + + /* Fencing support */ + u32 next_fence; + u32 active_fence; + u32 completed_fence; + u32 retired_fence; + wait_queue_head_t fence_event; + unsigned int fence_context; + spinlock_t fence_spinlock; + + /* worker for handling active-list retiring: */ + struct work_struct retire_work; + + void __iomem *mmio; + int irq; + + struct etnaviv_iommu *mmu; + + /* Power Control: */ + struct clk *clk_bus; + struct clk *clk_core; + struct clk *clk_shader; + + /* Hang Detction: */ +#define DRM_ETNAVIV_HANGCHECK_PERIOD 500 /* in ms */ +#define DRM_ETNAVIV_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_ETNAVIV_HANGCHECK_PERIOD) + struct timer_list hangcheck_timer; + u32 hangcheck_fence; + u32 hangcheck_dma_addr; + struct work_struct recover_work; +}; + +struct etnaviv_cmdbuf { + /* device this cmdbuf is allocated for */ + struct etnaviv_gpu *gpu; + /* user context key, must be unique between all active users */ + struct etnaviv_file_private *ctx; + /* cmdbuf properties */ + void *vaddr; + dma_addr_t paddr; + u32 size; + u32 user_size; + /* fence after which this buffer is to be disposed */ + struct fence *fence; + /* target exec state */ + u32 exec_state; + /* per GPU in-flight list */ + struct list_head node; + /* BOs attached to this command buffer */ + unsigned int nr_bos; + struct etnaviv_gem_object *bo[0]; +}; + +static inline void gpu_write(struct etnaviv_gpu *gpu, u32 reg, u32 data) +{ + etnaviv_writel(data, gpu->mmio + reg); +} + +static inline u32 gpu_read(struct etnaviv_gpu *gpu, u32 reg) +{ + return etnaviv_readl(gpu->mmio + reg); +} + +static inline bool fence_completed(struct etnaviv_gpu *gpu, u32 fence) +{ + return fence_after_eq(gpu->completed_fence, fence); +} + +static inline bool fence_retired(struct etnaviv_gpu *gpu, u32 fence) +{ + return fence_after_eq(gpu->retired_fence, fence); +} + +int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value); + +int etnaviv_gpu_init(struct etnaviv_gpu *gpu); + +#ifdef CONFIG_DEBUG_FS +int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m); +#endif + +int etnaviv_gpu_fence_sync_obj(struct etnaviv_gem_object *etnaviv_obj, + unsigned int context, bool exclusive); + +void etnaviv_gpu_retire(struct etnaviv_gpu *gpu); +int etnaviv_gpu_wait_fence_interruptible(struct etnaviv_gpu *gpu, + u32 fence, struct timespec *timeout); +int etnaviv_gpu_wait_obj_inactive(struct etnaviv_gpu *gpu, + struct etnaviv_gem_object *etnaviv_obj, struct timespec *timeout); +int etnaviv_gpu_submit(struct etnaviv_gpu *gpu, + struct etnaviv_gem_submit *submit, struct etnaviv_cmdbuf *cmdbuf); +struct etnaviv_cmdbuf 
*etnaviv_gpu_cmdbuf_new(struct etnaviv_gpu *gpu, + u32 size, size_t nr_bos); +void etnaviv_gpu_cmdbuf_free(struct etnaviv_cmdbuf *cmdbuf); +int etnaviv_gpu_pm_get_sync(struct etnaviv_gpu *gpu); +void etnaviv_gpu_pm_put(struct etnaviv_gpu *gpu); + +extern struct platform_driver etnaviv_gpu_driver; + +#endif /* __ETNAVIV_GPU_H__ */ diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu.c b/drivers/gpu/drm/etnaviv/etnaviv_iommu.c new file mode 100644 index 000000000000..522cfd447892 --- /dev/null +++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu.c @@ -0,0 +1,240 @@ +/* + * Copyright (C) 2014 Christian Gmeiner + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + */ + +#include +#include +#include +#include +#include +#include + +#include "etnaviv_gpu.h" +#include "etnaviv_mmu.h" +#include "etnaviv_iommu.h" +#include "state_hi.xml.h" + +#define PT_SIZE SZ_2M +#define PT_ENTRIES (PT_SIZE / sizeof(u32)) + +#define GPU_MEM_START 0x80000000 + +struct etnaviv_iommu_domain_pgtable { + u32 *pgtable; + dma_addr_t paddr; +}; + +struct etnaviv_iommu_domain { + struct iommu_domain domain; + struct device *dev; + void *bad_page_cpu; + dma_addr_t bad_page_dma; + struct etnaviv_iommu_domain_pgtable pgtable; + spinlock_t map_lock; +}; + +static struct etnaviv_iommu_domain *to_etnaviv_domain(struct iommu_domain *domain) +{ + return container_of(domain, struct etnaviv_iommu_domain, domain); +} + +static int pgtable_alloc(struct etnaviv_iommu_domain_pgtable *pgtable, + size_t size) +{ + pgtable->pgtable = dma_alloc_coherent(NULL, size, &pgtable->paddr, GFP_KERNEL); + if (!pgtable->pgtable) + return -ENOMEM; + + return 0; +} + +static void pgtable_free(struct etnaviv_iommu_domain_pgtable *pgtable, + size_t size) +{ + dma_free_coherent(NULL, size, pgtable->pgtable, pgtable->paddr); +} + +static u32 pgtable_read(struct etnaviv_iommu_domain_pgtable *pgtable, + unsigned long iova) +{ + /* calcuate index into page table */ + unsigned int index = (iova - GPU_MEM_START) / SZ_4K; + phys_addr_t paddr; + + paddr = pgtable->pgtable[index]; + + return paddr; +} + +static void pgtable_write(struct etnaviv_iommu_domain_pgtable *pgtable, + unsigned long iova, phys_addr_t paddr) +{ + /* calcuate index into page table */ + unsigned int index = (iova - GPU_MEM_START) / SZ_4K; + + pgtable->pgtable[index] = paddr; +} + +static int __etnaviv_iommu_init(struct etnaviv_iommu_domain *etnaviv_domain) +{ + u32 *p; + int ret, i; + + etnaviv_domain->bad_page_cpu = dma_alloc_coherent(etnaviv_domain->dev, + SZ_4K, + &etnaviv_domain->bad_page_dma, + GFP_KERNEL); + if (!etnaviv_domain->bad_page_cpu) + return -ENOMEM; + + p = etnaviv_domain->bad_page_cpu; + for (i = 0; i < SZ_4K / 4; i++) + *p++ = 0xdead55aa; + + ret = pgtable_alloc(&etnaviv_domain->pgtable, PT_SIZE); + if (ret < 0) { + dma_free_coherent(etnaviv_domain->dev, SZ_4K, + etnaviv_domain->bad_page_cpu, + etnaviv_domain->bad_page_dma); + return ret; + } + + for (i = 0; i < PT_ENTRIES; i++) + etnaviv_domain->pgtable.pgtable[i] = + etnaviv_domain->bad_page_dma; + + 
spin_lock_init(&etnaviv_domain->map_lock); + + return 0; +} + +static void etnaviv_domain_free(struct iommu_domain *domain) +{ + struct etnaviv_iommu_domain *etnaviv_domain = to_etnaviv_domain(domain); + + pgtable_free(&etnaviv_domain->pgtable, PT_SIZE); + + dma_free_coherent(etnaviv_domain->dev, SZ_4K, + etnaviv_domain->bad_page_cpu, + etnaviv_domain->bad_page_dma); + + kfree(etnaviv_domain); +} + +static int etnaviv_iommuv1_map(struct iommu_domain *domain, unsigned long iova, + phys_addr_t paddr, size_t size, int prot) +{ + struct etnaviv_iommu_domain *etnaviv_domain = to_etnaviv_domain(domain); + + if (size != SZ_4K) + return -EINVAL; + + spin_lock(&etnaviv_domain->map_lock); + pgtable_write(&etnaviv_domain->pgtable, iova, paddr); + spin_unlock(&etnaviv_domain->map_lock); + + return 0; +} + +static size_t etnaviv_iommuv1_unmap(struct iommu_domain *domain, + unsigned long iova, size_t size) +{ + struct etnaviv_iommu_domain *etnaviv_domain = to_etnaviv_domain(domain); + + if (size != SZ_4K) + return -EINVAL; + + spin_lock(&etnaviv_domain->map_lock); + pgtable_write(&etnaviv_domain->pgtable, iova, + etnaviv_domain->bad_page_dma); + spin_unlock(&etnaviv_domain->map_lock); + + return SZ_4K; +} + +static phys_addr_t etnaviv_iommu_iova_to_phys(struct iommu_domain *domain, + dma_addr_t iova) +{ + struct etnaviv_iommu_domain *etnaviv_domain = to_etnaviv_domain(domain); + + return pgtable_read(&etnaviv_domain->pgtable, iova); +} + +static size_t etnaviv_iommuv1_dump_size(struct iommu_domain *domain) +{ + return PT_SIZE; +} + +static void etnaviv_iommuv1_dump(struct iommu_domain *domain, void *buf) +{ + struct etnaviv_iommu_domain *etnaviv_domain = to_etnaviv_domain(domain); + + memcpy(buf, etnaviv_domain->pgtable.pgtable, PT_SIZE); +} + +static struct etnaviv_iommu_ops etnaviv_iommu_ops = { + .ops = { + .domain_free = etnaviv_domain_free, + .map = etnaviv_iommuv1_map, + .unmap = etnaviv_iommuv1_unmap, + .iova_to_phys = etnaviv_iommu_iova_to_phys, + .pgsize_bitmap = SZ_4K, + }, + .dump_size = etnaviv_iommuv1_dump_size, + .dump = etnaviv_iommuv1_dump, +}; + +void etnaviv_iommu_domain_restore(struct etnaviv_gpu *gpu, + struct iommu_domain *domain) +{ + struct etnaviv_iommu_domain *etnaviv_domain = to_etnaviv_domain(domain); + u32 pgtable; + + /* set page table address in MC */ + pgtable = (u32)etnaviv_domain->pgtable.paddr; + + gpu_write(gpu, VIVS_MC_MMU_FE_PAGE_TABLE, pgtable); + gpu_write(gpu, VIVS_MC_MMU_TX_PAGE_TABLE, pgtable); + gpu_write(gpu, VIVS_MC_MMU_PE_PAGE_TABLE, pgtable); + gpu_write(gpu, VIVS_MC_MMU_PEZ_PAGE_TABLE, pgtable); + gpu_write(gpu, VIVS_MC_MMU_RA_PAGE_TABLE, pgtable); +} + +struct iommu_domain *etnaviv_iommu_domain_alloc(struct etnaviv_gpu *gpu) +{ + struct etnaviv_iommu_domain *etnaviv_domain; + int ret; + + etnaviv_domain = kzalloc(sizeof(*etnaviv_domain), GFP_KERNEL); + if (!etnaviv_domain) + return NULL; + + etnaviv_domain->dev = gpu->dev; + + etnaviv_domain->domain.type = __IOMMU_DOMAIN_PAGING; + etnaviv_domain->domain.ops = &etnaviv_iommu_ops.ops; + etnaviv_domain->domain.geometry.aperture_start = GPU_MEM_START; + etnaviv_domain->domain.geometry.aperture_end = GPU_MEM_START + PT_ENTRIES * SZ_4K - 1; + + ret = __etnaviv_iommu_init(etnaviv_domain); + if (ret) + goto out_free; + + return &etnaviv_domain->domain; + +out_free: + kfree(etnaviv_domain); + return NULL; +} diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu.h b/drivers/gpu/drm/etnaviv/etnaviv_iommu.h new file mode 100644 index 000000000000..cf45503f6b6f --- /dev/null +++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu.h 
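A quick worked example for the v1 MMU above (an annotation, not part of the diff): the page table is a flat array of 32-bit entries — PT_SIZE of 2 MiB divided by sizeof(u32) gives 512K entries, each mapping one 4 KiB page — so the aperture covers 2 GiB starting at GPU_MEM_START (0x80000000), and unused entries all point at the poisoned bad page. The index math mirrors pgtable_read()/pgtable_write():

/* Illustration of the v1 page-table index math used above. */
static u32 iova_to_index(u32 iova)
{
	return (iova - GPU_MEM_START) / SZ_4K;
}

/*
 * Examples: iova 0x80000000 -> entry 0, 0x80001000 -> entry 1,
 * 0x8000a000 -> entry 10; the last entry, PT_ENTRIES - 1, maps the
 * page ending at GPU_MEM_START + 2 GiB - 1.
 */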
@@ -0,0 +1,28 @@ +/* + * Copyright (C) 2014 Christian Gmeiner + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + */ + +#ifndef __ETNAVIV_IOMMU_H__ +#define __ETNAVIV_IOMMU_H__ + +#include +struct etnaviv_gpu; + +struct iommu_domain *etnaviv_iommu_domain_alloc(struct etnaviv_gpu *gpu); +void etnaviv_iommu_domain_restore(struct etnaviv_gpu *gpu, + struct iommu_domain *domain); +struct iommu_domain *etnaviv_iommu_v2_domain_alloc(struct etnaviv_gpu *gpu); + +#endif /* __ETNAVIV_IOMMU_H__ */ diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c new file mode 100644 index 000000000000..fbb4aed3dc80 --- /dev/null +++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c @@ -0,0 +1,33 @@ +/* + * Copyright (C) 2014 Christian Gmeiner + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + */ + +#include +#include +#include +#include +#include +#include + +#include "etnaviv_gpu.h" +#include "etnaviv_iommu.h" +#include "state_hi.xml.h" + + +struct iommu_domain *etnaviv_iommu_v2_domain_alloc(struct etnaviv_gpu *gpu) +{ + /* TODO */ + return NULL; +} diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.h b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.h new file mode 100644 index 000000000000..603ea41c5389 --- /dev/null +++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.h @@ -0,0 +1,25 @@ +/* + * Copyright (C) 2014 Christian Gmeiner + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . 
+ */ + +#ifndef __ETNAVIV_IOMMU_V2_H__ +#define __ETNAVIV_IOMMU_V2_H__ + +#include +struct etnaviv_gpu; + +struct iommu_domain *etnaviv_iommu_v2_domain_alloc(struct etnaviv_gpu *gpu); + +#endif /* __ETNAVIV_IOMMU_V2_H__ */ diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c new file mode 100644 index 000000000000..6743bc648dc8 --- /dev/null +++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c @@ -0,0 +1,299 @@ +/* + * Copyright (C) 2015 Etnaviv Project + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + */ + +#include "etnaviv_drv.h" +#include "etnaviv_gem.h" +#include "etnaviv_gpu.h" +#include "etnaviv_mmu.h" + +static int etnaviv_fault_handler(struct iommu_domain *iommu, struct device *dev, + unsigned long iova, int flags, void *arg) +{ + DBG("*** fault: iova=%08lx, flags=%d", iova, flags); + return 0; +} + +int etnaviv_iommu_map(struct etnaviv_iommu *iommu, u32 iova, + struct sg_table *sgt, unsigned len, int prot) +{ + struct iommu_domain *domain = iommu->domain; + struct scatterlist *sg; + unsigned int da = iova; + unsigned int i, j; + int ret; + + if (!domain || !sgt) + return -EINVAL; + + for_each_sg(sgt->sgl, sg, sgt->nents, i) { + u32 pa = sg_dma_address(sg) - sg->offset; + size_t bytes = sg_dma_len(sg) + sg->offset; + + VERB("map[%d]: %08x %08x(%zx)", i, iova, pa, bytes); + + ret = iommu_map(domain, da, pa, bytes, prot); + if (ret) + goto fail; + + da += bytes; + } + + return 0; + +fail: + da = iova; + + for_each_sg(sgt->sgl, sg, i, j) { + size_t bytes = sg_dma_len(sg) + sg->offset; + + iommu_unmap(domain, da, bytes); + da += bytes; + } + return ret; +} + +int etnaviv_iommu_unmap(struct etnaviv_iommu *iommu, u32 iova, + struct sg_table *sgt, unsigned len) +{ + struct iommu_domain *domain = iommu->domain; + struct scatterlist *sg; + unsigned int da = iova; + int i; + + for_each_sg(sgt->sgl, sg, sgt->nents, i) { + size_t bytes = sg_dma_len(sg) + sg->offset; + size_t unmapped; + + unmapped = iommu_unmap(domain, da, bytes); + if (unmapped < bytes) + return unmapped; + + VERB("unmap[%d]: %08x(%zx)", i, iova, bytes); + + BUG_ON(!PAGE_ALIGNED(bytes)); + + da += bytes; + } + + return 0; +} + +static void etnaviv_iommu_remove_mapping(struct etnaviv_iommu *mmu, + struct etnaviv_vram_mapping *mapping) +{ + struct etnaviv_gem_object *etnaviv_obj = mapping->object; + + etnaviv_iommu_unmap(mmu, mapping->vram_node.start, + etnaviv_obj->sgt, etnaviv_obj->base.size); + drm_mm_remove_node(&mapping->vram_node); +} + +int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu, + struct etnaviv_gem_object *etnaviv_obj, u32 memory_base, + struct etnaviv_vram_mapping *mapping) +{ + struct etnaviv_vram_mapping *free = NULL; + struct sg_table *sgt = etnaviv_obj->sgt; + struct drm_mm_node *node; + int ret; + + lockdep_assert_held(&etnaviv_obj->lock); + + mutex_lock(&mmu->lock); + + /* v1 MMU can optimize single entry (contiguous) scatterlists */ + if (sgt->nents == 1 && !(etnaviv_obj->flags & ETNA_BO_FORCE_MMU)) { + u32 iova; + + iova = sg_dma_address(sgt->sgl) - memory_base; + 
if (iova < 0x80000000 - sg_dma_len(sgt->sgl)) { + mapping->iova = iova; + list_add_tail(&mapping->mmu_node, &mmu->mappings); + mutex_unlock(&mmu->lock); + return 0; + } + } + + node = &mapping->vram_node; + while (1) { + struct etnaviv_vram_mapping *m, *n; + struct list_head list; + bool found; + + ret = drm_mm_insert_node_in_range(&mmu->mm, node, + etnaviv_obj->base.size, 0, mmu->last_iova, ~0UL, + DRM_MM_SEARCH_DEFAULT); + + if (ret != -ENOSPC) + break; + + /* + * If we did not search from the start of the MMU region, + * try again in case there are free slots. + */ + if (mmu->last_iova) { + mmu->last_iova = 0; + mmu->need_flush = true; + continue; + } + + /* Try to retire some entries */ + drm_mm_init_scan(&mmu->mm, etnaviv_obj->base.size, 0, 0); + + found = 0; + INIT_LIST_HEAD(&list); + list_for_each_entry(free, &mmu->mappings, mmu_node) { + /* If this vram node has not been used, skip this. */ + if (!free->vram_node.mm) + continue; + + /* + * If the iova is pinned, then it's in-use, + * so we must keep its mapping. + */ + if (free->use) + continue; + + list_add(&free->scan_node, &list); + if (drm_mm_scan_add_block(&free->vram_node)) { + found = true; + break; + } + } + + if (!found) { + /* Nothing found, clean up and fail */ + list_for_each_entry_safe(m, n, &list, scan_node) + BUG_ON(drm_mm_scan_remove_block(&m->vram_node)); + break; + } + + /* + * drm_mm does not allow any other operations while + * scanning, so we have to remove all blocks first. + * If drm_mm_scan_remove_block() returns false, we + * can leave the block pinned. + */ + list_for_each_entry_safe(m, n, &list, scan_node) + if (!drm_mm_scan_remove_block(&m->vram_node)) + list_del_init(&m->scan_node); + + /* + * Unmap the blocks which need to be reaped from the MMU. + * Clear the mmu pointer to prevent the get_iova finding + * this mapping. + */ + list_for_each_entry_safe(m, n, &list, scan_node) { + etnaviv_iommu_remove_mapping(mmu, m); + m->mmu = NULL; + list_del_init(&m->mmu_node); + list_del_init(&m->scan_node); + } + + /* + * We removed enough mappings so that the new allocation will + * succeed. Ensure that the MMU will be flushed before the + * associated commit requesting this mapping, and retry the + * allocation one more time. 
+ */ + mmu->need_flush = true; + } + + if (ret < 0) { + mutex_unlock(&mmu->lock); + return ret; + } + + mmu->last_iova = node->start + etnaviv_obj->base.size; + mapping->iova = node->start; + ret = etnaviv_iommu_map(mmu, node->start, sgt, etnaviv_obj->base.size, + IOMMU_READ | IOMMU_WRITE); + + if (ret < 0) { + drm_mm_remove_node(node); + mutex_unlock(&mmu->lock); + return ret; + } + + list_add_tail(&mapping->mmu_node, &mmu->mappings); + mutex_unlock(&mmu->lock); + + return ret; +} + +void etnaviv_iommu_unmap_gem(struct etnaviv_iommu *mmu, + struct etnaviv_vram_mapping *mapping) +{ + WARN_ON(mapping->use); + + mutex_lock(&mmu->lock); + + /* If the vram node is on the mm, unmap and remove the node */ + if (mapping->vram_node.mm == &mmu->mm) + etnaviv_iommu_remove_mapping(mmu, mapping); + + list_del(&mapping->mmu_node); + mutex_unlock(&mmu->lock); +} + +void etnaviv_iommu_destroy(struct etnaviv_iommu *mmu) +{ + drm_mm_takedown(&mmu->mm); + iommu_domain_free(mmu->domain); + kfree(mmu); +} + +struct etnaviv_iommu *etnaviv_iommu_new(struct etnaviv_gpu *gpu, + struct iommu_domain *domain, enum etnaviv_iommu_version version) +{ + struct etnaviv_iommu *mmu; + + mmu = kzalloc(sizeof(*mmu), GFP_KERNEL); + if (!mmu) + return ERR_PTR(-ENOMEM); + + mmu->domain = domain; + mmu->gpu = gpu; + mmu->version = version; + mutex_init(&mmu->lock); + INIT_LIST_HEAD(&mmu->mappings); + + drm_mm_init(&mmu->mm, domain->geometry.aperture_start, + domain->geometry.aperture_end - + domain->geometry.aperture_start + 1); + + iommu_set_fault_handler(domain, etnaviv_fault_handler, gpu->dev); + + return mmu; +} + +size_t etnaviv_iommu_dump_size(struct etnaviv_iommu *iommu) +{ + struct etnaviv_iommu_ops *ops; + + ops = container_of(iommu->domain->ops, struct etnaviv_iommu_ops, ops); + + return ops->dump_size(iommu->domain); +} + +void etnaviv_iommu_dump(struct etnaviv_iommu *iommu, void *buf) +{ + struct etnaviv_iommu_ops *ops; + + ops = container_of(iommu->domain->ops, struct etnaviv_iommu_ops, ops); + + ops->dump(iommu->domain, buf); +} diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.h b/drivers/gpu/drm/etnaviv/etnaviv_mmu.h new file mode 100644 index 000000000000..fff215a47630 --- /dev/null +++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.h @@ -0,0 +1,71 @@ +/* + * Copyright (C) 2015 Etnaviv Project + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . 
+ */ + +#ifndef __ETNAVIV_MMU_H__ +#define __ETNAVIV_MMU_H__ + +#include + +enum etnaviv_iommu_version { + ETNAVIV_IOMMU_V1 = 0, + ETNAVIV_IOMMU_V2, +}; + +struct etnaviv_gpu; +struct etnaviv_vram_mapping; + +struct etnaviv_iommu_ops { + struct iommu_ops ops; + size_t (*dump_size)(struct iommu_domain *); + void (*dump)(struct iommu_domain *, void *); +}; + +struct etnaviv_iommu { + struct etnaviv_gpu *gpu; + struct iommu_domain *domain; + + enum etnaviv_iommu_version version; + + /* memory manager for GPU address area */ + struct mutex lock; + struct list_head mappings; + struct drm_mm mm; + u32 last_iova; + bool need_flush; +}; + +struct etnaviv_gem_object; + +int etnaviv_iommu_attach(struct etnaviv_iommu *iommu, const char **names, + int cnt); +int etnaviv_iommu_map(struct etnaviv_iommu *iommu, u32 iova, + struct sg_table *sgt, unsigned len, int prot); +int etnaviv_iommu_unmap(struct etnaviv_iommu *iommu, u32 iova, + struct sg_table *sgt, unsigned len); +int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu, + struct etnaviv_gem_object *etnaviv_obj, u32 memory_base, + struct etnaviv_vram_mapping *mapping); +void etnaviv_iommu_unmap_gem(struct etnaviv_iommu *mmu, + struct etnaviv_vram_mapping *mapping); +void etnaviv_iommu_destroy(struct etnaviv_iommu *iommu); + +size_t etnaviv_iommu_dump_size(struct etnaviv_iommu *iommu); +void etnaviv_iommu_dump(struct etnaviv_iommu *iommu, void *buf); + +struct etnaviv_iommu *etnaviv_iommu_new(struct etnaviv_gpu *gpu, + struct iommu_domain *domain, enum etnaviv_iommu_version version); + +#endif /* __ETNAVIV_MMU_H__ */ diff --git a/drivers/gpu/drm/etnaviv/state.xml.h b/drivers/gpu/drm/etnaviv/state.xml.h new file mode 100644 index 000000000000..368218304566 --- /dev/null +++ b/drivers/gpu/drm/etnaviv/state.xml.h @@ -0,0 +1,351 @@ +#ifndef STATE_XML +#define STATE_XML + +/* Autogenerated file, DO NOT EDIT manually! 
+ +This file was generated by the rules-ng-ng headergen tool in this git repository: +http://0x04.net/cgit/index.cgi/rules-ng-ng +git clone git://0x04.net/rules-ng-ng + +The rules-ng-ng source files this header was generated from are: +- state.xml ( 18882 bytes, from 2015-03-25 11:42:32) +- common.xml ( 18437 bytes, from 2015-03-25 11:27:41) +- state_hi.xml ( 23420 bytes, from 2015-03-25 11:47:21) +- state_2d.xml ( 51549 bytes, from 2015-03-25 11:25:06) +- state_3d.xml ( 54600 bytes, from 2015-03-25 11:25:19) +- state_vg.xml ( 5973 bytes, from 2015-03-25 11:26:01) + +Copyright (C) 2015 +*/ + + +#define VARYING_COMPONENT_USE_UNUSED 0x00000000 +#define VARYING_COMPONENT_USE_USED 0x00000001 +#define VARYING_COMPONENT_USE_POINTCOORD_X 0x00000002 +#define VARYING_COMPONENT_USE_POINTCOORD_Y 0x00000003 +#define FE_VERTEX_STREAM_CONTROL_VERTEX_STRIDE__MASK 0x000000ff +#define FE_VERTEX_STREAM_CONTROL_VERTEX_STRIDE__SHIFT 0 +#define FE_VERTEX_STREAM_CONTROL_VERTEX_STRIDE(x) (((x) << FE_VERTEX_STREAM_CONTROL_VERTEX_STRIDE__SHIFT) & FE_VERTEX_STREAM_CONTROL_VERTEX_STRIDE__MASK) +#define VIVS_FE 0x00000000 + +#define VIVS_FE_VERTEX_ELEMENT_CONFIG(i0) (0x00000600 + 0x4*(i0)) +#define VIVS_FE_VERTEX_ELEMENT_CONFIG__ESIZE 0x00000004 +#define VIVS_FE_VERTEX_ELEMENT_CONFIG__LEN 0x00000010 +#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE__MASK 0x0000000f +#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE__SHIFT 0 +#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE_BYTE 0x00000000 +#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE_UNSIGNED_BYTE 0x00000001 +#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE_SHORT 0x00000002 +#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE_UNSIGNED_SHORT 0x00000003 +#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE_INT 0x00000004 +#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE_UNSIGNED_INT 0x00000005 +#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE_FLOAT 0x00000008 +#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE_HALF_FLOAT 0x00000009 +#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE_FIXED 0x0000000b +#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE_INT_10_10_10_2 0x0000000c +#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE_UNSIGNED_INT_10_10_10_2 0x0000000d +#define VIVS_FE_VERTEX_ELEMENT_CONFIG_ENDIAN__MASK 0x00000030 +#define VIVS_FE_VERTEX_ELEMENT_CONFIG_ENDIAN__SHIFT 4 +#define VIVS_FE_VERTEX_ELEMENT_CONFIG_ENDIAN(x) (((x) << VIVS_FE_VERTEX_ELEMENT_CONFIG_ENDIAN__SHIFT) & VIVS_FE_VERTEX_ELEMENT_CONFIG_ENDIAN__MASK) +#define VIVS_FE_VERTEX_ELEMENT_CONFIG_NONCONSECUTIVE 0x00000080 +#define VIVS_FE_VERTEX_ELEMENT_CONFIG_STREAM__MASK 0x00000700 +#define VIVS_FE_VERTEX_ELEMENT_CONFIG_STREAM__SHIFT 8 +#define VIVS_FE_VERTEX_ELEMENT_CONFIG_STREAM(x) (((x) << VIVS_FE_VERTEX_ELEMENT_CONFIG_STREAM__SHIFT) & VIVS_FE_VERTEX_ELEMENT_CONFIG_STREAM__MASK) +#define VIVS_FE_VERTEX_ELEMENT_CONFIG_NUM__MASK 0x00003000 +#define VIVS_FE_VERTEX_ELEMENT_CONFIG_NUM__SHIFT 12 +#define VIVS_FE_VERTEX_ELEMENT_CONFIG_NUM(x) (((x) << VIVS_FE_VERTEX_ELEMENT_CONFIG_NUM__SHIFT) & VIVS_FE_VERTEX_ELEMENT_CONFIG_NUM__MASK) +#define VIVS_FE_VERTEX_ELEMENT_CONFIG_NORMALIZE__MASK 0x0000c000 +#define VIVS_FE_VERTEX_ELEMENT_CONFIG_NORMALIZE__SHIFT 14 +#define VIVS_FE_VERTEX_ELEMENT_CONFIG_NORMALIZE_OFF 0x00000000 +#define VIVS_FE_VERTEX_ELEMENT_CONFIG_NORMALIZE_ON 0x00008000 +#define VIVS_FE_VERTEX_ELEMENT_CONFIG_START__MASK 0x00ff0000 +#define VIVS_FE_VERTEX_ELEMENT_CONFIG_START__SHIFT 16 +#define VIVS_FE_VERTEX_ELEMENT_CONFIG_START(x) (((x) << VIVS_FE_VERTEX_ELEMENT_CONFIG_START__SHIFT) & VIVS_FE_VERTEX_ELEMENT_CONFIG_START__MASK) +#define VIVS_FE_VERTEX_ELEMENT_CONFIG_END__MASK 
0xff000000 +#define VIVS_FE_VERTEX_ELEMENT_CONFIG_END__SHIFT 24 +#define VIVS_FE_VERTEX_ELEMENT_CONFIG_END(x) (((x) << VIVS_FE_VERTEX_ELEMENT_CONFIG_END__SHIFT) & VIVS_FE_VERTEX_ELEMENT_CONFIG_END__MASK) + +#define VIVS_FE_CMD_STREAM_BASE_ADDR 0x00000640 + +#define VIVS_FE_INDEX_STREAM_BASE_ADDR 0x00000644 + +#define VIVS_FE_INDEX_STREAM_CONTROL 0x00000648 +#define VIVS_FE_INDEX_STREAM_CONTROL_TYPE__MASK 0x00000003 +#define VIVS_FE_INDEX_STREAM_CONTROL_TYPE__SHIFT 0 +#define VIVS_FE_INDEX_STREAM_CONTROL_TYPE_UNSIGNED_CHAR 0x00000000 +#define VIVS_FE_INDEX_STREAM_CONTROL_TYPE_UNSIGNED_SHORT 0x00000001 +#define VIVS_FE_INDEX_STREAM_CONTROL_TYPE_UNSIGNED_INT 0x00000002 + +#define VIVS_FE_VERTEX_STREAM_BASE_ADDR 0x0000064c + +#define VIVS_FE_VERTEX_STREAM_CONTROL 0x00000650 + +#define VIVS_FE_COMMAND_ADDRESS 0x00000654 + +#define VIVS_FE_COMMAND_CONTROL 0x00000658 +#define VIVS_FE_COMMAND_CONTROL_PREFETCH__MASK 0x0000ffff +#define VIVS_FE_COMMAND_CONTROL_PREFETCH__SHIFT 0 +#define VIVS_FE_COMMAND_CONTROL_PREFETCH(x) (((x) << VIVS_FE_COMMAND_CONTROL_PREFETCH__SHIFT) & VIVS_FE_COMMAND_CONTROL_PREFETCH__MASK) +#define VIVS_FE_COMMAND_CONTROL_ENABLE 0x00010000 + +#define VIVS_FE_DMA_STATUS 0x0000065c + +#define VIVS_FE_DMA_DEBUG_STATE 0x00000660 +#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE__MASK 0x0000001f +#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE__SHIFT 0 +#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_IDLE 0x00000000 +#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_DEC 0x00000001 +#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_ADR0 0x00000002 +#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_LOAD0 0x00000003 +#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_ADR1 0x00000004 +#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_LOAD1 0x00000005 +#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_3DADR 0x00000006 +#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_3DCMD 0x00000007 +#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_3DCNTL 0x00000008 +#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_3DIDXCNTL 0x00000009 +#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_INITREQDMA 0x0000000a +#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_DRAWIDX 0x0000000b +#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_DRAW 0x0000000c +#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_2DRECT0 0x0000000d +#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_2DRECT1 0x0000000e +#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_2DDATA0 0x0000000f +#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_2DDATA1 0x00000010 +#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_WAITFIFO 0x00000011 +#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_WAIT 0x00000012 +#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_LINK 0x00000013 +#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_END 0x00000014 +#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_STALL 0x00000015 +#define VIVS_FE_DMA_DEBUG_STATE_CMD_DMA_STATE__MASK 0x00000300 +#define VIVS_FE_DMA_DEBUG_STATE_CMD_DMA_STATE__SHIFT 8 +#define VIVS_FE_DMA_DEBUG_STATE_CMD_DMA_STATE_IDLE 0x00000000 +#define VIVS_FE_DMA_DEBUG_STATE_CMD_DMA_STATE_START 0x00000100 +#define VIVS_FE_DMA_DEBUG_STATE_CMD_DMA_STATE_REQ 0x00000200 +#define VIVS_FE_DMA_DEBUG_STATE_CMD_DMA_STATE_END 0x00000300 +#define VIVS_FE_DMA_DEBUG_STATE_CMD_FETCH_STATE__MASK 0x00000c00 +#define VIVS_FE_DMA_DEBUG_STATE_CMD_FETCH_STATE__SHIFT 10 +#define VIVS_FE_DMA_DEBUG_STATE_CMD_FETCH_STATE_IDLE 0x00000000 +#define VIVS_FE_DMA_DEBUG_STATE_CMD_FETCH_STATE_RAMVALID 0x00000400 +#define VIVS_FE_DMA_DEBUG_STATE_CMD_FETCH_STATE_VALID 0x00000800 +#define VIVS_FE_DMA_DEBUG_STATE_REQ_DMA_STATE__MASK 0x00003000 +#define VIVS_FE_DMA_DEBUG_STATE_REQ_DMA_STATE__SHIFT 12 +#define 
VIVS_FE_DMA_DEBUG_STATE_REQ_DMA_STATE_IDLE 0x00000000 +#define VIVS_FE_DMA_DEBUG_STATE_REQ_DMA_STATE_WAITIDX 0x00001000 +#define VIVS_FE_DMA_DEBUG_STATE_REQ_DMA_STATE_CAL 0x00002000 +#define VIVS_FE_DMA_DEBUG_STATE_CAL_STATE__MASK 0x0000c000 +#define VIVS_FE_DMA_DEBUG_STATE_CAL_STATE__SHIFT 14 +#define VIVS_FE_DMA_DEBUG_STATE_CAL_STATE_IDLE 0x00000000 +#define VIVS_FE_DMA_DEBUG_STATE_CAL_STATE_LDADR 0x00004000 +#define VIVS_FE_DMA_DEBUG_STATE_CAL_STATE_IDXCALC 0x00008000 +#define VIVS_FE_DMA_DEBUG_STATE_VE_REQ_STATE__MASK 0x00030000 +#define VIVS_FE_DMA_DEBUG_STATE_VE_REQ_STATE__SHIFT 16 +#define VIVS_FE_DMA_DEBUG_STATE_VE_REQ_STATE_IDLE 0x00000000 +#define VIVS_FE_DMA_DEBUG_STATE_VE_REQ_STATE_CKCACHE 0x00010000 +#define VIVS_FE_DMA_DEBUG_STATE_VE_REQ_STATE_MISS 0x00020000 + +#define VIVS_FE_DMA_ADDRESS 0x00000664 + +#define VIVS_FE_DMA_LOW 0x00000668 + +#define VIVS_FE_DMA_HIGH 0x0000066c + +#define VIVS_FE_AUTO_FLUSH 0x00000670 + +#define VIVS_FE_UNK00678 0x00000678 + +#define VIVS_FE_UNK0067C 0x0000067c + +#define VIVS_FE_VERTEX_STREAMS(i0) (0x00000000 + 0x4*(i0)) +#define VIVS_FE_VERTEX_STREAMS__ESIZE 0x00000004 +#define VIVS_FE_VERTEX_STREAMS__LEN 0x00000008 + +#define VIVS_FE_VERTEX_STREAMS_BASE_ADDR(i0) (0x00000680 + 0x4*(i0)) + +#define VIVS_FE_VERTEX_STREAMS_CONTROL(i0) (0x000006a0 + 0x4*(i0)) + +#define VIVS_FE_UNK00700(i0) (0x00000700 + 0x4*(i0)) +#define VIVS_FE_UNK00700__ESIZE 0x00000004 +#define VIVS_FE_UNK00700__LEN 0x00000010 + +#define VIVS_FE_UNK00740(i0) (0x00000740 + 0x4*(i0)) +#define VIVS_FE_UNK00740__ESIZE 0x00000004 +#define VIVS_FE_UNK00740__LEN 0x00000010 + +#define VIVS_FE_UNK00780(i0) (0x00000780 + 0x4*(i0)) +#define VIVS_FE_UNK00780__ESIZE 0x00000004 +#define VIVS_FE_UNK00780__LEN 0x00000010 + +#define VIVS_GL 0x00000000 + +#define VIVS_GL_PIPE_SELECT 0x00003800 +#define VIVS_GL_PIPE_SELECT_PIPE__MASK 0x00000001 +#define VIVS_GL_PIPE_SELECT_PIPE__SHIFT 0 +#define VIVS_GL_PIPE_SELECT_PIPE(x) (((x) << VIVS_GL_PIPE_SELECT_PIPE__SHIFT) & VIVS_GL_PIPE_SELECT_PIPE__MASK) + +#define VIVS_GL_EVENT 0x00003804 +#define VIVS_GL_EVENT_EVENT_ID__MASK 0x0000001f +#define VIVS_GL_EVENT_EVENT_ID__SHIFT 0 +#define VIVS_GL_EVENT_EVENT_ID(x) (((x) << VIVS_GL_EVENT_EVENT_ID__SHIFT) & VIVS_GL_EVENT_EVENT_ID__MASK) +#define VIVS_GL_EVENT_FROM_FE 0x00000020 +#define VIVS_GL_EVENT_FROM_PE 0x00000040 +#define VIVS_GL_EVENT_SOURCE__MASK 0x00001f00 +#define VIVS_GL_EVENT_SOURCE__SHIFT 8 +#define VIVS_GL_EVENT_SOURCE(x) (((x) << VIVS_GL_EVENT_SOURCE__SHIFT) & VIVS_GL_EVENT_SOURCE__MASK) + +#define VIVS_GL_SEMAPHORE_TOKEN 0x00003808 +#define VIVS_GL_SEMAPHORE_TOKEN_FROM__MASK 0x0000001f +#define VIVS_GL_SEMAPHORE_TOKEN_FROM__SHIFT 0 +#define VIVS_GL_SEMAPHORE_TOKEN_FROM(x) (((x) << VIVS_GL_SEMAPHORE_TOKEN_FROM__SHIFT) & VIVS_GL_SEMAPHORE_TOKEN_FROM__MASK) +#define VIVS_GL_SEMAPHORE_TOKEN_TO__MASK 0x00001f00 +#define VIVS_GL_SEMAPHORE_TOKEN_TO__SHIFT 8 +#define VIVS_GL_SEMAPHORE_TOKEN_TO(x) (((x) << VIVS_GL_SEMAPHORE_TOKEN_TO__SHIFT) & VIVS_GL_SEMAPHORE_TOKEN_TO__MASK) + +#define VIVS_GL_FLUSH_CACHE 0x0000380c +#define VIVS_GL_FLUSH_CACHE_DEPTH 0x00000001 +#define VIVS_GL_FLUSH_CACHE_COLOR 0x00000002 +#define VIVS_GL_FLUSH_CACHE_TEXTURE 0x00000004 +#define VIVS_GL_FLUSH_CACHE_PE2D 0x00000008 +#define VIVS_GL_FLUSH_CACHE_TEXTUREVS 0x00000010 +#define VIVS_GL_FLUSH_CACHE_SHADER_L1 0x00000020 +#define VIVS_GL_FLUSH_CACHE_SHADER_L2 0x00000040 + +#define VIVS_GL_FLUSH_MMU 0x00003810 +#define VIVS_GL_FLUSH_MMU_FLUSH_FEMMU 0x00000001 +#define VIVS_GL_FLUSH_MMU_FLUSH_UNK1 0x00000002 +#define 
VIVS_GL_FLUSH_MMU_FLUSH_UNK2 0x00000004 +#define VIVS_GL_FLUSH_MMU_FLUSH_PEMMU 0x00000008 +#define VIVS_GL_FLUSH_MMU_FLUSH_UNK4 0x00000010 + +#define VIVS_GL_VERTEX_ELEMENT_CONFIG 0x00003814 + +#define VIVS_GL_MULTI_SAMPLE_CONFIG 0x00003818 +#define VIVS_GL_MULTI_SAMPLE_CONFIG_MSAA_SAMPLES__MASK 0x00000003 +#define VIVS_GL_MULTI_SAMPLE_CONFIG_MSAA_SAMPLES__SHIFT 0 +#define VIVS_GL_MULTI_SAMPLE_CONFIG_MSAA_SAMPLES_NONE 0x00000000 +#define VIVS_GL_MULTI_SAMPLE_CONFIG_MSAA_SAMPLES_2X 0x00000001 +#define VIVS_GL_MULTI_SAMPLE_CONFIG_MSAA_SAMPLES_4X 0x00000002 +#define VIVS_GL_MULTI_SAMPLE_CONFIG_MSAA_SAMPLES_MASK 0x00000008 +#define VIVS_GL_MULTI_SAMPLE_CONFIG_MSAA_ENABLES__MASK 0x000000f0 +#define VIVS_GL_MULTI_SAMPLE_CONFIG_MSAA_ENABLES__SHIFT 4 +#define VIVS_GL_MULTI_SAMPLE_CONFIG_MSAA_ENABLES(x) (((x) << VIVS_GL_MULTI_SAMPLE_CONFIG_MSAA_ENABLES__SHIFT) & VIVS_GL_MULTI_SAMPLE_CONFIG_MSAA_ENABLES__MASK) +#define VIVS_GL_MULTI_SAMPLE_CONFIG_MSAA_ENABLES_MASK 0x00000100 +#define VIVS_GL_MULTI_SAMPLE_CONFIG_UNK12__MASK 0x00007000 +#define VIVS_GL_MULTI_SAMPLE_CONFIG_UNK12__SHIFT 12 +#define VIVS_GL_MULTI_SAMPLE_CONFIG_UNK12(x) (((x) << VIVS_GL_MULTI_SAMPLE_CONFIG_UNK12__SHIFT) & VIVS_GL_MULTI_SAMPLE_CONFIG_UNK12__MASK) +#define VIVS_GL_MULTI_SAMPLE_CONFIG_UNK12_MASK 0x00008000 +#define VIVS_GL_MULTI_SAMPLE_CONFIG_UNK16__MASK 0x00030000 +#define VIVS_GL_MULTI_SAMPLE_CONFIG_UNK16__SHIFT 16 +#define VIVS_GL_MULTI_SAMPLE_CONFIG_UNK16(x) (((x) << VIVS_GL_MULTI_SAMPLE_CONFIG_UNK16__SHIFT) & VIVS_GL_MULTI_SAMPLE_CONFIG_UNK16__MASK) +#define VIVS_GL_MULTI_SAMPLE_CONFIG_UNK16_MASK 0x00080000 + +#define VIVS_GL_VARYING_TOTAL_COMPONENTS 0x0000381c +#define VIVS_GL_VARYING_TOTAL_COMPONENTS_NUM__MASK 0x000000ff +#define VIVS_GL_VARYING_TOTAL_COMPONENTS_NUM__SHIFT 0 +#define VIVS_GL_VARYING_TOTAL_COMPONENTS_NUM(x) (((x) << VIVS_GL_VARYING_TOTAL_COMPONENTS_NUM__SHIFT) & VIVS_GL_VARYING_TOTAL_COMPONENTS_NUM__MASK) + +#define VIVS_GL_VARYING_NUM_COMPONENTS 0x00003820 +#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR0__MASK 0x00000007 +#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR0__SHIFT 0 +#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR0(x) (((x) << VIVS_GL_VARYING_NUM_COMPONENTS_VAR0__SHIFT) & VIVS_GL_VARYING_NUM_COMPONENTS_VAR0__MASK) +#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR1__MASK 0x00000070 +#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR1__SHIFT 4 +#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR1(x) (((x) << VIVS_GL_VARYING_NUM_COMPONENTS_VAR1__SHIFT) & VIVS_GL_VARYING_NUM_COMPONENTS_VAR1__MASK) +#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR2__MASK 0x00000700 +#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR2__SHIFT 8 +#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR2(x) (((x) << VIVS_GL_VARYING_NUM_COMPONENTS_VAR2__SHIFT) & VIVS_GL_VARYING_NUM_COMPONENTS_VAR2__MASK) +#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR3__MASK 0x00007000 +#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR3__SHIFT 12 +#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR3(x) (((x) << VIVS_GL_VARYING_NUM_COMPONENTS_VAR3__SHIFT) & VIVS_GL_VARYING_NUM_COMPONENTS_VAR3__MASK) +#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR4__MASK 0x00070000 +#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR4__SHIFT 16 +#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR4(x) (((x) << VIVS_GL_VARYING_NUM_COMPONENTS_VAR4__SHIFT) & VIVS_GL_VARYING_NUM_COMPONENTS_VAR4__MASK) +#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR5__MASK 0x00700000 +#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR5__SHIFT 20 +#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR5(x) (((x) << VIVS_GL_VARYING_NUM_COMPONENTS_VAR5__SHIFT) & VIVS_GL_VARYING_NUM_COMPONENTS_VAR5__MASK) 
+#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR6__MASK 0x07000000 +#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR6__SHIFT 24 +#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR6(x) (((x) << VIVS_GL_VARYING_NUM_COMPONENTS_VAR6__SHIFT) & VIVS_GL_VARYING_NUM_COMPONENTS_VAR6__MASK) +#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR7__MASK 0x70000000 +#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR7__SHIFT 28 +#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR7(x) (((x) << VIVS_GL_VARYING_NUM_COMPONENTS_VAR7__SHIFT) & VIVS_GL_VARYING_NUM_COMPONENTS_VAR7__MASK) + +#define VIVS_GL_VARYING_COMPONENT_USE(i0) (0x00003828 + 0x4*(i0)) +#define VIVS_GL_VARYING_COMPONENT_USE__ESIZE 0x00000004 +#define VIVS_GL_VARYING_COMPONENT_USE__LEN 0x00000002 +#define VIVS_GL_VARYING_COMPONENT_USE_COMP0__MASK 0x00000003 +#define VIVS_GL_VARYING_COMPONENT_USE_COMP0__SHIFT 0 +#define VIVS_GL_VARYING_COMPONENT_USE_COMP0(x) (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP0__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP0__MASK) +#define VIVS_GL_VARYING_COMPONENT_USE_COMP1__MASK 0x0000000c +#define VIVS_GL_VARYING_COMPONENT_USE_COMP1__SHIFT 2 +#define VIVS_GL_VARYING_COMPONENT_USE_COMP1(x) (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP1__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP1__MASK) +#define VIVS_GL_VARYING_COMPONENT_USE_COMP2__MASK 0x00000030 +#define VIVS_GL_VARYING_COMPONENT_USE_COMP2__SHIFT 4 +#define VIVS_GL_VARYING_COMPONENT_USE_COMP2(x) (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP2__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP2__MASK) +#define VIVS_GL_VARYING_COMPONENT_USE_COMP3__MASK 0x000000c0 +#define VIVS_GL_VARYING_COMPONENT_USE_COMP3__SHIFT 6 +#define VIVS_GL_VARYING_COMPONENT_USE_COMP3(x) (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP3__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP3__MASK) +#define VIVS_GL_VARYING_COMPONENT_USE_COMP4__MASK 0x00000300 +#define VIVS_GL_VARYING_COMPONENT_USE_COMP4__SHIFT 8 +#define VIVS_GL_VARYING_COMPONENT_USE_COMP4(x) (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP4__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP4__MASK) +#define VIVS_GL_VARYING_COMPONENT_USE_COMP5__MASK 0x00000c00 +#define VIVS_GL_VARYING_COMPONENT_USE_COMP5__SHIFT 10 +#define VIVS_GL_VARYING_COMPONENT_USE_COMP5(x) (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP5__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP5__MASK) +#define VIVS_GL_VARYING_COMPONENT_USE_COMP6__MASK 0x00003000 +#define VIVS_GL_VARYING_COMPONENT_USE_COMP6__SHIFT 12 +#define VIVS_GL_VARYING_COMPONENT_USE_COMP6(x) (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP6__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP6__MASK) +#define VIVS_GL_VARYING_COMPONENT_USE_COMP7__MASK 0x0000c000 +#define VIVS_GL_VARYING_COMPONENT_USE_COMP7__SHIFT 14 +#define VIVS_GL_VARYING_COMPONENT_USE_COMP7(x) (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP7__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP7__MASK) +#define VIVS_GL_VARYING_COMPONENT_USE_COMP8__MASK 0x00030000 +#define VIVS_GL_VARYING_COMPONENT_USE_COMP8__SHIFT 16 +#define VIVS_GL_VARYING_COMPONENT_USE_COMP8(x) (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP8__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP8__MASK) +#define VIVS_GL_VARYING_COMPONENT_USE_COMP9__MASK 0x000c0000 +#define VIVS_GL_VARYING_COMPONENT_USE_COMP9__SHIFT 18 +#define VIVS_GL_VARYING_COMPONENT_USE_COMP9(x) (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP9__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP9__MASK) +#define VIVS_GL_VARYING_COMPONENT_USE_COMP10__MASK 0x00300000 +#define VIVS_GL_VARYING_COMPONENT_USE_COMP10__SHIFT 20 +#define VIVS_GL_VARYING_COMPONENT_USE_COMP10(x) (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP10__SHIFT) 
& VIVS_GL_VARYING_COMPONENT_USE_COMP10__MASK) +#define VIVS_GL_VARYING_COMPONENT_USE_COMP11__MASK 0x00c00000 +#define VIVS_GL_VARYING_COMPONENT_USE_COMP11__SHIFT 22 +#define VIVS_GL_VARYING_COMPONENT_USE_COMP11(x) (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP11__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP11__MASK) +#define VIVS_GL_VARYING_COMPONENT_USE_COMP12__MASK 0x03000000 +#define VIVS_GL_VARYING_COMPONENT_USE_COMP12__SHIFT 24 +#define VIVS_GL_VARYING_COMPONENT_USE_COMP12(x) (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP12__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP12__MASK) +#define VIVS_GL_VARYING_COMPONENT_USE_COMP13__MASK 0x0c000000 +#define VIVS_GL_VARYING_COMPONENT_USE_COMP13__SHIFT 26 +#define VIVS_GL_VARYING_COMPONENT_USE_COMP13(x) (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP13__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP13__MASK) +#define VIVS_GL_VARYING_COMPONENT_USE_COMP14__MASK 0x30000000 +#define VIVS_GL_VARYING_COMPONENT_USE_COMP14__SHIFT 28 +#define VIVS_GL_VARYING_COMPONENT_USE_COMP14(x) (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP14__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP14__MASK) +#define VIVS_GL_VARYING_COMPONENT_USE_COMP15__MASK 0xc0000000 +#define VIVS_GL_VARYING_COMPONENT_USE_COMP15__SHIFT 30 +#define VIVS_GL_VARYING_COMPONENT_USE_COMP15(x) (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP15__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP15__MASK) + +#define VIVS_GL_UNK03834 0x00003834 + +#define VIVS_GL_UNK03838 0x00003838 + +#define VIVS_GL_API_MODE 0x0000384c +#define VIVS_GL_API_MODE_OPENGL 0x00000000 +#define VIVS_GL_API_MODE_OPENVG 0x00000001 +#define VIVS_GL_API_MODE_OPENCL 0x00000002 + +#define VIVS_GL_CONTEXT_POINTER 0x00003850 + +#define VIVS_GL_UNK03A00 0x00003a00 + +#define VIVS_GL_STALL_TOKEN 0x00003c00 +#define VIVS_GL_STALL_TOKEN_FROM__MASK 0x0000001f +#define VIVS_GL_STALL_TOKEN_FROM__SHIFT 0 +#define VIVS_GL_STALL_TOKEN_FROM(x) (((x) << VIVS_GL_STALL_TOKEN_FROM__SHIFT) & VIVS_GL_STALL_TOKEN_FROM__MASK) +#define VIVS_GL_STALL_TOKEN_TO__MASK 0x00001f00 +#define VIVS_GL_STALL_TOKEN_TO__SHIFT 8 +#define VIVS_GL_STALL_TOKEN_TO(x) (((x) << VIVS_GL_STALL_TOKEN_TO__SHIFT) & VIVS_GL_STALL_TOKEN_TO__MASK) +#define VIVS_GL_STALL_TOKEN_FLIP0 0x40000000 +#define VIVS_GL_STALL_TOKEN_FLIP1 0x80000000 + +#define VIVS_DUMMY 0x00000000 + +#define VIVS_DUMMY_DUMMY 0x0003fffc + + +#endif /* STATE_XML */ diff --git a/drivers/gpu/drm/etnaviv/state_hi.xml.h b/drivers/gpu/drm/etnaviv/state_hi.xml.h new file mode 100644 index 000000000000..0064f2640396 --- /dev/null +++ b/drivers/gpu/drm/etnaviv/state_hi.xml.h @@ -0,0 +1,407 @@ +#ifndef STATE_HI_XML +#define STATE_HI_XML + +/* Autogenerated file, DO NOT EDIT manually! 
+ +This file was generated by the rules-ng-ng headergen tool in this git repository: +http://0x04.net/cgit/index.cgi/rules-ng-ng +git clone git://0x04.net/rules-ng-ng + +The rules-ng-ng source files this header was generated from are: +- state_hi.xml ( 23420 bytes, from 2015-03-25 11:47:21) +- common.xml ( 18437 bytes, from 2015-03-25 11:27:41) + +Copyright (C) 2015 +*/ + + +#define MMU_EXCEPTION_SLAVE_NOT_PRESENT 0x00000001 +#define MMU_EXCEPTION_PAGE_NOT_PRESENT 0x00000002 +#define MMU_EXCEPTION_WRITE_VIOLATION 0x00000003 +#define VIVS_HI 0x00000000 + +#define VIVS_HI_CLOCK_CONTROL 0x00000000 +#define VIVS_HI_CLOCK_CONTROL_CLK3D_DIS 0x00000001 +#define VIVS_HI_CLOCK_CONTROL_CLK2D_DIS 0x00000002 +#define VIVS_HI_CLOCK_CONTROL_FSCALE_VAL__MASK 0x000001fc +#define VIVS_HI_CLOCK_CONTROL_FSCALE_VAL__SHIFT 2 +#define VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(x) (((x) << VIVS_HI_CLOCK_CONTROL_FSCALE_VAL__SHIFT) & VIVS_HI_CLOCK_CONTROL_FSCALE_VAL__MASK) +#define VIVS_HI_CLOCK_CONTROL_FSCALE_CMD_LOAD 0x00000200 +#define VIVS_HI_CLOCK_CONTROL_DISABLE_RAM_CLK_GATING 0x00000400 +#define VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS 0x00000800 +#define VIVS_HI_CLOCK_CONTROL_SOFT_RESET 0x00001000 +#define VIVS_HI_CLOCK_CONTROL_IDLE_3D 0x00010000 +#define VIVS_HI_CLOCK_CONTROL_IDLE_2D 0x00020000 +#define VIVS_HI_CLOCK_CONTROL_IDLE_VG 0x00040000 +#define VIVS_HI_CLOCK_CONTROL_ISOLATE_GPU 0x00080000 +#define VIVS_HI_CLOCK_CONTROL_DEBUG_PIXEL_PIPE__MASK 0x00f00000 +#define VIVS_HI_CLOCK_CONTROL_DEBUG_PIXEL_PIPE__SHIFT 20 +#define VIVS_HI_CLOCK_CONTROL_DEBUG_PIXEL_PIPE(x) (((x) << VIVS_HI_CLOCK_CONTROL_DEBUG_PIXEL_PIPE__SHIFT) & VIVS_HI_CLOCK_CONTROL_DEBUG_PIXEL_PIPE__MASK) + +#define VIVS_HI_IDLE_STATE 0x00000004 +#define VIVS_HI_IDLE_STATE_FE 0x00000001 +#define VIVS_HI_IDLE_STATE_DE 0x00000002 +#define VIVS_HI_IDLE_STATE_PE 0x00000004 +#define VIVS_HI_IDLE_STATE_SH 0x00000008 +#define VIVS_HI_IDLE_STATE_PA 0x00000010 +#define VIVS_HI_IDLE_STATE_SE 0x00000020 +#define VIVS_HI_IDLE_STATE_RA 0x00000040 +#define VIVS_HI_IDLE_STATE_TX 0x00000080 +#define VIVS_HI_IDLE_STATE_VG 0x00000100 +#define VIVS_HI_IDLE_STATE_IM 0x00000200 +#define VIVS_HI_IDLE_STATE_FP 0x00000400 +#define VIVS_HI_IDLE_STATE_TS 0x00000800 +#define VIVS_HI_IDLE_STATE_AXI_LP 0x80000000 + +#define VIVS_HI_AXI_CONFIG 0x00000008 +#define VIVS_HI_AXI_CONFIG_AWID__MASK 0x0000000f +#define VIVS_HI_AXI_CONFIG_AWID__SHIFT 0 +#define VIVS_HI_AXI_CONFIG_AWID(x) (((x) << VIVS_HI_AXI_CONFIG_AWID__SHIFT) & VIVS_HI_AXI_CONFIG_AWID__MASK) +#define VIVS_HI_AXI_CONFIG_ARID__MASK 0x000000f0 +#define VIVS_HI_AXI_CONFIG_ARID__SHIFT 4 +#define VIVS_HI_AXI_CONFIG_ARID(x) (((x) << VIVS_HI_AXI_CONFIG_ARID__SHIFT) & VIVS_HI_AXI_CONFIG_ARID__MASK) +#define VIVS_HI_AXI_CONFIG_AWCACHE__MASK 0x00000f00 +#define VIVS_HI_AXI_CONFIG_AWCACHE__SHIFT 8 +#define VIVS_HI_AXI_CONFIG_AWCACHE(x) (((x) << VIVS_HI_AXI_CONFIG_AWCACHE__SHIFT) & VIVS_HI_AXI_CONFIG_AWCACHE__MASK) +#define VIVS_HI_AXI_CONFIG_ARCACHE__MASK 0x0000f000 +#define VIVS_HI_AXI_CONFIG_ARCACHE__SHIFT 12 +#define VIVS_HI_AXI_CONFIG_ARCACHE(x) (((x) << VIVS_HI_AXI_CONFIG_ARCACHE__SHIFT) & VIVS_HI_AXI_CONFIG_ARCACHE__MASK) + +#define VIVS_HI_AXI_STATUS 0x0000000c +#define VIVS_HI_AXI_STATUS_WR_ERR_ID__MASK 0x0000000f +#define VIVS_HI_AXI_STATUS_WR_ERR_ID__SHIFT 0 +#define VIVS_HI_AXI_STATUS_WR_ERR_ID(x) (((x) << VIVS_HI_AXI_STATUS_WR_ERR_ID__SHIFT) & VIVS_HI_AXI_STATUS_WR_ERR_ID__MASK) +#define VIVS_HI_AXI_STATUS_RD_ERR_ID__MASK 0x000000f0 +#define VIVS_HI_AXI_STATUS_RD_ERR_ID__SHIFT 4 +#define 
VIVS_HI_AXI_STATUS_RD_ERR_ID(x) (((x) << VIVS_HI_AXI_STATUS_RD_ERR_ID__SHIFT) & VIVS_HI_AXI_STATUS_RD_ERR_ID__MASK) +#define VIVS_HI_AXI_STATUS_DET_WR_ERR 0x00000100 +#define VIVS_HI_AXI_STATUS_DET_RD_ERR 0x00000200 + +#define VIVS_HI_INTR_ACKNOWLEDGE 0x00000010 +#define VIVS_HI_INTR_ACKNOWLEDGE_INTR_VEC__MASK 0x7fffffff +#define VIVS_HI_INTR_ACKNOWLEDGE_INTR_VEC__SHIFT 0 +#define VIVS_HI_INTR_ACKNOWLEDGE_INTR_VEC(x) (((x) << VIVS_HI_INTR_ACKNOWLEDGE_INTR_VEC__SHIFT) & VIVS_HI_INTR_ACKNOWLEDGE_INTR_VEC__MASK) +#define VIVS_HI_INTR_ACKNOWLEDGE_AXI_BUS_ERROR 0x80000000 + +#define VIVS_HI_INTR_ENBL 0x00000014 +#define VIVS_HI_INTR_ENBL_INTR_ENBL_VEC__MASK 0xffffffff +#define VIVS_HI_INTR_ENBL_INTR_ENBL_VEC__SHIFT 0 +#define VIVS_HI_INTR_ENBL_INTR_ENBL_VEC(x) (((x) << VIVS_HI_INTR_ENBL_INTR_ENBL_VEC__SHIFT) & VIVS_HI_INTR_ENBL_INTR_ENBL_VEC__MASK) + +#define VIVS_HI_CHIP_IDENTITY 0x00000018 +#define VIVS_HI_CHIP_IDENTITY_FAMILY__MASK 0xff000000 +#define VIVS_HI_CHIP_IDENTITY_FAMILY__SHIFT 24 +#define VIVS_HI_CHIP_IDENTITY_FAMILY(x) (((x) << VIVS_HI_CHIP_IDENTITY_FAMILY__SHIFT) & VIVS_HI_CHIP_IDENTITY_FAMILY__MASK) +#define VIVS_HI_CHIP_IDENTITY_PRODUCT__MASK 0x00ff0000 +#define VIVS_HI_CHIP_IDENTITY_PRODUCT__SHIFT 16 +#define VIVS_HI_CHIP_IDENTITY_PRODUCT(x) (((x) << VIVS_HI_CHIP_IDENTITY_PRODUCT__SHIFT) & VIVS_HI_CHIP_IDENTITY_PRODUCT__MASK) +#define VIVS_HI_CHIP_IDENTITY_REVISION__MASK 0x0000f000 +#define VIVS_HI_CHIP_IDENTITY_REVISION__SHIFT 12 +#define VIVS_HI_CHIP_IDENTITY_REVISION(x) (((x) << VIVS_HI_CHIP_IDENTITY_REVISION__SHIFT) & VIVS_HI_CHIP_IDENTITY_REVISION__MASK) + +#define VIVS_HI_CHIP_FEATURE 0x0000001c + +#define VIVS_HI_CHIP_MODEL 0x00000020 + +#define VIVS_HI_CHIP_REV 0x00000024 + +#define VIVS_HI_CHIP_DATE 0x00000028 + +#define VIVS_HI_CHIP_TIME 0x0000002c + +#define VIVS_HI_CHIP_MINOR_FEATURE_0 0x00000034 + +#define VIVS_HI_CACHE_CONTROL 0x00000038 + +#define VIVS_HI_MEMORY_COUNTER_RESET 0x0000003c + +#define VIVS_HI_PROFILE_READ_BYTES8 0x00000040 + +#define VIVS_HI_PROFILE_WRITE_BYTES8 0x00000044 + +#define VIVS_HI_CHIP_SPECS 0x00000048 +#define VIVS_HI_CHIP_SPECS_STREAM_COUNT__MASK 0x0000000f +#define VIVS_HI_CHIP_SPECS_STREAM_COUNT__SHIFT 0 +#define VIVS_HI_CHIP_SPECS_STREAM_COUNT(x) (((x) << VIVS_HI_CHIP_SPECS_STREAM_COUNT__SHIFT) & VIVS_HI_CHIP_SPECS_STREAM_COUNT__MASK) +#define VIVS_HI_CHIP_SPECS_REGISTER_MAX__MASK 0x000000f0 +#define VIVS_HI_CHIP_SPECS_REGISTER_MAX__SHIFT 4 +#define VIVS_HI_CHIP_SPECS_REGISTER_MAX(x) (((x) << VIVS_HI_CHIP_SPECS_REGISTER_MAX__SHIFT) & VIVS_HI_CHIP_SPECS_REGISTER_MAX__MASK) +#define VIVS_HI_CHIP_SPECS_THREAD_COUNT__MASK 0x00000f00 +#define VIVS_HI_CHIP_SPECS_THREAD_COUNT__SHIFT 8 +#define VIVS_HI_CHIP_SPECS_THREAD_COUNT(x) (((x) << VIVS_HI_CHIP_SPECS_THREAD_COUNT__SHIFT) & VIVS_HI_CHIP_SPECS_THREAD_COUNT__MASK) +#define VIVS_HI_CHIP_SPECS_VERTEX_CACHE_SIZE__MASK 0x0001f000 +#define VIVS_HI_CHIP_SPECS_VERTEX_CACHE_SIZE__SHIFT 12 +#define VIVS_HI_CHIP_SPECS_VERTEX_CACHE_SIZE(x) (((x) << VIVS_HI_CHIP_SPECS_VERTEX_CACHE_SIZE__SHIFT) & VIVS_HI_CHIP_SPECS_VERTEX_CACHE_SIZE__MASK) +#define VIVS_HI_CHIP_SPECS_SHADER_CORE_COUNT__MASK 0x01f00000 +#define VIVS_HI_CHIP_SPECS_SHADER_CORE_COUNT__SHIFT 20 +#define VIVS_HI_CHIP_SPECS_SHADER_CORE_COUNT(x) (((x) << VIVS_HI_CHIP_SPECS_SHADER_CORE_COUNT__SHIFT) & VIVS_HI_CHIP_SPECS_SHADER_CORE_COUNT__MASK) +#define VIVS_HI_CHIP_SPECS_PIXEL_PIPES__MASK 0x0e000000 +#define VIVS_HI_CHIP_SPECS_PIXEL_PIPES__SHIFT 25 +#define VIVS_HI_CHIP_SPECS_PIXEL_PIPES(x) (((x) << VIVS_HI_CHIP_SPECS_PIXEL_PIPES__SHIFT) & 
VIVS_HI_CHIP_SPECS_PIXEL_PIPES__MASK) +#define VIVS_HI_CHIP_SPECS_VERTEX_OUTPUT_BUFFER_SIZE__MASK 0xf0000000 +#define VIVS_HI_CHIP_SPECS_VERTEX_OUTPUT_BUFFER_SIZE__SHIFT 28 +#define VIVS_HI_CHIP_SPECS_VERTEX_OUTPUT_BUFFER_SIZE(x) (((x) << VIVS_HI_CHIP_SPECS_VERTEX_OUTPUT_BUFFER_SIZE__SHIFT) & VIVS_HI_CHIP_SPECS_VERTEX_OUTPUT_BUFFER_SIZE__MASK) + +#define VIVS_HI_PROFILE_WRITE_BURSTS 0x0000004c + +#define VIVS_HI_PROFILE_WRITE_REQUESTS 0x00000050 + +#define VIVS_HI_PROFILE_READ_BURSTS 0x00000058 + +#define VIVS_HI_PROFILE_READ_REQUESTS 0x0000005c + +#define VIVS_HI_PROFILE_READ_LASTS 0x00000060 + +#define VIVS_HI_GP_OUT0 0x00000064 + +#define VIVS_HI_GP_OUT1 0x00000068 + +#define VIVS_HI_GP_OUT2 0x0000006c + +#define VIVS_HI_AXI_CONTROL 0x00000070 +#define VIVS_HI_AXI_CONTROL_WR_FULL_BURST_MODE 0x00000001 + +#define VIVS_HI_CHIP_MINOR_FEATURE_1 0x00000074 + +#define VIVS_HI_PROFILE_TOTAL_CYCLES 0x00000078 + +#define VIVS_HI_PROFILE_IDLE_CYCLES 0x0000007c + +#define VIVS_HI_CHIP_SPECS_2 0x00000080 +#define VIVS_HI_CHIP_SPECS_2_BUFFER_SIZE__MASK 0x000000ff +#define VIVS_HI_CHIP_SPECS_2_BUFFER_SIZE__SHIFT 0 +#define VIVS_HI_CHIP_SPECS_2_BUFFER_SIZE(x) (((x) << VIVS_HI_CHIP_SPECS_2_BUFFER_SIZE__SHIFT) & VIVS_HI_CHIP_SPECS_2_BUFFER_SIZE__MASK) +#define VIVS_HI_CHIP_SPECS_2_INSTRUCTION_COUNT__MASK 0x0000ff00 +#define VIVS_HI_CHIP_SPECS_2_INSTRUCTION_COUNT__SHIFT 8 +#define VIVS_HI_CHIP_SPECS_2_INSTRUCTION_COUNT(x) (((x) << VIVS_HI_CHIP_SPECS_2_INSTRUCTION_COUNT__SHIFT) & VIVS_HI_CHIP_SPECS_2_INSTRUCTION_COUNT__MASK) +#define VIVS_HI_CHIP_SPECS_2_NUM_CONSTANTS__MASK 0xffff0000 +#define VIVS_HI_CHIP_SPECS_2_NUM_CONSTANTS__SHIFT 16 +#define VIVS_HI_CHIP_SPECS_2_NUM_CONSTANTS(x) (((x) << VIVS_HI_CHIP_SPECS_2_NUM_CONSTANTS__SHIFT) & VIVS_HI_CHIP_SPECS_2_NUM_CONSTANTS__MASK) + +#define VIVS_HI_CHIP_MINOR_FEATURE_2 0x00000084 + +#define VIVS_HI_CHIP_MINOR_FEATURE_3 0x00000088 + +#define VIVS_HI_CHIP_MINOR_FEATURE_4 0x00000094 + +#define VIVS_PM 0x00000000 + +#define VIVS_PM_POWER_CONTROLS 0x00000100 +#define VIVS_PM_POWER_CONTROLS_ENABLE_MODULE_CLOCK_GATING 0x00000001 +#define VIVS_PM_POWER_CONTROLS_DISABLE_STALL_MODULE_CLOCK_GATING 0x00000002 +#define VIVS_PM_POWER_CONTROLS_DISABLE_STARVE_MODULE_CLOCK_GATING 0x00000004 +#define VIVS_PM_POWER_CONTROLS_TURN_ON_COUNTER__MASK 0x000000f0 +#define VIVS_PM_POWER_CONTROLS_TURN_ON_COUNTER__SHIFT 4 +#define VIVS_PM_POWER_CONTROLS_TURN_ON_COUNTER(x) (((x) << VIVS_PM_POWER_CONTROLS_TURN_ON_COUNTER__SHIFT) & VIVS_PM_POWER_CONTROLS_TURN_ON_COUNTER__MASK) +#define VIVS_PM_POWER_CONTROLS_TURN_OFF_COUNTER__MASK 0xffff0000 +#define VIVS_PM_POWER_CONTROLS_TURN_OFF_COUNTER__SHIFT 16 +#define VIVS_PM_POWER_CONTROLS_TURN_OFF_COUNTER(x) (((x) << VIVS_PM_POWER_CONTROLS_TURN_OFF_COUNTER__SHIFT) & VIVS_PM_POWER_CONTROLS_TURN_OFF_COUNTER__MASK) + +#define VIVS_PM_MODULE_CONTROLS 0x00000104 +#define VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_FE 0x00000001 +#define VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_DE 0x00000002 +#define VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_PE 0x00000004 + +#define VIVS_PM_MODULE_STATUS 0x00000108 +#define VIVS_PM_MODULE_STATUS_MODULE_CLOCK_GATED_FE 0x00000001 +#define VIVS_PM_MODULE_STATUS_MODULE_CLOCK_GATED_DE 0x00000002 +#define VIVS_PM_MODULE_STATUS_MODULE_CLOCK_GATED_PE 0x00000004 + +#define VIVS_PM_PULSE_EATER 0x0000010c + +#define VIVS_MMUv2 0x00000000 + +#define VIVS_MMUv2_SAFE_ADDRESS 0x00000180 + +#define VIVS_MMUv2_CONFIGURATION 0x00000184 +#define VIVS_MMUv2_CONFIGURATION_MODE__MASK 0x00000001 +#define 
VIVS_MMUv2_CONFIGURATION_MODE__SHIFT 0 +#define VIVS_MMUv2_CONFIGURATION_MODE_MODE4_K 0x00000000 +#define VIVS_MMUv2_CONFIGURATION_MODE_MODE1_K 0x00000001 +#define VIVS_MMUv2_CONFIGURATION_MODE_MASK 0x00000008 +#define VIVS_MMUv2_CONFIGURATION_FLUSH__MASK 0x00000010 +#define VIVS_MMUv2_CONFIGURATION_FLUSH__SHIFT 4 +#define VIVS_MMUv2_CONFIGURATION_FLUSH_FLUSH 0x00000010 +#define VIVS_MMUv2_CONFIGURATION_FLUSH_MASK 0x00000080 +#define VIVS_MMUv2_CONFIGURATION_ADDRESS_MASK 0x00000100 +#define VIVS_MMUv2_CONFIGURATION_ADDRESS__MASK 0xfffffc00 +#define VIVS_MMUv2_CONFIGURATION_ADDRESS__SHIFT 10 +#define VIVS_MMUv2_CONFIGURATION_ADDRESS(x) (((x) << VIVS_MMUv2_CONFIGURATION_ADDRESS__SHIFT) & VIVS_MMUv2_CONFIGURATION_ADDRESS__MASK) + +#define VIVS_MMUv2_STATUS 0x00000188 +#define VIVS_MMUv2_STATUS_EXCEPTION0__MASK 0x00000003 +#define VIVS_MMUv2_STATUS_EXCEPTION0__SHIFT 0 +#define VIVS_MMUv2_STATUS_EXCEPTION0(x) (((x) << VIVS_MMUv2_STATUS_EXCEPTION0__SHIFT) & VIVS_MMUv2_STATUS_EXCEPTION0__MASK) +#define VIVS_MMUv2_STATUS_EXCEPTION1__MASK 0x00000030 +#define VIVS_MMUv2_STATUS_EXCEPTION1__SHIFT 4 +#define VIVS_MMUv2_STATUS_EXCEPTION1(x) (((x) << VIVS_MMUv2_STATUS_EXCEPTION1__SHIFT) & VIVS_MMUv2_STATUS_EXCEPTION1__MASK) +#define VIVS_MMUv2_STATUS_EXCEPTION2__MASK 0x00000300 +#define VIVS_MMUv2_STATUS_EXCEPTION2__SHIFT 8 +#define VIVS_MMUv2_STATUS_EXCEPTION2(x) (((x) << VIVS_MMUv2_STATUS_EXCEPTION2__SHIFT) & VIVS_MMUv2_STATUS_EXCEPTION2__MASK) +#define VIVS_MMUv2_STATUS_EXCEPTION3__MASK 0x00003000 +#define VIVS_MMUv2_STATUS_EXCEPTION3__SHIFT 12 +#define VIVS_MMUv2_STATUS_EXCEPTION3(x) (((x) << VIVS_MMUv2_STATUS_EXCEPTION3__SHIFT) & VIVS_MMUv2_STATUS_EXCEPTION3__MASK) + +#define VIVS_MMUv2_CONTROL 0x0000018c +#define VIVS_MMUv2_CONTROL_ENABLE 0x00000001 + +#define VIVS_MMUv2_EXCEPTION_ADDR(i0) (0x00000190 + 0x4*(i0)) +#define VIVS_MMUv2_EXCEPTION_ADDR__ESIZE 0x00000004 +#define VIVS_MMUv2_EXCEPTION_ADDR__LEN 0x00000004 + +#define VIVS_MC 0x00000000 + +#define VIVS_MC_MMU_FE_PAGE_TABLE 0x00000400 + +#define VIVS_MC_MMU_TX_PAGE_TABLE 0x00000404 + +#define VIVS_MC_MMU_PE_PAGE_TABLE 0x00000408 + +#define VIVS_MC_MMU_PEZ_PAGE_TABLE 0x0000040c + +#define VIVS_MC_MMU_RA_PAGE_TABLE 0x00000410 + +#define VIVS_MC_DEBUG_MEMORY 0x00000414 +#define VIVS_MC_DEBUG_MEMORY_SPECIAL_PATCH_GC320 0x00000008 +#define VIVS_MC_DEBUG_MEMORY_FAST_CLEAR_BYPASS 0x00100000 +#define VIVS_MC_DEBUG_MEMORY_COMPRESSION_BYPASS 0x00200000 + +#define VIVS_MC_MEMORY_BASE_ADDR_RA 0x00000418 + +#define VIVS_MC_MEMORY_BASE_ADDR_FE 0x0000041c + +#define VIVS_MC_MEMORY_BASE_ADDR_TX 0x00000420 + +#define VIVS_MC_MEMORY_BASE_ADDR_PEZ 0x00000424 + +#define VIVS_MC_MEMORY_BASE_ADDR_PE 0x00000428 + +#define VIVS_MC_MEMORY_TIMING_CONTROL 0x0000042c + +#define VIVS_MC_MEMORY_FLUSH 0x00000430 + +#define VIVS_MC_PROFILE_CYCLE_COUNTER 0x00000438 + +#define VIVS_MC_DEBUG_READ0 0x0000043c + +#define VIVS_MC_DEBUG_READ1 0x00000440 + +#define VIVS_MC_DEBUG_WRITE 0x00000444 + +#define VIVS_MC_PROFILE_RA_READ 0x00000448 + +#define VIVS_MC_PROFILE_TX_READ 0x0000044c + +#define VIVS_MC_PROFILE_FE_READ 0x00000450 + +#define VIVS_MC_PROFILE_PE_READ 0x00000454 + +#define VIVS_MC_PROFILE_DE_READ 0x00000458 + +#define VIVS_MC_PROFILE_SH_READ 0x0000045c + +#define VIVS_MC_PROFILE_PA_READ 0x00000460 + +#define VIVS_MC_PROFILE_SE_READ 0x00000464 + +#define VIVS_MC_PROFILE_MC_READ 0x00000468 + +#define VIVS_MC_PROFILE_HI_READ 0x0000046c + +#define VIVS_MC_PROFILE_CONFIG0 0x00000470 +#define VIVS_MC_PROFILE_CONFIG0_FE__MASK 0x0000000f +#define 
VIVS_MC_PROFILE_CONFIG0_FE__SHIFT 0 +#define VIVS_MC_PROFILE_CONFIG0_FE_RESET 0x0000000f +#define VIVS_MC_PROFILE_CONFIG0_DE__MASK 0x00000f00 +#define VIVS_MC_PROFILE_CONFIG0_DE__SHIFT 8 +#define VIVS_MC_PROFILE_CONFIG0_DE_RESET 0x00000f00 +#define VIVS_MC_PROFILE_CONFIG0_PE__MASK 0x000f0000 +#define VIVS_MC_PROFILE_CONFIG0_PE__SHIFT 16 +#define VIVS_MC_PROFILE_CONFIG0_PE_PIXEL_COUNT_KILLED_BY_COLOR_PIPE 0x00000000 +#define VIVS_MC_PROFILE_CONFIG0_PE_PIXEL_COUNT_KILLED_BY_DEPTH_PIPE 0x00010000 +#define VIVS_MC_PROFILE_CONFIG0_PE_PIXEL_COUNT_DRAWN_BY_COLOR_PIPE 0x00020000 +#define VIVS_MC_PROFILE_CONFIG0_PE_PIXEL_COUNT_DRAWN_BY_DEPTH_PIPE 0x00030000 +#define VIVS_MC_PROFILE_CONFIG0_PE_PIXELS_RENDERED_2D 0x000b0000 +#define VIVS_MC_PROFILE_CONFIG0_PE_RESET 0x000f0000 +#define VIVS_MC_PROFILE_CONFIG0_SH__MASK 0x0f000000 +#define VIVS_MC_PROFILE_CONFIG0_SH__SHIFT 24 +#define VIVS_MC_PROFILE_CONFIG0_SH_SHADER_CYCLES 0x04000000 +#define VIVS_MC_PROFILE_CONFIG0_SH_PS_INST_COUNTER 0x07000000 +#define VIVS_MC_PROFILE_CONFIG0_SH_RENDERED_PIXEL_COUNTER 0x08000000 +#define VIVS_MC_PROFILE_CONFIG0_SH_VS_INST_COUNTER 0x09000000 +#define VIVS_MC_PROFILE_CONFIG0_SH_RENDERED_VERTICE_COUNTER 0x0a000000 +#define VIVS_MC_PROFILE_CONFIG0_SH_VTX_BRANCH_INST_COUNTER 0x0b000000 +#define VIVS_MC_PROFILE_CONFIG0_SH_VTX_TEXLD_INST_COUNTER 0x0c000000 +#define VIVS_MC_PROFILE_CONFIG0_SH_PXL_BRANCH_INST_COUNTER 0x0d000000 +#define VIVS_MC_PROFILE_CONFIG0_SH_PXL_TEXLD_INST_COUNTER 0x0e000000 +#define VIVS_MC_PROFILE_CONFIG0_SH_RESET 0x0f000000 + +#define VIVS_MC_PROFILE_CONFIG1 0x00000474 +#define VIVS_MC_PROFILE_CONFIG1_PA__MASK 0x0000000f +#define VIVS_MC_PROFILE_CONFIG1_PA__SHIFT 0 +#define VIVS_MC_PROFILE_CONFIG1_PA_INPUT_VTX_COUNTER 0x00000003 +#define VIVS_MC_PROFILE_CONFIG1_PA_INPUT_PRIM_COUNTER 0x00000004 +#define VIVS_MC_PROFILE_CONFIG1_PA_OUTPUT_PRIM_COUNTER 0x00000005 +#define VIVS_MC_PROFILE_CONFIG1_PA_DEPTH_CLIPPED_COUNTER 0x00000006 +#define VIVS_MC_PROFILE_CONFIG1_PA_TRIVIAL_REJECTED_COUNTER 0x00000007 +#define VIVS_MC_PROFILE_CONFIG1_PA_CULLED_COUNTER 0x00000008 +#define VIVS_MC_PROFILE_CONFIG1_PA_RESET 0x0000000f +#define VIVS_MC_PROFILE_CONFIG1_SE__MASK 0x00000f00 +#define VIVS_MC_PROFILE_CONFIG1_SE__SHIFT 8 +#define VIVS_MC_PROFILE_CONFIG1_SE_CULLED_TRIANGLE_COUNT 0x00000000 +#define VIVS_MC_PROFILE_CONFIG1_SE_CULLED_LINES_COUNT 0x00000100 +#define VIVS_MC_PROFILE_CONFIG1_SE_RESET 0x00000f00 +#define VIVS_MC_PROFILE_CONFIG1_RA__MASK 0x000f0000 +#define VIVS_MC_PROFILE_CONFIG1_RA__SHIFT 16 +#define VIVS_MC_PROFILE_CONFIG1_RA_VALID_PIXEL_COUNT 0x00000000 +#define VIVS_MC_PROFILE_CONFIG1_RA_TOTAL_QUAD_COUNT 0x00010000 +#define VIVS_MC_PROFILE_CONFIG1_RA_VALID_QUAD_COUNT_AFTER_EARLY_Z 0x00020000 +#define VIVS_MC_PROFILE_CONFIG1_RA_TOTAL_PRIMITIVE_COUNT 0x00030000 +#define VIVS_MC_PROFILE_CONFIG1_RA_PIPE_CACHE_MISS_COUNTER 0x00090000 +#define VIVS_MC_PROFILE_CONFIG1_RA_PREFETCH_CACHE_MISS_COUNTER 0x000a0000 +#define VIVS_MC_PROFILE_CONFIG1_RA_CULLED_QUAD_COUNT 0x000b0000 +#define VIVS_MC_PROFILE_CONFIG1_RA_RESET 0x000f0000 +#define VIVS_MC_PROFILE_CONFIG1_TX__MASK 0x0f000000 +#define VIVS_MC_PROFILE_CONFIG1_TX__SHIFT 24 +#define VIVS_MC_PROFILE_CONFIG1_TX_TOTAL_BILINEAR_REQUESTS 0x00000000 +#define VIVS_MC_PROFILE_CONFIG1_TX_TOTAL_TRILINEAR_REQUESTS 0x01000000 +#define VIVS_MC_PROFILE_CONFIG1_TX_TOTAL_DISCARDED_TEXTURE_REQUESTS 0x02000000 +#define VIVS_MC_PROFILE_CONFIG1_TX_TOTAL_TEXTURE_REQUESTS 0x03000000 +#define VIVS_MC_PROFILE_CONFIG1_TX_UNKNOWN 0x04000000 +#define 
VIVS_MC_PROFILE_CONFIG1_TX_MEM_READ_COUNT 0x05000000 +#define VIVS_MC_PROFILE_CONFIG1_TX_MEM_READ_IN_8B_COUNT 0x06000000 +#define VIVS_MC_PROFILE_CONFIG1_TX_CACHE_MISS_COUNT 0x07000000 +#define VIVS_MC_PROFILE_CONFIG1_TX_CACHE_HIT_TEXEL_COUNT 0x08000000 +#define VIVS_MC_PROFILE_CONFIG1_TX_CACHE_MISS_TEXEL_COUNT 0x09000000 +#define VIVS_MC_PROFILE_CONFIG1_TX_RESET 0x0f000000 + +#define VIVS_MC_PROFILE_CONFIG2 0x00000478 +#define VIVS_MC_PROFILE_CONFIG2_MC__MASK 0x0000000f +#define VIVS_MC_PROFILE_CONFIG2_MC__SHIFT 0 +#define VIVS_MC_PROFILE_CONFIG2_MC_TOTAL_READ_REQ_8B_FROM_PIPELINE 0x00000001 +#define VIVS_MC_PROFILE_CONFIG2_MC_TOTAL_READ_REQ_8B_FROM_IP 0x00000002 +#define VIVS_MC_PROFILE_CONFIG2_MC_TOTAL_WRITE_REQ_8B_FROM_PIPELINE 0x00000003 +#define VIVS_MC_PROFILE_CONFIG2_MC_RESET 0x0000000f +#define VIVS_MC_PROFILE_CONFIG2_HI__MASK 0x00000f00 +#define VIVS_MC_PROFILE_CONFIG2_HI__SHIFT 8 +#define VIVS_MC_PROFILE_CONFIG2_HI_AXI_CYCLES_READ_REQUEST_STALLED 0x00000000 +#define VIVS_MC_PROFILE_CONFIG2_HI_AXI_CYCLES_WRITE_REQUEST_STALLED 0x00000100 +#define VIVS_MC_PROFILE_CONFIG2_HI_AXI_CYCLES_WRITE_DATA_STALLED 0x00000200 +#define VIVS_MC_PROFILE_CONFIG2_HI_RESET 0x00000f00 + +#define VIVS_MC_PROFILE_CONFIG3 0x0000047c + +#define VIVS_MC_BUS_CONFIG 0x00000480 +#define VIVS_MC_BUS_CONFIG_FE_BUS_CONFIG__MASK 0x0000000f +#define VIVS_MC_BUS_CONFIG_FE_BUS_CONFIG__SHIFT 0 +#define VIVS_MC_BUS_CONFIG_FE_BUS_CONFIG(x) (((x) << VIVS_MC_BUS_CONFIG_FE_BUS_CONFIG__SHIFT) & VIVS_MC_BUS_CONFIG_FE_BUS_CONFIG__MASK) +#define VIVS_MC_BUS_CONFIG_TX_BUS_CONFIG__MASK 0x000000f0 +#define VIVS_MC_BUS_CONFIG_TX_BUS_CONFIG__SHIFT 4 +#define VIVS_MC_BUS_CONFIG_TX_BUS_CONFIG(x) (((x) << VIVS_MC_BUS_CONFIG_TX_BUS_CONFIG__SHIFT) & VIVS_MC_BUS_CONFIG_TX_BUS_CONFIG__MASK) + +#define VIVS_MC_START_COMPOSITION 0x00000554 + +#define VIVS_MC_128B_MERGE 0x00000558 + + +#endif /* STATE_HI_XML */ diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig index 96e86cf4455b..83efca941388 100644 --- a/drivers/gpu/drm/exynos/Kconfig +++ b/drivers/gpu/drm/exynos/Kconfig @@ -118,7 +118,7 @@ config DRM_EXYNOS_ROTATOR config DRM_EXYNOS_GSC bool "GScaler" - depends on DRM_EXYNOS_IPP && ARCH_EXYNOS5 && !ARCH_MULTIPLATFORM + depends on DRM_EXYNOS_IPP && ARCH_EXYNOS5 && !VIDEO_SAMSUNG_EXYNOS_GSC help Choose this option if you want to use Exynos GSC for DRM. 
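The state.xml.h and state_hi.xml.h files added above are autogenerated register descriptions: each field is emitted as a __MASK/__SHIFT pair plus, where it helps, a FIELD(x) packing macro of the form (((x) << SHIFT) & MASK). The following sketch shows how such definitions are typically consumed; it is not part of this patch, and the MMIO accessors, the chosen fields, and the particular write sequence are only for illustration.

/*
 * Illustration only (not part of this patch): consuming the generated
 * VIVS_* definitions from state_hi.xml.h.  Fields are packed with the
 * FIELD(x) helper macros and unpacked with the matching __MASK/__SHIFT
 * pair.
 */
#include <linux/io.h>
#include <linux/types.h>

#include "state_hi.xml.h"

static u32 example_field_usage(void __iomem *mmio)
{
	u32 clk, specs;

	/* Pack: build a clock-control value with a scale field and a flag. */
	clk = VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(0x40) |
	      VIVS_HI_CLOCK_CONTROL_FSCALE_CMD_LOAD;
	writel(clk, mmio + VIVS_HI_CLOCK_CONTROL);

	/* Unpack: mask the field out of the register, then shift it down. */
	specs = readl(mmio + VIVS_HI_CHIP_SPECS);
	return (specs & VIVS_HI_CHIP_SPECS_STREAM_COUNT__MASK) >>
	       VIVS_HI_CHIP_SPECS_STREAM_COUNT__SHIFT;
}

Because the packing macro masks after shifting, out-of-range values are silently truncated to the field width, and reads reverse the same pair (mask, then shift), so callers never hard-code a field's position.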
diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c index fbe1b3174f75..1bf6a21130c7 100644 --- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c +++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c @@ -21,11 +21,11 @@ #include "exynos_drm_drv.h" #include "exynos_drm_crtc.h" +#include "exynos_drm_fb.h" #include "exynos_drm_plane.h" #include "exynos_drm_iommu.h" #define WINDOWS_NR 3 -#define CURSOR_WIN 2 #define MIN_FB_WIDTH_FOR_16WORD_BURST 128 static const char * const decon_clks_name[] = { @@ -56,6 +56,7 @@ struct decon_context { struct drm_device *drm_dev; struct exynos_drm_crtc *crtc; struct exynos_drm_plane planes[WINDOWS_NR]; + struct exynos_drm_plane_config configs[WINDOWS_NR]; void __iomem *addr; struct clk *clks[ARRAY_SIZE(decon_clks_name)]; int pipe; @@ -71,6 +72,12 @@ static const uint32_t decon_formats[] = { DRM_FORMAT_ARGB8888, }; +static const enum drm_plane_type decon_win_types[WINDOWS_NR] = { + DRM_PLANE_TYPE_PRIMARY, + DRM_PLANE_TYPE_OVERLAY, + DRM_PLANE_TYPE_CURSOR, +}; + static inline void decon_set_bits(struct decon_context *ctx, u32 reg, u32 mask, u32 val) { @@ -241,15 +248,16 @@ static void decon_shadow_protect_win(struct decon_context *ctx, int win, protect ? ~0 : 0); } -static void decon_atomic_begin(struct exynos_drm_crtc *crtc, - struct exynos_drm_plane *plane) +static void decon_atomic_begin(struct exynos_drm_crtc *crtc) { struct decon_context *ctx = crtc->ctx; + int i; if (test_bit(BIT_SUSPENDED, &ctx->flags)) return; - decon_shadow_protect_win(ctx, plane->zpos, true); + for (i = ctx->first_win; i < WINDOWS_NR; i++) + decon_shadow_protect_win(ctx, i, true); } #define BIT_VAL(x, e, s) (((x) & ((1 << ((e) - (s) + 1)) - 1)) << (s)) @@ -259,21 +267,24 @@ static void decon_atomic_begin(struct exynos_drm_crtc *crtc, static void decon_update_plane(struct exynos_drm_crtc *crtc, struct exynos_drm_plane *plane) { + struct exynos_drm_plane_state *state = + to_exynos_plane_state(plane->base.state); struct decon_context *ctx = crtc->ctx; - struct drm_plane_state *state = plane->base.state; - unsigned int win = plane->zpos; - unsigned int bpp = state->fb->bits_per_pixel >> 3; - unsigned int pitch = state->fb->pitches[0]; + struct drm_framebuffer *fb = state->base.fb; + unsigned int win = plane->index; + unsigned int bpp = fb->bits_per_pixel >> 3; + unsigned int pitch = fb->pitches[0]; + dma_addr_t dma_addr = exynos_drm_fb_dma_addr(fb, 0); u32 val; if (test_bit(BIT_SUSPENDED, &ctx->flags)) return; - val = COORDINATE_X(plane->crtc_x) | COORDINATE_Y(plane->crtc_y); + val = COORDINATE_X(state->crtc.x) | COORDINATE_Y(state->crtc.y); writel(val, ctx->addr + DECON_VIDOSDxA(win)); - val = COORDINATE_X(plane->crtc_x + plane->crtc_w - 1) | - COORDINATE_Y(plane->crtc_y + plane->crtc_h - 1); + val = COORDINATE_X(state->crtc.x + state->crtc.w - 1) | + COORDINATE_Y(state->crtc.y + state->crtc.h - 1); writel(val, ctx->addr + DECON_VIDOSDxB(win)); val = VIDOSD_Wx_ALPHA_R_F(0x0) | VIDOSD_Wx_ALPHA_G_F(0x0) | @@ -284,20 +295,20 @@ static void decon_update_plane(struct exynos_drm_crtc *crtc, VIDOSD_Wx_ALPHA_B_F(0x0); writel(val, ctx->addr + DECON_VIDOSDxD(win)); - writel(plane->dma_addr[0], ctx->addr + DECON_VIDW0xADD0B0(win)); + writel(dma_addr, ctx->addr + DECON_VIDW0xADD0B0(win)); - val = plane->dma_addr[0] + pitch * plane->crtc_h; + val = dma_addr + pitch * state->src.h; writel(val, ctx->addr + DECON_VIDW0xADD1B0(win)); if (ctx->out_type != IFTYPE_HDMI) - val = BIT_VAL(pitch - plane->crtc_w * bpp, 27, 14) - | BIT_VAL(plane->crtc_w * bpp, 
13, 0); + val = BIT_VAL(pitch - state->crtc.w * bpp, 27, 14) + | BIT_VAL(state->crtc.w * bpp, 13, 0); else - val = BIT_VAL(pitch - plane->crtc_w * bpp, 29, 15) - | BIT_VAL(plane->crtc_w * bpp, 14, 0); + val = BIT_VAL(pitch - state->crtc.w * bpp, 29, 15) + | BIT_VAL(state->crtc.w * bpp, 14, 0); writel(val, ctx->addr + DECON_VIDW0xADD2(win)); - decon_win_set_pixfmt(ctx, win, state->fb); + decon_win_set_pixfmt(ctx, win, fb); /* window enable */ decon_set_bits(ctx, DECON_WINCONx(win), WINCONx_ENWIN_F, ~0); @@ -310,7 +321,7 @@ static void decon_disable_plane(struct exynos_drm_crtc *crtc, struct exynos_drm_plane *plane) { struct decon_context *ctx = crtc->ctx; - unsigned int win = plane->zpos; + unsigned int win = plane->index; if (test_bit(BIT_SUSPENDED, &ctx->flags)) return; @@ -326,15 +337,16 @@ static void decon_disable_plane(struct exynos_drm_crtc *crtc, decon_set_bits(ctx, DECON_UPDATE, STANDALONE_UPDATE_F, ~0); } -static void decon_atomic_flush(struct exynos_drm_crtc *crtc, - struct exynos_drm_plane *plane) +static void decon_atomic_flush(struct exynos_drm_crtc *crtc) { struct decon_context *ctx = crtc->ctx; + int i; if (test_bit(BIT_SUSPENDED, &ctx->flags)) return; - decon_shadow_protect_win(ctx, plane->zpos, false); + for (i = ctx->first_win; i < WINDOWS_NR; i++) + decon_shadow_protect_win(ctx, i, false); if (ctx->out_type == IFTYPE_I80) set_bit(BIT_WIN_UPDATED, &ctx->flags); @@ -377,20 +389,12 @@ static void decon_swreset(struct decon_context *ctx) static void decon_enable(struct exynos_drm_crtc *crtc) { struct decon_context *ctx = crtc->ctx; - int ret; - int i; if (!test_and_clear_bit(BIT_SUSPENDED, &ctx->flags)) return; pm_runtime_get_sync(ctx->dev); - for (i = 0; i < ARRAY_SIZE(decon_clks_name); i++) { - ret = clk_prepare_enable(ctx->clks[i]); - if (ret < 0) - goto err; - } - set_bit(BIT_CLKS_ENABLED, &ctx->flags); /* if vblank was enabled status, enable it again. */ @@ -399,11 +403,6 @@ static void decon_enable(struct exynos_drm_crtc *crtc) decon_commit(ctx->crtc); - return; -err: - while (--i >= 0) - clk_disable_unprepare(ctx->clks[i]); - set_bit(BIT_SUSPENDED, &ctx->flags); } @@ -425,9 +424,6 @@ static void decon_disable(struct exynos_drm_crtc *crtc) decon_swreset(ctx); - for (i = 0; i < ARRAY_SIZE(decon_clks_name); i++) - clk_disable_unprepare(ctx->clks[i]); - clear_bit(BIT_CLKS_ENABLED, &ctx->flags); pm_runtime_put_sync(ctx->dev); @@ -478,7 +474,6 @@ err: static struct exynos_drm_crtc_ops decon_crtc_ops = { .enable = decon_enable, .disable = decon_disable, - .commit = decon_commit, .enable_vblank = decon_enable_vblank, .disable_vblank = decon_disable_vblank, .atomic_begin = decon_atomic_begin, @@ -495,7 +490,6 @@ static int decon_bind(struct device *dev, struct device *master, void *data) struct exynos_drm_private *priv = drm_dev->dev_private; struct exynos_drm_plane *exynos_plane; enum exynos_drm_output_type out_type; - enum drm_plane_type type; unsigned int win; int ret; @@ -505,10 +499,13 @@ static int decon_bind(struct device *dev, struct device *master, void *data) for (win = ctx->first_win; win < WINDOWS_NR; win++) { int tmp = (win == ctx->first_win) ? 
0 : win; - type = exynos_plane_get_type(tmp, CURSOR_WIN); - ret = exynos_plane_init(drm_dev, &ctx->planes[win], - 1 << ctx->pipe, type, decon_formats, - ARRAY_SIZE(decon_formats), win); + ctx->configs[win].pixel_formats = decon_formats; + ctx->configs[win].num_pixel_formats = ARRAY_SIZE(decon_formats); + ctx->configs[win].zpos = win; + ctx->configs[win].type = decon_win_types[tmp]; + + ret = exynos_plane_init(drm_dev, &ctx->planes[win], win, + 1 << ctx->pipe, &ctx->configs[win]); if (ret) return ret; } @@ -581,6 +578,44 @@ out: return IRQ_HANDLED; } +#ifdef CONFIG_PM +static int exynos5433_decon_suspend(struct device *dev) +{ + struct decon_context *ctx = dev_get_drvdata(dev); + int i; + + for (i = 0; i < ARRAY_SIZE(decon_clks_name); i++) + clk_disable_unprepare(ctx->clks[i]); + + return 0; +} + +static int exynos5433_decon_resume(struct device *dev) +{ + struct decon_context *ctx = dev_get_drvdata(dev); + int i, ret; + + for (i = 0; i < ARRAY_SIZE(decon_clks_name); i++) { + ret = clk_prepare_enable(ctx->clks[i]); + if (ret < 0) + goto err; + } + + return 0; + +err: + while (--i >= 0) + clk_disable_unprepare(ctx->clks[i]); + + return ret; +} +#endif + +static const struct dev_pm_ops exynos5433_decon_pm_ops = { + SET_RUNTIME_PM_OPS(exynos5433_decon_suspend, exynos5433_decon_resume, + NULL) +}; + static const struct of_device_id exynos5433_decon_driver_dt_match[] = { { .compatible = "samsung,exynos5433-decon", @@ -684,6 +719,7 @@ struct platform_driver exynos5433_decon_driver = { .remove = exynos5433_decon_remove, .driver = { .name = "exynos5433-decon", + .pm = &exynos5433_decon_pm_ops, .of_match_table = exynos5433_decon_driver_dt_match, }, }; diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c index ead2b16e237d..52bda3b42fe0 100644 --- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c +++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c @@ -30,6 +30,7 @@ #include "exynos_drm_crtc.h" #include "exynos_drm_plane.h" #include "exynos_drm_drv.h" +#include "exynos_drm_fb.h" #include "exynos_drm_fbdev.h" #include "exynos_drm_iommu.h" @@ -40,13 +41,13 @@ #define MIN_FB_WIDTH_FOR_16WORD_BURST 128 #define WINDOWS_NR 2 -#define CURSOR_WIN 1 struct decon_context { struct device *dev; struct drm_device *drm_dev; struct exynos_drm_crtc *crtc; struct exynos_drm_plane planes[WINDOWS_NR]; + struct exynos_drm_plane_config configs[WINDOWS_NR]; struct clk *pclk; struct clk *aclk; struct clk *eclk; @@ -81,6 +82,11 @@ static const uint32_t decon_formats[] = { DRM_FORMAT_BGRA8888, }; +static const enum drm_plane_type decon_win_types[WINDOWS_NR] = { + DRM_PLANE_TYPE_PRIMARY, + DRM_PLANE_TYPE_CURSOR, +}; + static void decon_wait_for_vblank(struct exynos_drm_crtc *crtc) { struct decon_context *ctx = crtc->ctx; @@ -119,13 +125,8 @@ static void decon_clear_channels(struct exynos_drm_crtc *crtc) } /* Wait for vsync, as disable channel takes effect at next vsync */ - if (ch_enabled) { - unsigned int state = ctx->suspended; - - ctx->suspended = 0; + if (ch_enabled) decon_wait_for_vblank(ctx->crtc); - ctx->suspended = state; - } } static int decon_ctx_initialize(struct decon_context *ctx, @@ -384,30 +385,32 @@ static void decon_shadow_protect_win(struct decon_context *ctx, writel(val, ctx->regs + SHADOWCON); } -static void decon_atomic_begin(struct exynos_drm_crtc *crtc, - struct exynos_drm_plane *plane) +static void decon_atomic_begin(struct exynos_drm_crtc *crtc) { struct decon_context *ctx = crtc->ctx; + int i; if (ctx->suspended) return; - decon_shadow_protect_win(ctx, 
plane->zpos, true); + for (i = 0; i < WINDOWS_NR; i++) + decon_shadow_protect_win(ctx, i, true); } static void decon_update_plane(struct exynos_drm_crtc *crtc, struct exynos_drm_plane *plane) { + struct exynos_drm_plane_state *state = + to_exynos_plane_state(plane->base.state); struct decon_context *ctx = crtc->ctx; - struct drm_display_mode *mode = &crtc->base.state->adjusted_mode; - struct drm_plane_state *state = plane->base.state; + struct drm_framebuffer *fb = state->base.fb; int padding; unsigned long val, alpha; unsigned int last_x; unsigned int last_y; - unsigned int win = plane->zpos; - unsigned int bpp = state->fb->bits_per_pixel >> 3; - unsigned int pitch = state->fb->pitches[0]; + unsigned int win = plane->index; + unsigned int bpp = fb->bits_per_pixel >> 3; + unsigned int pitch = fb->pitches[0]; if (ctx->suspended) return; @@ -423,41 +426,32 @@ static void decon_update_plane(struct exynos_drm_crtc *crtc, */ /* buffer start address */ - val = (unsigned long)plane->dma_addr[0]; + val = (unsigned long)exynos_drm_fb_dma_addr(fb, 0); writel(val, ctx->regs + VIDW_BUF_START(win)); - padding = (pitch / bpp) - state->fb->width; + padding = (pitch / bpp) - fb->width; /* buffer size */ - writel(state->fb->width + padding, ctx->regs + VIDW_WHOLE_X(win)); - writel(state->fb->height, ctx->regs + VIDW_WHOLE_Y(win)); + writel(fb->width + padding, ctx->regs + VIDW_WHOLE_X(win)); + writel(fb->height, ctx->regs + VIDW_WHOLE_Y(win)); /* offset from the start of the buffer to read */ - writel(plane->src_x, ctx->regs + VIDW_OFFSET_X(win)); - writel(plane->src_y, ctx->regs + VIDW_OFFSET_Y(win)); + writel(state->src.x, ctx->regs + VIDW_OFFSET_X(win)); + writel(state->src.y, ctx->regs + VIDW_OFFSET_Y(win)); DRM_DEBUG_KMS("start addr = 0x%lx\n", (unsigned long)val); DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n", - plane->crtc_w, plane->crtc_h); + state->crtc.w, state->crtc.h); - /* - * OSD position. - * In case the window layout goes of LCD layout, DECON fails. - */ - if ((plane->crtc_x + plane->crtc_w) > mode->hdisplay) - plane->crtc_x = mode->hdisplay - plane->crtc_w; - if ((plane->crtc_y + plane->crtc_h) > mode->vdisplay) - plane->crtc_y = mode->vdisplay - plane->crtc_h; - - val = VIDOSDxA_TOPLEFT_X(plane->crtc_x) | - VIDOSDxA_TOPLEFT_Y(plane->crtc_y); + val = VIDOSDxA_TOPLEFT_X(state->crtc.x) | + VIDOSDxA_TOPLEFT_Y(state->crtc.y); writel(val, ctx->regs + VIDOSD_A(win)); - last_x = plane->crtc_x + plane->crtc_w; + last_x = state->crtc.x + state->crtc.w; if (last_x) last_x--; - last_y = plane->crtc_y + plane->crtc_h; + last_y = state->crtc.y + state->crtc.h; if (last_y) last_y--; @@ -466,7 +460,7 @@ static void decon_update_plane(struct exynos_drm_crtc *crtc, writel(val, ctx->regs + VIDOSD_B(win)); DRM_DEBUG_KMS("osd pos: tx = %d, ty = %d, bx = %d, by = %d\n", - plane->crtc_x, plane->crtc_y, last_x, last_y); + state->crtc.x, state->crtc.y, last_x, last_y); /* OSD alpha */ alpha = VIDOSDxC_ALPHA0_R_F(0x0) | @@ -481,7 +475,7 @@ static void decon_update_plane(struct exynos_drm_crtc *crtc, writel(alpha, ctx->regs + VIDOSD_D(win)); - decon_win_set_pixfmt(ctx, win, state->fb); + decon_win_set_pixfmt(ctx, win, fb); /* hardware window 0 doesn't support color key. 
*/ if (win != 0) @@ -505,7 +499,7 @@ static void decon_disable_plane(struct exynos_drm_crtc *crtc, struct exynos_drm_plane *plane) { struct decon_context *ctx = crtc->ctx; - unsigned int win = plane->zpos; + unsigned int win = plane->index; u32 val; if (ctx->suspended) @@ -524,15 +518,16 @@ static void decon_disable_plane(struct exynos_drm_crtc *crtc, writel(val, ctx->regs + DECON_UPDATE); } -static void decon_atomic_flush(struct exynos_drm_crtc *crtc, - struct exynos_drm_plane *plane) +static void decon_atomic_flush(struct exynos_drm_crtc *crtc) { struct decon_context *ctx = crtc->ctx; + int i; if (ctx->suspended) return; - decon_shadow_protect_win(ctx, plane->zpos, false); + for (i = 0; i < WINDOWS_NR; i++) + decon_shadow_protect_win(ctx, i, false); } static void decon_init(struct decon_context *ctx) @@ -555,39 +550,12 @@ static void decon_init(struct decon_context *ctx) static void decon_enable(struct exynos_drm_crtc *crtc) { struct decon_context *ctx = crtc->ctx; - int ret; if (!ctx->suspended) return; - ctx->suspended = false; - pm_runtime_get_sync(ctx->dev); - ret = clk_prepare_enable(ctx->pclk); - if (ret < 0) { - DRM_ERROR("Failed to prepare_enable the pclk [%d]\n", ret); - return; - } - - ret = clk_prepare_enable(ctx->aclk); - if (ret < 0) { - DRM_ERROR("Failed to prepare_enable the aclk [%d]\n", ret); - return; - } - - ret = clk_prepare_enable(ctx->eclk); - if (ret < 0) { - DRM_ERROR("Failed to prepare_enable the eclk [%d]\n", ret); - return; - } - - ret = clk_prepare_enable(ctx->vclk); - if (ret < 0) { - DRM_ERROR("Failed to prepare_enable the vclk [%d]\n", ret); - return; - } - decon_init(ctx); /* if vblank was enabled status, enable it again. */ @@ -595,6 +563,8 @@ static void decon_enable(struct exynos_drm_crtc *crtc) decon_enable_vblank(ctx->crtc); decon_commit(ctx->crtc); + + ctx->suspended = false; } static void decon_disable(struct exynos_drm_crtc *crtc) @@ -613,11 +583,6 @@ static void decon_disable(struct exynos_drm_crtc *crtc) for (i = 0; i < WINDOWS_NR; i++) decon_disable_plane(crtc, &ctx->planes[i]); - clk_disable_unprepare(ctx->vclk); - clk_disable_unprepare(ctx->eclk); - clk_disable_unprepare(ctx->aclk); - clk_disable_unprepare(ctx->pclk); - pm_runtime_put_sync(ctx->dev); ctx->suspended = true; @@ -679,8 +644,7 @@ static int decon_bind(struct device *dev, struct device *master, void *data) struct decon_context *ctx = dev_get_drvdata(dev); struct drm_device *drm_dev = data; struct exynos_drm_plane *exynos_plane; - enum drm_plane_type type; - unsigned int zpos; + unsigned int i; int ret; ret = decon_ctx_initialize(ctx, drm_dev); @@ -689,11 +653,14 @@ static int decon_bind(struct device *dev, struct device *master, void *data) return ret; } - for (zpos = 0; zpos < WINDOWS_NR; zpos++) { - type = exynos_plane_get_type(zpos, CURSOR_WIN); - ret = exynos_plane_init(drm_dev, &ctx->planes[zpos], - 1 << ctx->pipe, type, decon_formats, - ARRAY_SIZE(decon_formats), zpos); + for (i = 0; i < WINDOWS_NR; i++) { + ctx->configs[i].pixel_formats = decon_formats; + ctx->configs[i].num_pixel_formats = ARRAY_SIZE(decon_formats); + ctx->configs[i].zpos = i; + ctx->configs[i].type = decon_win_types[i]; + + ret = exynos_plane_init(drm_dev, &ctx->planes[i], i, + 1 << ctx->pipe, &ctx->configs[i]); if (ret) return ret; } @@ -843,11 +810,63 @@ static int decon_remove(struct platform_device *pdev) return 0; } +#ifdef CONFIG_PM +static int exynos7_decon_suspend(struct device *dev) +{ + struct decon_context *ctx = dev_get_drvdata(dev); + + clk_disable_unprepare(ctx->vclk); + 
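The DECON, FIMD, DP and DSI hunks in this series all perform the same conversion: clock handling leaves the enable/disable callbacks and moves into runtime-PM callbacks, while the display paths only take and drop a runtime-PM reference. A minimal sketch of that shape, using hypothetical foo_* names rather than any driver in this patch:

/* Needs linux/clk.h and linux/pm_runtime.h; ctx is set as drvdata in probe. */
struct foo_context {
	struct device *dev;
	struct clk *clk;
};

static int foo_runtime_suspend(struct device *dev)
{
	struct foo_context *ctx = dev_get_drvdata(dev);

	clk_disable_unprepare(ctx->clk);
	return 0;
}

static int foo_runtime_resume(struct device *dev)
{
	struct foo_context *ctx = dev_get_drvdata(dev);

	/* Clocks now come up here instead of in the enable callback. */
	return clk_prepare_enable(ctx->clk);
}

static const struct dev_pm_ops foo_pm_ops = {
	SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
};

static void foo_enable(struct foo_context *ctx)
{
	/* Requires pm_runtime_enable() in probe and .pm = &foo_pm_ops. */
	pm_runtime_get_sync(ctx->dev);
	/* ... program the hardware ... */
}

static void foo_disable(struct foo_context *ctx)
{
	/* ... quiesce the hardware ... */
	pm_runtime_put_sync(ctx->dev);
}

The exynos5433 resume callback added earlier keeps the clock-array error unwinding that used to live in decon_enable(), which is why that enable path no longer needs its err: label.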
clk_disable_unprepare(ctx->eclk); + clk_disable_unprepare(ctx->aclk); + clk_disable_unprepare(ctx->pclk); + + return 0; +} + +static int exynos7_decon_resume(struct device *dev) +{ + struct decon_context *ctx = dev_get_drvdata(dev); + int ret; + + ret = clk_prepare_enable(ctx->pclk); + if (ret < 0) { + DRM_ERROR("Failed to prepare_enable the pclk [%d]\n", ret); + return ret; + } + + ret = clk_prepare_enable(ctx->aclk); + if (ret < 0) { + DRM_ERROR("Failed to prepare_enable the aclk [%d]\n", ret); + return ret; + } + + ret = clk_prepare_enable(ctx->eclk); + if (ret < 0) { + DRM_ERROR("Failed to prepare_enable the eclk [%d]\n", ret); + return ret; + } + + ret = clk_prepare_enable(ctx->vclk); + if (ret < 0) { + DRM_ERROR("Failed to prepare_enable the vclk [%d]\n", ret); + return ret; + } + + return 0; +} +#endif + +static const struct dev_pm_ops exynos7_decon_pm_ops = { + SET_RUNTIME_PM_OPS(exynos7_decon_suspend, exynos7_decon_resume, + NULL) +}; + struct platform_driver decon_driver = { .probe = decon_probe, .remove = decon_remove, .driver = { .name = "exynos-decon", + .pm = &exynos7_decon_pm_ops, .of_match_table = decon_driver_dt_match, }, }; diff --git a/drivers/gpu/drm/exynos/exynos_dp_core.c b/drivers/gpu/drm/exynos/exynos_dp_core.c index 124fb9a56f02..b79c316c2ad2 100644 --- a/drivers/gpu/drm/exynos/exynos_dp_core.c +++ b/drivers/gpu/drm/exynos/exynos_dp_core.c @@ -953,7 +953,7 @@ static void exynos_dp_connector_destroy(struct drm_connector *connector) drm_connector_cleanup(connector); } -static struct drm_connector_funcs exynos_dp_connector_funcs = { +static const struct drm_connector_funcs exynos_dp_connector_funcs = { .dpms = drm_atomic_helper_connector_dpms, .fill_modes = drm_helper_probe_single_connector_modes, .detect = exynos_dp_detect, @@ -998,7 +998,7 @@ static struct drm_encoder *exynos_dp_best_encoder( return &dp->encoder; } -static struct drm_connector_helper_funcs exynos_dp_connector_helper_funcs = { +static const struct drm_connector_helper_funcs exynos_dp_connector_helper_funcs = { .get_modes = exynos_dp_get_modes, .best_encoder = exynos_dp_best_encoder, }; @@ -1009,9 +1009,9 @@ static int exynos_drm_attach_lcd_bridge(struct exynos_dp_device *dp, { int ret; - encoder->bridge = dp->bridge; - dp->bridge->encoder = encoder; - ret = drm_bridge_attach(encoder->dev, dp->bridge); + encoder->bridge->next = dp->ptn_bridge; + dp->ptn_bridge->encoder = encoder; + ret = drm_bridge_attach(encoder->dev, dp->ptn_bridge); if (ret) { DRM_ERROR("Failed to attach bridge to drm\n"); return ret; @@ -1020,14 +1020,15 @@ static int exynos_drm_attach_lcd_bridge(struct exynos_dp_device *dp, return 0; } -static int exynos_dp_create_connector(struct drm_encoder *encoder) +static int exynos_dp_bridge_attach(struct drm_bridge *bridge) { - struct exynos_dp_device *dp = encoder_to_dp(encoder); + struct exynos_dp_device *dp = bridge->driver_private; + struct drm_encoder *encoder = &dp->encoder; struct drm_connector *connector = &dp->connector; int ret; /* Pre-empt DP connector creation if there's a bridge */ - if (dp->bridge) { + if (dp->ptn_bridge) { ret = exynos_drm_attach_lcd_bridge(dp, encoder); if (!ret) return 0; @@ -1052,27 +1053,16 @@ static int exynos_dp_create_connector(struct drm_encoder *encoder) return ret; } -static bool exynos_dp_mode_fixup(struct drm_encoder *encoder, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) +static void exynos_dp_bridge_enable(struct drm_bridge *bridge) { - return true; -} - -static void exynos_dp_mode_set(struct drm_encoder 
*encoder, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ -} - -static void exynos_dp_enable(struct drm_encoder *encoder) -{ - struct exynos_dp_device *dp = encoder_to_dp(encoder); + struct exynos_dp_device *dp = bridge->driver_private; struct exynos_drm_crtc *crtc = dp_to_crtc(dp); if (dp->dpms_mode == DRM_MODE_DPMS_ON) return; + pm_runtime_get_sync(dp->dev); + if (dp->panel) { if (drm_panel_prepare(dp->panel)) { DRM_ERROR("failed to setup the panel\n"); @@ -1083,7 +1073,6 @@ static void exynos_dp_enable(struct drm_encoder *encoder) if (crtc->ops->clock_enable) crtc->ops->clock_enable(dp_to_crtc(dp), true); - clk_prepare_enable(dp->clock); phy_power_on(dp->phy); exynos_dp_init_dp(dp); enable_irq(dp->irq); @@ -1092,9 +1081,9 @@ static void exynos_dp_enable(struct drm_encoder *encoder) dp->dpms_mode = DRM_MODE_DPMS_ON; } -static void exynos_dp_disable(struct drm_encoder *encoder) +static void exynos_dp_bridge_disable(struct drm_bridge *bridge) { - struct exynos_dp_device *dp = encoder_to_dp(encoder); + struct exynos_dp_device *dp = bridge->driver_private; struct exynos_drm_crtc *crtc = dp_to_crtc(dp); if (dp->dpms_mode != DRM_MODE_DPMS_ON) @@ -1110,7 +1099,6 @@ static void exynos_dp_disable(struct drm_encoder *encoder) disable_irq(dp->irq); flush_work(&dp->hotplug_work); phy_power_off(dp->phy); - clk_disable_unprepare(dp->clock); if (crtc->ops->clock_enable) crtc->ops->clock_enable(dp_to_crtc(dp), false); @@ -1120,17 +1108,82 @@ static void exynos_dp_disable(struct drm_encoder *encoder) DRM_ERROR("failed to turnoff the panel\n"); } + pm_runtime_put_sync(dp->dev); + dp->dpms_mode = DRM_MODE_DPMS_OFF; } -static struct drm_encoder_helper_funcs exynos_dp_encoder_helper_funcs = { +static void exynos_dp_bridge_nop(struct drm_bridge *bridge) +{ + /* do nothing */ +} + +static const struct drm_bridge_funcs exynos_dp_bridge_funcs = { + .enable = exynos_dp_bridge_enable, + .disable = exynos_dp_bridge_disable, + .pre_enable = exynos_dp_bridge_nop, + .post_disable = exynos_dp_bridge_nop, + .attach = exynos_dp_bridge_attach, +}; + +static int exynos_dp_create_connector(struct drm_encoder *encoder) +{ + struct exynos_dp_device *dp = encoder_to_dp(encoder); + struct drm_device *drm_dev = dp->drm_dev; + struct drm_bridge *bridge; + int ret; + + bridge = devm_kzalloc(drm_dev->dev, sizeof(*bridge), GFP_KERNEL); + if (!bridge) { + DRM_ERROR("failed to allocate for drm bridge\n"); + return -ENOMEM; + } + + dp->bridge = bridge; + + encoder->bridge = bridge; + bridge->driver_private = dp; + bridge->encoder = encoder; + bridge->funcs = &exynos_dp_bridge_funcs; + + ret = drm_bridge_attach(drm_dev, bridge); + if (ret) { + DRM_ERROR("failed to attach drm bridge\n"); + return -EINVAL; + } + + return 0; +} + +static bool exynos_dp_mode_fixup(struct drm_encoder *encoder, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + return true; +} + +static void exynos_dp_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ +} + +static void exynos_dp_enable(struct drm_encoder *encoder) +{ +} + +static void exynos_dp_disable(struct drm_encoder *encoder) +{ +} + +static const struct drm_encoder_helper_funcs exynos_dp_encoder_helper_funcs = { .mode_fixup = exynos_dp_mode_fixup, .mode_set = exynos_dp_mode_set, .enable = exynos_dp_enable, .disable = exynos_dp_disable, }; -static struct drm_encoder_funcs exynos_dp_encoder_funcs = { +static const struct drm_encoder_funcs exynos_dp_encoder_funcs = { .destroy = 
drm_encoder_cleanup, }; @@ -1238,7 +1291,7 @@ static int exynos_dp_bind(struct device *dev, struct device *master, void *data) } } - if (!dp->panel && !dp->bridge) { + if (!dp->panel && !dp->ptn_bridge) { ret = exynos_dp_dt_parse_panel(dp); if (ret) return ret; @@ -1289,10 +1342,6 @@ static int exynos_dp_bind(struct device *dev, struct device *master, void *data) INIT_WORK(&dp->hotplug_work, exynos_dp_hotplug); - phy_power_on(dp->phy); - - exynos_dp_init_dp(dp); - ret = devm_request_irq(&pdev->dev, dp->irq, exynos_dp_irq_handler, irq_flags, "exynos-dp", dp); if (ret) { @@ -1313,7 +1362,7 @@ static int exynos_dp_bind(struct device *dev, struct device *master, void *data) DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs); drm_encoder_init(drm_dev, encoder, &exynos_dp_encoder_funcs, - DRM_MODE_ENCODER_TMDS); + DRM_MODE_ENCODER_TMDS, NULL); drm_encoder_helper_add(encoder, &exynos_dp_encoder_helper_funcs); @@ -1343,8 +1392,9 @@ static const struct component_ops exynos_dp_ops = { static int exynos_dp_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; - struct device_node *panel_node, *bridge_node, *endpoint; + struct device_node *panel_node = NULL, *bridge_node, *endpoint = NULL; struct exynos_dp_device *dp; + int ret; dp = devm_kzalloc(&pdev->dev, sizeof(struct exynos_dp_device), GFP_KERNEL); @@ -1353,36 +1403,96 @@ static int exynos_dp_probe(struct platform_device *pdev) platform_set_drvdata(pdev, dp); + /* This is for the backward compatibility. */ panel_node = of_parse_phandle(dev->of_node, "panel", 0); if (panel_node) { dp->panel = of_drm_find_panel(panel_node); of_node_put(panel_node); if (!dp->panel) return -EPROBE_DEFER; + } else { + endpoint = of_graph_get_next_endpoint(dev->of_node, NULL); + if (endpoint) { + panel_node = of_graph_get_remote_port_parent(endpoint); + if (panel_node) { + dp->panel = of_drm_find_panel(panel_node); + of_node_put(panel_node); + if (!dp->panel) + return -EPROBE_DEFER; + } else { + DRM_ERROR("no port node for panel device.\n"); + return -EINVAL; + } + } } + if (endpoint) + goto out; + endpoint = of_graph_get_next_endpoint(dev->of_node, NULL); if (endpoint) { bridge_node = of_graph_get_remote_port_parent(endpoint); if (bridge_node) { - dp->bridge = of_drm_find_bridge(bridge_node); + dp->ptn_bridge = of_drm_find_bridge(bridge_node); of_node_put(bridge_node); - if (!dp->bridge) + if (!dp->ptn_bridge) return -EPROBE_DEFER; } else return -EPROBE_DEFER; } - return component_add(&pdev->dev, &exynos_dp_ops); +out: + pm_runtime_enable(dev); + + ret = component_add(&pdev->dev, &exynos_dp_ops); + if (ret) + goto err_disable_pm_runtime; + + return ret; + +err_disable_pm_runtime: + pm_runtime_disable(dev); + + return ret; } static int exynos_dp_remove(struct platform_device *pdev) { + pm_runtime_disable(&pdev->dev); component_del(&pdev->dev, &exynos_dp_ops); return 0; } +#ifdef CONFIG_PM +static int exynos_dp_suspend(struct device *dev) +{ + struct exynos_dp_device *dp = dev_get_drvdata(dev); + + clk_disable_unprepare(dp->clock); + + return 0; +} + +static int exynos_dp_resume(struct device *dev) +{ + struct exynos_dp_device *dp = dev_get_drvdata(dev); + int ret; + + ret = clk_prepare_enable(dp->clock); + if (ret < 0) { + DRM_ERROR("Failed to prepare_enable the clock clk [%d]\n", ret); + return ret; + } + + return 0; +} +#endif + +static const struct dev_pm_ops exynos_dp_pm_ops = { + SET_RUNTIME_PM_OPS(exynos_dp_suspend, exynos_dp_resume, NULL) +}; + static const struct of_device_id exynos_dp_match[] = { { .compatible = 
"samsung,exynos5-dp" }, {}, @@ -1395,6 +1505,7 @@ struct platform_driver dp_driver = { .driver = { .name = "exynos-dp", .owner = THIS_MODULE, + .pm = &exynos_dp_pm_ops, .of_match_table = exynos_dp_match, }, }; diff --git a/drivers/gpu/drm/exynos/exynos_dp_core.h b/drivers/gpu/drm/exynos/exynos_dp_core.h index e413b6f7b0e7..66eec4b2d5c6 100644 --- a/drivers/gpu/drm/exynos/exynos_dp_core.h +++ b/drivers/gpu/drm/exynos/exynos_dp_core.h @@ -153,6 +153,7 @@ struct exynos_dp_device { struct drm_connector connector; struct drm_panel *panel; struct drm_bridge *bridge; + struct drm_bridge *ptn_bridge; struct clk *clock; unsigned int irq; void __iomem *reg_base; diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c index e69357172ffb..e36579c1c025 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c @@ -68,35 +68,23 @@ static void exynos_crtc_atomic_begin(struct drm_crtc *crtc, struct drm_crtc_state *old_crtc_state) { struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc); - struct drm_plane *plane; exynos_crtc->event = crtc->state->event; - drm_atomic_crtc_for_each_plane(plane, crtc) { - struct exynos_drm_plane *exynos_plane = to_exynos_plane(plane); - - if (exynos_crtc->ops->atomic_begin) - exynos_crtc->ops->atomic_begin(exynos_crtc, - exynos_plane); - } + if (exynos_crtc->ops->atomic_begin) + exynos_crtc->ops->atomic_begin(exynos_crtc); } static void exynos_crtc_atomic_flush(struct drm_crtc *crtc, struct drm_crtc_state *old_crtc_state) { struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc); - struct drm_plane *plane; - drm_atomic_crtc_for_each_plane(plane, crtc) { - struct exynos_drm_plane *exynos_plane = to_exynos_plane(plane); - - if (exynos_crtc->ops->atomic_flush) - exynos_crtc->ops->atomic_flush(exynos_crtc, - exynos_plane); - } + if (exynos_crtc->ops->atomic_flush) + exynos_crtc->ops->atomic_flush(exynos_crtc); } -static struct drm_crtc_helper_funcs exynos_crtc_helper_funcs = { +static const struct drm_crtc_helper_funcs exynos_crtc_helper_funcs = { .enable = exynos_drm_crtc_enable, .disable = exynos_drm_crtc_disable, .mode_set_nofb = exynos_drm_crtc_mode_set_nofb, @@ -116,7 +104,7 @@ static void exynos_drm_crtc_destroy(struct drm_crtc *crtc) kfree(exynos_crtc); } -static struct drm_crtc_funcs exynos_crtc_funcs = { +static const struct drm_crtc_funcs exynos_crtc_funcs = { .set_config = drm_atomic_helper_set_config, .page_flip = drm_atomic_helper_page_flip, .destroy = exynos_drm_crtc_destroy, @@ -153,7 +141,7 @@ struct exynos_drm_crtc *exynos_drm_crtc_create(struct drm_device *drm_dev, private->crtc[pipe] = crtc; ret = drm_crtc_init_with_planes(drm_dev, crtc, plane, NULL, - &exynos_crtc_funcs); + &exynos_crtc_funcs, NULL); if (ret < 0) goto err_crtc; @@ -215,29 +203,6 @@ void exynos_drm_crtc_finish_update(struct exynos_drm_crtc *exynos_crtc, spin_unlock_irqrestore(&crtc->dev->event_lock, flags); } -void exynos_drm_crtc_complete_scanout(struct drm_framebuffer *fb) -{ - struct exynos_drm_crtc *exynos_crtc; - struct drm_device *dev = fb->dev; - struct drm_crtc *crtc; - - /* - * make sure that overlay data are updated to real hardware - * for all encoders. - */ - list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { - exynos_crtc = to_exynos_crtc(crtc); - - /* - * wait for vblank interrupt - * - this makes sure that overlay data are updated to - * real hardware. 
- */ - if (exynos_crtc->ops->wait_for_vblank) - exynos_crtc->ops->wait_for_vblank(exynos_crtc); - } -} - int exynos_drm_crtc_get_pipe_from_type(struct drm_device *drm_dev, enum exynos_drm_output_type out_type) { @@ -261,3 +226,29 @@ void exynos_drm_crtc_te_handler(struct drm_crtc *crtc) if (exynos_crtc->ops->te_handler) exynos_crtc->ops->te_handler(exynos_crtc); } + +void exynos_drm_crtc_cancel_page_flip(struct drm_crtc *crtc, + struct drm_file *file) +{ + struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc); + struct drm_pending_vblank_event *e; + unsigned long flags; + + spin_lock_irqsave(&crtc->dev->event_lock, flags); + e = exynos_crtc->event; + if (e && e->base.file_priv == file) { + exynos_crtc->event = NULL; + /* + * event will be destroyed by core part + * so below line should be removed later with core changes + */ + e->base.destroy(&e->base); + /* + * event_space will be increased by core part + * so below line should be removed later with core changes. + */ + file->event_space += sizeof(e->event); + atomic_dec(&exynos_crtc->pending_update); + } + spin_unlock_irqrestore(&crtc->dev->event_lock, flags); +} diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.h b/drivers/gpu/drm/exynos/exynos_drm_crtc.h index f9f365bd0257..cfdcf3e4eb1b 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_crtc.h +++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.h @@ -28,7 +28,6 @@ void exynos_drm_crtc_disable_vblank(struct drm_device *dev, unsigned int pipe); void exynos_drm_crtc_wait_pending_update(struct exynos_drm_crtc *exynos_crtc); void exynos_drm_crtc_finish_update(struct exynos_drm_crtc *exynos_crtc, struct exynos_drm_plane *exynos_plane); -void exynos_drm_crtc_complete_scanout(struct drm_framebuffer *fb); /* This function gets pipe value to crtc device matched with out_type. */ int exynos_drm_crtc_get_pipe_from_type(struct drm_device *drm_dev, @@ -41,4 +40,8 @@ int exynos_drm_crtc_get_pipe_from_type(struct drm_device *drm_dev, */ void exynos_drm_crtc_te_handler(struct drm_crtc *crtc); +/* This function cancels a page flip request. 
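+ * It is called from the driver's preclose hook for each CRTC, so that an
+ * event still queued by a closing DRM file is destroyed, its event space
+ * handed back and the CRTC's pending_update count dropped before the file
+ * itself goes away.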
*/ +void exynos_drm_crtc_cancel_page_flip(struct drm_crtc *crtc, + struct drm_file *file); + #endif diff --git a/drivers/gpu/drm/exynos/exynos_drm_dpi.c b/drivers/gpu/drm/exynos/exynos_drm_dpi.c index c748b8790de3..05350ae0785b 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_dpi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_dpi.c @@ -57,7 +57,7 @@ static void exynos_dpi_connector_destroy(struct drm_connector *connector) drm_connector_cleanup(connector); } -static struct drm_connector_funcs exynos_dpi_connector_funcs = { +static const struct drm_connector_funcs exynos_dpi_connector_funcs = { .dpms = drm_atomic_helper_connector_dpms, .detect = exynos_dpi_detect, .fill_modes = drm_helper_probe_single_connector_modes, @@ -100,7 +100,7 @@ exynos_dpi_best_encoder(struct drm_connector *connector) return &ctx->encoder; } -static struct drm_connector_helper_funcs exynos_dpi_connector_helper_funcs = { +static const struct drm_connector_helper_funcs exynos_dpi_connector_helper_funcs = { .get_modes = exynos_dpi_get_modes, .best_encoder = exynos_dpi_best_encoder, }; @@ -161,14 +161,14 @@ static void exynos_dpi_disable(struct drm_encoder *encoder) } } -static struct drm_encoder_helper_funcs exynos_dpi_encoder_helper_funcs = { +static const struct drm_encoder_helper_funcs exynos_dpi_encoder_helper_funcs = { .mode_fixup = exynos_dpi_mode_fixup, .mode_set = exynos_dpi_mode_set, .enable = exynos_dpi_enable, .disable = exynos_dpi_disable, }; -static struct drm_encoder_funcs exynos_dpi_encoder_funcs = { +static const struct drm_encoder_funcs exynos_dpi_encoder_funcs = { .destroy = drm_encoder_cleanup, }; @@ -309,7 +309,7 @@ int exynos_dpi_bind(struct drm_device *dev, struct drm_encoder *encoder) DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs); drm_encoder_init(dev, encoder, &exynos_dpi_encoder_funcs, - DRM_MODE_ENCODER_TMDS); + DRM_MODE_ENCODER_TMDS, NULL); drm_encoder_helper_add(encoder, &exynos_dpi_encoder_helper_funcs); diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c index 2c6019d6a205..68f0f36f6e7e 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.c +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c @@ -304,45 +304,6 @@ int exynos_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state, return 0; } -#ifdef CONFIG_PM_SLEEP -static int exynos_drm_suspend(struct drm_device *dev, pm_message_t state) -{ - struct drm_connector *connector; - - drm_modeset_lock_all(dev); - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { - int old_dpms = connector->dpms; - - if (connector->funcs->dpms) - connector->funcs->dpms(connector, DRM_MODE_DPMS_OFF); - - /* Set the old mode back to the connector for resume */ - connector->dpms = old_dpms; - } - drm_modeset_unlock_all(dev); - - return 0; -} - -static int exynos_drm_resume(struct drm_device *dev) -{ - struct drm_connector *connector; - - drm_modeset_lock_all(dev); - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { - if (connector->funcs->dpms) { - int dpms = connector->dpms; - - connector->dpms = DRM_MODE_DPMS_OFF; - connector->funcs->dpms(connector, dpms); - } - } - drm_modeset_unlock_all(dev); - - return 0; -} -#endif - static int exynos_drm_open(struct drm_device *dev, struct drm_file *file) { struct drm_exynos_file_private *file_priv; @@ -369,7 +330,12 @@ err_file_priv_free: static void exynos_drm_preclose(struct drm_device *dev, struct drm_file *file) { + struct drm_crtc *crtc; + exynos_drm_subdrv_close(dev, file); + + list_for_each_entry(crtc, 
&dev->mode_config.crtc_list, head) + exynos_drm_crtc_cancel_page_flip(crtc, file); } static void exynos_drm_postclose(struct drm_device *dev, struct drm_file *file) @@ -476,31 +442,54 @@ static struct drm_driver exynos_drm_driver = { }; #ifdef CONFIG_PM_SLEEP -static int exynos_drm_sys_suspend(struct device *dev) +static int exynos_drm_suspend(struct device *dev) { struct drm_device *drm_dev = dev_get_drvdata(dev); - pm_message_t message; + struct drm_connector *connector; if (pm_runtime_suspended(dev) || !drm_dev) return 0; - message.event = PM_EVENT_SUSPEND; - return exynos_drm_suspend(drm_dev, message); + drm_modeset_lock_all(drm_dev); + drm_for_each_connector(connector, drm_dev) { + int old_dpms = connector->dpms; + + if (connector->funcs->dpms) + connector->funcs->dpms(connector, DRM_MODE_DPMS_OFF); + + /* Set the old mode back to the connector for resume */ + connector->dpms = old_dpms; + } + drm_modeset_unlock_all(drm_dev); + + return 0; } -static int exynos_drm_sys_resume(struct device *dev) +static int exynos_drm_resume(struct device *dev) { struct drm_device *drm_dev = dev_get_drvdata(dev); + struct drm_connector *connector; if (pm_runtime_suspended(dev) || !drm_dev) return 0; - return exynos_drm_resume(drm_dev); + drm_modeset_lock_all(drm_dev); + drm_for_each_connector(connector, drm_dev) { + if (connector->funcs->dpms) { + int dpms = connector->dpms; + + connector->dpms = DRM_MODE_DPMS_OFF; + connector->funcs->dpms(connector, dpms); + } + } + drm_modeset_unlock_all(drm_dev); + + return 0; } #endif static const struct dev_pm_ops exynos_drm_pm_ops = { - SET_SYSTEM_SLEEP_PM_OPS(exynos_drm_sys_suspend, exynos_drm_sys_resume) + SET_SYSTEM_SLEEP_PM_OPS(exynos_drm_suspend, exynos_drm_resume) }; /* forward declaration */ diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h index f1eda7fa4e3c..17b5ded72ff1 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.h +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h @@ -38,25 +38,46 @@ enum exynos_drm_output_type { EXYNOS_DISPLAY_TYPE_VIDI, }; +struct exynos_drm_rect { + unsigned int x, y; + unsigned int w, h; +}; + +/* + * Exynos drm plane state structure. + * + * @base: plane_state object (contains drm_framebuffer pointer) + * @src: rectangle of the source image data to be displayed (clipped to + * visible part). + * @crtc: rectangle of the target image position on hardware screen + * (clipped to visible part). + * @h_ratio: horizontal scaling ratio, 16.16 fixed point + * @v_ratio: vertical scaling ratio, 16.16 fixed point + * + * this structure consists plane state data that will be applied to hardware + * specific overlay info. + */ + +struct exynos_drm_plane_state { + struct drm_plane_state base; + struct exynos_drm_rect crtc; + struct exynos_drm_rect src; + unsigned int h_ratio; + unsigned int v_ratio; + unsigned int zpos; +}; + +static inline struct exynos_drm_plane_state * +to_exynos_plane_state(struct drm_plane_state *state) +{ + return container_of(state, struct exynos_drm_plane_state, base); +} + /* * Exynos drm common overlay structure. * * @base: plane object - * @src_x: offset x on a framebuffer to be displayed. - * - the unit is screen coordinates. - * @src_y: offset y on a framebuffer to be displayed. - * - the unit is screen coordinates. - * @src_w: width of a partial image to be displayed from framebuffer. - * @src_h: height of a partial image to be displayed from framebuffer. - * @crtc_x: offset x on hardware screen. - * @crtc_y: offset y on hardware screen. 
- * @crtc_w: window width to be displayed (hardware screen). - * @crtc_h: window height to be displayed (hardware screen). - * @h_ratio: horizontal scaling ratio, 16.16 fixed point - * @v_ratio: vertical scaling ratio, 16.16 fixed point - * @dma_addr: array of bus(accessed by dma) address to the memory region - * allocated for a overlay. - * @zpos: order of overlay layer(z position). + * @index: hardware index of the overlay layer * * this structure is common to exynos SoC and its contents would be copied * to hardware specific overlay info. @@ -64,21 +85,33 @@ enum exynos_drm_output_type { struct exynos_drm_plane { struct drm_plane base; - unsigned int src_x; - unsigned int src_y; - unsigned int src_w; - unsigned int src_h; - unsigned int crtc_x; - unsigned int crtc_y; - unsigned int crtc_w; - unsigned int crtc_h; - unsigned int h_ratio; - unsigned int v_ratio; - dma_addr_t dma_addr[MAX_FB_BUFFER]; - unsigned int zpos; + const struct exynos_drm_plane_config *config; + unsigned int index; struct drm_framebuffer *pending_fb; }; +#define EXYNOS_DRM_PLANE_CAP_DOUBLE (1 << 0) +#define EXYNOS_DRM_PLANE_CAP_SCALE (1 << 1) +#define EXYNOS_DRM_PLANE_CAP_ZPOS (1 << 2) + +/* + * Exynos DRM plane configuration structure. + * + * @zpos: initial z-position of the plane. + * @type: type of the plane (primary, cursor or overlay). + * @pixel_formats: supported pixel formats. + * @num_pixel_formats: number of elements in 'pixel_formats'. + * @capabilities: supported features (see EXYNOS_DRM_PLANE_CAP_*) + */ + +struct exynos_drm_plane_config { + unsigned int zpos; + enum drm_plane_type type; + const uint32_t *pixel_formats; + unsigned int num_pixel_formats; + unsigned int capabilities; +}; + /* * Exynos drm crtc ops * @@ -90,8 +123,8 @@ struct exynos_drm_plane { * @wait_for_vblank: wait for vblank interrupt to make sure that * hardware overlay is updated. * @atomic_check: validate state - * @atomic_begin: prepare a window to receive a update - * @atomic_flush: mark the end of a window update + * @atomic_begin: prepare device to receive an update + * @atomic_flush: mark the end of device update * @update_plane: apply hardware specific overlay data to registers. * @disable_plane: disable hardware specific overlay. 
* @te_handler: trigger to transfer video image at the tearing effect @@ -111,14 +144,12 @@ struct exynos_drm_crtc_ops { void (*wait_for_vblank)(struct exynos_drm_crtc *crtc); int (*atomic_check)(struct exynos_drm_crtc *crtc, struct drm_crtc_state *state); - void (*atomic_begin)(struct exynos_drm_crtc *crtc, - struct exynos_drm_plane *plane); + void (*atomic_begin)(struct exynos_drm_crtc *crtc); void (*update_plane)(struct exynos_drm_crtc *crtc, struct exynos_drm_plane *plane); void (*disable_plane)(struct exynos_drm_crtc *crtc, struct exynos_drm_plane *plane); - void (*atomic_flush)(struct exynos_drm_crtc *crtc, - struct exynos_drm_plane *plane); + void (*atomic_flush)(struct exynos_drm_crtc *crtc); void (*te_handler)(struct exynos_drm_crtc *crtc); void (*clock_enable)(struct exynos_drm_crtc *crtc, bool enable); }; diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c index 12b03b364703..d84a498ef099 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c @@ -1458,66 +1458,6 @@ static const struct mipi_dsi_host_ops exynos_dsi_ops = { .transfer = exynos_dsi_host_transfer, }; -static int exynos_dsi_poweron(struct exynos_dsi *dsi) -{ - struct exynos_dsi_driver_data *driver_data = dsi->driver_data; - int ret, i; - - ret = regulator_bulk_enable(ARRAY_SIZE(dsi->supplies), dsi->supplies); - if (ret < 0) { - dev_err(dsi->dev, "cannot enable regulators %d\n", ret); - return ret; - } - - for (i = 0; i < driver_data->num_clks; i++) { - ret = clk_prepare_enable(dsi->clks[i]); - if (ret < 0) - goto err_clk; - } - - ret = phy_power_on(dsi->phy); - if (ret < 0) { - dev_err(dsi->dev, "cannot enable phy %d\n", ret); - goto err_clk; - } - - return 0; - -err_clk: - while (--i > -1) - clk_disable_unprepare(dsi->clks[i]); - regulator_bulk_disable(ARRAY_SIZE(dsi->supplies), dsi->supplies); - - return ret; -} - -static void exynos_dsi_poweroff(struct exynos_dsi *dsi) -{ - struct exynos_dsi_driver_data *driver_data = dsi->driver_data; - int ret, i; - - usleep_range(10000, 20000); - - if (dsi->state & DSIM_STATE_INITIALIZED) { - dsi->state &= ~DSIM_STATE_INITIALIZED; - - exynos_dsi_disable_clock(dsi); - - exynos_dsi_disable_irq(dsi); - } - - dsi->state &= ~DSIM_STATE_CMD_LPM; - - phy_power_off(dsi->phy); - - for (i = driver_data->num_clks - 1; i > -1; i--) - clk_disable_unprepare(dsi->clks[i]); - - ret = regulator_bulk_disable(ARRAY_SIZE(dsi->supplies), dsi->supplies); - if (ret < 0) - dev_err(dsi->dev, "cannot disable regulators %d\n", ret); -} - static void exynos_dsi_enable(struct drm_encoder *encoder) { struct exynos_dsi *dsi = encoder_to_dsi(encoder); @@ -1526,16 +1466,14 @@ static void exynos_dsi_enable(struct drm_encoder *encoder) if (dsi->state & DSIM_STATE_ENABLED) return; - ret = exynos_dsi_poweron(dsi); - if (ret < 0) - return; + pm_runtime_get_sync(dsi->dev); dsi->state |= DSIM_STATE_ENABLED; ret = drm_panel_prepare(dsi->panel); if (ret < 0) { dsi->state &= ~DSIM_STATE_ENABLED; - exynos_dsi_poweroff(dsi); + pm_runtime_put_sync(dsi->dev); return; } @@ -1547,7 +1485,7 @@ static void exynos_dsi_enable(struct drm_encoder *encoder) dsi->state &= ~DSIM_STATE_ENABLED; exynos_dsi_set_display_enable(dsi, false); drm_panel_unprepare(dsi->panel); - exynos_dsi_poweroff(dsi); + pm_runtime_put_sync(dsi->dev); return; } @@ -1569,7 +1507,7 @@ static void exynos_dsi_disable(struct drm_encoder *encoder) dsi->state &= ~DSIM_STATE_ENABLED; - exynos_dsi_poweroff(dsi); + pm_runtime_put_sync(dsi->dev); } static enum drm_connector_status @@ 
-1603,7 +1541,7 @@ static void exynos_dsi_connector_destroy(struct drm_connector *connector) connector->dev = NULL; } -static struct drm_connector_funcs exynos_dsi_connector_funcs = { +static const struct drm_connector_funcs exynos_dsi_connector_funcs = { .dpms = drm_atomic_helper_connector_dpms, .detect = exynos_dsi_detect, .fill_modes = drm_helper_probe_single_connector_modes, @@ -1631,7 +1569,7 @@ exynos_dsi_best_encoder(struct drm_connector *connector) return &dsi->encoder; } -static struct drm_connector_helper_funcs exynos_dsi_connector_helper_funcs = { +static const struct drm_connector_helper_funcs exynos_dsi_connector_helper_funcs = { .get_modes = exynos_dsi_get_modes, .best_encoder = exynos_dsi_best_encoder, }; @@ -1684,14 +1622,14 @@ static void exynos_dsi_mode_set(struct drm_encoder *encoder, vm->hsync_len = m->hsync_end - m->hsync_start; } -static struct drm_encoder_helper_funcs exynos_dsi_encoder_helper_funcs = { +static const struct drm_encoder_helper_funcs exynos_dsi_encoder_helper_funcs = { .mode_fixup = exynos_dsi_mode_fixup, .mode_set = exynos_dsi_mode_set, .enable = exynos_dsi_enable, .disable = exynos_dsi_disable, }; -static struct drm_encoder_funcs exynos_dsi_encoder_funcs = { +static const struct drm_encoder_funcs exynos_dsi_encoder_funcs = { .destroy = drm_encoder_cleanup, }; @@ -1797,13 +1735,13 @@ static int exynos_dsi_parse_dt(struct exynos_dsi *dsi) ep = of_graph_get_next_endpoint(node, NULL); if (!ep) { - ret = -ENXIO; + ret = -EINVAL; goto end; } dsi->bridge_node = of_graph_get_remote_port_parent(ep); if (!dsi->bridge_node) { - ret = -ENXIO; + ret = -EINVAL; goto end; } end: @@ -1831,7 +1769,7 @@ static int exynos_dsi_bind(struct device *dev, struct device *master, DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs); drm_encoder_init(drm_dev, encoder, &exynos_dsi_encoder_funcs, - DRM_MODE_ENCODER_TMDS); + DRM_MODE_ENCODER_TMDS, NULL); drm_encoder_helper_add(encoder, &exynos_dsi_encoder_helper_funcs); @@ -1954,22 +1892,99 @@ static int exynos_dsi_probe(struct platform_device *pdev) platform_set_drvdata(pdev, &dsi->encoder); + pm_runtime_enable(dev); + return component_add(dev, &exynos_dsi_component_ops); } static int exynos_dsi_remove(struct platform_device *pdev) { + pm_runtime_disable(&pdev->dev); + component_del(&pdev->dev, &exynos_dsi_component_ops); return 0; } +#ifdef CONFIG_PM +static int exynos_dsi_suspend(struct device *dev) +{ + struct drm_encoder *encoder = dev_get_drvdata(dev); + struct exynos_dsi *dsi = encoder_to_dsi(encoder); + struct exynos_dsi_driver_data *driver_data = dsi->driver_data; + int ret, i; + + usleep_range(10000, 20000); + + if (dsi->state & DSIM_STATE_INITIALIZED) { + dsi->state &= ~DSIM_STATE_INITIALIZED; + + exynos_dsi_disable_clock(dsi); + + exynos_dsi_disable_irq(dsi); + } + + dsi->state &= ~DSIM_STATE_CMD_LPM; + + phy_power_off(dsi->phy); + + for (i = driver_data->num_clks - 1; i > -1; i--) + clk_disable_unprepare(dsi->clks[i]); + + ret = regulator_bulk_disable(ARRAY_SIZE(dsi->supplies), dsi->supplies); + if (ret < 0) + dev_err(dsi->dev, "cannot disable regulators %d\n", ret); + + return 0; +} + +static int exynos_dsi_resume(struct device *dev) +{ + struct drm_encoder *encoder = dev_get_drvdata(dev); + struct exynos_dsi *dsi = encoder_to_dsi(encoder); + struct exynos_dsi_driver_data *driver_data = dsi->driver_data; + int ret, i; + + ret = regulator_bulk_enable(ARRAY_SIZE(dsi->supplies), dsi->supplies); + if (ret < 0) { + dev_err(dsi->dev, "cannot enable regulators %d\n", ret); + return ret; + } + + for (i = 0; i < 
driver_data->num_clks; i++) { + ret = clk_prepare_enable(dsi->clks[i]); + if (ret < 0) + goto err_clk; + } + + ret = phy_power_on(dsi->phy); + if (ret < 0) { + dev_err(dsi->dev, "cannot enable phy %d\n", ret); + goto err_clk; + } + + return 0; + +err_clk: + while (--i > -1) + clk_disable_unprepare(dsi->clks[i]); + regulator_bulk_disable(ARRAY_SIZE(dsi->supplies), dsi->supplies); + + return ret; +} +#endif + +static const struct dev_pm_ops exynos_dsi_pm_ops = { + SET_RUNTIME_PM_OPS(exynos_dsi_suspend, exynos_dsi_resume, NULL) +}; + struct platform_driver dsi_driver = { .probe = exynos_dsi_probe, .remove = exynos_dsi_remove, .driver = { .name = "exynos-dsi", .owner = THIS_MODULE, + .pm = &exynos_dsi_pm_ops, .of_match_table = exynos_dsi_of_match, }, }; diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c index 49b9bc302e87..d614194644c8 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fb.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c @@ -37,6 +37,7 @@ struct exynos_drm_fb { struct drm_framebuffer fb; struct exynos_drm_gem *exynos_gem[MAX_FB_BUFFER]; + dma_addr_t dma_addr[MAX_FB_BUFFER]; }; static int check_fb_gem_memory_type(struct drm_device *drm_dev, @@ -70,9 +71,6 @@ static void exynos_drm_fb_destroy(struct drm_framebuffer *fb) struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb); unsigned int i; - /* make sure that overlay data are updated before relesing fb. */ - exynos_drm_crtc_complete_scanout(fb); - drm_framebuffer_cleanup(fb); for (i = 0; i < ARRAY_SIZE(exynos_fb->exynos_gem); i++) { @@ -109,7 +107,7 @@ static int exynos_drm_fb_dirty(struct drm_framebuffer *fb, return 0; } -static struct drm_framebuffer_funcs exynos_drm_fb_funcs = { +static const struct drm_framebuffer_funcs exynos_drm_fb_funcs = { .destroy = exynos_drm_fb_destroy, .create_handle = exynos_drm_fb_create_handle, .dirty = exynos_drm_fb_dirty, @@ -135,6 +133,8 @@ exynos_drm_framebuffer_init(struct drm_device *dev, goto err; exynos_fb->exynos_gem[i] = exynos_gem[i]; + exynos_fb->dma_addr[i] = exynos_gem[i]->dma_addr + + mode_cmd->offsets[i]; } drm_helper_mode_fill_fb_struct(&exynos_fb->fb, mode_cmd); @@ -189,21 +189,14 @@ err: return ERR_PTR(ret); } -struct exynos_drm_gem *exynos_drm_fb_gem(struct drm_framebuffer *fb, int index) +dma_addr_t exynos_drm_fb_dma_addr(struct drm_framebuffer *fb, int index) { struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb); - struct exynos_drm_gem *exynos_gem; if (index >= MAX_FB_BUFFER) - return NULL; + return DMA_ERROR_CODE; - exynos_gem = exynos_fb->exynos_gem[index]; - if (!exynos_gem) - return NULL; - - DRM_DEBUG_KMS("dma_addr: 0x%lx\n", (unsigned long)exynos_gem->dma_addr); - - return exynos_gem; + return exynos_fb->dma_addr[index]; } static void exynos_drm_output_poll_changed(struct drm_device *dev) diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.h b/drivers/gpu/drm/exynos/exynos_drm_fb.h index 6fa0e47a1415..3a9e75b2cf6b 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fb.h +++ b/drivers/gpu/drm/exynos/exynos_drm_fb.h @@ -22,8 +22,7 @@ exynos_drm_framebuffer_init(struct drm_device *dev, struct exynos_drm_gem **exynos_gem, int count); -/* get gem object of a drm framebuffer */ -struct exynos_drm_gem *exynos_drm_fb_gem(struct drm_framebuffer *fb, int index); +dma_addr_t exynos_drm_fb_dma_addr(struct drm_framebuffer *fb, int index); void exynos_drm_mode_config_init(struct drm_device *dev); diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c index bd75c1531cac..70194d0e4fe4 100644 --- 
a/drivers/gpu/drm/exynos/exynos_drm_fimd.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c @@ -29,6 +29,7 @@ #include #include "exynos_drm_drv.h" +#include "exynos_drm_fb.h" #include "exynos_drm_fbdev.h" #include "exynos_drm_crtc.h" #include "exynos_drm_plane.h" @@ -87,7 +88,6 @@ /* FIMD has totally five hardware windows. */ #define WINDOWS_NR 5 -#define CURSOR_WIN 4 struct fimd_driver_data { unsigned int timing_base; @@ -150,6 +150,7 @@ struct fimd_context { struct drm_device *drm_dev; struct exynos_drm_crtc *crtc; struct exynos_drm_plane planes[WINDOWS_NR]; + struct exynos_drm_plane_config configs[WINDOWS_NR]; struct clk *bus_clk; struct clk *lcd_clk; void __iomem *regs; @@ -187,6 +188,14 @@ static const struct of_device_id fimd_driver_dt_match[] = { }; MODULE_DEVICE_TABLE(of, fimd_driver_dt_match); +static const enum drm_plane_type fimd_win_types[WINDOWS_NR] = { + DRM_PLANE_TYPE_PRIMARY, + DRM_PLANE_TYPE_OVERLAY, + DRM_PLANE_TYPE_OVERLAY, + DRM_PLANE_TYPE_OVERLAY, + DRM_PLANE_TYPE_CURSOR, +}; + static const uint32_t fimd_formats[] = { DRM_FORMAT_C8, DRM_FORMAT_XRGB1555, @@ -478,7 +487,7 @@ static void fimd_commit(struct exynos_drm_crtc *crtc) static void fimd_win_set_pixfmt(struct fimd_context *ctx, unsigned int win, - struct drm_framebuffer *fb) + uint32_t pixel_format, int width) { unsigned long val; @@ -489,11 +498,11 @@ static void fimd_win_set_pixfmt(struct fimd_context *ctx, unsigned int win, * So the request format is ARGB8888 then change it to XRGB8888. */ if (ctx->driver_data->has_limited_fmt && !win) { - if (fb->pixel_format == DRM_FORMAT_ARGB8888) - fb->pixel_format = DRM_FORMAT_XRGB8888; + if (pixel_format == DRM_FORMAT_ARGB8888) + pixel_format = DRM_FORMAT_XRGB8888; } - switch (fb->pixel_format) { + switch (pixel_format) { case DRM_FORMAT_C8: val |= WINCON0_BPPMODE_8BPP_PALETTE; val |= WINCONx_BURSTLEN_8WORD; @@ -529,17 +538,15 @@ static void fimd_win_set_pixfmt(struct fimd_context *ctx, unsigned int win, break; } - DRM_DEBUG_KMS("bpp = %d\n", fb->bits_per_pixel); - /* - * In case of exynos, setting dma-burst to 16Word causes permanent - * tearing for very small buffers, e.g. cursor buffer. Burst Mode - * switching which is based on plane size is not recommended as - * plane size varies alot towards the end of the screen and rapid - * movement causes unstable DMA which results into iommu crash/tear. + * Setting dma-burst to 16Word causes permanent tearing for very small + * buffers, e.g. cursor buffer. Burst Mode switching which based on + * plane size is not recommended as plane size varies alot towards the + * end of the screen and rapid movement causes unstable DMA, but it is + * still better to change dma-burst than displaying garbage. 
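+ * A framebuffer narrower than MIN_FB_WIDTH_FOR_16WORD_BURST (typically a
+ * small cursor buffer) therefore falls back from the 16-word burst length
+ * selected above to WINCONx_BURSTLEN_4WORD.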
*/ - if (fb->width < MIN_FB_WIDTH_FOR_16WORD_BURST) { + if (width < MIN_FB_WIDTH_FOR_16WORD_BURST) { val &= ~WINCONx_BURSTLEN_MASK; val |= WINCONx_BURSTLEN_4WORD; } @@ -615,64 +622,68 @@ static void fimd_shadow_protect_win(struct fimd_context *ctx, writel(val, ctx->regs + reg); } -static void fimd_atomic_begin(struct exynos_drm_crtc *crtc, - struct exynos_drm_plane *plane) +static void fimd_atomic_begin(struct exynos_drm_crtc *crtc) { struct fimd_context *ctx = crtc->ctx; + int i; if (ctx->suspended) return; - fimd_shadow_protect_win(ctx, plane->zpos, true); + for (i = 0; i < WINDOWS_NR; i++) + fimd_shadow_protect_win(ctx, i, true); } -static void fimd_atomic_flush(struct exynos_drm_crtc *crtc, - struct exynos_drm_plane *plane) +static void fimd_atomic_flush(struct exynos_drm_crtc *crtc) { struct fimd_context *ctx = crtc->ctx; + int i; if (ctx->suspended) return; - fimd_shadow_protect_win(ctx, plane->zpos, false); + for (i = 0; i < WINDOWS_NR; i++) + fimd_shadow_protect_win(ctx, i, false); } static void fimd_update_plane(struct exynos_drm_crtc *crtc, struct exynos_drm_plane *plane) { + struct exynos_drm_plane_state *state = + to_exynos_plane_state(plane->base.state); struct fimd_context *ctx = crtc->ctx; - struct drm_plane_state *state = plane->base.state; + struct drm_framebuffer *fb = state->base.fb; dma_addr_t dma_addr; unsigned long val, size, offset; unsigned int last_x, last_y, buf_offsize, line_size; - unsigned int win = plane->zpos; - unsigned int bpp = state->fb->bits_per_pixel >> 3; - unsigned int pitch = state->fb->pitches[0]; + unsigned int win = plane->index; + unsigned int bpp = fb->bits_per_pixel >> 3; + unsigned int pitch = fb->pitches[0]; if (ctx->suspended) return; - offset = plane->src_x * bpp; - offset += plane->src_y * pitch; + offset = state->src.x * bpp; + offset += state->src.y * pitch; /* buffer start address */ - dma_addr = plane->dma_addr[0] + offset; + dma_addr = exynos_drm_fb_dma_addr(fb, 0) + offset; val = (unsigned long)dma_addr; writel(val, ctx->regs + VIDWx_BUF_START(win, 0)); /* buffer end address */ - size = pitch * plane->crtc_h; + size = pitch * state->crtc.h; val = (unsigned long)(dma_addr + size); writel(val, ctx->regs + VIDWx_BUF_END(win, 0)); DRM_DEBUG_KMS("start addr = 0x%lx, end addr = 0x%lx, size = 0x%lx\n", (unsigned long)dma_addr, val, size); DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n", - plane->crtc_w, plane->crtc_h); + state->crtc.w, state->crtc.h); /* buffer size */ - buf_offsize = pitch - (plane->crtc_w * bpp); - line_size = plane->crtc_w * bpp; + buf_offsize = pitch - (state->crtc.w * bpp); + line_size = state->crtc.w * bpp; val = VIDW_BUF_SIZE_OFFSET(buf_offsize) | VIDW_BUF_SIZE_PAGEWIDTH(line_size) | VIDW_BUF_SIZE_OFFSET_E(buf_offsize) | @@ -680,16 +691,16 @@ static void fimd_update_plane(struct exynos_drm_crtc *crtc, writel(val, ctx->regs + VIDWx_BUF_SIZE(win, 0)); /* OSD position */ - val = VIDOSDxA_TOPLEFT_X(plane->crtc_x) | - VIDOSDxA_TOPLEFT_Y(plane->crtc_y) | - VIDOSDxA_TOPLEFT_X_E(plane->crtc_x) | - VIDOSDxA_TOPLEFT_Y_E(plane->crtc_y); + val = VIDOSDxA_TOPLEFT_X(state->crtc.x) | + VIDOSDxA_TOPLEFT_Y(state->crtc.y) | + VIDOSDxA_TOPLEFT_X_E(state->crtc.x) | + VIDOSDxA_TOPLEFT_Y_E(state->crtc.y); writel(val, ctx->regs + VIDOSD_A(win)); - last_x = plane->crtc_x + plane->crtc_w; + last_x = state->crtc.x + state->crtc.w; if (last_x) last_x--; - last_y = plane->crtc_y + plane->crtc_h; + last_y = state->crtc.y + state->crtc.h; if (last_y) last_y--; @@ -699,20 +710,20 @@ static void fimd_update_plane(struct exynos_drm_crtc *crtc, 
writel(val, ctx->regs + VIDOSD_B(win)); DRM_DEBUG_KMS("osd pos: tx = %d, ty = %d, bx = %d, by = %d\n", - plane->crtc_x, plane->crtc_y, last_x, last_y); + state->crtc.x, state->crtc.y, last_x, last_y); /* OSD size */ if (win != 3 && win != 4) { u32 offset = VIDOSD_D(win); if (win == 0) offset = VIDOSD_C(win); - val = plane->crtc_w * plane->crtc_h; + val = state->crtc.w * state->crtc.h; writel(val, ctx->regs + offset); DRM_DEBUG_KMS("osd size = 0x%x\n", (unsigned int)val); } - fimd_win_set_pixfmt(ctx, win, state->fb); + fimd_win_set_pixfmt(ctx, win, fb->pixel_format, state->src.w); /* hardware window 0 doesn't support color key. */ if (win != 0) @@ -731,7 +742,7 @@ static void fimd_disable_plane(struct exynos_drm_crtc *crtc, struct exynos_drm_plane *plane) { struct fimd_context *ctx = crtc->ctx; - unsigned int win = plane->zpos; + unsigned int win = plane->index; if (ctx->suspended) return; @@ -745,7 +756,6 @@ static void fimd_disable_plane(struct exynos_drm_crtc *crtc, static void fimd_enable(struct exynos_drm_crtc *crtc) { struct fimd_context *ctx = crtc->ctx; - int ret; if (!ctx->suspended) return; @@ -754,18 +764,6 @@ static void fimd_enable(struct exynos_drm_crtc *crtc) pm_runtime_get_sync(ctx->dev); - ret = clk_prepare_enable(ctx->bus_clk); - if (ret < 0) { - DRM_ERROR("Failed to prepare_enable the bus clk [%d]\n", ret); - return; - } - - ret = clk_prepare_enable(ctx->lcd_clk); - if (ret < 0) { - DRM_ERROR("Failed to prepare_enable the lcd clk [%d]\n", ret); - return; - } - /* if vblank was enabled status, enable it again. */ if (test_and_clear_bit(0, &ctx->irq_flags)) fimd_enable_vblank(ctx->crtc); @@ -795,11 +793,7 @@ static void fimd_disable(struct exynos_drm_crtc *crtc) writel(0, ctx->regs + VIDCON0); - clk_disable_unprepare(ctx->lcd_clk); - clk_disable_unprepare(ctx->bus_clk); - pm_runtime_put_sync(ctx->dev); - ctx->suspended = true; } @@ -941,18 +935,19 @@ static int fimd_bind(struct device *dev, struct device *master, void *data) struct drm_device *drm_dev = data; struct exynos_drm_private *priv = drm_dev->dev_private; struct exynos_drm_plane *exynos_plane; - enum drm_plane_type type; - unsigned int zpos; + unsigned int i; int ret; ctx->drm_dev = drm_dev; ctx->pipe = priv->pipe++; - for (zpos = 0; zpos < WINDOWS_NR; zpos++) { - type = exynos_plane_get_type(zpos, CURSOR_WIN); - ret = exynos_plane_init(drm_dev, &ctx->planes[zpos], - 1 << ctx->pipe, type, fimd_formats, - ARRAY_SIZE(fimd_formats), zpos); + for (i = 0; i < WINDOWS_NR; i++) { + ctx->configs[i].pixel_formats = fimd_formats; + ctx->configs[i].num_pixel_formats = ARRAY_SIZE(fimd_formats); + ctx->configs[i].zpos = i; + ctx->configs[i].type = fimd_win_types[i]; + ret = exynos_plane_init(drm_dev, &ctx->planes[i], i, + 1 << ctx->pipe, &ctx->configs[i]); if (ret) return ret; } @@ -1121,12 +1116,49 @@ static int fimd_remove(struct platform_device *pdev) return 0; } +#ifdef CONFIG_PM +static int exynos_fimd_suspend(struct device *dev) +{ + struct fimd_context *ctx = dev_get_drvdata(dev); + + clk_disable_unprepare(ctx->lcd_clk); + clk_disable_unprepare(ctx->bus_clk); + + return 0; +} + +static int exynos_fimd_resume(struct device *dev) +{ + struct fimd_context *ctx = dev_get_drvdata(dev); + int ret; + + ret = clk_prepare_enable(ctx->bus_clk); + if (ret < 0) { + DRM_ERROR("Failed to prepare_enable the bus clk [%d]\n", ret); + return ret; + } + + ret = clk_prepare_enable(ctx->lcd_clk); + if (ret < 0) { + DRM_ERROR("Failed to prepare_enable the lcd clk [%d]\n", ret); + return ret; + } + + return 0; +} +#endif + +static const struct 
dev_pm_ops exynos_fimd_pm_ops = { + SET_RUNTIME_PM_OPS(exynos_fimd_suspend, exynos_fimd_resume, NULL) +}; + struct platform_driver fimd_driver = { .probe = fimd_probe, .remove = fimd_remove, .driver = { .name = "exynos4-fb", .owner = THIS_MODULE, + .pm = &exynos_fimd_pm_ops, .of_match_table = fimd_driver_dt_match, }, }; diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h index 37ab8b282db6..9ca5047959ec 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gem.h +++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h @@ -55,8 +55,6 @@ struct exynos_drm_gem { struct sg_table *sgt; }; -struct page **exynos_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask); - /* destroy a buffer with gem object */ void exynos_drm_gem_destroy(struct exynos_drm_gem *exynos_gem); @@ -91,10 +89,6 @@ void exynos_drm_gem_put_dma_addr(struct drm_device *dev, unsigned int gem_handle, struct drm_file *filp); -/* map user space allocated by malloc to pages. */ -int exynos_drm_gem_userptr_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv); - /* get buffer information to memory region allocated by gem. */ int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); @@ -123,28 +117,6 @@ int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); /* set vm_flags and we can change the vm attribute to other one at here. */ int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma); -static inline int vma_is_io(struct vm_area_struct *vma) -{ - return !!(vma->vm_flags & (VM_IO | VM_PFNMAP)); -} - -/* get a copy of a virtual memory region. */ -struct vm_area_struct *exynos_gem_get_vma(struct vm_area_struct *vma); - -/* release a userspace virtual memory area. */ -void exynos_gem_put_vma(struct vm_area_struct *vma); - -/* get pages from user space. */ -int exynos_gem_get_pages_from_userptr(unsigned long start, - unsigned int npages, - struct page **pages, - struct vm_area_struct *vma); - -/* drop the reference to pages. */ -void exynos_gem_put_pages_to_userptr(struct page **pages, - unsigned int npages, - struct vm_area_struct *vma); - /* map sgt with dma region. */ int exynos_gem_map_sgt_with_dma(struct drm_device *drm_dev, struct sg_table *sgt, diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c index 11b87d2a7913..7aecd23cfa11 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c @@ -15,7 +15,8 @@ #include #include #include -#include +#include +#include #include #include @@ -126,6 +127,7 @@ struct gsc_capability { * @ippdrv: prepare initialization using ippdrv. * @regs_res: register resources. * @regs: memory mapped io registers. + * @sysreg: handle to SYSREG block regmap. * @lock: locking of operations. * @gsc_clk: gsc gate clock. * @sc: scaler infomations. 
@@ -138,6 +140,7 @@ struct gsc_context { struct exynos_drm_ippdrv ippdrv; struct resource *regs_res; void __iomem *regs; + struct regmap *sysreg; struct mutex lock; struct clk *gsc_clk; struct gsc_scaler sc; @@ -437,9 +440,12 @@ static int gsc_sw_reset(struct gsc_context *ctx) static void gsc_set_gscblk_fimd_wb(struct gsc_context *ctx, bool enable) { - u32 gscblk_cfg; + unsigned int gscblk_cfg; - gscblk_cfg = readl(SYSREG_GSCBLK_CFG1); + if (!ctx->sysreg) + return; + + regmap_read(ctx->sysreg, SYSREG_GSCBLK_CFG1, &gscblk_cfg); if (enable) gscblk_cfg |= GSC_BLK_DISP1WB_DEST(ctx->id) | @@ -448,7 +454,7 @@ static void gsc_set_gscblk_fimd_wb(struct gsc_context *ctx, bool enable) else gscblk_cfg |= GSC_BLK_PXLASYNC_LO_MASK_WB(ctx->id); - writel(gscblk_cfg, SYSREG_GSCBLK_CFG1); + regmap_write(ctx->sysreg, SYSREG_GSCBLK_CFG1, gscblk_cfg); } static void gsc_handle_irq(struct gsc_context *ctx, bool enable, @@ -1215,10 +1221,10 @@ static int gsc_clk_ctrl(struct gsc_context *ctx, bool enable) DRM_DEBUG_KMS("enable[%d]\n", enable); if (enable) { - clk_enable(ctx->gsc_clk); + clk_prepare_enable(ctx->gsc_clk); ctx->suspended = false; } else { - clk_disable(ctx->gsc_clk); + clk_disable_unprepare(ctx->gsc_clk); ctx->suspended = true; } @@ -1663,6 +1669,15 @@ static int gsc_probe(struct platform_device *pdev) if (!ctx) return -ENOMEM; + if (dev->of_node) { + ctx->sysreg = syscon_regmap_lookup_by_phandle(dev->of_node, + "samsung,sysreg"); + if (IS_ERR(ctx->sysreg)) { + dev_warn(dev, "failed to get system register.\n"); + ctx->sysreg = NULL; + } + } + /* clock control */ ctx->gsc_clk = devm_clk_get(dev, "gscl"); if (IS_ERR(ctx->gsc_clk)) { @@ -1713,7 +1728,6 @@ static int gsc_probe(struct platform_device *pdev) mutex_init(&ctx->lock); platform_set_drvdata(pdev, ctx); - pm_runtime_set_active(dev); pm_runtime_enable(dev); ret = exynos_drm_ippdrv_register(ippdrv); @@ -1797,6 +1811,12 @@ static const struct dev_pm_ops gsc_pm_ops = { SET_RUNTIME_PM_OPS(gsc_runtime_suspend, gsc_runtime_resume, NULL) }; +static const struct of_device_id exynos_drm_gsc_of_match[] = { + { .compatible = "samsung,exynos5-gsc" }, + { }, +}; +MODULE_DEVICE_TABLE(of, exynos_drm_gsc_of_match); + struct platform_driver gsc_driver = { .probe = gsc_probe, .remove = gsc_remove, @@ -1804,6 +1824,7 @@ struct platform_driver gsc_driver = { .name = "exynos-drm-gsc", .owner = THIS_MODULE, .pm = &gsc_pm_ops, + .of_match_table = of_match_ptr(exynos_drm_gsc_of_match), }, }; diff --git a/drivers/gpu/drm/exynos/exynos_drm_mic.c b/drivers/gpu/drm/exynos/exynos_drm_mic.c index 8994eab56ba8..4eaef36aec5a 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_mic.c +++ b/drivers/gpu/drm/exynos/exynos_drm_mic.c @@ -389,7 +389,7 @@ already_disabled: mutex_unlock(&mic_mutex); } -struct drm_bridge_funcs mic_bridge_funcs = { +static const struct drm_bridge_funcs mic_bridge_funcs = { .disable = mic_disable, .post_disable = mic_post_disable, .pre_enable = mic_pre_enable, diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c index 179311760bb7..d86227236f55 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_plane.c +++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c @@ -56,93 +56,213 @@ static int exynos_plane_get_size(int start, unsigned length, unsigned last) return size; } -static void exynos_plane_mode_set(struct drm_plane *plane, - struct drm_crtc *crtc, - struct drm_framebuffer *fb, - int crtc_x, int crtc_y, - unsigned int crtc_w, unsigned int crtc_h, - uint32_t src_x, uint32_t src_y, - uint32_t src_w, uint32_t src_h) +static void 
exynos_plane_mode_set(struct exynos_drm_plane_state *exynos_state) + { - struct exynos_drm_plane *exynos_plane = to_exynos_plane(plane); + struct drm_plane_state *state = &exynos_state->base; + struct drm_crtc *crtc = exynos_state->base.crtc; struct drm_display_mode *mode = &crtc->state->adjusted_mode; + int crtc_x, crtc_y; + unsigned int crtc_w, crtc_h; + unsigned int src_x, src_y; + unsigned int src_w, src_h; unsigned int actual_w; unsigned int actual_h; + /* + * The original src/dest coordinates are stored in exynos_state->base, + * but we want to keep another copy internal to our driver that we can + * clip/modify ourselves. + */ + + crtc_x = state->crtc_x; + crtc_y = state->crtc_y; + crtc_w = state->crtc_w; + crtc_h = state->crtc_h; + + src_x = state->src_x >> 16; + src_y = state->src_y >> 16; + src_w = state->src_w >> 16; + src_h = state->src_h >> 16; + + /* set ratio */ + exynos_state->h_ratio = (src_w << 16) / crtc_w; + exynos_state->v_ratio = (src_h << 16) / crtc_h; + + /* clip to visible area */ actual_w = exynos_plane_get_size(crtc_x, crtc_w, mode->hdisplay); actual_h = exynos_plane_get_size(crtc_y, crtc_h, mode->vdisplay); if (crtc_x < 0) { if (actual_w) - src_x -= crtc_x; + src_x += ((-crtc_x) * exynos_state->h_ratio) >> 16; crtc_x = 0; } if (crtc_y < 0) { if (actual_h) - src_y -= crtc_y; + src_y += ((-crtc_y) * exynos_state->v_ratio) >> 16; crtc_y = 0; } - /* set ratio */ - exynos_plane->h_ratio = (src_w << 16) / crtc_w; - exynos_plane->v_ratio = (src_h << 16) / crtc_h; - /* set drm framebuffer data. */ - exynos_plane->src_x = src_x; - exynos_plane->src_y = src_y; - exynos_plane->src_w = (actual_w * exynos_plane->h_ratio) >> 16; - exynos_plane->src_h = (actual_h * exynos_plane->v_ratio) >> 16; + exynos_state->src.x = src_x; + exynos_state->src.y = src_y; + exynos_state->src.w = (actual_w * exynos_state->h_ratio) >> 16; + exynos_state->src.h = (actual_h * exynos_state->v_ratio) >> 16; /* set plane range to be displayed. 
*/ - exynos_plane->crtc_x = crtc_x; - exynos_plane->crtc_y = crtc_y; - exynos_plane->crtc_w = actual_w; - exynos_plane->crtc_h = actual_h; + exynos_state->crtc.x = crtc_x; + exynos_state->crtc.y = crtc_y; + exynos_state->crtc.w = actual_w; + exynos_state->crtc.h = actual_h; DRM_DEBUG_KMS("plane : offset_x/y(%d,%d), width/height(%d,%d)", - exynos_plane->crtc_x, exynos_plane->crtc_y, - exynos_plane->crtc_w, exynos_plane->crtc_h); + exynos_state->crtc.x, exynos_state->crtc.y, + exynos_state->crtc.w, exynos_state->crtc.h); +} - plane->crtc = crtc; +static void exynos_drm_plane_reset(struct drm_plane *plane) +{ + struct exynos_drm_plane *exynos_plane = to_exynos_plane(plane); + struct exynos_drm_plane_state *exynos_state; + + if (plane->state) { + exynos_state = to_exynos_plane_state(plane->state); + if (exynos_state->base.fb) + drm_framebuffer_unreference(exynos_state->base.fb); + kfree(exynos_state); + plane->state = NULL; + } + + exynos_state = kzalloc(sizeof(*exynos_state), GFP_KERNEL); + if (exynos_state) { + exynos_state->zpos = exynos_plane->config->zpos; + plane->state = &exynos_state->base; + plane->state->plane = plane; + } +} + +static struct drm_plane_state * +exynos_drm_plane_duplicate_state(struct drm_plane *plane) +{ + struct exynos_drm_plane_state *exynos_state; + struct exynos_drm_plane_state *copy; + + exynos_state = to_exynos_plane_state(plane->state); + copy = kzalloc(sizeof(*exynos_state), GFP_KERNEL); + if (!copy) + return NULL; + + __drm_atomic_helper_plane_duplicate_state(plane, &copy->base); + copy->zpos = exynos_state->zpos; + return &copy->base; +} + +static void exynos_drm_plane_destroy_state(struct drm_plane *plane, + struct drm_plane_state *old_state) +{ + struct exynos_drm_plane_state *old_exynos_state = + to_exynos_plane_state(old_state); + __drm_atomic_helper_plane_destroy_state(plane, old_state); + kfree(old_exynos_state); +} + +static int exynos_drm_plane_atomic_set_property(struct drm_plane *plane, + struct drm_plane_state *state, + struct drm_property *property, + uint64_t val) +{ + struct exynos_drm_plane *exynos_plane = to_exynos_plane(plane); + struct exynos_drm_plane_state *exynos_state = + to_exynos_plane_state(state); + struct exynos_drm_private *dev_priv = plane->dev->dev_private; + const struct exynos_drm_plane_config *config = exynos_plane->config; + + if (property == dev_priv->plane_zpos_property && + (config->capabilities & EXYNOS_DRM_PLANE_CAP_ZPOS)) + exynos_state->zpos = val; + else + return -EINVAL; + + return 0; +} + +static int exynos_drm_plane_atomic_get_property(struct drm_plane *plane, + const struct drm_plane_state *state, + struct drm_property *property, + uint64_t *val) +{ + const struct exynos_drm_plane_state *exynos_state = + container_of(state, const struct exynos_drm_plane_state, base); + struct exynos_drm_private *dev_priv = plane->dev->dev_private; + + if (property == dev_priv->plane_zpos_property) + *val = exynos_state->zpos; + else + return -EINVAL; + + return 0; } static struct drm_plane_funcs exynos_plane_funcs = { .update_plane = drm_atomic_helper_update_plane, .disable_plane = drm_atomic_helper_disable_plane, .destroy = drm_plane_cleanup, - .reset = drm_atomic_helper_plane_reset, - .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state, - .atomic_destroy_state = drm_atomic_helper_plane_destroy_state, + .set_property = drm_atomic_helper_plane_set_property, + .reset = exynos_drm_plane_reset, + .atomic_duplicate_state = exynos_drm_plane_duplicate_state, + .atomic_destroy_state = exynos_drm_plane_destroy_state, +
.atomic_set_property = exynos_drm_plane_atomic_set_property, + .atomic_get_property = exynos_drm_plane_atomic_get_property, }; +static int +exynos_drm_plane_check_size(const struct exynos_drm_plane_config *config, + struct exynos_drm_plane_state *state) +{ + bool width_ok = false, height_ok = false; + + if (config->capabilities & EXYNOS_DRM_PLANE_CAP_SCALE) + return 0; + + if (state->src.w == state->crtc.w) + width_ok = true; + + if (state->src.h == state->crtc.h) + height_ok = true; + + if ((config->capabilities & EXYNOS_DRM_PLANE_CAP_DOUBLE) && + state->h_ratio == (1 << 15)) + width_ok = true; + + if ((config->capabilities & EXYNOS_DRM_PLANE_CAP_DOUBLE) && + state->v_ratio == (1 << 15)) + height_ok = true; + + if (width_ok & height_ok) + return 0; + + DRM_DEBUG_KMS("scaling mode is not supported"); + return -ENOTSUPP; +} + static int exynos_plane_atomic_check(struct drm_plane *plane, struct drm_plane_state *state) { struct exynos_drm_plane *exynos_plane = to_exynos_plane(plane); - int nr; - int i; + struct exynos_drm_plane_state *exynos_state = + to_exynos_plane_state(state); + int ret = 0; - if (!state->fb) + if (!state->crtc || !state->fb) return 0; - nr = drm_format_num_planes(state->fb->pixel_format); - for (i = 0; i < nr; i++) { - struct exynos_drm_gem *exynos_gem = - exynos_drm_fb_gem(state->fb, i); - if (!exynos_gem) { - DRM_DEBUG_KMS("gem object is null\n"); - return -EFAULT; - } + /* translate state into exynos_state */ + exynos_plane_mode_set(exynos_state); - exynos_plane->dma_addr[i] = exynos_gem->dma_addr + - state->fb->offsets[i]; - - DRM_DEBUG_KMS("buffer: %d, dma_addr = 0x%lx\n", - i, (unsigned long)exynos_plane->dma_addr[i]); - } - - return 0; + ret = exynos_drm_plane_check_size(exynos_plane->config, exynos_state); + return ret; } static void exynos_plane_atomic_update(struct drm_plane *plane, @@ -155,12 +275,7 @@ static void exynos_plane_atomic_update(struct drm_plane *plane, if (!state->crtc) return; - exynos_plane_mode_set(plane, state->crtc, state->fb, - state->crtc_x, state->crtc_y, - state->crtc_w, state->crtc_h, - state->src_x >> 16, state->src_y >> 16, - state->src_w >> 16, state->src_h >> 16); - + plane->crtc = state->crtc; exynos_plane->pending_fb = state->fb; if (exynos_crtc->ops->update_plane) @@ -177,8 +292,7 @@ static void exynos_plane_atomic_disable(struct drm_plane *plane, return; if (exynos_crtc->ops->disable_plane) - exynos_crtc->ops->disable_plane(exynos_crtc, - exynos_plane); + exynos_crtc->ops->disable_plane(exynos_crtc, exynos_plane); } static const struct drm_plane_helper_funcs plane_helper_funcs = { @@ -196,8 +310,8 @@ static void exynos_plane_attach_zpos_property(struct drm_plane *plane, prop = dev_priv->plane_zpos_property; if (!prop) { - prop = drm_property_create_range(dev, DRM_MODE_PROP_IMMUTABLE, - "zpos", 0, MAX_PLANE - 1); + prop = drm_property_create_range(dev, 0, "zpos", + 0, MAX_PLANE - 1); if (!prop) return; @@ -207,28 +321,19 @@ static void exynos_plane_attach_zpos_property(struct drm_plane *plane, drm_object_attach_property(&plane->base, prop, zpos); } -enum drm_plane_type exynos_plane_get_type(unsigned int zpos, - unsigned int cursor_win) -{ - if (zpos == DEFAULT_WIN) - return DRM_PLANE_TYPE_PRIMARY; - else if (zpos == cursor_win) - return DRM_PLANE_TYPE_CURSOR; - else - return DRM_PLANE_TYPE_OVERLAY; -} - int exynos_plane_init(struct drm_device *dev, struct exynos_drm_plane *exynos_plane, - unsigned long possible_crtcs, enum drm_plane_type type, - const uint32_t *formats, unsigned int fcount, - unsigned int zpos) + unsigned int 
index, unsigned long possible_crtcs, + const struct exynos_drm_plane_config *config) { int err; - err = drm_universal_plane_init(dev, &exynos_plane->base, possible_crtcs, - &exynos_plane_funcs, formats, fcount, - type); + err = drm_universal_plane_init(dev, &exynos_plane->base, + possible_crtcs, + &exynos_plane_funcs, + config->pixel_formats, + config->num_pixel_formats, + config->type, NULL); if (err) { DRM_ERROR("failed to initialize plane\n"); return err; @@ -236,10 +341,10 @@ int exynos_plane_init(struct drm_device *dev, drm_plane_helper_add(&exynos_plane->base, &plane_helper_funcs); - exynos_plane->zpos = zpos; + exynos_plane->index = index; + exynos_plane->config = config; - if (type == DRM_PLANE_TYPE_OVERLAY) - exynos_plane_attach_zpos_property(&exynos_plane->base, zpos); + exynos_plane_attach_zpos_property(&exynos_plane->base, config->zpos); return 0; } diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.h b/drivers/gpu/drm/exynos/exynos_drm_plane.h index abb641e64c23..9aafad164cdf 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_plane.h +++ b/drivers/gpu/drm/exynos/exynos_drm_plane.h @@ -9,10 +9,7 @@ * */ -enum drm_plane_type exynos_plane_get_type(unsigned int zpos, - unsigned int cursor_win); int exynos_plane_init(struct drm_device *dev, - struct exynos_drm_plane *exynos_plane, - unsigned long possible_crtcs, enum drm_plane_type type, - const uint32_t *formats, unsigned int fcount, - unsigned int zpos); + struct exynos_drm_plane *exynos_plane, unsigned int index, + unsigned long possible_crtcs, + const struct exynos_drm_plane_config *config); diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c index 2f5c118f4c8e..bea0f7826d30 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c +++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c @@ -790,10 +790,10 @@ static int rotator_remove(struct platform_device *pdev) static int rotator_clk_crtl(struct rot_context *rot, bool enable) { if (enable) { - clk_enable(rot->clock); + clk_prepare_enable(rot->clock); rot->suspended = false; } else { - clk_disable(rot->clock); + clk_disable_unprepare(rot->clock); rot->suspended = true; } diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c index 669362c53f49..62ac4e5fa51d 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c @@ -24,12 +24,12 @@ #include "exynos_drm_drv.h" #include "exynos_drm_crtc.h" +#include "exynos_drm_fb.h" #include "exynos_drm_plane.h" #include "exynos_drm_vidi.h" /* vidi has totally three virtual windows. 
*/ #define WINDOWS_NR 3 -#define CURSOR_WIN 2 #define ctx_from_connector(c) container_of(c, struct vidi_context, \ connector) @@ -89,6 +89,12 @@ static const uint32_t formats[] = { DRM_FORMAT_NV12, }; +static const enum drm_plane_type vidi_win_types[WINDOWS_NR] = { + DRM_PLANE_TYPE_PRIMARY, + DRM_PLANE_TYPE_OVERLAY, + DRM_PLANE_TYPE_CURSOR, +}; + static int vidi_enable_vblank(struct exynos_drm_crtc *crtc) { struct vidi_context *ctx = crtc->ctx; @@ -125,12 +131,15 @@ static void vidi_disable_vblank(struct exynos_drm_crtc *crtc) static void vidi_update_plane(struct exynos_drm_crtc *crtc, struct exynos_drm_plane *plane) { + struct drm_plane_state *state = plane->base.state; struct vidi_context *ctx = crtc->ctx; + dma_addr_t addr; if (ctx->suspended) return; - DRM_DEBUG_KMS("dma_addr = %pad\n", plane->dma_addr); + addr = exynos_drm_fb_dma_addr(state->fb, 0); + DRM_DEBUG_KMS("dma_addr = %pad\n", &addr); if (ctx->vblank_on) schedule_work(&ctx->work); @@ -330,7 +339,7 @@ static void vidi_connector_destroy(struct drm_connector *connector) { } -static struct drm_connector_funcs vidi_connector_funcs = { +static const struct drm_connector_funcs vidi_connector_funcs = { .dpms = drm_atomic_helper_connector_dpms, .fill_modes = drm_helper_probe_single_connector_modes, .detect = vidi_detect, @@ -374,7 +383,7 @@ static struct drm_encoder *vidi_best_encoder(struct drm_connector *connector) return &ctx->encoder; } -static struct drm_connector_helper_funcs vidi_connector_helper_funcs = { +static const struct drm_connector_helper_funcs vidi_connector_helper_funcs = { .get_modes = vidi_get_modes, .best_encoder = vidi_best_encoder, }; @@ -422,14 +431,14 @@ static void exynos_vidi_disable(struct drm_encoder *encoder) { } -static struct drm_encoder_helper_funcs exynos_vidi_encoder_helper_funcs = { +static const struct drm_encoder_helper_funcs exynos_vidi_encoder_helper_funcs = { .mode_fixup = exynos_vidi_mode_fixup, .mode_set = exynos_vidi_mode_set, .enable = exynos_vidi_enable, .disable = exynos_vidi_disable, }; -static struct drm_encoder_funcs exynos_vidi_encoder_funcs = { +static const struct drm_encoder_funcs exynos_vidi_encoder_funcs = { .destroy = drm_encoder_cleanup, }; @@ -439,17 +448,21 @@ static int vidi_bind(struct device *dev, struct device *master, void *data) struct drm_device *drm_dev = data; struct drm_encoder *encoder = &ctx->encoder; struct exynos_drm_plane *exynos_plane; - enum drm_plane_type type; - unsigned int zpos; + struct exynos_drm_plane_config plane_config = { 0 }; + unsigned int i; int pipe, ret; vidi_ctx_initialize(ctx, drm_dev); - for (zpos = 0; zpos < WINDOWS_NR; zpos++) { - type = exynos_plane_get_type(zpos, CURSOR_WIN); - ret = exynos_plane_init(drm_dev, &ctx->planes[zpos], - 1 << ctx->pipe, type, formats, - ARRAY_SIZE(formats), zpos); + plane_config.pixel_formats = formats; + plane_config.num_pixel_formats = ARRAY_SIZE(formats); + + for (i = 0; i < WINDOWS_NR; i++) { + plane_config.zpos = i; + plane_config.type = vidi_win_types[i]; + + ret = exynos_plane_init(drm_dev, &ctx->planes[i], i, + 1 << ctx->pipe, &plane_config); if (ret) return ret; } @@ -473,7 +486,7 @@ static int vidi_bind(struct device *dev, struct device *master, void *data) DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs); drm_encoder_init(drm_dev, encoder, &exynos_vidi_encoder_funcs, - DRM_MODE_ENCODER_TMDS); + DRM_MODE_ENCODER_TMDS, NULL); drm_encoder_helper_add(encoder, &exynos_vidi_encoder_helper_funcs); diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c 
index 57b675563e94..21a29dbce18c 100644 --- a/drivers/gpu/drm/exynos/exynos_hdmi.c +++ b/drivers/gpu/drm/exynos/exynos_hdmi.c @@ -113,7 +113,7 @@ struct hdmi_context { void __iomem *regs_hdmiphy; struct i2c_client *hdmiphy_port; struct i2c_adapter *ddc_adpt; - struct gpio_desc *hpd_gpio; + struct gpio_desc *hpd_gpio; int irq; struct regmap *pmureg; struct clk *hdmi; @@ -956,7 +956,7 @@ static void hdmi_connector_destroy(struct drm_connector *connector) drm_connector_cleanup(connector); } -static struct drm_connector_funcs hdmi_connector_funcs = { +static const struct drm_connector_funcs hdmi_connector_funcs = { .dpms = drm_atomic_helper_connector_dpms, .fill_modes = drm_helper_probe_single_connector_modes, .detect = hdmi_detect, @@ -1030,7 +1030,7 @@ static struct drm_encoder *hdmi_best_encoder(struct drm_connector *connector) return &hdata->encoder; } -static struct drm_connector_helper_funcs hdmi_connector_helper_funcs = { +static const struct drm_connector_helper_funcs hdmi_connector_helper_funcs = { .get_modes = hdmi_get_modes, .mode_valid = hdmi_mode_valid, .best_encoder = hdmi_best_encoder, @@ -1588,8 +1588,6 @@ static void hdmi_enable(struct drm_encoder *encoder) if (hdata->powered) return; - hdata->powered = true; - pm_runtime_get_sync(hdata->dev); if (regulator_bulk_enable(ARRAY_SIZE(supply), hdata->regul_bulk)) @@ -1599,10 +1597,9 @@ static void hdmi_enable(struct drm_encoder *encoder) regmap_update_bits(hdata->pmureg, PMU_HDMI_PHY_CONTROL, PMU_HDMI_PHY_ENABLE_BIT, 1); - clk_prepare_enable(hdata->hdmi); - clk_prepare_enable(hdata->sclk_hdmi); - hdmi_conf_apply(hdata); + + hdata->powered = true; } static void hdmi_disable(struct drm_encoder *encoder) @@ -1633,9 +1630,6 @@ static void hdmi_disable(struct drm_encoder *encoder) cancel_delayed_work(&hdata->hotplug_work); - clk_disable_unprepare(hdata->sclk_hdmi); - clk_disable_unprepare(hdata->hdmi); - /* reset pmu hdmiphy control bit to disable hdmiphy */ regmap_update_bits(hdata->pmureg, PMU_HDMI_PHY_CONTROL, PMU_HDMI_PHY_ENABLE_BIT, 0); @@ -1647,14 +1641,14 @@ static void hdmi_disable(struct drm_encoder *encoder) hdata->powered = false; } -static struct drm_encoder_helper_funcs exynos_hdmi_encoder_helper_funcs = { +static const struct drm_encoder_helper_funcs exynos_hdmi_encoder_helper_funcs = { .mode_fixup = hdmi_mode_fixup, .mode_set = hdmi_mode_set, .enable = hdmi_enable, .disable = hdmi_disable, }; -static struct drm_encoder_funcs exynos_hdmi_encoder_funcs = { +static const struct drm_encoder_funcs exynos_hdmi_encoder_funcs = { .destroy = drm_encoder_cleanup, }; @@ -1793,7 +1787,7 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data) DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs); drm_encoder_init(drm_dev, encoder, &exynos_hdmi_encoder_funcs, - DRM_MODE_ENCODER_TMDS); + DRM_MODE_ENCODER_TMDS, NULL); drm_encoder_helper_add(encoder, &exynos_hdmi_encoder_helper_funcs); @@ -1978,12 +1972,49 @@ static int hdmi_remove(struct platform_device *pdev) return 0; } +#ifdef CONFIG_PM +static int exynos_hdmi_suspend(struct device *dev) +{ + struct hdmi_context *hdata = dev_get_drvdata(dev); + + clk_disable_unprepare(hdata->sclk_hdmi); + clk_disable_unprepare(hdata->hdmi); + + return 0; +} + +static int exynos_hdmi_resume(struct device *dev) +{ + struct hdmi_context *hdata = dev_get_drvdata(dev); + int ret; + + ret = clk_prepare_enable(hdata->hdmi); + if (ret < 0) { + DRM_ERROR("Failed to prepare_enable the hdmi clk [%d]\n", ret); + return ret; + } + ret = clk_prepare_enable(hdata->sclk_hdmi); + if 
(ret < 0) { + DRM_ERROR("Failed to prepare_enable the sclk_mixer clk [%d]\n", + ret); + return ret; + } + + return 0; +} +#endif + +static const struct dev_pm_ops exynos_hdmi_pm_ops = { + SET_RUNTIME_PM_OPS(exynos_hdmi_suspend, exynos_hdmi_resume, NULL) +}; + struct platform_driver hdmi_driver = { .probe = hdmi_probe, .remove = hdmi_remove, .driver = { .name = "exynos-hdmi", .owner = THIS_MODULE, + .pm = &exynos_hdmi_pm_ops, .of_match_table = hdmi_match_types, }, }; diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c index d09f8f9a8939..b5fbc1cbf024 100644 --- a/drivers/gpu/drm/exynos/exynos_mixer.c +++ b/drivers/gpu/drm/exynos/exynos_mixer.c @@ -37,12 +37,12 @@ #include "exynos_drm_drv.h" #include "exynos_drm_crtc.h" +#include "exynos_drm_fb.h" #include "exynos_drm_plane.h" #include "exynos_drm_iommu.h" #define MIXER_WIN_NR 3 #define VP_DEFAULT_WIN 2 -#define CURSOR_WIN 1 /* The pixelformats that are natively supported by the mixer. */ #define MXR_FORMAT_RGB565 4 @@ -76,7 +76,9 @@ enum mixer_flag_bits { static const uint32_t mixer_formats[] = { DRM_FORMAT_XRGB4444, + DRM_FORMAT_ARGB4444, DRM_FORMAT_XRGB1555, + DRM_FORMAT_ARGB1555, DRM_FORMAT_RGB565, DRM_FORMAT_XRGB8888, DRM_FORMAT_ARGB8888, @@ -111,6 +113,31 @@ struct mixer_drv_data { bool has_sclk; }; +static const struct exynos_drm_plane_config plane_configs[MIXER_WIN_NR] = { + { + .zpos = 0, + .type = DRM_PLANE_TYPE_PRIMARY, + .pixel_formats = mixer_formats, + .num_pixel_formats = ARRAY_SIZE(mixer_formats), + .capabilities = EXYNOS_DRM_PLANE_CAP_DOUBLE | + EXYNOS_DRM_PLANE_CAP_ZPOS, + }, { + .zpos = 1, + .type = DRM_PLANE_TYPE_CURSOR, + .pixel_formats = mixer_formats, + .num_pixel_formats = ARRAY_SIZE(mixer_formats), + .capabilities = EXYNOS_DRM_PLANE_CAP_DOUBLE | + EXYNOS_DRM_PLANE_CAP_ZPOS, + }, { + .zpos = 2, + .type = DRM_PLANE_TYPE_OVERLAY, + .pixel_formats = vp_formats, + .num_pixel_formats = ARRAY_SIZE(vp_formats), + .capabilities = EXYNOS_DRM_PLANE_CAP_SCALE | + EXYNOS_DRM_PLANE_CAP_ZPOS, + }, +}; + static const u8 filter_y_horiz_tap8[] = { 0, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0, 0, 0, @@ -140,6 +167,18 @@ static const u8 filter_cr_horiz_tap4[] = { 70, 59, 48, 37, 27, 19, 11, 5, }; +static inline bool is_alpha_format(unsigned int pixel_format) +{ + switch (pixel_format) { + case DRM_FORMAT_ARGB8888: + case DRM_FORMAT_ARGB1555: + case DRM_FORMAT_ARGB4444: + return true; + default: + return false; + } +} + static inline u32 vp_reg_read(struct mixer_resources *res, u32 reg_id) { return readl(res->vp_regs + reg_id); @@ -269,6 +308,37 @@ static void vp_default_filter(struct mixer_resources *res) filter_cr_horiz_tap4, sizeof(filter_cr_horiz_tap4)); } +static void mixer_cfg_gfx_blend(struct mixer_context *ctx, unsigned int win, + bool alpha) +{ + struct mixer_resources *res = &ctx->mixer_res; + u32 val; + + val = MXR_GRP_CFG_COLOR_KEY_DISABLE; /* no blank key */ + if (alpha) { + /* blending based on pixel alpha */ + val |= MXR_GRP_CFG_BLEND_PRE_MUL; + val |= MXR_GRP_CFG_PIXEL_BLEND_EN; + } + mixer_reg_writemask(res, MXR_GRAPHIC_CFG(win), + val, MXR_GRP_CFG_MISC_MASK); +} + +static void mixer_cfg_vp_blend(struct mixer_context *ctx) +{ + struct mixer_resources *res = &ctx->mixer_res; + u32 val; + + /* + * No blending at the moment since the NV12/NV21 pixelformats don't + * have an alpha channel. However the mixer supports a global alpha + * value for a layer. Once this functionality is exposed, we can + * support blending of the video layer through this. 
+ */ + val = 0; + mixer_reg_write(res, MXR_VIDEO_CFG, val); +} + static void mixer_vsync_set_update(struct mixer_context *ctx, bool enable) { struct mixer_resources *res = &ctx->mixer_res; @@ -350,7 +420,7 @@ static void mixer_cfg_rgb_fmt(struct mixer_context *ctx, unsigned int height) } static void mixer_cfg_layer(struct mixer_context *ctx, unsigned int win, - bool enable) + unsigned int priority, bool enable) { struct mixer_resources *res = &ctx->mixer_res; u32 val = enable ? ~0 : 0; @@ -358,20 +428,24 @@ static void mixer_cfg_layer(struct mixer_context *ctx, unsigned int win, switch (win) { case 0: mixer_reg_writemask(res, MXR_CFG, val, MXR_CFG_GRP0_ENABLE); + mixer_reg_writemask(res, MXR_LAYER_CFG, + MXR_LAYER_CFG_GRP0_VAL(priority), + MXR_LAYER_CFG_GRP0_MASK); break; case 1: mixer_reg_writemask(res, MXR_CFG, val, MXR_CFG_GRP1_ENABLE); + mixer_reg_writemask(res, MXR_LAYER_CFG, + MXR_LAYER_CFG_GRP1_VAL(priority), + MXR_LAYER_CFG_GRP1_MASK); break; - case 2: + case VP_DEFAULT_WIN: if (ctx->vp_enabled) { vp_reg_writemask(res, VP_ENABLE, val, VP_ENABLE_ON); mixer_reg_writemask(res, MXR_CFG, val, MXR_CFG_VP_ENABLE); - - /* control blending of graphic layer 0 */ - mixer_reg_writemask(res, MXR_GRAPHIC_CFG(0), val, - MXR_GRP_CFG_BLEND_PRE_MUL | - MXR_GRP_CFG_PIXEL_BLEND_EN); + mixer_reg_writemask(res, MXR_LAYER_CFG, + MXR_LAYER_CFG_VP_VAL(priority), + MXR_LAYER_CFG_VP_MASK); } break; } @@ -399,10 +473,11 @@ static void mixer_stop(struct mixer_context *ctx) static void vp_video_buffer(struct mixer_context *ctx, struct exynos_drm_plane *plane) { + struct exynos_drm_plane_state *state = + to_exynos_plane_state(plane->base.state); + struct drm_display_mode *mode = &state->base.crtc->state->adjusted_mode; struct mixer_resources *res = &ctx->mixer_res; - struct drm_plane_state *state = plane->base.state; - struct drm_framebuffer *fb = state->fb; - struct drm_display_mode *mode = &state->crtc->mode; + struct drm_framebuffer *fb = state->base.fb; unsigned long flags; dma_addr_t luma_addr[2], chroma_addr[2]; bool tiled_mode = false; @@ -422,8 +497,8 @@ static void vp_video_buffer(struct mixer_context *ctx, return; } - luma_addr[0] = plane->dma_addr[0]; - chroma_addr[0] = plane->dma_addr[1]; + luma_addr[0] = exynos_drm_fb_dma_addr(fb, 0); + chroma_addr[0] = exynos_drm_fb_dma_addr(fb, 1); if (mode->flags & DRM_MODE_FLAG_INTERLACE) { ctx->interlace = true; @@ -441,7 +516,6 @@ static void vp_video_buffer(struct mixer_context *ctx, } spin_lock_irqsave(&res->reg_slock, flags); - mixer_vsync_set_update(ctx, false); /* interlace or progressive scan mode */ val = (ctx->interlace ? 
~0 : 0); @@ -459,24 +533,24 @@ static void vp_video_buffer(struct mixer_context *ctx, vp_reg_write(res, VP_IMG_SIZE_C, VP_IMG_HSIZE(fb->pitches[0]) | VP_IMG_VSIZE(fb->height / 2)); - vp_reg_write(res, VP_SRC_WIDTH, plane->src_w); - vp_reg_write(res, VP_SRC_HEIGHT, plane->src_h); + vp_reg_write(res, VP_SRC_WIDTH, state->src.w); + vp_reg_write(res, VP_SRC_HEIGHT, state->src.h); vp_reg_write(res, VP_SRC_H_POSITION, - VP_SRC_H_POSITION_VAL(plane->src_x)); - vp_reg_write(res, VP_SRC_V_POSITION, plane->src_y); + VP_SRC_H_POSITION_VAL(state->src.x)); + vp_reg_write(res, VP_SRC_V_POSITION, state->src.y); - vp_reg_write(res, VP_DST_WIDTH, plane->crtc_w); - vp_reg_write(res, VP_DST_H_POSITION, plane->crtc_x); + vp_reg_write(res, VP_DST_WIDTH, state->crtc.w); + vp_reg_write(res, VP_DST_H_POSITION, state->crtc.x); if (ctx->interlace) { - vp_reg_write(res, VP_DST_HEIGHT, plane->crtc_h / 2); - vp_reg_write(res, VP_DST_V_POSITION, plane->crtc_y / 2); + vp_reg_write(res, VP_DST_HEIGHT, state->crtc.h / 2); + vp_reg_write(res, VP_DST_V_POSITION, state->crtc.y / 2); } else { - vp_reg_write(res, VP_DST_HEIGHT, plane->crtc_h); - vp_reg_write(res, VP_DST_V_POSITION, plane->crtc_y); + vp_reg_write(res, VP_DST_HEIGHT, state->crtc.h); + vp_reg_write(res, VP_DST_V_POSITION, state->crtc.y); } - vp_reg_write(res, VP_H_RATIO, plane->h_ratio); - vp_reg_write(res, VP_V_RATIO, plane->v_ratio); + vp_reg_write(res, VP_H_RATIO, state->h_ratio); + vp_reg_write(res, VP_V_RATIO, state->v_ratio); vp_reg_write(res, VP_ENDIAN_MODE, VP_ENDIAN_MODE_LITTLE); @@ -488,10 +562,10 @@ static void vp_video_buffer(struct mixer_context *ctx, mixer_cfg_scan(ctx, mode->vdisplay); mixer_cfg_rgb_fmt(ctx, mode->vdisplay); - mixer_cfg_layer(ctx, plane->zpos, true); + mixer_cfg_layer(ctx, plane->index, state->zpos + 1, true); + mixer_cfg_vp_blend(ctx); mixer_run(ctx); - mixer_vsync_set_update(ctx, true); spin_unlock_irqrestore(&res->reg_slock, flags); mixer_regs_dump(ctx); @@ -505,39 +579,16 @@ static void mixer_layer_update(struct mixer_context *ctx) mixer_reg_writemask(res, MXR_CFG, ~0, MXR_CFG_LAYER_UPDATE); } -static int mixer_setup_scale(const struct exynos_drm_plane *plane, - unsigned int *x_ratio, unsigned int *y_ratio) -{ - if (plane->crtc_w != plane->src_w) { - if (plane->crtc_w == 2 * plane->src_w) - *x_ratio = 1; - else - goto fail; - } - - if (plane->crtc_h != plane->src_h) { - if (plane->crtc_h == 2 * plane->src_h) - *y_ratio = 1; - else - goto fail; - } - - return 0; - -fail: - DRM_DEBUG_KMS("only 2x width/height scaling of plane supported\n"); - return -ENOTSUPP; -} - static void mixer_graph_buffer(struct mixer_context *ctx, struct exynos_drm_plane *plane) { + struct exynos_drm_plane_state *state = + to_exynos_plane_state(plane->base.state); + struct drm_display_mode *mode = &state->base.crtc->state->adjusted_mode; struct mixer_resources *res = &ctx->mixer_res; - struct drm_plane_state *state = plane->base.state; - struct drm_framebuffer *fb = state->fb; - struct drm_display_mode *mode = &state->crtc->mode; + struct drm_framebuffer *fb = state->base.fb; unsigned long flags; - unsigned int win = plane->zpos; + unsigned int win = plane->index; unsigned int x_ratio = 0, y_ratio = 0; unsigned int src_x_offset, src_y_offset, dst_x_offset, dst_y_offset; dma_addr_t dma_addr; @@ -546,10 +597,12 @@ static void mixer_graph_buffer(struct mixer_context *ctx, switch (fb->pixel_format) { case DRM_FORMAT_XRGB4444: + case DRM_FORMAT_ARGB4444: fmt = MXR_FORMAT_ARGB4444; break; case DRM_FORMAT_XRGB1555: + case DRM_FORMAT_ARGB1555: fmt = 
MXR_FORMAT_ARGB1555; break; @@ -567,17 +620,17 @@ static void mixer_graph_buffer(struct mixer_context *ctx, return; } - /* check if mixer supports requested scaling setup */ - if (mixer_setup_scale(plane, &x_ratio, &y_ratio)) - return; + /* ratio is already checked by common plane code */ + x_ratio = state->h_ratio == (1 << 15); + y_ratio = state->v_ratio == (1 << 15); - dst_x_offset = plane->crtc_x; - dst_y_offset = plane->crtc_y; + dst_x_offset = state->crtc.x; + dst_y_offset = state->crtc.y; /* converting dma address base and source offset */ - dma_addr = plane->dma_addr[0] - + (plane->src_x * fb->bits_per_pixel >> 3) - + (plane->src_y * fb->pitches[0]); + dma_addr = exynos_drm_fb_dma_addr(fb, 0) + + (state->src.x * fb->bits_per_pixel >> 3) + + (state->src.y * fb->pitches[0]); src_x_offset = 0; src_y_offset = 0; @@ -587,7 +640,6 @@ static void mixer_graph_buffer(struct mixer_context *ctx, ctx->interlace = false; spin_lock_irqsave(&res->reg_slock, flags); - mixer_vsync_set_update(ctx, false); /* setup format */ mixer_reg_writemask(res, MXR_GRAPHIC_CFG(win), @@ -605,8 +657,8 @@ static void mixer_graph_buffer(struct mixer_context *ctx, mixer_reg_write(res, MXR_RESOLUTION, val); } - val = MXR_GRP_WH_WIDTH(plane->src_w); - val |= MXR_GRP_WH_HEIGHT(plane->src_h); + val = MXR_GRP_WH_WIDTH(state->src.w); + val |= MXR_GRP_WH_HEIGHT(state->src.h); val |= MXR_GRP_WH_H_SCALE(x_ratio); val |= MXR_GRP_WH_V_SCALE(y_ratio); mixer_reg_write(res, MXR_GRAPHIC_WH(win), val); @@ -626,7 +678,8 @@ static void mixer_graph_buffer(struct mixer_context *ctx, mixer_cfg_scan(ctx, mode->vdisplay); mixer_cfg_rgb_fmt(ctx, mode->vdisplay); - mixer_cfg_layer(ctx, win, true); + mixer_cfg_layer(ctx, win, state->zpos + 1, true); + mixer_cfg_gfx_blend(ctx, win, is_alpha_format(fb->pixel_format)); /* layer update mandatory for mixer 16.0.33.0 */ if (ctx->mxr_ver == MXR_VER_16_0_33_0 || @@ -635,7 +688,6 @@ static void mixer_graph_buffer(struct mixer_context *ctx, mixer_run(ctx); - mixer_vsync_set_update(ctx, true); spin_unlock_irqrestore(&res->reg_slock, flags); mixer_regs_dump(ctx); @@ -660,10 +712,8 @@ static void mixer_win_reset(struct mixer_context *ctx) { struct mixer_resources *res = &ctx->mixer_res; unsigned long flags; - u32 val; /* value stored to register */ spin_lock_irqsave(&res->reg_slock, flags); - mixer_vsync_set_update(ctx, false); mixer_reg_writemask(res, MXR_CFG, MXR_CFG_DST_HDMI, MXR_CFG_DST_MASK); @@ -674,40 +724,14 @@ static void mixer_win_reset(struct mixer_context *ctx) mixer_reg_writemask(res, MXR_STATUS, MXR_STATUS_16_BURST, MXR_STATUS_BURST_MASK); - /* setting default layer priority: layer1 > layer0 > video - * because typical usage scenario would be - * layer1 - OSD - * layer0 - framebuffer - * video - video overlay - */ - val = MXR_LAYER_CFG_GRP1_VAL(3); - val |= MXR_LAYER_CFG_GRP0_VAL(2); - if (ctx->vp_enabled) - val |= MXR_LAYER_CFG_VP_VAL(1); - mixer_reg_write(res, MXR_LAYER_CFG, val); + /* reset default layer priority */ + mixer_reg_write(res, MXR_LAYER_CFG, 0); /* setting background color */ mixer_reg_write(res, MXR_BG_COLOR0, 0x008080); mixer_reg_write(res, MXR_BG_COLOR1, 0x008080); mixer_reg_write(res, MXR_BG_COLOR2, 0x008080); - /* setting graphical layers */ - val = MXR_GRP_CFG_COLOR_KEY_DISABLE; /* no blank key */ - val |= MXR_GRP_CFG_WIN_BLEND_EN; - val |= MXR_GRP_CFG_ALPHA_VAL(0xff); /* non-transparent alpha */ - - /* Don't blend layer 0 onto the mixer background */ - mixer_reg_write(res, MXR_GRAPHIC_CFG(0), val); - - /* Blend layer 1 into layer 0 */ - val |= MXR_GRP_CFG_BLEND_PRE_MUL; 
- val |= MXR_GRP_CFG_PIXEL_BLEND_EN; - mixer_reg_write(res, MXR_GRAPHIC_CFG(1), val); - - /* setting video layers */ - val = MXR_GRP_CFG_ALPHA_VAL(0); - mixer_reg_write(res, MXR_VIDEO_CFG, val); - if (ctx->vp_enabled) { /* configuration of Video Processor Registers */ vp_win_reset(ctx); @@ -720,7 +744,6 @@ static void mixer_win_reset(struct mixer_context *ctx) if (ctx->vp_enabled) mixer_reg_writemask(res, MXR_CFG, 0, MXR_CFG_VP_ENABLE); - mixer_vsync_set_update(ctx, true); spin_unlock_irqrestore(&res->reg_slock, flags); } @@ -951,17 +974,27 @@ static void mixer_disable_vblank(struct exynos_drm_crtc *crtc) mixer_reg_writemask(res, MXR_INT_EN, 0, MXR_INT_EN_VSYNC); } +static void mixer_atomic_begin(struct exynos_drm_crtc *crtc) +{ + struct mixer_context *mixer_ctx = crtc->ctx; + + if (!test_bit(MXR_BIT_POWERED, &mixer_ctx->flags)) + return; + + mixer_vsync_set_update(mixer_ctx, false); +} + static void mixer_update_plane(struct exynos_drm_crtc *crtc, struct exynos_drm_plane *plane) { struct mixer_context *mixer_ctx = crtc->ctx; - DRM_DEBUG_KMS("win: %d\n", plane->zpos); + DRM_DEBUG_KMS("win: %d\n", plane->index); if (!test_bit(MXR_BIT_POWERED, &mixer_ctx->flags)) return; - if (plane->zpos > 1 && mixer_ctx->vp_enabled) + if (plane->index == VP_DEFAULT_WIN) vp_video_buffer(mixer_ctx, plane); else mixer_graph_buffer(mixer_ctx, plane); @@ -974,18 +1007,24 @@ static void mixer_disable_plane(struct exynos_drm_crtc *crtc, struct mixer_resources *res = &mixer_ctx->mixer_res; unsigned long flags; - DRM_DEBUG_KMS("win: %d\n", plane->zpos); + DRM_DEBUG_KMS("win: %d\n", plane->index); if (!test_bit(MXR_BIT_POWERED, &mixer_ctx->flags)) return; spin_lock_irqsave(&res->reg_slock, flags); - mixer_vsync_set_update(mixer_ctx, false); + mixer_cfg_layer(mixer_ctx, plane->index, 0, false); + spin_unlock_irqrestore(&res->reg_slock, flags); +} - mixer_cfg_layer(mixer_ctx, plane->zpos, false); +static void mixer_atomic_flush(struct exynos_drm_crtc *crtc) +{ + struct mixer_context *mixer_ctx = crtc->ctx; + + if (!test_bit(MXR_BIT_POWERED, &mixer_ctx->flags)) + return; mixer_vsync_set_update(mixer_ctx, true); - spin_unlock_irqrestore(&res->reg_slock, flags); } static void mixer_wait_for_vblank(struct exynos_drm_crtc *crtc) @@ -1020,42 +1059,13 @@ static void mixer_enable(struct exynos_drm_crtc *crtc) { struct mixer_context *ctx = crtc->ctx; struct mixer_resources *res = &ctx->mixer_res; - int ret; if (test_bit(MXR_BIT_POWERED, &ctx->flags)) return; pm_runtime_get_sync(ctx->dev); - ret = clk_prepare_enable(res->mixer); - if (ret < 0) { - DRM_ERROR("Failed to prepare_enable the mixer clk [%d]\n", ret); - return; - } - ret = clk_prepare_enable(res->hdmi); - if (ret < 0) { - DRM_ERROR("Failed to prepare_enable the hdmi clk [%d]\n", ret); - return; - } - if (ctx->vp_enabled) { - ret = clk_prepare_enable(res->vp); - if (ret < 0) { - DRM_ERROR("Failed to prepare_enable the vp clk [%d]\n", - ret); - return; - } - if (ctx->has_sclk) { - ret = clk_prepare_enable(res->sclk_mixer); - if (ret < 0) { - DRM_ERROR("Failed to prepare_enable the " \ - "sclk_mixer clk [%d]\n", - ret); - return; - } - } - } - - set_bit(MXR_BIT_POWERED, &ctx->flags); + mixer_vsync_set_update(ctx, false); mixer_reg_writemask(res, MXR_STATUS, ~0, MXR_STATUS_SOFT_RESET); @@ -1064,12 +1074,15 @@ static void mixer_enable(struct exynos_drm_crtc *crtc) mixer_reg_writemask(res, MXR_INT_EN, ~0, MXR_INT_EN_VSYNC); } mixer_win_reset(ctx); + + mixer_vsync_set_update(ctx, true); + + set_bit(MXR_BIT_POWERED, &ctx->flags); } static void mixer_disable(struct 
exynos_drm_crtc *crtc) { struct mixer_context *ctx = crtc->ctx; - struct mixer_resources *res = &ctx->mixer_res; int i; if (!test_bit(MXR_BIT_POWERED, &ctx->flags)) @@ -1081,17 +1094,9 @@ static void mixer_disable(struct exynos_drm_crtc *crtc) for (i = 0; i < MIXER_WIN_NR; i++) mixer_disable_plane(crtc, &ctx->planes[i]); + pm_runtime_put(ctx->dev); + clear_bit(MXR_BIT_POWERED, &ctx->flags); - - clk_disable_unprepare(res->hdmi); - clk_disable_unprepare(res->mixer); - if (ctx->vp_enabled) { - clk_disable_unprepare(res->vp); - if (ctx->has_sclk) - clk_disable_unprepare(res->sclk_mixer); - } - - pm_runtime_put_sync(ctx->dev); } /* Only valid for Mixer version 16.0.33.0 */ @@ -1122,8 +1127,10 @@ static const struct exynos_drm_crtc_ops mixer_crtc_ops = { .enable_vblank = mixer_enable_vblank, .disable_vblank = mixer_disable_vblank, .wait_for_vblank = mixer_wait_for_vblank, + .atomic_begin = mixer_atomic_begin, .update_plane = mixer_update_plane, .disable_plane = mixer_disable_plane, + .atomic_flush = mixer_atomic_flush, .atomic_check = mixer_atomic_check, }; @@ -1187,30 +1194,19 @@ static int mixer_bind(struct device *dev, struct device *manager, void *data) struct mixer_context *ctx = dev_get_drvdata(dev); struct drm_device *drm_dev = data; struct exynos_drm_plane *exynos_plane; - unsigned int zpos; + unsigned int i; int ret; ret = mixer_initialize(ctx, drm_dev); if (ret) return ret; - for (zpos = 0; zpos < MIXER_WIN_NR; zpos++) { - enum drm_plane_type type; - const uint32_t *formats; - unsigned int fcount; + for (i = 0; i < MIXER_WIN_NR; i++) { + if (i == VP_DEFAULT_WIN && !ctx->vp_enabled) + continue; - if (zpos < VP_DEFAULT_WIN) { - formats = mixer_formats; - fcount = ARRAY_SIZE(mixer_formats); - } else { - formats = vp_formats; - fcount = ARRAY_SIZE(vp_formats); - } - - type = exynos_plane_get_type(zpos, CURSOR_WIN); - ret = exynos_plane_init(drm_dev, &ctx->planes[zpos], - 1 << ctx->pipe, type, formats, fcount, - zpos); + ret = exynos_plane_init(drm_dev, &ctx->planes[i], i, + 1 << ctx->pipe, &plane_configs[i]); if (ret) return ret; } @@ -1293,10 +1289,70 @@ static int mixer_remove(struct platform_device *pdev) return 0; } +#ifdef CONFIG_PM_SLEEP +static int exynos_mixer_suspend(struct device *dev) +{ + struct mixer_context *ctx = dev_get_drvdata(dev); + struct mixer_resources *res = &ctx->mixer_res; + + clk_disable_unprepare(res->hdmi); + clk_disable_unprepare(res->mixer); + if (ctx->vp_enabled) { + clk_disable_unprepare(res->vp); + if (ctx->has_sclk) + clk_disable_unprepare(res->sclk_mixer); + } + + return 0; +} + +static int exynos_mixer_resume(struct device *dev) +{ + struct mixer_context *ctx = dev_get_drvdata(dev); + struct mixer_resources *res = &ctx->mixer_res; + int ret; + + ret = clk_prepare_enable(res->mixer); + if (ret < 0) { + DRM_ERROR("Failed to prepare_enable the mixer clk [%d]\n", ret); + return ret; + } + ret = clk_prepare_enable(res->hdmi); + if (ret < 0) { + DRM_ERROR("Failed to prepare_enable the hdmi clk [%d]\n", ret); + return ret; + } + if (ctx->vp_enabled) { + ret = clk_prepare_enable(res->vp); + if (ret < 0) { + DRM_ERROR("Failed to prepare_enable the vp clk [%d]\n", + ret); + return ret; + } + if (ctx->has_sclk) { + ret = clk_prepare_enable(res->sclk_mixer); + if (ret < 0) { + DRM_ERROR("Failed to prepare_enable the " \ + "sclk_mixer clk [%d]\n", + ret); + return ret; + } + } + } + + return 0; +} +#endif + +static const struct dev_pm_ops exynos_mixer_pm_ops = { + SET_RUNTIME_PM_OPS(exynos_mixer_suspend, exynos_mixer_resume, NULL) +}; + struct platform_driver 
mixer_driver = { .driver = { .name = "exynos-mixer", .owner = THIS_MODULE, + .pm = &exynos_mixer_pm_ops, .of_match_table = mixer_match_types, }, .probe = mixer_probe, diff --git a/drivers/gpu/drm/exynos/regs-gsc.h b/drivers/gpu/drm/exynos/regs-gsc.h index 9ad592707aaf..4704a993cbb7 100644 --- a/drivers/gpu/drm/exynos/regs-gsc.h +++ b/drivers/gpu/drm/exynos/regs-gsc.h @@ -273,12 +273,12 @@ #define GSC_CLK_GATE_MODE_SNOOP_CNT(x) ((x) << 0) /* SYSCON. GSCBLK_CFG */ -#define SYSREG_GSCBLK_CFG1 (S3C_VA_SYS + 0x0224) +#define SYSREG_GSCBLK_CFG1 0x0224 #define GSC_BLK_DISP1WB_DEST(x) (x << 10) #define GSC_BLK_SW_RESET_WB_DEST(x) (1 << (18 + x)) #define GSC_BLK_PXLASYNC_LO_MASK_WB(x) (0 << (14 + x)) #define GSC_BLK_GSCL_WB_IN_SRC_SEL(x) (1 << (2 * x)) -#define SYSREG_GSCBLK_CFG2 (S3C_VA_SYS + 0x2000) +#define SYSREG_GSCBLK_CFG2 0x2000 #define PXLASYNC_LO_MASK_CAMIF_GSCL(x) (1 << (x)) #endif /* EXYNOS_REGS_GSC_H_ */ diff --git a/drivers/gpu/drm/exynos/regs-mixer.h b/drivers/gpu/drm/exynos/regs-mixer.h index ac60260c2389..7f22df5bf707 100644 --- a/drivers/gpu/drm/exynos/regs-mixer.h +++ b/drivers/gpu/drm/exynos/regs-mixer.h @@ -113,6 +113,7 @@ #define MXR_GRP_CFG_BLEND_PRE_MUL (1 << 20) #define MXR_GRP_CFG_WIN_BLEND_EN (1 << 17) #define MXR_GRP_CFG_PIXEL_BLEND_EN (1 << 16) +#define MXR_GRP_CFG_MISC_MASK ((3 << 16) | (3 << 20)) #define MXR_GRP_CFG_FORMAT_VAL(x) MXR_MASK_VAL(x, 11, 8) #define MXR_GRP_CFG_FORMAT_MASK MXR_GRP_CFG_FORMAT_VAL(~0) #define MXR_GRP_CFG_ALPHA_VAL(x) MXR_MASK_VAL(x, 7, 0) @@ -145,8 +146,11 @@ /* bit for MXR_LAYER_CFG */ #define MXR_LAYER_CFG_GRP1_VAL(x) MXR_MASK_VAL(x, 11, 8) +#define MXR_LAYER_CFG_GRP1_MASK MXR_LAYER_CFG_GRP1_VAL(~0) #define MXR_LAYER_CFG_GRP0_VAL(x) MXR_MASK_VAL(x, 7, 4) +#define MXR_LAYER_CFG_GRP0_MASK MXR_LAYER_CFG_GRP0_VAL(~0) #define MXR_LAYER_CFG_VP_VAL(x) MXR_MASK_VAL(x, 3, 0) +#define MXR_LAYER_CFG_VP_MASK MXR_LAYER_CFG_VP_VAL(~0) #endif /* SAMSUNG_REGS_MIXER_H */ diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c index 82a3d311e164..d8ab8f0af10c 100644 --- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c +++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c @@ -175,7 +175,7 @@ int fsl_dcu_drm_crtc_create(struct fsl_dcu_drm_device *fsl_dev) primary = fsl_dcu_drm_primary_create_plane(fsl_dev->drm); ret = drm_crtc_init_with_planes(fsl_dev->drm, crtc, primary, NULL, - &fsl_dcu_drm_crtc_funcs); + &fsl_dcu_drm_crtc_funcs, NULL); if (ret < 0) return ret; diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c index 1930234ba5f1..fca97d3fc846 100644 --- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c +++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c @@ -363,7 +363,6 @@ static int fsl_dcu_drm_probe(struct platform_device *pdev) fsl_dev->np = dev->of_node; drm->dev_private = fsl_dev; dev_set_drvdata(dev, fsl_dev); - drm_dev_set_unique(drm, dev_name(dev)); ret = drm_dev_register(drm, 0); if (ret < 0) diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c index 51daaea40b4d..4b13cf919575 100644 --- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c +++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c @@ -249,7 +249,7 @@ struct drm_plane *fsl_dcu_drm_primary_create_plane(struct drm_device *dev) &fsl_dcu_drm_plane_funcs, fsl_dcu_drm_plane_formats, ARRAY_SIZE(fsl_dcu_drm_plane_formats), - DRM_PLANE_TYPE_PRIMARY); + DRM_PLANE_TYPE_PRIMARY, NULL); if (ret) { kfree(primary); primary = NULL; diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c 
b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c index fe8ab5da04fb..8780deba5e8a 100644 --- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c +++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c @@ -57,7 +57,7 @@ int fsl_dcu_drm_encoder_create(struct fsl_dcu_drm_device *fsl_dev, encoder->possible_crtcs = 1; ret = drm_encoder_init(fsl_dev->drm, encoder, &encoder_funcs, - DRM_MODE_ENCODER_LVDS); + DRM_MODE_ENCODER_LVDS, NULL); if (ret < 0) return ret; diff --git a/drivers/gpu/drm/gma500/cdv_device.c b/drivers/gpu/drm/gma500/cdv_device.c index 3531f90e53d0..8745971a7680 100644 --- a/drivers/gpu/drm/gma500/cdv_device.c +++ b/drivers/gpu/drm/gma500/cdv_device.c @@ -619,6 +619,8 @@ const struct psb_ops cdv_chip_ops = { .init_pm = cdv_init_pm, .save_regs = cdv_save_display_registers, .restore_regs = cdv_restore_display_registers, + .save_crtc = gma_crtc_save, + .restore_crtc = gma_crtc_restore, .power_down = cdv_power_down, .power_up = cdv_power_up, .update_wm = cdv_update_wm, diff --git a/drivers/gpu/drm/gma500/cdv_intel_crt.c b/drivers/gpu/drm/gma500/cdv_intel_crt.c index 248c33a35ebf..d0717a85c7ec 100644 --- a/drivers/gpu/drm/gma500/cdv_intel_crt.c +++ b/drivers/gpu/drm/gma500/cdv_intel_crt.c @@ -273,7 +273,7 @@ void cdv_intel_crt_init(struct drm_device *dev, encoder = &gma_encoder->base; drm_encoder_init(dev, encoder, - &cdv_intel_crt_enc_funcs, DRM_MODE_ENCODER_DAC); + &cdv_intel_crt_enc_funcs, DRM_MODE_ENCODER_DAC, NULL); gma_connector_attach_encoder(gma_connector, gma_encoder); diff --git a/drivers/gpu/drm/gma500/cdv_intel_display.c b/drivers/gpu/drm/gma500/cdv_intel_display.c index 7d47b3d5cc0d..6126546295e9 100644 --- a/drivers/gpu/drm/gma500/cdv_intel_display.c +++ b/drivers/gpu/drm/gma500/cdv_intel_display.c @@ -983,8 +983,6 @@ const struct drm_crtc_helper_funcs cdv_intel_helper_funcs = { }; const struct drm_crtc_funcs cdv_intel_crtc_funcs = { - .save = gma_crtc_save, - .restore = gma_crtc_restore, .cursor_set = gma_crtc_cursor_set, .cursor_move = gma_crtc_cursor_move, .gamma_set = gma_crtc_gamma_set, diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c index 17cea400ae32..7bb1f1aff932 100644 --- a/drivers/gpu/drm/gma500/cdv_intel_dp.c +++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c @@ -2020,7 +2020,8 @@ cdv_intel_dp_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev encoder = &gma_encoder->base; drm_connector_init(dev, connector, &cdv_intel_dp_connector_funcs, type); - drm_encoder_init(dev, encoder, &cdv_intel_dp_enc_funcs, DRM_MODE_ENCODER_TMDS); + drm_encoder_init(dev, encoder, &cdv_intel_dp_enc_funcs, + DRM_MODE_ENCODER_TMDS, NULL); gma_connector_attach_encoder(gma_connector, gma_encoder); diff --git a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c index 6b1d3340ba14..ddf2d7700759 100644 --- a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c +++ b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c @@ -270,8 +270,6 @@ static const struct drm_connector_helper_funcs static const struct drm_connector_funcs cdv_hdmi_connector_funcs = { .dpms = drm_helper_connector_dpms, - .save = cdv_hdmi_save, - .restore = cdv_hdmi_restore, .detect = cdv_hdmi_detect, .fill_modes = drm_helper_probe_single_connector_modes, .set_property = cdv_hdmi_set_property, @@ -306,13 +304,16 @@ void cdv_hdmi_init(struct drm_device *dev, connector = &gma_connector->base; connector->polled = DRM_CONNECTOR_POLL_HPD; + gma_connector->save = cdv_hdmi_save; + gma_connector->restore = cdv_hdmi_restore; + encoder = &gma_encoder->base; drm_connector_init(dev, connector, 
&cdv_hdmi_connector_funcs, DRM_MODE_CONNECTOR_DVID); drm_encoder_init(dev, encoder, &psb_intel_lvds_enc_funcs, - DRM_MODE_ENCODER_TMDS); + DRM_MODE_ENCODER_TMDS, NULL); gma_connector_attach_encoder(gma_connector, gma_encoder); gma_encoder->type = INTEL_OUTPUT_HDMI; diff --git a/drivers/gpu/drm/gma500/cdv_intel_lvds.c b/drivers/gpu/drm/gma500/cdv_intel_lvds.c index 211069b2b951..813ef23a8054 100644 --- a/drivers/gpu/drm/gma500/cdv_intel_lvds.c +++ b/drivers/gpu/drm/gma500/cdv_intel_lvds.c @@ -530,8 +530,6 @@ static const struct drm_connector_helper_funcs static const struct drm_connector_funcs cdv_intel_lvds_connector_funcs = { .dpms = drm_helper_connector_dpms, - .save = cdv_intel_lvds_save, - .restore = cdv_intel_lvds_restore, .detect = cdv_intel_lvds_detect, .fill_modes = drm_helper_probe_single_connector_modes, .set_property = cdv_intel_lvds_set_property, @@ -643,6 +641,8 @@ void cdv_intel_lvds_init(struct drm_device *dev, gma_encoder->dev_priv = lvds_priv; connector = &gma_connector->base; + gma_connector->save = cdv_intel_lvds_save; + gma_connector->restore = cdv_intel_lvds_restore; encoder = &gma_encoder->base; @@ -652,7 +652,7 @@ void cdv_intel_lvds_init(struct drm_device *dev, drm_encoder_init(dev, encoder, &cdv_intel_lvds_enc_funcs, - DRM_MODE_ENCODER_LVDS); + DRM_MODE_ENCODER_LVDS, NULL); gma_connector_attach_encoder(gma_connector, gma_encoder); diff --git a/drivers/gpu/drm/gma500/mdfld_device.c b/drivers/gpu/drm/gma500/mdfld_device.c index 265ad0de44a6..e2ab858122f9 100644 --- a/drivers/gpu/drm/gma500/mdfld_device.c +++ b/drivers/gpu/drm/gma500/mdfld_device.c @@ -546,6 +546,8 @@ const struct psb_ops mdfld_chip_ops = { .save_regs = mdfld_save_registers, .restore_regs = mdfld_restore_registers, + .save_crtc = gma_crtc_save, + .restore_crtc = gma_crtc_restore, .power_down = mdfld_power_down, .power_up = mdfld_power_up, }; diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c b/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c index d4813e03f5ee..7cd87a0c2385 100644 --- a/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c +++ b/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c @@ -821,14 +821,18 @@ void mdfld_dsi_dpi_mode_set(struct drm_encoder *encoder, struct drm_device *dev = dsi_config->dev; struct drm_psb_private *dev_priv = dev->dev_private; int pipe = mdfld_dsi_encoder_get_pipe(dsi_encoder); - u32 pipeconf_reg = PIPEACONF; u32 dspcntr_reg = DSPACNTR; + u32 pipeconf, dspcntr; - u32 pipeconf = dev_priv->pipeconf[pipe]; - u32 dspcntr = dev_priv->dspcntr[pipe]; u32 mipi = MIPI_PORT_EN | PASS_FROM_SPHY_TO_AFE | SEL_FLOPPED_HSTX; + if (WARN_ON(pipe < 0)) + return; + + pipeconf = dev_priv->pipeconf[pipe]; + dspcntr = dev_priv->dspcntr[pipe]; + if (pipe) { pipeconf_reg = PIPECCONF; dspcntr_reg = DSPCCNTR; @@ -994,7 +998,7 @@ struct mdfld_dsi_encoder *mdfld_dsi_dpi_init(struct drm_device *dev, drm_encoder_init(dev, encoder, p_funcs->encoder_funcs, - DRM_MODE_ENCODER_LVDS); + DRM_MODE_ENCODER_LVDS, NULL); drm_encoder_helper_add(encoder, p_funcs->encoder_helper_funcs); diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_output.c b/drivers/gpu/drm/gma500/mdfld_dsi_output.c index 89f705c3a5eb..d758f4cc6805 100644 --- a/drivers/gpu/drm/gma500/mdfld_dsi_output.c +++ b/drivers/gpu/drm/gma500/mdfld_dsi_output.c @@ -405,8 +405,6 @@ static struct drm_encoder *mdfld_dsi_connector_best_encoder( /*DSI connector funcs*/ static const struct drm_connector_funcs mdfld_dsi_connector_funcs = { .dpms = /*drm_helper_connector_dpms*/mdfld_dsi_connector_dpms, - .save = mdfld_dsi_connector_save, - .restore = mdfld_dsi_connector_restore, .detect = 
mdfld_dsi_connector_detect, .fill_modes = drm_helper_probe_single_connector_modes, .set_property = mdfld_dsi_connector_set_property, @@ -563,6 +561,9 @@ void mdfld_dsi_output_init(struct drm_device *dev, connector = &dsi_connector->base.base; + dsi_connector->base.save = mdfld_dsi_connector_save; + dsi_connector->base.restore = mdfld_dsi_connector_restore; + drm_connector_init(dev, connector, &mdfld_dsi_connector_funcs, DRM_MODE_CONNECTOR_LVDS); drm_connector_helper_add(connector, &mdfld_dsi_connector_helper_funcs); diff --git a/drivers/gpu/drm/gma500/oaktrail_device.c b/drivers/gpu/drm/gma500/oaktrail_device.c index 368a03ae3010..ba30b43a3412 100644 --- a/drivers/gpu/drm/gma500/oaktrail_device.c +++ b/drivers/gpu/drm/gma500/oaktrail_device.c @@ -568,6 +568,8 @@ const struct psb_ops oaktrail_chip_ops = { .save_regs = oaktrail_save_display_registers, .restore_regs = oaktrail_restore_display_registers, + .save_crtc = gma_crtc_save, + .restore_crtc = gma_crtc_restore, .power_down = oaktrail_power_down, .power_up = oaktrail_power_up, diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi.c b/drivers/gpu/drm/gma500/oaktrail_hdmi.c index 2310d879cdc2..2d18499d6060 100644 --- a/drivers/gpu/drm/gma500/oaktrail_hdmi.c +++ b/drivers/gpu/drm/gma500/oaktrail_hdmi.c @@ -654,7 +654,7 @@ void oaktrail_hdmi_init(struct drm_device *dev, drm_encoder_init(dev, encoder, &oaktrail_hdmi_enc_funcs, - DRM_MODE_ENCODER_TMDS); + DRM_MODE_ENCODER_TMDS, NULL); gma_connector_attach_encoder(gma_connector, gma_encoder); diff --git a/drivers/gpu/drm/gma500/oaktrail_lvds.c b/drivers/gpu/drm/gma500/oaktrail_lvds.c index 83bbc271bcfb..f7038f12ac76 100644 --- a/drivers/gpu/drm/gma500/oaktrail_lvds.c +++ b/drivers/gpu/drm/gma500/oaktrail_lvds.c @@ -323,7 +323,7 @@ void oaktrail_lvds_init(struct drm_device *dev, DRM_MODE_CONNECTOR_LVDS); drm_encoder_init(dev, encoder, &psb_intel_lvds_enc_funcs, - DRM_MODE_ENCODER_LVDS); + DRM_MODE_ENCODER_LVDS, NULL); gma_connector_attach_encoder(gma_connector, gma_encoder); gma_encoder->type = INTEL_OUTPUT_LVDS; diff --git a/drivers/gpu/drm/gma500/power.c b/drivers/gpu/drm/gma500/power.c index b6b135fcd59c..bea8578846d1 100644 --- a/drivers/gpu/drm/gma500/power.c +++ b/drivers/gpu/drm/gma500/power.c @@ -187,7 +187,7 @@ static bool gma_resume_pci(struct pci_dev *pdev) */ int gma_power_suspend(struct device *_dev) { - struct pci_dev *pdev = container_of(_dev, struct pci_dev, dev); + struct pci_dev *pdev = to_pci_dev(_dev); struct drm_device *dev = pci_get_drvdata(pdev); struct drm_psb_private *dev_priv = dev->dev_private; @@ -214,7 +214,7 @@ int gma_power_suspend(struct device *_dev) */ int gma_power_resume(struct device *_dev) { - struct pci_dev *pdev = container_of(_dev, struct pci_dev, dev); + struct pci_dev *pdev = to_pci_dev(_dev); struct drm_device *dev = pci_get_drvdata(pdev); mutex_lock(&power_mutex); diff --git a/drivers/gpu/drm/gma500/psb_device.c b/drivers/gpu/drm/gma500/psb_device.c index 07df7d4eea72..dc0f8527570c 100644 --- a/drivers/gpu/drm/gma500/psb_device.c +++ b/drivers/gpu/drm/gma500/psb_device.c @@ -181,7 +181,7 @@ static int psb_save_display_registers(struct drm_device *dev) { struct drm_psb_private *dev_priv = dev->dev_private; struct drm_crtc *crtc; - struct drm_connector *connector; + struct gma_connector *connector; struct psb_state *regs = &dev_priv->regs.psb; /* Display arbitration control + watermarks */ @@ -198,12 +198,12 @@ static int psb_save_display_registers(struct drm_device *dev) drm_modeset_lock_all(dev); list_for_each_entry(crtc, &dev->mode_config.crtc_list, 
head) { if (drm_helper_crtc_in_use(crtc)) - crtc->funcs->save(crtc); + dev_priv->ops->save_crtc(crtc); } - list_for_each_entry(connector, &dev->mode_config.connector_list, head) - if (connector->funcs->save) - connector->funcs->save(connector); + list_for_each_entry(connector, &dev->mode_config.connector_list, base.head) + if (connector->save) + connector->save(&connector->base); drm_modeset_unlock_all(dev); return 0; @@ -219,7 +219,7 @@ static int psb_restore_display_registers(struct drm_device *dev) { struct drm_psb_private *dev_priv = dev->dev_private; struct drm_crtc *crtc; - struct drm_connector *connector; + struct gma_connector *connector; struct psb_state *regs = &dev_priv->regs.psb; /* Display arbitration + watermarks */ @@ -238,11 +238,11 @@ static int psb_restore_display_registers(struct drm_device *dev) drm_modeset_lock_all(dev); list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) if (drm_helper_crtc_in_use(crtc)) - crtc->funcs->restore(crtc); + dev_priv->ops->restore_crtc(crtc); - list_for_each_entry(connector, &dev->mode_config.connector_list, head) - if (connector->funcs->restore) - connector->funcs->restore(connector); + list_for_each_entry(connector, &dev->mode_config.connector_list, base.head) + if (connector->restore) + connector->restore(&connector->base); drm_modeset_unlock_all(dev); return 0; @@ -354,6 +354,8 @@ const struct psb_ops psb_chip_ops = { .init_pm = psb_init_pm, .save_regs = psb_save_display_registers, .restore_regs = psb_restore_display_registers, + .save_crtc = gma_crtc_save, + .restore_crtc = gma_crtc_restore, .power_down = psb_power_down, .power_up = psb_power_up, }; diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h index 3bd2c726dd61..b74372760d7f 100644 --- a/drivers/gpu/drm/gma500/psb_drv.h +++ b/drivers/gpu/drm/gma500/psb_drv.h @@ -653,6 +653,8 @@ struct psb_ops { void (*init_pm)(struct drm_device *dev); int (*save_regs)(struct drm_device *dev); int (*restore_regs)(struct drm_device *dev); + void (*save_crtc)(struct drm_crtc *crtc); + void (*restore_crtc)(struct drm_crtc *crtc); int (*power_up)(struct drm_device *dev); int (*power_down)(struct drm_device *dev); void (*update_wm)(struct drm_device *dev, struct drm_crtc *crtc); diff --git a/drivers/gpu/drm/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c index 6659da88fe5b..dcdbc37e55e1 100644 --- a/drivers/gpu/drm/gma500/psb_intel_display.c +++ b/drivers/gpu/drm/gma500/psb_intel_display.c @@ -439,8 +439,6 @@ const struct drm_crtc_helper_funcs psb_intel_helper_funcs = { }; const struct drm_crtc_funcs psb_intel_crtc_funcs = { - .save = gma_crtc_save, - .restore = gma_crtc_restore, .cursor_set = gma_crtc_cursor_set, .cursor_move = gma_crtc_cursor_move, .gamma_set = gma_crtc_gamma_set, diff --git a/drivers/gpu/drm/gma500/psb_intel_drv.h b/drivers/gpu/drm/gma500/psb_intel_drv.h index 860dd2177ca1..2a3b7c684db2 100644 --- a/drivers/gpu/drm/gma500/psb_intel_drv.h +++ b/drivers/gpu/drm/gma500/psb_intel_drv.h @@ -140,6 +140,9 @@ struct gma_encoder { struct gma_connector { struct drm_connector base; struct gma_encoder *encoder; + + void (*save)(struct drm_connector *connector); + void (*restore)(struct drm_connector *connector); }; struct psb_intel_crtc_state { diff --git a/drivers/gpu/drm/gma500/psb_intel_lvds.c b/drivers/gpu/drm/gma500/psb_intel_lvds.c index ce0645d0c1e5..b1b93317d054 100644 --- a/drivers/gpu/drm/gma500/psb_intel_lvds.c +++ b/drivers/gpu/drm/gma500/psb_intel_lvds.c @@ -653,8 +653,6 @@ const struct drm_connector_helper_funcs const 
struct drm_connector_funcs psb_intel_lvds_connector_funcs = { .dpms = drm_helper_connector_dpms, - .save = psb_intel_lvds_save, - .restore = psb_intel_lvds_restore, .detect = psb_intel_lvds_detect, .fill_modes = drm_helper_probe_single_connector_modes, .set_property = psb_intel_lvds_set_property, @@ -715,6 +713,9 @@ void psb_intel_lvds_init(struct drm_device *dev, gma_encoder->dev_priv = lvds_priv; connector = &gma_connector->base; + gma_connector->save = psb_intel_lvds_save; + gma_connector->restore = psb_intel_lvds_restore; + encoder = &gma_encoder->base; drm_connector_init(dev, connector, &psb_intel_lvds_connector_funcs, @@ -722,7 +723,7 @@ void psb_intel_lvds_init(struct drm_device *dev, drm_encoder_init(dev, encoder, &psb_intel_lvds_enc_funcs, - DRM_MODE_ENCODER_LVDS); + DRM_MODE_ENCODER_LVDS, NULL); gma_connector_attach_encoder(gma_connector, gma_encoder); gma_encoder->type = INTEL_OUTPUT_LVDS; diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/drivers/gpu/drm/gma500/psb_intel_sdvo.c index 58529cea575d..e787d376ba67 100644 --- a/drivers/gpu/drm/gma500/psb_intel_sdvo.c +++ b/drivers/gpu/drm/gma500/psb_intel_sdvo.c @@ -1837,8 +1837,6 @@ static const struct drm_encoder_helper_funcs psb_intel_sdvo_helper_funcs = { static const struct drm_connector_funcs psb_intel_sdvo_connector_funcs = { .dpms = drm_helper_connector_dpms, - .save = psb_intel_sdvo_save, - .restore = psb_intel_sdvo_restore, .detect = psb_intel_sdvo_detect, .fill_modes = drm_helper_probe_single_connector_modes, .set_property = psb_intel_sdvo_set_property, @@ -2021,6 +2019,9 @@ psb_intel_sdvo_connector_init(struct psb_intel_sdvo_connector *connector, connector->base.base.doublescan_allowed = 0; connector->base.base.display_info.subpixel_order = SubPixelHorizontalRGB; + connector->base.save = psb_intel_sdvo_save; + connector->base.restore = psb_intel_sdvo_restore; + gma_connector_attach_encoder(&connector->base, &encoder->base); drm_connector_register(&connector->base.base); } @@ -2525,7 +2526,8 @@ bool psb_intel_sdvo_init(struct drm_device *dev, int sdvo_reg) /* encoder type will be decided later */ gma_encoder = &psb_intel_sdvo->base; gma_encoder->type = INTEL_OUTPUT_SDVO; - drm_encoder_init(dev, &gma_encoder->base, &psb_intel_sdvo_enc_funcs, 0); + drm_encoder_init(dev, &gma_encoder->base, &psb_intel_sdvo_enc_funcs, + 0, NULL); /* Read the regs to test if we can talk to the device */ for (i = 0; i < 0x40; i++) { diff --git a/drivers/gpu/drm/i2c/adv7511.c b/drivers/gpu/drm/i2c/adv7511.c index 00416f23b5cb..533d1e3d4a99 100644 --- a/drivers/gpu/drm/i2c/adv7511.c +++ b/drivers/gpu/drm/i2c/adv7511.c @@ -752,7 +752,7 @@ static void adv7511_encoder_mode_set(struct drm_encoder *encoder, adv7511->f_tmds = mode->clock; } -static struct drm_encoder_slave_funcs adv7511_encoder_funcs = { +static const struct drm_encoder_slave_funcs adv7511_encoder_funcs = { .dpms = adv7511_encoder_dpms, .mode_valid = adv7511_encoder_mode_valid, .mode_set = adv7511_encoder_mode_set, diff --git a/drivers/gpu/drm/i2c/ch7006_drv.c b/drivers/gpu/drm/i2c/ch7006_drv.c index d9a72c96e56c..90db5f4dcce5 100644 --- a/drivers/gpu/drm/i2c/ch7006_drv.c +++ b/drivers/gpu/drm/i2c/ch7006_drv.c @@ -371,7 +371,7 @@ static int ch7006_encoder_set_property(struct drm_encoder *encoder, return 0; } -static struct drm_encoder_slave_funcs ch7006_encoder_funcs = { +static const struct drm_encoder_slave_funcs ch7006_encoder_funcs = { .set_config = ch7006_encoder_set_config, .destroy = ch7006_encoder_destroy, .dpms = ch7006_encoder_dpms, diff --git 
a/drivers/gpu/drm/i2c/sil164_drv.c b/drivers/gpu/drm/i2c/sil164_drv.c index 002ce7874332..c400428f6c8c 100644 --- a/drivers/gpu/drm/i2c/sil164_drv.c +++ b/drivers/gpu/drm/i2c/sil164_drv.c @@ -341,7 +341,7 @@ sil164_encoder_destroy(struct drm_encoder *encoder) drm_i2c_encoder_destroy(encoder); } -static struct drm_encoder_slave_funcs sil164_encoder_funcs = { +static const struct drm_encoder_slave_funcs sil164_encoder_funcs = { .set_config = sil164_encoder_set_config, .destroy = sil164_encoder_destroy, .dpms = sil164_encoder_dpms, diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c index 896b6aaf8c4d..34e38749a817 100644 --- a/drivers/gpu/drm/i2c/tda998x_drv.c +++ b/drivers/gpu/drm/i2c/tda998x_drv.c @@ -22,6 +22,7 @@ #include #include +#include #include #include #include @@ -855,18 +856,6 @@ static void tda998x_encoder_dpms(struct drm_encoder *encoder, int mode) priv->dpms = mode; } -static void -tda998x_encoder_save(struct drm_encoder *encoder) -{ - DBG(""); -} - -static void -tda998x_encoder_restore(struct drm_encoder *encoder) -{ - DBG(""); -} - static bool tda998x_encoder_mode_fixup(struct drm_encoder *encoder, const struct drm_display_mode *mode, @@ -878,7 +867,10 @@ tda998x_encoder_mode_fixup(struct drm_encoder *encoder, static int tda998x_connector_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { - if (mode->clock > 150000) + /* TDA19988 dotclock can go up to 165MHz */ + struct tda998x_priv *priv = conn_to_tda998x_priv(connector); + + if (mode->clock > ((priv->rev == TDA19988) ? 165000 : 150000)) return MODE_CLOCK_HIGH; if (mode->htotal >= BIT(13)) return MODE_BAD_HVALUE; @@ -1351,8 +1343,6 @@ static void tda998x_encoder_commit(struct drm_encoder *encoder) static const struct drm_encoder_helper_funcs tda998x_encoder_helper_funcs = { .dpms = tda998x_encoder_dpms, - .save = tda998x_encoder_save, - .restore = tda998x_encoder_restore, .mode_fixup = tda998x_encoder_mode_fixup, .prepare = tda998x_encoder_prepare, .commit = tda998x_encoder_commit, @@ -1393,10 +1383,13 @@ static void tda998x_connector_destroy(struct drm_connector *connector) } static const struct drm_connector_funcs tda998x_connector_funcs = { - .dpms = drm_helper_connector_dpms, + .dpms = drm_atomic_helper_connector_dpms, + .reset = drm_atomic_helper_connector_reset, .fill_modes = drm_helper_probe_single_connector_modes, .detect = tda998x_connector_detect, .destroy = tda998x_connector_destroy, + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, }; static int tda998x_bind(struct device *dev, struct device *master, void *data) @@ -1437,7 +1430,7 @@ static int tda998x_bind(struct device *dev, struct device *master, void *data) drm_encoder_helper_add(&priv->encoder, &tda998x_encoder_helper_funcs); ret = drm_encoder_init(drm, &priv->encoder, &tda998x_encoder_funcs, - DRM_MODE_ENCODER_TMDS); + DRM_MODE_ENCODER_TMDS, NULL); if (ret) goto err_encoder; @@ -1453,7 +1446,6 @@ static int tda998x_bind(struct device *dev, struct device *master, void *data) if (ret) goto err_sysfs; - priv->connector.encoder = &priv->encoder; drm_mode_connector_attach_encoder(&priv->connector, &priv->encoder); return 0; @@ -1472,6 +1464,7 @@ static void tda998x_unbind(struct device *dev, struct device *master, { struct tda998x_priv *priv = dev_get_drvdata(dev); + drm_connector_unregister(&priv->connector); drm_connector_cleanup(&priv->connector); drm_encoder_cleanup(&priv->encoder); tda998x_destroy(priv); diff 
--git a/drivers/gpu/drm/i915/dvo.h b/drivers/gpu/drm/i915/dvo.h index 13dea4263554..5e6a3013da49 100644 --- a/drivers/gpu/drm/i915/dvo.h +++ b/drivers/gpu/drm/i915/dvo.h @@ -129,11 +129,11 @@ struct intel_dvo_dev_ops { void (*dump_regs)(struct intel_dvo_device *dvo); }; -extern struct intel_dvo_dev_ops sil164_ops; -extern struct intel_dvo_dev_ops ch7xxx_ops; -extern struct intel_dvo_dev_ops ivch_ops; -extern struct intel_dvo_dev_ops tfp410_ops; -extern struct intel_dvo_dev_ops ch7017_ops; -extern struct intel_dvo_dev_ops ns2501_ops; +extern const struct intel_dvo_dev_ops sil164_ops; +extern const struct intel_dvo_dev_ops ch7xxx_ops; +extern const struct intel_dvo_dev_ops ivch_ops; +extern const struct intel_dvo_dev_ops tfp410_ops; +extern const struct intel_dvo_dev_ops ch7017_ops; +extern const struct intel_dvo_dev_ops ns2501_ops; #endif /* _INTEL_DVO_H */ diff --git a/drivers/gpu/drm/i915/dvo_ch7017.c b/drivers/gpu/drm/i915/dvo_ch7017.c index cbb22027a3ce..b3c7c199200c 100644 --- a/drivers/gpu/drm/i915/dvo_ch7017.c +++ b/drivers/gpu/drm/i915/dvo_ch7017.c @@ -402,7 +402,7 @@ static void ch7017_destroy(struct intel_dvo_device *dvo) } } -struct intel_dvo_dev_ops ch7017_ops = { +const struct intel_dvo_dev_ops ch7017_ops = { .init = ch7017_init, .detect = ch7017_detect, .mode_valid = ch7017_mode_valid, diff --git a/drivers/gpu/drm/i915/dvo_ch7xxx.c b/drivers/gpu/drm/i915/dvo_ch7xxx.c index 4b4acc1a06fe..44b3159f2fe8 100644 --- a/drivers/gpu/drm/i915/dvo_ch7xxx.c +++ b/drivers/gpu/drm/i915/dvo_ch7xxx.c @@ -356,7 +356,7 @@ static void ch7xxx_destroy(struct intel_dvo_device *dvo) } } -struct intel_dvo_dev_ops ch7xxx_ops = { +const struct intel_dvo_dev_ops ch7xxx_ops = { .init = ch7xxx_init, .detect = ch7xxx_detect, .mode_valid = ch7xxx_mode_valid, diff --git a/drivers/gpu/drm/i915/dvo_ivch.c b/drivers/gpu/drm/i915/dvo_ivch.c index ff9f1b077d83..4950b82f5b49 100644 --- a/drivers/gpu/drm/i915/dvo_ivch.c +++ b/drivers/gpu/drm/i915/dvo_ivch.c @@ -490,7 +490,7 @@ static void ivch_destroy(struct intel_dvo_device *dvo) } } -struct intel_dvo_dev_ops ivch_ops = { +const struct intel_dvo_dev_ops ivch_ops = { .init = ivch_init, .dpms = ivch_dpms, .get_hw_state = ivch_get_hw_state, diff --git a/drivers/gpu/drm/i915/dvo_ns2501.c b/drivers/gpu/drm/i915/dvo_ns2501.c index 063859fff0f0..2379c33cfe51 100644 --- a/drivers/gpu/drm/i915/dvo_ns2501.c +++ b/drivers/gpu/drm/i915/dvo_ns2501.c @@ -698,7 +698,7 @@ static void ns2501_destroy(struct intel_dvo_device *dvo) } } -struct intel_dvo_dev_ops ns2501_ops = { +const struct intel_dvo_dev_ops ns2501_ops = { .init = ns2501_init, .detect = ns2501_detect, .mode_valid = ns2501_mode_valid, diff --git a/drivers/gpu/drm/i915/dvo_sil164.c b/drivers/gpu/drm/i915/dvo_sil164.c index 26f13eb634f9..1c1a0674dbab 100644 --- a/drivers/gpu/drm/i915/dvo_sil164.c +++ b/drivers/gpu/drm/i915/dvo_sil164.c @@ -267,7 +267,7 @@ static void sil164_destroy(struct intel_dvo_device *dvo) } } -struct intel_dvo_dev_ops sil164_ops = { +const struct intel_dvo_dev_ops sil164_ops = { .init = sil164_init, .detect = sil164_detect, .mode_valid = sil164_mode_valid, diff --git a/drivers/gpu/drm/i915/dvo_tfp410.c b/drivers/gpu/drm/i915/dvo_tfp410.c index 6f1a0a6d4e22..31e181da93db 100644 --- a/drivers/gpu/drm/i915/dvo_tfp410.c +++ b/drivers/gpu/drm/i915/dvo_tfp410.c @@ -306,7 +306,7 @@ static void tfp410_destroy(struct intel_dvo_device *dvo) } } -struct intel_dvo_dev_ops tfp410_ops = { +const struct intel_dvo_dev_ops tfp410_ops = { .init = tfp410_init, .detect = tfp410_detect, .mode_valid = tfp410_mode_valid, 
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 411a9c68b4ee..0fc38bb7276c 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -1142,8 +1142,34 @@ static int i915_frequency_info(struct seq_file *m, void *unused) MEMSTAT_VID_SHIFT); seq_printf(m, "Current P-state: %d\n", (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT); - } else if (IS_GEN6(dev) || (IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) || - IS_BROADWELL(dev) || IS_GEN9(dev)) { + } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { + u32 freq_sts; + + mutex_lock(&dev_priv->rps.hw_lock); + freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS); + seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts); + seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq); + + seq_printf(m, "actual GPU freq: %d MHz\n", + intel_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff)); + + seq_printf(m, "current GPU freq: %d MHz\n", + intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq)); + + seq_printf(m, "max GPU freq: %d MHz\n", + intel_gpu_freq(dev_priv, dev_priv->rps.max_freq)); + + seq_printf(m, "min GPU freq: %d MHz\n", + intel_gpu_freq(dev_priv, dev_priv->rps.min_freq)); + + seq_printf(m, "idle GPU freq: %d MHz\n", + intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq)); + + seq_printf(m, + "efficient (RPe) frequency: %d MHz\n", + intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq)); + mutex_unlock(&dev_priv->rps.hw_lock); + } else if (INTEL_INFO(dev)->gen >= 6) { u32 rp_state_limits; u32 gt_perf_status; u32 rp_state_cap; @@ -1284,33 +1310,6 @@ static int i915_frequency_info(struct seq_file *m, void *unused) seq_printf(m, "efficient (RPe) frequency: %d MHz\n", intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq)); - } else if (IS_VALLEYVIEW(dev)) { - u32 freq_sts; - - mutex_lock(&dev_priv->rps.hw_lock); - freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS); - seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts); - seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq); - - seq_printf(m, "actual GPU freq: %d MHz\n", - intel_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff)); - - seq_printf(m, "current GPU freq: %d MHz\n", - intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq)); - - seq_printf(m, "max GPU freq: %d MHz\n", - intel_gpu_freq(dev_priv, dev_priv->rps.max_freq)); - - seq_printf(m, "min GPU freq: %d MHz\n", - intel_gpu_freq(dev_priv, dev_priv->rps.min_freq)); - - seq_printf(m, "idle GPU freq: %d MHz\n", - intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq)); - - seq_printf(m, - "efficient (RPe) frequency: %d MHz\n", - intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq)); - mutex_unlock(&dev_priv->rps.hw_lock); } else { seq_puts(m, "no P-state info available\n"); } @@ -1602,7 +1601,7 @@ static int i915_drpc_info(struct seq_file *m, void *unused) struct drm_info_node *node = m->private; struct drm_device *dev = node->minor->dev; - if (IS_VALLEYVIEW(dev)) + if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) return vlv_drpc_info(m); else if (INTEL_INFO(dev)->gen >= 6) return gen6_drpc_info(m); @@ -1639,7 +1638,7 @@ static int i915_fbc_status(struct seq_file *m, void *unused) intel_runtime_pm_get(dev_priv); mutex_lock(&dev_priv->fbc.lock); - if (intel_fbc_enabled(dev_priv)) + if (intel_fbc_is_active(dev_priv)) seq_puts(m, "FBC enabled\n"); else seq_printf(m, "FBC disabled: %s\n", @@ -1743,7 +1742,7 @@ static int i915_sr_status(struct seq_file *m, void *unused) sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN; else if (IS_PINEVIEW(dev)) sr_enabled = 
I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN; - else if (IS_VALLEYVIEW(dev)) + else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN; intel_runtime_pm_put(dev_priv); @@ -1843,25 +1842,31 @@ static int i915_opregion(struct seq_file *m, void *unused) struct drm_device *dev = node->minor->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_opregion *opregion = &dev_priv->opregion; - void *data = kmalloc(OPREGION_SIZE, GFP_KERNEL); int ret; - if (data == NULL) - return -ENOMEM; - ret = mutex_lock_interruptible(&dev->struct_mutex); if (ret) goto out; - if (opregion->header) { - memcpy(data, opregion->header, OPREGION_SIZE); - seq_write(m, data, OPREGION_SIZE); - } + if (opregion->header) + seq_write(m, opregion->header, OPREGION_SIZE); mutex_unlock(&dev->struct_mutex); out: - kfree(data); + return 0; +} + +static int i915_vbt(struct seq_file *m, void *unused) +{ + struct drm_info_node *node = m->private; + struct drm_device *dev = node->minor->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_opregion *opregion = &dev_priv->opregion; + + if (opregion->vbt) + seq_write(m, opregion->vbt, opregion->vbt_size); + return 0; } @@ -1869,33 +1874,29 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data) { struct drm_info_node *node = m->private; struct drm_device *dev = node->minor->dev; - struct intel_fbdev *ifbdev = NULL; - struct intel_framebuffer *fb; + struct intel_framebuffer *fbdev_fb = NULL; struct drm_framebuffer *drm_fb; #ifdef CONFIG_DRM_FBDEV_EMULATION - struct drm_i915_private *dev_priv = dev->dev_private; + if (to_i915(dev)->fbdev) { + fbdev_fb = to_intel_framebuffer(to_i915(dev)->fbdev->helper.fb); - ifbdev = dev_priv->fbdev; - if (ifbdev) { - fb = to_intel_framebuffer(ifbdev->helper.fb); - - seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ", - fb->base.width, - fb->base.height, - fb->base.depth, - fb->base.bits_per_pixel, - fb->base.modifier[0], - atomic_read(&fb->base.refcount.refcount)); - describe_obj(m, fb->obj); - seq_putc(m, '\n'); - } + seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ", + fbdev_fb->base.width, + fbdev_fb->base.height, + fbdev_fb->base.depth, + fbdev_fb->base.bits_per_pixel, + fbdev_fb->base.modifier[0], + atomic_read(&fbdev_fb->base.refcount.refcount)); + describe_obj(m, fbdev_fb->obj); + seq_putc(m, '\n'); + } #endif mutex_lock(&dev->mode_config.fb_lock); drm_for_each_fb(drm_fb, dev) { - fb = to_intel_framebuffer(drm_fb); - if (ifbdev && &fb->base == ifbdev->helper.fb) + struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb); + if (fb == fbdev_fb) continue; seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ", @@ -2473,15 +2474,15 @@ static int i915_guc_info(struct seq_file *m, void *data) if (!HAS_GUC_SCHED(dev_priv->dev)) return 0; + if (mutex_lock_interruptible(&dev->struct_mutex)) + return 0; + /* Take a local copy of the GuC data, so we can dump it at leisure */ - spin_lock(&dev_priv->guc.host2guc_lock); guc = dev_priv->guc; - if (guc.execbuf_client) { - spin_lock(&guc.execbuf_client->wq_lock); + if (guc.execbuf_client) client = *guc.execbuf_client; - spin_unlock(&guc.execbuf_client->wq_lock); - } - spin_unlock(&dev_priv->guc.host2guc_lock); + + mutex_unlock(&dev->struct_mutex); seq_printf(m, "GuC total action count: %llu\n", guc.action_count); seq_printf(m, "GuC action failure count: %u\n", guc.action_fail); @@ -2582,8 
+2583,11 @@ static int i915_edp_psr_status(struct seq_file *m, void *data) } seq_puts(m, "\n"); - /* CHV PSR has no kind of performance counter */ - if (HAS_DDI(dev)) { + /* + * VLV/CHV PSR has no kind of performance counter + * SKL+ Perf counter is reset to 0 everytime DC state is entered + */ + if (IS_HASWELL(dev) || IS_BROADWELL(dev)) { psrperf = I915_READ(EDP_PSR_PERF_CNT) & EDP_PSR_PERF_CNT_MASK; @@ -2685,71 +2689,6 @@ static int i915_runtime_pm_status(struct seq_file *m, void *unused) return 0; } -static const char *power_domain_str(enum intel_display_power_domain domain) -{ - switch (domain) { - case POWER_DOMAIN_PIPE_A: - return "PIPE_A"; - case POWER_DOMAIN_PIPE_B: - return "PIPE_B"; - case POWER_DOMAIN_PIPE_C: - return "PIPE_C"; - case POWER_DOMAIN_PIPE_A_PANEL_FITTER: - return "PIPE_A_PANEL_FITTER"; - case POWER_DOMAIN_PIPE_B_PANEL_FITTER: - return "PIPE_B_PANEL_FITTER"; - case POWER_DOMAIN_PIPE_C_PANEL_FITTER: - return "PIPE_C_PANEL_FITTER"; - case POWER_DOMAIN_TRANSCODER_A: - return "TRANSCODER_A"; - case POWER_DOMAIN_TRANSCODER_B: - return "TRANSCODER_B"; - case POWER_DOMAIN_TRANSCODER_C: - return "TRANSCODER_C"; - case POWER_DOMAIN_TRANSCODER_EDP: - return "TRANSCODER_EDP"; - case POWER_DOMAIN_PORT_DDI_A_LANES: - return "PORT_DDI_A_LANES"; - case POWER_DOMAIN_PORT_DDI_B_LANES: - return "PORT_DDI_B_LANES"; - case POWER_DOMAIN_PORT_DDI_C_LANES: - return "PORT_DDI_C_LANES"; - case POWER_DOMAIN_PORT_DDI_D_LANES: - return "PORT_DDI_D_LANES"; - case POWER_DOMAIN_PORT_DDI_E_LANES: - return "PORT_DDI_E_LANES"; - case POWER_DOMAIN_PORT_DSI: - return "PORT_DSI"; - case POWER_DOMAIN_PORT_CRT: - return "PORT_CRT"; - case POWER_DOMAIN_PORT_OTHER: - return "PORT_OTHER"; - case POWER_DOMAIN_VGA: - return "VGA"; - case POWER_DOMAIN_AUDIO: - return "AUDIO"; - case POWER_DOMAIN_PLLS: - return "PLLS"; - case POWER_DOMAIN_AUX_A: - return "AUX_A"; - case POWER_DOMAIN_AUX_B: - return "AUX_B"; - case POWER_DOMAIN_AUX_C: - return "AUX_C"; - case POWER_DOMAIN_AUX_D: - return "AUX_D"; - case POWER_DOMAIN_GMBUS: - return "GMBUS"; - case POWER_DOMAIN_MODESET: - return "MODESET"; - case POWER_DOMAIN_INIT: - return "INIT"; - default: - MISSING_CASE(domain); - return "?"; - } -} - static int i915_power_domain_info(struct seq_file *m, void *unused) { struct drm_info_node *node = m->private; @@ -2775,7 +2714,7 @@ static int i915_power_domain_info(struct seq_file *m, void *unused) continue; seq_printf(m, " %-23s %d\n", - power_domain_str(power_domain), + intel_display_power_domain_str(power_domain), power_domains->domain_use_count[power_domain]); } } @@ -2916,6 +2855,20 @@ static void intel_dp_info(struct seq_file *m, intel_panel_info(m, &intel_connector->panel); } +static void intel_dp_mst_info(struct seq_file *m, + struct intel_connector *intel_connector) +{ + struct intel_encoder *intel_encoder = intel_connector->encoder; + struct intel_dp_mst_encoder *intel_mst = + enc_to_mst(&intel_encoder->base); + struct intel_digital_port *intel_dig_port = intel_mst->primary; + struct intel_dp *intel_dp = &intel_dig_port->dp; + bool has_audio = drm_dp_mst_port_has_audio(&intel_dp->mst_mgr, + intel_connector->port); + + seq_printf(m, "\taudio support: %s\n", yesno(has_audio)); +} + static void intel_hdmi_info(struct seq_file *m, struct intel_connector *intel_connector) { @@ -2959,6 +2912,8 @@ static void intel_connector_info(struct seq_file *m, intel_hdmi_info(m, intel_connector); else if (intel_encoder->type == INTEL_OUTPUT_LVDS) intel_lvds_info(m, intel_connector); + else if (intel_encoder->type == 
INTEL_OUTPUT_DP_MST) + intel_dp_mst_info(m, intel_connector); } seq_printf(m, "\tmodes:\n"); @@ -4049,7 +4004,7 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe, ret = i8xx_pipe_crc_ctl_reg(&source, &val); else if (INTEL_INFO(dev)->gen < 5) ret = i9xx_pipe_crc_ctl_reg(dev, pipe, &source, &val); - else if (IS_VALLEYVIEW(dev)) + else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) ret = vlv_pipe_crc_ctl_reg(dev, pipe, &source, &val); else if (IS_GEN5(dev) || IS_GEN6(dev)) ret = ilk_pipe_crc_ctl_reg(&source, &val); @@ -4118,7 +4073,7 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe, if (IS_G4X(dev)) g4x_undo_pipe_scramble_reset(dev, pipe); - else if (IS_VALLEYVIEW(dev)) + else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) vlv_undo_pipe_scramble_reset(dev, pipe); else if (IS_HASWELL(dev) && pipe == PIPE_A) hsw_trans_edp_pipe_A_crc_wa(dev, false); @@ -4508,7 +4463,8 @@ static void wm_latency_show(struct seq_file *m, const uint16_t wm[8]) * - WM1+ latency values in 0.5us units * - latencies are in us on gen9/vlv/chv */ - if (INTEL_INFO(dev)->gen >= 9 || IS_VALLEYVIEW(dev)) + if (INTEL_INFO(dev)->gen >= 9 || IS_VALLEYVIEW(dev) || + IS_CHERRYVIEW(dev)) latency *= 10; else if (level > 0) latency *= 5; @@ -5382,6 +5338,7 @@ static const struct drm_info_list i915_debugfs_list[] = { {"i915_ips_status", i915_ips_status, 0}, {"i915_sr_status", i915_sr_status, 0}, {"i915_opregion", i915_opregion, 0}, + {"i915_vbt", i915_vbt, 0}, {"i915_gem_framebuffer", i915_gem_framebuffer_info, 0}, {"i915_context_status", i915_context_status, 0}, {"i915_dump_lrc", i915_dump_lrc, 0}, diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index a81c76603544..d70d96fe553b 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -169,6 +169,9 @@ static int i915_getparam(struct drm_device *dev, void *data, case I915_PARAM_HAS_RESOURCE_STREAMER: value = HAS_RESOURCE_STREAMER(dev); break; + case I915_PARAM_HAS_EXEC_SOFTPIN: + value = 1; + break; default: DRM_DEBUG("Unknown parameter %d\n", param->param); return -EINVAL; @@ -256,7 +259,7 @@ intel_setup_mchbar(struct drm_device *dev) u32 temp; bool enabled; - if (IS_VALLEYVIEW(dev)) + if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) return; dev_priv->mchbar_need_disable = false; @@ -367,7 +370,7 @@ static int i915_load_modeset_init(struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; int ret; - ret = intel_parse_bios(dev); + ret = intel_bios_init(dev_priv); if (ret) DRM_INFO("failed to find VBIOS tables\n"); @@ -403,6 +406,8 @@ static int i915_load_modeset_init(struct drm_device *dev) if (ret) goto cleanup_gem_stolen; + intel_setup_gmbus(dev); + /* Important: The output setup functions called by modeset_init need * working irqs for e.g. gmbus and dp aux transfers. 
*/ intel_modeset_init(dev); @@ -452,6 +457,7 @@ cleanup_gem: cleanup_irq: intel_guc_ucode_fini(dev); drm_irq_uninstall(dev); + intel_teardown_gmbus(dev); cleanup_gem_stolen: i915_gem_cleanup_stolen(dev); cleanup_vga_switcheroo: @@ -779,7 +785,7 @@ static void intel_device_info_runtime_init(struct drm_device *dev) info->num_sprites[PIPE_A] = 2; info->num_sprites[PIPE_B] = 2; info->num_sprites[PIPE_C] = 1; - } else if (IS_VALLEYVIEW(dev)) + } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) for_each_pipe(dev_priv, pipe) info->num_sprites[pipe] = 2; else @@ -791,7 +797,7 @@ static void intel_device_info_runtime_init(struct drm_device *dev) info->num_pipes = 0; } else if (info->num_pipes > 0 && (INTEL_INFO(dev)->gen == 7 || INTEL_INFO(dev)->gen == 8) && - !IS_VALLEYVIEW(dev)) { + HAS_PCH_SPLIT(dev)) { u32 fuse_strap = I915_READ(FUSE_STRAP); u32 sfuse_strap = I915_READ(SFUSE_STRAP); @@ -836,9 +842,6 @@ static void intel_device_info_runtime_init(struct drm_device *dev) static void intel_init_dpio(struct drm_i915_private *dev_priv) { - if (!IS_VALLEYVIEW(dev_priv)) - return; - /* * IOSF_PORT_DPIO is used for VLV x2 PHY (DP/HDMI B and C), * CHV x1 PHY (DP/HDMI D) @@ -847,7 +850,7 @@ static void intel_init_dpio(struct drm_i915_private *dev_priv) if (IS_CHERRYVIEW(dev_priv)) { DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO_2; DPIO_PHY_IOSF_PORT(DPIO_PHY1) = IOSF_PORT_DPIO; - } else { + } else if (IS_VALLEYVIEW(dev_priv)) { DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO; } } @@ -896,6 +899,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) intel_pm_setup(dev); + intel_runtime_pm_get(dev_priv); + intel_display_crc_init(dev); i915_dump_device_info(dev_priv); @@ -1026,7 +1031,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) /* Try to make sure MCHBAR is enabled before poking at it */ intel_setup_mchbar(dev); - intel_setup_gmbus(dev); intel_opregion_setup(dev); i915_gem_load(dev); @@ -1085,6 +1089,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) i915_audio_component_init(dev_priv); + intel_runtime_pm_put(dev_priv); + return 0; out_power_well: @@ -1097,7 +1103,6 @@ out_gem_unload: if (dev->pdev->msi_enabled) pci_disable_msi(dev->pdev); - intel_teardown_gmbus(dev); intel_teardown_mchbar(dev); pm_qos_remove_request(&dev_priv->pm_qos); destroy_workqueue(dev_priv->gpu_error.hangcheck_wq); @@ -1120,6 +1125,9 @@ free_priv: kmem_cache_destroy(dev_priv->requests); kmem_cache_destroy(dev_priv->vmas); kmem_cache_destroy(dev_priv->objects); + + intel_runtime_pm_put(dev_priv); + kfree(dev_priv); return ret; } @@ -1196,7 +1204,6 @@ int i915_driver_unload(struct drm_device *dev) intel_csr_ucode_fini(dev_priv); - intel_teardown_gmbus(dev); intel_teardown_mchbar(dev); destroy_workqueue(dev_priv->hotplug.dp_wq); diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 6344dfb72177..3ac616d7363b 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -228,157 +228,106 @@ static const struct intel_device_info intel_sandybridge_m_info = { .need_gfx_hws = 1, .has_hotplug = 1, \ .has_fbc = 1, \ .ring_mask = RENDER_RING | BSD_RING | BLT_RING, \ - .has_llc = 1 + .has_llc = 1, \ + GEN_DEFAULT_PIPEOFFSETS, \ + IVB_CURSOR_OFFSETS static const struct intel_device_info intel_ivybridge_d_info = { GEN7_FEATURES, .is_ivybridge = 1, - GEN_DEFAULT_PIPEOFFSETS, - IVB_CURSOR_OFFSETS, }; static const struct intel_device_info intel_ivybridge_m_info = { GEN7_FEATURES, .is_ivybridge = 1, .is_mobile = 1, - 
GEN_DEFAULT_PIPEOFFSETS, - IVB_CURSOR_OFFSETS, }; static const struct intel_device_info intel_ivybridge_q_info = { GEN7_FEATURES, .is_ivybridge = 1, .num_pipes = 0, /* legal, last one wins */ - GEN_DEFAULT_PIPEOFFSETS, - IVB_CURSOR_OFFSETS, }; +#define VLV_FEATURES \ + .gen = 7, .num_pipes = 2, \ + .need_gfx_hws = 1, .has_hotplug = 1, \ + .ring_mask = RENDER_RING | BSD_RING | BLT_RING, \ + .display_mmio_offset = VLV_DISPLAY_BASE, \ + GEN_DEFAULT_PIPEOFFSETS, \ + CURSOR_OFFSETS + static const struct intel_device_info intel_valleyview_m_info = { - GEN7_FEATURES, - .is_mobile = 1, - .num_pipes = 2, + VLV_FEATURES, .is_valleyview = 1, - .display_mmio_offset = VLV_DISPLAY_BASE, - .has_fbc = 0, /* legal, last one wins */ - .has_llc = 0, /* legal, last one wins */ - GEN_DEFAULT_PIPEOFFSETS, - CURSOR_OFFSETS, + .is_mobile = 1, }; static const struct intel_device_info intel_valleyview_d_info = { - GEN7_FEATURES, - .num_pipes = 2, + VLV_FEATURES, .is_valleyview = 1, - .display_mmio_offset = VLV_DISPLAY_BASE, - .has_fbc = 0, /* legal, last one wins */ - .has_llc = 0, /* legal, last one wins */ - GEN_DEFAULT_PIPEOFFSETS, - CURSOR_OFFSETS, }; +#define HSW_FEATURES \ + GEN7_FEATURES, \ + .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, \ + .has_ddi = 1, \ + .has_fpga_dbg = 1 + static const struct intel_device_info intel_haswell_d_info = { - GEN7_FEATURES, + HSW_FEATURES, .is_haswell = 1, - .has_ddi = 1, - .has_fpga_dbg = 1, - .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, - GEN_DEFAULT_PIPEOFFSETS, - IVB_CURSOR_OFFSETS, }; static const struct intel_device_info intel_haswell_m_info = { - GEN7_FEATURES, + HSW_FEATURES, .is_haswell = 1, .is_mobile = 1, - .has_ddi = 1, - .has_fpga_dbg = 1, - .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, - GEN_DEFAULT_PIPEOFFSETS, - IVB_CURSOR_OFFSETS, }; static const struct intel_device_info intel_broadwell_d_info = { - .gen = 8, .num_pipes = 3, - .need_gfx_hws = 1, .has_hotplug = 1, - .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, - .has_llc = 1, - .has_ddi = 1, - .has_fpga_dbg = 1, - .has_fbc = 1, - GEN_DEFAULT_PIPEOFFSETS, - IVB_CURSOR_OFFSETS, + HSW_FEATURES, + .gen = 8, }; static const struct intel_device_info intel_broadwell_m_info = { - .gen = 8, .is_mobile = 1, .num_pipes = 3, - .need_gfx_hws = 1, .has_hotplug = 1, - .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, - .has_llc = 1, - .has_ddi = 1, - .has_fpga_dbg = 1, - .has_fbc = 1, - GEN_DEFAULT_PIPEOFFSETS, - IVB_CURSOR_OFFSETS, + HSW_FEATURES, + .gen = 8, .is_mobile = 1, }; static const struct intel_device_info intel_broadwell_gt3d_info = { - .gen = 8, .num_pipes = 3, - .need_gfx_hws = 1, .has_hotplug = 1, + HSW_FEATURES, + .gen = 8, .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING, - .has_llc = 1, - .has_ddi = 1, - .has_fpga_dbg = 1, - .has_fbc = 1, - GEN_DEFAULT_PIPEOFFSETS, - IVB_CURSOR_OFFSETS, }; static const struct intel_device_info intel_broadwell_gt3m_info = { - .gen = 8, .is_mobile = 1, .num_pipes = 3, - .need_gfx_hws = 1, .has_hotplug = 1, + HSW_FEATURES, + .gen = 8, .is_mobile = 1, .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING, - .has_llc = 1, - .has_ddi = 1, - .has_fpga_dbg = 1, - .has_fbc = 1, - GEN_DEFAULT_PIPEOFFSETS, - IVB_CURSOR_OFFSETS, }; static const struct intel_device_info intel_cherryview_info = { .gen = 8, .num_pipes = 3, .need_gfx_hws = 1, .has_hotplug = 1, .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, - .is_valleyview = 1, + .is_cherryview = 1, 
.display_mmio_offset = VLV_DISPLAY_BASE, GEN_CHV_PIPEOFFSETS, CURSOR_OFFSETS, }; static const struct intel_device_info intel_skylake_info = { + HSW_FEATURES, .is_skylake = 1, - .gen = 9, .num_pipes = 3, - .need_gfx_hws = 1, .has_hotplug = 1, - .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, - .has_llc = 1, - .has_ddi = 1, - .has_fpga_dbg = 1, - .has_fbc = 1, - GEN_DEFAULT_PIPEOFFSETS, - IVB_CURSOR_OFFSETS, + .gen = 9, }; static const struct intel_device_info intel_skylake_gt3_info = { + HSW_FEATURES, .is_skylake = 1, - .gen = 9, .num_pipes = 3, - .need_gfx_hws = 1, .has_hotplug = 1, + .gen = 9, .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING, - .has_llc = 1, - .has_ddi = 1, - .has_fpga_dbg = 1, - .has_fbc = 1, - GEN_DEFAULT_PIPEOFFSETS, - IVB_CURSOR_OFFSETS, }; static const struct intel_device_info intel_broxton_info = { @@ -396,33 +345,18 @@ static const struct intel_device_info intel_broxton_info = { }; static const struct intel_device_info intel_kabylake_info = { + HSW_FEATURES, .is_preliminary = 1, .is_kabylake = 1, .gen = 9, - .num_pipes = 3, - .need_gfx_hws = 1, .has_hotplug = 1, - .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, - .has_llc = 1, - .has_ddi = 1, - .has_fpga_dbg = 1, - .has_fbc = 1, - GEN_DEFAULT_PIPEOFFSETS, - IVB_CURSOR_OFFSETS, }; static const struct intel_device_info intel_kabylake_gt3_info = { + HSW_FEATURES, .is_preliminary = 1, .is_kabylake = 1, .gen = 9, - .num_pipes = 3, - .need_gfx_hws = 1, .has_hotplug = 1, .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING, - .has_llc = 1, - .has_ddi = 1, - .has_fpga_dbg = 1, - .has_fbc = 1, - GEN_DEFAULT_PIPEOFFSETS, - IVB_CURSOR_OFFSETS, }; /* @@ -465,6 +399,7 @@ static const struct pci_device_id pciidlist[] = { INTEL_SKL_GT1_IDS(&intel_skylake_info), INTEL_SKL_GT2_IDS(&intel_skylake_info), INTEL_SKL_GT3_IDS(&intel_skylake_gt3_info), + INTEL_SKL_GT4_IDS(&intel_skylake_gt3_info), INTEL_BXT_IDS(&intel_broxton_info), INTEL_KBL_GT1_IDS(&intel_kabylake_info), INTEL_KBL_GT2_IDS(&intel_kabylake_info), @@ -565,7 +500,8 @@ void intel_detect_pch(struct drm_device *dev) DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n"); WARN_ON(!IS_SKYLAKE(dev) && !IS_KABYLAKE(dev)); - } else if (id == INTEL_PCH_P2X_DEVICE_ID_TYPE) { + } else if ((id == INTEL_PCH_P2X_DEVICE_ID_TYPE) || + (id == INTEL_PCH_QEMU_DEVICE_ID_TYPE)) { dev_priv->pch_type = intel_virt_detect_pch(dev); } else continue; @@ -607,15 +543,12 @@ bool i915_semaphore_is_enabled(struct drm_device *dev) static void intel_suspend_encoders(struct drm_i915_private *dev_priv) { struct drm_device *dev = dev_priv->dev; - struct drm_encoder *encoder; + struct intel_encoder *encoder; drm_modeset_lock_all(dev); - list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { - struct intel_encoder *intel_encoder = to_intel_encoder(encoder); - - if (intel_encoder->suspend) - intel_encoder->suspend(intel_encoder); - } + for_each_intel_encoder(dev, encoder) + if (encoder->suspend) + encoder->suspend(encoder); drm_modeset_unlock_all(dev); } @@ -624,6 +557,14 @@ static int vlv_resume_prepare(struct drm_i915_private *dev_priv, bool rpm_resume); static int bxt_resume_prepare(struct drm_i915_private *dev_priv); +static bool suspend_to_idle(struct drm_i915_private *dev_priv) +{ +#if IS_ENABLED(CONFIG_ACPI_SLEEP) + if (acpi_target_system_state() < ACPI_STATE_S3) + return true; +#endif + return false; +} static int i915_drm_suspend(struct drm_device *dev) { @@ -636,6 +577,8 @@ static int i915_drm_suspend(struct drm_device *dev) 
dev_priv->modeset_restore = MODESET_SUSPENDED; mutex_unlock(&dev_priv->modeset_restore_lock); + disable_rpm_wakeref_asserts(dev_priv); + /* We do a lot of poking in a lot of registers, make sure they work * properly. */ intel_display_set_init_power(dev_priv, true); @@ -648,7 +591,7 @@ static int i915_drm_suspend(struct drm_device *dev) if (error) { dev_err(&dev->pdev->dev, "GEM idle failed, resume might fail\n"); - return error; + goto out; } intel_guc_suspend(dev); @@ -676,11 +619,7 @@ static int i915_drm_suspend(struct drm_device *dev) i915_save_state(dev); - opregion_target_state = PCI_D3cold; -#if IS_ENABLED(CONFIG_ACPI_SLEEP) - if (acpi_target_system_state() < ACPI_STATE_S3) - opregion_target_state = PCI_D1; -#endif + opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold; intel_opregion_notify_adapter(dev, opregion_target_state); intel_uncore_forcewake_reset(dev, false); @@ -695,23 +634,39 @@ static int i915_drm_suspend(struct drm_device *dev) if (HAS_CSR(dev_priv)) flush_work(&dev_priv->csr.work); - return 0; +out: + enable_rpm_wakeref_asserts(dev_priv); + + return error; } static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation) { struct drm_i915_private *dev_priv = drm_dev->dev_private; + bool fw_csr; int ret; - intel_power_domains_suspend(dev_priv); + disable_rpm_wakeref_asserts(dev_priv); + + fw_csr = suspend_to_idle(dev_priv) && dev_priv->csr.dmc_payload; + /* + * In case of firmware assisted context save/restore don't manually + * deinit the power domains. This also means the CSR/DMC firmware will + * stay active, it will power down any HW resources as required and + * also enable deeper system power states that would be blocked if the + * firmware was inactive. + */ + if (!fw_csr) + intel_power_domains_suspend(dev_priv); ret = intel_suspend_complete(dev_priv); if (ret) { DRM_ERROR("Suspend complete failed: %d\n", ret); - intel_power_domains_init_hw(dev_priv, true); + if (!fw_csr) + intel_power_domains_init_hw(dev_priv, true); - return ret; + goto out; } pci_disable_device(drm_dev->pdev); @@ -730,7 +685,12 @@ static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation) if (!(hibernation && INTEL_INFO(dev_priv)->gen < 6)) pci_set_power_state(drm_dev->pdev, PCI_D3hot); - return 0; + dev_priv->suspended_to_idle = suspend_to_idle(dev_priv); + +out: + enable_rpm_wakeref_asserts(dev_priv); + + return ret; } int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state) @@ -761,6 +721,8 @@ static int i915_drm_resume(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; + disable_rpm_wakeref_asserts(dev_priv); + mutex_lock(&dev->struct_mutex); i915_gem_restore_gtt_mappings(dev); mutex_unlock(&dev->struct_mutex); @@ -825,6 +787,8 @@ static int i915_drm_resume(struct drm_device *dev) drm_kms_helper_poll_enable(dev); + enable_rpm_wakeref_asserts(dev_priv); + return 0; } @@ -842,12 +806,16 @@ static int i915_drm_resume_early(struct drm_device *dev) * FIXME: This should be solved with a special hdmi sink device or * similar so that power domains can be employed. 
*/ - if (pci_enable_device(dev->pdev)) - return -EIO; + if (pci_enable_device(dev->pdev)) { + ret = -EIO; + goto out; + } pci_set_master(dev->pdev); - if (IS_VALLEYVIEW(dev_priv)) + disable_rpm_wakeref_asserts(dev_priv); + + if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) ret = vlv_resume_prepare(dev_priv, false); if (ret) DRM_ERROR("Resume prepare failed: %d, continuing anyway\n", @@ -861,7 +829,14 @@ static int i915_drm_resume_early(struct drm_device *dev) hsw_disable_pc8(dev_priv); intel_uncore_sanitize(dev); - intel_power_domains_init_hw(dev_priv, true); + + if (!(dev_priv->suspended_to_idle && dev_priv->csr.dmc_payload)) + intel_power_domains_init_hw(dev_priv, true); + +out: + dev_priv->suspended_to_idle = false; + + enable_rpm_wakeref_asserts(dev_priv); return ret; } @@ -1495,6 +1470,9 @@ static int intel_runtime_suspend(struct device *device) return -EAGAIN; } + + disable_rpm_wakeref_asserts(dev_priv); + /* * We are safe here against re-faults, since the fault handler takes * an RPM reference. @@ -1502,6 +1480,8 @@ static int intel_runtime_suspend(struct device *device) i915_gem_release_all_mmaps(dev_priv); mutex_unlock(&dev->struct_mutex); + cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work); + intel_guc_suspend(dev); intel_suspend_gt_powersave(dev); @@ -1512,11 +1492,15 @@ static int intel_runtime_suspend(struct device *device) DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret); intel_runtime_pm_enable_interrupts(dev_priv); + enable_rpm_wakeref_asserts(dev_priv); + return ret; } - cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work); intel_uncore_forcewake_reset(dev, false); + + enable_rpm_wakeref_asserts(dev_priv); + WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count)); dev_priv->pm.suspended = true; /* @@ -1560,6 +1544,9 @@ static int intel_runtime_resume(struct device *device) DRM_DEBUG_KMS("Resuming device\n"); + WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count)); + disable_rpm_wakeref_asserts(dev_priv); + intel_opregion_notify_adapter(dev, PCI_D0); dev_priv->pm.suspended = false; @@ -1572,7 +1559,7 @@ static int intel_runtime_resume(struct device *device) ret = bxt_resume_prepare(dev_priv); else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) hsw_disable_pc8(dev_priv); - else if (IS_VALLEYVIEW(dev_priv)) + else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) ret = vlv_resume_prepare(dev_priv, true); /* @@ -1589,11 +1576,13 @@ static int intel_runtime_resume(struct device *device) * power well, so hpd is reinitialized from there. For * everyone else do it here. 
*/ - if (!IS_VALLEYVIEW(dev_priv)) + if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) intel_hpd_init(dev_priv); intel_enable_gt_powersave(dev); + enable_rpm_wakeref_asserts(dev_priv); + if (ret) DRM_ERROR("Runtime resume failed, disabling it (%d)\n", ret); else @@ -1614,7 +1603,7 @@ static int intel_suspend_complete(struct drm_i915_private *dev_priv) ret = bxt_suspend_complete(dev_priv); else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) ret = hsw_suspend_complete(dev_priv); - else if (IS_VALLEYVIEW(dev_priv)) + else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) ret = vlv_suspend_complete(dev_priv); else ret = 0; diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index e6ab4655eb23..f0f75d7c0d94 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -33,6 +33,7 @@ #include #include +#include #include "i915_reg.h" #include "intel_bios.h" #include "intel_ringbuffer.h" @@ -57,7 +58,7 @@ #define DRIVER_NAME "i915" #define DRIVER_DESC "Intel Graphics" -#define DRIVER_DATE "20151120" +#define DRIVER_DATE "20151218" #undef WARN_ON /* Many gcc seem to no see through this and fall over :( */ @@ -457,7 +458,9 @@ struct intel_opregion { u32 swsci_gbda_sub_functions; u32 swsci_sbcb_sub_functions; struct opregion_asle *asle; - void *vbt; + void *rvda; + const void *vbt; + u32 vbt_size; u32 *lid_state; struct work_struct asle_work; }; @@ -763,6 +766,7 @@ struct intel_csr { func(is_crestline) sep \ func(is_ivybridge) sep \ func(is_valleyview) sep \ + func(is_cherryview) sep \ func(is_haswell) sep \ func(is_skylake) sep \ func(is_broxton) sep \ @@ -902,7 +906,6 @@ struct i915_fbc { /* This is always the inner lock when overlapping with struct_mutex and * it's the outer lock when overlapping with stolen_lock. */ struct mutex lock; - unsigned long uncompressed_size; unsigned threshold; unsigned int fb_id; unsigned int possible_framebuffer_bits; @@ -915,21 +918,21 @@ struct i915_fbc { bool false_color; - /* Tracks whether the HW is actually enabled, not whether the feature is - * possible. */ bool enabled; + bool active; struct intel_fbc_work { - struct delayed_work work; - struct intel_crtc *crtc; + bool scheduled; + struct work_struct work; struct drm_framebuffer *fb; - } *fbc_work; + unsigned long enable_jiffies; + } work; const char *no_fbc_reason; - bool (*fbc_enabled)(struct drm_i915_private *dev_priv); - void (*enable_fbc)(struct intel_crtc *crtc); - void (*disable_fbc)(struct drm_i915_private *dev_priv); + bool (*is_active)(struct drm_i915_private *dev_priv); + void (*activate)(struct intel_crtc *crtc); + void (*deactivate)(struct drm_i915_private *dev_priv); }; /** @@ -1602,6 +1605,8 @@ struct skl_wm_level { * For more, read the Documentation/power/runtime_pm.txt. 
*/ struct i915_runtime_pm { + atomic_t wakeref_count; + atomic_t atomic_seq; bool suspended; bool irqs_enabled; }; @@ -1885,6 +1890,7 @@ struct drm_i915_private { u32 chv_phy_control; u32 suspend_count; + bool suspended_to_idle; struct i915_suspend_saved_registers regfile; struct vlv_s0ix_state vlv_s0ix_state; @@ -2466,9 +2472,9 @@ struct drm_i915_cmd_table { INTEL_DEVID(dev) == 0x0152 || \ INTEL_DEVID(dev) == 0x015a) #define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview) -#define IS_CHERRYVIEW(dev) (INTEL_INFO(dev)->is_valleyview && IS_GEN8(dev)) +#define IS_CHERRYVIEW(dev) (INTEL_INFO(dev)->is_cherryview) #define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell) -#define IS_BROADWELL(dev) (!INTEL_INFO(dev)->is_valleyview && IS_GEN8(dev)) +#define IS_BROADWELL(dev) (!INTEL_INFO(dev)->is_cherryview && IS_GEN8(dev)) #define IS_SKYLAKE(dev) (INTEL_INFO(dev)->is_skylake) #define IS_BROXTON(dev) (INTEL_INFO(dev)->is_broxton) #define IS_KABYLAKE(dev) (INTEL_INFO(dev)->is_kabylake) @@ -2499,6 +2505,14 @@ struct drm_i915_cmd_table { #define IS_SKL_ULX(dev) (INTEL_DEVID(dev) == 0x190E || \ INTEL_DEVID(dev) == 0x1915 || \ INTEL_DEVID(dev) == 0x191E) +#define IS_KBL_ULT(dev) (INTEL_DEVID(dev) == 0x5906 || \ + INTEL_DEVID(dev) == 0x5913 || \ + INTEL_DEVID(dev) == 0x5916 || \ + INTEL_DEVID(dev) == 0x5921 || \ + INTEL_DEVID(dev) == 0x5926) +#define IS_KBL_ULX(dev) (INTEL_DEVID(dev) == 0x590E || \ + INTEL_DEVID(dev) == 0x5915 || \ + INTEL_DEVID(dev) == 0x591E) #define IS_SKL_GT3(dev) (IS_SKYLAKE(dev) && \ (INTEL_DEVID(dev) & 0x00F0) == 0x0020) #define IS_SKL_GT4(dev) (IS_SKYLAKE(dev) && \ @@ -2595,20 +2609,22 @@ struct drm_i915_cmd_table { IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) #define HAS_RUNTIME_PM(dev) (IS_GEN6(dev) || IS_HASWELL(dev) || \ IS_BROADWELL(dev) || IS_VALLEYVIEW(dev) || \ - IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) + IS_CHERRYVIEW(dev) || IS_SKYLAKE(dev) || \ + IS_KABYLAKE(dev)) #define HAS_RC6(dev) (INTEL_INFO(dev)->gen >= 6) #define HAS_RC6p(dev) (INTEL_INFO(dev)->gen == 6 || IS_IVYBRIDGE(dev)) #define HAS_CSR(dev) (IS_GEN9(dev)) -#define HAS_GUC_UCODE(dev) (IS_GEN9(dev)) -#define HAS_GUC_SCHED(dev) (IS_GEN9(dev)) +#define HAS_GUC_UCODE(dev) (IS_GEN9(dev) && !IS_KABYLAKE(dev)) +#define HAS_GUC_SCHED(dev) (IS_GEN9(dev) && !IS_KABYLAKE(dev)) #define HAS_RESOURCE_STREAMER(dev) (IS_HASWELL(dev) || \ INTEL_INFO(dev)->gen >= 8) #define HAS_CORE_RING_FREQ(dev) (INTEL_INFO(dev)->gen >= 6 && \ - !IS_VALLEYVIEW(dev) && !IS_BROXTON(dev)) + !IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) && \ + !IS_BROXTON(dev)) #define INTEL_PCH_DEVICE_ID_MASK 0xff00 #define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00 @@ -2619,17 +2635,20 @@ struct drm_i915_cmd_table { #define INTEL_PCH_SPT_DEVICE_ID_TYPE 0xA100 #define INTEL_PCH_SPT_LP_DEVICE_ID_TYPE 0x9D00 #define INTEL_PCH_P2X_DEVICE_ID_TYPE 0x7100 +#define INTEL_PCH_QEMU_DEVICE_ID_TYPE 0x2900 /* qemu q35 has 2918 */ #define INTEL_PCH_TYPE(dev) (__I915__(dev)->pch_type) #define HAS_PCH_SPT(dev) (INTEL_PCH_TYPE(dev) == PCH_SPT) #define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT) #define HAS_PCH_LPT_LP(dev) (__I915__(dev)->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) +#define HAS_PCH_LPT_H(dev) (__I915__(dev)->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) #define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT) #define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX) #define HAS_PCH_NOP(dev) (INTEL_PCH_TYPE(dev) == PCH_NOP) #define HAS_PCH_SPLIT(dev) (INTEL_PCH_TYPE(dev) != PCH_NONE) -#define HAS_GMCH_DISPLAY(dev) (INTEL_INFO(dev)->gen < 5 || IS_VALLEYVIEW(dev)) +#define 
HAS_GMCH_DISPLAY(dev) (INTEL_INFO(dev)->gen < 5 || \ + IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) /* DPF == dynamic parity feature */ #define HAS_L3_DPF(dev) (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) @@ -2760,17 +2779,47 @@ void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv); void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv, uint32_t mask, uint32_t bits); -void -ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask); -void -ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask); +void ilk_update_display_irq(struct drm_i915_private *dev_priv, + uint32_t interrupt_mask, + uint32_t enabled_irq_mask); +static inline void +ilk_enable_display_irq(struct drm_i915_private *dev_priv, uint32_t bits) +{ + ilk_update_display_irq(dev_priv, bits, bits); +} +static inline void +ilk_disable_display_irq(struct drm_i915_private *dev_priv, uint32_t bits) +{ + ilk_update_display_irq(dev_priv, bits, 0); +} +void bdw_update_pipe_irq(struct drm_i915_private *dev_priv, + enum pipe pipe, + uint32_t interrupt_mask, + uint32_t enabled_irq_mask); +static inline void bdw_enable_pipe_irq(struct drm_i915_private *dev_priv, + enum pipe pipe, uint32_t bits) +{ + bdw_update_pipe_irq(dev_priv, pipe, bits, bits); +} +static inline void bdw_disable_pipe_irq(struct drm_i915_private *dev_priv, + enum pipe pipe, uint32_t bits) +{ + bdw_update_pipe_irq(dev_priv, pipe, bits, 0); +} void ibx_display_interrupt_update(struct drm_i915_private *dev_priv, uint32_t interrupt_mask, uint32_t enabled_irq_mask); -#define ibx_enable_display_interrupt(dev_priv, bits) \ - ibx_display_interrupt_update((dev_priv), (bits), (bits)) -#define ibx_disable_display_interrupt(dev_priv, bits) \ - ibx_display_interrupt_update((dev_priv), (bits), 0) +static inline void +ibx_enable_display_interrupt(struct drm_i915_private *dev_priv, uint32_t bits) +{ + ibx_display_interrupt_update(dev_priv, bits, bits); +} +static inline void +ibx_disable_display_interrupt(struct drm_i915_private *dev_priv, uint32_t bits) +{ + ibx_display_interrupt_update(dev_priv, bits, 0); +} + /* i915_gem.c */ int i915_gem_create_ioctl(struct drm_device *dev, void *data, @@ -2839,6 +2888,7 @@ void i915_gem_vma_destroy(struct i915_vma *vma); #define PIN_UPDATE (1<<5) #define PIN_ZONE_4G (1<<6) #define PIN_HIGH (1<<7) +#define PIN_OFFSET_FIXED (1<<8) #define PIN_OFFSET_MASK (~4095) int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj, @@ -2874,6 +2924,9 @@ static inline int __sg_page_count(struct scatterlist *sg) return sg->length >> PAGE_SHIFT; } +struct page * +i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, int n); + static inline struct page * i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n) { @@ -3187,6 +3240,7 @@ int __must_check i915_gem_evict_something(struct drm_device *dev, unsigned long start, unsigned long end, unsigned flags); +int __must_check i915_gem_evict_for_vma(struct i915_vma *target); int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle); /* belongs in i915_gem_gtt.h */ @@ -3315,6 +3369,10 @@ static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter) } extern void intel_i2c_reset(struct drm_device *dev); +/* intel_bios.c */ +int intel_bios_init(struct drm_i915_private *dev_priv); +bool intel_bios_is_valid_vbt(const void *buf, size_t size); + /* intel_opregion.c */ #ifdef CONFIG_ACPI extern int intel_opregion_setup(struct drm_device *dev); @@ -3493,7 +3551,7 @@ __raw_write(64, q) static inline i915_reg_t 
i915_vgacntrl_reg(struct drm_device *dev) { - if (IS_VALLEYVIEW(dev)) + if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) return VLV_VGACNTRL; else if (INTEL_INFO(dev)->gen >= 5) return CPU_VGACNTRL; diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 262020f8b38d..ddc21d4b388d 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -2817,20 +2817,13 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv, if (i915.enable_execlists) { spin_lock_irq(&ring->execlist_lock); - while (!list_empty(&ring->execlist_queue)) { - struct drm_i915_gem_request *submit_req; - submit_req = list_first_entry(&ring->execlist_queue, - struct drm_i915_gem_request, - execlist_link); - list_del(&submit_req->execlist_link); + /* list_splice_tail_init checks for empty lists */ + list_splice_tail_init(&ring->execlist_queue, + &ring->execlist_retired_req_list); - if (submit_req->ctx != ring->default_context) - intel_lr_context_unpin(submit_req); - - i915_gem_request_unreference(submit_req); - } spin_unlock_irq(&ring->execlist_lock); + intel_execlists_retire_requests(ring); } /* @@ -3001,6 +2994,10 @@ i915_gem_idle_work_handler(struct work_struct *work) if (!list_empty(&ring->request_list)) return; + /* we probably should sync with hangcheck here, using cancel_work_sync. + * Also locking seems to be fubar here, ring->request_list is protected + * by dev->struct_mutex. */ + intel_mark_idle(dev); if (mutex_trylock(&dev->struct_mutex)) { @@ -3125,7 +3122,7 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file) if (ret == 0) ret = __i915_wait_request(req[i], reset_counter, true, args->timeout_ns > 0 ? &args->timeout_ns : NULL, - file->driver_priv); + to_rps_client(file)); i915_gem_request_unreference__unlocked(req[i]); } return ret; @@ -3491,7 +3488,7 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj, if (flags & PIN_MAPPABLE) end = min_t(u64, end, dev_priv->gtt.mappable_end); if (flags & PIN_ZONE_4G) - end = min_t(u64, end, (1ULL << 32)); + end = min_t(u64, end, (1ULL << 32) - PAGE_SIZE); if (alignment == 0) alignment = flags & PIN_MAPPABLE ? 
fence_alignment : @@ -3528,30 +3525,50 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj, if (IS_ERR(vma)) goto err_unpin; - if (flags & PIN_HIGH) { - search_flag = DRM_MM_SEARCH_BELOW; - alloc_flag = DRM_MM_CREATE_TOP; + if (flags & PIN_OFFSET_FIXED) { + uint64_t offset = flags & PIN_OFFSET_MASK; + + if (offset & (alignment - 1) || offset + size > end) { + ret = -EINVAL; + goto err_free_vma; + } + vma->node.start = offset; + vma->node.size = size; + vma->node.color = obj->cache_level; + ret = drm_mm_reserve_node(&vm->mm, &vma->node); + if (ret) { + ret = i915_gem_evict_for_vma(vma); + if (ret == 0) + ret = drm_mm_reserve_node(&vm->mm, &vma->node); + } + if (ret) + goto err_free_vma; } else { - search_flag = DRM_MM_SEARCH_DEFAULT; - alloc_flag = DRM_MM_CREATE_DEFAULT; - } + if (flags & PIN_HIGH) { + search_flag = DRM_MM_SEARCH_BELOW; + alloc_flag = DRM_MM_CREATE_TOP; + } else { + search_flag = DRM_MM_SEARCH_DEFAULT; + alloc_flag = DRM_MM_CREATE_DEFAULT; + } search_free: - ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node, - size, alignment, - obj->cache_level, - start, end, - search_flag, - alloc_flag); - if (ret) { - ret = i915_gem_evict_something(dev, vm, size, alignment, - obj->cache_level, - start, end, - flags); - if (ret == 0) - goto search_free; + ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node, + size, alignment, + obj->cache_level, + start, end, + search_flag, + alloc_flag); + if (ret) { + ret = i915_gem_evict_something(dev, vm, size, alignment, + obj->cache_level, + start, end, + flags); + if (ret == 0) + goto search_free; - goto err_free_vma; + goto err_free_vma; + } } if (WARN_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level))) { ret = -EINVAL; @@ -4142,6 +4159,10 @@ i915_vma_misplaced(struct i915_vma *vma, uint32_t alignment, uint64_t flags) vma->node.start < (flags & PIN_OFFSET_MASK)) return true; + if (flags & PIN_OFFSET_FIXED && + vma->node.start != (flags & PIN_OFFSET_MASK)) + return true; + return false; } @@ -4895,14 +4916,6 @@ int i915_gem_init(struct drm_device *dev) mutex_lock(&dev->struct_mutex); - if (IS_VALLEYVIEW(dev)) { - /* VLVA0 (potential hack), BIOS isn't actually waking us */ - I915_WRITE(VLV_GTLC_WAKE_CTRL, VLV_GTLC_ALLOWWAKEREQ); - if (wait_for((I915_READ(VLV_GTLC_PW_STATUS) & - VLV_GTLC_ALLOWWAKEACK), 10)) - DRM_DEBUG_DRIVER("allow wake ack timed out\n"); - } - if (!i915.enable_execlists) { dev_priv->gt.execbuf_submit = i915_gem_ringbuffer_submission; dev_priv->gt.init_rings = i915_gem_init_rings; @@ -5020,7 +5033,7 @@ i915_gem_load(struct drm_device *dev) dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL; - if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev)) + if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) dev_priv->num_fence_regs = 32; else if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) dev_priv->num_fence_regs = 16; @@ -5241,6 +5254,21 @@ bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj) return false; } +/* Like i915_gem_object_get_page(), but mark the returned page dirty */ +struct page * +i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, int n) +{ + struct page *page; + + /* Only default objects have per-page dirty tracking */ + if (WARN_ON(obj->ops != &i915_gem_object_ops)) + return NULL; + + page = i915_gem_object_get_page(obj, n); + set_page_dirty(page); + return page; +} + /* Allocate a new GEM object and fill it with the supplied data */ struct drm_i915_gem_object * 
i915_gem_object_create_from_data(struct drm_device *dev, @@ -5266,6 +5294,7 @@ i915_gem_object_create_from_data(struct drm_device *dev, i915_gem_object_pin_pages(obj); sg = obj->pages; bytes = sg_copy_from_buffer(sg->sgl, sg->nents, (void *)data, size); + obj->dirty = 1; /* Backing store is now out of date */ i915_gem_object_unpin_pages(obj); if (WARN_ON(bytes != size)) { diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index 43761c5bcaca..c25083c78ba7 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c @@ -189,8 +189,15 @@ i915_gem_alloc_context_obj(struct drm_device *dev, size_t size) * shouldn't touch the cache level, especially as that * would make the object snooped which might have a * negative performance impact. + * + * Snooping is required on non-llc platforms in execlist + * mode, but since all GGTT accesses use PAT entry 0 we + * get snooping anyway regardless of cache_level. + * + * This is only applicable for Ivy Bridge devices since + * later platforms don't have L3 control bits in the PTE. */ - if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev)) { + if (IS_IVYBRIDGE(dev)) { ret = i915_gem_object_set_cache_level(obj, I915_CACHE_L3_LLC); /* Failure shouldn't ever happen this early */ if (WARN_ON(ret)) { @@ -340,6 +347,10 @@ void i915_gem_context_reset(struct drm_device *dev) i915_gem_context_unreference(lctx); ring->last_context = NULL; } + + /* Force the GPU state to be reinitialised on enabling */ + if (ring->default_context) + ring->default_context->legacy_hw_ctx.initialized = false; } } @@ -708,7 +719,7 @@ static int do_switch(struct drm_i915_gem_request *req) if (ret) goto unpin_out; - if (!to->legacy_hw_ctx.initialized) { + if (!to->legacy_hw_ctx.initialized || i915_gem_context_is_default(to)) { hw_flags |= MI_RESTORE_INHIBIT; /* NB: If we inhibit the restore, the context is not allowed to * die because future work may end up depending on valid address diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c index d71a133ceff5..07c6e4d320c9 100644 --- a/drivers/gpu/drm/i915/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/i915_gem_evict.c @@ -199,6 +199,45 @@ found: return ret; } +int +i915_gem_evict_for_vma(struct i915_vma *target) +{ + struct drm_mm_node *node, *next; + + list_for_each_entry_safe(node, next, + &target->vm->mm.head_node.node_list, + node_list) { + struct i915_vma *vma; + int ret; + + if (node->start + node->size <= target->node.start) + continue; + if (node->start >= target->node.start + target->node.size) + break; + + vma = container_of(node, typeof(*vma), node); + + if (vma->pin_count) { + if (!vma->exec_entry || (vma->pin_count > 1)) + /* Object is pinned for some other use */ + return -EBUSY; + + /* We need to evict a buffer in the same batch */ + if (vma->exec_entry->flags & EXEC_OBJECT_PINNED) + /* Overlapping fixed objects in the same batch */ + return -EINVAL; + + return -ENOSPC; + } + + ret = i915_vma_unbind(vma); + if (ret) + return ret; + } + + return 0; +} + /** * i915_gem_evict_vm - Evict all idle vmas from a vm * @vm: Address space to cleanse diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index a4c243cec4aa..dccb517361b3 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -249,6 +249,31 @@ static inline int use_cpu_reloc(struct drm_i915_gem_object *obj) obj->cache_level != I915_CACHE_NONE); } +/* Used to 
convert any address to canonical form. + * Starting from gen8, some commands (e.g. STATE_BASE_ADDRESS, + * MI_LOAD_REGISTER_MEM and others, see Broadwell PRM Vol2a) require the + * addresses to be in a canonical form: + * "GraphicsAddress[63:48] are ignored by the HW and assumed to be in correct + * canonical form [63:48] == [47]." + */ +#define GEN8_HIGH_ADDRESS_BIT 47 +static inline uint64_t gen8_canonical_addr(uint64_t address) +{ + return sign_extend64(address, GEN8_HIGH_ADDRESS_BIT); +} + +static inline uint64_t gen8_noncanonical_addr(uint64_t address) +{ + return address & ((1ULL << (GEN8_HIGH_ADDRESS_BIT + 1)) - 1); +} + +static inline uint64_t +relocation_target(struct drm_i915_gem_relocation_entry *reloc, + uint64_t target_offset) +{ + return gen8_canonical_addr((int)reloc->delta + target_offset); +} + static int relocate_entry_cpu(struct drm_i915_gem_object *obj, struct drm_i915_gem_relocation_entry *reloc, @@ -256,7 +281,7 @@ relocate_entry_cpu(struct drm_i915_gem_object *obj, { struct drm_device *dev = obj->base.dev; uint32_t page_offset = offset_in_page(reloc->offset); - uint64_t delta = reloc->delta + target_offset; + uint64_t delta = relocation_target(reloc, target_offset); char *vaddr; int ret; @@ -264,7 +289,7 @@ relocate_entry_cpu(struct drm_i915_gem_object *obj, if (ret) return ret; - vaddr = kmap_atomic(i915_gem_object_get_page(obj, + vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj, reloc->offset >> PAGE_SHIFT)); *(uint32_t *)(vaddr + page_offset) = lower_32_bits(delta); @@ -273,7 +298,7 @@ relocate_entry_cpu(struct drm_i915_gem_object *obj, if (page_offset == 0) { kunmap_atomic(vaddr); - vaddr = kmap_atomic(i915_gem_object_get_page(obj, + vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj, (reloc->offset + sizeof(uint32_t)) >> PAGE_SHIFT)); } @@ -292,7 +317,7 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj, { struct drm_device *dev = obj->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - uint64_t delta = reloc->delta + target_offset; + uint64_t delta = relocation_target(reloc, target_offset); uint64_t offset; void __iomem *reloc_page; int ret; @@ -347,7 +372,7 @@ relocate_entry_clflush(struct drm_i915_gem_object *obj, { struct drm_device *dev = obj->base.dev; uint32_t page_offset = offset_in_page(reloc->offset); - uint64_t delta = (int)reloc->delta + target_offset; + uint64_t delta = relocation_target(reloc, target_offset); char *vaddr; int ret; @@ -355,7 +380,7 @@ relocate_entry_clflush(struct drm_i915_gem_object *obj, if (ret) return ret; - vaddr = kmap_atomic(i915_gem_object_get_page(obj, + vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj, reloc->offset >> PAGE_SHIFT)); clflush_write32(vaddr + page_offset, lower_32_bits(delta)); @@ -364,7 +389,7 @@ relocate_entry_clflush(struct drm_i915_gem_object *obj, if (page_offset == 0) { kunmap_atomic(vaddr); - vaddr = kmap_atomic(i915_gem_object_get_page(obj, + vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj, (reloc->offset + sizeof(uint32_t)) >> PAGE_SHIFT)); } @@ -395,7 +420,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, target_i915_obj = target_vma->obj; target_obj = &target_vma->obj->base; - target_offset = target_vma->node.start; + target_offset = gen8_canonical_addr(target_vma->node.start); /* Sandybridge PPGTT errata: We need a global gtt mapping for MI and * pipe_control writes because the gpu doesn't properly redirect them @@ -599,6 +624,8 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma, flags |= PIN_GLOBAL | PIN_MAPPABLE; if 
(entry->flags & __EXEC_OBJECT_NEEDS_BIAS) flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS; + if (entry->flags & EXEC_OBJECT_PINNED) + flags |= entry->offset | PIN_OFFSET_FIXED; if ((flags & PIN_MAPPABLE) == 0) flags |= PIN_HIGH; } @@ -670,6 +697,10 @@ eb_vma_misplaced(struct i915_vma *vma) vma->node.start & (entry->alignment - 1)) return true; + if (entry->flags & EXEC_OBJECT_PINNED && + vma->node.start != entry->offset) + return true; + if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS && vma->node.start < BATCH_OFFSET_BIAS) return true; @@ -695,6 +726,7 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *ring, struct i915_vma *vma; struct i915_address_space *vm; struct list_head ordered_vmas; + struct list_head pinned_vmas; bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4; int retry; @@ -703,6 +735,7 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *ring, vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm; INIT_LIST_HEAD(&ordered_vmas); + INIT_LIST_HEAD(&pinned_vmas); while (!list_empty(vmas)) { struct drm_i915_gem_exec_object2 *entry; bool need_fence, need_mappable; @@ -721,7 +754,9 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *ring, obj->tiling_mode != I915_TILING_NONE; need_mappable = need_fence || need_reloc_mappable(vma); - if (need_mappable) { + if (entry->flags & EXEC_OBJECT_PINNED) + list_move_tail(&vma->exec_list, &pinned_vmas); + else if (need_mappable) { entry->flags |= __EXEC_OBJECT_NEEDS_MAP; list_move(&vma->exec_list, &ordered_vmas); } else @@ -731,6 +766,7 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *ring, obj->base.pending_write_domain = 0; } list_splice(&ordered_vmas, vmas); + list_splice(&pinned_vmas, vmas); /* Attempt to pin all of the buffers into the GTT. * This is done in 3 phases: @@ -983,6 +1019,21 @@ validate_exec_list(struct drm_device *dev, if (exec[i].flags & invalid_flags) return -EINVAL; + /* Offset can be used as input (EXEC_OBJECT_PINNED), reject + * any non-page-aligned or non-canonical addresses. + */ + if (exec[i].flags & EXEC_OBJECT_PINNED) { + if (exec[i].offset != + gen8_canonical_addr(exec[i].offset & PAGE_MASK)) + return -EINVAL; + + /* From drm_mm perspective address space is continuous, + * so from this point we're always using non-canonical + * form internally. + */ + exec[i].offset = gen8_noncanonical_addr(exec[i].offset); + } + if (exec[i].alignment && !is_power_of_2(exec[i].alignment)) return -EINVAL; @@ -1317,7 +1368,8 @@ eb_get_batch(struct eb_vmas *eb) * Note that actual hangs have only been observed on gen7, but for * paranoia do it everywhere. */ - vma->exec_entry->flags |= __EXEC_OBJECT_NEEDS_BIAS; + if ((vma->exec_entry->flags & EXEC_OBJECT_PINNED) == 0) + vma->exec_entry->flags |= __EXEC_OBJECT_NEEDS_BIAS; return vma->obj; } @@ -1675,6 +1727,8 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, /* Copy the new buffer offsets back to the user's exec list. 
*/ for (i = 0; i < args->buffer_count; i++) { + exec2_list[i].offset = + gen8_canonical_addr(exec2_list[i].offset); ret = __copy_to_user(&user_exec_list[i].offset, &exec2_list[i].offset, sizeof(user_exec_list[i].offset)); @@ -1740,6 +1794,8 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data, int i; for (i = 0; i < args->buffer_count; i++) { + exec2_list[i].offset = + gen8_canonical_addr(exec2_list[i].offset); ret = __copy_to_user(&user_exec_list[i].offset, &exec2_list[i].offset, sizeof(user_exec_list[i].offset)); diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index f4cd01df40db..56f4f2e58d53 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -140,8 +140,7 @@ static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt) #endif /* Early VLV doesn't have this */ - if (IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) && - dev->pdev->revision < 0xb) { + if (IS_VALLEYVIEW(dev) && dev->pdev->revision < 0xb) { DRM_DEBUG_DRIVER("disabling PPGTT on pre-B3 step VLV\n"); return 0; } @@ -770,10 +769,10 @@ static void gen8_ppgtt_clear_range(struct i915_address_space *vm, gen8_ppgtt_clear_pte_range(vm, &ppgtt->pdp, start, length, scratch_pte); } else { - uint64_t templ4, pml4e; + uint64_t pml4e; struct i915_page_directory_pointer *pdp; - gen8_for_each_pml4e(pdp, &ppgtt->pml4, start, length, templ4, pml4e) { + gen8_for_each_pml4e(pdp, &ppgtt->pml4, start, length, pml4e) { gen8_ppgtt_clear_pte_range(vm, pdp, start, length, scratch_pte); } @@ -839,10 +838,10 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm, cache_level); } else { struct i915_page_directory_pointer *pdp; - uint64_t templ4, pml4e; + uint64_t pml4e; uint64_t length = (uint64_t)pages->orig_nents << PAGE_SHIFT; - gen8_for_each_pml4e(pdp, &ppgtt->pml4, start, length, templ4, pml4e) { + gen8_for_each_pml4e(pdp, &ppgtt->pml4, start, length, pml4e) { gen8_ppgtt_insert_pte_entries(vm, pdp, &sg_iter, start, cache_level); } @@ -1020,10 +1019,9 @@ static int gen8_ppgtt_alloc_pagetabs(struct i915_address_space *vm, { struct drm_device *dev = vm->dev; struct i915_page_table *pt; - uint64_t temp; uint32_t pde; - gen8_for_each_pde(pt, pd, start, length, temp, pde) { + gen8_for_each_pde(pt, pd, start, length, pde) { /* Don't reallocate page tables */ if (test_bit(pde, pd->used_pdes)) { /* Scratch is never allocated this way */ @@ -1082,13 +1080,12 @@ gen8_ppgtt_alloc_page_directories(struct i915_address_space *vm, { struct drm_device *dev = vm->dev; struct i915_page_directory *pd; - uint64_t temp; uint32_t pdpe; uint32_t pdpes = I915_PDPES_PER_PDP(dev); WARN_ON(!bitmap_empty(new_pds, pdpes)); - gen8_for_each_pdpe(pd, pdp, start, length, temp, pdpe) { + gen8_for_each_pdpe(pd, pdp, start, length, pdpe) { if (test_bit(pdpe, pdp->used_pdpes)) continue; @@ -1136,12 +1133,11 @@ gen8_ppgtt_alloc_page_dirpointers(struct i915_address_space *vm, { struct drm_device *dev = vm->dev; struct i915_page_directory_pointer *pdp; - uint64_t temp; uint32_t pml4e; WARN_ON(!bitmap_empty(new_pdps, GEN8_PML4ES_PER_PML4)); - gen8_for_each_pml4e(pdp, pml4, start, length, temp, pml4e) { + gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) { if (!test_bit(pml4e, pml4->used_pml4es)) { pdp = alloc_pdp(dev); if (IS_ERR(pdp)) @@ -1225,7 +1221,6 @@ static int gen8_alloc_va_range_3lvl(struct i915_address_space *vm, struct i915_page_directory *pd; const uint64_t orig_start = start; const uint64_t orig_length = length; - uint64_t temp; uint32_t pdpe; uint32_t pdpes = 
I915_PDPES_PER_PDP(dev); int ret; @@ -1252,7 +1247,7 @@ static int gen8_alloc_va_range_3lvl(struct i915_address_space *vm, } /* For every page directory referenced, allocate page tables */ - gen8_for_each_pdpe(pd, pdp, start, length, temp, pdpe) { + gen8_for_each_pdpe(pd, pdp, start, length, pdpe) { ret = gen8_ppgtt_alloc_pagetabs(vm, pd, start, length, new_page_tables + pdpe * BITS_TO_LONGS(I915_PDES)); if (ret) @@ -1264,7 +1259,7 @@ static int gen8_alloc_va_range_3lvl(struct i915_address_space *vm, /* Allocations have completed successfully, so set the bitmaps, and do * the mappings. */ - gen8_for_each_pdpe(pd, pdp, start, length, temp, pdpe) { + gen8_for_each_pdpe(pd, pdp, start, length, pdpe) { gen8_pde_t *const page_directory = kmap_px(pd); struct i915_page_table *pt; uint64_t pd_len = length; @@ -1274,7 +1269,7 @@ static int gen8_alloc_va_range_3lvl(struct i915_address_space *vm, /* Every pd should be allocated, we just did that above. */ WARN_ON(!pd); - gen8_for_each_pde(pt, pd, pd_start, pd_len, temp, pde) { + gen8_for_each_pde(pt, pd, pd_start, pd_len, pde) { /* Same reasoning as pd */ WARN_ON(!pt); WARN_ON(!pd_len); @@ -1311,6 +1306,8 @@ static int gen8_alloc_va_range_3lvl(struct i915_address_space *vm, err_out: while (pdpe--) { + unsigned long temp; + for_each_set_bit(temp, new_page_tables + pdpe * BITS_TO_LONGS(I915_PDES), I915_PDES) free_pt(dev, pdp->page_directory[pdpe]->page_table[temp]); @@ -1333,7 +1330,7 @@ static int gen8_alloc_va_range_4lvl(struct i915_address_space *vm, struct i915_hw_ppgtt *ppgtt = container_of(vm, struct i915_hw_ppgtt, base); struct i915_page_directory_pointer *pdp; - uint64_t temp, pml4e; + uint64_t pml4e; int ret = 0; /* Do the pml4 allocations first, so we don't need to track the newly @@ -1352,7 +1349,7 @@ static int gen8_alloc_va_range_4lvl(struct i915_address_space *vm, "The allocation has spanned more than 512GB. 
" "It is highly likely this is incorrect."); - gen8_for_each_pml4e(pdp, pml4, start, length, temp, pml4e) { + gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) { WARN_ON(!pdp); ret = gen8_alloc_va_range_3lvl(vm, pdp, start, length); @@ -1392,10 +1389,9 @@ static void gen8_dump_pdp(struct i915_page_directory_pointer *pdp, struct seq_file *m) { struct i915_page_directory *pd; - uint64_t temp; uint32_t pdpe; - gen8_for_each_pdpe(pd, pdp, start, length, temp, pdpe) { + gen8_for_each_pdpe(pd, pdp, start, length, pdpe) { struct i915_page_table *pt; uint64_t pd_len = length; uint64_t pd_start = start; @@ -1405,7 +1401,7 @@ static void gen8_dump_pdp(struct i915_page_directory_pointer *pdp, continue; seq_printf(m, "\tPDPE #%d\n", pdpe); - gen8_for_each_pde(pt, pd, pd_start, pd_len, temp, pde) { + gen8_for_each_pde(pt, pd, pd_start, pd_len, pde) { uint32_t pte; gen8_pte_t *pt_vaddr; @@ -1455,11 +1451,11 @@ static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m) if (!USES_FULL_48BIT_PPGTT(vm->dev)) { gen8_dump_pdp(&ppgtt->pdp, start, length, scratch_pte, m); } else { - uint64_t templ4, pml4e; + uint64_t pml4e; struct i915_pml4 *pml4 = &ppgtt->pml4; struct i915_page_directory_pointer *pdp; - gen8_for_each_pml4e(pdp, pml4, start, length, templ4, pml4e) { + gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) { if (!test_bit(pml4e, pml4->used_pml4es)) continue; @@ -2355,6 +2351,9 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm, int i = 0; struct sg_page_iter sg_iter; dma_addr_t addr = 0; /* shut up gcc */ + int rpm_atomic_seq; + + rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv); for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) { addr = sg_dma_address(sg_iter.sg) + @@ -2381,6 +2380,34 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm, */ I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN); POSTING_READ(GFX_FLSH_CNTL_GEN6); + + assert_rpm_atomic_end(dev_priv, rpm_atomic_seq); +} + +struct insert_entries { + struct i915_address_space *vm; + struct sg_table *st; + uint64_t start; + enum i915_cache_level level; + u32 flags; +}; + +static int gen8_ggtt_insert_entries__cb(void *_arg) +{ + struct insert_entries *arg = _arg; + gen8_ggtt_insert_entries(arg->vm, arg->st, + arg->start, arg->level, arg->flags); + return 0; +} + +static void gen8_ggtt_insert_entries__BKL(struct i915_address_space *vm, + struct sg_table *st, + uint64_t start, + enum i915_cache_level level, + u32 flags) +{ + struct insert_entries arg = { vm, st, start, level, flags }; + stop_machine(gen8_ggtt_insert_entries__cb, &arg, NULL); } /* @@ -2401,6 +2428,9 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm, int i = 0; struct sg_page_iter sg_iter; dma_addr_t addr = 0; + int rpm_atomic_seq; + + rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv); for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) { addr = sg_page_iter_dma_address(&sg_iter); @@ -2425,6 +2455,8 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm, */ I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN); POSTING_READ(GFX_FLSH_CNTL_GEN6); + + assert_rpm_atomic_end(dev_priv, rpm_atomic_seq); } static void gen8_ggtt_clear_range(struct i915_address_space *vm, @@ -2439,6 +2471,9 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm, (gen8_pte_t __iomem *) dev_priv->gtt.gsm + first_entry; const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry; int i; + int rpm_atomic_seq; + + rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv); if (WARN(num_entries > max_entries, 
"First entry = %d; Num entries = %d (max=%d)\n", @@ -2451,6 +2486,8 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm, for (i = 0; i < num_entries; i++) gen8_set_pte(>t_base[i], scratch_pte); readl(gtt_base); + + assert_rpm_atomic_end(dev_priv, rpm_atomic_seq); } static void gen6_ggtt_clear_range(struct i915_address_space *vm, @@ -2465,6 +2502,9 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm, (gen6_pte_t __iomem *) dev_priv->gtt.gsm + first_entry; const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry; int i; + int rpm_atomic_seq; + + rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv); if (WARN(num_entries > max_entries, "First entry = %d; Num entries = %d (max=%d)\n", @@ -2477,6 +2517,8 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm, for (i = 0; i < num_entries; i++) iowrite32(scratch_pte, >t_base[i]); readl(gtt_base); + + assert_rpm_atomic_end(dev_priv, rpm_atomic_seq); } static void i915_ggtt_insert_entries(struct i915_address_space *vm, @@ -2484,11 +2526,17 @@ static void i915_ggtt_insert_entries(struct i915_address_space *vm, uint64_t start, enum i915_cache_level cache_level, u32 unused) { + struct drm_i915_private *dev_priv = vm->dev->dev_private; unsigned int flags = (cache_level == I915_CACHE_NONE) ? AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY; + int rpm_atomic_seq; + + rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv); intel_gtt_insert_sg_entries(pages, start >> PAGE_SHIFT, flags); + assert_rpm_atomic_end(dev_priv, rpm_atomic_seq); + } static void i915_ggtt_clear_range(struct i915_address_space *vm, @@ -2496,9 +2544,16 @@ static void i915_ggtt_clear_range(struct i915_address_space *vm, uint64_t length, bool unused) { + struct drm_i915_private *dev_priv = vm->dev->dev_private; unsigned first_entry = start >> PAGE_SHIFT; unsigned num_entries = length >> PAGE_SHIFT; + int rpm_atomic_seq; + + rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv); + intel_gtt_clear_range(first_entry, num_entries); + + assert_rpm_atomic_end(dev_priv, rpm_atomic_seq); } static int ggtt_bind_vma(struct i915_vma *vma, @@ -2531,26 +2586,6 @@ static int ggtt_bind_vma(struct i915_vma *vma, return 0; } -struct ggtt_bind_vma__cb { - struct i915_vma *vma; - enum i915_cache_level cache_level; - u32 flags; -}; - -static int ggtt_bind_vma__cb(void *_arg) -{ - struct ggtt_bind_vma__cb *arg = _arg; - return ggtt_bind_vma(arg->vma, arg->cache_level, arg->flags); -} - -static int ggtt_bind_vma__BKL(struct i915_vma *vma, - enum i915_cache_level cache_level, - u32 flags) -{ - struct ggtt_bind_vma__cb arg = { vma, cache_level, flags }; - return stop_machine(ggtt_bind_vma__cb, &arg, NULL); -} - static int aliasing_gtt_bind_vma(struct i915_vma *vma, enum i915_cache_level cache_level, u32 flags) @@ -3019,8 +3054,8 @@ static int gen8_gmch_probe(struct drm_device *dev, dev_priv->gtt.base.bind_vma = ggtt_bind_vma; dev_priv->gtt.base.unbind_vma = ggtt_unbind_vma; - if (IS_CHERRYVIEW(dev)) - dev_priv->gtt.base.bind_vma = ggtt_bind_vma__BKL; + if (IS_CHERRYVIEW(dev_priv)) + dev_priv->gtt.base.insert_entries = gen8_ggtt_insert_entries__BKL; return ret; } diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h index 877c32c78a6a..b448ad832dcf 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.h +++ b/drivers/gpu/drm/i915/i915_gem_gtt.h @@ -455,32 +455,29 @@ static inline uint32_t gen6_pde_index(uint32_t addr) * between from start until start + length. On gen8+ it simply iterates * over every page directory entry in a page directory. 
*/ -#define gen8_for_each_pde(pt, pd, start, length, temp, iter) \ - for (iter = gen8_pde_index(start); \ - length > 0 && iter < I915_PDES ? \ - (pt = (pd)->page_table[iter]), 1 : 0; \ - iter++, \ - temp = ALIGN(start+1, 1 << GEN8_PDE_SHIFT) - start, \ - temp = min(temp, length), \ - start += temp, length -= temp) +#define gen8_for_each_pde(pt, pd, start, length, iter) \ + for (iter = gen8_pde_index(start); \ + length > 0 && iter < I915_PDES && \ + (pt = (pd)->page_table[iter], true); \ + ({ u64 temp = ALIGN(start+1, 1 << GEN8_PDE_SHIFT); \ + temp = min(temp - start, length); \ + start += temp, length -= temp; }), ++iter) -#define gen8_for_each_pdpe(pd, pdp, start, length, temp, iter) \ - for (iter = gen8_pdpe_index(start); \ - length > 0 && (iter < I915_PDPES_PER_PDP(dev)) ? \ - (pd = (pdp)->page_directory[iter]), 1 : 0; \ - iter++, \ - temp = ALIGN(start+1, 1 << GEN8_PDPE_SHIFT) - start, \ - temp = min(temp, length), \ - start += temp, length -= temp) +#define gen8_for_each_pdpe(pd, pdp, start, length, iter) \ + for (iter = gen8_pdpe_index(start); \ + length > 0 && iter < I915_PDPES_PER_PDP(dev) && \ + (pd = (pdp)->page_directory[iter], true); \ + ({ u64 temp = ALIGN(start+1, 1 << GEN8_PDPE_SHIFT); \ + temp = min(temp - start, length); \ + start += temp, length -= temp; }), ++iter) -#define gen8_for_each_pml4e(pdp, pml4, start, length, temp, iter) \ - for (iter = gen8_pml4e_index(start); \ - length > 0 && iter < GEN8_PML4ES_PER_PML4 ? \ - (pdp = (pml4)->pdps[iter]), 1 : 0; \ - iter++, \ - temp = ALIGN(start+1, 1ULL << GEN8_PML4E_SHIFT) - start, \ - temp = min(temp, length), \ - start += temp, length -= temp) +#define gen8_for_each_pml4e(pdp, pml4, start, length, iter) \ + for (iter = gen8_pml4e_index(start); \ + length > 0 && iter < GEN8_PML4ES_PER_PML4 && \ + (pdp = (pml4)->pdps[iter], true); \ + ({ u64 temp = ALIGN(start+1, 1ULL << GEN8_PML4E_SHIFT); \ + temp = min(temp - start, length); \ + start += temp, length -= temp; }), ++iter) static inline uint32_t gen8_pte_index(uint64_t address) { diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c index 5026a6267a88..fc7e6d5c6251 100644 --- a/drivers/gpu/drm/i915/i915_gem_render_state.c +++ b/drivers/gpu/drm/i915/i915_gem_render_state.c @@ -103,7 +103,7 @@ static int render_state_setup(struct render_state *so) if (ret) return ret; - page = sg_page(so->obj->pages->sgl); + page = i915_gem_object_get_dirty_page(so->obj, 0); d = kmap(page); while (i < rodata->batch_items) { diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c index ed9f1002ab36..05aa7e61cbe0 100644 --- a/drivers/gpu/drm/i915/i915_guc_submission.c +++ b/drivers/gpu/drm/i915/i915_guc_submission.c @@ -86,7 +86,6 @@ static int host2guc_action(struct intel_guc *guc, u32 *data, u32 len) return -EINVAL; intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); - spin_lock(&dev_priv->guc.host2guc_lock); dev_priv->guc.action_count += 1; dev_priv->guc.action_cmd = data[0]; @@ -119,7 +118,6 @@ static int host2guc_action(struct intel_guc *guc, u32 *data, u32 len) } dev_priv->guc.action_status = status; - spin_unlock(&dev_priv->guc.host2guc_lock); intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); return ret; @@ -292,16 +290,12 @@ static uint32_t select_doorbell_cacheline(struct intel_guc *guc) const uint32_t cacheline_size = cache_line_size(); uint32_t offset; - spin_lock(&guc->host2guc_lock); - /* Doorbell uses a single cache line within a page */ offset = offset_in_page(guc->db_cacheline); /* 
Moving to next cache line to reduce contention */ guc->db_cacheline += cacheline_size; - spin_unlock(&guc->host2guc_lock); - DRM_DEBUG_DRIVER("selected doorbell cacheline 0x%x, next 0x%x, linesize %u\n", offset, guc->db_cacheline, cacheline_size); @@ -322,13 +316,11 @@ static uint16_t assign_doorbell(struct intel_guc *guc, uint32_t priority) const uint16_t end = start + half; uint16_t id; - spin_lock(&guc->host2guc_lock); id = find_next_zero_bit(guc->doorbell_bitmap, end, start); if (id == end) id = GUC_INVALID_DOORBELL_ID; else bitmap_set(guc->doorbell_bitmap, id, 1); - spin_unlock(&guc->host2guc_lock); DRM_DEBUG_DRIVER("assigned %s priority doorbell id 0x%x\n", hi_pri ? "high" : "normal", id); @@ -338,9 +330,7 @@ static uint16_t assign_doorbell(struct intel_guc *guc, uint32_t priority) static void release_doorbell(struct intel_guc *guc, uint16_t id) { - spin_lock(&guc->host2guc_lock); bitmap_clear(guc->doorbell_bitmap, id, 1); - spin_unlock(&guc->host2guc_lock); } /* @@ -487,16 +477,13 @@ static int guc_get_workqueue_space(struct i915_guc_client *gc, u32 *offset) struct guc_process_desc *desc; void *base; u32 size = sizeof(struct guc_wq_item); - int ret = 0, timeout_counter = 200; + int ret = -ETIMEDOUT, timeout_counter = 200; base = kmap_atomic(i915_gem_object_get_page(gc->client_obj, 0)); desc = base + gc->proc_desc_offset; while (timeout_counter-- > 0) { - ret = wait_for_atomic(CIRC_SPACE(gc->wq_tail, desc->head, - gc->wq_size) >= size, 1); - - if (!ret) { + if (CIRC_SPACE(gc->wq_tail, desc->head, gc->wq_size) >= size) { *offset = gc->wq_tail; /* advance the tail for next workqueue item */ @@ -505,7 +492,11 @@ static int guc_get_workqueue_space(struct i915_guc_client *gc, u32 *offset) /* this will break the loop */ timeout_counter = 0; + ret = 0; } + + if (timeout_counter) + usleep_range(1000, 2000); }; kunmap_atomic(base); @@ -577,7 +568,7 @@ static void lr_context_update(struct drm_i915_gem_request *rq) WARN_ON(!i915_gem_obj_is_pinned(ctx_obj)); WARN_ON(!i915_gem_obj_is_pinned(rb_obj)); - page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN); + page = i915_gem_object_get_dirty_page(ctx_obj, LRC_STATE_PN); reg_state = kmap_atomic(page); reg_state[CTX_RING_BUFFER_START+1] = i915_gem_obj_ggtt_offset(rb_obj); @@ -597,15 +588,12 @@ int i915_guc_submit(struct i915_guc_client *client, { struct intel_guc *guc = client->guc; enum intel_ring_id ring_id = rq->ring->id; - unsigned long flags; int q_ret, b_ret; /* Need this because of the deferred pin ctx and ring */ /* Shall we move this right after ring is pinned? 
*/ lr_context_update(rq); - spin_lock_irqsave(&client->wq_lock, flags); - q_ret = guc_add_workqueue_item(client, rq); if (q_ret == 0) b_ret = guc_ring_doorbell(client); @@ -620,12 +608,8 @@ int i915_guc_submit(struct i915_guc_client *client, } else { client->retcode = 0; } - spin_unlock_irqrestore(&client->wq_lock, flags); - - spin_lock(&guc->host2guc_lock); guc->submissions[ring_id] += 1; guc->last_seqno[ring_id] = rq->seqno; - spin_unlock(&guc->host2guc_lock); return q_ret; } @@ -677,7 +661,7 @@ static struct drm_i915_gem_object *gem_allocate_guc_obj(struct drm_device *dev, /** * gem_release_guc_obj() - Release gem object allocated for GuC usage * @obj: gem obj to be released - */ + */ static void gem_release_guc_obj(struct drm_i915_gem_object *obj) { if (!obj) @@ -768,7 +752,6 @@ static struct i915_guc_client *guc_client_alloc(struct drm_device *dev, client->client_obj = obj; client->wq_offset = GUC_DB_SIZE; client->wq_size = GUC_WQ_SIZE; - spin_lock_init(&client->wq_lock); client->doorbell_offset = select_doorbell_cacheline(guc); @@ -871,8 +854,6 @@ int i915_guc_submission_init(struct drm_device *dev) if (!guc->ctx_pool_obj) return -ENOMEM; - spin_lock_init(&dev_priv->guc.host2guc_lock); - ida_init(&guc->ctx_ids); guc_create_log(guc); diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index c8ba94968aaf..fa8afa7860ae 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -215,9 +215,9 @@ void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv, * @interrupt_mask: mask of interrupt bits to update * @enabled_irq_mask: mask of interrupt bits to enable */ -static void ilk_update_display_irq(struct drm_i915_private *dev_priv, - uint32_t interrupt_mask, - uint32_t enabled_irq_mask) +void ilk_update_display_irq(struct drm_i915_private *dev_priv, + uint32_t interrupt_mask, + uint32_t enabled_irq_mask) { uint32_t new_val; @@ -239,18 +239,6 @@ static void ilk_update_display_irq(struct drm_i915_private *dev_priv, } } -void -ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask) -{ - ilk_update_display_irq(dev_priv, mask, mask); -} - -void -ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask) -{ - ilk_update_display_irq(dev_priv, mask, 0); -} - /** * ilk_update_gt_irq - update GTIMR * @dev_priv: driver private @@ -300,11 +288,11 @@ static i915_reg_t gen6_pm_ier(struct drm_i915_private *dev_priv) } /** - * snb_update_pm_irq - update GEN6_PMIMR - * @dev_priv: driver private - * @interrupt_mask: mask of interrupt bits to update - * @enabled_irq_mask: mask of interrupt bits to enable - */ + * snb_update_pm_irq - update GEN6_PMIMR + * @dev_priv: driver private + * @interrupt_mask: mask of interrupt bits to update + * @enabled_irq_mask: mask of interrupt bits to enable + */ static void snb_update_pm_irq(struct drm_i915_private *dev_priv, uint32_t interrupt_mask, uint32_t enabled_irq_mask) @@ -418,11 +406,11 @@ void gen6_disable_rps_interrupts(struct drm_device *dev) } /** - * bdw_update_port_irq - update DE port interrupt - * @dev_priv: driver private - * @interrupt_mask: mask of interrupt bits to update - * @enabled_irq_mask: mask of interrupt bits to enable - */ + * bdw_update_port_irq - update DE port interrupt + * @dev_priv: driver private + * @interrupt_mask: mask of interrupt bits to update + * @enabled_irq_mask: mask of interrupt bits to enable + */ static void bdw_update_port_irq(struct drm_i915_private *dev_priv, uint32_t interrupt_mask, uint32_t enabled_irq_mask) @@ -449,6 +437,38 @@ 
static void bdw_update_port_irq(struct drm_i915_private *dev_priv, } } +/** + * bdw_update_pipe_irq - update DE pipe interrupt + * @dev_priv: driver private + * @pipe: pipe whose interrupt to update + * @interrupt_mask: mask of interrupt bits to update + * @enabled_irq_mask: mask of interrupt bits to enable + */ +void bdw_update_pipe_irq(struct drm_i915_private *dev_priv, + enum pipe pipe, + uint32_t interrupt_mask, + uint32_t enabled_irq_mask) +{ + uint32_t new_val; + + assert_spin_locked(&dev_priv->irq_lock); + + WARN_ON(enabled_irq_mask & ~interrupt_mask); + + if (WARN_ON(!intel_irqs_enabled(dev_priv))) + return; + + new_val = dev_priv->de_irq_mask[pipe]; + new_val &= ~interrupt_mask; + new_val |= (~enabled_irq_mask & interrupt_mask); + + if (new_val != dev_priv->de_irq_mask[pipe]) { + dev_priv->de_irq_mask[pipe] = new_val; + I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]); + POSTING_READ(GEN8_DE_PIPE_IMR(pipe)); + } +} + /** * ibx_display_interrupt_update - update SDEIMR * @dev_priv: driver private @@ -561,7 +581,7 @@ i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, { u32 enable_mask; - if (IS_VALLEYVIEW(dev_priv->dev)) + if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev, status_mask); else @@ -575,7 +595,7 @@ i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, { u32 enable_mask; - if (IS_VALLEYVIEW(dev_priv->dev)) + if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev, status_mask); else @@ -1083,6 +1103,14 @@ static void gen6_pm_rps_work(struct work_struct *work) spin_unlock_irq(&dev_priv->irq_lock); return; } + + /* + * The RPS work is synced during runtime suspend, we don't require a + * wakeref. TODO: instead of disabling the asserts make sure that we + * always hold an RPM reference while the work is running. 
+ */ + DISABLE_RPM_WAKEREF_ASSERTS(dev_priv); + pm_iir = dev_priv->rps.pm_iir; dev_priv->rps.pm_iir = 0; /* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */ @@ -1095,7 +1123,7 @@ static void gen6_pm_rps_work(struct work_struct *work) WARN_ON(pm_iir & ~dev_priv->pm_rps_events); if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost) - return; + goto out; mutex_lock(&dev_priv->rps.hw_lock); @@ -1150,6 +1178,8 @@ static void gen6_pm_rps_work(struct work_struct *work) intel_set_rps(dev_priv->dev, new_delay); mutex_unlock(&dev_priv->rps.hw_lock); +out: + ENABLE_RPM_WAKEREF_ASSERTS(dev_priv); } @@ -1703,7 +1733,7 @@ static void i9xx_hpd_irq_handler(struct drm_device *dev) */ POSTING_READ(PORT_HOTPLUG_STAT); - if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) { + if (IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X; if (hotplug_trigger) { @@ -1738,6 +1768,9 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg) if (!intel_irqs_enabled(dev_priv)) return IRQ_NONE; + /* IRQs are synced during runtime_suspend, we don't require a wakeref */ + disable_rpm_wakeref_asserts(dev_priv); + while (true) { /* Find, clear, then process each source of interrupt */ @@ -1772,6 +1805,8 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg) } out: + enable_rpm_wakeref_asserts(dev_priv); + return ret; } @@ -1785,6 +1820,9 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg) if (!intel_irqs_enabled(dev_priv)) return IRQ_NONE; + /* IRQs are synced during runtime_suspend, we don't require a wakeref */ + disable_rpm_wakeref_asserts(dev_priv); + for (;;) { master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL; iir = I915_READ(VLV_IIR); @@ -1815,6 +1853,8 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg) POSTING_READ(GEN8_MASTER_IRQ); } + enable_rpm_wakeref_asserts(dev_priv); + return ret; } @@ -1824,8 +1864,24 @@ static void ibx_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger, struct drm_i915_private *dev_priv = to_i915(dev); u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; + /* + * Somehow the PCH doesn't seem to really ack the interrupt to the CPU + * unless we touch the hotplug register, even if hotplug_trigger is + * zero. Not acking leads to "The master control interrupt lied (SDE)!" + * errors. 
+ */ dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); + if (!hotplug_trigger) { + u32 mask = PORTA_HOTPLUG_STATUS_MASK | + PORTD_HOTPLUG_STATUS_MASK | + PORTC_HOTPLUG_STATUS_MASK | + PORTB_HOTPLUG_STATUS_MASK; + dig_hotplug_reg &= ~mask; + } + I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); + if (!hotplug_trigger) + return; intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger, dig_hotplug_reg, hpd, @@ -1840,8 +1896,7 @@ static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir) int pipe; u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK; - if (hotplug_trigger) - ibx_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx); + ibx_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx); if (pch_iir & SDE_AUDIO_POWER_MASK) { int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >> @@ -1934,8 +1989,7 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir) int pipe; u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT; - if (hotplug_trigger) - ibx_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt); + ibx_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt); if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) { int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> @@ -2131,6 +2185,9 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg) if (!intel_irqs_enabled(dev_priv)) return IRQ_NONE; + /* IRQs are synced during runtime_suspend, we don't require a wakeref */ + disable_rpm_wakeref_asserts(dev_priv); + /* We get interrupts on unclaimed registers, so check for this before we * do any I915_{READ,WRITE}. */ intel_uncore_check_errors(dev); @@ -2189,6 +2246,9 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg) POSTING_READ(SDEIER); } + /* IRQs are synced during runtime_suspend, we don't require a wakeref */ + enable_rpm_wakeref_asserts(dev_priv); + return ret; } @@ -2221,6 +2281,9 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg) if (!intel_irqs_enabled(dev_priv)) return IRQ_NONE; + /* IRQs are synced during runtime_suspend, we don't require a wakeref */ + disable_rpm_wakeref_asserts(dev_priv); + if (INTEL_INFO(dev_priv)->gen >= 9) aux_mask |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C | GEN9_AUX_CHANNEL_D; @@ -2228,7 +2291,7 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg) master_ctl = I915_READ_FW(GEN8_MASTER_IRQ); master_ctl &= ~GEN8_MASTER_IRQ_CONTROL; if (!master_ctl) - return IRQ_NONE; + goto out; I915_WRITE_FW(GEN8_MASTER_IRQ, 0); @@ -2363,6 +2426,9 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg) I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); POSTING_READ_FW(GEN8_MASTER_IRQ); +out: + enable_rpm_wakeref_asserts(dev_priv); + return ret; } @@ -2645,7 +2711,7 @@ static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe) DE_PIPE_VBLANK(pipe); spin_lock_irqsave(&dev_priv->irq_lock, irqflags); - ironlake_enable_display_irq(dev_priv, bit); + ilk_enable_display_irq(dev_priv, bit); spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); return 0; @@ -2670,10 +2736,9 @@ static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe) unsigned long irqflags; spin_lock_irqsave(&dev_priv->irq_lock, irqflags); - dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_VBLANK; - I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]); - POSTING_READ(GEN8_DE_PIPE_IMR(pipe)); + bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK); spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); + return 0; } @@ -2700,7 +2765,7 @@ static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe) DE_PIPE_VBLANK(pipe); spin_lock_irqsave(&dev_priv->irq_lock, 
irqflags); - ironlake_disable_display_irq(dev_priv, bit); + ilk_disable_display_irq(dev_priv, bit); spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); } @@ -2721,9 +2786,7 @@ static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe) unsigned long irqflags; spin_lock_irqsave(&dev_priv->irq_lock, irqflags); - dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_VBLANK; - I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]); - POSTING_READ(GEN8_DE_PIPE_IMR(pipe)); + bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK); spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); } @@ -2962,6 +3025,13 @@ static void i915_hangcheck_elapsed(struct work_struct *work) if (!i915.enable_hangcheck) return; + /* + * The hangcheck work is synced during runtime suspend, we don't + * require a wakeref. TODO: instead of disabling the asserts make + * sure that we hold a reference when this work is running. + */ + DISABLE_RPM_WAKEREF_ASSERTS(dev_priv); + for_each_ring(ring, dev_priv, i) { u64 acthd; u32 seqno; @@ -3053,13 +3123,18 @@ static void i915_hangcheck_elapsed(struct work_struct *work) } } - if (rings_hung) - return i915_handle_error(dev, true, "Ring hung"); + if (rings_hung) { + i915_handle_error(dev, true, "Ring hung"); + goto out; + } if (busy_count) /* Reset timer case chip hangs without another request * being added */ i915_queue_hangcheck(dev); + +out: + ENABLE_RPM_WAKEREF_ASSERTS(dev_priv); } void i915_queue_hangcheck(struct drm_device *dev) @@ -3452,7 +3527,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev) * setup is guaranteed to run in single-threaded context. But we * need it to make the assert_spin_locked happy. */ spin_lock_irq(&dev_priv->irq_lock); - ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT); + ilk_enable_display_irq(dev_priv, DE_PCU_EVENT); spin_unlock_irq(&dev_priv->irq_lock); } @@ -3851,13 +3926,18 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg) u16 flip_mask = I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; + irqreturn_t ret; if (!intel_irqs_enabled(dev_priv)) return IRQ_NONE; + /* IRQs are synced during runtime_suspend, we don't require a wakeref */ + disable_rpm_wakeref_asserts(dev_priv); + + ret = IRQ_NONE; iir = I915_READ16(IIR); if (iir == 0) - return IRQ_NONE; + goto out; while (iir & ~flip_mask) { /* Can't rely on pipestat interrupt bit in iir as it might @@ -3906,8 +3986,12 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg) iir = new_iir; } + ret = IRQ_HANDLED; - return IRQ_HANDLED; +out: + enable_rpm_wakeref_asserts(dev_priv); + + return ret; } static void i8xx_irq_uninstall(struct drm_device * dev) @@ -4036,6 +4120,9 @@ static irqreturn_t i915_irq_handler(int irq, void *arg) if (!intel_irqs_enabled(dev_priv)) return IRQ_NONE; + /* IRQs are synced during runtime_suspend, we don't require a wakeref */ + disable_rpm_wakeref_asserts(dev_priv); + iir = I915_READ(IIR); do { bool irq_received = (iir & ~flip_mask) != 0; @@ -4118,6 +4205,8 @@ static irqreturn_t i915_irq_handler(int irq, void *arg) iir = new_iir; } while (iir & ~flip_mask); + enable_rpm_wakeref_asserts(dev_priv); + return ret; } @@ -4257,6 +4346,9 @@ static irqreturn_t i965_irq_handler(int irq, void *arg) if (!intel_irqs_enabled(dev_priv)) return IRQ_NONE; + /* IRQs are synced during runtime_suspend, we don't require a wakeref */ + disable_rpm_wakeref_asserts(dev_priv); + iir = I915_READ(IIR); for (;;) { @@ -4342,6 +4434,8 @@ static irqreturn_t i965_irq_handler(int irq, void *arg) iir = new_iir; } + 
enable_rpm_wakeref_asserts(dev_priv); + return ret; } @@ -4385,7 +4479,7 @@ void intel_irq_init(struct drm_i915_private *dev_priv) INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work); /* Let's track the enabled rps events */ - if (IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) + if (IS_VALLEYVIEW(dev_priv)) /* WaGsvRC0ResidencyMethod:vlv */ dev_priv->pm_rps_events = GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED; else diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 1a12d44b9710..007ae83a4086 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -855,31 +855,31 @@ enum skl_disp_power_wells { * * Note: DDI0 is digital port B, DD1 is digital port C, and DDI2 is * digital port D (CHV) or port A (BXT). - */ -/* - * Dual channel PHY (VLV/CHV/BXT) - * --------------------------------- - * | CH0 | CH1 | - * | CMN/PLL/REF | CMN/PLL/REF | - * |---------------|---------------| Display PHY - * | PCS01 | PCS23 | PCS01 | PCS23 | - * |-------|-------|-------|-------| - * |TX0|TX1|TX2|TX3|TX0|TX1|TX2|TX3| - * --------------------------------- - * | DDI0 | DDI1 | DP/HDMI ports - * --------------------------------- * - * Single channel PHY (CHV/BXT) - * ----------------- - * | CH0 | - * | CMN/PLL/REF | - * |---------------| Display PHY - * | PCS01 | PCS23 | - * |-------|-------| - * |TX0|TX1|TX2|TX3| - * ----------------- - * | DDI2 | DP/HDMI port - * ----------------- + * + * Dual channel PHY (VLV/CHV/BXT) + * --------------------------------- + * | CH0 | CH1 | + * | CMN/PLL/REF | CMN/PLL/REF | + * |---------------|---------------| Display PHY + * | PCS01 | PCS23 | PCS01 | PCS23 | + * |-------|-------|-------|-------| + * |TX0|TX1|TX2|TX3|TX0|TX1|TX2|TX3| + * --------------------------------- + * | DDI0 | DDI1 | DP/HDMI ports + * --------------------------------- + * + * Single channel PHY (CHV/BXT) + * ----------------- + * | CH0 | + * | CMN/PLL/REF | + * |---------------| Display PHY + * | PCS01 | PCS23 | + * |-------|-------| + * |TX0|TX1|TX2|TX3| + * ----------------- + * | DDI2 | DP/HDMI port + * ----------------- */ #define DPIO_DEVFN 0 @@ -2972,6 +2972,13 @@ enum skl_disp_power_wells { #define OGAMC1 _MMIO(0x30020) #define OGAMC0 _MMIO(0x30024) +/* + * GEN9 clock gating regs + */ +#define GEN9_CLKGATE_DIS_0 _MMIO(0x46530) +#define PWM2_GATING_DIS (1 << 14) +#define PWM1_GATING_DIS (1 << 13) + /* * Display engine regs */ @@ -7320,6 +7327,7 @@ enum skl_disp_power_wells { #define SBI_READY (0x0<<0) /* SBI offsets */ +#define SBI_SSCDIVINTPHASE 0x0200 #define SBI_SSCDIVINTPHASE6 0x0600 #define SBI_SSCDIVINTPHASE_DIVSEL_MASK ((0x7f)<<1) #define SBI_SSCDIVINTPHASE_DIVSEL(x) ((x)<<1) @@ -7327,6 +7335,7 @@ enum skl_disp_power_wells { #define SBI_SSCDIVINTPHASE_INCVAL(x) ((x)<<8) #define SBI_SSCDIVINTPHASE_DIR(x) ((x)<<15) #define SBI_SSCDIVINTPHASE_PROPAGATE (1<<0) +#define SBI_SSCDITHPHASE 0x0204 #define SBI_SSCCTL 0x020c #define SBI_SSCCTL6 0x060C #define SBI_SSCCTL_PATHALT (1<<3) @@ -7549,6 +7558,7 @@ enum skl_disp_power_wells { #define SFUSE_STRAP _MMIO(0xc2014) #define SFUSE_STRAP_FUSE_LOCK (1<<13) #define SFUSE_STRAP_DISPLAY_DISABLED (1<<7) +#define SFUSE_STRAP_CRT_DISABLED (1<<6) #define SFUSE_STRAP_DDIB_DETECTED (1<<2) #define SFUSE_STRAP_DDIC_DETECTED (1<<1) #define SFUSE_STRAP_DDID_DETECTED (1<<0) @@ -7706,7 +7716,7 @@ enum skl_disp_power_wells { #define BXT_DSI_PLL_RATIO_MAX 0x7D #define BXT_DSI_PLL_RATIO_MIN 0x22 #define BXT_DSI_PLL_RATIO_MASK 0xFF -#define BXT_REF_CLOCK_KHZ 19500 +#define 
BXT_REF_CLOCK_KHZ 19200 #define BXT_DSI_PLL_ENABLE _MMIO(0x46080) #define BXT_DSI_PLL_DO_ENABLE (1 << 31) @@ -8092,9 +8102,7 @@ enum skl_disp_power_wells { #define RGB_FLIP_TO_BGR (1 << 2) #define BXT_PIPE_SELECT_MASK (7 << 7) -#define BXT_PIPE_SELECT_C (2 << 7) -#define BXT_PIPE_SELECT_B (1 << 7) -#define BXT_PIPE_SELECT_A (0 << 7) +#define BXT_PIPE_SELECT(pipe) ((pipe) << 7) #define _MIPIA_DATA_ADDRESS (dev_priv->mipi_mmio_base + 0xb108) #define _MIPIC_DATA_ADDRESS (dev_priv->mipi_mmio_base + 0xb908) diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c index 2d9182189422..a2aa09ce3202 100644 --- a/drivers/gpu/drm/i915/i915_suspend.c +++ b/drivers/gpu/drm/i915/i915_suspend.c @@ -49,7 +49,7 @@ static void i915_save_display(struct drm_device *dev) dev_priv->regfile.savePP_ON_DELAYS = I915_READ(PCH_PP_ON_DELAYS); dev_priv->regfile.savePP_OFF_DELAYS = I915_READ(PCH_PP_OFF_DELAYS); dev_priv->regfile.savePP_DIVISOR = I915_READ(PCH_PP_DIVISOR); - } else if (!IS_VALLEYVIEW(dev)) { + } else if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) { dev_priv->regfile.savePP_CONTROL = I915_READ(PP_CONTROL); dev_priv->regfile.savePP_ON_DELAYS = I915_READ(PP_ON_DELAYS); dev_priv->regfile.savePP_OFF_DELAYS = I915_READ(PP_OFF_DELAYS); @@ -84,7 +84,7 @@ static void i915_restore_display(struct drm_device *dev) I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->regfile.savePP_OFF_DELAYS); I915_WRITE(PCH_PP_DIVISOR, dev_priv->regfile.savePP_DIVISOR); I915_WRITE(PCH_PP_CONTROL, dev_priv->regfile.savePP_CONTROL); - } else if (!IS_VALLEYVIEW(dev)) { + } else if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) { I915_WRITE(PP_ON_DELAYS, dev_priv->regfile.savePP_ON_DELAYS); I915_WRITE(PP_OFF_DELAYS, dev_priv->regfile.savePP_OFF_DELAYS); I915_WRITE(PP_DIVISOR, dev_priv->regfile.savePP_DIVISOR); diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c index f929c61f0fa2..37e3f0ddf8e0 100644 --- a/drivers/gpu/drm/i915/i915_sysfs.c +++ b/drivers/gpu/drm/i915/i915_sysfs.c @@ -49,7 +49,7 @@ static u32 calc_residency(struct drm_device *dev, intel_runtime_pm_get(dev_priv); /* On VLV and CHV, residency time is in CZ units rather than 1.28us */ - if (IS_VALLEYVIEW(dev)) { + if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { units = 1; div = dev_priv->czclk_freq; @@ -284,7 +284,7 @@ static ssize_t gt_act_freq_mhz_show(struct device *kdev, intel_runtime_pm_get(dev_priv); mutex_lock(&dev_priv->rps.hw_lock); - if (IS_VALLEYVIEW(dev_priv->dev)) { + if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { u32 freq; freq = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS); ret = intel_gpu_freq(dev_priv, (freq >> 8) & 0xff); @@ -598,7 +598,7 @@ void i915_setup_sysfs(struct drm_device *dev) if (ret) DRM_ERROR("RC6p residency sysfs setup failed\n"); } - if (IS_VALLEYVIEW(dev)) { + if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { ret = sysfs_merge_group(&dev->primary->kdev->kobj, &media_rc6_attr_group); if (ret) @@ -619,7 +619,7 @@ void i915_setup_sysfs(struct drm_device *dev) } ret = 0; - if (IS_VALLEYVIEW(dev)) + if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) ret = sysfs_create_files(&dev->primary->kdev->kobj, vlv_attrs); else if (INTEL_INFO(dev)->gen >= 6) ret = sysfs_create_files(&dev->primary->kdev->kobj, gen6_attrs); @@ -635,7 +635,7 @@ void i915_setup_sysfs(struct drm_device *dev) void i915_teardown_sysfs(struct drm_device *dev) { sysfs_remove_bin_file(&dev->primary->kdev->kobj, &error_state_attr); - if (IS_VALLEYVIEW(dev)) + if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) 
sysfs_remove_files(&dev->primary->kdev->kobj, vlv_attrs); else sysfs_remove_files(&dev->primary->kdev->kobj, gen6_attrs); diff --git a/drivers/gpu/drm/i915/intel_atomic.c b/drivers/gpu/drm/i915/intel_atomic.c index 643f342de33b..d0b1c9afa35e 100644 --- a/drivers/gpu/drm/i915/intel_atomic.c +++ b/drivers/gpu/drm/i915/intel_atomic.c @@ -95,6 +95,8 @@ intel_crtc_duplicate_state(struct drm_crtc *crtc) crtc_state->update_pipe = false; crtc_state->disable_lp_wm = false; + crtc_state->disable_cxsr = false; + crtc_state->wm_changed = false; return &crtc_state->base; } diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c index de465f2876d1..31f6d212fb1b 100644 --- a/drivers/gpu/drm/i915/intel_audio.c +++ b/drivers/gpu/drm/i915/intel_audio.c @@ -262,7 +262,8 @@ static void hsw_audio_codec_disable(struct intel_encoder *encoder) tmp |= AUD_CONFIG_N_PROG_ENABLE; tmp &= ~AUD_CONFIG_UPPER_N_MASK; tmp &= ~AUD_CONFIG_LOWER_N_MASK; - if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT)) + if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT) || + intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DP_MST)) tmp |= AUD_CONFIG_N_VALUE_INDEX; I915_WRITE(HSW_AUD_CFG(pipe), tmp); @@ -375,7 +376,7 @@ static void ilk_audio_codec_disable(struct intel_encoder *encoder) if (HAS_PCH_IBX(dev_priv->dev)) { aud_config = IBX_AUD_CFG(pipe); aud_cntrl_st2 = IBX_AUD_CNTL_ST2; - } else if (IS_VALLEYVIEW(dev_priv)) { + } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { aud_config = VLV_AUD_CFG(pipe); aud_cntrl_st2 = VLV_AUD_CNTL_ST2; } else { @@ -435,7 +436,8 @@ static void ilk_audio_codec_enable(struct drm_connector *connector, aud_config = IBX_AUD_CFG(pipe); aud_cntl_st = IBX_AUD_CNTL_ST(pipe); aud_cntrl_st2 = IBX_AUD_CNTL_ST2; - } else if (IS_VALLEYVIEW(connector->dev)) { + } else if (IS_VALLEYVIEW(connector->dev) || + IS_CHERRYVIEW(connector->dev)) { hdmiw_hdmiedid = VLV_HDMIW_HDMIEDID(pipe); aud_config = VLV_AUD_CFG(pipe); aud_cntl_st = VLV_AUD_CNTL_ST(pipe); @@ -474,7 +476,8 @@ static void ilk_audio_codec_enable(struct drm_connector *connector, tmp &= ~AUD_CONFIG_N_VALUE_INDEX; tmp &= ~AUD_CONFIG_N_PROG_ENABLE; tmp &= ~AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK; - if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT)) + if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT) || + intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DP_MST)) tmp |= AUD_CONFIG_N_VALUE_INDEX; else tmp |= audio_config_hdmi_pixel_clock(adjusted_mode); @@ -512,7 +515,8 @@ void intel_audio_codec_enable(struct intel_encoder *intel_encoder) /* ELD Conn_Type */ connector->eld[5] &= ~(3 << 2); - if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) + if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) || + intel_pipe_has_type(crtc, INTEL_OUTPUT_DP_MST)) connector->eld[5] |= (1 << 2); connector->eld[6] = drm_av_sync_delay(connector, adjusted_mode) / 2; @@ -567,7 +571,7 @@ void intel_init_audio(struct drm_device *dev) if (IS_G4X(dev)) { dev_priv->display.audio_codec_enable = g4x_audio_codec_enable; dev_priv->display.audio_codec_disable = g4x_audio_codec_disable; - } else if (IS_VALLEYVIEW(dev)) { + } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { dev_priv->display.audio_codec_enable = ilk_audio_codec_enable; dev_priv->display.audio_codec_disable = ilk_audio_codec_disable; } else if (IS_HASWELL(dev) || INTEL_INFO(dev)->gen >= 8) { diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index ce82f9c7df24..eba3e0f87181 100644 --- 
a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c @@ -24,7 +24,7 @@ * Eric Anholt * */ -#include + #include #include #include @@ -332,10 +332,10 @@ parse_sdvo_panel_data(struct drm_i915_private *dev_priv, drm_mode_debug_printmodeline(panel_fixed_mode); } -static int intel_bios_ssc_frequency(struct drm_device *dev, +static int intel_bios_ssc_frequency(struct drm_i915_private *dev_priv, bool alternate) { - switch (INTEL_INFO(dev)->gen) { + switch (INTEL_INFO(dev_priv)->gen) { case 2: return alternate ? 66667 : 48000; case 3: @@ -350,26 +350,29 @@ static void parse_general_features(struct drm_i915_private *dev_priv, const struct bdb_header *bdb) { - struct drm_device *dev = dev_priv->dev; const struct bdb_general_features *general; general = find_section(bdb, BDB_GENERAL_FEATURES); - if (general) { - dev_priv->vbt.int_tv_support = general->int_tv_support; + if (!general) + return; + + dev_priv->vbt.int_tv_support = general->int_tv_support; + /* int_crt_support can't be trusted on earlier platforms */ + if (bdb->version >= 155 && + (HAS_DDI(dev_priv) || IS_VALLEYVIEW(dev_priv))) dev_priv->vbt.int_crt_support = general->int_crt_support; - dev_priv->vbt.lvds_use_ssc = general->enable_ssc; - dev_priv->vbt.lvds_ssc_freq = - intel_bios_ssc_frequency(dev, general->ssc_freq); - dev_priv->vbt.display_clock_mode = general->display_clock_mode; - dev_priv->vbt.fdi_rx_polarity_inverted = general->fdi_rx_polarity_inverted; - DRM_DEBUG_KMS("BDB_GENERAL_FEATURES int_tv_support %d int_crt_support %d lvds_use_ssc %d lvds_ssc_freq %d display_clock_mode %d fdi_rx_polarity_inverted %d\n", - dev_priv->vbt.int_tv_support, - dev_priv->vbt.int_crt_support, - dev_priv->vbt.lvds_use_ssc, - dev_priv->vbt.lvds_ssc_freq, - dev_priv->vbt.display_clock_mode, - dev_priv->vbt.fdi_rx_polarity_inverted); - } + dev_priv->vbt.lvds_use_ssc = general->enable_ssc; + dev_priv->vbt.lvds_ssc_freq = + intel_bios_ssc_frequency(dev_priv, general->ssc_freq); + dev_priv->vbt.display_clock_mode = general->display_clock_mode; + dev_priv->vbt.fdi_rx_polarity_inverted = general->fdi_rx_polarity_inverted; + DRM_DEBUG_KMS("BDB_GENERAL_FEATURES int_tv_support %d int_crt_support %d lvds_use_ssc %d lvds_ssc_freq %d display_clock_mode %d fdi_rx_polarity_inverted %d\n", + dev_priv->vbt.int_tv_support, + dev_priv->vbt.int_crt_support, + dev_priv->vbt.lvds_use_ssc, + dev_priv->vbt.lvds_ssc_freq, + dev_priv->vbt.display_clock_mode, + dev_priv->vbt.fdi_rx_polarity_inverted); } static void @@ -1054,10 +1057,9 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port, static void parse_ddi_ports(struct drm_i915_private *dev_priv, const struct bdb_header *bdb) { - struct drm_device *dev = dev_priv->dev; enum port port; - if (!HAS_DDI(dev)) + if (!HAS_DDI(dev_priv)) return; if (!dev_priv->vbt.child_dev_num) @@ -1170,7 +1172,6 @@ parse_device_mapping(struct drm_i915_private *dev_priv, static void init_vbt_defaults(struct drm_i915_private *dev_priv) { - struct drm_device *dev = dev_priv->dev; enum port port; dev_priv->vbt.crt_ddc_pin = GMBUS_PIN_VGADDC; @@ -1195,8 +1196,8 @@ init_vbt_defaults(struct drm_i915_private *dev_priv) * Core/SandyBridge/IvyBridge use alternative (120MHz) reference * clock for LVDS. 
*/ - dev_priv->vbt.lvds_ssc_freq = intel_bios_ssc_frequency(dev, - !HAS_PCH_SPLIT(dev)); + dev_priv->vbt.lvds_ssc_freq = intel_bios_ssc_frequency(dev_priv, + !HAS_PCH_SPLIT(dev_priv)); DRM_DEBUG_KMS("Set default to SSC at %d kHz\n", dev_priv->vbt.lvds_ssc_freq); for (port = PORT_A; port < I915_MAX_PORTS; port++) { @@ -1211,88 +1212,79 @@ init_vbt_defaults(struct drm_i915_private *dev_priv) } } -static int intel_no_opregion_vbt_callback(const struct dmi_system_id *id) +static const struct bdb_header *get_bdb_header(const struct vbt_header *vbt) { - DRM_DEBUG_KMS("Falling back to manually reading VBT from " - "VBIOS ROM for %s\n", - id->ident); - return 1; + const void *_vbt = vbt; + + return _vbt + vbt->bdb_offset; } -static const struct dmi_system_id intel_no_opregion_vbt[] = { - { - .callback = intel_no_opregion_vbt_callback, - .ident = "ThinkCentre A57", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), - DMI_MATCH(DMI_PRODUCT_NAME, "97027RG"), - }, - }, - { } -}; - -static const struct bdb_header *validate_vbt(const void *base, - size_t size, - const void *_vbt, - const char *source) +/** + * intel_bios_is_valid_vbt - does the given buffer contain a valid VBT + * @buf: pointer to a buffer to validate + * @size: size of the buffer + * + * Returns true on valid VBT. + */ +bool intel_bios_is_valid_vbt(const void *buf, size_t size) { - size_t offset = _vbt - base; - const struct vbt_header *vbt = _vbt; + const struct vbt_header *vbt = buf; const struct bdb_header *bdb; - if (offset + sizeof(struct vbt_header) > size) { + if (!vbt) + return false; + + if (sizeof(struct vbt_header) > size) { DRM_DEBUG_DRIVER("VBT header incomplete\n"); - return NULL; + return false; } if (memcmp(vbt->signature, "$VBT", 4)) { DRM_DEBUG_DRIVER("VBT invalid signature\n"); - return NULL; + return false; } - offset += vbt->bdb_offset; - if (offset + sizeof(struct bdb_header) > size) { + if (vbt->bdb_offset + sizeof(struct bdb_header) > size) { DRM_DEBUG_DRIVER("BDB header incomplete\n"); - return NULL; + return false; } - bdb = base + offset; - if (offset + bdb->bdb_size > size) { + bdb = get_bdb_header(vbt); + if (vbt->bdb_offset + bdb->bdb_size > size) { DRM_DEBUG_DRIVER("BDB incomplete\n"); - return NULL; + return false; } - DRM_DEBUG_KMS("Using VBT from %s: %20s\n", - source, vbt->signature); - return bdb; + return vbt; } -static const struct bdb_header *find_vbt(void __iomem *bios, size_t size) +static const struct vbt_header *find_vbt(void __iomem *bios, size_t size) { - const struct bdb_header *bdb = NULL; size_t i; /* Scour memory looking for the VBT signature. */ for (i = 0; i + 4 < size; i++) { - if (ioread32(bios + i) == *((const u32 *) "$VBT")) { - /* - * This is the one place where we explicitly discard the - * address space (__iomem) of the BIOS/VBT. From now on - * everything is based on 'base', and treated as regular - * memory. - */ - void *_bios = (void __force *) bios; + void *vbt; - bdb = validate_vbt(_bios, size, _bios + i, "PCI ROM"); - break; - } + if (ioread32(bios + i) != *((const u32 *) "$VBT")) + continue; + + /* + * This is the one place where we explicitly discard the address + * space (__iomem) of the BIOS/VBT. + */ + vbt = (void __force *) bios + i; + if (intel_bios_is_valid_vbt(vbt, size - i)) + return vbt; + + break; } - return bdb; + return NULL; } /** - * intel_parse_bios - find VBT and initialize settings from the BIOS + * intel_bios_init - find VBT and initialize settings from the BIOS * @dev: DRM device * * Loads the Video BIOS and checks that the VBT exists. 
Sets scratch registers @@ -1301,37 +1293,39 @@ static const struct bdb_header *find_vbt(void __iomem *bios, size_t size) * Returns 0 on success, nonzero on failure. */ int -intel_parse_bios(struct drm_device *dev) +intel_bios_init(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; - struct pci_dev *pdev = dev->pdev; - const struct bdb_header *bdb = NULL; + struct pci_dev *pdev = dev_priv->dev->pdev; + const struct vbt_header *vbt = dev_priv->opregion.vbt; + const struct bdb_header *bdb; u8 __iomem *bios = NULL; - if (HAS_PCH_NOP(dev)) + if (HAS_PCH_NOP(dev_priv)) return -ENODEV; init_vbt_defaults(dev_priv); - /* XXX Should this validation be moved to intel_opregion.c? */ - if (!dmi_check_system(intel_no_opregion_vbt) && dev_priv->opregion.vbt) - bdb = validate_vbt(dev_priv->opregion.header, OPREGION_SIZE, - dev_priv->opregion.vbt, "OpRegion"); - - if (bdb == NULL) { + if (!vbt) { size_t size; bios = pci_map_rom(pdev, &size); if (!bios) return -1; - bdb = find_vbt(bios, size); - if (!bdb) { + vbt = find_vbt(bios, size); + if (!vbt) { pci_unmap_rom(pdev, bios); return -1; } + + DRM_DEBUG_KMS("Found valid VBT in PCI ROM\n"); } + bdb = get_bdb_header(vbt); + + DRM_DEBUG_KMS("VBT signature \"%.*s\", BDB version %d\n", + (int)sizeof(vbt->signature), vbt->signature, bdb->version); + /* Grab useful general definitions */ parse_general_features(dev_priv, bdb); parse_general_definitions(dev_priv, bdb); diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h index 7ec8c9aefb84..54eac1003a1e 100644 --- a/drivers/gpu/drm/i915/intel_bios.h +++ b/drivers/gpu/drm/i915/intel_bios.h @@ -28,8 +28,6 @@ #ifndef _I830_BIOS_H_ #define _I830_BIOS_H_ -#include - struct vbt_header { u8 signature[20]; /**< Always starts with 'VBT$' */ u16 version; /**< decimal */ @@ -588,8 +586,6 @@ struct bdb_psr { struct psr_table psr_table[16]; } __packed; -int intel_parse_bios(struct drm_device *dev); - /* * Driver<->VBIOS interaction occurs through scratch bits in * GR18 & SWF*. diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index 27b3e610e8f0..9c89df1af036 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c @@ -777,11 +777,37 @@ void intel_crt_init(struct drm_device *dev) struct intel_crt *crt; struct intel_connector *intel_connector; struct drm_i915_private *dev_priv = dev->dev_private; + i915_reg_t adpa_reg; + u32 adpa; /* Skip machines without VGA that falsely report hotplug events */ if (dmi_check_system(intel_no_crt)) return; + if (HAS_PCH_SPLIT(dev)) + adpa_reg = PCH_ADPA; + else if (IS_VALLEYVIEW(dev)) + adpa_reg = VLV_ADPA; + else + adpa_reg = ADPA; + + adpa = I915_READ(adpa_reg); + if ((adpa & ADPA_DAC_ENABLE) == 0) { + /* + * On some machines (some IVB at least) CRT can be + * fused off, but there's no known fuse bit to + * indicate that. On these machine the ADPA register + * works normally, except the DAC enable bit won't + * take. So the only way to tell is attempt to enable + * it and see what happens. 
+ */ + I915_WRITE(adpa_reg, adpa | ADPA_DAC_ENABLE | + ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE); + if ((I915_READ(adpa_reg) & ADPA_DAC_ENABLE) == 0) + return; + I915_WRITE(adpa_reg, adpa); + } + crt = kzalloc(sizeof(struct intel_crt), GFP_KERNEL); if (!crt) return; @@ -798,7 +824,7 @@ void intel_crt_init(struct drm_device *dev) &intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA); drm_encoder_init(dev, &crt->base.base, &intel_crt_enc_funcs, - DRM_MODE_ENCODER_DAC); + DRM_MODE_ENCODER_DAC, NULL); intel_connector_attach_encoder(intel_connector, &crt->base); @@ -815,15 +841,10 @@ void intel_crt_init(struct drm_device *dev) connector->interlace_allowed = 1; connector->doublescan_allowed = 0; - if (HAS_PCH_SPLIT(dev)) - crt->adpa_reg = PCH_ADPA; - else if (IS_VALLEYVIEW(dev)) - crt->adpa_reg = VLV_ADPA; - else - crt->adpa_reg = ADPA; + crt->adpa_reg = adpa_reg; crt->base.compute_config = intel_crt_compute_config; - if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev)) { + if (HAS_PCH_SPLIT(dev)) { crt->base.disable = pch_disable_crt; crt->base.post_disable = pch_post_disable_crt; } else { diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c index 6c6a6695e99c..9bb63a85997a 100644 --- a/drivers/gpu/drm/i915/intel_csr.c +++ b/drivers/gpu/drm/i915/intel_csr.c @@ -166,6 +166,14 @@ struct stepping_info { char substepping; }; +/* + * Kabylake derivated from Skylake H0, so SKL H0 + * is the right firmware for KBL A0 (revid 0). + */ +static const struct stepping_info kbl_stepping_info[] = { + {'H', '0'}, {'I', '0'} +}; + static const struct stepping_info skl_stepping_info[] = { {'A', '0'}, {'B', '0'}, {'C', '0'}, {'D', '0'}, {'E', '0'}, {'F', '0'}, @@ -182,7 +190,10 @@ static const struct stepping_info *intel_get_stepping_info(struct drm_device *de const struct stepping_info *si; unsigned int size; - if (IS_SKYLAKE(dev)) { + if (IS_KABYLAKE(dev)) { + size = ARRAY_SIZE(kbl_stepping_info); + si = kbl_stepping_info; + } else if (IS_SKYLAKE(dev)) { size = ARRAY_SIZE(skl_stepping_info); si = skl_stepping_info; } else if (IS_BROXTON(dev)) { diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index 59deb0d85533..e6408e5583d7 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c @@ -353,10 +353,10 @@ static const struct ddi_buf_trans *skl_get_buf_trans_dp(struct drm_device *dev, { const struct ddi_buf_trans *ddi_translations; - if (IS_SKL_ULX(dev)) { + if (IS_SKL_ULX(dev) || IS_KBL_ULX(dev)) { ddi_translations = skl_y_ddi_translations_dp; *n_entries = ARRAY_SIZE(skl_y_ddi_translations_dp); - } else if (IS_SKL_ULT(dev)) { + } else if (IS_SKL_ULT(dev) || IS_KBL_ULT(dev)) { ddi_translations = skl_u_ddi_translations_dp; *n_entries = ARRAY_SIZE(skl_u_ddi_translations_dp); } else { @@ -373,7 +373,7 @@ static const struct ddi_buf_trans *skl_get_buf_trans_edp(struct drm_device *dev, struct drm_i915_private *dev_priv = dev->dev_private; const struct ddi_buf_trans *ddi_translations; - if (IS_SKL_ULX(dev)) { + if (IS_SKL_ULX(dev) || IS_KBL_ULX(dev)) { if (dev_priv->edp_low_vswing) { ddi_translations = skl_y_ddi_translations_edp; *n_entries = ARRAY_SIZE(skl_y_ddi_translations_edp); @@ -381,7 +381,7 @@ static const struct ddi_buf_trans *skl_get_buf_trans_edp(struct drm_device *dev, ddi_translations = skl_y_ddi_translations_dp; *n_entries = ARRAY_SIZE(skl_y_ddi_translations_dp); } - } else if (IS_SKL_ULT(dev)) { + } else if (IS_SKL_ULT(dev) || IS_KBL_ULT(dev)) { if (dev_priv->edp_low_vswing) { ddi_translations = skl_u_ddi_translations_edp; 
*n_entries = ARRAY_SIZE(skl_u_ddi_translations_edp); @@ -408,7 +408,7 @@ skl_get_buf_trans_hdmi(struct drm_device *dev, { const struct ddi_buf_trans *ddi_translations; - if (IS_SKL_ULX(dev)) { + if (IS_SKL_ULX(dev) || IS_KBL_ULX(dev)) { ddi_translations = skl_y_ddi_translations_hdmi; *n_entries = ARRAY_SIZE(skl_y_ddi_translations_hdmi); } else { @@ -675,15 +675,16 @@ void hsw_fdi_link_train(struct drm_crtc *crtc) temp = I915_READ(DP_TP_STATUS(PORT_E)); if (temp & DP_TP_STATUS_AUTOTRAIN_DONE) { DRM_DEBUG_KMS("FDI link training done on step %d\n", i); + break; + } - /* Enable normal pixel sending for FDI */ - I915_WRITE(DP_TP_CTL(PORT_E), - DP_TP_CTL_FDI_AUTOTRAIN | - DP_TP_CTL_LINK_TRAIN_NORMAL | - DP_TP_CTL_ENHANCED_FRAME_ENABLE | - DP_TP_CTL_ENABLE); - - return; + /* + * Leave things enabled even if we failed to train FDI. + * Results in less fireworks from the state checker. + */ + if (i == ARRAY_SIZE(hsw_ddi_translations_fdi) * 2 - 1) { + DRM_ERROR("FDI link training failed!\n"); + break; } temp = I915_READ(DDI_BUF_CTL(PORT_E)); @@ -712,7 +713,12 @@ void hsw_fdi_link_train(struct drm_crtc *crtc) POSTING_READ(FDI_RX_MISC(PIPE_A)); } - DRM_ERROR("FDI link training failed!\n"); + /* Enable normal pixel sending for FDI */ + I915_WRITE(DP_TP_CTL(PORT_E), + DP_TP_CTL_FDI_AUTOTRAIN | + DP_TP_CTL_LINK_TRAIN_NORMAL | + DP_TP_CTL_ENHANCED_FRAME_ENABLE | + DP_TP_CTL_ENABLE); } void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder) @@ -3108,6 +3114,19 @@ void intel_ddi_fdi_disable(struct drm_crtc *crtc) I915_WRITE(FDI_RX_CTL(PIPE_A), val); } +bool intel_ddi_is_audio_enabled(struct drm_i915_private *dev_priv, + struct intel_crtc *intel_crtc) +{ + u32 temp; + + if (intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_AUDIO)) { + temp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD); + if (temp & AUDIO_OUTPUT_ENABLE(intel_crtc->pipe)) + return true; + } + return false; +} + void intel_ddi_get_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config) { @@ -3151,7 +3170,7 @@ void intel_ddi_get_config(struct intel_encoder *encoder, pipe_config->has_hdmi_sink = true; intel_hdmi = enc_to_intel_hdmi(&encoder->base); - if (intel_hdmi->infoframe_enabled(&encoder->base)) + if (intel_hdmi->infoframe_enabled(&encoder->base, pipe_config)) pipe_config->has_infoframe = true; break; case TRANS_DDI_MODE_SELECT_DVI: @@ -3168,11 +3187,8 @@ void intel_ddi_get_config(struct intel_encoder *encoder, break; } - if (intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_AUDIO)) { - temp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD); - if (temp & AUDIO_OUTPUT_ENABLE(intel_crtc->pipe)) - pipe_config->has_audio = true; - } + pipe_config->has_audio = + intel_ddi_is_audio_enabled(dev_priv, intel_crtc); if (encoder->type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp_bpp && pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) { @@ -3284,7 +3300,7 @@ void intel_ddi_init(struct drm_device *dev, enum port port) encoder = &intel_encoder->base; drm_encoder_init(dev, encoder, &intel_ddi_funcs, - DRM_MODE_ENCODER_TMDS); + DRM_MODE_ENCODER_TMDS, NULL); intel_encoder->compute_config = intel_ddi_compute_config; intel_encoder->enable = intel_enable_ddi; diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 622d30c6c37f..2f00828ccc6e 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -44,6 +44,8 @@ #include #include #include +#include +#include /* Primary plane formats for gen <= 3 */ static const uint32_t i8xx_primary_formats[] = { @@ -186,7 +188,7 @@ int 
intel_hrawclk(struct drm_device *dev) uint32_t clkcfg; /* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */ - if (IS_VALLEYVIEW(dev)) + if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) return 200; clkcfg = I915_READ(CLKCFG); @@ -214,7 +216,7 @@ int intel_hrawclk(struct drm_device *dev) static void intel_update_czclk(struct drm_i915_private *dev_priv) { - if (!IS_VALLEYVIEW(dev_priv)) + if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))) return; dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk", @@ -715,11 +717,12 @@ static bool intel_PLL_is_valid(struct drm_device *dev, if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1) INTELPllInvalid("m1 out of range\n"); - if (!IS_PINEVIEW(dev) && !IS_VALLEYVIEW(dev) && !IS_BROXTON(dev)) + if (!IS_PINEVIEW(dev) && !IS_VALLEYVIEW(dev) && + !IS_CHERRYVIEW(dev) && !IS_BROXTON(dev)) if (clock->m1 <= clock->m2) INTELPllInvalid("m1 <= m2\n"); - if (!IS_VALLEYVIEW(dev) && !IS_BROXTON(dev)) { + if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) && !IS_BROXTON(dev)) { if (clock->p < limit->p.min || limit->p.max < clock->p) INTELPllInvalid("p out of range\n"); if (clock->m < limit->m.min || limit->m.max < clock->m) @@ -1304,7 +1307,7 @@ void assert_panel_unlocked(struct drm_i915_private *dev_priv, I915_READ(PCH_LVDS) & LVDS_PIPEB_SELECT) panel_pipe = PIPE_B; /* XXX: else fix for eDP */ - } else if (IS_VALLEYVIEW(dev)) { + } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { /* presumably write lock depends on pipe, not port select */ pp_reg = VLV_PIPE_PP_CONTROL(pipe); panel_pipe = pipe; @@ -1422,7 +1425,7 @@ static void assert_sprites_disabled(struct drm_i915_private *dev_priv, "plane %d assertion failure, should be off on pipe %c but is still active\n", sprite, pipe_name(pipe)); } - } else if (IS_VALLEYVIEW(dev)) { + } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { for_each_sprite(dev_priv, pipe, sprite) { u32 val = I915_READ(SPCNTR(pipe, sprite)); I915_STATE_WARN(val & SP_ENABLE, @@ -1605,9 +1608,6 @@ static void vlv_enable_pll(struct intel_crtc *crtc, assert_pipe_disabled(dev_priv, crtc->pipe); - /* No really, not for ILK+ */ - BUG_ON(!IS_VALLEYVIEW(dev_priv->dev)); - /* PLL is protected by panel, make sure we can write it */ if (IS_MOBILE(dev_priv->dev)) assert_panel_unlocked(dev_priv, crtc->pipe); @@ -1645,8 +1645,6 @@ static void chv_enable_pll(struct intel_crtc *crtc, assert_pipe_disabled(dev_priv, crtc->pipe); - BUG_ON(!IS_CHERRYVIEW(dev_priv->dev)); - mutex_lock(&dev_priv->sb_lock); /* Enable back the 10bit clock to display controller */ @@ -2131,7 +2129,7 @@ static void intel_enable_pipe(struct intel_crtc *crtc) * need the check. 
*/ if (HAS_GMCH_DISPLAY(dev_priv->dev)) - if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI)) + if (crtc->config->has_dsi_encoder) assert_dsi_pll_enabled(dev_priv); else assert_pll_enabled(dev_priv, pipe); @@ -2318,7 +2316,7 @@ static unsigned int intel_linear_alignment(struct drm_i915_private *dev_priv) if (INTEL_INFO(dev_priv)->gen >= 9) return 256 * 1024; else if (IS_BROADWATER(dev_priv) || IS_CRESTLINE(dev_priv) || - IS_VALLEYVIEW(dev_priv)) + IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) return 128 * 1024; else if (INTEL_INFO(dev_priv)->gen >= 4) return 4 * 1024; @@ -3189,8 +3187,8 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb, struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; - if (dev_priv->fbc.disable_fbc) - dev_priv->fbc.disable_fbc(dev_priv); + if (dev_priv->fbc.deactivate) + dev_priv->fbc.deactivate(dev_priv); dev_priv->display.update_primary_plane(crtc, fb, x, y); @@ -3953,6 +3951,21 @@ static int intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc) return 0; } +static void lpt_disable_iclkip(struct drm_i915_private *dev_priv) +{ + u32 temp; + + I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE); + + mutex_lock(&dev_priv->sb_lock); + + temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK); + temp |= SBI_SSCCTL_DISABLE; + intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK); + + mutex_unlock(&dev_priv->sb_lock); +} + /* Program iCLKIP clock to the desired frequency */ static void lpt_program_iclkip(struct drm_crtc *crtc) { @@ -3962,18 +3975,7 @@ static void lpt_program_iclkip(struct drm_crtc *crtc) u32 divsel, phaseinc, auxdiv, phasedir = 0; u32 temp; - mutex_lock(&dev_priv->sb_lock); - - /* It is necessary to ungate the pixclk gate prior to programming - * the divisors, and gate it back when it is done. - */ - I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE); - - /* Disable SSCCTL */ - intel_sbi_write(dev_priv, SBI_SSCCTL6, - intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK) | - SBI_SSCCTL_DISABLE, - SBI_ICLK); + lpt_disable_iclkip(dev_priv); /* 20MHz is a corner case which is out of range for the 7-bit divisor */ if (clock == 20000) { @@ -3991,7 +3993,7 @@ static void lpt_program_iclkip(struct drm_crtc *crtc) u32 iclk_pi_range = 64; u32 desired_divisor, msb_divisor_value, pi_value; - desired_divisor = (iclk_virtual_root_freq / clock); + desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq, clock); msb_divisor_value = desired_divisor / iclk_pi_range; pi_value = desired_divisor % iclk_pi_range; @@ -4013,6 +4015,8 @@ static void lpt_program_iclkip(struct drm_crtc *crtc) phasedir, phaseinc); + mutex_lock(&dev_priv->sb_lock); + /* Program SSCDIVINTPHASE6 */ temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK); temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK; @@ -4034,12 +4038,12 @@ static void lpt_program_iclkip(struct drm_crtc *crtc) temp &= ~SBI_SSCCTL_DISABLE; intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK); + mutex_unlock(&dev_priv->sb_lock); + /* Wait for initialization time */ udelay(24); I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE); - - mutex_unlock(&dev_priv->sb_lock); } static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc, @@ -4152,6 +4156,12 @@ static void ironlake_pch_enable(struct drm_crtc *crtc) I915_WRITE(FDI_RX_TUSIZE1(pipe), I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK); + /* + * Sometimes spurious CPU pipe underruns happen during FDI + * training, at least with VGA+HDMI cloning. Suppress them. 
+ */ + intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); + /* For PCH output, training FDI link */ dev_priv->display.fdi_link_train(crtc); @@ -4185,6 +4195,8 @@ static void ironlake_pch_enable(struct drm_crtc *crtc) intel_fdi_normal_train(crtc); + intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); + /* For PCH DP, enable TRANS_DP_CTL */ if (HAS_PCH_CPT(dev) && intel_crtc->config->has_dp_encoder) { const struct drm_display_mode *adjusted_mode = @@ -4643,7 +4655,7 @@ static void intel_crtc_load_lut(struct drm_crtc *crtc) return; if (HAS_GMCH_DISPLAY(dev_priv->dev)) { - if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI)) + if (intel_crtc->config->has_dsi_encoder) assert_dsi_pll_enabled(dev_priv); else assert_pll_enabled(dev_priv, pipe); @@ -4713,14 +4725,6 @@ intel_post_enable_primary(struct drm_crtc *crtc) struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int pipe = intel_crtc->pipe; - /* - * BDW signals flip done immediately if the plane - * is disabled, even if the plane enable is already - * armed to occur at the next vblank :( - */ - if (IS_BROADWELL(dev)) - intel_wait_for_vblank(dev, pipe); - /* * FIXME IPS should be fine as long as one plane is * enabled, but in practice it seems to have problems @@ -4798,22 +4802,22 @@ intel_pre_disable_primary(struct drm_crtc *crtc) static void intel_post_plane_update(struct intel_crtc *crtc) { struct intel_crtc_atomic_commit *atomic = &crtc->atomic; + struct intel_crtc_state *pipe_config = + to_intel_crtc_state(crtc->base.state); struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; if (atomic->wait_vblank) intel_wait_for_vblank(dev, crtc->pipe); intel_frontbuffer_flip(dev, atomic->fb_bits); - if (atomic->disable_cxsr) - crtc->wm.cxsr_allowed = true; + crtc->wm.cxsr_allowed = true; - if (crtc->atomic.update_wm_post) + if (pipe_config->wm_changed && pipe_config->base.active) intel_update_watermarks(&crtc->base); if (atomic->update_fbc) - intel_fbc_update(dev_priv); + intel_fbc_update(crtc); if (atomic->post_enable_primary) intel_post_enable_primary(&crtc->base); @@ -4826,9 +4830,11 @@ static void intel_pre_plane_update(struct intel_crtc *crtc) struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc_atomic_commit *atomic = &crtc->atomic; + struct intel_crtc_state *pipe_config = + to_intel_crtc_state(crtc->base.state); if (atomic->disable_fbc) - intel_fbc_disable_crtc(crtc); + intel_fbc_deactivate(crtc); if (crtc->atomic.disable_ips) hsw_disable_ips(crtc); @@ -4836,10 +4842,13 @@ static void intel_pre_plane_update(struct intel_crtc *crtc) if (atomic->pre_disable_primary) intel_pre_disable_primary(&crtc->base); - if (atomic->disable_cxsr) { + if (pipe_config->disable_cxsr) { crtc->wm.cxsr_allowed = false; intel_set_memory_cxsr(dev_priv, false); } + + if (!needs_modeset(&pipe_config->base) && pipe_config->wm_changed) + intel_update_watermarks(&crtc->base); } static void intel_crtc_disable_planes(struct drm_crtc *crtc, unsigned plane_mask) @@ -4936,6 +4945,8 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc) if (intel_crtc->config->has_pch_encoder) intel_wait_for_vblank(dev, pipe); intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true); + + intel_fbc_enable(intel_crtc); } /* IPS only exists on ULT machines and is tied to pipe A. 
*/ @@ -4953,7 +4964,6 @@ static void haswell_crtc_enable(struct drm_crtc *crtc) int pipe = intel_crtc->pipe, hsw_workaround_pipe; struct intel_crtc_state *pipe_config = to_intel_crtc_state(crtc->state); - bool is_dsi = intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI); if (WARN_ON(intel_crtc->active)) return; @@ -4986,10 +4996,12 @@ static void haswell_crtc_enable(struct drm_crtc *crtc) intel_crtc->active = true; - intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); + if (intel_crtc->config->has_pch_encoder) + intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); + else + intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); + for_each_encoder_on_crtc(dev, crtc, encoder) { - if (encoder->pre_pll_enable) - encoder->pre_pll_enable(encoder); if (encoder->pre_enable) encoder->pre_enable(encoder); } @@ -4997,7 +5009,7 @@ static void haswell_crtc_enable(struct drm_crtc *crtc) if (intel_crtc->config->has_pch_encoder) dev_priv->display.fdi_link_train(crtc); - if (!is_dsi) + if (!intel_crtc->config->has_dsi_encoder) intel_ddi_enable_pipe_clock(intel_crtc); if (INTEL_INFO(dev)->gen >= 9) @@ -5012,7 +5024,7 @@ static void haswell_crtc_enable(struct drm_crtc *crtc) intel_crtc_load_lut(crtc); intel_ddi_set_pipe_settings(crtc); - if (!is_dsi) + if (!intel_crtc->config->has_dsi_encoder) intel_ddi_enable_transcoder_func(crtc); intel_update_watermarks(crtc); @@ -5021,7 +5033,7 @@ static void haswell_crtc_enable(struct drm_crtc *crtc) if (intel_crtc->config->has_pch_encoder) lpt_pch_enable(crtc); - if (intel_crtc->config->dp_encoder_is_mst && !is_dsi) + if (intel_crtc->config->dp_encoder_is_mst) intel_ddi_set_vc_payload_alloc(crtc, true); assert_vblank_disabled(crtc); @@ -5032,9 +5044,13 @@ static void haswell_crtc_enable(struct drm_crtc *crtc) intel_opregion_notify_encoder(encoder, true); } - if (intel_crtc->config->has_pch_encoder) + if (intel_crtc->config->has_pch_encoder) { + intel_wait_for_vblank(dev, pipe); + intel_wait_for_vblank(dev, pipe); + intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A, true); + } /* If we change the relative order between pipe/planes enabling, we need * to change the workaround. */ @@ -5043,6 +5059,8 @@ static void haswell_crtc_enable(struct drm_crtc *crtc) intel_wait_for_vblank(dev, hsw_workaround_pipe); intel_wait_for_vblank(dev, hsw_workaround_pipe); } + + intel_fbc_enable(intel_crtc); } static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force) @@ -5077,12 +5095,22 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc) drm_crtc_vblank_off(crtc); assert_vblank_disabled(crtc); + /* + * Sometimes spurious CPU pipe underruns happen when the + * pipe is already disabled, but FDI RX/TX is still enabled. + * Happens at least with VGA+HDMI cloning. Suppress them. 
+ */ + if (intel_crtc->config->has_pch_encoder) + intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); + intel_disable_pipe(intel_crtc); ironlake_pfit_disable(intel_crtc, false); - if (intel_crtc->config->has_pch_encoder) + if (intel_crtc->config->has_pch_encoder) { ironlake_fdi_disable(crtc); + intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); + } for_each_encoder_on_crtc(dev, crtc, encoder) if (encoder->post_disable) @@ -5113,6 +5141,8 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc) } intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true); + + intel_fbc_disable_crtc(intel_crtc); } static void haswell_crtc_disable(struct drm_crtc *crtc) @@ -5122,7 +5152,6 @@ static void haswell_crtc_disable(struct drm_crtc *crtc) struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_encoder *encoder; enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder; - bool is_dsi = intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI); if (intel_crtc->config->has_pch_encoder) intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A, @@ -5141,7 +5170,7 @@ static void haswell_crtc_disable(struct drm_crtc *crtc) if (intel_crtc->config->dp_encoder_is_mst) intel_ddi_set_vc_payload_alloc(crtc, false); - if (!is_dsi) + if (!intel_crtc->config->has_dsi_encoder) intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder); if (INTEL_INFO(dev)->gen >= 9) @@ -5149,21 +5178,23 @@ static void haswell_crtc_disable(struct drm_crtc *crtc) else ironlake_pfit_disable(intel_crtc, false); - if (!is_dsi) + if (!intel_crtc->config->has_dsi_encoder) intel_ddi_disable_pipe_clock(intel_crtc); - if (intel_crtc->config->has_pch_encoder) { - lpt_disable_pch_transcoder(dev_priv); - intel_ddi_fdi_disable(crtc); - } - for_each_encoder_on_crtc(dev, crtc, encoder) if (encoder->post_disable) encoder->post_disable(encoder); - if (intel_crtc->config->has_pch_encoder) + if (intel_crtc->config->has_pch_encoder) { + lpt_disable_pch_transcoder(dev_priv); + lpt_disable_iclkip(dev_priv); + intel_ddi_fdi_disable(crtc); + intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A, true); + } + + intel_fbc_disable_crtc(intel_crtc); } static void i9xx_pfit_enable(struct intel_crtc *crtc) @@ -5229,10 +5260,6 @@ static enum intel_display_power_domain port_to_aux_power_domain(enum port port) } } -#define for_each_power_domain(domain, mask) \ - for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++) \ - if ((1 << (domain)) & (mask)) - enum intel_display_power_domain intel_display_port_power_domain(struct intel_encoder *intel_encoder) { @@ -5445,7 +5472,7 @@ static void intel_update_cdclk(struct drm_device *dev) * BSpec erroneously claims we should aim for 4MHz, but * in fact 1MHz is the correct frequency. */ - if (IS_VALLEYVIEW(dev)) { + if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { /* * Program the gmbus_freq based on the cdclk frequency. 
* BSpec erroneously claims we should aim for 4MHz, but @@ -6155,13 +6182,10 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc) struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_encoder *encoder; int pipe = intel_crtc->pipe; - bool is_dsi; if (WARN_ON(intel_crtc->active)) return; - is_dsi = intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI); - if (intel_crtc->config->has_dp_encoder) intel_dp_set_m_n(intel_crtc, M1_N1); @@ -6184,7 +6208,7 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc) if (encoder->pre_pll_enable) encoder->pre_pll_enable(encoder); - if (!is_dsi) { + if (!intel_crtc->config->has_dsi_encoder) { if (IS_CHERRYVIEW(dev)) { chv_prepare_pll(intel_crtc, intel_crtc->config); chv_enable_pll(intel_crtc, intel_crtc->config); @@ -6263,6 +6287,8 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc) for_each_encoder_on_crtc(dev, crtc, encoder) encoder->enable(encoder); + + intel_fbc_enable(intel_crtc); } static void i9xx_pfit_disable(struct intel_crtc *crtc) @@ -6310,7 +6336,7 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc) if (encoder->post_disable) encoder->post_disable(encoder); - if (!intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI)) { + if (!intel_crtc->config->has_dsi_encoder) { if (IS_CHERRYVIEW(dev)) chv_disable_pll(dev_priv, pipe); else if (IS_VALLEYVIEW(dev)) @@ -6325,6 +6351,8 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc) if (!IS_GEN2(dev)) intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); + + intel_fbc_disable_crtc(intel_crtc); } static void intel_crtc_disable_noatomic(struct drm_crtc *crtc) @@ -6464,13 +6492,11 @@ static void intel_connector_check_state(struct intel_connector *connector) int intel_connector_init(struct intel_connector *connector) { - struct drm_connector_state *connector_state; + drm_atomic_helper_connector_reset(&connector->base); - connector_state = kzalloc(sizeof *connector_state, GFP_KERNEL); - if (!connector_state) + if (!connector->base.state) return -ENOMEM; - connector->base.state = connector_state; return 0; } @@ -7171,7 +7197,7 @@ static int i9xx_get_refclk(const struct intel_crtc_state *crtc_state, WARN_ON(!crtc_state->base.state); - if (IS_VALLEYVIEW(dev) || IS_BROXTON(dev)) { + if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev) || IS_BROXTON(dev)) { refclk = 100000; } else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) && intel_panel_use_ssc(dev_priv) && num_connectors < 2) { @@ -7870,7 +7896,7 @@ static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc) pipeconf |= PIPECONF_DOUBLE_WIDE; /* only g4x and later have fancy bpc/dither controls */ - if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) { + if (IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { /* Bspec claims that we can't use dithering for 30bpp pipes. 
*/ if (intel_crtc->config->dither && intel_crtc->config->pipe_bpp != 30) pipeconf |= PIPECONF_DITHER_EN | @@ -7910,7 +7936,8 @@ static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc) } else pipeconf |= PIPECONF_PROGRESSIVE; - if (IS_VALLEYVIEW(dev) && intel_crtc->config->limited_color_range) + if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) && + intel_crtc->config->limited_color_range) pipeconf |= PIPECONF_COLOR_RANGE_SELECT; I915_WRITE(PIPECONF(intel_crtc->pipe), pipeconf); @@ -7925,8 +7952,6 @@ static int i9xx_crtc_compute_clock(struct intel_crtc *crtc, int refclk, num_connectors = 0; intel_clock_t clock; bool ok; - bool is_dsi = false; - struct intel_encoder *encoder; const intel_limit_t *limit; struct drm_atomic_state *state = crtc_state->base.state; struct drm_connector *connector; @@ -7936,26 +7961,14 @@ static int i9xx_crtc_compute_clock(struct intel_crtc *crtc, memset(&crtc_state->dpll_hw_state, 0, sizeof(crtc_state->dpll_hw_state)); - for_each_connector_in_state(state, connector, connector_state, i) { - if (connector_state->crtc != &crtc->base) - continue; - - encoder = to_intel_encoder(connector_state->best_encoder); - - switch (encoder->type) { - case INTEL_OUTPUT_DSI: - is_dsi = true; - break; - default: - break; - } - - num_connectors++; - } - - if (is_dsi) + if (crtc_state->has_dsi_encoder) return 0; + for_each_connector_in_state(state, connector, connector_state, i) { + if (connector_state->crtc == &crtc->base) + num_connectors++; + } + if (!crtc_state->clock_set) { refclk = i9xx_get_refclk(crtc_state, num_connectors); @@ -8171,7 +8184,7 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc, if (!(tmp & PIPECONF_ENABLE)) return false; - if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) { + if (IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { switch (tmp & PIPECONF_BPC_MASK) { case PIPECONF_6BPC: pipe_config->pipe_bpp = 18; @@ -8187,7 +8200,8 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc, } } - if (IS_VALLEYVIEW(dev) && (tmp & PIPECONF_COLOR_RANGE_SELECT)) + if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) && + (tmp & PIPECONF_COLOR_RANGE_SELECT)) pipe_config->limited_color_range = true; if (INTEL_INFO(dev)->gen < 4) @@ -8215,7 +8229,7 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc, pipe_config->pixel_multiplier = 1; } pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe)); - if (!IS_VALLEYVIEW(dev)) { + if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) { /* * DPLL_DVO_2X_MODE must be enabled for both DPLLs * on 830. 
Filter it out here so that we don't @@ -8567,6 +8581,67 @@ static void lpt_disable_clkout_dp(struct drm_device *dev) mutex_unlock(&dev_priv->sb_lock); } +#define BEND_IDX(steps) ((50 + (steps)) / 5) + +static const uint16_t sscdivintphase[] = { + [BEND_IDX( 50)] = 0x3B23, + [BEND_IDX( 45)] = 0x3B23, + [BEND_IDX( 40)] = 0x3C23, + [BEND_IDX( 35)] = 0x3C23, + [BEND_IDX( 30)] = 0x3D23, + [BEND_IDX( 25)] = 0x3D23, + [BEND_IDX( 20)] = 0x3E23, + [BEND_IDX( 15)] = 0x3E23, + [BEND_IDX( 10)] = 0x3F23, + [BEND_IDX( 5)] = 0x3F23, + [BEND_IDX( 0)] = 0x0025, + [BEND_IDX( -5)] = 0x0025, + [BEND_IDX(-10)] = 0x0125, + [BEND_IDX(-15)] = 0x0125, + [BEND_IDX(-20)] = 0x0225, + [BEND_IDX(-25)] = 0x0225, + [BEND_IDX(-30)] = 0x0325, + [BEND_IDX(-35)] = 0x0325, + [BEND_IDX(-40)] = 0x0425, + [BEND_IDX(-45)] = 0x0425, + [BEND_IDX(-50)] = 0x0525, +}; + +/* + * Bend CLKOUT_DP + * steps -50 to 50 inclusive, in steps of 5 + * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz) + * change in clock period = -(steps / 10) * 5.787 ps + */ +static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps) +{ + uint32_t tmp; + int idx = BEND_IDX(steps); + + if (WARN_ON(steps % 5 != 0)) + return; + + if (WARN_ON(idx >= ARRAY_SIZE(sscdivintphase))) + return; + + mutex_lock(&dev_priv->sb_lock); + + if (steps % 10 != 0) + tmp = 0xAAAAAAAB; + else + tmp = 0x00000000; + intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK); + + tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK); + tmp &= 0xffff0000; + tmp |= sscdivintphase[idx]; + intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK); + + mutex_unlock(&dev_priv->sb_lock); +} + +#undef BEND_IDX + static void lpt_init_pch_refclk(struct drm_device *dev) { struct intel_encoder *encoder; @@ -8582,10 +8657,12 @@ static void lpt_init_pch_refclk(struct drm_device *dev) } } - if (has_vga) + if (has_vga) { + lpt_bend_clkout_dp(to_i915(dev), 0); lpt_enable_clkout_dp(dev, true, true); - else + } else { lpt_disable_clkout_dp(dev); + } } /* @@ -8948,7 +9025,7 @@ static int ironlake_crtc_compute_clock(struct intel_crtc *crtc, memset(&crtc_state->dpll_hw_state, 0, sizeof(crtc_state->dpll_hw_state)); - is_lvds = intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS); + is_lvds = intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS); WARN(!(HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)), "Unexpected PCH type %d\n", INTEL_PCH_TYPE(dev)); @@ -9722,14 +9799,10 @@ static int broadwell_modeset_calc_cdclk(struct drm_atomic_state *state) else cdclk = 337500; - /* - * FIXME move the cdclk caclulation to - * compute_config() so we can fail gracegully. 
- */ if (cdclk > dev_priv->max_cdclk_freq) { - DRM_ERROR("requested cdclk (%d kHz) exceeds max (%d kHz)\n", - cdclk, dev_priv->max_cdclk_freq); - cdclk = dev_priv->max_cdclk_freq; + DRM_DEBUG_KMS("requested cdclk (%d kHz) exceeds max (%d kHz)\n", + cdclk, dev_priv->max_cdclk_freq); + return -EINVAL; } to_intel_atomic_state(state)->cdclk = cdclk; @@ -9824,6 +9897,7 @@ static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv, break; case PORT_CLK_SEL_SPLL: pipe_config->shared_dpll = DPLL_ID_SPLL; + break; } } @@ -11203,6 +11277,10 @@ static bool use_mmio_flip(struct intel_engine_cs *ring, return true; else if (i915.enable_execlists) return true; + else if (obj->base.dma_buf && + !reservation_object_test_signaled_rcu(obj->base.dma_buf->resv, + false)) + return true; else return ring != i915_gem_request_get_ring(obj->last_write_req); } @@ -11317,6 +11395,9 @@ static void intel_mmio_flip_work_func(struct work_struct *work) { struct intel_mmio_flip *mmio_flip = container_of(work, struct intel_mmio_flip, work); + struct intel_framebuffer *intel_fb = + to_intel_framebuffer(mmio_flip->crtc->base.primary->fb); + struct drm_i915_gem_object *obj = intel_fb->obj; if (mmio_flip->req) { WARN_ON(__i915_wait_request(mmio_flip->req, @@ -11326,6 +11407,12 @@ static void intel_mmio_flip_work_func(struct work_struct *work) i915_gem_request_unreference__unlocked(mmio_flip->req); } + /* For framebuffer backed by dmabuf, wait for fence */ + if (obj->base.dma_buf) + WARN_ON(reservation_object_wait_timeout_rcu(obj->base.dma_buf->resv, + false, false, + MAX_SCHEDULE_TIMEOUT) < 0); + intel_do_mmio_flip(mmio_flip); kfree(mmio_flip); } @@ -11527,7 +11614,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev)) work->flip_count = I915_READ(PIPE_FLIPCOUNT_G4X(pipe)) + 1; - if (IS_VALLEYVIEW(dev)) { + if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { ring = &dev_priv->ring[BCS]; if (obj->tiling_mode != intel_fb_obj(work->old_fb)->tiling_mode) /* vlv: DISPLAY_FLIP fails to change tiling */ @@ -11596,7 +11683,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, to_intel_plane(primary)->frontbuffer_bit); mutex_unlock(&dev->struct_mutex); - intel_fbc_disable_crtc(intel_crtc); + intel_fbc_deactivate(intel_crtc); intel_frontbuffer_flip_prepare(dev, to_intel_plane(primary)->frontbuffer_bit); @@ -11683,9 +11770,14 @@ static bool intel_wm_need_update(struct drm_plane *plane, struct intel_plane_state *cur = to_intel_plane_state(plane->state); /* Update watermarks on tiling or size changes. 
*/ - if (!plane->state->fb || !state->fb || - plane->state->fb->modifier[0] != state->fb->modifier[0] || - plane->state->rotation != state->rotation || + if (new->visible != cur->visible) + return true; + + if (!cur->base.fb || !new->base.fb) + return false; + + if (cur->base.fb->modifier[0] != new->base.fb->modifier[0] || + cur->base.rotation != new->base.rotation || drm_rect_width(&new->src) != drm_rect_width(&cur->src) || drm_rect_height(&new->src) != drm_rect_height(&cur->src) || drm_rect_width(&new->dst) != drm_rect_width(&cur->dst) || @@ -11708,6 +11800,7 @@ static bool needs_scaling(struct intel_plane_state *state) int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state, struct drm_plane_state *plane_state) { + struct intel_crtc_state *pipe_config = to_intel_crtc_state(crtc_state); struct drm_crtc *crtc = crtc_state->crtc; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct drm_plane *plane = plane_state->plane; @@ -11754,25 +11847,17 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state, plane->base.id, was_visible, visible, turn_off, turn_on, mode_changed); - if (turn_on) { - intel_crtc->atomic.update_wm_pre = true; - /* must disable cxsr around plane enable/disable */ - if (plane->type != DRM_PLANE_TYPE_CURSOR) { - intel_crtc->atomic.disable_cxsr = true; - /* to potentially re-enable cxsr */ - intel_crtc->atomic.wait_vblank = true; - intel_crtc->atomic.update_wm_post = true; - } - } else if (turn_off) { - intel_crtc->atomic.update_wm_post = true; + if (turn_on || turn_off) { + pipe_config->wm_changed = true; + /* must disable cxsr around plane enable/disable */ if (plane->type != DRM_PLANE_TYPE_CURSOR) { if (is_crtc_enabled) intel_crtc->atomic.wait_vblank = true; - intel_crtc->atomic.disable_cxsr = true; + pipe_config->disable_cxsr = true; } } else if (intel_wm_need_update(plane, plane_state)) { - intel_crtc->atomic.update_wm_pre = true; + pipe_config->wm_changed = true; } if (visible || was_visible) @@ -11917,7 +12002,7 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc, } if (mode_changed && !crtc_state->active) - intel_crtc->atomic.update_wm_post = true; + pipe_config->wm_changed = true; if (mode_changed && crtc_state->enable && dev_priv->display.crtc_compute_clock && @@ -12008,7 +12093,7 @@ compute_baseline_pipe_bpp(struct intel_crtc *crtc, struct drm_connector_state *connector_state; int bpp, i; - if ((IS_G4X(dev) || IS_VALLEYVIEW(dev))) + if ((IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))) bpp = 10*3; else if (INTEL_INFO(dev)->gen >= 5) bpp = 12*3; @@ -12603,6 +12688,8 @@ intel_pipe_config_compare(struct drm_device *dev, } else PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2); + PIPE_CONF_CHECK_I(has_dsi_encoder); + PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hdisplay); PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_htotal); PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_start); @@ -12620,7 +12707,7 @@ intel_pipe_config_compare(struct drm_device *dev, PIPE_CONF_CHECK_I(pixel_multiplier); PIPE_CONF_CHECK_I(has_hdmi_sink); if ((INTEL_INFO(dev)->gen < 8 && !IS_HASWELL(dev)) || - IS_VALLEYVIEW(dev)) + IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) PIPE_CONF_CHECK_I(limited_color_range); PIPE_CONF_CHECK_I(has_infoframe); @@ -13399,6 +13486,16 @@ static int intel_atomic_commit(struct drm_device *dev, dev_priv->display.crtc_disable(crtc); intel_crtc->active = false; intel_disable_shared_dpll(intel_crtc); + + /* + * Underruns don't always raise + * interrupts, so check manually. 
+ */ + intel_check_cpu_fifo_underruns(dev_priv); + intel_check_pch_fifo_underruns(dev_priv); + + if (!crtc->state->active) + intel_update_watermarks(crtc); } } @@ -13668,6 +13765,19 @@ intel_prepare_plane_fb(struct drm_plane *plane, return ret; } + /* For framebuffer backed by dmabuf, wait for fence */ + if (obj && obj->base.dma_buf) { + long lret; + + lret = reservation_object_wait_timeout_rcu(obj->base.dma_buf->resv, + false, true, + MAX_SCHEDULE_TIMEOUT); + if (lret == -ERESTARTSYS) + return lret; + + WARN(lret < 0, "waiting returns %li\n", lret); + } + if (!obj) { ret = 0; } else if (plane->type == DRM_PLANE_TYPE_CURSOR && @@ -13823,9 +13933,6 @@ static void intel_begin_crtc_commit(struct drm_crtc *crtc, to_intel_crtc_state(old_crtc_state); bool modeset = needs_modeset(crtc->state); - if (intel_crtc->atomic.update_wm_pre) - intel_update_watermarks(crtc); - /* Perform vblank evasion around commit operation */ intel_pipe_update_start(intel_crtc); @@ -13920,7 +14027,7 @@ static struct drm_plane *intel_primary_plane_create(struct drm_device *dev, drm_universal_plane_init(dev, &primary->base, 0, &intel_plane_funcs, intel_primary_formats, num_formats, - DRM_PLANE_TYPE_PRIMARY); + DRM_PLANE_TYPE_PRIMARY, NULL); if (INTEL_INFO(dev)->gen >= 4) intel_create_rotation_property(dev, primary); @@ -14072,7 +14179,7 @@ static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev, &intel_plane_funcs, intel_cursor_formats, ARRAY_SIZE(intel_cursor_formats), - DRM_PLANE_TYPE_CURSOR); + DRM_PLANE_TYPE_CURSOR, NULL); if (INTEL_INFO(dev)->gen >= 4) { if (!dev->mode_config.rotation_property) @@ -14149,7 +14256,7 @@ static void intel_crtc_init(struct drm_device *dev, int pipe) goto fail; ret = drm_crtc_init_with_planes(dev, &intel_crtc->base, primary, - cursor, &intel_crtc_funcs); + cursor, &intel_crtc_funcs, NULL); if (ret) goto fail; @@ -14275,7 +14382,14 @@ static bool intel_crt_present(struct drm_device *dev) if (IS_CHERRYVIEW(dev)) return false; - if (IS_VALLEYVIEW(dev) && !dev_priv->vbt.int_crt_support) + if (HAS_PCH_LPT_H(dev) && I915_READ(SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED) + return false; + + /* DDI E can't be used if DDI A requires 4 lanes */ + if (HAS_DDI(dev) && I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES) + return false; + + if (!dev_priv->vbt.int_crt_support) return false; return true; @@ -14360,7 +14474,7 @@ static void intel_setup_outputs(struct drm_device *dev) if (I915_READ(PCH_DP_D) & DP_DETECTED) intel_dp_init(dev, PCH_DP_D, PORT_D); - } else if (IS_VALLEYVIEW(dev)) { + } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { /* * The DP_DETECTED bit is the latched state of the DDC * SDA pin at boot. However since eDP doesn't require DDC @@ -14509,7 +14623,7 @@ u32 intel_fb_pitch_limit(struct drm_device *dev, uint64_t fb_modifier, * pixels and 32K bytes." 
*/ return min(8192*drm_format_plane_cpp(pixel_format, 0), 32768); - } else if (gen >= 5 && !IS_VALLEYVIEW(dev)) { + } else if (gen >= 5 && !IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) { return 32*1024; } else if (gen >= 4) { if (fb_modifier == I915_FORMAT_MOD_X_TILED) @@ -14613,7 +14727,8 @@ static int intel_framebuffer_init(struct drm_device *dev, } break; case DRM_FORMAT_ABGR8888: - if (!IS_VALLEYVIEW(dev) && INTEL_INFO(dev)->gen < 9) { + if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) && + INTEL_INFO(dev)->gen < 9) { DRM_DEBUG("unsupported pixel format: %s\n", drm_get_format_name(mode_cmd->pixel_format)); return -EINVAL; @@ -14629,7 +14744,7 @@ static int intel_framebuffer_init(struct drm_device *dev, } break; case DRM_FORMAT_ABGR2101010: - if (!IS_VALLEYVIEW(dev)) { + if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) { DRM_DEBUG("unsupported pixel format: %s\n", drm_get_format_name(mode_cmd->pixel_format)); return -EINVAL; @@ -14757,7 +14872,7 @@ static void intel_init_display(struct drm_device *dev) dev_priv->display.crtc_disable = ironlake_crtc_disable; dev_priv->display.update_primary_plane = ironlake_update_primary_plane; - } else if (IS_VALLEYVIEW(dev)) { + } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { dev_priv->display.get_pipe_config = i9xx_get_pipe_config; dev_priv->display.get_initial_plane_config = i9xx_get_initial_plane_config; @@ -14790,7 +14905,7 @@ static void intel_init_display(struct drm_device *dev) else if (IS_HASWELL(dev)) dev_priv->display.get_display_clock_speed = haswell_get_display_clock_speed; - else if (IS_VALLEYVIEW(dev)) + else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) dev_priv->display.get_display_clock_speed = valleyview_get_display_clock_speed; else if (IS_GEN5(dev)) @@ -14818,9 +14933,6 @@ static void intel_init_display(struct drm_device *dev) else if (IS_I945GM(dev) || IS_845G(dev)) dev_priv->display.get_display_clock_speed = i9xx_misc_get_display_clock_speed; - else if (IS_PINEVIEW(dev)) - dev_priv->display.get_display_clock_speed = - pnv_get_display_clock_speed; else if (IS_I915GM(dev)) dev_priv->display.get_display_clock_speed = i915gm_get_display_clock_speed; @@ -14851,7 +14963,7 @@ static void intel_init_display(struct drm_device *dev) dev_priv->display.modeset_calc_cdclk = broadwell_modeset_calc_cdclk; } - } else if (IS_VALLEYVIEW(dev)) { + } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { dev_priv->display.modeset_commit_cdclk = valleyview_modeset_commit_cdclk; dev_priv->display.modeset_calc_cdclk = @@ -15338,6 +15450,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc) WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, NULL) < 0); crtc->base.state->active = crtc->active; crtc->base.enabled = crtc->active; + crtc->base.state->connector_mask = 0; /* Because we only establish the connector -> encoder -> * crtc links if something is active, this means the @@ -15540,7 +15653,21 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) for_each_intel_connector(dev, connector) { if (connector->get_hw_state(connector)) { connector->base.dpms = DRM_MODE_DPMS_ON; - connector->base.encoder = &connector->encoder->base; + + encoder = connector->encoder; + connector->base.encoder = &encoder->base; + + if (encoder->base.crtc && + encoder->base.crtc->state->active) { + /* + * This has to be done during hardware readout + * because anything calling .crtc_disable may + * rely on the connector_mask being accurate. 
+ */ + encoder->base.crtc->state->connector_mask |= + 1 << drm_connector_index(&connector->base); + } + } else { connector->base.dpms = DRM_MODE_DPMS_OFF; connector->base.encoder = NULL; @@ -15625,7 +15752,7 @@ intel_modeset_setup_hw_state(struct drm_device *dev) pll->on = false; } - if (IS_VALLEYVIEW(dev)) + if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) vlv_wm_get_hw_state(dev); else if (IS_GEN9(dev)) skl_wm_get_hw_state(dev); @@ -15748,7 +15875,7 @@ void intel_connector_unregister(struct intel_connector *intel_connector) void intel_modeset_cleanup(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_connector *connector; + struct intel_connector *connector; intel_disable_gt_powersave(dev); @@ -15775,12 +15902,8 @@ void intel_modeset_cleanup(struct drm_device *dev) flush_scheduled_work(); /* destroy the backlight and sysfs files before encoders/connectors */ - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { - struct intel_connector *intel_connector; - - intel_connector = to_intel_connector(connector); - intel_connector->unregister(intel_connector); - } + for_each_intel_connector(dev, connector) + connector->unregister(connector); drm_mode_config_cleanup(dev); @@ -15789,6 +15912,8 @@ void intel_modeset_cleanup(struct drm_device *dev) mutex_lock(&dev->struct_mutex); intel_cleanup_gt_powersave(dev); mutex_unlock(&dev->struct_mutex); + + intel_teardown_gmbus(dev); } /* diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index e1456ead5c53..796e3d313cb9 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -389,8 +389,7 @@ vlv_power_sequencer_pipe(struct intel_dp *intel_dp) * We don't have power sequencer currently. * Pick one that's not used by other ports. */ - list_for_each_entry(encoder, &dev->mode_config.encoder_list, - base.head) { + for_each_intel_encoder(dev, encoder) { struct intel_dp *tmp; if (encoder->type != INTEL_OUTPUT_EDP) @@ -517,7 +516,7 @@ void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv) struct drm_device *dev = dev_priv->dev; struct intel_encoder *encoder; - if (WARN_ON(!IS_VALLEYVIEW(dev))) + if (WARN_ON(!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev))) return; /* @@ -530,7 +529,7 @@ void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv) * should use them always. */ - list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) { + for_each_intel_encoder(dev, encoder) { struct intel_dp *intel_dp; if (encoder->type != INTEL_OUTPUT_EDP) @@ -582,7 +581,7 @@ static int edp_notify_handler(struct notifier_block *this, unsigned long code, pps_lock(intel_dp); - if (IS_VALLEYVIEW(dev)) { + if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { enum pipe pipe = vlv_power_sequencer_pipe(intel_dp); i915_reg_t pp_ctrl_reg, pp_div_reg; u32 pp_div; @@ -610,7 +609,7 @@ static bool edp_have_panel_power(struct intel_dp *intel_dp) lockdep_assert_held(&dev_priv->pps_mutex); - if (IS_VALLEYVIEW(dev) && + if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) && intel_dp->pps_pipe == INVALID_PIPE) return false; @@ -624,7 +623,7 @@ static bool edp_have_panel_vdd(struct intel_dp *intel_dp) lockdep_assert_held(&dev_priv->pps_mutex); - if (IS_VALLEYVIEW(dev) && + if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) && intel_dp->pps_pipe == INVALID_PIPE) return false; @@ -681,7 +680,7 @@ static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index) * The clock divider is based off the hrawclk, and would like to run at * 2MHz. 
So, take the hrawclk value and divide by 2 and use that */ - return index ? 0 : intel_hrawclk(dev) / 2; + return index ? 0 : DIV_ROUND_CLOSEST(intel_hrawclk(dev), 2); } static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index) @@ -694,10 +693,10 @@ static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index) return 0; if (intel_dig_port->port == PORT_A) { - return DIV_ROUND_UP(dev_priv->cdclk_freq, 2000); + return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000); } else { - return DIV_ROUND_UP(intel_pch_rawclk(dev), 2); + return DIV_ROUND_CLOSEST(intel_pch_rawclk(dev), 2); } } @@ -711,7 +710,7 @@ static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index) if (index) return 0; return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000); - } else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) { + } else if (HAS_PCH_LPT_H(dev_priv)) { /* Workaround for non-ULT HSW */ switch (index) { case 0: return 63; @@ -719,7 +718,7 @@ static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index) default: return 0; } } else { - return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2); + return index ? 0 : DIV_ROUND_CLOSEST(intel_pch_rawclk(dev), 2); } } @@ -915,6 +914,27 @@ done: /* Unload any bytes sent back from the other side */ recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >> DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT); + + /* + * By BSpec: "Message sizes of 0 or >20 are not allowed." + * We have no idea of what happened so we return -EBUSY so + * drm layer takes care for the necessary retries. + */ + if (recv_bytes == 0 || recv_bytes > 20) { + DRM_DEBUG_KMS("Forbidden recv_bytes = %d on aux transaction\n", + recv_bytes); + /* + * FIXME: This patch was created on top of a series that + * organize the retries at drm level. There EBUSY should + * also take care for 1ms wait before retrying. + * That aux retries re-org is still needed and after that is + * merged we remove this sleep from here. + */ + usleep_range(1000, 1500); + ret = -EBUSY; + goto out; + } + if (recv_bytes > recv_size) recv_bytes = recv_size; @@ -1723,7 +1743,7 @@ static void intel_dp_prepare(struct intel_encoder *encoder) I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp); } else { if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) && - crtc->config->limited_color_range) + !IS_CHERRYVIEW(dev) && crtc->config->limited_color_range) intel_dp->DP |= DP_COLOR_RANGE_16_235; if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) @@ -2418,7 +2438,7 @@ static void intel_dp_get_config(struct intel_encoder *encoder, pipe_config->base.adjusted_mode.flags |= flags; if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) && - tmp & DP_COLOR_RANGE_16_235) + !IS_CHERRYVIEW(dev) && tmp & DP_COLOR_RANGE_16_235) pipe_config->limited_color_range = true; pipe_config->has_dp_encoder = true; @@ -2694,9 +2714,18 @@ static void intel_enable_dp(struct intel_encoder *encoder) pps_lock(intel_dp); - if (IS_VALLEYVIEW(dev)) + if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) vlv_init_panel_power_sequencer(intel_dp); + /* + * We get an occasional spurious underrun between the port + * enable and vdd enable, when enabling port A eDP. 
+ * + * FIXME: Not sure if this applies to (PCH) port D eDP as well + */ + if (port == PORT_A) + intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); + intel_dp_enable_port(intel_dp); if (port == PORT_A && IS_GEN5(dev_priv)) { @@ -2714,9 +2743,12 @@ static void intel_enable_dp(struct intel_encoder *encoder) edp_panel_on(intel_dp); edp_panel_vdd_off(intel_dp, true); + if (port == PORT_A) + intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); + pps_unlock(intel_dp); - if (IS_VALLEYVIEW(dev)) { + if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { unsigned int lane_mask = 0x0; if (IS_CHERRYVIEW(dev)) @@ -2817,8 +2849,7 @@ static void vlv_steal_power_sequencer(struct drm_device *dev, if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B)) return; - list_for_each_entry(encoder, &dev->mode_config.encoder_list, - base.head) { + for_each_intel_encoder(dev, encoder) { struct intel_dp *intel_dp; enum port port; @@ -3206,7 +3237,7 @@ intel_dp_voltage_max(struct intel_dp *intel_dp) if (dev_priv->edp_low_vswing && port == PORT_A) return DP_TRAIN_VOLTAGE_SWING_LEVEL_3; return DP_TRAIN_VOLTAGE_SWING_LEVEL_2; - } else if (IS_VALLEYVIEW(dev)) + } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) return DP_TRAIN_VOLTAGE_SWING_LEVEL_3; else if (IS_GEN7(dev) && port == PORT_A) return DP_TRAIN_VOLTAGE_SWING_LEVEL_2; @@ -3247,7 +3278,7 @@ intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing) default: return DP_TRAIN_PRE_EMPH_LEVEL_0; } - } else if (IS_VALLEYVIEW(dev)) { + } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: return DP_TRAIN_PRE_EMPH_LEVEL_3; @@ -4527,7 +4558,7 @@ bool intel_digital_port_connected(struct drm_i915_private *dev_priv, return cpt_digital_port_connected(dev_priv, port); else if (IS_BROXTON(dev_priv)) return bxt_digital_port_connected(dev_priv, port); - else if (IS_VALLEYVIEW(dev_priv)) + else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) return vlv_digital_port_connected(dev_priv, port); else return g4x_digital_port_connected(dev_priv, port); @@ -4921,7 +4952,7 @@ static void intel_dp_encoder_reset(struct drm_encoder *encoder) * Read out the current power sequencer assignment, * in case the BIOS did something with it. */ - if (IS_VALLEYVIEW(encoder->dev)) + if (IS_VALLEYVIEW(encoder->dev) || IS_CHERRYVIEW(encoder->dev)) vlv_initial_power_sequencer_setup(intel_dp); intel_edp_panel_vdd_sanitize(intel_dp); @@ -5281,7 +5312,7 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev, /* Haswell doesn't have any port selection bits for the panel * power sequencer any more. 
*/ - if (IS_VALLEYVIEW(dev)) { + if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { port_sel = PANEL_PORT_SELECT_VLV(port); } else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) { if (port == PORT_A) @@ -5393,12 +5424,12 @@ static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate) val = I915_READ(reg); if (index > DRRS_HIGH_RR) { - if (IS_VALLEYVIEW(dev)) + if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV; else val |= PIPECONF_EDP_RR_MODE_SWITCH; } else { - if (IS_VALLEYVIEW(dev)) + if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV; else val &= ~PIPECONF_EDP_RR_MODE_SWITCH; @@ -5765,7 +5796,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp, } mutex_unlock(&dev->mode_config.mutex); - if (IS_VALLEYVIEW(dev)) { + if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { intel_dp->edp_notifier.notifier_call = edp_notify_handler; register_reboot_notifier(&intel_dp->edp_notifier); @@ -5813,7 +5844,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port, /* intel_dp vfuncs */ if (INTEL_INFO(dev)->gen >= 9) intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider; - else if (IS_VALLEYVIEW(dev)) + else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider; else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider; @@ -5848,8 +5879,8 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port, intel_encoder->type = INTEL_OUTPUT_EDP; /* eDP only on port B and/or C on vlv/chv */ - if (WARN_ON(IS_VALLEYVIEW(dev) && is_edp(intel_dp) && - port != PORT_B && port != PORT_C)) + if (WARN_ON((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) && + is_edp(intel_dp) && port != PORT_B && port != PORT_C)) return false; DRM_DEBUG_KMS("Adding %s connector on port %c\n", @@ -5900,7 +5931,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port, if (is_edp(intel_dp)) { pps_lock(intel_dp); intel_dp_init_panel_power_timestamps(intel_dp); - if (IS_VALLEYVIEW(dev)) + if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) vlv_initial_power_sequencer_setup(intel_dp); else intel_dp_init_panel_power_sequencer(dev, intel_dp); @@ -5976,8 +6007,9 @@ intel_dp_init(struct drm_device *dev, intel_encoder = &intel_dig_port->base; encoder = &intel_encoder->base; - drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs, - DRM_MODE_ENCODER_TMDS); + if (drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs, + DRM_MODE_ENCODER_TMDS, NULL)) + goto err_encoder_init; intel_encoder->compute_config = intel_dp_compute_config; intel_encoder->disable = intel_disable_dp; @@ -6027,6 +6059,7 @@ intel_dp_init(struct drm_device *dev, err_init_connector: drm_encoder_cleanup(encoder); +err_encoder_init: kfree(intel_connector); err_connector_alloc: kfree(intel_dig_port); diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c index 8c4e7dfe304c..fa0dabf578dc 100644 --- a/drivers/gpu/drm/i915/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/intel_dp_mst.c @@ -78,6 +78,8 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder, return false; } + if (drm_dp_mst_port_has_audio(&intel_dp->mst_mgr, found->port)) + pipe_config->has_audio = true; mst_pbn = drm_dp_calc_pbn_mode(adjusted_mode->crtc_clock, bpp); pipe_config->pbn = mst_pbn; @@ -102,6 +104,11 @@ static void intel_mst_disable_dp(struct intel_encoder *encoder) struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base); 
struct intel_digital_port *intel_dig_port = intel_mst->primary; struct intel_dp *intel_dp = &intel_dig_port->dp; + struct drm_device *dev = encoder->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_crtc *crtc = encoder->base.crtc; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + int ret; DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links); @@ -112,6 +119,10 @@ static void intel_mst_disable_dp(struct intel_encoder *encoder) if (ret) { DRM_ERROR("failed to update payload %d\n", ret); } + if (intel_crtc->config->has_audio) { + intel_audio_codec_disable(encoder); + intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO); + } } static void intel_mst_post_disable_dp(struct intel_encoder *encoder) @@ -208,6 +219,7 @@ static void intel_mst_enable_dp(struct intel_encoder *encoder) struct intel_dp *intel_dp = &intel_dig_port->dp; struct drm_device *dev = intel_dig_port->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); enum port port = intel_dig_port->port; int ret; @@ -220,6 +232,13 @@ static void intel_mst_enable_dp(struct intel_encoder *encoder) ret = drm_dp_check_act_status(&intel_dp->mst_mgr); ret = drm_dp_update_payload_part2(&intel_dp->mst_mgr); + + if (crtc->config->has_audio) { + DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n", + pipe_name(crtc->pipe)); + intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO); + intel_audio_codec_enable(encoder); + } } static bool intel_dp_mst_enc_get_hw_state(struct intel_encoder *encoder, @@ -245,6 +264,9 @@ static void intel_dp_mst_enc_get_config(struct intel_encoder *encoder, pipe_config->has_dp_encoder = true; + pipe_config->has_audio = + intel_ddi_is_audio_enabled(dev_priv, crtc); + temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder)); if (temp & TRANS_DDI_PHSYNC) flags |= DRM_MODE_FLAG_PHSYNC; @@ -512,7 +534,7 @@ static void intel_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr) drm_kms_helper_hotplug_event(dev); } -static struct drm_dp_mst_topology_cbs mst_cbs = { +static const struct drm_dp_mst_topology_cbs mst_cbs = { .add_connector = intel_dp_add_mst_connector, .register_connector = intel_dp_register_mst_connector, .destroy_connector = intel_dp_destroy_mst_connector, @@ -536,7 +558,7 @@ intel_dp_create_fake_mst_encoder(struct intel_digital_port *intel_dig_port, enum intel_mst->primary = intel_dig_port; drm_encoder_init(dev, &intel_encoder->base, &intel_dp_mst_enc_funcs, - DRM_MODE_ENCODER_DPMST); + DRM_MODE_ENCODER_DPMST, NULL); intel_encoder->type = INTEL_OUTPUT_DP_MST; intel_encoder->crtc_mask = 0x7; diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 86ce3c2ed79a..ea5415851c6e 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -365,7 +365,9 @@ struct intel_crtc_state { #define PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS (1<<0) /* unreliable sync mode.flags */ unsigned long quirks; - bool update_pipe; + bool update_pipe; /* can a fast modeset be performed? */ + bool disable_cxsr; + bool wm_changed; /* watermarks are updated */ /* Pipe source size (ie. panel fitter input size) * All planes will be positioned inside this space, @@ -393,6 +395,9 @@ struct intel_crtc_state { * accordingly. */ bool has_dp_encoder; + /* DSI has special cases */ + bool has_dsi_encoder; + /* Whether we should send NULL infoframes. Required for audio. 
*/ bool has_hdmi_sink; @@ -528,9 +533,7 @@ struct intel_crtc_atomic_commit { /* Sleepable operations to perform before commit */ bool disable_fbc; bool disable_ips; - bool disable_cxsr; bool pre_disable_primary; - bool update_wm_pre, update_wm_post; /* Sleepable operations to perform after commit */ unsigned fb_bits; @@ -709,7 +712,8 @@ struct intel_hdmi { void (*set_infoframes)(struct drm_encoder *encoder, bool enable, const struct drm_display_mode *adjusted_mode); - bool (*infoframe_enabled)(struct drm_encoder *encoder); + bool (*infoframe_enabled)(struct drm_encoder *encoder, + const struct intel_crtc_state *pipe_config); }; struct intel_dp_mst_encoder; @@ -1009,6 +1013,8 @@ void intel_ddi_set_pipe_settings(struct drm_crtc *crtc); void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp); bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector); void intel_ddi_fdi_disable(struct drm_crtc *crtc); +bool intel_ddi_is_audio_enabled(struct drm_i915_private *dev_priv, + struct intel_crtc *intel_crtc); void intel_ddi_get_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config); struct intel_encoder * @@ -1317,9 +1323,11 @@ static inline void intel_fbdev_restore_mode(struct drm_device *dev) #endif /* intel_fbc.c */ -bool intel_fbc_enabled(struct drm_i915_private *dev_priv); -void intel_fbc_update(struct drm_i915_private *dev_priv); +bool intel_fbc_is_active(struct drm_i915_private *dev_priv); +void intel_fbc_deactivate(struct intel_crtc *crtc); +void intel_fbc_update(struct intel_crtc *crtc); void intel_fbc_init(struct drm_i915_private *dev_priv); +void intel_fbc_enable(struct intel_crtc *crtc); void intel_fbc_disable(struct drm_i915_private *dev_priv); void intel_fbc_disable_crtc(struct intel_crtc *crtc); void intel_fbc_invalidate(struct drm_i915_private *dev_priv, @@ -1411,6 +1419,8 @@ void intel_power_domains_suspend(struct drm_i915_private *dev_priv); void skl_pw1_misc_io_init(struct drm_i915_private *dev_priv); void skl_pw1_misc_io_fini(struct drm_i915_private *dev_priv); void intel_runtime_pm_enable(struct drm_i915_private *dev_priv); +const char * +intel_display_power_domain_str(enum intel_display_power_domain domain); bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv, enum intel_display_power_domain domain); @@ -1420,6 +1430,89 @@ void intel_display_power_get(struct drm_i915_private *dev_priv, enum intel_display_power_domain domain); void intel_display_power_put(struct drm_i915_private *dev_priv, enum intel_display_power_domain domain); + +static inline void +assert_rpm_device_not_suspended(struct drm_i915_private *dev_priv) +{ + WARN_ONCE(dev_priv->pm.suspended, + "Device suspended during HW access\n"); +} + +static inline void +assert_rpm_wakelock_held(struct drm_i915_private *dev_priv) +{ + assert_rpm_device_not_suspended(dev_priv); + /* FIXME: Needs to be converted back to WARN_ONCE, but currently causes + * too much noise. 
*/ + if (!atomic_read(&dev_priv->pm.wakeref_count)) + DRM_DEBUG_DRIVER("RPM wakelock ref not held during HW access"); +} + +static inline int +assert_rpm_atomic_begin(struct drm_i915_private *dev_priv) +{ + int seq = atomic_read(&dev_priv->pm.atomic_seq); + + assert_rpm_wakelock_held(dev_priv); + + return seq; +} + +static inline void +assert_rpm_atomic_end(struct drm_i915_private *dev_priv, int begin_seq) +{ + WARN_ONCE(atomic_read(&dev_priv->pm.atomic_seq) != begin_seq, + "HW access outside of RPM atomic section\n"); +} + +/** + * disable_rpm_wakeref_asserts - disable the RPM assert checks + * @dev_priv: i915 device instance + * + * This function disables asserts that check if we hold an RPM wakelock + * reference, while keeping the device-not-suspended checks still enabled. + * It's meant to be used only in special circumstances where our rule about + * the wakelock refcount wrt. the device power state doesn't hold. According + * to this rule, at any point where we access the HW or want to keep the HW in + * an active state we must hold an RPM wakelock reference acquired via one of + * the intel_runtime_pm_get() helpers. Currently there are a few special spots + * where this rule doesn't hold: the IRQ and suspend/resume handlers, the + * forcewake release timer, and the GPU RPS and hangcheck works. All other + * users should avoid using this function. + * + * Any calls to this function must have a symmetric call to + * enable_rpm_wakeref_asserts(). + */ +static inline void +disable_rpm_wakeref_asserts(struct drm_i915_private *dev_priv) +{ + atomic_inc(&dev_priv->pm.wakeref_count); +} + +/** + * enable_rpm_wakeref_asserts - re-enable the RPM assert checks + * @dev_priv: i915 device instance + * + * This function re-enables the RPM assert checks after disabling them with + * disable_rpm_wakeref_asserts(). It's meant to be used only in special + * circumstances; otherwise its use should be avoided. + * + * Any calls to this function must have a symmetric call to + * disable_rpm_wakeref_asserts(). 
+ */ +static inline void +enable_rpm_wakeref_asserts(struct drm_i915_private *dev_priv) +{ + atomic_dec(&dev_priv->pm.wakeref_count); +} + +/* TODO: convert users of these to rely instead on proper RPM refcounting */ +#define DISABLE_RPM_WAKEREF_ASSERTS(dev_priv) \ + disable_rpm_wakeref_asserts(dev_priv) + +#define ENABLE_RPM_WAKEREF_ASSERTS(dev_priv) \ + enable_rpm_wakeref_asserts(dev_priv) + void intel_runtime_pm_get(struct drm_i915_private *dev_priv); void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv); void intel_runtime_pm_put(struct drm_i915_private *dev_priv); diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c index efb5a27dd49c..44742fa2f616 100644 --- a/drivers/gpu/drm/i915/intel_dsi.c +++ b/drivers/gpu/drm/i915/intel_dsi.c @@ -266,16 +266,18 @@ static inline bool is_cmd_mode(struct intel_dsi *intel_dsi) } static bool intel_dsi_compute_config(struct intel_encoder *encoder, - struct intel_crtc_state *config) + struct intel_crtc_state *pipe_config) { struct intel_dsi *intel_dsi = container_of(encoder, struct intel_dsi, base); struct intel_connector *intel_connector = intel_dsi->attached_connector; struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode; - struct drm_display_mode *adjusted_mode = &config->base.adjusted_mode; + struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; DRM_DEBUG_KMS("\n"); + pipe_config->has_dsi_encoder = true; + if (fixed_mode) intel_fixed_panel_mode(fixed_mode, adjusted_mode); @@ -367,7 +369,7 @@ static void intel_dsi_device_ready(struct intel_encoder *encoder) { struct drm_device *dev = encoder->base.dev; - if (IS_VALLEYVIEW(dev)) + if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) vlv_dsi_device_ready(encoder); else if (IS_BROXTON(dev)) bxt_dsi_device_ready(encoder); @@ -462,6 +464,8 @@ static void intel_dsi_enable(struct intel_encoder *encoder) intel_panel_enable_backlight(intel_dsi->attached_connector); } +static void intel_dsi_prepare(struct intel_encoder *intel_encoder); + static void intel_dsi_pre_enable(struct intel_encoder *encoder) { struct drm_device *dev = encoder->base.dev; @@ -474,13 +478,16 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder) DRM_DEBUG_KMS("\n"); + intel_dsi_prepare(encoder); + intel_enable_dsi_pll(encoder); + /* Panel Enable over CRC PMIC */ if (intel_dsi->gpio_panel) gpiod_set_value_cansleep(intel_dsi->gpio_panel, 1); msleep(intel_dsi->panel_on_delay); - if (IS_VALLEYVIEW(dev)) { + if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { /* * Disable DPOunit clock gating, can stall pipe * and we need DPLL REFA always enabled @@ -677,8 +684,7 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder, * Enable bit does not get set. 
To check whether DSI Port C * was enabled in BIOS, check the Pipe B enable bit */ - if (IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) && - (port == PORT_C)) + if (IS_VALLEYVIEW(dev) && port == PORT_C) dpi_enabled = I915_READ(PIPECONF(PIPE_B)) & PIPECONF_ENABLE; @@ -699,6 +705,8 @@ static void intel_dsi_get_config(struct intel_encoder *encoder, u32 pclk = 0; DRM_DEBUG_KMS("\n"); + pipe_config->has_dsi_encoder = true; + /* * DPLL_MD is not used in case of DSI, reading will get some default value * set dpll_md = 0 @@ -707,7 +715,8 @@ static void intel_dsi_get_config(struct intel_encoder *encoder, if (IS_BROXTON(encoder->base.dev)) pclk = bxt_get_dsi_pclk(encoder, pipe_config->pipe_bpp); - else if (IS_VALLEYVIEW(encoder->base.dev)) + else if (IS_VALLEYVIEW(encoder->base.dev) || + IS_CHERRYVIEW(encoder->base.dev)) pclk = vlv_get_dsi_pclk(encoder, pipe_config->pipe_bpp); if (!pclk) @@ -860,7 +869,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder) } for_each_dsi_port(port, intel_dsi->ports) { - if (IS_VALLEYVIEW(dev)) { + if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { /* * escape clock divider, 20MHz, shared for A and C. * device ready must be off when doing this! txclkesc? @@ -876,21 +885,12 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder) I915_WRITE(MIPI_CTRL(port), tmp | READ_REQUEST_PRIORITY_HIGH); } else if (IS_BROXTON(dev)) { - /* - * FIXME: - * BXT can connect any PIPE to any MIPI port. - * Select the pipe based on the MIPI port read from - * VBT for now. Pick PIPE A for MIPI port A and C - * for port C. - */ + enum pipe pipe = intel_crtc->pipe; + tmp = I915_READ(MIPI_CTRL(port)); tmp &= ~BXT_PIPE_SELECT_MASK; - if (port == PORT_A) - tmp |= BXT_PIPE_SELECT_A; - else if (port == PORT_C) - tmp |= BXT_PIPE_SELECT_C; - + tmp |= BXT_PIPE_SELECT(pipe); I915_WRITE(MIPI_CTRL(port), tmp); } @@ -1026,15 +1026,6 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder) } } -static void intel_dsi_pre_pll_enable(struct intel_encoder *encoder) -{ - DRM_DEBUG_KMS("\n"); - - intel_dsi_prepare(encoder); - intel_enable_dsi_pll(encoder); - -} - static enum drm_connector_status intel_dsi_detect(struct drm_connector *connector, bool force) { @@ -1129,7 +1120,7 @@ void intel_dsi_init(struct drm_device *dev) if (!dev_priv->vbt.has_mipi) return; - if (IS_VALLEYVIEW(dev)) { + if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { dev_priv->mipi_mmio_base = VLV_MIPI_BASE; } else { DRM_ERROR("Unsupported Mipi device to reg base"); @@ -1152,11 +1143,10 @@ void intel_dsi_init(struct drm_device *dev) connector = &intel_connector->base; - drm_encoder_init(dev, encoder, &intel_dsi_funcs, DRM_MODE_ENCODER_DSI); + drm_encoder_init(dev, encoder, &intel_dsi_funcs, DRM_MODE_ENCODER_DSI, + NULL); - /* XXX: very likely not all of these are needed */ intel_encoder->compute_config = intel_dsi_compute_config; - intel_encoder->pre_pll_enable = intel_dsi_pre_pll_enable; intel_encoder->pre_enable = intel_dsi_pre_enable; intel_encoder->enable = intel_dsi_enable_nop; intel_encoder->disable = intel_dsi_pre_disable; diff --git a/drivers/gpu/drm/i915/intel_dsi_pll.c b/drivers/gpu/drm/i915/intel_dsi_pll.c index cb3cf3986212..fbd2b51810ca 100644 --- a/drivers/gpu/drm/i915/intel_dsi_pll.c +++ b/drivers/gpu/drm/i915/intel_dsi_pll.c @@ -561,7 +561,7 @@ void intel_enable_dsi_pll(struct intel_encoder *encoder) { struct drm_device *dev = encoder->base.dev; - if (IS_VALLEYVIEW(dev)) + if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) vlv_enable_dsi_pll(encoder); else if (IS_BROXTON(dev)) 
bxt_enable_dsi_pll(encoder); @@ -571,7 +571,7 @@ void intel_disable_dsi_pll(struct intel_encoder *encoder) { struct drm_device *dev = encoder->base.dev; - if (IS_VALLEYVIEW(dev)) + if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) vlv_disable_dsi_pll(encoder); else if (IS_BROXTON(dev)) bxt_disable_dsi_pll(encoder); @@ -599,6 +599,6 @@ void intel_dsi_reset_clocks(struct intel_encoder *encoder, enum port port) if (IS_BROXTON(dev)) bxt_dsi_reset_clocks(encoder, port); - else if (IS_VALLEYVIEW(dev)) + else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) vlv_dsi_reset_clocks(encoder, port); } diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c index 7161deb2aed8..286baec979c8 100644 --- a/drivers/gpu/drm/i915/intel_dvo.c +++ b/drivers/gpu/drm/i915/intel_dvo.c @@ -429,7 +429,7 @@ void intel_dvo_init(struct drm_device *dev) intel_encoder = &intel_dvo->base; drm_encoder_init(dev, &intel_encoder->base, - &intel_dvo_enc_funcs, encoder_type); + &intel_dvo_enc_funcs, encoder_type, NULL); intel_encoder->disable = intel_disable_dvo; intel_encoder->enable = intel_enable_dvo; diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c index 11fc5281e8ef..a1988a486b92 100644 --- a/drivers/gpu/drm/i915/intel_fbc.c +++ b/drivers/gpu/drm/i915/intel_fbc.c @@ -43,7 +43,7 @@ static inline bool fbc_supported(struct drm_i915_private *dev_priv) { - return dev_priv->fbc.enable_fbc != NULL; + return dev_priv->fbc.activate != NULL; } static inline bool fbc_on_pipe_a_only(struct drm_i915_private *dev_priv) @@ -51,6 +51,11 @@ static inline bool fbc_on_pipe_a_only(struct drm_i915_private *dev_priv) return IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8; } +static inline bool fbc_on_plane_a_only(struct drm_i915_private *dev_priv) +{ + return INTEL_INFO(dev_priv)->gen < 4; +} + /* * In some platforms where the CRTC's x:0/y:0 coordinates doesn't match the * frontbuffer's x:0/y:0 coordinates we lie to the hardware about the plane's @@ -64,11 +69,51 @@ static unsigned int get_crtc_fence_y_offset(struct intel_crtc *crtc) return crtc->base.y - crtc->adjusted_y; } -static void i8xx_fbc_disable(struct drm_i915_private *dev_priv) +/* + * For SKL+, the plane source size used by the hardware is based on the value we + * write to the PLANE_SIZE register. For BDW-, the hardware looks at the value + * we wrote to PIPESRC. + */ +static void intel_fbc_get_plane_source_size(struct intel_crtc *crtc, + int *width, int *height) +{ + struct intel_plane_state *plane_state = + to_intel_plane_state(crtc->base.primary->state); + int w, h; + + if (intel_rotation_90_or_270(plane_state->base.rotation)) { + w = drm_rect_height(&plane_state->src) >> 16; + h = drm_rect_width(&plane_state->src) >> 16; + } else { + w = drm_rect_width(&plane_state->src) >> 16; + h = drm_rect_height(&plane_state->src) >> 16; + } + + if (width) + *width = w; + if (height) + *height = h; +} + +static int intel_fbc_calculate_cfb_size(struct intel_crtc *crtc, + struct drm_framebuffer *fb) +{ + struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; + int lines; + + intel_fbc_get_plane_source_size(crtc, NULL, &lines); + if (INTEL_INFO(dev_priv)->gen >= 7) + lines = min(lines, 2048); + + /* Hardware needs the full buffer stride, not just the active area. 
*/ + return lines * fb->pitches[0]; +} + +static void i8xx_fbc_deactivate(struct drm_i915_private *dev_priv) { u32 fbc_ctl; - dev_priv->fbc.enabled = false; + dev_priv->fbc.active = false; /* Disable compression */ fbc_ctl = I915_READ(FBC_CONTROL); @@ -83,11 +128,9 @@ static void i8xx_fbc_disable(struct drm_i915_private *dev_priv) DRM_DEBUG_KMS("FBC idle timed out\n"); return; } - - DRM_DEBUG_KMS("disabled FBC\n"); } -static void i8xx_fbc_enable(struct intel_crtc *crtc) +static void i8xx_fbc_activate(struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; struct drm_framebuffer *fb = crtc->base.primary->fb; @@ -96,10 +139,10 @@ static void i8xx_fbc_enable(struct intel_crtc *crtc) int i; u32 fbc_ctl; - dev_priv->fbc.enabled = true; + dev_priv->fbc.active = true; /* Note: fbc.threshold == 1 for i8xx */ - cfb_pitch = dev_priv->fbc.uncompressed_size / FBC_LL_SIZE; + cfb_pitch = intel_fbc_calculate_cfb_size(crtc, fb) / FBC_LL_SIZE; if (fb->pitches[0] < cfb_pitch) cfb_pitch = fb->pitches[0]; @@ -132,24 +175,21 @@ static void i8xx_fbc_enable(struct intel_crtc *crtc) fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT; fbc_ctl |= obj->fence_reg; I915_WRITE(FBC_CONTROL, fbc_ctl); - - DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %c\n", - cfb_pitch, crtc->base.y, plane_name(crtc->plane)); } -static bool i8xx_fbc_enabled(struct drm_i915_private *dev_priv) +static bool i8xx_fbc_is_active(struct drm_i915_private *dev_priv) { return I915_READ(FBC_CONTROL) & FBC_CTL_EN; } -static void g4x_fbc_enable(struct intel_crtc *crtc) +static void g4x_fbc_activate(struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; struct drm_framebuffer *fb = crtc->base.primary->fb; struct drm_i915_gem_object *obj = intel_fb_obj(fb); u32 dpfc_ctl; - dev_priv->fbc.enabled = true; + dev_priv->fbc.active = true; dpfc_ctl = DPFC_CTL_PLANE(crtc->plane) | DPFC_SR_EN; if (drm_format_plane_cpp(fb->pixel_format, 0) == 2) @@ -162,27 +202,23 @@ static void g4x_fbc_enable(struct intel_crtc *crtc) /* enable it... 
*/ I915_WRITE(DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN); - - DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(crtc->plane)); } -static void g4x_fbc_disable(struct drm_i915_private *dev_priv) +static void g4x_fbc_deactivate(struct drm_i915_private *dev_priv) { u32 dpfc_ctl; - dev_priv->fbc.enabled = false; + dev_priv->fbc.active = false; /* Disable compression */ dpfc_ctl = I915_READ(DPFC_CONTROL); if (dpfc_ctl & DPFC_CTL_EN) { dpfc_ctl &= ~DPFC_CTL_EN; I915_WRITE(DPFC_CONTROL, dpfc_ctl); - - DRM_DEBUG_KMS("disabled FBC\n"); } } -static bool g4x_fbc_enabled(struct drm_i915_private *dev_priv) +static bool g4x_fbc_is_active(struct drm_i915_private *dev_priv) { return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN; } @@ -194,7 +230,7 @@ static void intel_fbc_recompress(struct drm_i915_private *dev_priv) POSTING_READ(MSG_FBC_REND_STATE); } -static void ilk_fbc_enable(struct intel_crtc *crtc) +static void ilk_fbc_activate(struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; struct drm_framebuffer *fb = crtc->base.primary->fb; @@ -203,7 +239,7 @@ static void ilk_fbc_enable(struct intel_crtc *crtc) int threshold = dev_priv->fbc.threshold; unsigned int y_offset; - dev_priv->fbc.enabled = true; + dev_priv->fbc.active = true; dpfc_ctl = DPFC_CTL_PLANE(crtc->plane); if (drm_format_plane_cpp(fb->pixel_format, 0) == 2) @@ -238,32 +274,28 @@ static void ilk_fbc_enable(struct intel_crtc *crtc) } intel_fbc_recompress(dev_priv); - - DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(crtc->plane)); } -static void ilk_fbc_disable(struct drm_i915_private *dev_priv) +static void ilk_fbc_deactivate(struct drm_i915_private *dev_priv) { u32 dpfc_ctl; - dev_priv->fbc.enabled = false; + dev_priv->fbc.active = false; /* Disable compression */ dpfc_ctl = I915_READ(ILK_DPFC_CONTROL); if (dpfc_ctl & DPFC_CTL_EN) { dpfc_ctl &= ~DPFC_CTL_EN; I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl); - - DRM_DEBUG_KMS("disabled FBC\n"); } } -static bool ilk_fbc_enabled(struct drm_i915_private *dev_priv) +static bool ilk_fbc_is_active(struct drm_i915_private *dev_priv) { return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN; } -static void gen7_fbc_enable(struct intel_crtc *crtc) +static void gen7_fbc_activate(struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; struct drm_framebuffer *fb = crtc->base.primary->fb; @@ -271,7 +303,7 @@ static void gen7_fbc_enable(struct intel_crtc *crtc) u32 dpfc_ctl; int threshold = dev_priv->fbc.threshold; - dev_priv->fbc.enabled = true; + dev_priv->fbc.active = true; dpfc_ctl = 0; if (IS_IVYBRIDGE(dev_priv)) @@ -317,103 +349,41 @@ static void gen7_fbc_enable(struct intel_crtc *crtc) I915_WRITE(DPFC_CPU_FENCE_OFFSET, get_crtc_fence_y_offset(crtc)); intel_fbc_recompress(dev_priv); - - DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(crtc->plane)); } /** - * intel_fbc_enabled - Is FBC enabled? + * intel_fbc_is_active - Is FBC active? * @dev_priv: i915 device instance * * This function is used to verify the current state of FBC. * FIXME: This should be tracked in the plane config eventually * instead of queried at runtime for most callers. 
*/ -bool intel_fbc_enabled(struct drm_i915_private *dev_priv) +bool intel_fbc_is_active(struct drm_i915_private *dev_priv) { - return dev_priv->fbc.enabled; + return dev_priv->fbc.active; } -static void intel_fbc_enable(struct intel_crtc *crtc, - const struct drm_framebuffer *fb) +static void intel_fbc_activate(const struct drm_framebuffer *fb) { - struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; + struct drm_i915_private *dev_priv = fb->dev->dev_private; + struct intel_crtc *crtc = dev_priv->fbc.crtc; - dev_priv->fbc.enable_fbc(crtc); + dev_priv->fbc.activate(crtc); - dev_priv->fbc.crtc = crtc; dev_priv->fbc.fb_id = fb->base.id; dev_priv->fbc.y = crtc->base.y; } static void intel_fbc_work_fn(struct work_struct *__work) { - struct intel_fbc_work *work = - container_of(to_delayed_work(__work), - struct intel_fbc_work, work); - struct drm_i915_private *dev_priv = work->crtc->base.dev->dev_private; - struct drm_framebuffer *crtc_fb = work->crtc->base.primary->fb; - - mutex_lock(&dev_priv->fbc.lock); - if (work == dev_priv->fbc.fbc_work) { - /* Double check that we haven't switched fb without cancelling - * the prior work. - */ - if (crtc_fb == work->fb) - intel_fbc_enable(work->crtc, work->fb); - - dev_priv->fbc.fbc_work = NULL; - } - mutex_unlock(&dev_priv->fbc.lock); - - kfree(work); -} - -static void intel_fbc_cancel_work(struct drm_i915_private *dev_priv) -{ - WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock)); - - if (dev_priv->fbc.fbc_work == NULL) - return; - - /* Synchronisation is provided by struct_mutex and checking of - * dev_priv->fbc.fbc_work, so we can perform the cancellation - * entirely asynchronously. - */ - if (cancel_delayed_work(&dev_priv->fbc.fbc_work->work)) - /* tasklet was killed before being run, clean up */ - kfree(dev_priv->fbc.fbc_work); - - /* Mark the work as no longer wanted so that if it does - * wake-up (because the work was already running and waiting - * for our mutex), it will discover that is no longer - * necessary to run. - */ - dev_priv->fbc.fbc_work = NULL; -} - -static void intel_fbc_schedule_enable(struct intel_crtc *crtc) -{ - struct intel_fbc_work *work; - struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; - - WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock)); - - intel_fbc_cancel_work(dev_priv); - - work = kzalloc(sizeof(*work), GFP_KERNEL); - if (work == NULL) { - DRM_ERROR("Failed to allocate FBC work structure\n"); - intel_fbc_enable(crtc, crtc->base.primary->fb); - return; - } - - work->crtc = crtc; - work->fb = crtc->base.primary->fb; - INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn); - - dev_priv->fbc.fbc_work = work; + struct drm_i915_private *dev_priv = + container_of(__work, struct drm_i915_private, fbc.work.work); + struct intel_fbc_work *work = &dev_priv->fbc.work; + struct intel_crtc *crtc = dev_priv->fbc.crtc; + int delay_ms = 50; +retry: /* Delay the actual enabling to let pageflipping cease and the * display to settle before starting the compression. Note that * this delay also serves a second purpose: it allows for a @@ -427,43 +397,71 @@ static void intel_fbc_schedule_enable(struct intel_crtc *crtc) * * WaFbcWaitForVBlankBeforeEnable:ilk,snb */ - schedule_delayed_work(&work->work, msecs_to_jiffies(50)); + wait_remaining_ms_from_jiffies(work->enable_jiffies, delay_ms); + + mutex_lock(&dev_priv->fbc.lock); + + /* Were we cancelled? */ + if (!work->scheduled) + goto out; + + /* Were we delayed again while this function was sleeping? 
*/ + if (time_after(work->enable_jiffies + msecs_to_jiffies(delay_ms), + jiffies)) { + mutex_unlock(&dev_priv->fbc.lock); + goto retry; + } + + if (crtc->base.primary->fb == work->fb) + intel_fbc_activate(work->fb); + + work->scheduled = false; + +out: + mutex_unlock(&dev_priv->fbc.lock); } -static void __intel_fbc_disable(struct drm_i915_private *dev_priv) +static void intel_fbc_cancel_work(struct drm_i915_private *dev_priv) +{ + WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock)); + dev_priv->fbc.work.scheduled = false; +} + +static void intel_fbc_schedule_activation(struct intel_crtc *crtc) +{ + struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; + struct intel_fbc_work *work = &dev_priv->fbc.work; + + WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock)); + + /* It is useless to call intel_fbc_cancel_work() in this function since + * we're not releasing fbc.lock, so it won't have an opportunity to grab + * it to discover that it was cancelled. So we just update the expected + * jiffy count. */ + work->fb = crtc->base.primary->fb; + work->scheduled = true; + work->enable_jiffies = jiffies; + + schedule_work(&work->work); +} + +static void __intel_fbc_deactivate(struct drm_i915_private *dev_priv) { WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock)); intel_fbc_cancel_work(dev_priv); - if (dev_priv->fbc.enabled) - dev_priv->fbc.disable_fbc(dev_priv); - dev_priv->fbc.crtc = NULL; -} - -/** - * intel_fbc_disable - disable FBC - * @dev_priv: i915 device instance - * - * This function disables FBC. - */ -void intel_fbc_disable(struct drm_i915_private *dev_priv) -{ - if (!fbc_supported(dev_priv)) - return; - - mutex_lock(&dev_priv->fbc.lock); - __intel_fbc_disable(dev_priv); - mutex_unlock(&dev_priv->fbc.lock); + if (dev_priv->fbc.active) + dev_priv->fbc.deactivate(dev_priv); } /* - * intel_fbc_disable_crtc - disable FBC if it's associated with crtc + * intel_fbc_deactivate - deactivate FBC if it's associated with crtc * @crtc: the CRTC * - * This function disables FBC if it's associated with the provided CRTC. + * This function deactivates FBC if it's associated with the provided CRTC. 
*/ -void intel_fbc_disable_crtc(struct intel_crtc *crtc) +void intel_fbc_deactivate(struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; @@ -472,7 +470,7 @@ void intel_fbc_disable_crtc(struct intel_crtc *crtc) mutex_lock(&dev_priv->fbc.lock); if (dev_priv->fbc.crtc == crtc) - __intel_fbc_disable(dev_priv); + __intel_fbc_deactivate(dev_priv); mutex_unlock(&dev_priv->fbc.lock); } @@ -486,13 +484,21 @@ static void set_no_fbc_reason(struct drm_i915_private *dev_priv, DRM_DEBUG_KMS("Disabling FBC: %s\n", reason); } -static bool crtc_is_valid(struct intel_crtc *crtc) +static bool crtc_can_fbc(struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; if (fbc_on_pipe_a_only(dev_priv) && crtc->pipe != PIPE_A) return false; + if (fbc_on_plane_a_only(dev_priv) && crtc->plane != PLANE_A) + return false; + + return true; +} + +static bool crtc_is_valid(struct intel_crtc *crtc) +{ if (!intel_crtc_active(&crtc->base)) return false; @@ -502,24 +508,6 @@ static bool crtc_is_valid(struct intel_crtc *crtc) return true; } -static struct drm_crtc *intel_fbc_find_crtc(struct drm_i915_private *dev_priv) -{ - struct drm_crtc *crtc = NULL, *tmp_crtc; - enum pipe pipe; - - for_each_pipe(dev_priv, pipe) { - tmp_crtc = dev_priv->pipe_to_crtc_mapping[pipe]; - - if (crtc_is_valid(to_intel_crtc(tmp_crtc))) - crtc = tmp_crtc; - } - - if (!crtc) - return NULL; - - return crtc; -} - static bool multiple_pipes_ok(struct drm_i915_private *dev_priv) { enum pipe pipe; @@ -590,11 +578,17 @@ again: } } -static int intel_fbc_alloc_cfb(struct drm_i915_private *dev_priv, int size, - int fb_cpp) +static int intel_fbc_alloc_cfb(struct intel_crtc *crtc) { + struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; + struct drm_framebuffer *fb = crtc->base.primary->state->fb; struct drm_mm_node *uninitialized_var(compressed_llb); - int ret; + int size, fb_cpp, ret; + + WARN_ON(drm_mm_node_allocated(&dev_priv->fbc.compressed_fb)); + + size = intel_fbc_calculate_cfb_size(crtc, fb); + fb_cpp = drm_format_plane_cpp(fb->pixel_format, 0); ret = find_compression_threshold(dev_priv, &dev_priv->fbc.compressed_fb, size, fb_cpp); @@ -629,8 +623,6 @@ static int intel_fbc_alloc_cfb(struct drm_i915_private *dev_priv, int size, dev_priv->mm.stolen_base + compressed_llb->start); } - dev_priv->fbc.uncompressed_size = size; - DRM_DEBUG_KMS("reserved %llu bytes of contiguous stolen space for FBC, threshold: %d\n", dev_priv->fbc.compressed_fb.size, dev_priv->fbc.threshold); @@ -647,18 +639,15 @@ err_llb: static void __intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv) { - if (dev_priv->fbc.uncompressed_size == 0) - return; - - i915_gem_stolen_remove_node(dev_priv, &dev_priv->fbc.compressed_fb); + if (drm_mm_node_allocated(&dev_priv->fbc.compressed_fb)) + i915_gem_stolen_remove_node(dev_priv, + &dev_priv->fbc.compressed_fb); if (dev_priv->fbc.compressed_llb) { i915_gem_stolen_remove_node(dev_priv, dev_priv->fbc.compressed_llb); kfree(dev_priv->fbc.compressed_llb); } - - dev_priv->fbc.uncompressed_size = 0; } void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv) @@ -671,64 +660,6 @@ void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv) mutex_unlock(&dev_priv->fbc.lock); } -/* - * For SKL+, the plane source size used by the hardware is based on the value we - * write to the PLANE_SIZE register. For BDW-, the hardware looks at the value - * we wrote to PIPESRC. 
- */ -static void intel_fbc_get_plane_source_size(struct intel_crtc *crtc, - int *width, int *height) -{ - struct intel_plane_state *plane_state = - to_intel_plane_state(crtc->base.primary->state); - int w, h; - - if (intel_rotation_90_or_270(plane_state->base.rotation)) { - w = drm_rect_height(&plane_state->src) >> 16; - h = drm_rect_width(&plane_state->src) >> 16; - } else { - w = drm_rect_width(&plane_state->src) >> 16; - h = drm_rect_height(&plane_state->src) >> 16; - } - - if (width) - *width = w; - if (height) - *height = h; -} - -static int intel_fbc_calculate_cfb_size(struct intel_crtc *crtc) -{ - struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; - struct drm_framebuffer *fb = crtc->base.primary->fb; - int lines; - - intel_fbc_get_plane_source_size(crtc, NULL, &lines); - if (INTEL_INFO(dev_priv)->gen >= 7) - lines = min(lines, 2048); - - /* Hardware needs the full buffer stride, not just the active area. */ - return lines * fb->pitches[0]; -} - -static int intel_fbc_setup_cfb(struct intel_crtc *crtc) -{ - struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; - struct drm_framebuffer *fb = crtc->base.primary->fb; - int size, cpp; - - size = intel_fbc_calculate_cfb_size(crtc); - cpp = drm_format_plane_cpp(fb->pixel_format, 0); - - if (size <= dev_priv->fbc.uncompressed_size) - return 0; - - /* Release any current block */ - __intel_fbc_cleanup_cfb(dev_priv); - - return intel_fbc_alloc_cfb(dev_priv, size, cpp); -} - static bool stride_is_valid(struct drm_i915_private *dev_priv, unsigned int stride) { @@ -803,47 +734,34 @@ static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc) } /** - * __intel_fbc_update - enable/disable FBC as needed, unlocked - * @dev_priv: i915 device instance + * __intel_fbc_update - activate/deactivate FBC as needed, unlocked + * @crtc: the CRTC that triggered the update * - * This function completely reevaluates the status of FBC, then enables, - * disables or maintains it on the same state. + * This function completely reevaluates the status of FBC, then activates, + * deactivates or maintains it on the same state. 
*/ -static void __intel_fbc_update(struct drm_i915_private *dev_priv) +static void __intel_fbc_update(struct intel_crtc *crtc) { - struct drm_crtc *drm_crtc = NULL; - struct intel_crtc *crtc; + struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; struct drm_framebuffer *fb; struct drm_i915_gem_object *obj; const struct drm_display_mode *adjusted_mode; WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock)); - if (intel_vgpu_active(dev_priv->dev)) - i915.enable_fbc = 0; - - if (i915.enable_fbc < 0) { - set_no_fbc_reason(dev_priv, "disabled per chip default"); - goto out_disable; - } - - if (!i915.enable_fbc) { - set_no_fbc_reason(dev_priv, "disabled per module param"); - goto out_disable; - } - - drm_crtc = intel_fbc_find_crtc(dev_priv); - if (!drm_crtc) { - set_no_fbc_reason(dev_priv, "no output"); - goto out_disable; - } - if (!multiple_pipes_ok(dev_priv)) { set_no_fbc_reason(dev_priv, "more than one pipe active"); goto out_disable; } - crtc = to_intel_crtc(drm_crtc); + if (!dev_priv->fbc.enabled || dev_priv->fbc.crtc != crtc) + return; + + if (!crtc_is_valid(crtc)) { + set_no_fbc_reason(dev_priv, "no output"); + goto out_disable; + } + fb = crtc->base.primary->fb; obj = intel_fb_obj(fb); adjusted_mode = &crtc->config->base.adjusted_mode; @@ -859,12 +777,6 @@ static void __intel_fbc_update(struct drm_i915_private *dev_priv) goto out_disable; } - if ((INTEL_INFO(dev_priv)->gen < 4 || HAS_DDI(dev_priv)) && - crtc->plane != PLANE_A) { - set_no_fbc_reason(dev_priv, "FBC unsupported on plane"); - goto out_disable; - } - /* The use of a CPU fence is mandatory in order to detect writes * by the CPU to the scanout and trigger updates to the FBC. */ @@ -897,8 +809,19 @@ static void __intel_fbc_update(struct drm_i915_private *dev_priv) goto out_disable; } - if (intel_fbc_setup_cfb(crtc)) { - set_no_fbc_reason(dev_priv, "not enough stolen memory"); + /* It is possible for the required CFB size to change without a + * crtc->disable + crtc->enable since it is possible to change the + * stride without triggering a full modeset. Since we try to + * over-allocate the CFB, there's a chance we may keep FBC enabled even + * if this happens, but if we exceed the current CFB size we'll have to + * disable FBC. Notice that it would be possible to disable FBC, wait + * for a frame, free the stolen node, then try to reenable FBC in case + * we didn't get any invalidate/deactivate calls, but this would require + * a lot of tracking just for a specific case. If we conclude it's an + * important case, we can implement it later. */ + if (intel_fbc_calculate_cfb_size(crtc, fb) > + dev_priv->fbc.compressed_fb.size * dev_priv->fbc.threshold) { + set_no_fbc_reason(dev_priv, "CFB requirements changed"); goto out_disable; } @@ -909,10 +832,11 @@ static void __intel_fbc_update(struct drm_i915_private *dev_priv) */ if (dev_priv->fbc.crtc == crtc && dev_priv->fbc.fb_id == fb->base.id && - dev_priv->fbc.y == crtc->base.y) + dev_priv->fbc.y == crtc->base.y && + dev_priv->fbc.active) return; - if (intel_fbc_enabled(dev_priv)) { + if (intel_fbc_is_active(dev_priv)) { /* We update FBC along two paths, after changing fb/crtc * configuration (modeswitching) and after page-flipping * finishes. For the latter, we know that not only did @@ -936,36 +860,37 @@ static void __intel_fbc_update(struct drm_i915_private *dev_priv) * disabling paths we do need to wait for a vblank at * some point. And we wait before enabling FBC anyway. 
*/ - DRM_DEBUG_KMS("disabling active FBC for update\n"); - __intel_fbc_disable(dev_priv); + DRM_DEBUG_KMS("deactivating FBC for update\n"); + __intel_fbc_deactivate(dev_priv); } - intel_fbc_schedule_enable(crtc); + intel_fbc_schedule_activation(crtc); dev_priv->fbc.no_fbc_reason = "FBC enabled (not necessarily active)"; return; out_disable: /* Multiple disables should be harmless */ - if (intel_fbc_enabled(dev_priv)) { - DRM_DEBUG_KMS("unsupported config, disabling FBC\n"); - __intel_fbc_disable(dev_priv); + if (intel_fbc_is_active(dev_priv)) { + DRM_DEBUG_KMS("unsupported config, deactivating FBC\n"); + __intel_fbc_deactivate(dev_priv); } - __intel_fbc_cleanup_cfb(dev_priv); } /* - * intel_fbc_update - enable/disable FBC as needed - * @dev_priv: i915 device instance + * intel_fbc_update - activate/deactivate FBC as needed + * @crtc: the CRTC that triggered the update * - * This function reevaluates the overall state and enables or disables FBC. + * This function reevaluates the overall state and activates or deactivates FBC. */ -void intel_fbc_update(struct drm_i915_private *dev_priv) +void intel_fbc_update(struct intel_crtc *crtc) { + struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; + if (!fbc_supported(dev_priv)) return; mutex_lock(&dev_priv->fbc.lock); - __intel_fbc_update(dev_priv); + __intel_fbc_update(crtc); mutex_unlock(&dev_priv->fbc.lock); } @@ -985,16 +910,13 @@ void intel_fbc_invalidate(struct drm_i915_private *dev_priv, if (dev_priv->fbc.enabled) fbc_bits = INTEL_FRONTBUFFER_PRIMARY(dev_priv->fbc.crtc->pipe); - else if (dev_priv->fbc.fbc_work) - fbc_bits = INTEL_FRONTBUFFER_PRIMARY( - dev_priv->fbc.fbc_work->crtc->pipe); else fbc_bits = dev_priv->fbc.possible_framebuffer_bits; dev_priv->fbc.busy_bits |= (fbc_bits & frontbuffer_bits); if (dev_priv->fbc.busy_bits) - __intel_fbc_disable(dev_priv); + __intel_fbc_deactivate(dev_priv); mutex_unlock(&dev_priv->fbc.lock); } @@ -1012,14 +934,139 @@ void intel_fbc_flush(struct drm_i915_private *dev_priv, dev_priv->fbc.busy_bits &= ~frontbuffer_bits; - if (!dev_priv->fbc.busy_bits) { - __intel_fbc_disable(dev_priv); - __intel_fbc_update(dev_priv); + if (!dev_priv->fbc.busy_bits && dev_priv->fbc.enabled) { + if (origin != ORIGIN_FLIP && dev_priv->fbc.active) { + intel_fbc_recompress(dev_priv); + } else { + __intel_fbc_deactivate(dev_priv); + __intel_fbc_update(dev_priv->fbc.crtc); + } } mutex_unlock(&dev_priv->fbc.lock); } +/** + * intel_fbc_enable: tries to enable FBC on the CRTC + * @crtc: the CRTC + * + * This function checks if it's possible to enable FBC on the following CRTC, + * then enables it. Notice that it doesn't activate FBC. 
+ */ +void intel_fbc_enable(struct intel_crtc *crtc) +{ + struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; + + if (!fbc_supported(dev_priv)) + return; + + mutex_lock(&dev_priv->fbc.lock); + + if (dev_priv->fbc.enabled) { + WARN_ON(dev_priv->fbc.crtc == crtc); + goto out; + } + + WARN_ON(dev_priv->fbc.active); + WARN_ON(dev_priv->fbc.crtc != NULL); + + if (intel_vgpu_active(dev_priv->dev)) { + set_no_fbc_reason(dev_priv, "VGPU is active"); + goto out; + } + + if (i915.enable_fbc < 0) { + set_no_fbc_reason(dev_priv, "disabled per chip default"); + goto out; + } + + if (!i915.enable_fbc) { + set_no_fbc_reason(dev_priv, "disabled per module param"); + goto out; + } + + if (!crtc_can_fbc(crtc)) { + set_no_fbc_reason(dev_priv, "no enabled pipes can have FBC"); + goto out; + } + + if (intel_fbc_alloc_cfb(crtc)) { + set_no_fbc_reason(dev_priv, "not enough stolen memory"); + goto out; + } + + DRM_DEBUG_KMS("Enabling FBC on pipe %c\n", pipe_name(crtc->pipe)); + dev_priv->fbc.no_fbc_reason = "FBC enabled but not active yet\n"; + + dev_priv->fbc.enabled = true; + dev_priv->fbc.crtc = crtc; +out: + mutex_unlock(&dev_priv->fbc.lock); +} + +/** + * __intel_fbc_disable - disable FBC + * @dev_priv: i915 device instance + * + * This is the low level function that actually disables FBC. Callers should + * grab the FBC lock. + */ +static void __intel_fbc_disable(struct drm_i915_private *dev_priv) +{ + struct intel_crtc *crtc = dev_priv->fbc.crtc; + + WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock)); + WARN_ON(!dev_priv->fbc.enabled); + WARN_ON(dev_priv->fbc.active); + assert_pipe_disabled(dev_priv, crtc->pipe); + + DRM_DEBUG_KMS("Disabling FBC on pipe %c\n", pipe_name(crtc->pipe)); + + __intel_fbc_cleanup_cfb(dev_priv); + + dev_priv->fbc.enabled = false; + dev_priv->fbc.crtc = NULL; +} + +/** + * intel_fbc_disable_crtc - disable FBC if it's associated with crtc + * @crtc: the CRTC + * + * This function disables FBC if it's associated with the provided CRTC. + */ +void intel_fbc_disable_crtc(struct intel_crtc *crtc) +{ + struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; + + if (!fbc_supported(dev_priv)) + return; + + mutex_lock(&dev_priv->fbc.lock); + if (dev_priv->fbc.crtc == crtc) { + WARN_ON(!dev_priv->fbc.enabled); + WARN_ON(dev_priv->fbc.active); + __intel_fbc_disable(dev_priv); + } + mutex_unlock(&dev_priv->fbc.lock); +} + +/** + * intel_fbc_disable - globally disable FBC + * @dev_priv: i915 device instance + * + * This function disables FBC regardless of which CRTC is associated with it. 
+ */ +void intel_fbc_disable(struct drm_i915_private *dev_priv) +{ + if (!fbc_supported(dev_priv)) + return; + + mutex_lock(&dev_priv->fbc.lock); + if (dev_priv->fbc.enabled) + __intel_fbc_disable(dev_priv); + mutex_unlock(&dev_priv->fbc.lock); +} + /** * intel_fbc_init - Initialize FBC * @dev_priv: the i915 device @@ -1030,8 +1077,11 @@ void intel_fbc_init(struct drm_i915_private *dev_priv) { enum pipe pipe; + INIT_WORK(&dev_priv->fbc.work.work, intel_fbc_work_fn); mutex_init(&dev_priv->fbc.lock); dev_priv->fbc.enabled = false; + dev_priv->fbc.active = false; + dev_priv->fbc.work.scheduled = false; if (!HAS_FBC(dev_priv)) { dev_priv->fbc.no_fbc_reason = "unsupported by this chipset"; @@ -1047,29 +1097,29 @@ void intel_fbc_init(struct drm_i915_private *dev_priv) } if (INTEL_INFO(dev_priv)->gen >= 7) { - dev_priv->fbc.fbc_enabled = ilk_fbc_enabled; - dev_priv->fbc.enable_fbc = gen7_fbc_enable; - dev_priv->fbc.disable_fbc = ilk_fbc_disable; + dev_priv->fbc.is_active = ilk_fbc_is_active; + dev_priv->fbc.activate = gen7_fbc_activate; + dev_priv->fbc.deactivate = ilk_fbc_deactivate; } else if (INTEL_INFO(dev_priv)->gen >= 5) { - dev_priv->fbc.fbc_enabled = ilk_fbc_enabled; - dev_priv->fbc.enable_fbc = ilk_fbc_enable; - dev_priv->fbc.disable_fbc = ilk_fbc_disable; + dev_priv->fbc.is_active = ilk_fbc_is_active; + dev_priv->fbc.activate = ilk_fbc_activate; + dev_priv->fbc.deactivate = ilk_fbc_deactivate; } else if (IS_GM45(dev_priv)) { - dev_priv->fbc.fbc_enabled = g4x_fbc_enabled; - dev_priv->fbc.enable_fbc = g4x_fbc_enable; - dev_priv->fbc.disable_fbc = g4x_fbc_disable; + dev_priv->fbc.is_active = g4x_fbc_is_active; + dev_priv->fbc.activate = g4x_fbc_activate; + dev_priv->fbc.deactivate = g4x_fbc_deactivate; } else { - dev_priv->fbc.fbc_enabled = i8xx_fbc_enabled; - dev_priv->fbc.enable_fbc = i8xx_fbc_enable; - dev_priv->fbc.disable_fbc = i8xx_fbc_disable; + dev_priv->fbc.is_active = i8xx_fbc_is_active; + dev_priv->fbc.activate = i8xx_fbc_activate; + dev_priv->fbc.deactivate = i8xx_fbc_deactivate; /* This value was pulled out of someone's hat */ I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT); } /* We still don't have any sort of hardware state readout for FBC, so - * disable it in case the BIOS enabled it to make sure software matches - * the hardware state. */ - if (dev_priv->fbc.fbc_enabled(dev_priv)) - dev_priv->fbc.disable_fbc(dev_priv); + * deactivate it in case the BIOS activated it to make sure software + * matches the hardware state. */ + if (dev_priv->fbc.is_active(dev_priv)) + dev_priv->fbc.deactivate(dev_priv); } diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c index 7ccde58f8c98..bea75cafc623 100644 --- a/drivers/gpu/drm/i915/intel_fbdev.c +++ b/drivers/gpu/drm/i915/intel_fbdev.c @@ -163,13 +163,6 @@ static int intelfb_alloc(struct drm_fb_helper *helper, goto out; } - /* Flush everything out, we'll be doing GTT only from now on */ - ret = intel_pin_and_fence_fb_obj(NULL, fb, NULL); - if (ret) { - DRM_ERROR("failed to pin obj: %d\n", ret); - goto out; - } - mutex_unlock(&dev->struct_mutex); ifbdev->fb = to_intel_framebuffer(fb); @@ -225,6 +218,14 @@ static int intelfb_create(struct drm_fb_helper *helper, mutex_lock(&dev->struct_mutex); + /* Pin the GGTT vma for our access via info->screen_base. + * This also validates that any existing fb inherited from the + * BIOS is suitable for own access. 
+ */ + ret = intel_pin_and_fence_fb_obj(NULL, &ifbdev->fb->base, NULL); + if (ret) + goto out_unlock; + info = drm_fb_helper_alloc_fbi(helper); if (IS_ERR(info)) { DRM_ERROR("Failed to allocate fb_info\n"); @@ -287,6 +288,7 @@ out_destroy_fbi: drm_fb_helper_release_fbi(helper); out_unpin: i915_gem_object_ggtt_unpin(obj); +out_unlock: mutex_unlock(&dev->struct_mutex); return ret; } @@ -524,6 +526,10 @@ static const struct drm_fb_helper_funcs intel_fb_helper_funcs = { static void intel_fbdev_destroy(struct drm_device *dev, struct intel_fbdev *ifbdev) { + /* We rely on the object-free to release the VMA pinning for + * the info->screen_base mmaping. Leaking the VMA is simpler than + * trying to rectify all the possible error paths leading here. + */ drm_fb_helper_unregister_fbi(&ifbdev->helper); drm_fb_helper_release_fbi(&ifbdev->helper); diff --git a/drivers/gpu/drm/i915/intel_fifo_underrun.c b/drivers/gpu/drm/i915/intel_fifo_underrun.c index 7ae182d0594b..bda526660e20 100644 --- a/drivers/gpu/drm/i915/intel_fifo_underrun.c +++ b/drivers/gpu/drm/i915/intel_fifo_underrun.c @@ -128,9 +128,9 @@ static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev, DE_PIPEB_FIFO_UNDERRUN; if (enable) - ironlake_enable_display_irq(dev_priv, bit); + ilk_enable_display_irq(dev_priv, bit); else - ironlake_disable_display_irq(dev_priv, bit); + ilk_disable_display_irq(dev_priv, bit); } static void ivybridge_check_fifo_underruns(struct intel_crtc *crtc) @@ -161,9 +161,9 @@ static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev, if (!ivb_can_enable_err_int(dev)) return; - ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB); + ilk_enable_display_irq(dev_priv, DE_ERR_INT_IVB); } else { - ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB); + ilk_disable_display_irq(dev_priv, DE_ERR_INT_IVB); if (old && I915_READ(GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe)) { @@ -178,14 +178,10 @@ static void broadwell_set_fifo_underrun_reporting(struct drm_device *dev, { struct drm_i915_private *dev_priv = dev->dev_private; - assert_spin_locked(&dev_priv->irq_lock); - if (enable) - dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_FIFO_UNDERRUN; + bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_FIFO_UNDERRUN); else - dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_FIFO_UNDERRUN; - I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]); - POSTING_READ(GEN8_DE_PIPE_IMR(pipe)); + bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_FIFO_UNDERRUN); } static void ibx_set_fifo_underrun_reporting(struct drm_device *dev, diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h index 5ba586683c87..822952235dcf 100644 --- a/drivers/gpu/drm/i915/intel_guc.h +++ b/drivers/gpu/drm/i915/intel_guc.h @@ -42,8 +42,6 @@ struct i915_guc_client { uint32_t wq_offset; uint32_t wq_size; - - spinlock_t wq_lock; /* Protects all data below */ uint32_t wq_tail; /* GuC submission statistics & status */ @@ -95,8 +93,6 @@ struct intel_guc { struct i915_guc_client *execbuf_client; - spinlock_t host2guc_lock; /* Protects all data below */ - DECLARE_BITMAP(doorbell_bitmap, GUC_MAX_DOORBELLS); uint32_t db_cacheline; /* Cyclic counter mod pagesize */ diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index f16cd2a843b2..4a77639a489d 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -78,7 +78,7 @@ static u32 g4x_infoframe_index(enum hdmi_infoframe_type type) case HDMI_INFOFRAME_TYPE_VENDOR: return VIDEO_DIP_SELECT_VENDOR; default: - 
DRM_DEBUG_DRIVER("unknown info frame type %d\n", type); + MISSING_CASE(type); return 0; } } @@ -93,7 +93,7 @@ static u32 g4x_infoframe_enable(enum hdmi_infoframe_type type) case HDMI_INFOFRAME_TYPE_VENDOR: return VIDEO_DIP_ENABLE_VENDOR; default: - DRM_DEBUG_DRIVER("unknown info frame type %d\n", type); + MISSING_CASE(type); return 0; } } @@ -108,7 +108,7 @@ static u32 hsw_infoframe_enable(enum hdmi_infoframe_type type) case HDMI_INFOFRAME_TYPE_VENDOR: return VIDEO_DIP_ENABLE_VS_HSW; default: - DRM_DEBUG_DRIVER("unknown info frame type %d\n", type); + MISSING_CASE(type); return 0; } } @@ -127,7 +127,7 @@ hsw_dip_data_reg(struct drm_i915_private *dev_priv, case HDMI_INFOFRAME_TYPE_VENDOR: return HSW_TVIDEO_DIP_VS_DATA(cpu_transcoder, i); default: - DRM_DEBUG_DRIVER("unknown info frame type %d\n", type); + MISSING_CASE(type); return INVALID_MMIO_REG; } } @@ -169,10 +169,10 @@ static void g4x_write_infoframe(struct drm_encoder *encoder, POSTING_READ(VIDEO_DIP_CTL); } -static bool g4x_infoframe_enabled(struct drm_encoder *encoder) +static bool g4x_infoframe_enabled(struct drm_encoder *encoder, + const struct intel_crtc_state *pipe_config) { - struct drm_device *dev = encoder->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(encoder->dev); struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); u32 val = I915_READ(VIDEO_DIP_CTL); @@ -225,13 +225,13 @@ static void ibx_write_infoframe(struct drm_encoder *encoder, POSTING_READ(reg); } -static bool ibx_infoframe_enabled(struct drm_encoder *encoder) +static bool ibx_infoframe_enabled(struct drm_encoder *encoder, + const struct intel_crtc_state *pipe_config) { - struct drm_device *dev = encoder->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); + struct drm_i915_private *dev_priv = to_i915(encoder->dev); struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); - i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe); + enum pipe pipe = to_intel_crtc(pipe_config->base.crtc)->pipe; + i915_reg_t reg = TVIDEO_DIP_CTL(pipe); u32 val = I915_READ(reg); if ((val & VIDEO_DIP_ENABLE) == 0) @@ -287,12 +287,12 @@ static void cpt_write_infoframe(struct drm_encoder *encoder, POSTING_READ(reg); } -static bool cpt_infoframe_enabled(struct drm_encoder *encoder) +static bool cpt_infoframe_enabled(struct drm_encoder *encoder, + const struct intel_crtc_state *pipe_config) { - struct drm_device *dev = encoder->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); - u32 val = I915_READ(TVIDEO_DIP_CTL(intel_crtc->pipe)); + struct drm_i915_private *dev_priv = to_i915(encoder->dev); + enum pipe pipe = to_intel_crtc(pipe_config->base.crtc)->pipe; + u32 val = I915_READ(TVIDEO_DIP_CTL(pipe)); if ((val & VIDEO_DIP_ENABLE) == 0) return false; @@ -341,13 +341,13 @@ static void vlv_write_infoframe(struct drm_encoder *encoder, POSTING_READ(reg); } -static bool vlv_infoframe_enabled(struct drm_encoder *encoder) +static bool vlv_infoframe_enabled(struct drm_encoder *encoder, + const struct intel_crtc_state *pipe_config) { - struct drm_device *dev = encoder->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); + struct drm_i915_private *dev_priv = to_i915(encoder->dev); struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); - u32 val = 
I915_READ(VLV_TVIDEO_DIP_CTL(intel_crtc->pipe)); + enum pipe pipe = to_intel_crtc(pipe_config->base.crtc)->pipe; + u32 val = I915_READ(VLV_TVIDEO_DIP_CTL(pipe)); if ((val & VIDEO_DIP_ENABLE) == 0) return false; @@ -375,8 +375,6 @@ static void hsw_write_infoframe(struct drm_encoder *encoder, u32 val = I915_READ(ctl_reg); data_reg = hsw_dip_data_reg(dev_priv, cpu_transcoder, type, 0); - if (i915_mmio_reg_valid(data_reg)) - return; val &= ~hsw_infoframe_enable(type); I915_WRITE(ctl_reg, val); @@ -398,12 +396,11 @@ static void hsw_write_infoframe(struct drm_encoder *encoder, POSTING_READ(ctl_reg); } -static bool hsw_infoframe_enabled(struct drm_encoder *encoder) +static bool hsw_infoframe_enabled(struct drm_encoder *encoder, + const struct intel_crtc_state *pipe_config) { - struct drm_device *dev = encoder->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); - u32 val = I915_READ(HSW_TVIDEO_DIP_CTL(intel_crtc->config->cpu_transcoder)); + struct drm_i915_private *dev_priv = to_i915(encoder->dev); + u32 val = I915_READ(HSW_TVIDEO_DIP_CTL(pipe_config->cpu_transcoder)); return val & (VIDEO_DIP_ENABLE_VSC_HSW | VIDEO_DIP_ENABLE_AVI_HSW | VIDEO_DIP_ENABLE_GCP_HSW | VIDEO_DIP_ENABLE_VS_HSW | @@ -639,7 +636,7 @@ static bool intel_hdmi_set_gcp_infoframe(struct drm_encoder *encoder) if (HAS_DDI(dev_priv)) reg = HSW_TVIDEO_DIP_GCP(crtc->config->cpu_transcoder); - else if (IS_VALLEYVIEW(dev_priv)) + else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) reg = VLV_TVIDEO_DIP_GCP(crtc->pipe); else if (HAS_PCH_SPLIT(dev_priv->dev)) reg = TVIDEO_DIP_GCP(crtc->pipe); @@ -927,7 +924,7 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder, if (tmp & HDMI_MODE_SELECT_HDMI) pipe_config->has_hdmi_sink = true; - if (intel_hdmi->infoframe_enabled(&encoder->base)) + if (intel_hdmi->infoframe_enabled(&encoder->base, pipe_config)) pipe_config->has_infoframe = true; if (tmp & SDVO_AUDIO_ENABLE) @@ -2102,7 +2099,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, BUG(); } - if (IS_VALLEYVIEW(dev)) { + if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { intel_hdmi->write_infoframe = vlv_write_infoframe; intel_hdmi->set_infoframes = vlv_set_infoframes; intel_hdmi->infoframe_enabled = vlv_infoframe_enabled; @@ -2167,7 +2164,7 @@ void intel_hdmi_init(struct drm_device *dev, intel_encoder = &intel_dig_port->base; drm_encoder_init(dev, &intel_encoder->base, &intel_hdmi_enc_funcs, - DRM_MODE_ENCODER_TMDS); + DRM_MODE_ENCODER_TMDS, NULL); intel_encoder->compute_config = intel_hdmi_compute_config; if (HAS_PCH_SPLIT(dev)) { diff --git a/drivers/gpu/drm/i915/intel_hotplug.c b/drivers/gpu/drm/i915/intel_hotplug.c index b17785719598..bee673005d48 100644 --- a/drivers/gpu/drm/i915/intel_hotplug.c +++ b/drivers/gpu/drm/i915/intel_hotplug.c @@ -407,7 +407,7 @@ void intel_hpd_irq_handler(struct drm_device *dev, * hotplug bits itself. So only WARN about unexpected * interrupts on saner platforms. 
*/ - WARN_ONCE(INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev), + WARN_ONCE(!HAS_GMCH_DISPLAY(dev), "Received HPD interrupt on pin %d although disabled\n", i); continue; } @@ -468,9 +468,14 @@ void intel_hpd_init(struct drm_i915_private *dev_priv) list_for_each_entry(connector, &mode_config->connector_list, head) { struct intel_connector *intel_connector = to_intel_connector(connector); connector->polled = intel_connector->polled; - if (connector->encoder && !connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE) - connector->polled = DRM_CONNECTOR_POLL_HPD; + + /* MST has a dynamic intel_connector->encoder and its reprobing + * is all handled by the MST helpers. */ if (intel_connector->mst_port) + continue; + + if (!connector->polled && I915_HAS_HOTPLUG(dev) && + intel_connector->encoder->hpd_pin > HPD_NONE) connector->polled = DRM_CONNECTOR_POLL_HPD; } diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c index 1110c83953cf..25254b5c1ac5 100644 --- a/drivers/gpu/drm/i915/intel_i2c.c +++ b/drivers/gpu/drm/i915/intel_i2c.c @@ -472,9 +472,7 @@ gmbus_xfer_index_read(struct drm_i915_private *dev_priv, struct i2c_msg *msgs) } static int -gmbus_xfer(struct i2c_adapter *adapter, - struct i2c_msg *msgs, - int num) +do_gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num) { struct intel_gmbus *bus = container_of(adapter, struct intel_gmbus, @@ -483,14 +481,6 @@ gmbus_xfer(struct i2c_adapter *adapter, int i = 0, inc, try = 0; int ret = 0; - intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS); - mutex_lock(&dev_priv->gmbus_mutex); - - if (bus->force_bit) { - ret = i2c_bit_algo.master_xfer(adapter, msgs, num); - goto out; - } - retry: I915_WRITE(GMBUS0, bus->reg0); @@ -505,17 +495,13 @@ retry: ret = gmbus_xfer_write(dev_priv, &msgs[i]); } + if (!ret) + ret = gmbus_wait_hw_status(dev_priv, GMBUS_HW_WAIT_PHASE, + GMBUS_HW_WAIT_EN); if (ret == -ETIMEDOUT) goto timeout; - if (ret == -ENXIO) + else if (ret) goto clear_err; - - ret = gmbus_wait_hw_status(dev_priv, GMBUS_HW_WAIT_PHASE, - GMBUS_HW_WAIT_EN); - if (ret == -ENXIO) - goto clear_err; - if (ret) - goto timeout; } /* Generate a STOP condition on the bus. Note that gmbus can't generate @@ -589,13 +575,34 @@ timeout: bus->adapter.name, bus->reg0 & 0xff); I915_WRITE(GMBUS0, 0); - /* Hardware may not support GMBUS over these pins? Try GPIO bitbanging instead. */ + /* + * Hardware may not support GMBUS over these pins? Try GPIO bitbanging + * instead. Use EAGAIN to have i2c core retry. 
+ */ bus->force_bit = 1; - ret = i2c_bit_algo.master_xfer(adapter, msgs, num); + ret = -EAGAIN; out: - mutex_unlock(&dev_priv->gmbus_mutex); + return ret; +} +static int +gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num) +{ + struct intel_gmbus *bus = container_of(adapter, struct intel_gmbus, + adapter); + struct drm_i915_private *dev_priv = bus->dev_priv; + int ret; + + intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS); + mutex_lock(&dev_priv->gmbus_mutex); + + if (bus->force_bit) + ret = i2c_bit_algo.master_xfer(adapter, msgs, num); + else + ret = do_gmbus_xfer(adapter, msgs, num); + + mutex_unlock(&dev_priv->gmbus_mutex); intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS); return ret; @@ -629,7 +636,7 @@ int intel_setup_gmbus(struct drm_device *dev) if (HAS_PCH_NOP(dev)) return 0; - if (IS_VALLEYVIEW(dev)) + if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) dev_priv->gpio_mmio_base = VLV_DISPLAY_BASE; else if (!HAS_GMCH_DISPLAY(dev_priv)) dev_priv->gpio_mmio_base = diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index 4ebafab53f30..3aa614731d7e 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c @@ -372,7 +372,7 @@ static int execlists_update_context(struct drm_i915_gem_request *rq) WARN_ON(!i915_gem_obj_is_pinned(ctx_obj)); WARN_ON(!i915_gem_obj_is_pinned(rb_obj)); - page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN); + page = i915_gem_object_get_dirty_page(ctx_obj, LRC_STATE_PN); reg_state = kmap_atomic(page); reg_state[CTX_RING_TAIL+1] = rq->tail; @@ -1425,7 +1425,7 @@ static int intel_init_workaround_bb(struct intel_engine_cs *ring) return ret; } - page = i915_gem_object_get_page(wa_ctx->obj, 0); + page = i915_gem_object_get_dirty_page(wa_ctx->obj, 0); batch = kmap_atomic(page); offset = 0; @@ -1894,8 +1894,10 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *ring) dev_priv = ring->dev->dev_private; - intel_logical_ring_stop(ring); - WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0); + if (ring->buffer) { + intel_logical_ring_stop(ring); + WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0); + } if (ring->cleanup) ring->cleanup(ring); @@ -1909,6 +1911,7 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *ring) } lrc_destroy_wa_ctx_obj(ring); + ring->dev = NULL; } static int logical_ring_init(struct drm_device *dev, struct intel_engine_cs *ring) @@ -1931,11 +1934,11 @@ static int logical_ring_init(struct drm_device *dev, struct intel_engine_cs *rin ret = i915_cmd_parser_init_ring(ring); if (ret) - return ret; + goto error; ret = intel_lr_context_deferred_alloc(ring->default_context, ring); if (ret) - return ret; + goto error; /* As this is the default context, always pin it */ ret = intel_lr_context_do_pin( @@ -1946,9 +1949,13 @@ static int logical_ring_init(struct drm_device *dev, struct intel_engine_cs *rin DRM_ERROR( "Failed to pin and map ringbuffer %s: %d\n", ring->name, ret); - return ret; + goto error; } + return 0; + +error: + intel_logical_ring_cleanup(ring); return ret; } @@ -2257,7 +2264,7 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o /* The second page of the context object contains some fields which must * be set up prior to the first execution. 
*/ - page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN); + page = i915_gem_object_get_dirty_page(ctx_obj, LRC_STATE_PN); reg_state = kmap_atomic(page); /* A context is actually a big batch buffer with several MI_LOAD_REGISTER_IMM @@ -2343,9 +2350,6 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o } kunmap_atomic(reg_state); - - ctx_obj->dirty = 1; - set_page_dirty(page); i915_gem_object_unpin_pages(ctx_obj); return 0; @@ -2529,7 +2533,7 @@ void intel_lr_context_reset(struct drm_device *dev, WARN(1, "Failed get_pages for context obj\n"); continue; } - page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN); + page = i915_gem_object_get_dirty_page(ctx_obj, LRC_STATE_PN); reg_state = kmap_atomic(page); reg_state[CTX_RING_HEAD+1] = 0; diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index 61f1145f6579..0da0240caf81 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -1025,7 +1025,7 @@ void intel_lvds_init(struct drm_device *dev) DRM_MODE_CONNECTOR_LVDS); drm_encoder_init(dev, &intel_encoder->base, &intel_lvds_enc_funcs, - DRM_MODE_ENCODER_LVDS); + DRM_MODE_ENCODER_LVDS, NULL); intel_encoder->enable = intel_enable_lvds; intel_encoder->pre_enable = intel_pre_enable_lvds; diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c index e362a30776fa..c15718b4862a 100644 --- a/drivers/gpu/drm/i915/intel_opregion.c +++ b/drivers/gpu/drm/i915/intel_opregion.c @@ -26,6 +26,7 @@ */ #include +#include #include #include @@ -46,6 +47,7 @@ #define OPREGION_SWSCI_OFFSET 0x200 #define OPREGION_ASLE_OFFSET 0x300 #define OPREGION_VBT_OFFSET 0x400 +#define OPREGION_ASLE_EXT_OFFSET 0x1C00 #define OPREGION_SIGNATURE "IntelGraphicsMem" #define MBOX_ACPI (1<<0) @@ -120,7 +122,16 @@ struct opregion_asle { u64 fdss; u32 fdsp; u32 stat; - u8 rsvd[70]; + u64 rvda; /* Physical address of raw vbt data */ + u32 rvds; /* Size of raw vbt data */ + u8 rsvd[58]; +} __packed; + +/* OpRegion mailbox #5: ASLE ext */ +struct opregion_asle_ext { + u32 phed; /* Panel Header */ + u8 bddc[256]; /* Panel EDID */ + u8 rsvd[764]; } __packed; /* Driver readiness indicator */ @@ -411,7 +422,7 @@ int intel_opregion_notify_adapter(struct drm_device *dev, pci_power_t state) static u32 asle_set_backlight(struct drm_device *dev, u32 bclp) { struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_connector *intel_connector; + struct intel_connector *connector; struct opregion_asle *asle = dev_priv->opregion.asle; DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp); @@ -435,8 +446,8 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp) * only one). 
*/ DRM_DEBUG_KMS("updating opregion backlight %d/255\n", bclp); - list_for_each_entry(intel_connector, &dev->mode_config.connector_list, base.head) - intel_panel_set_backlight_acpi(intel_connector, bclp, 255); + for_each_intel_connector(dev, connector) + intel_panel_set_backlight_acpi(connector, bclp, 255); asle->cblv = DIV_ROUND_UP(bclp * 100, 255) | ASLE_CBLV_VALID; drm_modeset_unlock(&dev->mode_config.connection_mutex); @@ -826,6 +837,10 @@ void intel_opregion_fini(struct drm_device *dev) /* just clear all opregion memory pointers now */ memunmap(opregion->header); + if (opregion->rvda) { + memunmap(opregion->rvda); + opregion->rvda = NULL; + } opregion->header = NULL; opregion->acpi = NULL; opregion->swsci = NULL; @@ -894,6 +909,25 @@ static void swsci_setup(struct drm_device *dev) static inline void swsci_setup(struct drm_device *dev) {} #endif /* CONFIG_ACPI */ +static int intel_no_opregion_vbt_callback(const struct dmi_system_id *id) +{ + DRM_DEBUG_KMS("Falling back to manually reading VBT from " + "VBIOS ROM for %s\n", id->ident); + return 1; +} + +static const struct dmi_system_id intel_no_opregion_vbt[] = { + { + .callback = intel_no_opregion_vbt_callback, + .ident = "ThinkCentre A57", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "97027RG"), + }, + }, + { } +}; + int intel_opregion_setup(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -907,6 +941,7 @@ int intel_opregion_setup(struct drm_device *dev) BUILD_BUG_ON(sizeof(struct opregion_acpi) != 0x100); BUILD_BUG_ON(sizeof(struct opregion_swsci) != 0x100); BUILD_BUG_ON(sizeof(struct opregion_asle) != 0x100); + BUILD_BUG_ON(sizeof(struct opregion_asle_ext) != 0x400); pci_read_config_dword(dev->pdev, PCI_ASLS, &asls); DRM_DEBUG_DRIVER("graphic opregion physical addr: 0x%x\n", asls); @@ -931,8 +966,6 @@ int intel_opregion_setup(struct drm_device *dev) goto err_out; } opregion->header = base; - opregion->vbt = base + OPREGION_VBT_OFFSET; - opregion->lid_state = base + ACPI_CLID; mboxes = opregion->header->mboxes; @@ -946,6 +979,7 @@ int intel_opregion_setup(struct drm_device *dev) opregion->swsci = base + OPREGION_SWSCI_OFFSET; swsci_setup(dev); } + if (mboxes & MBOX_ASLE) { DRM_DEBUG_DRIVER("ASLE supported\n"); opregion->asle = base + OPREGION_ASLE_OFFSET; @@ -953,6 +987,37 @@ int intel_opregion_setup(struct drm_device *dev) opregion->asle->ardy = ASLE_ARDY_NOT_READY; } + if (mboxes & MBOX_ASLE_EXT) + DRM_DEBUG_DRIVER("ASLE extension supported\n"); + + if (!dmi_check_system(intel_no_opregion_vbt)) { + const void *vbt = NULL; + u32 vbt_size = 0; + + if (opregion->header->opregion_ver >= 2 && opregion->asle && + opregion->asle->rvda && opregion->asle->rvds) { + opregion->rvda = memremap(opregion->asle->rvda, + opregion->asle->rvds, + MEMREMAP_WB); + vbt = opregion->rvda; + vbt_size = opregion->asle->rvds; + } + + if (intel_bios_is_valid_vbt(vbt, vbt_size)) { + DRM_DEBUG_KMS("Found valid VBT in ACPI OpRegion (RVDA)\n"); + opregion->vbt = vbt; + opregion->vbt_size = vbt_size; + } else { + vbt = base + OPREGION_VBT_OFFSET; + vbt_size = OPREGION_ASLE_EXT_OFFSET - OPREGION_VBT_OFFSET; + if (intel_bios_is_valid_vbt(vbt, vbt_size)) { + DRM_DEBUG_KMS("Found valid VBT in ACPI OpRegion (Mailbox #4)\n"); + opregion->vbt = vbt; + opregion->vbt_size = vbt_size; + } + } + } + return 0; err_out: diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c index a24df35e11e7..21ee6477bf98 100644 --- a/drivers/gpu/drm/i915/intel_panel.c +++ 
b/drivers/gpu/drm/i915/intel_panel.c @@ -461,8 +461,7 @@ static inline u32 scale_hw_to_user(struct intel_connector *connector, static u32 intel_panel_compute_brightness(struct intel_connector *connector, u32 val) { - struct drm_device *dev = connector->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); struct intel_panel *panel = &connector->panel; WARN_ON(panel->backlight.max == 0); @@ -480,45 +479,40 @@ static u32 intel_panel_compute_brightness(struct intel_connector *connector, static u32 lpt_get_backlight(struct intel_connector *connector) { - struct drm_device *dev = connector->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); return I915_READ(BLC_PWM_PCH_CTL2) & BACKLIGHT_DUTY_CYCLE_MASK; } static u32 pch_get_backlight(struct intel_connector *connector) { - struct drm_device *dev = connector->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); return I915_READ(BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK; } static u32 i9xx_get_backlight(struct intel_connector *connector) { - struct drm_device *dev = connector->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); struct intel_panel *panel = &connector->panel; u32 val; val = I915_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK; - if (INTEL_INFO(dev)->gen < 4) + if (INTEL_INFO(dev_priv)->gen < 4) val >>= 1; if (panel->backlight.combination_mode) { u8 lbpc; - pci_read_config_byte(dev->pdev, PCI_LBPC, &lbpc); + pci_read_config_byte(dev_priv->dev->pdev, PCI_LBPC, &lbpc); val *= lbpc; } return val; } -static u32 _vlv_get_backlight(struct drm_device *dev, enum pipe pipe) +static u32 _vlv_get_backlight(struct drm_i915_private *dev_priv, enum pipe pipe) { - struct drm_i915_private *dev_priv = dev->dev_private; - if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B)) return 0; @@ -527,17 +521,16 @@ static u32 _vlv_get_backlight(struct drm_device *dev, enum pipe pipe) static u32 vlv_get_backlight(struct intel_connector *connector) { - struct drm_device *dev = connector->base.dev; + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); enum pipe pipe = intel_get_pipe_from_connector(connector); - return _vlv_get_backlight(dev, pipe); + return _vlv_get_backlight(dev_priv, pipe); } static u32 bxt_get_backlight(struct intel_connector *connector) { - struct drm_device *dev = connector->base.dev; + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); struct intel_panel *panel = &connector->panel; - struct drm_i915_private *dev_priv = dev->dev_private; return I915_READ(BXT_BLC_PWM_DUTY(panel->backlight.controller)); } @@ -553,8 +546,7 @@ static u32 pwm_get_backlight(struct intel_connector *connector) static u32 intel_panel_get_backlight(struct intel_connector *connector) { - struct drm_device *dev = connector->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); struct intel_panel *panel = &connector->panel; u32 val = 0; @@ -573,16 +565,14 @@ static u32 intel_panel_get_backlight(struct intel_connector *connector) static void lpt_set_backlight(struct intel_connector *connector, u32 level) { - struct drm_device *dev = connector->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = 
to_i915(connector->base.dev); u32 val = I915_READ(BLC_PWM_PCH_CTL2) & ~BACKLIGHT_DUTY_CYCLE_MASK; I915_WRITE(BLC_PWM_PCH_CTL2, val | level); } static void pch_set_backlight(struct intel_connector *connector, u32 level) { - struct drm_device *dev = connector->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); u32 tmp; tmp = I915_READ(BLC_PWM_CPU_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK; @@ -591,8 +581,7 @@ static void pch_set_backlight(struct intel_connector *connector, u32 level) static void i9xx_set_backlight(struct intel_connector *connector, u32 level) { - struct drm_device *dev = connector->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); struct intel_panel *panel = &connector->panel; u32 tmp, mask; @@ -603,10 +592,10 @@ static void i9xx_set_backlight(struct intel_connector *connector, u32 level) lbpc = level * 0xfe / panel->backlight.max + 1; level /= lbpc; - pci_write_config_byte(dev->pdev, PCI_LBPC, lbpc); + pci_write_config_byte(dev_priv->dev->pdev, PCI_LBPC, lbpc); } - if (IS_GEN4(dev)) { + if (IS_GEN4(dev_priv)) { mask = BACKLIGHT_DUTY_CYCLE_MASK; } else { level <<= 1; @@ -619,8 +608,7 @@ static void i9xx_set_backlight(struct intel_connector *connector, u32 level) static void vlv_set_backlight(struct intel_connector *connector, u32 level) { - struct drm_device *dev = connector->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); enum pipe pipe = intel_get_pipe_from_connector(connector); u32 tmp; @@ -633,8 +621,7 @@ static void vlv_set_backlight(struct intel_connector *connector, u32 level) static void bxt_set_backlight(struct intel_connector *connector, u32 level) { - struct drm_device *dev = connector->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); struct intel_panel *panel = &connector->panel; I915_WRITE(BXT_BLC_PWM_DUTY(panel->backlight.controller), level); @@ -663,8 +650,7 @@ intel_panel_actually_set_backlight(struct intel_connector *connector, u32 level) static void intel_panel_set_backlight(struct intel_connector *connector, u32 user_level, u32 user_max) { - struct drm_device *dev = connector->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); struct intel_panel *panel = &connector->panel; u32 hw_level; @@ -690,8 +676,7 @@ static void intel_panel_set_backlight(struct intel_connector *connector, void intel_panel_set_backlight_acpi(struct intel_connector *connector, u32 user_level, u32 user_max) { - struct drm_device *dev = connector->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); struct intel_panel *panel = &connector->panel; enum pipe pipe = intel_get_pipe_from_connector(connector); u32 hw_level; @@ -726,8 +711,7 @@ void intel_panel_set_backlight_acpi(struct intel_connector *connector, static void lpt_disable_backlight(struct intel_connector *connector) { - struct drm_device *dev = connector->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); u32 tmp; intel_panel_actually_set_backlight(connector, 0); @@ -752,8 +736,7 @@ static void lpt_disable_backlight(struct intel_connector *connector) static void 
pch_disable_backlight(struct intel_connector *connector) { - struct drm_device *dev = connector->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); u32 tmp; intel_panel_actually_set_backlight(connector, 0); @@ -772,8 +755,7 @@ static void i9xx_disable_backlight(struct intel_connector *connector) static void i965_disable_backlight(struct intel_connector *connector) { - struct drm_device *dev = connector->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); u32 tmp; intel_panel_actually_set_backlight(connector, 0); @@ -784,8 +766,7 @@ static void i965_disable_backlight(struct intel_connector *connector) static void vlv_disable_backlight(struct intel_connector *connector) { - struct drm_device *dev = connector->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); enum pipe pipe = intel_get_pipe_from_connector(connector); u32 tmp; @@ -800,8 +781,7 @@ static void vlv_disable_backlight(struct intel_connector *connector) static void bxt_disable_backlight(struct intel_connector *connector) { - struct drm_device *dev = connector->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); struct intel_panel *panel = &connector->panel; u32 tmp, val; @@ -830,8 +810,7 @@ static void pwm_disable_backlight(struct intel_connector *connector) void intel_panel_disable_backlight(struct intel_connector *connector) { - struct drm_device *dev = connector->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); struct intel_panel *panel = &connector->panel; if (!panel->backlight.present) @@ -843,7 +822,7 @@ void intel_panel_disable_backlight(struct intel_connector *connector) * backlight. This will leave the backlight on unnecessarily when * another client is not activated. 
*/ - if (dev->switch_power_state == DRM_SWITCH_POWER_CHANGING) { + if (dev_priv->dev->switch_power_state == DRM_SWITCH_POWER_CHANGING) { DRM_DEBUG_DRIVER("Skipping backlight disable on vga switch\n"); return; } @@ -860,8 +839,7 @@ void intel_panel_disable_backlight(struct intel_connector *connector) static void lpt_enable_backlight(struct intel_connector *connector) { - struct drm_device *dev = connector->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); struct intel_panel *panel = &connector->panel; u32 pch_ctl1, pch_ctl2; @@ -893,8 +871,7 @@ static void lpt_enable_backlight(struct intel_connector *connector) static void pch_enable_backlight(struct intel_connector *connector) { - struct drm_device *dev = connector->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); struct intel_panel *panel = &connector->panel; enum pipe pipe = intel_get_pipe_from_connector(connector); enum transcoder cpu_transcoder = @@ -940,8 +917,7 @@ static void pch_enable_backlight(struct intel_connector *connector) static void i9xx_enable_backlight(struct intel_connector *connector) { - struct drm_device *dev = connector->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); struct intel_panel *panel = &connector->panel; u32 ctl, freq; @@ -958,7 +934,7 @@ static void i9xx_enable_backlight(struct intel_connector *connector) ctl = freq << 17; if (panel->backlight.combination_mode) ctl |= BLM_LEGACY_MODE; - if (IS_PINEVIEW(dev) && panel->backlight.active_low_pwm) + if (IS_PINEVIEW(dev_priv) && panel->backlight.active_low_pwm) ctl |= BLM_POLARITY_PNV; I915_WRITE(BLC_PWM_CTL, ctl); @@ -972,14 +948,13 @@ static void i9xx_enable_backlight(struct intel_connector *connector) * 855gm only, but checking for gen2 is safe, as 855gm is the only gen2 * that has backlight. 
*/ - if (IS_GEN2(dev)) + if (IS_GEN2(dev_priv)) I915_WRITE(BLC_HIST_CTL, BLM_HISTOGRAM_ENABLE); } static void i965_enable_backlight(struct intel_connector *connector) { - struct drm_device *dev = connector->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); struct intel_panel *panel = &connector->panel; enum pipe pipe = intel_get_pipe_from_connector(connector); u32 ctl, ctl2, freq; @@ -1012,8 +987,7 @@ static void i965_enable_backlight(struct intel_connector *connector) static void vlv_enable_backlight(struct intel_connector *connector) { - struct drm_device *dev = connector->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); struct intel_panel *panel = &connector->panel; enum pipe pipe = intel_get_pipe_from_connector(connector); u32 ctl, ctl2; @@ -1044,8 +1018,7 @@ static void vlv_enable_backlight(struct intel_connector *connector) static void bxt_enable_backlight(struct intel_connector *connector) { - struct drm_device *dev = connector->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); struct intel_panel *panel = &connector->panel; enum pipe pipe = intel_get_pipe_from_connector(connector); u32 pwm_ctl, val; @@ -1102,8 +1075,7 @@ static void pwm_enable_backlight(struct intel_connector *connector) void intel_panel_enable_backlight(struct intel_connector *connector) { - struct drm_device *dev = connector->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); struct intel_panel *panel = &connector->panel; enum pipe pipe = intel_get_pipe_from_connector(connector); @@ -1263,6 +1235,14 @@ static void intel_backlight_device_unregister(struct intel_connector *connector) } #endif /* CONFIG_BACKLIGHT_CLASS_DEVICE */ +/* + * BXT: PWM clock frequency = 19.2 MHz. 
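+ * For example, the 200 Hz fallback used when the VBT specifies no
+ * frequency maps to KHz(19200) / 200 = 96000 PWM clock periods.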
+ */ +static u32 bxt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz) +{ + return KHz(19200) / pwm_freq_hz; +} + /* * SPT: This value represents the period of the PWM stream in clock periods * multiplied by 16 (default increment) or 128 (alternate increment selected in @@ -1270,8 +1250,7 @@ static void intel_backlight_device_unregister(struct intel_connector *connector) */ static u32 spt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz) { - struct drm_device *dev = connector->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); u32 mul, clock; if (I915_READ(SOUTH_CHICKEN1) & SPT_PWM_GRANULARITY) @@ -1291,8 +1270,7 @@ static u32 spt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz) */ static u32 lpt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz) { - struct drm_device *dev = connector->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); u32 mul, clock; if (I915_READ(SOUTH_CHICKEN2) & LPT_PWM_GRANULARITY) @@ -1300,7 +1278,7 @@ static u32 lpt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz) else mul = 128; - if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) + if (HAS_PCH_LPT_H(dev_priv)) clock = MHz(135); /* LPT:H */ else clock = MHz(24); /* LPT:LP */ @@ -1335,22 +1313,28 @@ static u32 i9xx_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz) int clock; if (IS_PINEVIEW(dev)) - clock = intel_hrawclk(dev); + clock = MHz(intel_hrawclk(dev)); else - clock = 1000 * dev_priv->display.get_display_clock_speed(dev); + clock = 1000 * dev_priv->cdclk_freq; return clock / (pwm_freq_hz * 32); } /* * Gen4: This value represents the period of the PWM stream in display core - * clocks multiplied by 128. + * clocks ([DevCTG] HRAW clocks) multiplied by 128. 
+ * */ static u32 i965_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz) { struct drm_device *dev = connector->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - int clock = 1000 * dev_priv->display.get_display_clock_speed(dev); + int clock; + + if (IS_G4X(dev_priv)) + clock = MHz(intel_hrawclk(dev)); + else + clock = 1000 * dev_priv->cdclk_freq; return clock / (pwm_freq_hz * 128); } @@ -1379,20 +1363,23 @@ static u32 vlv_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz) static u32 get_backlight_max_vbt(struct intel_connector *connector) { - struct drm_device *dev = connector->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); struct intel_panel *panel = &connector->panel; u16 pwm_freq_hz = dev_priv->vbt.backlight.pwm_freq_hz; u32 pwm; - if (!pwm_freq_hz) { - DRM_DEBUG_KMS("backlight frequency not specified in VBT\n"); + if (!panel->backlight.hz_to_pwm) { + DRM_DEBUG_KMS("backlight frequency conversion not supported\n"); return 0; } - if (!panel->backlight.hz_to_pwm) { - DRM_DEBUG_KMS("backlight frequency setting from VBT currently not supported on this platform\n"); - return 0; + if (pwm_freq_hz) { + DRM_DEBUG_KMS("VBT defined backlight frequency %u Hz\n", + pwm_freq_hz); + } else { + pwm_freq_hz = 200; + DRM_DEBUG_KMS("default backlight frequency %u Hz\n", + pwm_freq_hz); } pwm = panel->backlight.hz_to_pwm(connector, pwm_freq_hz); @@ -1401,8 +1388,6 @@ static u32 get_backlight_max_vbt(struct intel_connector *connector) return 0; } - DRM_DEBUG_KMS("backlight frequency %u Hz from VBT\n", pwm_freq_hz); - return pwm; } @@ -1411,8 +1396,7 @@ static u32 get_backlight_max_vbt(struct intel_connector *connector) */ static u32 get_backlight_min_vbt(struct intel_connector *connector) { - struct drm_device *dev = connector->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); struct intel_panel *panel = &connector->panel; int min; @@ -1437,8 +1421,7 @@ static u32 get_backlight_min_vbt(struct intel_connector *connector) static int lpt_setup_backlight(struct intel_connector *connector, enum pipe unused) { - struct drm_device *dev = connector->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); struct intel_panel *panel = &connector->panel; u32 pch_ctl1, pch_ctl2, val; @@ -1467,8 +1450,7 @@ static int lpt_setup_backlight(struct intel_connector *connector, enum pipe unus static int pch_setup_backlight(struct intel_connector *connector, enum pipe unused) { - struct drm_device *dev = connector->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); struct intel_panel *panel = &connector->panel; u32 cpu_ctl2, pch_ctl1, pch_ctl2, val; @@ -1498,17 +1480,16 @@ static int pch_setup_backlight(struct intel_connector *connector, enum pipe unus static int i9xx_setup_backlight(struct intel_connector *connector, enum pipe unused) { - struct drm_device *dev = connector->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); struct intel_panel *panel = &connector->panel; u32 ctl, val; ctl = I915_READ(BLC_PWM_CTL); - if (IS_GEN2(dev) || IS_I915GM(dev) || IS_I945GM(dev)) + if (IS_GEN2(dev_priv) || IS_I915GM(dev_priv) || IS_I945GM(dev_priv)) panel->backlight.combination_mode = ctl & 
BLM_LEGACY_MODE; - if (IS_PINEVIEW(dev)) + if (IS_PINEVIEW(dev_priv)) panel->backlight.active_low_pwm = ctl & BLM_POLARITY_PNV; panel->backlight.max = ctl >> 17; @@ -1536,8 +1517,7 @@ static int i9xx_setup_backlight(struct intel_connector *connector, enum pipe unu static int i965_setup_backlight(struct intel_connector *connector, enum pipe unused) { - struct drm_device *dev = connector->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); struct intel_panel *panel = &connector->panel; u32 ctl, ctl2, val; @@ -1570,8 +1550,7 @@ static int i965_setup_backlight(struct intel_connector *connector, enum pipe unu static int vlv_setup_backlight(struct intel_connector *connector, enum pipe pipe) { - struct drm_device *dev = connector->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); struct intel_panel *panel = &connector->panel; u32 ctl, ctl2, val; @@ -1592,7 +1571,7 @@ static int vlv_setup_backlight(struct intel_connector *connector, enum pipe pipe panel->backlight.min = get_backlight_min_vbt(connector); - val = _vlv_get_backlight(dev, pipe); + val = _vlv_get_backlight(dev_priv, pipe); panel->backlight.level = intel_panel_compute_brightness(connector, val); panel->backlight.enabled = (ctl2 & BLM_PWM_ENABLE) && @@ -1604,8 +1583,7 @@ static int vlv_setup_backlight(struct intel_connector *connector, enum pipe pipe static int bxt_setup_backlight(struct intel_connector *connector, enum pipe unused) { - struct drm_device *dev = connector->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); struct intel_panel *panel = &connector->panel; u32 pwm_ctl, val; @@ -1683,8 +1661,7 @@ static int pwm_setup_backlight(struct intel_connector *connector, int intel_panel_setup_backlight(struct drm_connector *connector, enum pipe pipe) { - struct drm_device *dev = connector->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(connector->dev); struct intel_connector *intel_connector = to_intel_connector(connector); struct intel_panel *panel = &intel_connector->panel; int ret; @@ -1739,35 +1716,35 @@ void intel_panel_destroy_backlight(struct drm_connector *connector) static void intel_panel_init_backlight_funcs(struct intel_panel *panel) { - struct intel_connector *intel_connector = + struct intel_connector *connector = container_of(panel, struct intel_connector, panel); - struct drm_device *dev = intel_connector->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); - if (IS_BROXTON(dev)) { + if (IS_BROXTON(dev_priv)) { panel->backlight.setup = bxt_setup_backlight; panel->backlight.enable = bxt_enable_backlight; panel->backlight.disable = bxt_disable_backlight; panel->backlight.set = bxt_set_backlight; panel->backlight.get = bxt_get_backlight; - } else if (HAS_PCH_LPT(dev) || HAS_PCH_SPT(dev)) { + panel->backlight.hz_to_pwm = bxt_hz_to_pwm; + } else if (HAS_PCH_LPT(dev_priv) || HAS_PCH_SPT(dev_priv)) { panel->backlight.setup = lpt_setup_backlight; panel->backlight.enable = lpt_enable_backlight; panel->backlight.disable = lpt_disable_backlight; panel->backlight.set = lpt_set_backlight; panel->backlight.get = lpt_get_backlight; - if (HAS_PCH_LPT(dev)) + if (HAS_PCH_LPT(dev_priv)) panel->backlight.hz_to_pwm = lpt_hz_to_pwm; else 
panel->backlight.hz_to_pwm = spt_hz_to_pwm; - } else if (HAS_PCH_SPLIT(dev)) { + } else if (HAS_PCH_SPLIT(dev_priv)) { panel->backlight.setup = pch_setup_backlight; panel->backlight.enable = pch_enable_backlight; panel->backlight.disable = pch_disable_backlight; panel->backlight.set = pch_set_backlight; panel->backlight.get = pch_get_backlight; panel->backlight.hz_to_pwm = pch_hz_to_pwm; - } else if (IS_VALLEYVIEW(dev)) { + } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { if (dev_priv->vbt.has_mipi) { panel->backlight.setup = pwm_setup_backlight; panel->backlight.enable = pwm_enable_backlight; @@ -1782,7 +1759,7 @@ intel_panel_init_backlight_funcs(struct intel_panel *panel) panel->backlight.get = vlv_get_backlight; panel->backlight.hz_to_pwm = vlv_hz_to_pwm; } - } else if (IS_GEN4(dev)) { + } else if (IS_GEN4(dev_priv)) { panel->backlight.setup = i965_setup_backlight; panel->backlight.enable = i965_enable_backlight; panel->backlight.disable = i965_disable_backlight; @@ -1828,7 +1805,7 @@ void intel_backlight_register(struct drm_device *dev) { struct intel_connector *connector; - list_for_each_entry(connector, &dev->mode_config.connector_list, base.head) + for_each_intel_connector(dev, connector) intel_backlight_device_register(connector); } @@ -1836,6 +1813,6 @@ void intel_backlight_unregister(struct drm_device *dev) { struct intel_connector *connector; - list_for_each_entry(connector, &dev->mode_config.connector_list, base.head) + for_each_intel_connector(dev, connector) intel_backlight_device_unregister(connector); } diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 038a81d03b17..eb5fa05cf476 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -66,6 +66,14 @@ static void bxt_init_clock_gating(struct drm_device *dev) */ I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) | GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ); + + /* + * Wa: Backlight PWM may stop in the asserted state, causing backlight + * to stay fully on. + */ + if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER)) + I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) | + PWM1_GATING_DIS | PWM2_GATING_DIS); } static void i915_pineview_get_mem_freq(struct drm_device *dev) @@ -283,7 +291,7 @@ void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable) struct drm_device *dev = dev_priv->dev; u32 val; - if (IS_VALLEYVIEW(dev)) { + if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { I915_WRITE(FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0); POSTING_READ(FW_BLC_SELF_VLV); dev_priv->wm.vlv.cxsr = enable; @@ -2422,7 +2430,7 @@ static void ilk_wm_merge(struct drm_device *dev, * enabled sometime later. */ if (IS_GEN5(dev) && !merged->fbc_wm_enabled && - intel_fbc_enabled(dev_priv)) { + intel_fbc_is_active(dev_priv)) { for (level = 2; level <= max_level; level++) { struct intel_wm_level *wm = &merged->wm[level]; @@ -3306,7 +3314,7 @@ static void skl_write_wm_values(struct drm_i915_private *dev_priv, struct drm_device *dev = dev_priv->dev; struct intel_crtc *crtc; - list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) { + for_each_intel_crtc(dev, crtc) { int i, level, max_level = ilk_wm_max_level(dev); enum pipe pipe = crtc->pipe; @@ -3515,8 +3523,7 @@ static void skl_update_other_pipe_wm(struct drm_device *dev, * Otherwise, because of this_crtc being freshly enabled/disabled, the * other active pipes need new DDB allocation and WM values. 
*/ - list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, - base.head) { + for_each_intel_crtc(dev, intel_crtc) { struct skl_pipe_wm pipe_wm = {}; bool wm_changed; @@ -4397,7 +4404,7 @@ void gen6_rps_idle(struct drm_i915_private *dev_priv) mutex_lock(&dev_priv->rps.hw_lock); if (dev_priv->rps.enabled) { - if (IS_VALLEYVIEW(dev)) + if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) vlv_set_rps_idle(dev_priv); else gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq); @@ -4450,7 +4457,7 @@ void gen6_rps_boost(struct drm_i915_private *dev_priv, void intel_set_rps(struct drm_device *dev, u8 val) { - if (IS_VALLEYVIEW(dev)) + if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) valleyview_set_rps(dev, val); else gen6_set_rps(dev, val); @@ -4494,7 +4501,7 @@ static void valleyview_disable_rps(struct drm_device *dev) static void intel_print_rc6_info(struct drm_device *dev, u32 mode) { - if (IS_VALLEYVIEW(dev)) { + if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { if (mode & (GEN7_RC_CTL_TO_MODE | GEN6_RC_CTL_EI_MODE(1))) mode = GEN6_RC_CTL_RC6_ENABLE; else @@ -5091,7 +5098,17 @@ static int valleyview_rps_rpe_freq(struct drm_i915_private *dev_priv) static int valleyview_rps_min_freq(struct drm_i915_private *dev_priv) { - return vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff; + u32 val; + + val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff; + /* + * According to the BYT Punit GPU turbo HAS 1.1.6.3 the minimum value + * for the minimum frequency in GPLL mode is 0xc1. Contrary to this on + * a BYT-M B0 the above register contains 0xbf. Moreover when setting + * a frequency Punit will not allow values below 0xc0. Clamp it 0xc0 + * to make sure it matches what Punit accepts. + */ + return max_t(u32, val, 0xc0); } /* Check that the pctx buffer wasn't move under us. */ @@ -5996,7 +6013,17 @@ static void intel_init_emon(struct drm_device *dev) void intel_init_gt_powersave(struct drm_device *dev) { + struct drm_i915_private *dev_priv = dev->dev_private; + i915.enable_rc6 = sanitize_rc6_option(dev, i915.enable_rc6); + /* + * RPM depends on RC6 to save restore the GT HW context, so make RC6 a + * requirement. 
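+ * When RC6 is disabled we therefore take a permanent runtime PM
+ * reference below, so the device can never runtime suspend.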
+ */ + if (!i915.enable_rc6) { + DRM_INFO("RC6 disabled, disabling runtime PM support\n"); + intel_runtime_pm_get(dev_priv); + } if (IS_CHERRYVIEW(dev)) cherryview_init_gt_powersave(dev); @@ -6006,10 +6033,15 @@ void intel_init_gt_powersave(struct drm_device *dev) void intel_cleanup_gt_powersave(struct drm_device *dev) { + struct drm_i915_private *dev_priv = dev->dev_private; + if (IS_CHERRYVIEW(dev)) return; else if (IS_VALLEYVIEW(dev)) valleyview_cleanup_gt_powersave(dev); + + if (!i915.enable_rc6) + intel_runtime_pm_put(dev_priv); } static void gen6_suspend_rps(struct drm_device *dev) @@ -7213,4 +7245,6 @@ void intel_pm_setup(struct drm_device *dev) INIT_LIST_HEAD(&dev_priv->rps.mmioflips.link); dev_priv->pm.suspended = false; + atomic_set(&dev_priv->pm.wakeref_count, 0); + atomic_set(&dev_priv->pm.atomic_seq, 0); } diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c index bc5ea2a6cf4c..9ccff3011523 100644 --- a/drivers/gpu/drm/i915/intel_psr.c +++ b/drivers/gpu/drm/i915/intel_psr.c @@ -191,9 +191,6 @@ static void hsw_psr_enable_sink(struct intel_dp *intel_dp) aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0); - drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, - DP_PSR_ENABLE & ~DP_PSR_MAIN_LINK_ACTIVE); - /* Enable AUX frame sync at sink */ if (dev_priv->psr.aux_frame_sync) drm_dp_dpcd_writeb(&intel_dp->aux, @@ -270,25 +267,20 @@ static void hsw_psr_enable_source(struct intel_dp *intel_dp) struct drm_i915_private *dev_priv = dev->dev_private; uint32_t max_sleep_time = 0x1f; - /* Lately it was identified that depending on panel idle frame count - * calculated at HW can be off by 1. So let's use what came - * from VBT + 1. - * There are also other cases where panel demands at least 4 - * but VBT is not being set. To cover these 2 cases lets use - * at least 5 when VBT isn't set to be on the safest side. + /* + * Let's respect VBT in case VBT asks a higher idle_frame value. + * Let's use 6 as the minimum to cover all known cases including + * the off-by-one issue that HW has in some cases. Also there are + * cases where sink should be able to train + * with the 5 or 6 idle patterns. */ - uint32_t idle_frames = dev_priv->vbt.psr.idle_frames ? - dev_priv->vbt.psr.idle_frames + 1 : 5; + uint32_t idle_frames = max(6, dev_priv->vbt.psr.idle_frames); uint32_t val = 0x0; - const uint32_t link_entry_time = EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES; - if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT) { - /* Sink should be able to train with the 5 or 6 idle patterns */ - idle_frames += 4; - } + if (IS_HASWELL(dev)) + val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES; I915_WRITE(EDP_PSR_CTL, val | - (IS_BROADWELL(dev) ? 0 : link_entry_time) | max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT | idle_frames << EDP_PSR_IDLE_FRAME_SHIFT | EDP_PSR_ENABLE); @@ -335,8 +327,8 @@ static bool intel_psr_match_conditions(struct intel_dp *intel_dp) return false; } - if (!IS_VALLEYVIEW(dev) && ((dev_priv->vbt.psr.full_link) || - (dig_port->port != PORT_A))) { + if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) && + ((dev_priv->vbt.psr.full_link) || (dig_port->port != PORT_A))) { DRM_DEBUG_KMS("PSR condition failed: Link Standby requested/needed but not supported on this platform\n"); return false; } @@ -414,9 +406,14 @@ void intel_psr_enable(struct intel_dp *intel_dp) skl_psr_setup_su_vsc(intel_dp); } - /* Avoid continuous PSR exit by masking memup and hpd */ + /* + * Per Spec: Avoid continuous PSR exit by masking MEMUP and HPD. 
+ * Also mask LPSP to avoid dependency on other drivers that + * might block runtime_pm besides preventing other hw tracking + * issues now we can rely on frontbuffer tracking. + */ I915_WRITE(EDP_PSR_DEBUG_CTL, EDP_PSR_DEBUG_MASK_MEMUP | - EDP_PSR_DEBUG_MASK_HPD); + EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP); /* Enable PSR on the panel */ hsw_psr_enable_sink(intel_dp); @@ -522,11 +519,15 @@ void intel_psr_disable(struct intel_dp *intel_dp) return; } + /* Disable PSR on Source */ if (HAS_DDI(dev)) hsw_psr_disable(intel_dp); else vlv_psr_disable(intel_dp); + /* Disable PSR on Sink */ + drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0); + dev_priv->psr.enabled = NULL; mutex_unlock(&dev_priv->psr.lock); @@ -644,7 +645,7 @@ void intel_psr_single_frame_update(struct drm_device *dev, * Single frame update is already supported on BDW+ but it requires * many W/A and it isn't really needed. */ - if (!IS_VALLEYVIEW(dev)) + if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) return; mutex_lock(&dev_priv->psr.lock); @@ -737,25 +738,9 @@ void intel_psr_flush(struct drm_device *dev, frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe); dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits; - if (HAS_DDI(dev)) { - /* - * By definition every flush should mean invalidate + flush, - * however on core platforms let's minimize the - * disable/re-enable so we can avoid the invalidate when flip - * originated the flush. - */ - if (frontbuffer_bits && origin != ORIGIN_FLIP) - intel_psr_exit(dev); - } else { - /* - * On Valleyview and Cherryview we don't use hardware tracking - * so any plane updates or cursor moves don't result in a PSR - * invalidating. Which means we need to manually fake this in - * software for all flushes. - */ - if (frontbuffer_bits) - intel_psr_exit(dev); - } + /* By definition flush = invalidate + flush */ + if (frontbuffer_bits) + intel_psr_exit(dev); if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits) if (!work_busy(&dev_priv->psr.work.work)) diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 57d78f264b53..339701d7a9a5 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -27,29 +27,13 @@ * */ +#include #include #include "i915_drv.h" #include #include "i915_trace.h" #include "intel_drv.h" -bool -intel_ring_initialized(struct intel_engine_cs *ring) -{ - struct drm_device *dev = ring->dev; - - if (!dev) - return false; - - if (i915.enable_execlists) { - struct intel_context *dctx = ring->default_context; - struct intel_ringbuffer *ringbuf = dctx->engine[ring->id].ringbuf; - - return ringbuf->obj; - } else - return ring->buffer && ring->buffer->obj; -} - int __intel_ring_space(int head, int tail, int size) { int space = head - tail; @@ -995,7 +979,7 @@ static int skl_tune_iz_hashing(struct intel_engine_cs *ring) * Only consider slices where one, and only one, subslice has 7 * EUs */ - if (hweight8(dev_priv->info.subslice_7eu[i]) != 1) + if (!is_power_of_2(dev_priv->info.subslice_7eu[i])) continue; /* @@ -1034,10 +1018,6 @@ static int skl_init_workarounds(struct intel_engine_cs *ring) return ret; if (IS_SKL_REVID(dev, 0, SKL_REVID_D0)) { - /* WaDisableHDCInvalidation:skl */ - I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | - BDW_DISABLE_HDC_INVALIDATION); - /* WaDisableChickenBitTSGBarrierAckForFFSliceCS:skl */ I915_WRITE(FF_SLICE_CS_CHICKEN2, _MASKED_BIT_ENABLE(GEN9_TSG_BARRIER_ACK_DISABLE)); @@ -1062,7 +1042,7 @@ static int skl_init_workarounds(struct intel_engine_cs 
*ring) WA_SET_BIT_MASKED(HIZ_CHICKEN, BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE); - if (IS_SKL_REVID(dev, 0, SKL_REVID_D0)) { + if (IS_SKL_REVID(dev, 0, SKL_REVID_F0)) { /* *Use Force Non-Coherent whenever executing a 3D context. This * is a workaround for a possible hang in the unlikely event @@ -1071,6 +1051,10 @@ static int skl_init_workarounds(struct intel_engine_cs *ring) /* WaForceEnableNonCoherent:skl */ WA_SET_BIT_MASKED(HDC_CHICKEN0, HDC_FORCE_NON_COHERENT); + + /* WaDisableHDCInvalidation:skl */ + I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | + BDW_DISABLE_HDC_INVALIDATION); } /* WaBarrierPerformanceFixDisable:skl */ @@ -2167,8 +2151,10 @@ static int intel_init_ring_buffer(struct drm_device *dev, init_waitqueue_head(&ring->irq_queue); ringbuf = intel_engine_create_ringbuffer(ring, 32 * PAGE_SIZE); - if (IS_ERR(ringbuf)) - return PTR_ERR(ringbuf); + if (IS_ERR(ringbuf)) { + ret = PTR_ERR(ringbuf); + goto error; + } ring->buffer = ringbuf; if (I915_NEED_GFX_HWS(dev)) { @@ -2197,8 +2183,7 @@ static int intel_init_ring_buffer(struct drm_device *dev, return 0; error: - intel_ringbuffer_free(ringbuf); - ring->buffer = NULL; + intel_cleanup_ring_buffer(ring); return ret; } @@ -2211,12 +2196,14 @@ void intel_cleanup_ring_buffer(struct intel_engine_cs *ring) dev_priv = to_i915(ring->dev); - intel_stop_ring_buffer(ring); - WARN_ON(!IS_GEN2(ring->dev) && (I915_READ_MODE(ring) & MODE_IDLE) == 0); + if (ring->buffer) { + intel_stop_ring_buffer(ring); + WARN_ON(!IS_GEN2(ring->dev) && (I915_READ_MODE(ring) & MODE_IDLE) == 0); - intel_unpin_ringbuffer_obj(ring->buffer); - intel_ringbuffer_free(ring->buffer); - ring->buffer = NULL; + intel_unpin_ringbuffer_obj(ring->buffer); + intel_ringbuffer_free(ring->buffer); + ring->buffer = NULL; + } if (ring->cleanup) ring->cleanup(ring); @@ -2225,6 +2212,7 @@ void intel_cleanup_ring_buffer(struct intel_engine_cs *ring) i915_cmd_parser_fini_ring(ring); i915_gem_batch_pool_fini(&ring->batch_pool); + ring->dev = NULL; } static int ring_wait_for_space(struct intel_engine_cs *ring, int n) diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 5d1eb206151d..49574ffe54bc 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -350,7 +350,11 @@ struct intel_engine_cs { u32 (*get_cmd_length_mask)(u32 cmd_header); }; -bool intel_ring_initialized(struct intel_engine_cs *ring); +static inline bool +intel_ring_initialized(struct intel_engine_cs *ring) +{ + return ring->dev != NULL; +} static inline unsigned intel_ring_flag(struct intel_engine_cs *ring) diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index afca6c940b9a..ddbdbffe829a 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c @@ -65,6 +65,72 @@ bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv, int power_well_id); +const char * +intel_display_power_domain_str(enum intel_display_power_domain domain) +{ + switch (domain) { + case POWER_DOMAIN_PIPE_A: + return "PIPE_A"; + case POWER_DOMAIN_PIPE_B: + return "PIPE_B"; + case POWER_DOMAIN_PIPE_C: + return "PIPE_C"; + case POWER_DOMAIN_PIPE_A_PANEL_FITTER: + return "PIPE_A_PANEL_FITTER"; + case POWER_DOMAIN_PIPE_B_PANEL_FITTER: + return "PIPE_B_PANEL_FITTER"; + case POWER_DOMAIN_PIPE_C_PANEL_FITTER: + return "PIPE_C_PANEL_FITTER"; + case POWER_DOMAIN_TRANSCODER_A: + return "TRANSCODER_A"; + case POWER_DOMAIN_TRANSCODER_B: + return "TRANSCODER_B"; + case 
POWER_DOMAIN_TRANSCODER_C: + return "TRANSCODER_C"; + case POWER_DOMAIN_TRANSCODER_EDP: + return "TRANSCODER_EDP"; + case POWER_DOMAIN_PORT_DDI_A_LANES: + return "PORT_DDI_A_LANES"; + case POWER_DOMAIN_PORT_DDI_B_LANES: + return "PORT_DDI_B_LANES"; + case POWER_DOMAIN_PORT_DDI_C_LANES: + return "PORT_DDI_C_LANES"; + case POWER_DOMAIN_PORT_DDI_D_LANES: + return "PORT_DDI_D_LANES"; + case POWER_DOMAIN_PORT_DDI_E_LANES: + return "PORT_DDI_E_LANES"; + case POWER_DOMAIN_PORT_DSI: + return "PORT_DSI"; + case POWER_DOMAIN_PORT_CRT: + return "PORT_CRT"; + case POWER_DOMAIN_PORT_OTHER: + return "PORT_OTHER"; + case POWER_DOMAIN_VGA: + return "VGA"; + case POWER_DOMAIN_AUDIO: + return "AUDIO"; + case POWER_DOMAIN_PLLS: + return "PLLS"; + case POWER_DOMAIN_AUX_A: + return "AUX_A"; + case POWER_DOMAIN_AUX_B: + return "AUX_B"; + case POWER_DOMAIN_AUX_C: + return "AUX_C"; + case POWER_DOMAIN_AUX_D: + return "AUX_D"; + case POWER_DOMAIN_GMBUS: + return "GMBUS"; + case POWER_DOMAIN_INIT: + return "INIT"; + case POWER_DOMAIN_MODESET: + return "MODESET"; + default: + MISSING_CASE(domain); + return "?"; + } +} + static void intel_power_well_enable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { @@ -472,8 +538,7 @@ static void assert_can_enable_dc5(struct drm_i915_private *dev_priv) WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5), "DC5 already programmed to be enabled.\n"); - WARN_ONCE(dev_priv->pm.suspended, - "DC5 cannot be enabled, if platform is runtime-suspended.\n"); + assert_rpm_wakelock_held(dev_priv); assert_csr_loaded(dev_priv); } @@ -487,8 +552,7 @@ static void assert_can_disable_dc5(struct drm_i915_private *dev_priv) if (dev_priv->power_domains.initializing) return; - WARN_ONCE(dev_priv->pm.suspended, - "Disabling of DC5 while platform is runtime-suspended should never happen.\n"); + assert_rpm_wakelock_held(dev_priv); } static void gen9_enable_dc5(struct drm_i915_private *dev_priv) @@ -1433,11 +1497,15 @@ void intel_display_power_put(struct drm_i915_private *dev_priv, mutex_lock(&power_domains->lock); - WARN_ON(!power_domains->domain_use_count[domain]); + WARN(!power_domains->domain_use_count[domain], + "Use count on domain %s is already zero\n", + intel_display_power_domain_str(domain)); power_domains->domain_use_count[domain]--; for_each_power_well_rev(i, power_well, BIT(domain), power_domains) { - WARN_ON(!power_well->count); + WARN(!power_well->count, + "Use count on power well %s is already zero", + power_well->name); if (!--power_well->count) intel_power_well_disable(dev_priv, power_well); @@ -1841,7 +1909,7 @@ sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv, if (disable_power_well >= 0) return !!disable_power_well; - if (IS_SKYLAKE(dev_priv)) { + if (IS_BROXTON(dev_priv)) { DRM_DEBUG_KMS("Disabling display power well support\n"); return 0; } @@ -1905,14 +1973,29 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv) */ void intel_power_domains_fini(struct drm_i915_private *dev_priv) { - /* The i915.ko module is still not prepared to be loaded when + struct device *device = &dev_priv->dev->pdev->dev; + + /* + * The i915.ko module is still not prepared to be loaded when * the power well is not enabled, so just enable it in case - * we're going to unload/reload. */ + * we're going to unload/reload. + * The following also reacquires the RPM reference the core passed + * to the driver during loading, which is dropped in + * intel_runtime_pm_enable(). 
We have to hand back the control of the + * device to the core with this reference held. + */ intel_display_set_init_power(dev_priv, true); /* Remove the refcount we took to keep power well support disabled. */ if (!i915.disable_power_well) intel_display_power_put(dev_priv, POWER_DOMAIN_INIT); + + /* + * Remove the refcount we took in intel_runtime_pm_enable() in case + * the platform doesn't support runtime PM. + */ + if (!HAS_RUNTIME_PM(dev_priv)) + pm_runtime_put(device); } static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv) @@ -2156,11 +2239,10 @@ void intel_runtime_pm_get(struct drm_i915_private *dev_priv) struct drm_device *dev = dev_priv->dev; struct device *device = &dev->pdev->dev; - if (!HAS_RUNTIME_PM(dev)) - return; - pm_runtime_get_sync(device); - WARN(dev_priv->pm.suspended, "Device still suspended.\n"); + + atomic_inc(&dev_priv->pm.wakeref_count); + assert_rpm_wakelock_held(dev_priv); } /** @@ -2185,11 +2267,10 @@ void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv) struct drm_device *dev = dev_priv->dev; struct device *device = &dev->pdev->dev; - if (!HAS_RUNTIME_PM(dev)) - return; - - WARN(dev_priv->pm.suspended, "Getting nosync-ref while suspended.\n"); + assert_rpm_wakelock_held(dev_priv); pm_runtime_get_noresume(device); + + atomic_inc(&dev_priv->pm.wakeref_count); } /** @@ -2205,8 +2286,9 @@ void intel_runtime_pm_put(struct drm_i915_private *dev_priv) struct drm_device *dev = dev_priv->dev; struct device *device = &dev->pdev->dev; - if (!HAS_RUNTIME_PM(dev)) - return; + assert_rpm_wakelock_held(dev_priv); + if (atomic_dec_and_test(&dev_priv->pm.wakeref_count)) + atomic_inc(&dev_priv->pm.atomic_seq); pm_runtime_mark_last_busy(device); pm_runtime_put_autosuspend(device); @@ -2227,22 +2309,27 @@ void intel_runtime_pm_enable(struct drm_i915_private *dev_priv) struct drm_device *dev = dev_priv->dev; struct device *device = &dev->pdev->dev; - if (!HAS_RUNTIME_PM(dev)) - return; - - /* - * RPM depends on RC6 to save restore the GT HW context, so make RC6 a - * requirement. - */ - if (!intel_enable_rc6(dev)) { - DRM_INFO("RC6 disabled, disabling runtime PM support\n"); - return; - } - pm_runtime_set_autosuspend_delay(device, 10000); /* 10s */ pm_runtime_mark_last_busy(device); - pm_runtime_use_autosuspend(device); + /* + * Take a permanent reference to disable the RPM functionality and drop + * it only when unloading the driver. Use the low level get/put helpers, + * so the driver's own RPM reference tracking asserts also work on + * platforms without RPM support. + */ + if (!HAS_RUNTIME_PM(dev)) { + pm_runtime_dont_use_autosuspend(device); + pm_runtime_get_sync(device); + } else { + pm_runtime_use_autosuspend(device); + } + + /* + * The core calls the driver load handler with an RPM reference held. + * We drop that here and will reacquire it during unloading in + * intel_power_domains_fini(). 
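+ * On platforms with runtime PM support this final put is what allows
+ * runtime suspend to kick in, subject to the autosuspend delay
+ * configured above.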
+ */ pm_runtime_put_autosuspend(device); } diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index 06679f164b3e..2e1da060b0e1 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -2978,7 +2978,8 @@ bool intel_sdvo_init(struct drm_device *dev, /* encoder type will be decided later */ intel_encoder = &intel_sdvo->base; intel_encoder->type = INTEL_OUTPUT_SDVO; - drm_encoder_init(dev, &intel_encoder->base, &intel_sdvo_enc_funcs, 0); + drm_encoder_init(dev, &intel_encoder->base, &intel_sdvo_enc_funcs, 0, + NULL); /* Read the regs to test if we can talk to the device */ for (i = 0; i < 0x40; i++) { diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c index 2b96f336589e..4ff7a1f4183e 100644 --- a/drivers/gpu/drm/i915/intel_sprite.c +++ b/drivers/gpu/drm/i915/intel_sprite.c @@ -951,7 +951,7 @@ int intel_sprite_set_colorkey(struct drm_device *dev, void *data, if ((set->flags & (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE)) == (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE)) return -EINVAL; - if (IS_VALLEYVIEW(dev) && + if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) && set->flags & I915_SET_COLORKEY_DESTINATION) return -EINVAL; @@ -1086,7 +1086,7 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane) intel_plane->max_downscale = 1; } - if (IS_VALLEYVIEW(dev)) { + if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { intel_plane->update_plane = vlv_update_plane; intel_plane->disable_plane = vlv_disable_plane; @@ -1123,7 +1123,7 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane) ret = drm_universal_plane_init(dev, &intel_plane->base, possible_crtcs, &intel_plane_funcs, plane_formats, num_plane_formats, - DRM_PLANE_TYPE_OVERLAY); + DRM_PLANE_TYPE_OVERLAY, NULL); if (ret) { kfree(intel_plane); goto out; diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c index 6bea78944cd6..948cbff6c62e 100644 --- a/drivers/gpu/drm/i915/intel_tv.c +++ b/drivers/gpu/drm/i915/intel_tv.c @@ -1645,7 +1645,7 @@ intel_tv_init(struct drm_device *dev) DRM_MODE_CONNECTOR_SVIDEO); drm_encoder_init(dev, &intel_encoder->base, &intel_tv_enc_funcs, - DRM_MODE_ENCODER_TVDAC); + DRM_MODE_ENCODER_TVDAC, NULL); intel_encoder->compute_config = intel_tv_compute_config; intel_encoder->get_config = intel_tv_get_config; diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index c2358ba78b30..277e60ae0e47 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -50,13 +50,6 @@ intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id) return "unknown"; } -static void -assert_device_not_suspended(struct drm_i915_private *dev_priv) -{ - WARN_ONCE(HAS_RUNTIME_PM(dev_priv->dev) && dev_priv->pm.suspended, - "Device suspended\n"); -} - static inline void fw_domain_reset(const struct intel_uncore_forcewake_domain *d) { @@ -236,7 +229,7 @@ static void intel_uncore_fw_release_timer(unsigned long arg) struct intel_uncore_forcewake_domain *domain = (void *)arg; unsigned long irqflags; - assert_device_not_suspended(domain->i915); + assert_rpm_device_not_suspended(domain->i915); spin_lock_irqsave(&domain->i915->uncore.lock, irqflags); if (WARN_ON(domain->wake_count == 0)) @@ -411,7 +404,7 @@ void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv, if (!dev_priv->uncore.funcs.force_wake_get) return; - WARN_ON(dev_priv->pm.suspended); + assert_rpm_wakelock_held(dev_priv); 
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); __intel_uncore_forcewake_get(dev_priv, fw_domains); @@ -628,7 +621,7 @@ hsw_unclaimed_reg_detect(struct drm_i915_private *dev_priv) #define GEN2_READ_HEADER(x) \ u##x val = 0; \ - assert_device_not_suspended(dev_priv); + assert_rpm_wakelock_held(dev_priv); #define GEN2_READ_FOOTER \ trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \ @@ -670,7 +663,7 @@ __gen2_read(64) u32 offset = i915_mmio_reg_offset(reg); \ unsigned long irqflags; \ u##x val = 0; \ - assert_device_not_suspended(dev_priv); \ + assert_rpm_wakelock_held(dev_priv); \ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags) #define GEN6_READ_FOOTER \ @@ -803,7 +796,7 @@ __gen6_read(64) #define VGPU_READ_HEADER(x) \ unsigned long irqflags; \ u##x val = 0; \ - assert_device_not_suspended(dev_priv); \ + assert_rpm_device_not_suspended(dev_priv); \ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags) #define VGPU_READ_FOOTER \ @@ -830,7 +823,7 @@ __vgpu_read(64) #define GEN2_WRITE_HEADER \ trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \ - assert_device_not_suspended(dev_priv); \ + assert_rpm_wakelock_held(dev_priv); \ #define GEN2_WRITE_FOOTER @@ -870,7 +863,7 @@ __gen2_write(64) u32 offset = i915_mmio_reg_offset(reg); \ unsigned long irqflags; \ trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \ - assert_device_not_suspended(dev_priv); \ + assert_rpm_wakelock_held(dev_priv); \ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags) #define GEN6_WRITE_FOOTER \ @@ -1046,7 +1039,7 @@ __gen6_write(64) #define VGPU_WRITE_HEADER \ unsigned long irqflags; \ trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \ - assert_device_not_suspended(dev_priv); \ + assert_rpm_device_not_suspended(dev_priv); \ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags) #define VGPU_WRITE_FOOTER \ @@ -1115,7 +1108,7 @@ static void fw_domain_init(struct drm_i915_private *dev_priv, d->val_clear = _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL); } - if (IS_VALLEYVIEW(dev_priv)) + if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) d->reg_post = FORCEWAKE_ACK_VLV; else if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv) || IS_GEN8(dev_priv)) d->reg_post = ECOBUS; @@ -1148,7 +1141,7 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev) FORCEWAKE_ACK_BLITTER_GEN9); fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA, FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9); - } else if (IS_VALLEYVIEW(dev)) { + } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { dev_priv->uncore.funcs.force_wake_get = fw_domains_get; if (!IS_CHERRYVIEW(dev)) dev_priv->uncore.funcs.force_wake_put = diff --git a/drivers/gpu/drm/imx/dw_hdmi-imx.c b/drivers/gpu/drm/imx/dw_hdmi-imx.c index 98605ea2ad9d..063825fecbe2 100644 --- a/drivers/gpu/drm/imx/dw_hdmi-imx.c +++ b/drivers/gpu/drm/imx/dw_hdmi-imx.c @@ -137,7 +137,7 @@ static void dw_hdmi_imx_encoder_prepare(struct drm_encoder *encoder) imx_drm_set_bus_format(encoder, MEDIA_BUS_FMT_RGB888_1X24); } -static struct drm_encoder_helper_funcs dw_hdmi_imx_encoder_helper_funcs = { +static const struct drm_encoder_helper_funcs dw_hdmi_imx_encoder_helper_funcs = { .mode_fixup = dw_hdmi_imx_encoder_mode_fixup, .mode_set = dw_hdmi_imx_encoder_mode_set, .prepare = dw_hdmi_imx_encoder_prepare, @@ -145,7 +145,7 @@ static struct drm_encoder_helper_funcs dw_hdmi_imx_encoder_helper_funcs = { .disable = dw_hdmi_imx_encoder_disable, }; -static struct drm_encoder_funcs dw_hdmi_imx_encoder_funcs = { +static const struct drm_encoder_funcs dw_hdmi_imx_encoder_funcs = { .destroy = drm_encoder_cleanup, }; 
@@ -251,7 +251,7 @@ static int dw_hdmi_imx_bind(struct device *dev, struct device *master, drm_encoder_helper_add(encoder, &dw_hdmi_imx_encoder_helper_funcs); drm_encoder_init(drm, encoder, &dw_hdmi_imx_encoder_funcs, - DRM_MODE_ENCODER_TMDS); + DRM_MODE_ENCODER_TMDS, NULL); return dw_hdmi_bind(dev, master, data, encoder, iores, irq, plat_data); } diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c index 882cf3d4b7a8..2f57d7967417 100644 --- a/drivers/gpu/drm/imx/imx-drm-core.c +++ b/drivers/gpu/drm/imx/imx-drm-core.c @@ -39,13 +39,12 @@ struct imx_drm_component { struct imx_drm_device { struct drm_device *drm; struct imx_drm_crtc *crtc[MAX_CRTC]; - int pipes; + unsigned int pipes; struct drm_fbdev_cma *fbhelper; }; struct imx_drm_crtc { struct drm_crtc *crtc; - int pipe; struct imx_drm_crtc_helper_funcs imx_drm_helper_funcs; }; @@ -54,9 +53,9 @@ static int legacyfb_depth = 16; module_param(legacyfb_depth, int, 0444); #endif -int imx_drm_crtc_id(struct imx_drm_crtc *crtc) +unsigned int imx_drm_crtc_id(struct imx_drm_crtc *crtc) { - return crtc->pipe; + return drm_crtc_index(crtc->crtc); } EXPORT_SYMBOL_GPL(imx_drm_crtc_id); @@ -124,19 +123,19 @@ EXPORT_SYMBOL_GPL(imx_drm_set_bus_format); int imx_drm_crtc_vblank_get(struct imx_drm_crtc *imx_drm_crtc) { - return drm_vblank_get(imx_drm_crtc->crtc->dev, imx_drm_crtc->pipe); + return drm_crtc_vblank_get(imx_drm_crtc->crtc); } EXPORT_SYMBOL_GPL(imx_drm_crtc_vblank_get); void imx_drm_crtc_vblank_put(struct imx_drm_crtc *imx_drm_crtc) { - drm_vblank_put(imx_drm_crtc->crtc->dev, imx_drm_crtc->pipe); + drm_crtc_vblank_put(imx_drm_crtc->crtc); } EXPORT_SYMBOL_GPL(imx_drm_crtc_vblank_put); void imx_drm_handle_vblank(struct imx_drm_crtc *imx_drm_crtc) { - drm_handle_vblank(imx_drm_crtc->crtc->dev, imx_drm_crtc->pipe); + drm_crtc_handle_vblank(imx_drm_crtc->crtc); } EXPORT_SYMBOL_GPL(imx_drm_handle_vblank); @@ -215,7 +214,7 @@ static void imx_drm_output_poll_changed(struct drm_device *drm) drm_fbdev_cma_hotplug_event(imxdrm->fbhelper); } -static struct drm_mode_config_funcs imx_drm_mode_config_funcs = { +static const struct drm_mode_config_funcs imx_drm_mode_config_funcs = { .fb_create = drm_fb_cma_create, .output_poll_changed = imx_drm_output_poll_changed, }; @@ -306,6 +305,7 @@ static int imx_drm_driver_load(struct drm_device *drm, unsigned long flags) dev_warn(drm->dev, "Invalid legacyfb_depth. 
Defaulting to 16bpp\n"); legacyfb_depth = 16; } + drm_helper_disable_unused_functions(drm); imxdrm->fbhelper = drm_fbdev_cma_init(drm, legacyfb_depth, drm->mode_config.num_crtc, MAX_CRTC); if (IS_ERR(imxdrm->fbhelper)) { @@ -356,12 +356,11 @@ int imx_drm_add_crtc(struct drm_device *drm, struct drm_crtc *crtc, return -ENOMEM; imx_drm_crtc->imx_drm_helper_funcs = *imx_drm_helper_funcs; - imx_drm_crtc->pipe = imxdrm->pipes++; imx_drm_crtc->crtc = crtc; crtc->port = port; - imxdrm->crtc[imx_drm_crtc->pipe] = imx_drm_crtc; + imxdrm->crtc[imxdrm->pipes++] = imx_drm_crtc; *new_crtc = imx_drm_crtc; @@ -373,12 +372,12 @@ int imx_drm_add_crtc(struct drm_device *drm, struct drm_crtc *crtc, imx_drm_crtc->imx_drm_helper_funcs.crtc_helper_funcs); drm_crtc_init_with_planes(drm, crtc, primary_plane, NULL, - imx_drm_crtc->imx_drm_helper_funcs.crtc_funcs); + imx_drm_crtc->imx_drm_helper_funcs.crtc_funcs, NULL); return 0; err_register: - imxdrm->crtc[imx_drm_crtc->pipe] = NULL; + imxdrm->crtc[--imxdrm->pipes] = NULL; kfree(imx_drm_crtc); return ret; } @@ -390,10 +389,11 @@ EXPORT_SYMBOL_GPL(imx_drm_add_crtc); int imx_drm_remove_crtc(struct imx_drm_crtc *imx_drm_crtc) { struct imx_drm_device *imxdrm = imx_drm_crtc->crtc->dev->dev_private; + unsigned int pipe = drm_crtc_index(imx_drm_crtc->crtc); drm_crtc_cleanup(imx_drm_crtc->crtc); - imxdrm->crtc[imx_drm_crtc->pipe] = NULL; + imxdrm->crtc[pipe] = NULL; kfree(imx_drm_crtc); diff --git a/drivers/gpu/drm/imx/imx-drm.h b/drivers/gpu/drm/imx/imx-drm.h index 83284b4d4be1..71cf6d9c714f 100644 --- a/drivers/gpu/drm/imx/imx-drm.h +++ b/drivers/gpu/drm/imx/imx-drm.h @@ -13,7 +13,7 @@ struct drm_plane; struct imx_drm_crtc; struct platform_device; -int imx_drm_crtc_id(struct imx_drm_crtc *crtc); +unsigned int imx_drm_crtc_id(struct imx_drm_crtc *crtc); struct imx_drm_crtc_helper_funcs { int (*enable_vblank)(struct drm_crtc *crtc); diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c index abacc8f67469..22ac482231ed 100644 --- a/drivers/gpu/drm/imx/imx-ldb.c +++ b/drivers/gpu/drm/imx/imx-ldb.c @@ -358,23 +358,23 @@ static void imx_ldb_encoder_disable(struct drm_encoder *encoder) drm_panel_unprepare(imx_ldb_ch->panel); } -static struct drm_connector_funcs imx_ldb_connector_funcs = { +static const struct drm_connector_funcs imx_ldb_connector_funcs = { .dpms = drm_helper_connector_dpms, .fill_modes = drm_helper_probe_single_connector_modes, .detect = imx_ldb_connector_detect, .destroy = imx_drm_connector_destroy, }; -static struct drm_connector_helper_funcs imx_ldb_connector_helper_funcs = { +static const struct drm_connector_helper_funcs imx_ldb_connector_helper_funcs = { .get_modes = imx_ldb_connector_get_modes, .best_encoder = imx_ldb_connector_best_encoder, }; -static struct drm_encoder_funcs imx_ldb_encoder_funcs = { +static const struct drm_encoder_funcs imx_ldb_encoder_funcs = { .destroy = imx_drm_encoder_destroy, }; -static struct drm_encoder_helper_funcs imx_ldb_encoder_helper_funcs = { +static const struct drm_encoder_helper_funcs imx_ldb_encoder_helper_funcs = { .dpms = imx_ldb_encoder_dpms, .mode_fixup = imx_ldb_encoder_mode_fixup, .prepare = imx_ldb_encoder_prepare, @@ -422,7 +422,7 @@ static int imx_ldb_register(struct drm_device *drm, drm_encoder_helper_add(&imx_ldb_ch->encoder, &imx_ldb_encoder_helper_funcs); drm_encoder_init(drm, &imx_ldb_ch->encoder, &imx_ldb_encoder_funcs, - DRM_MODE_ENCODER_LVDS); + DRM_MODE_ENCODER_LVDS, NULL); drm_connector_helper_add(&imx_ldb_ch->connector, &imx_ldb_connector_helper_funcs); diff --git 
a/drivers/gpu/drm/imx/imx-tve.c b/drivers/gpu/drm/imx/imx-tve.c index f9597146dc67..292349f0b132 100644 --- a/drivers/gpu/drm/imx/imx-tve.c +++ b/drivers/gpu/drm/imx/imx-tve.c @@ -360,24 +360,24 @@ static void imx_tve_encoder_disable(struct drm_encoder *encoder) tve_disable(tve); } -static struct drm_connector_funcs imx_tve_connector_funcs = { +static const struct drm_connector_funcs imx_tve_connector_funcs = { .dpms = drm_helper_connector_dpms, .fill_modes = drm_helper_probe_single_connector_modes, .detect = imx_tve_connector_detect, .destroy = imx_drm_connector_destroy, }; -static struct drm_connector_helper_funcs imx_tve_connector_helper_funcs = { +static const struct drm_connector_helper_funcs imx_tve_connector_helper_funcs = { .get_modes = imx_tve_connector_get_modes, .best_encoder = imx_tve_connector_best_encoder, .mode_valid = imx_tve_connector_mode_valid, }; -static struct drm_encoder_funcs imx_tve_encoder_funcs = { +static const struct drm_encoder_funcs imx_tve_encoder_funcs = { .destroy = imx_drm_encoder_destroy, }; -static struct drm_encoder_helper_funcs imx_tve_encoder_helper_funcs = { +static const struct drm_encoder_helper_funcs imx_tve_encoder_helper_funcs = { .dpms = imx_tve_encoder_dpms, .mode_fixup = imx_tve_encoder_mode_fixup, .prepare = imx_tve_encoder_prepare, @@ -508,7 +508,7 @@ static int imx_tve_register(struct drm_device *drm, struct imx_tve *tve) drm_encoder_helper_add(&tve->encoder, &imx_tve_encoder_helper_funcs); drm_encoder_init(drm, &tve->encoder, &imx_tve_encoder_funcs, - encoder_type); + encoder_type, NULL); drm_connector_helper_add(&tve->connector, &imx_tve_connector_helper_funcs); diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c index 4ab841eebee1..30a57185bdb4 100644 --- a/drivers/gpu/drm/imx/ipuv3-crtc.c +++ b/drivers/gpu/drm/imx/ipuv3-crtc.c @@ -270,7 +270,7 @@ static void ipu_crtc_commit(struct drm_crtc *crtc) ipu_fb_enable(ipu_crtc); } -static struct drm_crtc_helper_funcs ipu_helper_funcs = { +static const struct drm_crtc_helper_funcs ipu_helper_funcs = { .dpms = ipu_crtc_dpms, .mode_fixup = ipu_crtc_mode_fixup, .mode_set = ipu_crtc_mode_set, diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c index e2ff410bab74..591ba2f1ae03 100644 --- a/drivers/gpu/drm/imx/ipuv3-plane.c +++ b/drivers/gpu/drm/imx/ipuv3-plane.c @@ -401,7 +401,8 @@ struct ipu_plane *ipu_plane_init(struct drm_device *dev, struct ipu_soc *ipu, ret = drm_universal_plane_init(dev, &ipu_plane->base, possible_crtcs, &ipu_plane_funcs, ipu_plane_formats, - ARRAY_SIZE(ipu_plane_formats), type); + ARRAY_SIZE(ipu_plane_formats), type, + NULL); if (ret) { DRM_ERROR("failed to initialize plane\n"); kfree(ipu_plane); diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c index 2e9b9f1b5cd2..0ffef172afb4 100644 --- a/drivers/gpu/drm/imx/parallel-display.c +++ b/drivers/gpu/drm/imx/parallel-display.c @@ -148,23 +148,23 @@ static void imx_pd_encoder_disable(struct drm_encoder *encoder) drm_panel_unprepare(imxpd->panel); } -static struct drm_connector_funcs imx_pd_connector_funcs = { +static const struct drm_connector_funcs imx_pd_connector_funcs = { .dpms = drm_helper_connector_dpms, .fill_modes = drm_helper_probe_single_connector_modes, .detect = imx_pd_connector_detect, .destroy = imx_drm_connector_destroy, }; -static struct drm_connector_helper_funcs imx_pd_connector_helper_funcs = { +static const struct drm_connector_helper_funcs imx_pd_connector_helper_funcs = { .get_modes = 
imx_pd_connector_get_modes, .best_encoder = imx_pd_connector_best_encoder, }; -static struct drm_encoder_funcs imx_pd_encoder_funcs = { +static const struct drm_encoder_funcs imx_pd_encoder_funcs = { .destroy = imx_drm_encoder_destroy, }; -static struct drm_encoder_helper_funcs imx_pd_encoder_helper_funcs = { +static const struct drm_encoder_helper_funcs imx_pd_encoder_helper_funcs = { .dpms = imx_pd_encoder_dpms, .mode_fixup = imx_pd_encoder_mode_fixup, .prepare = imx_pd_encoder_prepare, @@ -192,7 +192,7 @@ static int imx_pd_register(struct drm_device *drm, drm_encoder_helper_add(&imxpd->encoder, &imx_pd_encoder_helper_funcs); drm_encoder_init(drm, &imxpd->encoder, &imx_pd_encoder_funcs, - DRM_MODE_ENCODER_NONE); + DRM_MODE_ENCODER_NONE, NULL); drm_connector_helper_add(&imxpd->connector, &imx_pd_connector_helper_funcs); @@ -204,8 +204,6 @@ static int imx_pd_register(struct drm_device *drm, drm_mode_connector_attach_encoder(&imxpd->connector, &imxpd->encoder); - imxpd->connector.encoder = &imxpd->encoder; - return 0; } diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c index c99d3fe12881..19c18b7af28a 100644 --- a/drivers/gpu/drm/mgag200/mgag200_mode.c +++ b/drivers/gpu/drm/mgag200/mgag200_mode.c @@ -1538,7 +1538,7 @@ static struct drm_encoder *mga_encoder_init(struct drm_device *dev) encoder->possible_crtcs = 0x1; drm_encoder_init(dev, encoder, &mga_encoder_encoder_funcs, - DRM_MODE_ENCODER_DAC); + DRM_MODE_ENCODER_DAC, NULL); drm_encoder_helper_add(encoder, &mga_encoder_helper_funcs); return encoder; @@ -1684,13 +1684,13 @@ static void mga_connector_destroy(struct drm_connector *connector) kfree(connector); } -struct drm_connector_helper_funcs mga_vga_connector_helper_funcs = { +static const struct drm_connector_helper_funcs mga_vga_connector_helper_funcs = { .get_modes = mga_vga_get_modes, .mode_valid = mga_vga_mode_valid, .best_encoder = mga_connector_best_encoder, }; -struct drm_connector_funcs mga_vga_connector_funcs = { +static const struct drm_connector_funcs mga_vga_connector_funcs = { .dpms = drm_helper_connector_dpms, .detect = mga_vga_detect, .fill_modes = drm_helper_probe_single_connector_modes, diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig index 84d3ec98e6b9..215495c2780c 100644 --- a/drivers/gpu/drm/msm/Kconfig +++ b/drivers/gpu/drm/msm/Kconfig @@ -54,3 +54,11 @@ config DRM_MSM_DSI_20NM_PHY default y help Choose this option if the 20nm DSI PHY is used on the platform. + +config DRM_MSM_DSI_28NM_8960_PHY + bool "Enable DSI 28nm 8960 PHY driver in MSM DRM" + depends on DRM_MSM_DSI + default y + help + Choose this option if the 28nm DSI PHY 8960 variant is used on the + platform. 
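The imx and mgag200 hunks above all adapt to the same round of core API changes: the funcs/helper tables are const, drm_encoder_init(), drm_crtc_init_with_planes() and drm_universal_plane_init() take a trailing name argument (NULL keeps the core-generated default), vblank handling goes through the per-CRTC helpers rather than a (device, pipe) pair, and drm_mode_connector_attach_encoder() alone records the connector/encoder link, which is why the manual connector->encoder write in parallel-display could go. A condensed, illustrative sketch, with every example_ identifier invented:

#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fourcc.h>

static const struct drm_encoder_funcs example_encoder_funcs = {
	.destroy	= drm_encoder_cleanup,	/* const table, as in the hunks */
};

static const u32 example_formats[] = { DRM_FORMAT_XRGB8888 };

/* Assumed wiring: the CRTC pointer is the IRQ cookie. */
static irqreturn_t example_vblank_irq(int irq, void *data)
{
	struct drm_crtc *crtc = data;

	drm_crtc_handle_vblank(crtc);	/* per-CRTC helper, no pipe number */
	return IRQ_HANDLED;
}

/* The connector is assumed to have been initialised by the caller. */
static int example_register(struct drm_device *drm, struct drm_crtc *crtc,
			    struct drm_plane *primary,
			    const struct drm_crtc_funcs *crtc_funcs,
			    const struct drm_plane_funcs *plane_funcs,
			    struct drm_encoder *encoder,
			    struct drm_connector *connector)
{
	int ret;

	ret = drm_universal_plane_init(drm, primary, 1, plane_funcs,
				       example_formats,
				       ARRAY_SIZE(example_formats),
				       DRM_PLANE_TYPE_PRIMARY, NULL);
	if (ret)
		return ret;

	ret = drm_crtc_init_with_planes(drm, crtc, primary, NULL,
					crtc_funcs, NULL);
	if (ret)
		return ret;

	ret = drm_encoder_init(drm, encoder, &example_encoder_funcs,
			       DRM_MODE_ENCODER_NONE, NULL);
	if (ret)
		return ret;

	/* The core records the link; no manual connector->encoder write. */
	return drm_mode_connector_attach_encoder(connector, encoder);
}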
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile index 1c90290be716..065ad4138799 100644 --- a/drivers/gpu/drm/msm/Makefile +++ b/drivers/gpu/drm/msm/Makefile @@ -54,6 +54,7 @@ msm-$(CONFIG_DRM_FBDEV_EMULATION) += msm_fbdev.o msm-$(CONFIG_COMMON_CLK) += mdp/mdp4/mdp4_lvds_pll.o msm-$(CONFIG_DRM_MSM_DSI) += dsi/dsi.o \ + mdp/mdp4/mdp4_dsi_encoder.o \ dsi/dsi_cfg.o \ dsi/dsi_host.o \ dsi/dsi_manager.o \ @@ -62,10 +63,12 @@ msm-$(CONFIG_DRM_MSM_DSI) += dsi/dsi.o \ msm-$(CONFIG_DRM_MSM_DSI_28NM_PHY) += dsi/phy/dsi_phy_28nm.o msm-$(CONFIG_DRM_MSM_DSI_20NM_PHY) += dsi/phy/dsi_phy_20nm.o +msm-$(CONFIG_DRM_MSM_DSI_28NM_8960_PHY) += dsi/phy/dsi_phy_28nm_8960.o ifeq ($(CONFIG_DRM_MSM_DSI_PLL),y) msm-y += dsi/pll/dsi_pll.o msm-$(CONFIG_DRM_MSM_DSI_28NM_PHY) += dsi/pll/dsi_pll_28nm.o +msm-$(CONFIG_DRM_MSM_DSI_28NM_8960_PHY) += dsi/pll/dsi_pll_28nm_8960.o endif obj-$(CONFIG_DRM_MSM) += msm.o diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c index 1ea2df524fac..950d27d26b30 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_device.c +++ b/drivers/gpu/drm/msm/adreno/adreno_device.c @@ -19,10 +19,6 @@ #include "adreno_gpu.h" -#if defined(DOWNSTREAM_CONFIG_MSM_BUS_SCALING) && !defined(CONFIG_OF) -# include -#endif - #define ANY_ID 0xff bool hang_debug = false; @@ -168,7 +164,6 @@ static void set_gpu_pdev(struct drm_device *dev, static int adreno_bind(struct device *dev, struct device *master, void *data) { static struct adreno_platform_config config = {}; -#ifdef CONFIG_OF struct device_node *child, *node = dev->of_node; u32 val; int ret; @@ -205,53 +200,6 @@ static int adreno_bind(struct device *dev, struct device *master, void *data) return -ENXIO; } -#else - struct kgsl_device_platform_data *pdata = dev->platform_data; - uint32_t version = socinfo_get_version(); - if (cpu_is_apq8064ab()) { - config.fast_rate = 450000000; - config.slow_rate = 27000000; - config.bus_freq = 4; - config.rev = ADRENO_REV(3, 2, 1, 0); - } else if (cpu_is_apq8064()) { - config.fast_rate = 400000000; - config.slow_rate = 27000000; - config.bus_freq = 4; - - if (SOCINFO_VERSION_MAJOR(version) == 2) - config.rev = ADRENO_REV(3, 2, 0, 2); - else if ((SOCINFO_VERSION_MAJOR(version) == 1) && - (SOCINFO_VERSION_MINOR(version) == 1)) - config.rev = ADRENO_REV(3, 2, 0, 1); - else - config.rev = ADRENO_REV(3, 2, 0, 0); - - } else if (cpu_is_msm8960ab()) { - config.fast_rate = 400000000; - config.slow_rate = 320000000; - config.bus_freq = 4; - - if (SOCINFO_VERSION_MINOR(version) == 0) - config.rev = ADRENO_REV(3, 2, 1, 0); - else - config.rev = ADRENO_REV(3, 2, 1, 1); - - } else if (cpu_is_msm8930()) { - config.fast_rate = 400000000; - config.slow_rate = 27000000; - config.bus_freq = 3; - - if ((SOCINFO_VERSION_MAJOR(version) == 1) && - (SOCINFO_VERSION_MINOR(version) == 2)) - config.rev = ADRENO_REV(3, 0, 5, 2); - else - config.rev = ADRENO_REV(3, 0, 5, 0); - - } -# ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING - config.bus_scale_table = pdata->bus_scale_table; -# endif -#endif dev->platform_data = &config; set_gpu_pdev(dev_get_drvdata(master), to_platform_device(dev)); return 0; diff --git a/drivers/gpu/drm/msm/dsi/dsi.h b/drivers/gpu/drm/msm/dsi/dsi.h index 5f5a3732cdf6..749fbb28ec3d 100644 --- a/drivers/gpu/drm/msm/dsi/dsi.h +++ b/drivers/gpu/drm/msm/dsi/dsi.h @@ -31,10 +31,12 @@ enum msm_dsi_phy_type { MSM_DSI_PHY_28NM_HPM, MSM_DSI_PHY_28NM_LP, MSM_DSI_PHY_20NM, + MSM_DSI_PHY_28NM_8960, MSM_DSI_PHY_MAX }; #define DSI_DEV_REGULATOR_MAX 8 +#define DSI_BUS_CLK_MAX 4 /* 
Regulators for DSI devices */ struct dsi_reg_entry { @@ -89,7 +91,7 @@ int msm_dsi_manager_phy_enable(int id, u32 *clk_pre, u32 *clk_post); void msm_dsi_manager_phy_disable(int id); int msm_dsi_manager_cmd_xfer(int id, const struct mipi_dsi_msg *msg); -bool msm_dsi_manager_cmd_xfer_trigger(int id, u32 iova, u32 len); +bool msm_dsi_manager_cmd_xfer_trigger(int id, u32 dma_base, u32 len); int msm_dsi_manager_register(struct msm_dsi *msm_dsi); void msm_dsi_manager_unregister(struct msm_dsi *msm_dsi); @@ -143,7 +145,7 @@ int msm_dsi_host_cmd_tx(struct mipi_dsi_host *host, int msm_dsi_host_cmd_rx(struct mipi_dsi_host *host, const struct mipi_dsi_msg *msg); void msm_dsi_host_cmd_xfer_commit(struct mipi_dsi_host *host, - u32 iova, u32 len); + u32 dma_base, u32 len); int msm_dsi_host_enable(struct mipi_dsi_host *host); int msm_dsi_host_disable(struct mipi_dsi_host *host); int msm_dsi_host_power_on(struct mipi_dsi_host *host); diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.c b/drivers/gpu/drm/msm/dsi/dsi_cfg.c index 5872d5e5934f..2a827d8093a2 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_cfg.c +++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.c @@ -13,9 +13,26 @@ #include "dsi_cfg.h" -/* DSI v2 has not been supported by now */ -static const struct msm_dsi_config dsi_v2_cfg = { +static const char * const dsi_v2_bus_clk_names[] = { + "core_mmss_clk", "iface_clk", "bus_clk", +}; + +static const struct msm_dsi_config apq8064_dsi_cfg = { .io_offset = 0, + .reg_cfg = { + .num = 3, + .regs = { + {"vdda", 1200000, 1200000, 100000, 100}, + {"avdd", 3000000, 3000000, 110000, 100}, + {"vddio", 1800000, 1800000, 100000, 100}, + }, + }, + .bus_clk_names = dsi_v2_bus_clk_names, + .num_bus_clks = ARRAY_SIZE(dsi_v2_bus_clk_names), +}; + +static const char * const dsi_6g_bus_clk_names[] = { + "mdp_core_clk", "iface_clk", "bus_clk", "core_mmss_clk", }; static const struct msm_dsi_config msm8974_apq8084_dsi_cfg = { @@ -29,6 +46,12 @@ static const struct msm_dsi_config msm8974_apq8084_dsi_cfg = { {"vddio", 1800000, 1800000, 100000, 100}, }, }, + .bus_clk_names = dsi_6g_bus_clk_names, + .num_bus_clks = ARRAY_SIZE(dsi_6g_bus_clk_names), +}; + +static const char * const dsi_8916_bus_clk_names[] = { + "mdp_core_clk", "iface_clk", "bus_clk", }; static const struct msm_dsi_config msm8916_dsi_cfg = { @@ -42,6 +65,8 @@ static const struct msm_dsi_config msm8916_dsi_cfg = { {"vddio", 1800000, 1800000, 100000, 100}, }, }, + .bus_clk_names = dsi_8916_bus_clk_names, + .num_bus_clks = ARRAY_SIZE(dsi_8916_bus_clk_names), }; static const struct msm_dsi_config msm8994_dsi_cfg = { @@ -57,11 +82,13 @@ static const struct msm_dsi_config msm8994_dsi_cfg = { {"lab_reg", -1, -1, -1, -1}, {"ibb_reg", -1, -1, -1, -1}, }, - } + }, + .bus_clk_names = dsi_6g_bus_clk_names, + .num_bus_clks = ARRAY_SIZE(dsi_6g_bus_clk_names), }; static const struct msm_dsi_cfg_handler dsi_cfg_handlers[] = { - {MSM_DSI_VER_MAJOR_V2, U32_MAX, &dsi_v2_cfg}, + {MSM_DSI_VER_MAJOR_V2, MSM_DSI_V2_VER_MINOR_8064, &apq8064_dsi_cfg}, {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_0, &msm8974_apq8084_dsi_cfg}, {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_1, diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.h b/drivers/gpu/drm/msm/dsi/dsi_cfg.h index 4cf887240177..a68c836744a3 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_cfg.h +++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.h @@ -25,11 +25,15 @@ #define MSM_DSI_6G_VER_MINOR_V1_3 0x10030000 #define MSM_DSI_6G_VER_MINOR_V1_3_1 0x10030001 +#define MSM_DSI_V2_VER_MINOR_8064 0x0 + #define DSI_6G_REG_SHIFT 4 struct msm_dsi_config { u32 io_offset; struct 
dsi_reg_config reg_cfg; + const char * const *bus_clk_names; + const int num_bus_clks; }; struct msm_dsi_cfg_handler { diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c index 4c49868efcda..48f9967b4a1b 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_host.c +++ b/drivers/gpu/drm/msm/dsi/dsi_host.c @@ -24,26 +24,36 @@ #include #include #include +#include +#include #include
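Rounding off the msm/dsi configuration rework above: each SoC entry in dsi_cfg_handlers[] is keyed by (major, minor) version and now also names its bus clocks, so the host no longer hard-codes them. A hedged sketch of how such a table is meant to be consumed, bounded by DSI_BUS_CLK_MAX; the helper below is invented and the corresponding dsi_host.c changes are not shown here:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

#include "dsi.h"	/* DSI_BUS_CLK_MAX */
#include "dsi_cfg.h"	/* struct msm_dsi_config */

/* Invented helper: acquire every bus clock named in the per-SoC config. */
static int example_dsi_get_bus_clks(struct device *dev,
				    const struct msm_dsi_config *cfg,
				    struct clk *clks[DSI_BUS_CLK_MAX])
{
	int i;

	if (WARN_ON(cfg->num_bus_clks > DSI_BUS_CLK_MAX))
		return -EINVAL;

	for (i = 0; i < cfg->num_bus_clks; i++) {
		clks[i] = devm_clk_get(dev, cfg->bus_clk_names[i]);
		if (IS_ERR(clks[i]))
			return PTR_ERR(clks[i]);
	}

	return 0;
}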