mirror of
				git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
				synced 2025-09-04 20:19:47 +08:00 
			
		
		
		
	drm/i915: Enable RC6 immediately
Now that PCU communication is reasonably fast, we do not need to defer
RC6 initialisation to a workqueue.

References: https://bugs.freedesktop.org/show_bug.cgi?id=97017
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
This commit is contained in:
		
							parent
							
								
									65fe29eeec
								
							
						
					
					
						commit
						b12e0ee208
					
				| @ -1630,7 +1630,6 @@ static int i915_drm_resume(struct drm_device *dev) | ||||
| 
 | ||||
| 	intel_opregion_notify_adapter(dev_priv, PCI_D0); | ||||
| 
 | ||||
| 	intel_autoenable_gt_powersave(dev_priv); | ||||
| 	drm_kms_helper_poll_enable(dev); | ||||
| 
 | ||||
| 	enable_rpm_wakeref_asserts(dev_priv); | ||||
| @ -1812,7 +1811,8 @@ int i915_reset(struct drm_i915_private *dev_priv) | ||||
| 	 * previous concerns that it doesn't respond well to some forms | ||||
| 	 * of re-init after reset. | ||||
| 	 */ | ||||
| 	intel_autoenable_gt_powersave(dev_priv); | ||||
| 	if (INTEL_GEN(dev_priv) > 5) | ||||
| 		intel_enable_gt_powersave(dev_priv); | ||||
| 
 | ||||
| 	return 0; | ||||
| 
 | ||||
| @ -2440,6 +2440,7 @@ static int intel_runtime_resume(struct device *device) | ||||
| 	i915_gem_init_swizzling(dev); | ||||
| 
 | ||||
| 	intel_runtime_pm_enable_interrupts(dev_priv); | ||||
| 	intel_enable_gt_powersave(dev_priv); | ||||
| 
 | ||||
| 	/*
 | ||||
| 	 * On VLV/CHV display interrupts are part of the display | ||||
|  | ||||
| @ -1192,7 +1192,6 @@ struct intel_gen6_power_mgmt { | ||||
| 	bool client_boost; | ||||
| 
 | ||||
| 	bool enabled; | ||||
| 	struct delayed_work autoenable_work; | ||||
| 	unsigned boosts; | ||||
| 
 | ||||
| 	/* manual wa residency calculations */ | ||||
|  | ||||
| @ -4355,8 +4355,6 @@ i915_gem_suspend(struct drm_device *dev) | ||||
| 	struct drm_i915_private *dev_priv = to_i915(dev); | ||||
| 	int ret = 0; | ||||
| 
 | ||||
| 	intel_suspend_gt_powersave(dev_priv); | ||||
| 
 | ||||
| 	mutex_lock(&dev->struct_mutex); | ||||
| 
 | ||||
| 	/* We have to flush all the executing contexts to main memory so
 | ||||
|  | ||||
| @ -405,7 +405,6 @@ static void i915_gem_mark_busy(const struct intel_engine_cs *engine) | ||||
| 	intel_runtime_pm_get_noresume(dev_priv); | ||||
| 	dev_priv->gt.awake = true; | ||||
| 
 | ||||
| 	intel_enable_gt_powersave(dev_priv); | ||||
| 	i915_update_gfx_val(dev_priv); | ||||
| 	if (INTEL_GEN(dev_priv) >= 6) | ||||
| 		gen6_rps_busy(dev_priv); | ||||
|  | ||||
| @ -15502,6 +15502,7 @@ void intel_modeset_init_hw(struct drm_device *dev) | ||||
| 	dev_priv->atomic_cdclk_freq = dev_priv->cdclk_freq; | ||||
| 
 | ||||
| 	intel_init_clock_gating(dev); | ||||
| 	intel_enable_gt_powersave(dev_priv); | ||||
| } | ||||
| 
 | ||||
| /*
 | ||||
|  | ||||
| @ -1691,12 +1691,9 @@ void intel_pm_setup(struct drm_device *dev); | ||||
| void intel_gpu_ips_init(struct drm_i915_private *dev_priv); | ||||
| void intel_gpu_ips_teardown(void); | ||||
| void intel_init_gt_powersave(struct drm_i915_private *dev_priv); | ||||
| void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv); | ||||
| void intel_sanitize_gt_powersave(struct drm_i915_private *dev_priv); | ||||
| void intel_enable_gt_powersave(struct drm_i915_private *dev_priv); | ||||
| void intel_autoenable_gt_powersave(struct drm_i915_private *dev_priv); | ||||
| void intel_disable_gt_powersave(struct drm_i915_private *dev_priv); | ||||
| void intel_suspend_gt_powersave(struct drm_i915_private *dev_priv); | ||||
| void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv); | ||||
| void gen6_rps_busy(struct drm_i915_private *dev_priv); | ||||
| void gen6_rps_reset_ei(struct drm_i915_private *dev_priv); | ||||
| void gen6_rps_idle(struct drm_i915_private *dev_priv); | ||||
|  | ||||
| @ -6526,8 +6526,6 @@ void intel_init_gt_powersave(struct drm_i915_private *dev_priv) | ||||
| 	dev_priv->rps.boost_freq = dev_priv->rps.max_freq; | ||||
| 
 | ||||
| 	mutex_unlock(&dev_priv->rps.hw_lock); | ||||
| 
 | ||||
| 	intel_autoenable_gt_powersave(dev_priv); | ||||
| } | ||||
| 
 | ||||
| void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv) | ||||
| @ -6541,31 +6539,10 @@ void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv) | ||||
| 		intel_runtime_pm_put(dev_priv); | ||||
| } | ||||
| 
 | ||||
| /**
 | ||||
|  * intel_suspend_gt_powersave - suspend PM work and helper threads | ||||
|  * @dev_priv: i915 device | ||||
|  * | ||||
|  * We don't want to disable RC6 or other features here, we just want | ||||
|  * to make sure any work we've queued has finished and won't bother | ||||
|  * us while we're suspended. | ||||
|  */ | ||||
| void intel_suspend_gt_powersave(struct drm_i915_private *dev_priv) | ||||
| { | ||||
| 	if (INTEL_GEN(dev_priv) < 6) | ||||
| 		return; | ||||
| 
 | ||||
| 	if (cancel_delayed_work_sync(&dev_priv->rps.autoenable_work)) | ||||
| 		intel_runtime_pm_put(dev_priv); | ||||
| 
 | ||||
| 	/* gen6_rps_idle() will be called later to disable interrupts */ | ||||
| } | ||||
| 
 | ||||
| void intel_sanitize_gt_powersave(struct drm_i915_private *dev_priv) | ||||
| { | ||||
| 	dev_priv->rps.enabled = true; /* force disabling */ | ||||
| 	intel_disable_gt_powersave(dev_priv); | ||||
| 
 | ||||
| 	gen6_reset_rps_interrupts(dev_priv); | ||||
| } | ||||
| 
 | ||||
| void intel_disable_gt_powersave(struct drm_i915_private *dev_priv) | ||||
| @ -6590,13 +6567,12 @@ void intel_disable_gt_powersave(struct drm_i915_private *dev_priv) | ||||
| 
 | ||||
| 	dev_priv->rps.enabled = false; | ||||
| 	mutex_unlock(&dev_priv->rps.hw_lock); | ||||
| 
 | ||||
| 	gen6_reset_rps_interrupts(dev_priv); | ||||
| } | ||||
| 
 | ||||
| void intel_enable_gt_powersave(struct drm_i915_private *dev_priv) | ||||
| { | ||||
| 	/* We shouldn't be disabling as we submit, so this should be less
 | ||||
| 	 * racy than it appears! | ||||
| 	 */ | ||||
| 	if (READ_ONCE(dev_priv->rps.enabled)) | ||||
| 		return; | ||||
| 
 | ||||
| @ -6632,75 +6608,9 @@ void intel_enable_gt_powersave(struct drm_i915_private *dev_priv) | ||||
| 	WARN_ON(dev_priv->rps.efficient_freq < dev_priv->rps.min_freq); | ||||
| 	WARN_ON(dev_priv->rps.efficient_freq > dev_priv->rps.max_freq); | ||||
| 
 | ||||
| 	dev_priv->rps.enabled = true; | ||||
| 	mutex_unlock(&dev_priv->rps.hw_lock); | ||||
| } | ||||
| 
 | ||||
| static void __intel_autoenable_gt_powersave(struct work_struct *work) | ||||
| { | ||||
| 	struct drm_i915_private *dev_priv = | ||||
| 		container_of(work, typeof(*dev_priv), rps.autoenable_work.work); | ||||
| 	struct intel_engine_cs *rcs; | ||||
| 	struct drm_i915_gem_request *req; | ||||
| 
 | ||||
| 	if (READ_ONCE(dev_priv->rps.enabled)) | ||||
| 		goto out; | ||||
| 
 | ||||
| 	rcs = &dev_priv->engine[RCS]; | ||||
| 	if (rcs->last_context) | ||||
| 		goto out; | ||||
| 
 | ||||
| 	if (!rcs->init_context) | ||||
| 		goto out; | ||||
| 
 | ||||
| 	mutex_lock(&dev_priv->drm.struct_mutex); | ||||
| 
 | ||||
| 	req = i915_gem_request_alloc(rcs, dev_priv->kernel_context); | ||||
| 	if (IS_ERR(req)) | ||||
| 		goto unlock; | ||||
| 
 | ||||
| 	if (!i915.enable_execlists && i915_switch_context(req) == 0) | ||||
| 		rcs->init_context(req); | ||||
| 
 | ||||
| 	/* Mark the device busy, calling intel_enable_gt_powersave() */ | ||||
| 	i915_add_request_no_flush(req); | ||||
| 
 | ||||
| unlock: | ||||
| 	mutex_unlock(&dev_priv->drm.struct_mutex); | ||||
| out: | ||||
| 	intel_runtime_pm_put(dev_priv); | ||||
| } | ||||
| 
 | ||||
| void intel_autoenable_gt_powersave(struct drm_i915_private *dev_priv) | ||||
| { | ||||
| 	if (READ_ONCE(dev_priv->rps.enabled)) | ||||
| 		return; | ||||
| 
 | ||||
| 	if (IS_IRONLAKE_M(dev_priv)) { | ||||
| 		ironlake_enable_drps(dev_priv); | ||||
| 		mutex_lock(&dev_priv->drm.struct_mutex); | ||||
| 		intel_init_emon(dev_priv); | ||||
| 		mutex_unlock(&dev_priv->drm.struct_mutex); | ||||
| 	} else if (INTEL_INFO(dev_priv)->gen >= 6) { | ||||
| 		/*
 | ||||
| 		 * PCU communication is slow and this doesn't need to be | ||||
| 		 * done at any specific time, so do this out of our fast path | ||||
| 		 * to make resume and init faster. | ||||
| 		 * | ||||
| 		 * We depend on the HW RC6 power context save/restore | ||||
| 		 * mechanism when entering D3 through runtime PM suspend. So | ||||
| 		 * disable RPM until RPS/RC6 is properly setup. We can only | ||||
| 		 * get here via the driver load/system resume/runtime resume | ||||
| 		 * paths, so the _noresume version is enough (and in case of | ||||
| 		 * runtime resume it's necessary). | ||||
| 		 */ | ||||
| 		if (queue_delayed_work(dev_priv->wq, | ||||
| 				       &dev_priv->rps.autoenable_work, | ||||
| 				       round_jiffies_up_relative(HZ))) | ||||
| 			intel_runtime_pm_get_noresume(dev_priv); | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| static void ibx_init_clock_gating(struct drm_device *dev) | ||||
| { | ||||
| 	struct drm_i915_private *dev_priv = to_i915(dev); | ||||
| @ -7806,8 +7716,6 @@ void intel_pm_setup(struct drm_device *dev) | ||||
| 	mutex_init(&dev_priv->rps.hw_lock); | ||||
| 	spin_lock_init(&dev_priv->rps.client_lock); | ||||
| 
 | ||||
| 	INIT_DELAYED_WORK(&dev_priv->rps.autoenable_work, | ||||
| 			  __intel_autoenable_gt_powersave); | ||||
| 	INIT_LIST_HEAD(&dev_priv->rps.clients); | ||||
| 
 | ||||
| 	dev_priv->pm.suspended = false; | ||||
|  | ||||
| @ -435,7 +435,7 @@ void intel_uncore_sanitize(struct drm_i915_private *dev_priv) | ||||
| 	i915.enable_rc6 = sanitize_rc6_option(dev_priv, i915.enable_rc6); | ||||
| 
 | ||||
| 	/* BIOS often leaves RC6 enabled, but disable it for hw init */ | ||||
| 	intel_sanitize_gt_powersave(dev_priv); | ||||
| 	intel_disable_gt_powersave(dev_priv); | ||||
| } | ||||
| 
 | ||||
| static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv, | ||||
|  | ||||
		Loading…
	
		Reference in New Issue
	
	Block a user
	 Chris Wilson
						Chris Wilson