linux/drivers/gpu/drm/msm/msm_kms.c
Linus Torvalds 1d6d399223

Merge tag 'kthread-for-6.14-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/frederic/linux-dynticks

Pull kthread updates from Frederic Weisbecker:
 "Kthreads affinity follow either of 4 existing different patterns:

   1) Per-CPU kthreads must stay affine to a single CPU and never
      execute relevant code on any other CPU. This is currently handled
      by smpboot code which takes care of CPU-hotplug operations.
      Affinity here is a correctness constraint.

   2) Some kthreads _have_ to be affine to a specific set of CPUs and
      can't run anywhere else. The affinity is set through
      kthread_bind_mask() and the subsystem itself takes care of
      handling CPU-hotplug operations. Affinity here is assumed to be a
      correctness constraint (see the sketch after this list).

   3) Per-node kthreads _prefer_ to be affine to a specific NUMA node.
      This is not a correctness constraint but merely a preference in
      terms of memory locality. kswapd and kcompactd both fall into this
      category. The affinity is set manually, as for any other task, and
      CPU-hotplug is supposed to be handled by the relevant subsystem so
      that the task is properly re-affined whenever a CPU from the node
      comes up. Care should also be taken that the node affinity doesn't
      cross isolated (nohz_full) cpumask boundaries.

   4) Similar to the previous point, except that the kthreads have a
      _preferred_ affinity other than a node. Both RCU boost kthreads
      and RCU exp kworkers fall into this category, as they refer to
      "RCU nodes" from a distinctly distributed tree.
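
  As a minimal illustration of the hard-binding style in pattern 2 (and
  of the rule, reinforced later in this series, that binding must happen
  before the first wake-up), here is a sketch with a made-up thread
  function and target mask:

	#include <linux/kthread.h>
	#include <linux/sched.h>
	#include <linux/topology.h>

	static int my_thread_fn(void *data)
	{
		/* Dummy loop: sleep until someone calls kthread_stop(). */
		while (!kthread_should_stop())
			schedule_timeout_interruptible(HZ);
		return 0;
	}

	static int start_hard_bound_kthread(void)
	{
		struct task_struct *t;

		t = kthread_create(my_thread_fn, NULL, "my_kthread");
		if (IS_ERR(t))
			return PTR_ERR(t);

		/* Hard bind before the first wake-up; the owning subsystem
		 * must then deal with CPU hotplug by itself. */
		kthread_bind_mask(t, cpumask_of_node(0));
		wake_up_process(t);

		return 0;
	}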

  Currently the preferred affinity patterns (3 and 4) have at least four
  identified users, with varying degrees of success when it comes to
  handling CPU-hotplug operations and CPU isolation, each doing it in
  its own ad-hoc way.

  This is an infrastructure proposal to handle these cases with the
  following API changes:

   - kthread_create_on_node() automatically affines the created kthread
     to its target node unless it has been set as per-cpu or bound with
     kthread_bind[_mask]() before the first wake-up.

   - kthread_affine_preferred() is a new function that can be called
     right after kthread_create_on_node() to specify a preferred
     affinity different from the specified node (see the sketch after
     this list).
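
  A rough sketch of the new flow, assuming kthread_affine_preferred()
  takes the task and a preferred cpumask (the node, the mask and the
  helper below are made up for illustration, and my_thread_fn is the
  dummy function from the earlier sketch):

	static int start_preferred_kthread(int node)
	{
		struct task_struct *t;

		/* Not woken up yet: by default the kthread will be affined
		 * to the CPUs of @node. */
		t = kthread_create_on_node(my_thread_fn, NULL, node,
					   "my_kthread/%d", node);
		if (IS_ERR(t))
			return PTR_ERR(t);

		/* A preference, not a hard bind: the kthread falls back to
		 * the housekeeping CPUs if these are offline or isolated. */
		kthread_affine_preferred(t, cpumask_of_node(node));
		wake_up_process(t);

		return 0;
	}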

  When the preferred affinity can't be applied because the possible
  targets are offline or isolated (nohz_full), the kthread is affined to
  the housekeeping CPUs (which means all online CPUs most of the time,
  or only the non-nohz_full CPUs when nohz_full= is set).

  kswapd, kcompactd, RCU boost kthreads and RCU exp kworkers have been
  converted, along with a few old drivers.

  Summary of the changes:

   - Consolidate a bunch of ad-hoc implementations of
     kthread_run_on_cpu()

   - Introduce task_cpu_fallback_mask(), which defines the default
     last-resort affinity of a task and makes it nohz_full-aware

   - Add correctness checks to ensure kthread_bind() is always called
     before the first kthread wake-up.

   - Affine kthreads to their preferred node by default.

   - Convert kswapd / kcompactd and remove their halfway working ad-hoc
     affinity implementation

   - Implement kthreads preferred affinity

   - Unify the kthread worker and kthread APIs' style (see the
     kthread_run_worker() sketch below)

   - Convert RCU kthreads to the new API and remove the ad-hoc affinity
     implementation"
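
The kthread_run_worker() part of that unification can be sketched as
follows; the call shape mirrors the converted call site in msm_kms.c
shown further down:

	struct kthread_worker *worker;

	/* Create and immediately start a kthread worker; the name is
	 * printf-formatted, here matching the msm crtc event workers. */
	worker = kthread_run_worker(0, "crtc_event:%d", crtc->base.id);
	if (IS_ERR(worker))
		return PTR_ERR(worker);

Teardown still goes through kthread_destroy_worker(), as in
msm_drm_kms_uninit() further down.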

* tag 'kthread-for-6.14-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/frederic/linux-dynticks:
  kthread: modify kernel-doc function name to match code
  rcu: Use kthread preferred affinity for RCU exp kworkers
  treewide: Introduce kthread_run_worker[_on_cpu]()
  kthread: Unify kthread_create_on_cpu() and kthread_create_worker_on_cpu() automatic format
  rcu: Use kthread preferred affinity for RCU boost
  kthread: Implement preferred affinity
  mm: Create/affine kswapd to its preferred node
  mm: Create/affine kcompactd to its preferred node
  kthread: Default affine kthread to its preferred NUMA node
  kthread: Make sure kthread hasn't started while binding it
  sched,arm64: Handle CPU isolation on last resort fallback rq selection
  arm64: Exclude nohz_full CPUs from 32bits el0 support
  lib: test_objpool: Use kthread_run_on_cpu()
  kallsyms: Use kthread_run_on_cpu()
  soc/qman: test: Use kthread_run_on_cpu()
  arm/bL_switcher: Use kthread_run_on_cpu()
2025-01-21 17:10:05 -08:00

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2016-2018, 2020-2021 The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <linux/aperture.h>
#include <linux/kthread.h>
#include <linux/sched/mm.h>
#include <uapi/linux/sched/types.h>

#include <drm/drm_drv.h>
#include <drm/drm_mode_config.h>
#include <drm/drm_vblank.h>

#include "disp/msm_disp_snapshot.h"

#include "msm_drv.h"
#include "msm_gem.h"
#include "msm_kms.h"
#include "msm_mmu.h"

static const struct drm_mode_config_funcs mode_config_funcs = {
	.fb_create = msm_framebuffer_create,
	.atomic_check = msm_atomic_check,
	.atomic_commit = drm_atomic_helper_commit,
};

static const struct drm_mode_config_helper_funcs mode_config_helper_funcs = {
	.atomic_commit_tail = msm_atomic_commit_tail,
};
static irqreturn_t msm_irq(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;

	BUG_ON(!kms);

	return kms->funcs->irq(kms);
}

static void msm_irq_preinstall(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;

	BUG_ON(!kms);

	kms->funcs->irq_preinstall(kms);
}

static int msm_irq_postinstall(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;

	BUG_ON(!kms);

	if (kms->funcs->irq_postinstall)
		return kms->funcs->irq_postinstall(kms);

	return 0;
}

static int msm_irq_install(struct drm_device *dev, unsigned int irq)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;
	int ret;

	if (irq == IRQ_NOTCONNECTED)
		return -ENOTCONN;

	msm_irq_preinstall(dev);

	ret = request_irq(irq, msm_irq, 0, dev->driver->name, dev);
	if (ret)
		return ret;

	kms->irq_requested = true;

	ret = msm_irq_postinstall(dev);
	if (ret) {
		free_irq(irq, dev);
		return ret;
	}

	return 0;
}

static void msm_irq_uninstall(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;

	kms->funcs->irq_uninstall(kms);
	if (kms->irq_requested)
		free_irq(kms->irq, dev);
}
struct msm_vblank_work {
	struct work_struct work;
	struct drm_crtc *crtc;
	bool enable;
	struct msm_drm_private *priv;
};

static void vblank_ctrl_worker(struct work_struct *work)
{
	struct msm_vblank_work *vbl_work = container_of(work,
						struct msm_vblank_work, work);
	struct msm_drm_private *priv = vbl_work->priv;
	struct msm_kms *kms = priv->kms;

	if (vbl_work->enable)
		kms->funcs->enable_vblank(kms, vbl_work->crtc);
	else
		kms->funcs->disable_vblank(kms, vbl_work->crtc);

	kfree(vbl_work);
}

/*
 * vblank enable/disable can be requested from atomic context (hence
 * GFP_ATOMIC below), so the actual KMS callback is deferred to the
 * driver workqueue (priv->wq).
 */
static int vblank_ctrl_queue_work(struct msm_drm_private *priv,
					struct drm_crtc *crtc, bool enable)
{
	struct msm_vblank_work *vbl_work;

	vbl_work = kzalloc(sizeof(*vbl_work), GFP_ATOMIC);
	if (!vbl_work)
		return -ENOMEM;

	INIT_WORK(&vbl_work->work, vblank_ctrl_worker);

	vbl_work->crtc = crtc;
	vbl_work->enable = enable;
	vbl_work->priv = priv;

	queue_work(priv->wq, &vbl_work->work);

	return 0;
}
int msm_crtc_enable_vblank(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;

	if (!kms)
		return -ENXIO;

	drm_dbg_vbl(dev, "crtc=%u\n", crtc->base.id);

	return vblank_ctrl_queue_work(priv, crtc, true);
}

void msm_crtc_disable_vblank(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;

	if (!kms)
		return;

	drm_dbg_vbl(dev, "crtc=%u\n", crtc->base.id);

	vblank_ctrl_queue_work(priv, crtc, false);
}
struct msm_gem_address_space *msm_kms_init_aspace(struct drm_device *dev)
{
	struct msm_gem_address_space *aspace;
	struct msm_mmu *mmu;
	struct device *mdp_dev = dev->dev;
	struct device *mdss_dev = mdp_dev->parent;
	struct device *iommu_dev;

	/*
	 * IOMMUs can be a part of MDSS device tree binding, or the
	 * MDP/DPU device.
	 */
	if (device_iommu_mapped(mdp_dev))
		iommu_dev = mdp_dev;
	else
		iommu_dev = mdss_dev;

	mmu = msm_iommu_new(iommu_dev, 0);
	if (IS_ERR(mmu))
		return ERR_CAST(mmu);

	if (!mmu) {
		drm_info(dev, "no IOMMU, fallback to phys contig buffers for scanout\n");
		return NULL;
	}

	aspace = msm_gem_address_space_create(mmu, "mdp_kms",
		0x1000, 0x100000000 - 0x1000);
	if (IS_ERR(aspace)) {
		dev_err(mdp_dev, "aspace create, error %pe\n", aspace);
		mmu->funcs->destroy(mmu);
	}

	return aspace;
}
void msm_drm_kms_uninit(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct msm_drm_private *priv = platform_get_drvdata(pdev);
	struct drm_device *ddev = priv->dev;
	struct msm_kms *kms = priv->kms;
	int i;

	BUG_ON(!kms);

	/* clean up event worker threads */
	for (i = 0; i < priv->num_crtcs; i++) {
		if (priv->event_thread[i].worker)
			kthread_destroy_worker(priv->event_thread[i].worker);
	}

	drm_kms_helper_poll_fini(ddev);

	msm_disp_snapshot_destroy(ddev);

	pm_runtime_get_sync(dev);
	msm_irq_uninstall(ddev);
	pm_runtime_put_sync(dev);

	if (kms && kms->funcs)
		kms->funcs->destroy(kms);
}
int msm_drm_kms_init(struct device *dev, const struct drm_driver *drv)
{
	struct msm_drm_private *priv = dev_get_drvdata(dev);
	struct drm_device *ddev = priv->dev;
	struct msm_kms *kms = priv->kms;
	struct drm_crtc *crtc;
	int ret;

	/* the fw fb could be anywhere in memory */
	ret = aperture_remove_all_conflicting_devices(drv->name);
	if (ret)
		return ret;

	ret = priv->kms_init(ddev);
	if (ret) {
		DRM_DEV_ERROR(dev, "failed to load kms\n");
		return ret;
	}

	/* Enable normalization of plane zpos */
	ddev->mode_config.normalize_zpos = true;

	ddev->mode_config.funcs = &mode_config_funcs;
	ddev->mode_config.helper_private = &mode_config_helper_funcs;

	kms->dev = ddev;
	ret = kms->funcs->hw_init(kms);
	if (ret) {
		DRM_DEV_ERROR(dev, "kms hw init failed: %d\n", ret);
		goto err_msm_uninit;
	}

	drm_helper_move_panel_connectors_to_head(ddev);

	drm_for_each_crtc(crtc, ddev) {
		struct msm_drm_thread *ev_thread;

		/* initialize event thread */
		ev_thread = &priv->event_thread[drm_crtc_index(crtc)];
		ev_thread->dev = ddev;
		ev_thread->worker = kthread_run_worker(0, "crtc_event:%d", crtc->base.id);
		if (IS_ERR(ev_thread->worker)) {
			ret = PTR_ERR(ev_thread->worker);
			DRM_DEV_ERROR(dev, "failed to create crtc_event kthread\n");
			ev_thread->worker = NULL;
			goto err_msm_uninit;
		}

		sched_set_fifo(ev_thread->worker->task);
	}

	ret = drm_vblank_init(ddev, priv->num_crtcs);
	if (ret < 0) {
		DRM_DEV_ERROR(dev, "failed to initialize vblank\n");
		goto err_msm_uninit;
	}

	pm_runtime_get_sync(dev);
	ret = msm_irq_install(ddev, kms->irq);
	pm_runtime_put_sync(dev);
	if (ret < 0) {
		DRM_DEV_ERROR(dev, "failed to install IRQ handler\n");
		goto err_msm_uninit;
	}

	ret = msm_disp_snapshot_init(ddev);
	if (ret)
		DRM_DEV_ERROR(dev, "msm_disp_snapshot_init failed ret = %d\n", ret);

	drm_mode_config_reset(ddev);

	return 0;

err_msm_uninit:
	return ret;
}
int msm_kms_pm_prepare(struct device *dev)
{
	struct msm_drm_private *priv = dev_get_drvdata(dev);
	struct drm_device *ddev = priv ? priv->dev : NULL;

	if (!priv || !priv->kms)
		return 0;

	return drm_mode_config_helper_suspend(ddev);
}

void msm_kms_pm_complete(struct device *dev)
{
	struct msm_drm_private *priv = dev_get_drvdata(dev);
	struct drm_device *ddev = priv ? priv->dev : NULL;

	if (!priv || !priv->kms)
		return;

	drm_mode_config_helper_resume(ddev);
}

void msm_kms_shutdown(struct platform_device *pdev)
{
	struct msm_drm_private *priv = platform_get_drvdata(pdev);
	struct drm_device *drm = priv ? priv->dev : NULL;

	/*
	 * Shutdown the hw if we're far enough along where things might be on.
	 * If we run this too early, we'll end up panicking in any variety of
	 * places. Since we don't register the drm device until late in
	 * msm_drm_init, drm_dev->registered is used as an indicator that the
	 * shutdown will be successful.
	 */
	if (drm && drm->registered && priv->kms)
		drm_atomic_helper_shutdown(drm);
}