Commit 0e31225f authored by Linus Torvalds

Merge tag 'drm-fixes-2019-08-02-1' of git://anongit.freedesktop.org/drm/drm

Pull more drm fixes from Daniel Vetter:
 "Dave sends his pull, everyone realizes they've been asleep at the
  wheel and hits send on their own pulls :-/

  Normally I'd just ignore these all because w/e for me and Dave. But
  this time around the latecomers also included drm-intel-fixes, which
  failed to send out a -fixes pull thus far for this release (screwed up
  vacation coverage, despite that 2/3 maintainers were around ... they
  all look appropriately guilty), and that really is overdue to get
  landed.

  And since I had to do a pull request anyway I pulled the other two
  late ones too.

  intel fixes (didn't have any ever since the main merge window pull):
   - gvt fixes (2 cc: stable)
   - fix gpu reset vs mm-shrinker vs wakeup fun (needed a few patches)
   - two gem locking fixes (one cc: stable)
   - pile of misc fixes all over with minor impact, 6 cc: stable, others
     from this window

  exynos:
   - misc minor fixes

  misc:
   - some build/Kconfig fixes
   - regression fix for vm scalability perf test which seems to mostly
     exercise dmesg/console logging ...
   - the vgem cache flush fix for arm64 broke the world on x86, so
     that's reverted again

* tag 'drm-fixes-2019-08-02-1' of git://anongit.freedesktop.org/drm/drm: (42 commits)
  Revert "drm/vgem: fix cache synchronization on arm/arm64"
  drm/exynos: fix missing decrement of retry counter
  drm/exynos: add CONFIG_MMU dependency
  drm/exynos: remove redundant assignment to pointer 'node'
  drm/exynos: using dev_get_drvdata directly
  drm/bochs: Use shadow buffer for bochs framebuffer console
  drm/fb-helper: Instanciate shadow FB if configured in device's mode_config
  drm/fb-helper: Map DRM client buffer only when required
  drm/client: Support unmapping of DRM client buffers
  drm/i915: Only recover active engines
  drm/i915: Add a wakeref getter for iff the wakeref is already active
  drm/i915: Lift intel_engines_resume() to callers
  drm/vgem: fix cache synchronization on arm/arm64
  drm/i810: Use CONFIG_PREEMPTION
  drm/bridge: tc358764: Fix build error
  drm/bridge: lvds-encoder: Fix build error while CONFIG_DRM_KMS_HELPER=m
  drm/i915/gvt: Adding ppgtt to GVT GEM context after shadow pdps settled.
  drm/i915/gvt: grab runtime pm first for forcewake use
  drm/i915/gvt: fix incorrect cache entry for guest page mapping
  drm/i915/gvt: Checking workload's gma earlier
  ...
parents 4f1a6ef1 9c8c9c7c
Showing with 234 additions and 115 deletions
@@ -142,27 +142,3 @@ void intel_engine_init__pm(struct intel_engine_cs *engine)
{
intel_wakeref_init(&engine->wakeref);
}
int intel_engines_resume(struct drm_i915_private *i915)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
int err = 0;
intel_gt_pm_get(i915);
for_each_engine(engine, i915, id) {
intel_engine_pm_get(engine);
engine->serial++; /* kernel context lost */
err = engine->resume(engine);
intel_engine_pm_put(engine);
if (err) {
dev_err(i915->drm.dev,
"Failed to restart %s (%d)\n",
engine->name, err);
break;
}
}
intel_gt_pm_put(i915);
return err;
}
@@ -7,16 +7,22 @@
#ifndef INTEL_ENGINE_PM_H
#define INTEL_ENGINE_PM_H
#include "intel_engine_types.h"
#include "intel_wakeref.h"
struct drm_i915_private;
struct intel_engine_cs;
void intel_engine_pm_get(struct intel_engine_cs *engine);
void intel_engine_pm_put(struct intel_engine_cs *engine);
static inline bool
intel_engine_pm_get_if_awake(struct intel_engine_cs *engine)
{
return intel_wakeref_get_if_active(&engine->wakeref);
}
void intel_engine_park(struct intel_engine_cs *engine);
void intel_engine_init__pm(struct intel_engine_cs *engine);
int intel_engines_resume(struct drm_i915_private *i915);
#endif /* INTEL_ENGINE_PM_H */
@@ -70,6 +70,18 @@ struct intel_ring {
struct list_head request_list;
struct list_head active_link;
/*
* As we have two types of rings, one global to the engine used
* by ringbuffer submission and those that are exclusive to a
* context used by execlists, we have to play safe and allow
* atomic updates to the pin_count. However, the actual pinning
* of the context is either done during initialisation for
* ringbuffer submission or serialised as part of the context
* pinning for execlists, and so we do not need a mutex ourselves
* to serialise intel_ring_pin/intel_ring_unpin.
*/
atomic_t pin_count;
u32 head;
u32 tail;
u32 emit;
@@ -5,6 +5,7 @@
*/
#include "i915_drv.h"
#include "intel_engine_pm.h"
#include "intel_gt_pm.h"
#include "intel_pm.h"
#include "intel_wakeref.h"
@@ -118,10 +119,11 @@ void intel_gt_sanitize(struct drm_i915_private *i915, bool force)
intel_engine_reset(engine, false);
}
-void intel_gt_resume(struct drm_i915_private *i915)
+int intel_gt_resume(struct drm_i915_private *i915)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
int err = 0;
/*
* After resume, we may need to poke into the pinned kernel
@@ -129,9 +131,12 @@ void intel_gt_resume(struct drm_i915_private *i915)
* Only the kernel contexts should remain pinned over suspend,
* allowing us to fixup the user contexts on their first pin.
*/
intel_gt_pm_get(i915);
for_each_engine(engine, i915, id) {
struct intel_context *ce;
intel_engine_pm_get(engine);
ce = engine->kernel_context;
if (ce)
ce->ops->reset(ce);
@@ -139,5 +144,19 @@ void intel_gt_resume(struct drm_i915_private *i915)
ce = engine->preempt_context;
if (ce)
ce->ops->reset(ce);
engine->serial++; /* kernel context lost */
err = engine->resume(engine);
intel_engine_pm_put(engine);
if (err) {
dev_err(i915->drm.dev,
"Failed to restart %s (%d)\n",
engine->name, err);
break;
}
}
intel_gt_pm_put(i915);
return err;
}
@@ -22,6 +22,6 @@ void intel_gt_pm_put(struct drm_i915_private *i915);
void intel_gt_pm_init(struct drm_i915_private *i915);
void intel_gt_sanitize(struct drm_i915_private *i915, bool force);
-void intel_gt_resume(struct drm_i915_private *i915);
+int intel_gt_resume(struct drm_i915_private *i915);
#endif /* INTEL_GT_PM_H */
@@ -1414,6 +1414,7 @@ static void execlists_context_destroy(struct kref *kref)
{
struct intel_context *ce = container_of(kref, typeof(*ce), ref);
GEM_BUG_ON(!i915_active_is_idle(&ce->active));
GEM_BUG_ON(intel_context_is_pinned(ce));
if (ce->state)
@@ -1426,7 +1427,6 @@ static void execlists_context_unpin(struct intel_context *ce)
{
i915_gem_context_unpin_hw_id(ce->gem_context);
i915_gem_object_unpin_map(ce->state->obj);
intel_ring_unpin(ce->ring);
}
static void
@@ -1478,13 +1478,9 @@ __execlists_context_pin(struct intel_context *ce,
goto unpin_active;
}
ret = intel_ring_pin(ce->ring);
if (ret)
goto unpin_map;
ret = i915_gem_context_pin_hw_id(ce->gem_context);
if (ret)
-goto unpin_ring;
+goto unpin_map;
ce->lrc_desc = lrc_descriptor(ce, engine);
ce->lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
@@ -1492,8 +1488,6 @@ __execlists_context_pin(struct intel_context *ce,
return 0;
unpin_ring:
intel_ring_unpin(ce->ring);
unpin_map:
i915_gem_object_unpin_map(ce->state->obj);
unpin_active:
@@ -687,7 +687,6 @@ static void reset_prepare_engine(struct intel_engine_cs *engine)
* written to the powercontext is undefined and so we may lose
* GPU state upon resume, i.e. fail to restart after a reset.
*/
intel_engine_pm_get(engine);
intel_uncore_forcewake_get(engine->uncore, FORCEWAKE_ALL);
engine->reset.prepare(engine);
}
@@ -718,16 +717,21 @@ static void revoke_mmaps(struct drm_i915_private *i915)
}
}
-static void reset_prepare(struct drm_i915_private *i915)
+static intel_engine_mask_t reset_prepare(struct drm_i915_private *i915)
{
struct intel_engine_cs *engine;
intel_engine_mask_t awake = 0;
enum intel_engine_id id;
intel_gt_pm_get(i915);
-for_each_engine(engine, i915, id)
+for_each_engine(engine, i915, id) {
+if (intel_engine_pm_get_if_awake(engine))
+awake |= engine->mask;
reset_prepare_engine(engine);
+}
intel_uc_reset_prepare(i915);
return awake;
}
static void gt_revoke(struct drm_i915_private *i915)
@@ -761,20 +765,22 @@ static int gt_reset(struct drm_i915_private *i915,
static void reset_finish_engine(struct intel_engine_cs *engine)
{
engine->reset.finish(engine);
intel_engine_pm_put(engine);
intel_uncore_forcewake_put(engine->uncore, FORCEWAKE_ALL);
intel_engine_signal_breadcrumbs(engine);
}
-static void reset_finish(struct drm_i915_private *i915)
+static void reset_finish(struct drm_i915_private *i915,
+intel_engine_mask_t awake)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
for_each_engine(engine, i915, id) {
reset_finish_engine(engine);
-intel_engine_signal_breadcrumbs(engine);
+if (awake & engine->mask)
+intel_engine_pm_put(engine);
}
intel_gt_pm_put(i915);
}
static void nop_submit_request(struct i915_request *request)
@@ -798,6 +804,7 @@ static void __i915_gem_set_wedged(struct drm_i915_private *i915)
{
struct i915_gpu_error *error = &i915->gpu_error;
struct intel_engine_cs *engine;
intel_engine_mask_t awake;
enum intel_engine_id id;
if (test_bit(I915_WEDGED, &error->flags))
@@ -817,7 +824,7 @@ static void __i915_gem_set_wedged(struct drm_i915_private *i915)
* rolling the global seqno forward (since this would complete requests
* for which we haven't set the fence error to EIO yet).
*/
-reset_prepare(i915);
+awake = reset_prepare(i915);
/* Even if the GPU reset fails, it should still stop the engines */
if (!INTEL_INFO(i915)->gpu_reset_clobbers_display)
@@ -841,7 +848,7 @@ static void __i915_gem_set_wedged(struct drm_i915_private *i915)
for_each_engine(engine, i915, id)
engine->cancel_requests(engine);
-reset_finish(i915);
+reset_finish(i915, awake);
GEM_TRACE("end\n");
}
@@ -951,6 +958,21 @@ static int do_reset(struct drm_i915_private *i915,
return gt_reset(i915, stalled_mask);
}
static int resume(struct drm_i915_private *i915)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
int ret;
for_each_engine(engine, i915, id) {
ret = engine->resume(engine);
if (ret)
return ret;
}
return 0;
}
/**
* i915_reset - reset chip after a hang
* @i915: #drm_i915_private to reset
@@ -973,6 +995,7 @@ void i915_reset(struct drm_i915_private *i915,
const char *reason)
{
struct i915_gpu_error *error = &i915->gpu_error;
intel_engine_mask_t awake;
int ret;
GEM_TRACE("flags=%lx\n", error->flags);
@@ -989,7 +1012,7 @@ void i915_reset(struct drm_i915_private *i915,
dev_notice(i915->drm.dev, "Resetting chip for %s\n", reason);
error->reset_count++;
-reset_prepare(i915);
+awake = reset_prepare(i915);
if (!intel_has_gpu_reset(i915)) {
if (i915_modparams.reset)
@@ -1024,13 +1047,17 @@ void i915_reset(struct drm_i915_private *i915,
if (ret) {
DRM_ERROR("Failed to initialise HW following reset (%d)\n",
ret);
-goto error;
+goto taint;
}
ret = resume(i915);
if (ret)
goto taint;
i915_queue_hangcheck(i915);
finish:
-reset_finish(i915);
+reset_finish(i915, awake);
unlock:
mutex_unlock(&error->wedge_mutex);
return;
@@ -1081,7 +1108,7 @@ int i915_reset_engine(struct intel_engine_cs *engine, const char *msg)
GEM_TRACE("%s flags=%lx\n", engine->name, error->flags);
GEM_BUG_ON(!test_bit(I915_RESET_ENGINE + engine->id, &error->flags));
-if (!intel_wakeref_active(&engine->wakeref))
+if (!intel_engine_pm_get_if_awake(engine))
return 0;
reset_prepare_engine(engine);
@@ -1116,12 +1143,11 @@ int i915_reset_engine(struct intel_engine_cs *engine, const char *msg)
* process to program RING_MODE, HWSP and re-enable submission.
*/
ret = engine->resume(engine);
-if (ret)
-goto out;
out:
intel_engine_cancel_stop_cs(engine);
reset_finish_engine(engine);
intel_engine_pm_put(engine);
return ret;
}
@@ -1149,16 +1149,16 @@ i915_emit_bb_start(struct i915_request *rq,
int intel_ring_pin(struct intel_ring *ring)
{
struct i915_vma *vma = ring->vma;
enum i915_map_type map = i915_coherent_map_type(vma->vm->i915);
unsigned int flags;
void *addr;
int ret;
GEM_BUG_ON(ring->vaddr);
if (atomic_fetch_inc(&ring->pin_count))
return 0;
ret = i915_timeline_pin(ring->timeline);
if (ret)
-return ret;
+goto err_unpin;
flags = PIN_GLOBAL;
@@ -1172,26 +1172,31 @@ int intel_ring_pin(struct intel_ring *ring)
ret = i915_vma_pin(vma, 0, 0, flags);
if (unlikely(ret))
-goto unpin_timeline;
+goto err_timeline;
if (i915_vma_is_map_and_fenceable(vma))
addr = (void __force *)i915_vma_pin_iomap(vma);
else
-addr = i915_gem_object_pin_map(vma->obj, map);
+addr = i915_gem_object_pin_map(vma->obj,
+i915_coherent_map_type(vma->vm->i915));
if (IS_ERR(addr)) {
ret = PTR_ERR(addr);
-goto unpin_ring;
+goto err_ring;
}
vma->obj->pin_global++;
GEM_BUG_ON(ring->vaddr);
ring->vaddr = addr;
return 0;
-unpin_ring:
+err_ring:
i915_vma_unpin(vma);
-unpin_timeline:
+err_timeline:
i915_timeline_unpin(ring->timeline);
+err_unpin:
+atomic_dec(&ring->pin_count);
return ret;
}
@@ -1207,16 +1212,19 @@ void intel_ring_reset(struct intel_ring *ring, u32 tail)
void intel_ring_unpin(struct intel_ring *ring)
{
-GEM_BUG_ON(!ring->vma);
-GEM_BUG_ON(!ring->vaddr);
+if (!atomic_dec_and_test(&ring->pin_count))
+return;
/* Discard any unused bytes beyond that submitted to hw. */
intel_ring_reset(ring, ring->tail);
GEM_BUG_ON(!ring->vma);
if (i915_vma_is_map_and_fenceable(ring->vma))
i915_vma_unpin_iomap(ring->vma);
else
i915_gem_object_unpin_map(ring->vma->obj);
GEM_BUG_ON(!ring->vaddr);
ring->vaddr = NULL;
ring->vma->obj->pin_global--;
@@ -2081,10 +2089,11 @@ static void ring_destroy(struct intel_engine_cs *engine)
WARN_ON(INTEL_GEN(dev_priv) > 2 &&
(ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0);
-intel_engine_cleanup_common(engine);
+intel_ring_unpin(engine->buffer);
intel_ring_put(engine->buffer);
+intel_engine_cleanup_common(engine);
kfree(engine);
}
@@ -1098,10 +1098,25 @@ static void glk_whitelist_build(struct intel_engine_cs *engine)
static void cfl_whitelist_build(struct intel_engine_cs *engine)
{
struct i915_wa_list *w = &engine->whitelist;
if (engine->class != RENDER_CLASS)
return;
-gen9_whitelist_build(&engine->whitelist);
+gen9_whitelist_build(w);
/*
* WaAllowPMDepthAndInvocationCountAccessFromUMD:cfl,whl,cml,aml
*
* This covers 4 registers which are next to one another:
* - PS_INVOCATION_COUNT
* - PS_INVOCATION_COUNT_UDW
* - PS_DEPTH_COUNT
* - PS_DEPTH_COUNT_UDW
*/
whitelist_reg_ext(w, PS_INVOCATION_COUNT,
RING_FORCE_TO_NONPRIV_RD |
RING_FORCE_TO_NONPRIV_RANGE_4);
}
static void cnl_whitelist_build(struct intel_engine_cs *engine)
@@ -1129,6 +1144,19 @@ static void icl_whitelist_build(struct intel_engine_cs *engine)
/* WaEnableStateCacheRedirectToCS:icl */
whitelist_reg(w, GEN9_SLICE_COMMON_ECO_CHICKEN1);
/*
* WaAllowPMDepthAndInvocationCountAccessFromUMD:icl
*
* This covers 4 registers which are next to one another:
* - PS_INVOCATION_COUNT
* - PS_INVOCATION_COUNT_UDW
* - PS_DEPTH_COUNT
* - PS_DEPTH_COUNT_UDW
*/
whitelist_reg_ext(w, PS_INVOCATION_COUNT,
RING_FORCE_TO_NONPRIV_RD |
RING_FORCE_TO_NONPRIV_RANGE_4);
break;
case VIDEO_DECODE_CLASS:
@@ -1258,8 +1286,12 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_B0))
wa_write_or(wal,
GEN7_SARCHKMD,
-GEN7_DISABLE_DEMAND_PREFETCH |
-GEN7_DISABLE_SAMPLER_PREFETCH);
+GEN7_DISABLE_DEMAND_PREFETCH);
/* Wa_1606682166:icl */
wa_write_or(wal,
GEN7_SARCHKMD,
GEN7_DISABLE_SAMPLER_PREFETCH);
}
if (IS_GEN_RANGE(i915, 9, 11)) {
@@ -66,6 +66,7 @@ static struct intel_ring *mock_ring(struct intel_engine_cs *engine)
ring->base.effective_size = sz;
ring->base.vaddr = (void *)(ring + 1);
ring->base.timeline = &ring->timeline;
atomic_set(&ring->base.pin_count, 1);
INIT_LIST_HEAD(&ring->base.request_list);
intel_ring_update_space(&ring->base);
@@ -71,13 +71,16 @@ static int igt_atomic_reset(void *arg)
goto unlock;
for (p = igt_atomic_phases; p->name; p++) {
+intel_engine_mask_t awake;
GEM_TRACE("intel_gpu_reset under %s\n", p->name);
+awake = reset_prepare(i915);
p->critical_section_begin();
-reset_prepare(i915);
err = intel_gpu_reset(i915, ALL_ENGINES);
-reset_finish(i915);
p->critical_section_end();
+reset_finish(i915, awake);
if (err) {
pr_err("intel_gpu_reset failed under %s\n", p->name);
@@ -925,7 +925,12 @@ check_whitelisted_registers(struct intel_engine_cs *engine,
err = 0;
for (i = 0; i < engine->whitelist.count; i++) {
-if (!fn(engine, a[i], b[i], engine->whitelist.list[i].reg))
+const struct i915_wa *wa = &engine->whitelist.list[i];
+if (i915_mmio_reg_offset(wa->reg) & RING_FORCE_TO_NONPRIV_RD)
+continue;
+if (!fn(engine, a[i], b[i], wa->reg))
err = -EINVAL;
}
@@ -2674,11 +2674,6 @@ static int scan_workload(struct intel_vgpu_workload *workload)
gma_head == gma_tail)
return 0;
if (!intel_gvt_ggtt_validate_range(s.vgpu, s.ring_start, s.ring_size)) {
ret = -EINVAL;
goto out;
}
ret = ip_gma_set(&s, gma_head);
if (ret)
goto out;
@@ -2724,11 +2719,6 @@ static int scan_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
s.workload = workload;
s.is_ctx_wa = true;
if (!intel_gvt_ggtt_validate_range(s.vgpu, s.ring_start, s.ring_size)) {
ret = -EINVAL;
goto out;
}
ret = ip_gma_set(&s, gma_head);
if (ret)
goto out;
@@ -245,7 +245,7 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
plane->hw_format = fmt;
plane->base = vgpu_vreg_t(vgpu, DSPSURF(pipe)) & I915_GTT_PAGE_MASK;
-if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0))
+if (!vgpu_gmadr_is_valid(vgpu, plane->base))
return -EINVAL;
plane->base_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, plane->base);
@@ -368,7 +368,7 @@ int intel_vgpu_decode_cursor_plane(struct intel_vgpu *vgpu,
alpha_plane, alpha_force);
plane->base = vgpu_vreg_t(vgpu, CURBASE(pipe)) & I915_GTT_PAGE_MASK;
-if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0))
+if (!vgpu_gmadr_is_valid(vgpu, plane->base))
return -EINVAL;
plane->base_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, plane->base);
@@ -472,7 +472,7 @@ int intel_vgpu_decode_sprite_plane(struct intel_vgpu *vgpu,
plane->drm_format = drm_format;
plane->base = vgpu_vreg_t(vgpu, SPRSURF(pipe)) & I915_GTT_PAGE_MASK;
-if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0))
+if (!vgpu_gmadr_is_valid(vgpu, plane->base))
return -EINVAL;
plane->base_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, plane->base);
@@ -2141,11 +2141,20 @@ static int emulate_ggtt_mmio_read(struct intel_vgpu *vgpu,
struct intel_vgpu_mm *ggtt_mm = vgpu->gtt.ggtt_mm;
const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
unsigned long index = off >> info->gtt_entry_size_shift;
unsigned long gma;
struct intel_gvt_gtt_entry e;
if (bytes != 4 && bytes != 8)
return -EINVAL;
gma = index << I915_GTT_PAGE_SHIFT;
if (!intel_gvt_ggtt_validate_range(vgpu,
gma, 1 << I915_GTT_PAGE_SHIFT)) {
gvt_dbg_mm("read invalid ggtt at 0x%lx\n", gma);
memset(p_data, 0, bytes);
return 0;
}
ggtt_get_guest_entry(ggtt_mm, &e, index);
memcpy(p_data, (void *)&e.val64 + (off & (info->gtt_entry_size - 1)),
bytes);
@@ -1904,6 +1904,18 @@ static int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn,
entry = __gvt_cache_find_gfn(info->vgpu, gfn);
if (!entry) {
ret = gvt_dma_map_page(vgpu, gfn, dma_addr, size);
if (ret)
goto err_unlock;
ret = __gvt_cache_add(info->vgpu, gfn, *dma_addr, size);
if (ret)
goto err_unmap;
} else if (entry->size != size) {
/* the same gfn with different size: unmap and re-map */
gvt_dma_unmap_page(vgpu, gfn, entry->dma_addr, entry->size);
__gvt_cache_remove_entry(vgpu, entry);
ret = gvt_dma_map_page(vgpu, gfn, dma_addr, size);
if (ret)
goto err_unlock;
@@ -364,16 +364,13 @@ static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
wa_ctx->indirect_ctx.shadow_va = NULL;
}
-static int set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
-struct i915_gem_context *ctx)
+static void set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
+struct i915_gem_context *ctx)
{
struct intel_vgpu_mm *mm = workload->shadow_mm;
struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(ctx->vm);
int i = 0;
if (mm->type != INTEL_GVT_MM_PPGTT || !mm->ppgtt_mm.shadowed)
return -EINVAL;
if (mm->ppgtt_mm.root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
px_dma(ppgtt->pd) = mm->ppgtt_mm.shadow_pdps[0];
} else {
@@ -384,8 +381,6 @@ static int set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
px_dma(pd) = mm->ppgtt_mm.shadow_pdps[i];
}
}
return 0;
}
static int
@@ -614,6 +609,8 @@ static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
static int prepare_workload(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
struct intel_vgpu_submission *s = &vgpu->submission;
int ring = workload->ring_id;
int ret = 0;
ret = intel_vgpu_pin_mm(workload->shadow_mm);
@@ -622,8 +619,16 @@ static int prepare_workload(struct intel_vgpu_workload *workload)
return ret;
}
if (workload->shadow_mm->type != INTEL_GVT_MM_PPGTT ||
!workload->shadow_mm->ppgtt_mm.shadowed) {
gvt_vgpu_err("workload shadow ppgtt isn't ready\n");
return -EINVAL;
}
update_shadow_pdps(workload);
set_context_ppgtt_from_shadow(workload, s->shadow[ring]->gem_context);
ret = intel_vgpu_sync_oos_pages(workload->vgpu);
if (ret) {
gvt_vgpu_err("fail to vgpu sync oos pages\n");
@@ -674,7 +679,6 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
struct intel_vgpu_submission *s = &vgpu->submission;
struct i915_request *rq;
int ring_id = workload->ring_id;
int ret;
@@ -685,13 +689,6 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
mutex_lock(&vgpu->vgpu_lock);
mutex_lock(&dev_priv->drm.struct_mutex);
ret = set_context_ppgtt_from_shadow(workload,
s->shadow[ring_id]->gem_context);
if (ret < 0) {
gvt_vgpu_err("workload shadow ppgtt isn't ready\n");
goto err_req;
}
ret = intel_gvt_workload_req_alloc(workload);
if (ret)
goto err_req;
@@ -990,6 +987,7 @@ static int workload_thread(void *priv)
int ret;
bool need_force_wake = (INTEL_GEN(gvt->dev_priv) >= 9);
DEFINE_WAIT_FUNC(wait, woken_wake_function);
struct intel_runtime_pm *rpm = &gvt->dev_priv->runtime_pm;
kfree(p);
@@ -1013,6 +1011,8 @@ static int workload_thread(void *priv)
workload->ring_id, workload,
workload->vgpu->id);
intel_runtime_pm_get(rpm);
gvt_dbg_sched("ring id %d will dispatch workload %p\n",
workload->ring_id, workload);
@@ -1042,6 +1042,7 @@ static int workload_thread(void *priv)
intel_uncore_forcewake_put(&gvt->dev_priv->uncore,
FORCEWAKE_ALL);
intel_runtime_pm_put_unchecked(rpm);
if (ret && (vgpu_is_vm_unhealthy(ret)))
enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
}
@@ -1492,6 +1493,12 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
RING_CTX_OFF(ctx_ctrl.val), &ctx_ctl, 4);
if (!intel_gvt_ggtt_validate_range(vgpu, start,
_RING_CTL_BUF_SIZE(ctl))) {
gvt_vgpu_err("context contain invalid rb at: 0x%x\n", start);
return ERR_PTR(-EINVAL);
}
workload = alloc_workload(vgpu);
if (IS_ERR(workload))
return workload;
@@ -1516,9 +1523,31 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
workload->wa_ctx.indirect_ctx.size =
(indirect_ctx & INDIRECT_CTX_SIZE_MASK) *
CACHELINE_BYTES;
if (workload->wa_ctx.indirect_ctx.size != 0) {
if (!intel_gvt_ggtt_validate_range(vgpu,
workload->wa_ctx.indirect_ctx.guest_gma,
workload->wa_ctx.indirect_ctx.size)) {
kmem_cache_free(s->workloads, workload);
gvt_vgpu_err("invalid wa_ctx at: 0x%lx\n",
workload->wa_ctx.indirect_ctx.guest_gma);
return ERR_PTR(-EINVAL);
}
}
workload->wa_ctx.per_ctx.guest_gma =
per_ctx & PER_CTX_ADDR_MASK;
workload->wa_ctx.per_ctx.valid = per_ctx & 1;
if (workload->wa_ctx.per_ctx.valid) {
if (!intel_gvt_ggtt_validate_range(vgpu,
workload->wa_ctx.per_ctx.guest_gma,
CACHELINE_BYTES)) {
kmem_cache_free(s->workloads, workload);
gvt_vgpu_err("invalid per_ctx at: 0x%lx\n",
workload->wa_ctx.per_ctx.guest_gma);
return ERR_PTR(-EINVAL);
}
}
}
gvt_dbg_el("workload %p ring id %d head %x tail %x start %x ctl %x\n",
@@ -28,8 +28,6 @@
*
*/
#include "trace.h"
#ifndef __CHECKER__
#define CREATE_TRACE_POINTS
#include "trace.h"
@@ -1674,8 +1674,9 @@ struct drm_i915_private {
} dram_info;
struct intel_bw_info {
-int num_planes;
-int deratedbw[3];
+unsigned int deratedbw[3]; /* for each QGV point */
+u8 num_qgv_points;
+u8 num_planes;
} max_bw[6];
struct drm_private_obj bw_obj;
@@ -46,7 +46,6 @@
#include "gem/i915_gem_ioctls.h"
#include "gem/i915_gem_pm.h"
#include "gem/i915_gemfs.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_mocs.h"
#include "gt/intel_reset.h"
@@ -1307,21 +1306,13 @@ int i915_gem_init_hw(struct drm_i915_private *dev_priv)
intel_mocs_init_l3cc_table(dev_priv);
/* Only when the HW is re-initialised, can we replay the requests */
ret = intel_engines_resume(dev_priv);
if (ret)
goto cleanup_uc;
intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
intel_engines_set_scheduler_caps(dev_priv);
return 0;
cleanup_uc:
intel_uc_fini_hw(dev_priv);
out:
intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
return ret;
}
@@ -1580,6 +1571,11 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
if (ret)
goto err_uc_init;
/* Only when the HW is re-initialised, can we replay the requests */
ret = intel_gt_resume(dev_priv);
if (ret)
goto err_init_hw;
/*
* Despite its name intel_init_clock_gating applies both display
* clock gating workarounds; GT mmio workarounds and the occasional
@@ -1593,20 +1589,20 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
ret = intel_engines_verify_workarounds(dev_priv);
if (ret)
-goto err_init_hw;
+goto err_gt;
ret = __intel_engines_record_defaults(dev_priv);
if (ret)
-goto err_init_hw;
+goto err_gt;
if (i915_inject_load_failure()) {
ret = -ENODEV;
-goto err_init_hw;
+goto err_gt;
}
if (i915_inject_load_failure()) {
ret = -EIO;
-goto err_init_hw;
+goto err_gt;
}
intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
@@ -1620,7 +1616,7 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
* HW as irrevisibly wedged, but keep enough state around that the
* driver doesn't explode during runtime.
*/
-err_init_hw:
+err_gt:
mutex_unlock(&dev_priv->drm.struct_mutex);
i915_gem_set_wedged(dev_priv);
@@ -1630,6 +1626,7 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
i915_gem_drain_workqueue(dev_priv);
mutex_lock(&dev_priv->drm.struct_mutex);
err_init_hw:
intel_uc_fini_hw(dev_priv);
err_uc_init:
intel_uc_fini(dev_priv);